drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*******************************************************************************
3   This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
4   ST Ethernet IPs are built around a Synopsys IP Core.
5
6         Copyright(C) 2007-2011 STMicroelectronics Ltd
7
8
9   Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
10
11   Documentation available at:
12         http://www.stlinux.com
13   Support available at:
14         https://bugzilla.stlinux.com/
15 *******************************************************************************/
16
17 #include <linux/clk.h>
18 #include <linux/kernel.h>
19 #include <linux/interrupt.h>
20 #include <linux/ip.h>
21 #include <linux/tcp.h>
22 #include <linux/skbuff.h>
23 #include <linux/ethtool.h>
24 #include <linux/if_ether.h>
25 #include <linux/crc32.h>
26 #include <linux/mii.h>
27 #include <linux/if.h>
28 #include <linux/if_vlan.h>
29 #include <linux/dma-mapping.h>
30 #include <linux/slab.h>
31 #include <linux/pm_runtime.h>
32 #include <linux/prefetch.h>
33 #include <linux/pinctrl/consumer.h>
34 #ifdef CONFIG_DEBUG_FS
35 #include <linux/debugfs.h>
36 #include <linux/seq_file.h>
37 #endif /* CONFIG_DEBUG_FS */
38 #include <linux/net_tstamp.h>
39 #include <linux/phylink.h>
40 #include <linux/udp.h>
41 #include <linux/bpf_trace.h>
42 #include <net/page_pool/helpers.h>
43 #include <net/pkt_cls.h>
44 #include <net/xdp_sock_drv.h>
45 #include "stmmac_ptp.h"
46 #include "stmmac.h"
47 #include "stmmac_xdp.h"
48 #include <linux/reset.h>
49 #include <linux/of_mdio.h>
50 #include "dwmac1000.h"
51 #include "dwxgmac2.h"
52 #include "hwif.h"
53
54 /* As long as the interface is active, we keep the timestamping counter enabled
55  * with fine resolution and binary rollover. This avoids non-monotonic behavior
56  * (clock jumps) when changing timestamping settings at runtime.
57  */
58 #define STMMAC_HWTS_ACTIVE      (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | \
59                                  PTP_TCR_TSCTRLSSR)
60
61 #define STMMAC_ALIGN(x)         ALIGN(ALIGN(x, SMP_CACHE_BYTES), 16)
62 #define TSO_MAX_BUFF_SIZE       (SZ_16K - 1)
63
64 /* Module parameters */
65 #define TX_TIMEO        5000
66 static int watchdog = TX_TIMEO;
67 module_param(watchdog, int, 0644);
68 MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)");
69
70 static int debug = -1;
71 module_param(debug, int, 0644);
72 MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");
73
74 static int phyaddr = -1;
75 module_param(phyaddr, int, 0444);
76 MODULE_PARM_DESC(phyaddr, "Physical device address");
77
78 #define STMMAC_TX_THRESH(x)     ((x)->dma_conf.dma_tx_size / 4)
79 #define STMMAC_RX_THRESH(x)     ((x)->dma_conf.dma_rx_size / 4)
80
81 /* Limit to make sure XDP TX and slow path can coexist */
82 #define STMMAC_XSK_TX_BUDGET_MAX        256
83 #define STMMAC_TX_XSK_AVAIL             16
84 #define STMMAC_RX_FILL_BATCH            16
85
86 #define STMMAC_XDP_PASS         0
87 #define STMMAC_XDP_CONSUMED     BIT(0)
88 #define STMMAC_XDP_TX           BIT(1)
89 #define STMMAC_XDP_REDIRECT     BIT(2)
90
91 static int flow_ctrl = FLOW_AUTO;
92 module_param(flow_ctrl, int, 0644);
93 MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");
94
95 static int pause = PAUSE_TIME;
96 module_param(pause, int, 0644);
97 MODULE_PARM_DESC(pause, "Flow Control Pause Time");
98
99 #define TC_DEFAULT 64
100 static int tc = TC_DEFAULT;
101 module_param(tc, int, 0644);
102 MODULE_PARM_DESC(tc, "DMA threshold control value");
103
104 #define DEFAULT_BUFSIZE 1536
105 static int buf_sz = DEFAULT_BUFSIZE;
106 module_param(buf_sz, int, 0644);
107 MODULE_PARM_DESC(buf_sz, "DMA buffer size");
108
109 #define STMMAC_RX_COPYBREAK     256
110
111 static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
112                                       NETIF_MSG_LINK | NETIF_MSG_IFUP |
113                                       NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);
114
115 #define STMMAC_DEFAULT_LPI_TIMER        1000
116 static int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
117 module_param(eee_timer, int, 0644);
118 MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
119 #define STMMAC_LPI_T(x) (jiffies + usecs_to_jiffies(x))
120
121 /* By default the driver uses ring mode to manage tx and rx descriptors,
122  * but the user can force the use of chain mode instead of ring mode
123  */
124 static unsigned int chain_mode;
125 module_param(chain_mode, int, 0444);
126 MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");
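/* Usage note (illustrative, not part of the driver): parameters declared
 * with mode 0644 above can be changed at runtime through sysfs, while the
 * read-only (0444) ones such as phyaddr and chain_mode must be given at
 * load time, e.g.:
 *
 *   modprobe stmmac eee_timer=2000 chain_mode=1
 *   stmmac.buf_sz=4096 on the kernel command line (built-in case)
 *   echo 8 > /sys/module/stmmac/parameters/debug   (runtime, 0644 only)
 *
 * The "stmmac" module name is assumed here; it depends on how the driver
 * is built.
 */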
127
128 static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
129 /* For MSI interrupts handling */
130 static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id);
131 static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id);
132 static irqreturn_t stmmac_msi_intr_tx(int irq, void *data);
133 static irqreturn_t stmmac_msi_intr_rx(int irq, void *data);
134 static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue);
135 static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue);
136 static void stmmac_reset_queues_param(struct stmmac_priv *priv);
137 static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue);
138 static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue);
139 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
140                                           u32 rxmode, u32 chan);
141
142 #ifdef CONFIG_DEBUG_FS
143 static const struct net_device_ops stmmac_netdev_ops;
144 static void stmmac_init_fs(struct net_device *dev);
145 static void stmmac_exit_fs(struct net_device *dev);
146 #endif
147
148 #define STMMAC_COAL_TIMER(x) (ns_to_ktime((x) * NSEC_PER_USEC))
149
150 int stmmac_bus_clks_config(struct stmmac_priv *priv, bool enabled)
151 {
152         int ret = 0;
153
154         if (enabled) {
155                 ret = clk_prepare_enable(priv->plat->stmmac_clk);
156                 if (ret)
157                         return ret;
158                 ret = clk_prepare_enable(priv->plat->pclk);
159                 if (ret) {
160                         clk_disable_unprepare(priv->plat->stmmac_clk);
161                         return ret;
162                 }
163                 if (priv->plat->clks_config) {
164                         ret = priv->plat->clks_config(priv->plat->bsp_priv, enabled);
165                         if (ret) {
166                                 clk_disable_unprepare(priv->plat->stmmac_clk);
167                                 clk_disable_unprepare(priv->plat->pclk);
168                                 return ret;
169                         }
170                 }
171         } else {
172                 clk_disable_unprepare(priv->plat->stmmac_clk);
173                 clk_disable_unprepare(priv->plat->pclk);
174                 if (priv->plat->clks_config)
175                         priv->plat->clks_config(priv->plat->bsp_priv, enabled);
176         }
177
178         return ret;
179 }
180 EXPORT_SYMBOL_GPL(stmmac_bus_clks_config);
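/* Illustrative call pattern (a sketch of a caller, not an actual call
 * site): users of this helper bracket register access with it and undo
 * the enable on error paths, e.g.:
 *
 *   ret = stmmac_bus_clks_config(priv, true);
 *   if (ret)
 *           return ret;
 *   ...access MAC/DMA registers...
 *   stmmac_bus_clks_config(priv, false);
 */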
181
182 /**
183  * stmmac_verify_args - verify the driver parameters.
184  * Description: it checks the driver parameters and sets a default in case of
185  * errors.
186  */
187 static void stmmac_verify_args(void)
188 {
189         if (unlikely(watchdog < 0))
190                 watchdog = TX_TIMEO;
191         if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB)))
192                 buf_sz = DEFAULT_BUFSIZE;
193         if (unlikely(flow_ctrl > 1))
194                 flow_ctrl = FLOW_AUTO;
195         else if (likely(flow_ctrl < 0))
196                 flow_ctrl = FLOW_OFF;
197         if (unlikely((pause < 0) || (pause > 0xffff)))
198                 pause = PAUSE_TIME;
199         if (eee_timer < 0)
200                 eee_timer = STMMAC_DEFAULT_LPI_TIMER;
201 }
202
203 static void __stmmac_disable_all_queues(struct stmmac_priv *priv)
204 {
205         u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
206         u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
207         u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
208         u32 queue;
209
210         for (queue = 0; queue < maxq; queue++) {
211                 struct stmmac_channel *ch = &priv->channel[queue];
212
213                 if (stmmac_xdp_is_enabled(priv) &&
214                     test_bit(queue, priv->af_xdp_zc_qps)) {
215                         napi_disable(&ch->rxtx_napi);
216                         continue;
217                 }
218
219                 if (queue < rx_queues_cnt)
220                         napi_disable(&ch->rx_napi);
221                 if (queue < tx_queues_cnt)
222                         napi_disable(&ch->tx_napi);
223         }
224 }
225
226 /**
227  * stmmac_disable_all_queues - Disable all queues
228  * @priv: driver private structure
229  */
230 static void stmmac_disable_all_queues(struct stmmac_priv *priv)
231 {
232         u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
233         struct stmmac_rx_queue *rx_q;
234         u32 queue;
235
236         /* synchronize_rcu() needed for pending XDP buffers to drain */
237         for (queue = 0; queue < rx_queues_cnt; queue++) {
238                 rx_q = &priv->dma_conf.rx_queue[queue];
239                 if (rx_q->xsk_pool) {
240                         synchronize_rcu();
241                         break;
242                 }
243         }
244
245         __stmmac_disable_all_queues(priv);
246 }
247
248 /**
249  * stmmac_enable_all_queues - Enable all queues
250  * @priv: driver private structure
251  */
252 static void stmmac_enable_all_queues(struct stmmac_priv *priv)
253 {
254         u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
255         u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
256         u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
257         u32 queue;
258
259         for (queue = 0; queue < maxq; queue++) {
260                 struct stmmac_channel *ch = &priv->channel[queue];
261
262                 if (stmmac_xdp_is_enabled(priv) &&
263                     test_bit(queue, priv->af_xdp_zc_qps)) {
264                         napi_enable(&ch->rxtx_napi);
265                         continue;
266                 }
267
268                 if (queue < rx_queues_cnt)
269                         napi_enable(&ch->rx_napi);
270                 if (queue < tx_queues_cnt)
271                         napi_enable(&ch->tx_napi);
272         }
273 }
274
275 static void stmmac_service_event_schedule(struct stmmac_priv *priv)
276 {
277         if (!test_bit(STMMAC_DOWN, &priv->state) &&
278             !test_and_set_bit(STMMAC_SERVICE_SCHED, &priv->state))
279                 queue_work(priv->wq, &priv->service_task);
280 }
281
282 static void stmmac_global_err(struct stmmac_priv *priv)
283 {
284         netif_carrier_off(priv->dev);
285         set_bit(STMMAC_RESET_REQUESTED, &priv->state);
286         stmmac_service_event_schedule(priv);
287 }
288
289 /**
290  * stmmac_clk_csr_set - dynamically set the MDC clock
291  * @priv: driver private structure
292  * Description: this is to dynamically set the MDC clock according to the csr
293  * clock input.
294  * Note:
295  *      If a specific clk_csr value is passed from the platform
296  *      this means that the CSR Clock Range selection cannot be
297  *      changed at run-time and it is fixed (as reported in the driver
298  * documentation). Otherwise the driver will try to set the MDC
299  *      clock dynamically according to the actual clock input.
300  */
301 static void stmmac_clk_csr_set(struct stmmac_priv *priv)
302 {
303         u32 clk_rate;
304
305         clk_rate = clk_get_rate(priv->plat->stmmac_clk);
306
307         /* The platform-provided default clk_csr is assumed valid in all
308          * cases except the ones listed below.
309          * For rates higher than the IEEE 802.3 specified frequency we
310          * cannot estimate the proper divider, as the frequency of
311          * clk_csr_i is not known. So we do not change the default
312          * divider.
313          */
314         if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
315                 if (clk_rate < CSR_F_35M)
316                         priv->clk_csr = STMMAC_CSR_20_35M;
317                 else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M))
318                         priv->clk_csr = STMMAC_CSR_35_60M;
319                 else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M))
320                         priv->clk_csr = STMMAC_CSR_60_100M;
321                 else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M))
322                         priv->clk_csr = STMMAC_CSR_100_150M;
323                 else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
324                         priv->clk_csr = STMMAC_CSR_150_250M;
325                 else if ((clk_rate >= CSR_F_250M) && (clk_rate <= CSR_F_300M))
326                         priv->clk_csr = STMMAC_CSR_250_300M;
327         }
328
329         if (priv->plat->flags & STMMAC_FLAG_HAS_SUN8I) {
330                 if (clk_rate > 160000000)
331                         priv->clk_csr = 0x03;
332                 else if (clk_rate > 80000000)
333                         priv->clk_csr = 0x02;
334                 else if (clk_rate > 40000000)
335                         priv->clk_csr = 0x01;
336                 else
337                         priv->clk_csr = 0;
338         }
339
340         if (priv->plat->has_xgmac) {
341                 if (clk_rate > 400000000)
342                         priv->clk_csr = 0x5;
343                 else if (clk_rate > 350000000)
344                         priv->clk_csr = 0x4;
345                 else if (clk_rate > 300000000)
346                         priv->clk_csr = 0x3;
347                 else if (clk_rate > 250000000)
348                         priv->clk_csr = 0x2;
349                 else if (clk_rate > 150000000)
350                         priv->clk_csr = 0x1;
351                 else
352                         priv->clk_csr = 0x0;
353         }
354 }
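/* Worked example (illustrative rate): with a 75 MHz CSR clock and no
 * platform-fixed clk_csr, the 60-100 MHz branch above selects
 * STMMAC_CSR_60_100M, keeping MDC below the 2.5 MHz limit that IEEE 802.3
 * allows for MDIO. Rates above 300 MHz match none of the branches in the
 * first block and leave the platform default divider untouched.
 */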
355
356 static void print_pkt(unsigned char *buf, int len)
357 {
358         pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf);
359         print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
360 }
361
362 static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
363 {
364         struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
365         u32 avail;
366
367         if (tx_q->dirty_tx > tx_q->cur_tx)
368                 avail = tx_q->dirty_tx - tx_q->cur_tx - 1;
369         else
370                 avail = priv->dma_conf.dma_tx_size - tx_q->cur_tx + tx_q->dirty_tx - 1;
371
372         return avail;
373 }
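/* Worked example (illustrative numbers): with dma_tx_size = 512,
 * cur_tx = 10 and dirty_tx = 4 the ring has not wrapped, so
 * avail = 512 - 10 + 4 - 1 = 505 free descriptors. After cur_tx wraps
 * while dirty_tx has not (e.g. dirty_tx = 10, cur_tx = 4), the first
 * branch applies and avail = 10 - 4 - 1 = 5. The "- 1" keeps one slot
 * unused so that cur_tx == dirty_tx always means "ring empty".
 */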
374
375 /**
376  * stmmac_rx_dirty - Get RX queue dirty
377  * @priv: driver private structure
378  * @queue: RX queue index
379  */
380 static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
381 {
382         struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
383         u32 dirty;
384
385         if (rx_q->dirty_rx <= rx_q->cur_rx)
386                 dirty = rx_q->cur_rx - rx_q->dirty_rx;
387         else
388                 dirty = priv->dma_conf.dma_rx_size - rx_q->dirty_rx + rx_q->cur_rx;
389
390         return dirty;
391 }
392
393 static void stmmac_lpi_entry_timer_config(struct stmmac_priv *priv, bool en)
394 {
395         int tx_lpi_timer;
396
397         /* Clear/set the SW EEE timer flag based on LPI ET enablement */
398         priv->eee_sw_timer_en = en ? 0 : 1;
399         tx_lpi_timer  = en ? priv->tx_lpi_timer : 0;
400         stmmac_set_eee_lpi_timer(priv, priv->hw, tx_lpi_timer);
401 }
402
403 /**
404  * stmmac_enable_eee_mode - check and enter in LPI mode
405  * @priv: driver private structure
406  * Description: verify that all TX queues have finished their work and,
407  * if so, enter LPI mode when EEE is enabled.
408  */
409 static int stmmac_enable_eee_mode(struct stmmac_priv *priv)
410 {
411         u32 tx_cnt = priv->plat->tx_queues_to_use;
412         u32 queue;
413
414         /* check if all TX queues have the work finished */
415         for (queue = 0; queue < tx_cnt; queue++) {
416                 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
417
418                 if (tx_q->dirty_tx != tx_q->cur_tx)
419                         return -EBUSY; /* still unfinished work */
420         }
421
422         /* Check and enter in LPI mode */
423         if (!priv->tx_path_in_lpi_mode)
424                 stmmac_set_eee_mode(priv, priv->hw,
425                         priv->plat->flags & STMMAC_FLAG_EN_TX_LPI_CLOCKGATING);
426         return 0;
427 }
428
429 /**
430  * stmmac_disable_eee_mode - disable and exit from LPI mode
431  * @priv: driver private structure
432  * Description: this function exits and disables EEE when the TX path is
433  * in the LPI state. It is called from the xmit path.
434  */
435 void stmmac_disable_eee_mode(struct stmmac_priv *priv)
436 {
437         if (!priv->eee_sw_timer_en) {
438                 stmmac_lpi_entry_timer_config(priv, 0);
439                 return;
440         }
441
442         stmmac_reset_eee_mode(priv, priv->hw);
443         del_timer_sync(&priv->eee_ctrl_timer);
444         priv->tx_path_in_lpi_mode = false;
445 }
446
447 /**
448  * stmmac_eee_ctrl_timer - EEE TX SW timer.
449  * @t:  timer_list struct containing private info
450  * Description:
451  *  if there is no data transfer and if we are not in LPI state,
452  *  then the MAC transmitter can be moved to the LPI state.
453  */
454 static void stmmac_eee_ctrl_timer(struct timer_list *t)
455 {
456         struct stmmac_priv *priv = from_timer(priv, t, eee_ctrl_timer);
457
458         if (stmmac_enable_eee_mode(priv))
459                 mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
460 }
461
462 /**
463  * stmmac_eee_init - init EEE
464  * @priv: driver private structure
465  * Description:
466  *  if the GMAC supports the EEE (from the HW cap reg) and the phy device
467  *  can also manage EEE, this function enables the LPI state and starts the
468  *  related timer.
469  */
470 bool stmmac_eee_init(struct stmmac_priv *priv)
471 {
472         int eee_tw_timer = priv->eee_tw_timer;
473
474         /* When using the PCS we cannot deal with the PHY registers at this
475          * stage, so extra features like EEE are not supported.
476          */
477         if (priv->hw->pcs == STMMAC_PCS_TBI ||
478             priv->hw->pcs == STMMAC_PCS_RTBI)
479                 return false;
480
481         /* Check if MAC core supports the EEE feature. */
482         if (!priv->dma_cap.eee)
483                 return false;
484
485         mutex_lock(&priv->lock);
486
487         /* Check if it needs to be deactivated */
488         if (!priv->eee_active) {
489                 if (priv->eee_enabled) {
490                         netdev_dbg(priv->dev, "disable EEE\n");
491                         stmmac_lpi_entry_timer_config(priv, 0);
492                         del_timer_sync(&priv->eee_ctrl_timer);
493                         stmmac_set_eee_timer(priv, priv->hw, 0, eee_tw_timer);
494                         if (priv->hw->xpcs)
495                                 xpcs_config_eee(priv->hw->xpcs,
496                                                 priv->plat->mult_fact_100ns,
497                                                 false);
498                 }
499                 mutex_unlock(&priv->lock);
500                 return false;
501         }
502
503         if (priv->eee_active && !priv->eee_enabled) {
504                 timer_setup(&priv->eee_ctrl_timer, stmmac_eee_ctrl_timer, 0);
505                 stmmac_set_eee_timer(priv, priv->hw, STMMAC_DEFAULT_LIT_LS,
506                                      eee_tw_timer);
507                 if (priv->hw->xpcs)
508                         xpcs_config_eee(priv->hw->xpcs,
509                                         priv->plat->mult_fact_100ns,
510                                         true);
511         }
512
513         if (priv->plat->has_gmac4 && priv->tx_lpi_timer <= STMMAC_ET_MAX) {
514                 del_timer_sync(&priv->eee_ctrl_timer);
515                 priv->tx_path_in_lpi_mode = false;
516                 stmmac_lpi_entry_timer_config(priv, 1);
517         } else {
518                 stmmac_lpi_entry_timer_config(priv, 0);
519                 mod_timer(&priv->eee_ctrl_timer,
520                           STMMAC_LPI_T(priv->tx_lpi_timer));
521         }
522
523         mutex_unlock(&priv->lock);
524         netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
525         return true;
526 }
527
528 /* stmmac_get_tx_hwtstamp - get HW TX timestamps
529  * @priv: driver private structure
530  * @p : descriptor pointer
531  * @skb : the socket buffer
532  * Description :
533  * This function reads the timestamp from the descriptor, performs some
534  * sanity checks and then passes it to the stack.
535  */
536 static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
537                                    struct dma_desc *p, struct sk_buff *skb)
538 {
539         struct skb_shared_hwtstamps shhwtstamp;
540         bool found = false;
541         u64 ns = 0;
542
543         if (!priv->hwts_tx_en)
544                 return;
545
546         /* exit if skb doesn't support hw tstamp */
547         if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
548                 return;
549
550         /* check tx tstamp status */
551         if (stmmac_get_tx_timestamp_status(priv, p)) {
552                 stmmac_get_timestamp(priv, p, priv->adv_ts, &ns);
553                 found = true;
554         } else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) {
555                 found = true;
556         }
557
558         if (found) {
559                 ns -= priv->plat->cdc_error_adj;
560
561                 memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
562                 shhwtstamp.hwtstamp = ns_to_ktime(ns);
563
564                 netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns);
565                 /* pass tstamp to stack */
566                 skb_tstamp_tx(skb, &shhwtstamp);
567         }
568 }
569
570 /* stmmac_get_rx_hwtstamp - get HW RX timestamps
571  * @priv: driver private structure
572  * @p : descriptor pointer
573  * @np : next descriptor pointer
574  * @skb : the socket buffer
575  * Description :
576  * This function reads the received packet's timestamp from the descriptor
577  * and passes it to the stack. It also performs some sanity checks.
578  */
579 static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
580                                    struct dma_desc *np, struct sk_buff *skb)
581 {
582         struct skb_shared_hwtstamps *shhwtstamp = NULL;
583         struct dma_desc *desc = p;
584         u64 ns = 0;
585
586         if (!priv->hwts_rx_en)
587                 return;
588         /* For GMAC4, the valid timestamp is from CTX next desc. */
589         if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
590                 desc = np;
591
592         /* Check if timestamp is available */
593         if (stmmac_get_rx_timestamp_status(priv, p, np, priv->adv_ts)) {
594                 stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);
595
596                 ns -= priv->plat->cdc_error_adj;
597
598                 netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
599                 shhwtstamp = skb_hwtstamps(skb);
600                 memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
601                 shhwtstamp->hwtstamp = ns_to_ktime(ns);
602         } else  {
603                 netdev_dbg(priv->dev, "cannot get RX hw timestamp\n");
604         }
605 }
606
607 /**
608  *  stmmac_hwtstamp_set - control hardware timestamping.
609  *  @dev: device pointer.
610  *  @ifr: An IOCTL specific structure, that can contain a pointer to
611  *  a proprietary structure used to pass information to the driver.
612  *  Description:
613  *  This function configures the MAC to enable/disable both outgoing(TX)
614  *  and incoming(RX) packets time stamping based on user input.
615  *  Return Value:
616  *  0 on success and an appropriate -ve integer on failure.
617  */
618 static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
619 {
620         struct stmmac_priv *priv = netdev_priv(dev);
621         struct hwtstamp_config config;
622         u32 ptp_v2 = 0;
623         u32 tstamp_all = 0;
624         u32 ptp_over_ipv4_udp = 0;
625         u32 ptp_over_ipv6_udp = 0;
626         u32 ptp_over_ethernet = 0;
627         u32 snap_type_sel = 0;
628         u32 ts_master_en = 0;
629         u32 ts_event_en = 0;
630
631         if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
632                 netdev_alert(priv->dev, "No support for HW time stamping\n");
633                 priv->hwts_tx_en = 0;
634                 priv->hwts_rx_en = 0;
635
636                 return -EOPNOTSUPP;
637         }
638
639         if (copy_from_user(&config, ifr->ifr_data,
640                            sizeof(config)))
641                 return -EFAULT;
642
643         netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
644                    __func__, config.flags, config.tx_type, config.rx_filter);
645
646         if (config.tx_type != HWTSTAMP_TX_OFF &&
647             config.tx_type != HWTSTAMP_TX_ON)
648                 return -ERANGE;
649
650         if (priv->adv_ts) {
651                 switch (config.rx_filter) {
652                 case HWTSTAMP_FILTER_NONE:
653                         /* time stamp no incoming packet at all */
654                         config.rx_filter = HWTSTAMP_FILTER_NONE;
655                         break;
656
657                 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
658                         /* PTP v1, UDP, any kind of event packet */
659                         config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
660                         /* 'xmac' hardware can support Sync, Pdelay_Req and
661                          * Pdelay_resp by setting bit14 and bits17/16 to 01
662                          * This leaves Delay_Req timestamps out.
663                          * Enable all events *and* general purpose message
664                          * timestamping
665                          */
666                         snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
667                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
668                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
669                         break;
670
671                 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
672                         /* PTP v1, UDP, Sync packet */
673                         config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
674                         /* take time stamp for SYNC messages only */
675                         ts_event_en = PTP_TCR_TSEVNTENA;
676
677                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
678                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
679                         break;
680
681                 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
682                         /* PTP v1, UDP, Delay_req packet */
683                         config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
684                         /* take time stamp for Delay_Req messages only */
685                         ts_master_en = PTP_TCR_TSMSTRENA;
686                         ts_event_en = PTP_TCR_TSEVNTENA;
687
688                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
689                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
690                         break;
691
692                 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
693                         /* PTP v2, UDP, any kind of event packet */
694                         config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
695                         ptp_v2 = PTP_TCR_TSVER2ENA;
696                         /* take time stamp for all event messages */
697                         snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
698
699                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
700                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
701                         break;
702
703                 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
704                         /* PTP v2, UDP, Sync packet */
705                         config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
706                         ptp_v2 = PTP_TCR_TSVER2ENA;
707                         /* take time stamp for SYNC messages only */
708                         ts_event_en = PTP_TCR_TSEVNTENA;
709
710                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
711                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
712                         break;
713
714                 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
715                         /* PTP v2, UDP, Delay_req packet */
716                         config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
717                         ptp_v2 = PTP_TCR_TSVER2ENA;
718                         /* take time stamp for Delay_Req messages only */
719                         ts_master_en = PTP_TCR_TSMSTRENA;
720                         ts_event_en = PTP_TCR_TSEVNTENA;
721
722                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
723                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
724                         break;
725
726                 case HWTSTAMP_FILTER_PTP_V2_EVENT:
727                         /* PTP v2/802.AS1 any layer, any kind of event packet */
728                         config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
729                         ptp_v2 = PTP_TCR_TSVER2ENA;
730                         snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
731                         if (priv->synopsys_id < DWMAC_CORE_4_10)
732                                 ts_event_en = PTP_TCR_TSEVNTENA;
733                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
734                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
735                         ptp_over_ethernet = PTP_TCR_TSIPENA;
736                         break;
737
738                 case HWTSTAMP_FILTER_PTP_V2_SYNC:
739                         /* PTP v2/802.AS1, any layer, Sync packet */
740                         config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
741                         ptp_v2 = PTP_TCR_TSVER2ENA;
742                         /* take time stamp for SYNC messages only */
743                         ts_event_en = PTP_TCR_TSEVNTENA;
744
745                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
746                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
747                         ptp_over_ethernet = PTP_TCR_TSIPENA;
748                         break;
749
750                 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
751                         /* PTP v2/802.AS1, any layer, Delay_req packet */
752                         config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
753                         ptp_v2 = PTP_TCR_TSVER2ENA;
754                         /* take time stamp for Delay_Req messages only */
755                         ts_master_en = PTP_TCR_TSMSTRENA;
756                         ts_event_en = PTP_TCR_TSEVNTENA;
757
758                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
759                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
760                         ptp_over_ethernet = PTP_TCR_TSIPENA;
761                         break;
762
763                 case HWTSTAMP_FILTER_NTP_ALL:
764                 case HWTSTAMP_FILTER_ALL:
765                         /* time stamp any incoming packet */
766                         config.rx_filter = HWTSTAMP_FILTER_ALL;
767                         tstamp_all = PTP_TCR_TSENALL;
768                         break;
769
770                 default:
771                         return -ERANGE;
772                 }
773         } else {
774                 switch (config.rx_filter) {
775                 case HWTSTAMP_FILTER_NONE:
776                         config.rx_filter = HWTSTAMP_FILTER_NONE;
777                         break;
778                 default:
779                         /* PTP v1, UDP, any kind of event packet */
780                         config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
781                         break;
782                 }
783         }
784         priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
785         priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;
786
787         priv->systime_flags = STMMAC_HWTS_ACTIVE;
788
789         if (priv->hwts_tx_en || priv->hwts_rx_en) {
790                 priv->systime_flags |= tstamp_all | ptp_v2 |
791                                        ptp_over_ethernet | ptp_over_ipv6_udp |
792                                        ptp_over_ipv4_udp | ts_event_en |
793                                        ts_master_en | snap_type_sel;
794         }
795
796         stmmac_config_hw_tstamping(priv, priv->ptpaddr, priv->systime_flags);
797
798         memcpy(&priv->tstamp_config, &config, sizeof(config));
799
800         return copy_to_user(ifr->ifr_data, &config,
801                             sizeof(config)) ? -EFAULT : 0;
802 }
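/* Illustrative userspace counterpart (a sketch, not driver code): the
 * configuration above is driven through the standard SIOCSHWTSTAMP ioctl.
 * The socket fd and the "eth0" name below are assumptions; the headers
 * needed are <sys/ioctl.h>, <net/if.h>, <linux/sockios.h> and
 * <linux/net_tstamp.h>, plus the usual libc headers.
 *
 *   struct hwtstamp_config cfg = { 0 };
 *   struct ifreq ifr = { 0 };
 *
 *   cfg.tx_type   = HWTSTAMP_TX_ON;
 *   cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
 *   strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *   ifr.ifr_data = (char *)&cfg;
 *   if (ioctl(sock_fd, SIOCSHWTSTAMP, &ifr) < 0)
 *           perror("SIOCSHWTSTAMP");
 */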
803
804 /**
805  *  stmmac_hwtstamp_get - read hardware timestamping.
806  *  @dev: device pointer.
807  *  @ifr: An IOCTL specific structure, that can contain a pointer to
808  *  a proprietary structure used to pass information to the driver.
809  *  Description:
810  *  This function obtains the current hardware timestamping settings
811  *  as requested.
812  */
813 static int stmmac_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
814 {
815         struct stmmac_priv *priv = netdev_priv(dev);
816         struct hwtstamp_config *config = &priv->tstamp_config;
817
818         if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
819                 return -EOPNOTSUPP;
820
821         return copy_to_user(ifr->ifr_data, config,
822                             sizeof(*config)) ? -EFAULT : 0;
823 }
824
825 /**
826  * stmmac_init_tstamp_counter - init hardware timestamping counter
827  * @priv: driver private structure
828  * @systime_flags: timestamping flags
829  * Description:
830  * Initialize hardware counter for packet timestamping.
831  * This is valid as long as the interface is open and not suspended.
832  * It is rerun after resuming from suspend, in which case the timestamping
833  * flags updated by stmmac_hwtstamp_set() also need to be restored.
834  */
835 int stmmac_init_tstamp_counter(struct stmmac_priv *priv, u32 systime_flags)
836 {
837         bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
838         struct timespec64 now;
839         u32 sec_inc = 0;
840         u64 temp = 0;
841
842         if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
843                 return -EOPNOTSUPP;
844
845         stmmac_config_hw_tstamping(priv, priv->ptpaddr, systime_flags);
846         priv->systime_flags = systime_flags;
847
848         /* program Sub Second Increment reg */
849         stmmac_config_sub_second_increment(priv, priv->ptpaddr,
850                                            priv->plat->clk_ptp_rate,
851                                            xmac, &sec_inc);
852         temp = div_u64(1000000000ULL, sec_inc);
853
854         /* Store sub second increment for later use */
855         priv->sub_second_inc = sec_inc;
856
857         /* calculate default added value:
858          * formula is :
859          * addend = (2^32)/freq_div_ratio;
860          * where, freq_div_ratio = 1e9ns/sec_inc
861          */
862         temp = (u64)(temp << 32);
863         priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
864         stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend);
865
866         /* initialize system time */
867         ktime_get_real_ts64(&now);
868
869         /* lower 32 bits of tv_sec are safe until y2106 */
870         stmmac_init_systime(priv, priv->ptpaddr, (u32)now.tv_sec, now.tv_nsec);
871
872         return 0;
873 }
874 EXPORT_SYMBOL_GPL(stmmac_init_tstamp_counter);
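/* Worked example for the addend computed above (illustrative numbers, not
 * a fixed configuration): with sec_inc = 20 ns and clk_ptp_rate = 100 MHz,
 *
 *   temp   = 10^9 / 20           = 50,000,000
 *   addend = (temp << 32) / 10^8 = 2^31 = 0x80000000
 *
 * so, with the fine-update method, the 32-bit accumulator overflows every
 * second PTP clock cycle and the sub-second counter advances by 20 ns for
 * every 20 ns of real time.
 */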
875
876 /**
877  * stmmac_init_ptp - init PTP
878  * @priv: driver private structure
879  * Description: this is to verify if the HW supports the PTPv1 or PTPv2.
880  * This is done by looking at the HW cap. register.
881  * This function also registers the ptp driver.
882  */
883 static int stmmac_init_ptp(struct stmmac_priv *priv)
884 {
885         bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
886         int ret;
887
888         if (priv->plat->ptp_clk_freq_config)
889                 priv->plat->ptp_clk_freq_config(priv);
890
891         ret = stmmac_init_tstamp_counter(priv, STMMAC_HWTS_ACTIVE);
892         if (ret)
893                 return ret;
894
895         priv->adv_ts = 0;
896         /* Check if adv_ts can be enabled for dwmac 4.x / xgmac core */
897         if (xmac && priv->dma_cap.atime_stamp)
898                 priv->adv_ts = 1;
899         /* Dwmac 3.x core with extend_desc can support adv_ts */
900         else if (priv->extend_desc && priv->dma_cap.atime_stamp)
901                 priv->adv_ts = 1;
902
903         if (priv->dma_cap.time_stamp)
904                 netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n");
905
906         if (priv->adv_ts)
907                 netdev_info(priv->dev,
908                             "IEEE 1588-2008 Advanced Timestamp supported\n");
909
910         priv->hwts_tx_en = 0;
911         priv->hwts_rx_en = 0;
912
913         if (priv->plat->flags & STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY)
914                 stmmac_hwtstamp_correct_latency(priv, priv);
915
916         return 0;
917 }
918
919 static void stmmac_release_ptp(struct stmmac_priv *priv)
920 {
921         clk_disable_unprepare(priv->plat->clk_ptp_ref);
922         stmmac_ptp_unregister(priv);
923 }
924
925 /**
926  *  stmmac_mac_flow_ctrl - Configure flow control in all queues
927  *  @priv: driver private structure
928  *  @duplex: duplex passed to the next function
929  *  Description: It is used for configuring the flow control in all queues
930  */
931 static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex)
932 {
933         u32 tx_cnt = priv->plat->tx_queues_to_use;
934
935         stmmac_flow_ctrl(priv, priv->hw, duplex, priv->flow_ctrl,
936                         priv->pause, tx_cnt);
937 }
938
939 static struct phylink_pcs *stmmac_mac_select_pcs(struct phylink_config *config,
940                                                  phy_interface_t interface)
941 {
942         struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
943
944         if (priv->hw->xpcs)
945                 return &priv->hw->xpcs->pcs;
946
947         if (priv->hw->lynx_pcs)
948                 return priv->hw->lynx_pcs;
949
950         return NULL;
951 }
952
953 static void stmmac_mac_config(struct phylink_config *config, unsigned int mode,
954                               const struct phylink_link_state *state)
955 {
956         /* Nothing to do, xpcs_config() handles everything */
957 }
958
959 static void stmmac_fpe_link_state_handle(struct stmmac_priv *priv, bool is_up)
960 {
961         struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
962         enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
963         enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
964         bool *hs_enable = &fpe_cfg->hs_enable;
965
966         if (is_up && *hs_enable) {
967                 stmmac_fpe_send_mpacket(priv, priv->ioaddr, fpe_cfg,
968                                         MPACKET_VERIFY);
969         } else {
970                 *lo_state = FPE_STATE_OFF;
971                 *lp_state = FPE_STATE_OFF;
972         }
973 }
974
975 static void stmmac_mac_link_down(struct phylink_config *config,
976                                  unsigned int mode, phy_interface_t interface)
977 {
978         struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
979
980         stmmac_mac_set(priv, priv->ioaddr, false);
981         priv->eee_active = false;
982         priv->tx_lpi_enabled = false;
983         priv->eee_enabled = stmmac_eee_init(priv);
984         stmmac_set_eee_pls(priv, priv->hw, false);
985
986         if (priv->dma_cap.fpesel)
987                 stmmac_fpe_link_state_handle(priv, false);
988 }
989
990 static void stmmac_mac_link_up(struct phylink_config *config,
991                                struct phy_device *phy,
992                                unsigned int mode, phy_interface_t interface,
993                                int speed, int duplex,
994                                bool tx_pause, bool rx_pause)
995 {
996         struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
997         u32 old_ctrl, ctrl;
998
999         if ((priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
1000             priv->plat->serdes_powerup)
1001                 priv->plat->serdes_powerup(priv->dev, priv->plat->bsp_priv);
1002
1003         old_ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
1004         ctrl = old_ctrl & ~priv->hw->link.speed_mask;
1005
1006         if (interface == PHY_INTERFACE_MODE_USXGMII) {
1007                 switch (speed) {
1008                 case SPEED_10000:
1009                         ctrl |= priv->hw->link.xgmii.speed10000;
1010                         break;
1011                 case SPEED_5000:
1012                         ctrl |= priv->hw->link.xgmii.speed5000;
1013                         break;
1014                 case SPEED_2500:
1015                         ctrl |= priv->hw->link.xgmii.speed2500;
1016                         break;
1017                 default:
1018                         return;
1019                 }
1020         } else if (interface == PHY_INTERFACE_MODE_XLGMII) {
1021                 switch (speed) {
1022                 case SPEED_100000:
1023                         ctrl |= priv->hw->link.xlgmii.speed100000;
1024                         break;
1025                 case SPEED_50000:
1026                         ctrl |= priv->hw->link.xlgmii.speed50000;
1027                         break;
1028                 case SPEED_40000:
1029                         ctrl |= priv->hw->link.xlgmii.speed40000;
1030                         break;
1031                 case SPEED_25000:
1032                         ctrl |= priv->hw->link.xlgmii.speed25000;
1033                         break;
1034                 case SPEED_10000:
1035                         ctrl |= priv->hw->link.xgmii.speed10000;
1036                         break;
1037                 case SPEED_2500:
1038                         ctrl |= priv->hw->link.speed2500;
1039                         break;
1040                 case SPEED_1000:
1041                         ctrl |= priv->hw->link.speed1000;
1042                         break;
1043                 default:
1044                         return;
1045                 }
1046         } else {
1047                 switch (speed) {
1048                 case SPEED_2500:
1049                         ctrl |= priv->hw->link.speed2500;
1050                         break;
1051                 case SPEED_1000:
1052                         ctrl |= priv->hw->link.speed1000;
1053                         break;
1054                 case SPEED_100:
1055                         ctrl |= priv->hw->link.speed100;
1056                         break;
1057                 case SPEED_10:
1058                         ctrl |= priv->hw->link.speed10;
1059                         break;
1060                 default:
1061                         return;
1062                 }
1063         }
1064
1065         priv->speed = speed;
1066
1067         if (priv->plat->fix_mac_speed)
1068                 priv->plat->fix_mac_speed(priv->plat->bsp_priv, speed, mode);
1069
1070         if (!duplex)
1071                 ctrl &= ~priv->hw->link.duplex;
1072         else
1073                 ctrl |= priv->hw->link.duplex;
1074
1075         /* Flow Control operation */
1076         if (rx_pause && tx_pause)
1077                 priv->flow_ctrl = FLOW_AUTO;
1078         else if (rx_pause && !tx_pause)
1079                 priv->flow_ctrl = FLOW_RX;
1080         else if (!rx_pause && tx_pause)
1081                 priv->flow_ctrl = FLOW_TX;
1082         else
1083                 priv->flow_ctrl = FLOW_OFF;
1084
1085         stmmac_mac_flow_ctrl(priv, duplex);
1086
1087         if (ctrl != old_ctrl)
1088                 writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
1089
1090         stmmac_mac_set(priv, priv->ioaddr, true);
1091         if (phy && priv->dma_cap.eee) {
1092                 priv->eee_active =
1093                         phy_init_eee(phy, !(priv->plat->flags &
1094                                 STMMAC_FLAG_RX_CLK_RUNS_IN_LPI)) >= 0;
1095                 priv->eee_enabled = stmmac_eee_init(priv);
1096                 priv->tx_lpi_enabled = priv->eee_enabled;
1097                 stmmac_set_eee_pls(priv, priv->hw, true);
1098         }
1099
1100         if (priv->dma_cap.fpesel)
1101                 stmmac_fpe_link_state_handle(priv, true);
1102
1103         if (priv->plat->flags & STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY)
1104                 stmmac_hwtstamp_correct_latency(priv, priv);
1105 }
1106
1107 static const struct phylink_mac_ops stmmac_phylink_mac_ops = {
1108         .mac_select_pcs = stmmac_mac_select_pcs,
1109         .mac_config = stmmac_mac_config,
1110         .mac_link_down = stmmac_mac_link_down,
1111         .mac_link_up = stmmac_mac_link_up,
1112 };
1113
1114 /**
1115  * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
1116  * @priv: driver private structure
1117  * Description: this is to verify if the HW supports the PCS.
1118  * The Physical Coding Sublayer (PCS) interface can be used when the MAC is
1119  * configured for the TBI, RTBI, or SGMII PHY interface.
1120  */
1121 static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
1122 {
1123         int interface = priv->plat->mac_interface;
1124
1125         if (priv->dma_cap.pcs) {
1126                 if ((interface == PHY_INTERFACE_MODE_RGMII) ||
1127                     (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
1128                     (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
1129                     (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
1130                         netdev_dbg(priv->dev, "PCS RGMII support enabled\n");
1131                         priv->hw->pcs = STMMAC_PCS_RGMII;
1132                 } else if (interface == PHY_INTERFACE_MODE_SGMII) {
1133                         netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
1134                         priv->hw->pcs = STMMAC_PCS_SGMII;
1135                 }
1136         }
1137 }
1138
1139 /**
1140  * stmmac_init_phy - PHY initialization
1141  * @dev: net device structure
1142  * Description: it initializes the driver's PHY state, and attaches the PHY
1143  * to the mac driver.
1144  *  Return value:
1145  *  0 on success
1146  */
1147 static int stmmac_init_phy(struct net_device *dev)
1148 {
1149         struct stmmac_priv *priv = netdev_priv(dev);
1150         struct fwnode_handle *phy_fwnode;
1151         struct fwnode_handle *fwnode;
1152         int ret;
1153
1154         if (!phylink_expects_phy(priv->phylink))
1155                 return 0;
1156
1157         fwnode = priv->plat->port_node;
1158         if (!fwnode)
1159                 fwnode = dev_fwnode(priv->device);
1160
1161         if (fwnode)
1162                 phy_fwnode = fwnode_get_phy_node(fwnode);
1163         else
1164                 phy_fwnode = NULL;
1165
1166         /* Some DT bindings do not set up the PHY handle. Let's try to
1167          * parse it manually.
1168          */
1169         if (!phy_fwnode || IS_ERR(phy_fwnode)) {
1170                 int addr = priv->plat->phy_addr;
1171                 struct phy_device *phydev;
1172
1173                 if (addr < 0) {
1174                         netdev_err(priv->dev, "no phy found\n");
1175                         return -ENODEV;
1176                 }
1177
1178                 phydev = mdiobus_get_phy(priv->mii, addr);
1179                 if (!phydev) {
1180                         netdev_err(priv->dev, "no phy at addr %d\n", addr);
1181                         return -ENODEV;
1182                 }
1183
1184                 ret = phylink_connect_phy(priv->phylink, phydev);
1185         } else {
1186                 fwnode_handle_put(phy_fwnode);
1187                 ret = phylink_fwnode_phy_connect(priv->phylink, fwnode, 0);
1188         }
1189
1190         if (!priv->plat->pmt) {
1191                 struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
1192
1193                 phylink_ethtool_get_wol(priv->phylink, &wol);
1194                 device_set_wakeup_capable(priv->device, !!wol.supported);
1195                 device_set_wakeup_enable(priv->device, !!wol.wolopts);
1196         }
1197
1198         return ret;
1199 }
1200
1201 static void stmmac_set_half_duplex(struct stmmac_priv *priv)
1202 {
1203         /* Half-duplex can only work with a single tx queue */
1204         if (priv->plat->tx_queues_to_use > 1)
1205                 priv->phylink_config.mac_capabilities &=
1206                         ~(MAC_10HD | MAC_100HD | MAC_1000HD);
1207         else
1208                 priv->phylink_config.mac_capabilities |=
1209                         (MAC_10HD | MAC_100HD | MAC_1000HD);
1210 }
1211
1212 static int stmmac_phy_setup(struct stmmac_priv *priv)
1213 {
1214         struct stmmac_mdio_bus_data *mdio_bus_data;
1215         int mode = priv->plat->phy_interface;
1216         struct fwnode_handle *fwnode;
1217         struct phylink *phylink;
1218         int max_speed;
1219
1220         priv->phylink_config.dev = &priv->dev->dev;
1221         priv->phylink_config.type = PHYLINK_NETDEV;
1222         priv->phylink_config.mac_managed_pm = true;
1223
1224         mdio_bus_data = priv->plat->mdio_bus_data;
1225         if (mdio_bus_data)
1226                 priv->phylink_config.ovr_an_inband =
1227                         mdio_bus_data->xpcs_an_inband;
1228
1229         /* Set the platform/firmware specified interface mode. Note, phylink
1230          * deals with the PHY interface mode, not the MAC interface mode.
1231          */
1232         __set_bit(mode, priv->phylink_config.supported_interfaces);
1233
1234         /* If we have an xpcs, it defines which PHY interfaces are supported. */
1235         if (priv->hw->xpcs)
1236                 xpcs_get_interfaces(priv->hw->xpcs,
1237                                     priv->phylink_config.supported_interfaces);
1238
1239         priv->phylink_config.mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
1240                                                 MAC_10FD | MAC_100FD |
1241                                                 MAC_1000FD;
1242
1243         stmmac_set_half_duplex(priv);
1244
1245         /* Get the MAC specific capabilities */
1246         stmmac_mac_phylink_get_caps(priv);
1247
1248         max_speed = priv->plat->max_speed;
1249         if (max_speed)
1250                 phylink_limit_mac_speed(&priv->phylink_config, max_speed);
1251
1252         fwnode = priv->plat->port_node;
1253         if (!fwnode)
1254                 fwnode = dev_fwnode(priv->device);
1255
1256         phylink = phylink_create(&priv->phylink_config, fwnode,
1257                                  mode, &stmmac_phylink_mac_ops);
1258         if (IS_ERR(phylink))
1259                 return PTR_ERR(phylink);
1260
1261         priv->phylink = phylink;
1262         return 0;
1263 }
1264
1265 static void stmmac_display_rx_rings(struct stmmac_priv *priv,
1266                                     struct stmmac_dma_conf *dma_conf)
1267 {
1268         u32 rx_cnt = priv->plat->rx_queues_to_use;
1269         unsigned int desc_size;
1270         void *head_rx;
1271         u32 queue;
1272
1273         /* Display RX rings */
1274         for (queue = 0; queue < rx_cnt; queue++) {
1275                 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1276
1277                 pr_info("\tRX Queue %u rings\n", queue);
1278
1279                 if (priv->extend_desc) {
1280                         head_rx = (void *)rx_q->dma_erx;
1281                         desc_size = sizeof(struct dma_extended_desc);
1282                 } else {
1283                         head_rx = (void *)rx_q->dma_rx;
1284                         desc_size = sizeof(struct dma_desc);
1285                 }
1286
1287                 /* Display RX ring */
1288                 stmmac_display_ring(priv, head_rx, dma_conf->dma_rx_size, true,
1289                                     rx_q->dma_rx_phy, desc_size);
1290         }
1291 }
1292
1293 static void stmmac_display_tx_rings(struct stmmac_priv *priv,
1294                                     struct stmmac_dma_conf *dma_conf)
1295 {
1296         u32 tx_cnt = priv->plat->tx_queues_to_use;
1297         unsigned int desc_size;
1298         void *head_tx;
1299         u32 queue;
1300
1301         /* Display TX rings */
1302         for (queue = 0; queue < tx_cnt; queue++) {
1303                 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1304
1305                 pr_info("\tTX Queue %d rings\n", queue);
1306
1307                 if (priv->extend_desc) {
1308                         head_tx = (void *)tx_q->dma_etx;
1309                         desc_size = sizeof(struct dma_extended_desc);
1310                 } else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
1311                         head_tx = (void *)tx_q->dma_entx;
1312                         desc_size = sizeof(struct dma_edesc);
1313                 } else {
1314                         head_tx = (void *)tx_q->dma_tx;
1315                         desc_size = sizeof(struct dma_desc);
1316                 }
1317
1318                 stmmac_display_ring(priv, head_tx, dma_conf->dma_tx_size, false,
1319                                     tx_q->dma_tx_phy, desc_size);
1320         }
1321 }
1322
1323 static void stmmac_display_rings(struct stmmac_priv *priv,
1324                                  struct stmmac_dma_conf *dma_conf)
1325 {
1326         /* Display RX ring */
1327         stmmac_display_rx_rings(priv, dma_conf);
1328
1329         /* Display TX ring */
1330         stmmac_display_tx_rings(priv, dma_conf);
1331 }
1332
1333 static int stmmac_set_bfsize(int mtu, int bufsize)
1334 {
1335         int ret = bufsize;
1336
1337         if (mtu >= BUF_SIZE_8KiB)
1338                 ret = BUF_SIZE_16KiB;
1339         else if (mtu >= BUF_SIZE_4KiB)
1340                 ret = BUF_SIZE_8KiB;
1341         else if (mtu >= BUF_SIZE_2KiB)
1342                 ret = BUF_SIZE_4KiB;
1343         else if (mtu > DEFAULT_BUFSIZE)
1344                 ret = BUF_SIZE_2KiB;
1345         else
1346                 ret = DEFAULT_BUFSIZE;
1347
1348         return ret;
1349 }
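/* Illustrative mapping, assuming the usual 2/4/8/16 KiB BUF_SIZE_* values:
 * a standard 1500-byte MTU keeps the 1536-byte default buffer, a
 * 3000-byte MTU selects BUF_SIZE_4KiB, and a 9000-byte jumbo MTU selects
 * BUF_SIZE_16KiB.
 */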
1350
1351 /**
1352  * stmmac_clear_rx_descriptors - clear RX descriptors
1353  * @priv: driver private structure
1354  * @dma_conf: structure to take the dma data
1355  * @queue: RX queue index
1356  * Description: this function is called to clear the RX descriptors
1357  * whether basic or extended descriptors are in use.
1358  */
1359 static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv,
1360                                         struct stmmac_dma_conf *dma_conf,
1361                                         u32 queue)
1362 {
1363         struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1364         int i;
1365
1366         /* Clear the RX descriptors */
1367         for (i = 0; i < dma_conf->dma_rx_size; i++)
1368                 if (priv->extend_desc)
1369                         stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic,
1370                                         priv->use_riwt, priv->mode,
1371                                         (i == dma_conf->dma_rx_size - 1),
1372                                         dma_conf->dma_buf_sz);
1373                 else
1374                         stmmac_init_rx_desc(priv, &rx_q->dma_rx[i],
1375                                         priv->use_riwt, priv->mode,
1376                                         (i == dma_conf->dma_rx_size - 1),
1377                                         dma_conf->dma_buf_sz);
1378 }
1379
1380 /**
1381  * stmmac_clear_tx_descriptors - clear tx descriptors
1382  * @priv: driver private structure
1383  * @dma_conf: structure to take the dma data
1384  * @queue: TX queue index.
1385  * Description: this function is called to clear the TX descriptors,
1386  * whether basic or extended descriptors are used.
1387  */
1388 static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv,
1389                                         struct stmmac_dma_conf *dma_conf,
1390                                         u32 queue)
1391 {
1392         struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1393         int i;
1394
1395         /* Clear the TX descriptors */
1396         for (i = 0; i < dma_conf->dma_tx_size; i++) {
1397                 int last = (i == (dma_conf->dma_tx_size - 1));
1398                 struct dma_desc *p;
1399
1400                 if (priv->extend_desc)
1401                         p = &tx_q->dma_etx[i].basic;
1402                 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1403                         p = &tx_q->dma_entx[i].basic;
1404                 else
1405                         p = &tx_q->dma_tx[i];
1406
1407                 stmmac_init_tx_desc(priv, p, priv->mode, last);
1408         }
1409 }
1410
1411 /**
1412  * stmmac_clear_descriptors - clear descriptors
1413  * @priv: driver private structure
1414  * @dma_conf: structure to take the dma data
1415  * Description: this function is called to clear the TX and RX descriptors,
1416  * whether basic or extended descriptors are used.
1417  */
1418 static void stmmac_clear_descriptors(struct stmmac_priv *priv,
1419                                      struct stmmac_dma_conf *dma_conf)
1420 {
1421         u32 rx_queue_cnt = priv->plat->rx_queues_to_use;
1422         u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1423         u32 queue;
1424
1425         /* Clear the RX descriptors */
1426         for (queue = 0; queue < rx_queue_cnt; queue++)
1427                 stmmac_clear_rx_descriptors(priv, dma_conf, queue);
1428
1429         /* Clear the TX descriptors */
1430         for (queue = 0; queue < tx_queue_cnt; queue++)
1431                 stmmac_clear_tx_descriptors(priv, dma_conf, queue);
1432 }
1433
1434 /**
1435  * stmmac_init_rx_buffers - init the RX descriptor buffer.
1436  * @priv: driver private structure
1437  * @dma_conf: structure to take the dma data
1438  * @p: descriptor pointer
1439  * @i: descriptor index
1440  * @flags: gfp flag
1441  * @queue: RX queue index
1442  * Description: this function is called to allocate a receive buffer, perform
1443  * the DMA mapping and init the descriptor.
1444  */
1445 static int stmmac_init_rx_buffers(struct stmmac_priv *priv,
1446                                   struct stmmac_dma_conf *dma_conf,
1447                                   struct dma_desc *p,
1448                                   int i, gfp_t flags, u32 queue)
1449 {
1450         struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1451         struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1452         gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
1453
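	/* Platforms with a host DMA width of 32 bits or less must take their
	 * RX pages from 32-bit addressable memory.
	 */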
1454         if (priv->dma_cap.host_dma_width <= 32)
1455                 gfp |= GFP_DMA32;
1456
1457         if (!buf->page) {
1458                 buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
1459                 if (!buf->page)
1460                         return -ENOMEM;
1461                 buf->page_offset = stmmac_rx_offset(priv);
1462         }
1463
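	/* With Split Header (SPH) enabled, the descriptor carries a second
	 * buffer address so header and payload can be placed in separate
	 * buffers.
	 */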
1464         if (priv->sph && !buf->sec_page) {
1465                 buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
1466                 if (!buf->sec_page)
1467                         return -ENOMEM;
1468
1469                 buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
1470                 stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
1471         } else {
1472                 buf->sec_page = NULL;
1473                 stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
1474         }
1475
1476         buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;
1477
1478         stmmac_set_desc_addr(priv, p, buf->addr);
1479         if (dma_conf->dma_buf_sz == BUF_SIZE_16KiB)
1480                 stmmac_init_desc3(priv, p);
1481
1482         return 0;
1483 }
1484
1485 /**
1486  * stmmac_free_rx_buffer - free RX dma buffers
1487  * @priv: private structure
1488  * @rx_q: RX queue
1489  * @i: buffer index.
1490  */
1491 static void stmmac_free_rx_buffer(struct stmmac_priv *priv,
1492                                   struct stmmac_rx_queue *rx_q,
1493                                   int i)
1494 {
1495         struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1496
1497         if (buf->page)
1498                 page_pool_put_full_page(rx_q->page_pool, buf->page, false);
1499         buf->page = NULL;
1500
1501         if (buf->sec_page)
1502                 page_pool_put_full_page(rx_q->page_pool, buf->sec_page, false);
1503         buf->sec_page = NULL;
1504 }
1505
1506 /**
1507  * stmmac_free_tx_buffer - free TX dma buffers
1508  * @priv: private structure
1509  * @dma_conf: structure to take the dma data
1510  * @queue: TX queue index
1511  * @i: buffer index.
1512  */
1513 static void stmmac_free_tx_buffer(struct stmmac_priv *priv,
1514                                   struct stmmac_dma_conf *dma_conf,
1515                                   u32 queue, int i)
1516 {
1517         struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1518
1519         if (tx_q->tx_skbuff_dma[i].buf &&
1520             tx_q->tx_skbuff_dma[i].buf_type != STMMAC_TXBUF_T_XDP_TX) {
1521                 if (tx_q->tx_skbuff_dma[i].map_as_page)
1522                         dma_unmap_page(priv->device,
1523                                        tx_q->tx_skbuff_dma[i].buf,
1524                                        tx_q->tx_skbuff_dma[i].len,
1525                                        DMA_TO_DEVICE);
1526                 else
1527                         dma_unmap_single(priv->device,
1528                                          tx_q->tx_skbuff_dma[i].buf,
1529                                          tx_q->tx_skbuff_dma[i].len,
1530                                          DMA_TO_DEVICE);
1531         }
1532
1533         if (tx_q->xdpf[i] &&
1534             (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_TX ||
1535              tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_NDO)) {
1536                 xdp_return_frame(tx_q->xdpf[i]);
1537                 tx_q->xdpf[i] = NULL;
1538         }
1539
1540         if (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XSK_TX)
1541                 tx_q->xsk_frames_done++;
1542
1543         if (tx_q->tx_skbuff[i] &&
1544             tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_SKB) {
1545                 dev_kfree_skb_any(tx_q->tx_skbuff[i]);
1546                 tx_q->tx_skbuff[i] = NULL;
1547         }
1548
1549         tx_q->tx_skbuff_dma[i].buf = 0;
1550         tx_q->tx_skbuff_dma[i].map_as_page = false;
1551 }
1552
1553 /**
1554  * dma_free_rx_skbufs - free RX dma buffers
1555  * @priv: private structure
1556  * @dma_conf: structure to take the dma data
1557  * @queue: RX queue index
1558  */
1559 static void dma_free_rx_skbufs(struct stmmac_priv *priv,
1560                                struct stmmac_dma_conf *dma_conf,
1561                                u32 queue)
1562 {
1563         struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1564         int i;
1565
1566         for (i = 0; i < dma_conf->dma_rx_size; i++)
1567                 stmmac_free_rx_buffer(priv, rx_q, i);
1568 }
1569
1570 static int stmmac_alloc_rx_buffers(struct stmmac_priv *priv,
1571                                    struct stmmac_dma_conf *dma_conf,
1572                                    u32 queue, gfp_t flags)
1573 {
1574         struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1575         int i;
1576
1577         for (i = 0; i < dma_conf->dma_rx_size; i++) {
1578                 struct dma_desc *p;
1579                 int ret;
1580
1581                 if (priv->extend_desc)
1582                         p = &((rx_q->dma_erx + i)->basic);
1583                 else
1584                         p = rx_q->dma_rx + i;
1585
1586                 ret = stmmac_init_rx_buffers(priv, dma_conf, p, i, flags,
1587                                              queue);
1588                 if (ret)
1589                         return ret;
1590
1591                 rx_q->buf_alloc_num++;
1592         }
1593
1594         return 0;
1595 }
1596
1597 /**
1598  * dma_free_rx_xskbufs - free RX dma buffers from XSK pool
1599  * @priv: private structure
1600  * @dma_conf: structure to take the dma data
1601  * @queue: RX queue index
1602  */
1603 static void dma_free_rx_xskbufs(struct stmmac_priv *priv,
1604                                 struct stmmac_dma_conf *dma_conf,
1605                                 u32 queue)
1606 {
1607         struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1608         int i;
1609
1610         for (i = 0; i < dma_conf->dma_rx_size; i++) {
1611                 struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1612
1613                 if (!buf->xdp)
1614                         continue;
1615
1616                 xsk_buff_free(buf->xdp);
1617                 buf->xdp = NULL;
1618         }
1619 }
1620
1621 static int stmmac_alloc_rx_buffers_zc(struct stmmac_priv *priv,
1622                                       struct stmmac_dma_conf *dma_conf,
1623                                       u32 queue)
1624 {
1625         struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1626         int i;
1627
1628         /* struct stmmac_xdp_buff uses the cb field (maximum size of 24 bytes)
1629          * in struct xdp_buff_xsk to stash driver specific information. Thus,
1630          * use this macro to make sure it does not exceed that size.
1631          */
1632         XSK_CHECK_PRIV_TYPE(struct stmmac_xdp_buff);
1633
1634         for (i = 0; i < dma_conf->dma_rx_size; i++) {
1635                 struct stmmac_rx_buffer *buf;
1636                 dma_addr_t dma_addr;
1637                 struct dma_desc *p;
1638
1639                 if (priv->extend_desc)
1640                         p = (struct dma_desc *)(rx_q->dma_erx + i);
1641                 else
1642                         p = rx_q->dma_rx + i;
1643
1644                 buf = &rx_q->buf_pool[i];
1645
1646                 buf->xdp = xsk_buff_alloc(rx_q->xsk_pool);
1647                 if (!buf->xdp)
1648                         return -ENOMEM;
1649
1650                 dma_addr = xsk_buff_xdp_get_dma(buf->xdp);
1651                 stmmac_set_desc_addr(priv, p, dma_addr);
1652                 rx_q->buf_alloc_num++;
1653         }
1654
1655         return 0;
1656 }
1657
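/* Return the XSK buffer pool bound to @queue, but only when XDP is enabled
 * and the queue has been switched to AF_XDP zero-copy mode.
 */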
1658 static struct xsk_buff_pool *stmmac_get_xsk_pool(struct stmmac_priv *priv, u32 queue)
1659 {
1660         if (!stmmac_xdp_is_enabled(priv) || !test_bit(queue, priv->af_xdp_zc_qps))
1661                 return NULL;
1662
1663         return xsk_get_pool_from_qid(priv->dev, queue);
1664 }
1665
1666 /**
1667  * __init_dma_rx_desc_rings - init the RX descriptor ring (per queue)
1668  * @priv: driver private structure
1669  * @dma_conf: structure to take the dma data
1670  * @queue: RX queue index
1671  * @flags: gfp flag.
1672  * Description: this function initializes the DMA RX descriptors
1673  * and allocates the socket buffers. It supports the chained and ring
1674  * modes.
1675  */
1676 static int __init_dma_rx_desc_rings(struct stmmac_priv *priv,
1677                                     struct stmmac_dma_conf *dma_conf,
1678                                     u32 queue, gfp_t flags)
1679 {
1680         struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1681         int ret;
1682
1683         netif_dbg(priv, probe, priv->dev,
1684                   "(%s) dma_rx_phy=0x%08x\n", __func__,
1685                   (u32)rx_q->dma_rx_phy);
1686
1687         stmmac_clear_rx_descriptors(priv, dma_conf, queue);
1688
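	/* Drop any previously registered memory model: the queue may switch
	 * between the page pool and an XSK buffer pool across reconfigurations.
	 */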
1689         xdp_rxq_info_unreg_mem_model(&rx_q->xdp_rxq);
1690
1691         rx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);
1692
1693         if (rx_q->xsk_pool) {
1694                 WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq,
1695                                                    MEM_TYPE_XSK_BUFF_POOL,
1696                                                    NULL));
1697                 netdev_info(priv->dev,
1698                             "Register MEM_TYPE_XSK_BUFF_POOL RxQ-%d\n",
1699                             rx_q->queue_index);
1700                 xsk_pool_set_rxq_info(rx_q->xsk_pool, &rx_q->xdp_rxq);
1701         } else {
1702                 WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq,
1703                                                    MEM_TYPE_PAGE_POOL,
1704                                                    rx_q->page_pool));
1705                 netdev_info(priv->dev,
1706                             "Register MEM_TYPE_PAGE_POOL RxQ-%d\n",
1707                             rx_q->queue_index);
1708         }
1709
1710         if (rx_q->xsk_pool) {
1711                 /* RX XDP ZC buffer pool may not be populated, e.g.
1712                  * xdpsock TX-only.
1713                  */
1714                 stmmac_alloc_rx_buffers_zc(priv, dma_conf, queue);
1715         } else {
1716                 ret = stmmac_alloc_rx_buffers(priv, dma_conf, queue, flags);
1717                 if (ret < 0)
1718                         return -ENOMEM;
1719         }
1720
1721         /* Setup the chained descriptor addresses */
1722         if (priv->mode == STMMAC_CHAIN_MODE) {
1723                 if (priv->extend_desc)
1724                         stmmac_mode_init(priv, rx_q->dma_erx,
1725                                          rx_q->dma_rx_phy,
1726                                          dma_conf->dma_rx_size, 1);
1727                 else
1728                         stmmac_mode_init(priv, rx_q->dma_rx,
1729                                          rx_q->dma_rx_phy,
1730                                          dma_conf->dma_rx_size, 0);
1731         }
1732
1733         return 0;
1734 }
1735
1736 static int init_dma_rx_desc_rings(struct net_device *dev,
1737                                   struct stmmac_dma_conf *dma_conf,
1738                                   gfp_t flags)
1739 {
1740         struct stmmac_priv *priv = netdev_priv(dev);
1741         u32 rx_count = priv->plat->rx_queues_to_use;
1742         int queue;
1743         int ret;
1744
1745         /* RX INITIALIZATION */
1746         netif_dbg(priv, probe, priv->dev,
1747                   "SKB addresses:\nskb\t\tskb data\tdma data\n");
1748
1749         for (queue = 0; queue < rx_count; queue++) {
1750                 ret = __init_dma_rx_desc_rings(priv, dma_conf, queue, flags);
1751                 if (ret)
1752                         goto err_init_rx_buffers;
1753         }
1754
1755         return 0;
1756
1757 err_init_rx_buffers:
1758         while (queue >= 0) {
1759                 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1760
1761                 if (rx_q->xsk_pool)
1762                         dma_free_rx_xskbufs(priv, dma_conf, queue);
1763                 else
1764                         dma_free_rx_skbufs(priv, dma_conf, queue);
1765
1766                 rx_q->buf_alloc_num = 0;
1767                 rx_q->xsk_pool = NULL;
1768
1769                 queue--;
1770         }
1771
1772         return ret;
1773 }
1774
1775 /**
1776  * __init_dma_tx_desc_rings - init the TX descriptor ring (per queue)
1777  * @priv: driver private structure
1778  * @dma_conf: structure to take the dma data
1779  * @queue: TX queue index
1780  * Description: this function initializes the DMA TX descriptors
1781  * and allocates the socket buffers. It supports the chained and ring
1782  * modes.
1783  */
1784 static int __init_dma_tx_desc_rings(struct stmmac_priv *priv,
1785                                     struct stmmac_dma_conf *dma_conf,
1786                                     u32 queue)
1787 {
1788         struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1789         int i;
1790
1791         netif_dbg(priv, probe, priv->dev,
1792                   "(%s) dma_tx_phy=0x%08x\n", __func__,
1793                   (u32)tx_q->dma_tx_phy);
1794
1795         /* Setup the chained descriptor addresses */
1796         if (priv->mode == STMMAC_CHAIN_MODE) {
1797                 if (priv->extend_desc)
1798                         stmmac_mode_init(priv, tx_q->dma_etx,
1799                                          tx_q->dma_tx_phy,
1800                                          dma_conf->dma_tx_size, 1);
1801                 else if (!(tx_q->tbs & STMMAC_TBS_AVAIL))
1802                         stmmac_mode_init(priv, tx_q->dma_tx,
1803                                          tx_q->dma_tx_phy,
1804                                          dma_conf->dma_tx_size, 0);
1805         }
1806
1807         tx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);
1808
1809         for (i = 0; i < dma_conf->dma_tx_size; i++) {
1810                 struct dma_desc *p;
1811
1812                 if (priv->extend_desc)
1813                         p = &((tx_q->dma_etx + i)->basic);
1814                 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1815                         p = &((tx_q->dma_entx + i)->basic);
1816                 else
1817                         p = tx_q->dma_tx + i;
1818
1819                 stmmac_clear_desc(priv, p);
1820
1821                 tx_q->tx_skbuff_dma[i].buf = 0;
1822                 tx_q->tx_skbuff_dma[i].map_as_page = false;
1823                 tx_q->tx_skbuff_dma[i].len = 0;
1824                 tx_q->tx_skbuff_dma[i].last_segment = false;
1825                 tx_q->tx_skbuff[i] = NULL;
1826         }
1827
1828         return 0;
1829 }
1830
1831 static int init_dma_tx_desc_rings(struct net_device *dev,
1832                                   struct stmmac_dma_conf *dma_conf)
1833 {
1834         struct stmmac_priv *priv = netdev_priv(dev);
1835         u32 tx_queue_cnt;
1836         u32 queue;
1837
1838         tx_queue_cnt = priv->plat->tx_queues_to_use;
1839
1840         for (queue = 0; queue < tx_queue_cnt; queue++)
1841                 __init_dma_tx_desc_rings(priv, dma_conf, queue);
1842
1843         return 0;
1844 }
1845
1846 /**
1847  * init_dma_desc_rings - init the RX/TX descriptor rings
1848  * @dev: net device structure
1849  * @dma_conf: structure to take the dma data
1850  * @flags: gfp flag.
1851  * Description: this function initializes the DMA RX/TX descriptors
1852  * and allocates the socket buffers. It supports the chained and ring
1853  * modes.
1854  */
1855 static int init_dma_desc_rings(struct net_device *dev,
1856                                struct stmmac_dma_conf *dma_conf,
1857                                gfp_t flags)
1858 {
1859         struct stmmac_priv *priv = netdev_priv(dev);
1860         int ret;
1861
1862         ret = init_dma_rx_desc_rings(dev, dma_conf, flags);
1863         if (ret)
1864                 return ret;
1865
1866         ret = init_dma_tx_desc_rings(dev, dma_conf);
1867
1868         stmmac_clear_descriptors(priv, dma_conf);
1869
1870         if (netif_msg_hw(priv))
1871                 stmmac_display_rings(priv, dma_conf);
1872
1873         return ret;
1874 }
1875
1876 /**
1877  * dma_free_tx_skbufs - free TX dma buffers
1878  * @priv: private structure
1879  * @dma_conf: structure to take the dma data
1880  * @queue: TX queue index
1881  */
1882 static void dma_free_tx_skbufs(struct stmmac_priv *priv,
1883                                struct stmmac_dma_conf *dma_conf,
1884                                u32 queue)
1885 {
1886         struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1887         int i;
1888
1889         tx_q->xsk_frames_done = 0;
1890
1891         for (i = 0; i < dma_conf->dma_tx_size; i++)
1892                 stmmac_free_tx_buffer(priv, dma_conf, queue, i);
1893
1894         if (tx_q->xsk_pool && tx_q->xsk_frames_done) {
1895                 xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done);
1896                 tx_q->xsk_frames_done = 0;
1897                 tx_q->xsk_pool = NULL;
1898         }
1899 }
1900
1901 /**
1902  * stmmac_free_tx_skbufs - free TX skb buffers
1903  * @priv: private structure
1904  */
1905 static void stmmac_free_tx_skbufs(struct stmmac_priv *priv)
1906 {
1907         u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1908         u32 queue;
1909
1910         for (queue = 0; queue < tx_queue_cnt; queue++)
1911                 dma_free_tx_skbufs(priv, &priv->dma_conf, queue);
1912 }
1913
1914 /**
1915  * __free_dma_rx_desc_resources - free RX dma desc resources (per queue)
1916  * @priv: private structure
1917  * @dma_conf: structure to take the dma data
1918  * @queue: RX queue index
1919  */
1920 static void __free_dma_rx_desc_resources(struct stmmac_priv *priv,
1921                                          struct stmmac_dma_conf *dma_conf,
1922                                          u32 queue)
1923 {
1924         struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1925
1926         /* Release the DMA RX socket buffers */
1927         if (rx_q->xsk_pool)
1928                 dma_free_rx_xskbufs(priv, dma_conf, queue);
1929         else
1930                 dma_free_rx_skbufs(priv, dma_conf, queue);
1931
1932         rx_q->buf_alloc_num = 0;
1933         rx_q->xsk_pool = NULL;
1934
1935         /* Free DMA regions of consistent memory previously allocated */
1936         if (!priv->extend_desc)
1937                 dma_free_coherent(priv->device, dma_conf->dma_rx_size *
1938                                   sizeof(struct dma_desc),
1939                                   rx_q->dma_rx, rx_q->dma_rx_phy);
1940         else
1941                 dma_free_coherent(priv->device, dma_conf->dma_rx_size *
1942                                   sizeof(struct dma_extended_desc),
1943                                   rx_q->dma_erx, rx_q->dma_rx_phy);
1944
1945         if (xdp_rxq_info_is_reg(&rx_q->xdp_rxq))
1946                 xdp_rxq_info_unreg(&rx_q->xdp_rxq);
1947
1948         kfree(rx_q->buf_pool);
1949         if (rx_q->page_pool)
1950                 page_pool_destroy(rx_q->page_pool);
1951 }
1952
1953 static void free_dma_rx_desc_resources(struct stmmac_priv *priv,
1954                                        struct stmmac_dma_conf *dma_conf)
1955 {
1956         u32 rx_count = priv->plat->rx_queues_to_use;
1957         u32 queue;
1958
1959         /* Free RX queue resources */
1960         for (queue = 0; queue < rx_count; queue++)
1961                 __free_dma_rx_desc_resources(priv, dma_conf, queue);
1962 }
1963
1964 /**
1965  * __free_dma_tx_desc_resources - free TX dma desc resources (per queue)
1966  * @priv: private structure
1967  * @dma_conf: structure to take the dma data
1968  * @queue: TX queue index
1969  */
1970 static void __free_dma_tx_desc_resources(struct stmmac_priv *priv,
1971                                          struct stmmac_dma_conf *dma_conf,
1972                                          u32 queue)
1973 {
1974         struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1975         size_t size;
1976         void *addr;
1977
1978         /* Release the DMA TX socket buffers */
1979         dma_free_tx_skbufs(priv, dma_conf, queue);
1980
1981         if (priv->extend_desc) {
1982                 size = sizeof(struct dma_extended_desc);
1983                 addr = tx_q->dma_etx;
1984         } else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
1985                 size = sizeof(struct dma_edesc);
1986                 addr = tx_q->dma_entx;
1987         } else {
1988                 size = sizeof(struct dma_desc);
1989                 addr = tx_q->dma_tx;
1990         }
1991
1992         size *= dma_conf->dma_tx_size;
1993
1994         dma_free_coherent(priv->device, size, addr, tx_q->dma_tx_phy);
1995
1996         kfree(tx_q->tx_skbuff_dma);
1997         kfree(tx_q->tx_skbuff);
1998 }
1999
2000 static void free_dma_tx_desc_resources(struct stmmac_priv *priv,
2001                                        struct stmmac_dma_conf *dma_conf)
2002 {
2003         u32 tx_count = priv->plat->tx_queues_to_use;
2004         u32 queue;
2005
2006         /* Free TX queue resources */
2007         for (queue = 0; queue < tx_count; queue++)
2008                 __free_dma_tx_desc_resources(priv, dma_conf, queue);
2009 }
2010
2011 /**
2012  * __alloc_dma_rx_desc_resources - alloc RX resources (per queue).
2013  * @priv: private structure
2014  * @dma_conf: structure to take the dma data
2015  * @queue: RX queue index
2016  * Description: according to which descriptor can be used (extended or basic),
2017  * this function allocates the RX resources for the given queue: the page
2018  * pool backing the receive buffers, the buffer bookkeeping and the DMA
2019  * descriptor ring.
2020  */
2021 static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv,
2022                                          struct stmmac_dma_conf *dma_conf,
2023                                          u32 queue)
2024 {
2025         struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
2026         struct stmmac_channel *ch = &priv->channel[queue];
2027         bool xdp_prog = stmmac_xdp_is_enabled(priv);
2028         struct page_pool_params pp_params = { 0 };
2029         unsigned int num_pages;
2030         unsigned int napi_id;
2031         int ret;
2032
2033         rx_q->queue_index = queue;
2034         rx_q->priv_data = priv;
2035
2036         pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
2037         pp_params.pool_size = dma_conf->dma_rx_size;
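	/* Choose a page order large enough for one (possibly compound) page to
	 * hold a full dma_buf_sz sized RX buffer.
	 */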
2038         num_pages = DIV_ROUND_UP(dma_conf->dma_buf_sz, PAGE_SIZE);
2039         pp_params.order = ilog2(num_pages);
2040         pp_params.nid = dev_to_node(priv->device);
2041         pp_params.dev = priv->device;
2042         pp_params.dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
2043         pp_params.offset = stmmac_rx_offset(priv);
2044         pp_params.max_len = STMMAC_MAX_RX_BUF_SIZE(num_pages);
2045
2046         rx_q->page_pool = page_pool_create(&pp_params);
2047         if (IS_ERR(rx_q->page_pool)) {
2048                 ret = PTR_ERR(rx_q->page_pool);
2049                 rx_q->page_pool = NULL;
2050                 return ret;
2051         }
2052
2053         rx_q->buf_pool = kcalloc(dma_conf->dma_rx_size,
2054                                  sizeof(*rx_q->buf_pool),
2055                                  GFP_KERNEL);
2056         if (!rx_q->buf_pool)
2057                 return -ENOMEM;
2058
2059         if (priv->extend_desc) {
2060                 rx_q->dma_erx = dma_alloc_coherent(priv->device,
2061                                                    dma_conf->dma_rx_size *
2062                                                    sizeof(struct dma_extended_desc),
2063                                                    &rx_q->dma_rx_phy,
2064                                                    GFP_KERNEL);
2065                 if (!rx_q->dma_erx)
2066                         return -ENOMEM;
2067
2068         } else {
2069                 rx_q->dma_rx = dma_alloc_coherent(priv->device,
2070                                                   dma_conf->dma_rx_size *
2071                                                   sizeof(struct dma_desc),
2072                                                   &rx_q->dma_rx_phy,
2073                                                   GFP_KERNEL);
2074                 if (!rx_q->dma_rx)
2075                         return -ENOMEM;
2076         }
2077
2078         if (stmmac_xdp_is_enabled(priv) &&
2079             test_bit(queue, priv->af_xdp_zc_qps))
2080                 napi_id = ch->rxtx_napi.napi_id;
2081         else
2082                 napi_id = ch->rx_napi.napi_id;
2083
2084         ret = xdp_rxq_info_reg(&rx_q->xdp_rxq, priv->dev,
2085                                rx_q->queue_index,
2086                                napi_id);
2087         if (ret) {
2088                 netdev_err(priv->dev, "Failed to register xdp rxq info\n");
2089                 return -EINVAL;
2090         }
2091
2092         return 0;
2093 }
2094
2095 static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv,
2096                                        struct stmmac_dma_conf *dma_conf)
2097 {
2098         u32 rx_count = priv->plat->rx_queues_to_use;
2099         u32 queue;
2100         int ret;
2101
2102         /* RX queues buffers and DMA */
2103         for (queue = 0; queue < rx_count; queue++) {
2104                 ret = __alloc_dma_rx_desc_resources(priv, dma_conf, queue);
2105                 if (ret)
2106                         goto err_dma;
2107         }
2108
2109         return 0;
2110
2111 err_dma:
2112         free_dma_rx_desc_resources(priv, dma_conf);
2113
2114         return ret;
2115 }
2116
2117 /**
2118  * __alloc_dma_tx_desc_resources - alloc TX resources (per queue).
2119  * @priv: private structure
2120  * @dma_conf: structure to take the dma data
2121  * @queue: TX queue index
2122  * Description: according to which descriptor can be used (extended or basic),
2123  * this function allocates the TX resources for the given queue: the
2124  * tx_skbuff and tx_skbuff_dma bookkeeping arrays and the DMA descriptor
2125  * ring.
2126  */
2127 static int __alloc_dma_tx_desc_resources(struct stmmac_priv *priv,
2128                                          struct stmmac_dma_conf *dma_conf,
2129                                          u32 queue)
2130 {
2131         struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
2132         size_t size;
2133         void *addr;
2134
2135         tx_q->queue_index = queue;
2136         tx_q->priv_data = priv;
2137
2138         tx_q->tx_skbuff_dma = kcalloc(dma_conf->dma_tx_size,
2139                                       sizeof(*tx_q->tx_skbuff_dma),
2140                                       GFP_KERNEL);
2141         if (!tx_q->tx_skbuff_dma)
2142                 return -ENOMEM;
2143
2144         tx_q->tx_skbuff = kcalloc(dma_conf->dma_tx_size,
2145                                   sizeof(struct sk_buff *),
2146                                   GFP_KERNEL);
2147         if (!tx_q->tx_skbuff)
2148                 return -ENOMEM;
2149
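	/* The ring entry size depends on the descriptor flavour in use:
	 * extended, enhanced (TBS) or basic.
	 */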
2150         if (priv->extend_desc)
2151                 size = sizeof(struct dma_extended_desc);
2152         else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2153                 size = sizeof(struct dma_edesc);
2154         else
2155                 size = sizeof(struct dma_desc);
2156
2157         size *= dma_conf->dma_tx_size;
2158
2159         addr = dma_alloc_coherent(priv->device, size,
2160                                   &tx_q->dma_tx_phy, GFP_KERNEL);
2161         if (!addr)
2162                 return -ENOMEM;
2163
2164         if (priv->extend_desc)
2165                 tx_q->dma_etx = addr;
2166         else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2167                 tx_q->dma_entx = addr;
2168         else
2169                 tx_q->dma_tx = addr;
2170
2171         return 0;
2172 }
2173
2174 static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv,
2175                                        struct stmmac_dma_conf *dma_conf)
2176 {
2177         u32 tx_count = priv->plat->tx_queues_to_use;
2178         u32 queue;
2179         int ret;
2180
2181         /* TX queues buffers and DMA */
2182         for (queue = 0; queue < tx_count; queue++) {
2183                 ret = __alloc_dma_tx_desc_resources(priv, dma_conf, queue);
2184                 if (ret)
2185                         goto err_dma;
2186         }
2187
2188         return 0;
2189
2190 err_dma:
2191         free_dma_tx_desc_resources(priv, dma_conf);
2192         return ret;
2193 }
2194
2195 /**
2196  * alloc_dma_desc_resources - alloc TX/RX resources.
2197  * @priv: private structure
2198  * @dma_conf: structure to take the dma data
2199  * Description: according to which descriptor can be used (extended or basic),
2200  * this function allocates the resources for the TX and RX paths. In case of
2201  * reception, for example, it pre-allocates the RX buffers in order to
2202  * allow a zero-copy mechanism.
2203  */
2204 static int alloc_dma_desc_resources(struct stmmac_priv *priv,
2205                                     struct stmmac_dma_conf *dma_conf)
2206 {
2207         /* RX Allocation */
2208         int ret = alloc_dma_rx_desc_resources(priv, dma_conf);
2209
2210         if (ret)
2211                 return ret;
2212
2213         ret = alloc_dma_tx_desc_resources(priv, dma_conf);
2214
2215         return ret;
2216 }
2217
2218 /**
2219  * free_dma_desc_resources - free dma desc resources
2220  * @priv: private structure
2221  * @dma_conf: structure to take the dma data
2222  */
2223 static void free_dma_desc_resources(struct stmmac_priv *priv,
2224                                     struct stmmac_dma_conf *dma_conf)
2225 {
2226         /* Release the DMA TX socket buffers */
2227         free_dma_tx_desc_resources(priv, dma_conf);
2228
2229         /* Release the DMA RX socket buffers later
2230          * to ensure all pending XDP_TX buffers are returned.
2231          */
2232         free_dma_rx_desc_resources(priv, dma_conf);
2233 }
2234
2235 /**
2236  *  stmmac_mac_enable_rx_queues - Enable MAC rx queues
2237  *  @priv: driver private structure
2238  *  Description: It is used for enabling the rx queues in the MAC
2239  */
2240 static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
2241 {
2242         u32 rx_queues_count = priv->plat->rx_queues_to_use;
2243         int queue;
2244         u8 mode;
2245
2246         for (queue = 0; queue < rx_queues_count; queue++) {
2247                 mode = priv->plat->rx_queues_cfg[queue].mode_to_use;
2248                 stmmac_rx_queue_enable(priv, priv->hw, mode, queue);
2249         }
2250 }
2251
2252 /**
2253  * stmmac_start_rx_dma - start RX DMA channel
2254  * @priv: driver private structure
2255  * @chan: RX channel index
2256  * Description:
2257  * This starts an RX DMA channel
2258  */
2259 static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan)
2260 {
2261         netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan);
2262         stmmac_start_rx(priv, priv->ioaddr, chan);
2263 }
2264
2265 /**
2266  * stmmac_start_tx_dma - start TX DMA channel
2267  * @priv: driver private structure
2268  * @chan: TX channel index
2269  * Description:
2270  * This starts a TX DMA channel
2271  */
2272 static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan)
2273 {
2274         netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan);
2275         stmmac_start_tx(priv, priv->ioaddr, chan);
2276 }
2277
2278 /**
2279  * stmmac_stop_rx_dma - stop RX DMA channel
2280  * @priv: driver private structure
2281  * @chan: RX channel index
2282  * Description:
2283  * This stops an RX DMA channel
2284  */
2285 static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan)
2286 {
2287         netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan);
2288         stmmac_stop_rx(priv, priv->ioaddr, chan);
2289 }
2290
2291 /**
2292  * stmmac_stop_tx_dma - stop TX DMA channel
2293  * @priv: driver private structure
2294  * @chan: TX channel index
2295  * Description:
2296  * This stops a TX DMA channel
2297  */
2298 static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan)
2299 {
2300         netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan);
2301         stmmac_stop_tx(priv, priv->ioaddr, chan);
2302 }
2303
2304 static void stmmac_enable_all_dma_irq(struct stmmac_priv *priv)
2305 {
2306         u32 rx_channels_count = priv->plat->rx_queues_to_use;
2307         u32 tx_channels_count = priv->plat->tx_queues_to_use;
2308         u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
2309         u32 chan;
2310
2311         for (chan = 0; chan < dma_csr_ch; chan++) {
2312                 struct stmmac_channel *ch = &priv->channel[chan];
2313                 unsigned long flags;
2314
2315                 spin_lock_irqsave(&ch->lock, flags);
2316                 stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
2317                 spin_unlock_irqrestore(&ch->lock, flags);
2318         }
2319 }
2320
2321 /**
2322  * stmmac_start_all_dma - start all RX and TX DMA channels
2323  * @priv: driver private structure
2324  * Description:
2325  * This starts all the RX and TX DMA channels
2326  */
2327 static void stmmac_start_all_dma(struct stmmac_priv *priv)
2328 {
2329         u32 rx_channels_count = priv->plat->rx_queues_to_use;
2330         u32 tx_channels_count = priv->plat->tx_queues_to_use;
2331         u32 chan = 0;
2332
2333         for (chan = 0; chan < rx_channels_count; chan++)
2334                 stmmac_start_rx_dma(priv, chan);
2335
2336         for (chan = 0; chan < tx_channels_count; chan++)
2337                 stmmac_start_tx_dma(priv, chan);
2338 }
2339
2340 /**
2341  * stmmac_stop_all_dma - stop all RX and TX DMA channels
2342  * @priv: driver private structure
2343  * Description:
2344  * This stops the RX and TX DMA channels
2345  */
2346 static void stmmac_stop_all_dma(struct stmmac_priv *priv)
2347 {
2348         u32 rx_channels_count = priv->plat->rx_queues_to_use;
2349         u32 tx_channels_count = priv->plat->tx_queues_to_use;
2350         u32 chan = 0;
2351
2352         for (chan = 0; chan < rx_channels_count; chan++)
2353                 stmmac_stop_rx_dma(priv, chan);
2354
2355         for (chan = 0; chan < tx_channels_count; chan++)
2356                 stmmac_stop_tx_dma(priv, chan);
2357 }
2358
2359 /**
2360  *  stmmac_dma_operation_mode - HW DMA operation mode
2361  *  @priv: driver private structure
2362  *  Description: it is used for configuring the DMA operation mode register in
2363  *  order to program the tx/rx DMA thresholds or Store-And-Forward mode.
2364  */
2365 static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
2366 {
2367         u32 rx_channels_count = priv->plat->rx_queues_to_use;
2368         u32 tx_channels_count = priv->plat->tx_queues_to_use;
2369         int rxfifosz = priv->plat->rx_fifo_size;
2370         int txfifosz = priv->plat->tx_fifo_size;
2371         u32 txmode = 0;
2372         u32 rxmode = 0;
2373         u32 chan = 0;
2374         u8 qmode = 0;
2375
2376         if (rxfifosz == 0)
2377                 rxfifosz = priv->dma_cap.rx_fifo_size;
2378         if (txfifosz == 0)
2379                 txfifosz = priv->dma_cap.tx_fifo_size;
2380
2381         /* Adjust for real per queue fifo size */
2382         rxfifosz /= rx_channels_count;
2383         txfifosz /= tx_channels_count;
2384
2385         if (priv->plat->force_thresh_dma_mode) {
2386                 txmode = tc;
2387                 rxmode = tc;
2388         } else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
2389                 /*
2390                  * In case of GMAC, SF mode can be enabled
2391                  * to perform the TX COE in HW. This depends on:
2392                  * 1) TX COE being actually supported
2393                  * 2) there being no bugged Jumbo frame support
2394                  *    that requires not inserting the csum in the TDES.
2395                  */
2396                 txmode = SF_DMA_MODE;
2397                 rxmode = SF_DMA_MODE;
2398                 priv->xstats.threshold = SF_DMA_MODE;
2399         } else {
2400                 txmode = tc;
2401                 rxmode = SF_DMA_MODE;
2402         }
2403
2404         /* configure all channels */
2405         for (chan = 0; chan < rx_channels_count; chan++) {
2406                 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan];
2407                 u32 buf_size;
2408
2409                 qmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
2410
2411                 stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan,
2412                                 rxfifosz, qmode);
2413
2414                 if (rx_q->xsk_pool) {
2415                         buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
2416                         stmmac_set_dma_bfsize(priv, priv->ioaddr,
2417                                               buf_size,
2418                                               chan);
2419                 } else {
2420                         stmmac_set_dma_bfsize(priv, priv->ioaddr,
2421                                               priv->dma_conf.dma_buf_sz,
2422                                               chan);
2423                 }
2424         }
2425
2426         for (chan = 0; chan < tx_channels_count; chan++) {
2427                 qmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
2428
2429                 stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan,
2430                                 txfifosz, qmode);
2431         }
2432 }
2433
2434 static void stmmac_xsk_request_timestamp(void *_priv)
2435 {
2436         struct stmmac_metadata_request *meta_req = _priv;
2437
2438         stmmac_enable_tx_timestamp(meta_req->priv, meta_req->tx_desc);
2439         *meta_req->set_ic = true;
2440 }
2441
2442 static u64 stmmac_xsk_fill_timestamp(void *_priv)
2443 {
2444         struct stmmac_xsk_tx_complete *tx_compl = _priv;
2445         struct stmmac_priv *priv = tx_compl->priv;
2446         struct dma_desc *desc = tx_compl->desc;
2447         bool found = false;
2448         u64 ns = 0;
2449
2450         if (!priv->hwts_tx_en)
2451                 return 0;
2452
2453         /* check tx tstamp status */
2454         if (stmmac_get_tx_timestamp_status(priv, desc)) {
2455                 stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);
2456                 found = true;
2457         } else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) {
2458                 found = true;
2459         }
2460
2461         if (found) {
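		/* Compensate for the known clock-domain-crossing (CDC) error of
		 * the timestamping unit before reporting the timestamp.
		 */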
2462                 ns -= priv->plat->cdc_error_adj;
2463                 return ns_to_ktime(ns);
2464         }
2465
2466         return 0;
2467 }
2468
2469 static const struct xsk_tx_metadata_ops stmmac_xsk_tx_metadata_ops = {
2470         .tmo_request_timestamp          = stmmac_xsk_request_timestamp,
2471         .tmo_fill_timestamp             = stmmac_xsk_fill_timestamp,
2472 };
2473
2474 static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
2475 {
2476         struct netdev_queue *nq = netdev_get_tx_queue(priv->dev, queue);
2477         struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
2478         struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
2479         struct xsk_buff_pool *pool = tx_q->xsk_pool;
2480         unsigned int entry = tx_q->cur_tx;
2481         struct dma_desc *tx_desc = NULL;
2482         struct xdp_desc xdp_desc;
2483         bool work_done = true;
2484         u32 tx_set_ic_bit = 0;
2485         unsigned long flags;
2486
2487         /* Avoids TX time-out as we are sharing with slow path */
2488         txq_trans_cond_update(nq);
2489
2490         budget = min(budget, stmmac_tx_avail(priv, queue));
2491
2492         while (budget-- > 0) {
2493                 struct stmmac_metadata_request meta_req;
2494                 struct xsk_tx_metadata *meta = NULL;
2495                 dma_addr_t dma_addr;
2496                 bool set_ic;
2497
2498                 /* We are sharing with slow path and stop XSK TX desc submission when
2499                  * the available TX ring space drops below the threshold.
2500                  */
2501                 if (unlikely(stmmac_tx_avail(priv, queue) < STMMAC_TX_XSK_AVAIL) ||
2502                     !netif_carrier_ok(priv->dev)) {
2503                         work_done = false;
2504                         break;
2505                 }
2506
2507                 if (!xsk_tx_peek_desc(pool, &xdp_desc))
2508                         break;
2509
2510                 if (likely(priv->extend_desc))
2511                         tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
2512                 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2513                         tx_desc = &tx_q->dma_entx[entry].basic;
2514                 else
2515                         tx_desc = tx_q->dma_tx + entry;
2516
2517                 dma_addr = xsk_buff_raw_get_dma(pool, xdp_desc.addr);
2518                 meta = xsk_buff_get_metadata(pool, xdp_desc.addr);
2519                 xsk_buff_raw_dma_sync_for_device(pool, dma_addr, xdp_desc.len);
2520
2521                 tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XSK_TX;
2522
2523                 /* To return XDP buffer to XSK pool, we simply call
2524                  * xsk_tx_completed(), so we don't need to fill up
2525                  * 'buf' and 'xdpf'.
2526                  */
2527                 tx_q->tx_skbuff_dma[entry].buf = 0;
2528                 tx_q->xdpf[entry] = NULL;
2529
2530                 tx_q->tx_skbuff_dma[entry].map_as_page = false;
2531                 tx_q->tx_skbuff_dma[entry].len = xdp_desc.len;
2532                 tx_q->tx_skbuff_dma[entry].last_segment = true;
2533                 tx_q->tx_skbuff_dma[entry].is_jumbo = false;
2534
2535                 stmmac_set_desc_addr(priv, tx_desc, dma_addr);
2536
2537                 tx_q->tx_count_frames++;
2538
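		/* Request a completion interrupt only every tx_coal_frames
		 * descriptors to keep the TX interrupt rate low.
		 */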
2539                 if (!priv->tx_coal_frames[queue])
2540                         set_ic = false;
2541                 else if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0)
2542                         set_ic = true;
2543                 else
2544                         set_ic = false;
2545
2546                 meta_req.priv = priv;
2547                 meta_req.tx_desc = tx_desc;
2548                 meta_req.set_ic = &set_ic;
2549                 xsk_tx_metadata_request(meta, &stmmac_xsk_tx_metadata_ops,
2550                                         &meta_req);
2551                 if (set_ic) {
2552                         tx_q->tx_count_frames = 0;
2553                         stmmac_set_tx_ic(priv, tx_desc);
2554                         tx_set_ic_bit++;
2555                 }
2556
2557                 stmmac_prepare_tx_desc(priv, tx_desc, 1, xdp_desc.len,
2558                                        true, priv->mode, true, true,
2559                                        xdp_desc.len);
2560
2561                 stmmac_enable_dma_transmission(priv, priv->ioaddr);
2562
2563                 xsk_tx_metadata_to_compl(meta,
2564                                          &tx_q->tx_skbuff_dma[entry].xsk_meta);
2565
2566                 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
2567                 entry = tx_q->cur_tx;
2568         }
2569         flags = u64_stats_update_begin_irqsave(&txq_stats->syncp);
2570         txq_stats->tx_set_ic_bit += tx_set_ic_bit;
2571         u64_stats_update_end_irqrestore(&txq_stats->syncp, flags);
2572
2573         if (tx_desc) {
2574                 stmmac_flush_tx_descriptors(priv, queue);
2575                 xsk_tx_release(pool);
2576         }
2577
2578         /* Return true only if both of these conditions are met:
2579          *  a) TX budget is still available
2580          *  b) work_done is true, i.e. the XSK TX desc peek found no more
2581          *     pending XSK TX frames for transmission
2582          */
2583         return !!budget && work_done;
2584 }
2585
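/* Bump the threshold-mode value (tc) in steps of 64, up to 256, when the DMA
 * reports an error suggesting the current TX threshold is too low.
 */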
2586 static void stmmac_bump_dma_threshold(struct stmmac_priv *priv, u32 chan)
2587 {
2588         if (unlikely(priv->xstats.threshold != SF_DMA_MODE) && tc <= 256) {
2589                 tc += 64;
2590
2591                 if (priv->plat->force_thresh_dma_mode)
2592                         stmmac_set_dma_operation_mode(priv, tc, tc, chan);
2593                 else
2594                         stmmac_set_dma_operation_mode(priv, tc, SF_DMA_MODE,
2595                                                       chan);
2596
2597                 priv->xstats.threshold = tc;
2598         }
2599 }
2600
2601 /**
2602  * stmmac_tx_clean - to manage the transmission completion
2603  * @priv: driver private structure
2604  * @budget: napi budget limiting this function's packet handling
2605  * @queue: TX queue index
2606  * @pending_packets: signal to arm the TX coal timer
2607  * Description: it reclaims the transmit resources after transmission completes.
2608  * If some packets still need to be handled, due to TX coalescing, set
2609  * pending_packets to true to make NAPI arm the TX coal timer.
2610  */
2611 static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue,
2612                            bool *pending_packets)
2613 {
2614         struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
2615         struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
2616         unsigned int bytes_compl = 0, pkts_compl = 0;
2617         unsigned int entry, xmits = 0, count = 0;
2618         u32 tx_packets = 0, tx_errors = 0;
2619         unsigned long flags;
2620
2621         __netif_tx_lock_bh(netdev_get_tx_queue(priv->dev, queue));
2622
2623         tx_q->xsk_frames_done = 0;
2624
2625         entry = tx_q->dirty_tx;
2626
2627         /* Try to clean all TX complete frames in one shot */
2628         while ((entry != tx_q->cur_tx) && count < priv->dma_conf.dma_tx_size) {
2629                 struct xdp_frame *xdpf;
2630                 struct sk_buff *skb;
2631                 struct dma_desc *p;
2632                 int status;
2633
2634                 if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX ||
2635                     tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) {
2636                         xdpf = tx_q->xdpf[entry];
2637                         skb = NULL;
2638                 } else if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) {
2639                         xdpf = NULL;
2640                         skb = tx_q->tx_skbuff[entry];
2641                 } else {
2642                         xdpf = NULL;
2643                         skb = NULL;
2644                 }
2645
2646                 if (priv->extend_desc)
2647                         p = (struct dma_desc *)(tx_q->dma_etx + entry);
2648                 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2649                         p = &tx_q->dma_entx[entry].basic;
2650                 else
2651                         p = tx_q->dma_tx + entry;
2652
2653                 status = stmmac_tx_status(priv, &priv->xstats, p, priv->ioaddr);
2654                 /* Check if the descriptor is owned by the DMA */
2655                 if (unlikely(status & tx_dma_own))
2656                         break;
2657
2658                 count++;
2659
2660                 /* Make sure descriptor fields are read after reading
2661                  * the own bit.
2662                  */
2663                 dma_rmb();
2664
2665                 /* Just consider the last segment and ... */
2666                 if (likely(!(status & tx_not_ls))) {
2667                         /* ... verify the status error condition */
2668                         if (unlikely(status & tx_err)) {
2669                                 tx_errors++;
2670                                 if (unlikely(status & tx_err_bump_tc))
2671                                         stmmac_bump_dma_threshold(priv, queue);
2672                         } else {
2673                                 tx_packets++;
2674                         }
2675                         if (skb) {
2676                                 stmmac_get_tx_hwtstamp(priv, p, skb);
2677                         } else {
2678                                 struct stmmac_xsk_tx_complete tx_compl = {
2679                                         .priv = priv,
2680                                         .desc = p,
2681                                 };
2682
2683                                 xsk_tx_metadata_complete(&tx_q->tx_skbuff_dma[entry].xsk_meta,
2684                                                          &stmmac_xsk_tx_metadata_ops,
2685                                                          &tx_compl);
2686                         }
2687                 }
2688
2689                 if (likely(tx_q->tx_skbuff_dma[entry].buf &&
2690                            tx_q->tx_skbuff_dma[entry].buf_type != STMMAC_TXBUF_T_XDP_TX)) {
2691                         if (tx_q->tx_skbuff_dma[entry].map_as_page)
2692                                 dma_unmap_page(priv->device,
2693                                                tx_q->tx_skbuff_dma[entry].buf,
2694                                                tx_q->tx_skbuff_dma[entry].len,
2695                                                DMA_TO_DEVICE);
2696                         else
2697                                 dma_unmap_single(priv->device,
2698                                                  tx_q->tx_skbuff_dma[entry].buf,
2699                                                  tx_q->tx_skbuff_dma[entry].len,
2700                                                  DMA_TO_DEVICE);
2701                         tx_q->tx_skbuff_dma[entry].buf = 0;
2702                         tx_q->tx_skbuff_dma[entry].len = 0;
2703                         tx_q->tx_skbuff_dma[entry].map_as_page = false;
2704                 }
2705
2706                 stmmac_clean_desc3(priv, tx_q, p);
2707
2708                 tx_q->tx_skbuff_dma[entry].last_segment = false;
2709                 tx_q->tx_skbuff_dma[entry].is_jumbo = false;
2710
2711                 if (xdpf &&
2712                     tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX) {
2713                         xdp_return_frame_rx_napi(xdpf);
2714                         tx_q->xdpf[entry] = NULL;
2715                 }
2716
2717                 if (xdpf &&
2718                     tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) {
2719                         xdp_return_frame(xdpf);
2720                         tx_q->xdpf[entry] = NULL;
2721                 }
2722
2723                 if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XSK_TX)
2724                         tx_q->xsk_frames_done++;
2725
2726                 if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) {
2727                         if (likely(skb)) {
2728                                 pkts_compl++;
2729                                 bytes_compl += skb->len;
2730                                 dev_consume_skb_any(skb);
2731                                 tx_q->tx_skbuff[entry] = NULL;
2732                         }
2733                 }
2734
2735                 stmmac_release_tx_desc(priv, p, priv->mode);
2736
2737                 entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
2738         }
2739         tx_q->dirty_tx = entry;
2740
2741         netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue),
2742                                   pkts_compl, bytes_compl);
2743
2744         if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev,
2745                                                                 queue))) &&
2746             stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH(priv)) {
2747
2748                 netif_dbg(priv, tx_done, priv->dev,
2749                           "%s: restart transmit\n", __func__);
2750                 netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue));
2751         }
2752
2753         if (tx_q->xsk_pool) {
2754                 bool work_done;
2755
2756                 if (tx_q->xsk_frames_done)
2757                         xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done);
2758
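		/* In NEED_WAKEUP mode, tell user space it has to kick the kernel
		 * again for further XSK TX descriptors to be processed.
		 */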
2759                 if (xsk_uses_need_wakeup(tx_q->xsk_pool))
2760                         xsk_set_tx_need_wakeup(tx_q->xsk_pool);
2761
2762                 /* For XSK TX, we try to send as many as possible.
2763                  * If XSK work done (XSK TX desc empty and budget still
2764                  * available), return "budget - 1" to reenable TX IRQ.
2765                  * Else, return "budget" to make NAPI continue polling.
2766                  */
2767                 work_done = stmmac_xdp_xmit_zc(priv, queue,
2768                                                STMMAC_XSK_TX_BUDGET_MAX);
2769                 if (work_done)
2770                         xmits = budget - 1;
2771                 else
2772                         xmits = budget;
2773         }
2774
2775         if (priv->eee_enabled && !priv->tx_path_in_lpi_mode &&
2776             priv->eee_sw_timer_en) {
2777                 if (stmmac_enable_eee_mode(priv))
2778                         mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
2779         }
2780
2781         /* We still have pending packets, let's call for a new scheduling */
2782         if (tx_q->dirty_tx != tx_q->cur_tx)
2783                 *pending_packets = true;
2784
2785         flags = u64_stats_update_begin_irqsave(&txq_stats->syncp);
2786         txq_stats->tx_packets += tx_packets;
2787         txq_stats->tx_pkt_n += tx_packets;
2788         txq_stats->tx_clean++;
2789         u64_stats_update_end_irqrestore(&txq_stats->syncp, flags);
2790
2791         priv->xstats.tx_errors += tx_errors;
2792
2793         __netif_tx_unlock_bh(netdev_get_tx_queue(priv->dev, queue));
2794
2795         /* Combine decisions from TX clean and XSK TX */
2796         return max(count, xmits);
2797 }
2798
2799 /**
2800  * stmmac_tx_err - manage the TX errors
2801  * @priv: driver private structure
2802  * @chan: channel index
2803  * Description: it cleans the descriptors and restarts the transmission
2804  * in case of transmission errors.
2805  */
2806 static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
2807 {
2808         struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
2809
2810         netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan));
2811
2812         stmmac_stop_tx_dma(priv, chan);
2813         dma_free_tx_skbufs(priv, &priv->dma_conf, chan);
2814         stmmac_clear_tx_descriptors(priv, &priv->dma_conf, chan);
2815         stmmac_reset_tx_queue(priv, chan);
2816         stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2817                             tx_q->dma_tx_phy, chan);
2818         stmmac_start_tx_dma(priv, chan);
2819
2820         priv->xstats.tx_errors++;
2821         netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan));
2822 }
2823
2824 /**
2825  *  stmmac_set_dma_operation_mode - Set DMA operation mode by channel
2826  *  @priv: driver private structure
2827  *  @txmode: TX operating mode
2828  *  @rxmode: RX operating mode
2829  *  @chan: channel index
2830  *  Description: it is used to configure the DMA operation mode at runtime
2831  *  in order to program the TX/RX DMA thresholds or the Store-And-Forward
2832  *  mode.
2833  */
2834 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
2835                                           u32 rxmode, u32 chan)
2836 {
2837         u8 rxqmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
2838         u8 txqmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
2839         u32 rx_channels_count = priv->plat->rx_queues_to_use;
2840         u32 tx_channels_count = priv->plat->tx_queues_to_use;
2841         int rxfifosz = priv->plat->rx_fifo_size;
2842         int txfifosz = priv->plat->tx_fifo_size;
2843
2844         if (rxfifosz == 0)
2845                 rxfifosz = priv->dma_cap.rx_fifo_size;
2846         if (txfifosz == 0)
2847                 txfifosz = priv->dma_cap.tx_fifo_size;
2848
2849         /* Adjust for real per queue fifo size */
2850         rxfifosz /= rx_channels_count;
2851         txfifosz /= tx_channels_count;
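             /* e.g. a 16384-byte FIFO shared by 4 channels leaves 4096 bytes
              * per channel for the mode programming below.
              */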
2852
2853         stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, rxfifosz, rxqmode);
2854         stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, txfifosz, txqmode);
2855 }
2856
2857 static bool stmmac_safety_feat_interrupt(struct stmmac_priv *priv)
2858 {
2859         int ret;
2860
2861         ret = stmmac_safety_feat_irq_status(priv, priv->dev,
2862                         priv->ioaddr, priv->dma_cap.asp, &priv->sstats);
2863         if (ret && (ret != -EINVAL)) {
2864                 stmmac_global_err(priv);
2865                 return true;
2866         }
2867
2868         return false;
2869 }
2870
2871 static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan, u32 dir)
2872 {
2873         int status = stmmac_dma_interrupt_status(priv, priv->ioaddr,
2874                                                  &priv->xstats, chan, dir);
2875         struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan];
2876         struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
2877         struct stmmac_channel *ch = &priv->channel[chan];
2878         struct napi_struct *rx_napi;
2879         struct napi_struct *tx_napi;
2880         unsigned long flags;
2881
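             /* Queues backed by an XSK pool are serviced by the combined
              * rx/tx NAPI instance instead of the per-direction ones.
              */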
2882         rx_napi = rx_q->xsk_pool ? &ch->rxtx_napi : &ch->rx_napi;
2883         tx_napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
2884
2885         if ((status & handle_rx) && (chan < priv->plat->rx_queues_to_use)) {
2886                 if (napi_schedule_prep(rx_napi)) {
2887                         spin_lock_irqsave(&ch->lock, flags);
2888                         stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
2889                         spin_unlock_irqrestore(&ch->lock, flags);
2890                         __napi_schedule(rx_napi);
2891                 }
2892         }
2893
2894         if ((status & handle_tx) && (chan < priv->plat->tx_queues_to_use)) {
2895                 if (napi_schedule_prep(tx_napi)) {
2896                         spin_lock_irqsave(&ch->lock, flags);
2897                         stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
2898                         spin_unlock_irqrestore(&ch->lock, flags);
2899                         __napi_schedule(tx_napi);
2900                 }
2901         }
2902
2903         return status;
2904 }
2905
2906 /**
2907  * stmmac_dma_interrupt - DMA ISR
2908  * @priv: driver private structure
2909  * Description: this is the DMA ISR. It is called by the main ISR.
2910  * It calls the dwmac dma routine and schedules the poll method when
2911  * some work can be done.
2912  */
2913 static void stmmac_dma_interrupt(struct stmmac_priv *priv)
2914 {
2915         u32 tx_channel_count = priv->plat->tx_queues_to_use;
2916         u32 rx_channel_count = priv->plat->rx_queues_to_use;
2917         u32 channels_to_check = tx_channel_count > rx_channel_count ?
2918                                 tx_channel_count : rx_channel_count;
2919         u32 chan;
2920         int status[max_t(u32, MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES)];
2921
2922         /* Make sure we never check beyond our status buffer. */
2923         if (WARN_ON_ONCE(channels_to_check > ARRAY_SIZE(status)))
2924                 channels_to_check = ARRAY_SIZE(status);
2925
2926         for (chan = 0; chan < channels_to_check; chan++)
2927                 status[chan] = stmmac_napi_check(priv, chan,
2928                                                  DMA_DIR_RXTX);
2929
2930         for (chan = 0; chan < tx_channel_count; chan++) {
2931                 if (unlikely(status[chan] & tx_hard_error_bump_tc)) {
2932                         /* Try to bump up the dma threshold on this failure */
2933                         stmmac_bump_dma_threshold(priv, chan);
2934                 } else if (unlikely(status[chan] == tx_hard_error)) {
2935                         stmmac_tx_err(priv, chan);
2936                 }
2937         }
2938 }
2939
2940 /**
2941  * stmmac_mmc_setup - setup the MAC Management Counters (MMC)
2942  * @priv: driver private structure
2943  * Description: this masks the MMC irq since the counters are managed in SW.
2944  */
2945 static void stmmac_mmc_setup(struct stmmac_priv *priv)
2946 {
2947         unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
2948                             MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
2949
2950         stmmac_mmc_intr_all_mask(priv, priv->mmcaddr);
2951
2952         if (priv->dma_cap.rmon) {
2953                 stmmac_mmc_ctrl(priv, priv->mmcaddr, mode);
2954                 memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
2955         } else
2956                 netdev_info(priv->dev, "No MAC Management Counters available\n");
2957 }
2958
2959 /**
2960  * stmmac_get_hw_features - get MAC capabilities from the HW cap. register.
2961  * @priv: driver private structure
2962  * Description:
2963  *  newer GMAC chip generations have a dedicated register to indicate the
2964  *  presence of optional features/functions.
2965  *  This can also be used to override the values passed through the
2966  *  platform, which are still necessary for old MAC10/100 and GMAC chips.
2967  */
2968 static int stmmac_get_hw_features(struct stmmac_priv *priv)
2969 {
2970         return stmmac_get_hw_feature(priv, priv->ioaddr, &priv->dma_cap) == 0;
2971 }
2972
2973 /**
2974  * stmmac_check_ether_addr - check if the MAC addr is valid
2975  * @priv: driver private structure
2976  * Description:
2977  * it verifies whether the MAC address is valid; in case of failure, it
2978  * generates a random MAC address
2979  */
2980 static void stmmac_check_ether_addr(struct stmmac_priv *priv)
2981 {
2982         u8 addr[ETH_ALEN];
2983
2984         if (!is_valid_ether_addr(priv->dev->dev_addr)) {
2985                 stmmac_get_umac_addr(priv, priv->hw, addr, 0);
2986                 if (is_valid_ether_addr(addr))
2987                         eth_hw_addr_set(priv->dev, addr);
2988                 else
2989                         eth_hw_addr_random(priv->dev);
2990                 dev_info(priv->device, "device MAC address %pM\n",
2991                          priv->dev->dev_addr);
2992         }
2993 }
2994
2995 /**
2996  * stmmac_init_dma_engine - DMA init.
2997  * @priv: driver private structure
2998  * Description:
2999  * It initializes the DMA by invoking the specific MAC/GMAC callback.
3000  * Some DMA parameters can be passed from the platform;
3001  * if these are not passed, a default is kept for the MAC or GMAC.
3002  */
3003 static int stmmac_init_dma_engine(struct stmmac_priv *priv)
3004 {
3005         u32 rx_channels_count = priv->plat->rx_queues_to_use;
3006         u32 tx_channels_count = priv->plat->tx_queues_to_use;
3007         u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
3008         struct stmmac_rx_queue *rx_q;
3009         struct stmmac_tx_queue *tx_q;
3010         u32 chan = 0;
3011         int atds = 0;
3012         int ret = 0;
3013
3014         if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) {
3015                 dev_err(priv->device, "Invalid DMA configuration\n");
3016                 return -EINVAL;
3017         }
3018
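             /* ATDS: use the alternate (extended) descriptor size when
              * extended descriptors are used in ring mode.
              */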
3019         if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
3020                 atds = 1;
3021
3022         ret = stmmac_reset(priv, priv->ioaddr);
3023         if (ret) {
3024                 dev_err(priv->device, "Failed to reset the dma\n");
3025                 return ret;
3026         }
3027
3028         /* DMA Configuration */
3029         stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg, atds);
3030
3031         if (priv->plat->axi)
3032                 stmmac_axi(priv, priv->ioaddr, priv->plat->axi);
3033
3034         /* DMA CSR Channel configuration */
3035         for (chan = 0; chan < dma_csr_ch; chan++) {
3036                 stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
3037                 stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
3038         }
3039
3040         /* DMA RX Channel Configuration */
3041         for (chan = 0; chan < rx_channels_count; chan++) {
3042                 rx_q = &priv->dma_conf.rx_queue[chan];
3043
3044                 stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
3045                                     rx_q->dma_rx_phy, chan);
3046
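                     /* Program the RX tail pointer just past the last
                      * descriptor that owns a pre-allocated buffer.
                      */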
3047                 rx_q->rx_tail_addr = rx_q->dma_rx_phy +
3048                                      (rx_q->buf_alloc_num *
3049                                       sizeof(struct dma_desc));
3050                 stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
3051                                        rx_q->rx_tail_addr, chan);
3052         }
3053
3054         /* DMA TX Channel Configuration */
3055         for (chan = 0; chan < tx_channels_count; chan++) {
3056                 tx_q = &priv->dma_conf.tx_queue[chan];
3057
3058                 stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
3059                                     tx_q->dma_tx_phy, chan);
3060
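                     /* The TX tail pointer starts at the base of the ring and
                      * is moved forward as descriptors are queued.
                      */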
3061                 tx_q->tx_tail_addr = tx_q->dma_tx_phy;
3062                 stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
3063                                        tx_q->tx_tail_addr, chan);
3064         }
3065
3066         return ret;
3067 }
3068
3069 static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue)
3070 {
3071         struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
3072         u32 tx_coal_timer = priv->tx_coal_timer[queue];
3073         struct stmmac_channel *ch;
3074         struct napi_struct *napi;
3075
3076         if (!tx_coal_timer)
3077                 return;
3078
3079         ch = &priv->channel[tx_q->queue_index];
3080         napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
3081
3082         /* Arm timer only if napi is not already scheduled.
3083          * Try to cancel any timer if napi is scheduled, timer will be armed
3084          * again in the next scheduled napi.
3085          */
3086         if (unlikely(!napi_is_scheduled(napi)))
3087                 hrtimer_start(&tx_q->txtimer,
3088                               STMMAC_COAL_TIMER(tx_coal_timer),
3089                               HRTIMER_MODE_REL);
3090         else
3091                 hrtimer_try_to_cancel(&tx_q->txtimer);
3092 }
3093
3094 /**
3095  * stmmac_tx_timer - mitigation sw timer for tx.
3096  * @t: pointer to the hrtimer embedded in the TX queue
3097  * Description:
3098  * This is the timer handler to directly invoke the stmmac_tx_clean.
3099  */
3100 static enum hrtimer_restart stmmac_tx_timer(struct hrtimer *t)
3101 {
3102         struct stmmac_tx_queue *tx_q = container_of(t, struct stmmac_tx_queue, txtimer);
3103         struct stmmac_priv *priv = tx_q->priv_data;
3104         struct stmmac_channel *ch;
3105         struct napi_struct *napi;
3106
3107         ch = &priv->channel[tx_q->queue_index];
3108         napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
3109
3110         if (likely(napi_schedule_prep(napi))) {
3111                 unsigned long flags;
3112
3113                 spin_lock_irqsave(&ch->lock, flags);
3114                 stmmac_disable_dma_irq(priv, priv->ioaddr, ch->index, 0, 1);
3115                 spin_unlock_irqrestore(&ch->lock, flags);
3116                 __napi_schedule(napi);
3117         }
3118
3119         return HRTIMER_NORESTART;
3120 }
3121
3122 /**
3123  * stmmac_init_coalesce - init mitigation options.
3124  * @priv: driver private structure
3125  * Description:
3126  * This initializes the coalescing parameters: i.e. the timer rate,
3127  * the timer handler and the default threshold used for enabling the
3128  * interrupt-on-completion bit.
3129  */
3130 static void stmmac_init_coalesce(struct stmmac_priv *priv)
3131 {
3132         u32 tx_channel_count = priv->plat->tx_queues_to_use;
3133         u32 rx_channel_count = priv->plat->rx_queues_to_use;
3134         u32 chan;
3135
3136         for (chan = 0; chan < tx_channel_count; chan++) {
3137                 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3138
3139                 priv->tx_coal_frames[chan] = STMMAC_TX_FRAMES;
3140                 priv->tx_coal_timer[chan] = STMMAC_COAL_TX_TIMER;
3141
3142                 hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
3143                 tx_q->txtimer.function = stmmac_tx_timer;
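                     /* When it fires, stmmac_tx_timer() schedules the TX
                      * (or rxtx) NAPI to run the TX clean path.
                      */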
3144         }
3145
3146         for (chan = 0; chan < rx_channel_count; chan++)
3147                 priv->rx_coal_frames[chan] = STMMAC_RX_FRAMES;
3148 }
3149
3150 static void stmmac_set_rings_length(struct stmmac_priv *priv)
3151 {
3152         u32 rx_channels_count = priv->plat->rx_queues_to_use;
3153         u32 tx_channels_count = priv->plat->tx_queues_to_use;
3154         u32 chan;
3155
3156         /* set TX ring length */
3157         for (chan = 0; chan < tx_channels_count; chan++)
3158                 stmmac_set_tx_ring_len(priv, priv->ioaddr,
3159                                        (priv->dma_conf.dma_tx_size - 1), chan);
3160
3161         /* set RX ring length */
3162         for (chan = 0; chan < rx_channels_count; chan++)
3163                 stmmac_set_rx_ring_len(priv, priv->ioaddr,
3164                                        (priv->dma_conf.dma_rx_size - 1), chan);
3165 }
3166
3167 /**
3168  *  stmmac_set_tx_queue_weight - Set TX queue weight
3169  *  @priv: driver private structure
3170  *  Description: It is used for setting the TX queue weights
3171  */
3172 static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv)
3173 {
3174         u32 tx_queues_count = priv->plat->tx_queues_to_use;
3175         u32 weight;
3176         u32 queue;
3177
3178         for (queue = 0; queue < tx_queues_count; queue++) {
3179                 weight = priv->plat->tx_queues_cfg[queue].weight;
3180                 stmmac_set_mtl_tx_queue_weight(priv, priv->hw, weight, queue);
3181         }
3182 }
3183
3184 /**
3185  *  stmmac_configure_cbs - Configure CBS in TX queue
3186  *  @priv: driver private structure
3187  *  Description: It is used for configuring CBS in AVB TX queues
3188  */
3189 static void stmmac_configure_cbs(struct stmmac_priv *priv)
3190 {
3191         u32 tx_queues_count = priv->plat->tx_queues_to_use;
3192         u32 mode_to_use;
3193         u32 queue;
3194
3195         /* queue 0 is reserved for legacy traffic */
3196         for (queue = 1; queue < tx_queues_count; queue++) {
3197                 mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
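                     /* CBS only applies to AVB queues; queues left in DCB
                      * mode are skipped.
                      */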
3198                 if (mode_to_use == MTL_QUEUE_DCB)
3199                         continue;
3200
3201                 stmmac_config_cbs(priv, priv->hw,
3202                                 priv->plat->tx_queues_cfg[queue].send_slope,
3203                                 priv->plat->tx_queues_cfg[queue].idle_slope,
3204                                 priv->plat->tx_queues_cfg[queue].high_credit,
3205                                 priv->plat->tx_queues_cfg[queue].low_credit,
3206                                 queue);
3207         }
3208 }
3209
3210 /**
3211  *  stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel
3212  *  @priv: driver private structure
3213  *  Description: It is used for mapping RX queues to RX dma channels
3214  */
3215 static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv)
3216 {
3217         u32 rx_queues_count = priv->plat->rx_queues_to_use;
3218         u32 queue;
3219         u32 chan;
3220
3221         for (queue = 0; queue < rx_queues_count; queue++) {
3222                 chan = priv->plat->rx_queues_cfg[queue].chan;
3223                 stmmac_map_mtl_to_dma(priv, priv->hw, queue, chan);
3224         }
3225 }
3226
3227 /**
3228  *  stmmac_mac_config_rx_queues_prio - Configure RX Queue priority
3229  *  @priv: driver private structure
3230  *  Description: It is used for configuring the RX Queue Priority
3231  */
3232 static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv)
3233 {
3234         u32 rx_queues_count = priv->plat->rx_queues_to_use;
3235         u32 queue;
3236         u32 prio;
3237
3238         for (queue = 0; queue < rx_queues_count; queue++) {
3239                 if (!priv->plat->rx_queues_cfg[queue].use_prio)
3240                         continue;
3241
3242                 prio = priv->plat->rx_queues_cfg[queue].prio;
3243                 stmmac_rx_queue_prio(priv, priv->hw, prio, queue);
3244         }
3245 }
3246
3247 /**
3248  *  stmmac_mac_config_tx_queues_prio - Configure TX Queue priority
3249  *  @priv: driver private structure
3250  *  Description: It is used for configuring the TX Queue Priority
3251  */
3252 static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv)
3253 {
3254         u32 tx_queues_count = priv->plat->tx_queues_to_use;
3255         u32 queue;
3256         u32 prio;
3257
3258         for (queue = 0; queue < tx_queues_count; queue++) {
3259                 if (!priv->plat->tx_queues_cfg[queue].use_prio)
3260                         continue;
3261
3262                 prio = priv->plat->tx_queues_cfg[queue].prio;
3263                 stmmac_tx_queue_prio(priv, priv->hw, prio, queue);
3264         }
3265 }
3266
3267 /**
3268  *  stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing
3269  *  @priv: driver private structure
3270  *  Description: It is used for configuring the RX queue routing
3271  */
3272 static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv)
3273 {
3274         u32 rx_queues_count = priv->plat->rx_queues_to_use;
3275         u32 queue;
3276         u8 packet;
3277
3278         for (queue = 0; queue < rx_queues_count; queue++) {
3279                 /* no specific packet type routing specified for the queue */
3280                 if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0)
3281                         continue;
3282
3283                 packet = priv->plat->rx_queues_cfg[queue].pkt_route;
3284                 stmmac_rx_queue_routing(priv, priv->hw, packet, queue);
3285         }
3286 }
3287
3288 static void stmmac_mac_config_rss(struct stmmac_priv *priv)
3289 {
3290         if (!priv->dma_cap.rssen || !priv->plat->rss_en) {
3291                 priv->rss.enable = false;
3292                 return;
3293         }
3294
3295         if (priv->dev->features & NETIF_F_RXHASH)
3296                 priv->rss.enable = true;
3297         else
3298                 priv->rss.enable = false;
3299
3300         stmmac_rss_configure(priv, priv->hw, &priv->rss,
3301                              priv->plat->rx_queues_to_use);
3302 }
3303
3304 /**
3305  *  stmmac_mtl_configuration - Configure MTL
3306  *  @priv: driver private structure
3307  *  Description: It is used for configuring the MTL
3308  */
3309 static void stmmac_mtl_configuration(struct stmmac_priv *priv)
3310 {
3311         u32 rx_queues_count = priv->plat->rx_queues_to_use;
3312         u32 tx_queues_count = priv->plat->tx_queues_to_use;
3313
3314         if (tx_queues_count > 1)
3315                 stmmac_set_tx_queue_weight(priv);
3316
3317         /* Configure MTL RX algorithms */
3318         if (rx_queues_count > 1)
3319                 stmmac_prog_mtl_rx_algorithms(priv, priv->hw,
3320                                 priv->plat->rx_sched_algorithm);
3321
3322         /* Configure MTL TX algorithms */
3323         if (tx_queues_count > 1)
3324                 stmmac_prog_mtl_tx_algorithms(priv, priv->hw,
3325                                 priv->plat->tx_sched_algorithm);
3326
3327         /* Configure CBS in AVB TX queues */
3328         if (tx_queues_count > 1)
3329                 stmmac_configure_cbs(priv);
3330
3331         /* Map RX MTL to DMA channels */
3332         stmmac_rx_queue_dma_chan_map(priv);
3333
3334         /* Enable MAC RX Queues */
3335         stmmac_mac_enable_rx_queues(priv);
3336
3337         /* Set RX priorities */
3338         if (rx_queues_count > 1)
3339                 stmmac_mac_config_rx_queues_prio(priv);
3340
3341         /* Set TX priorities */
3342         if (tx_queues_count > 1)
3343                 stmmac_mac_config_tx_queues_prio(priv);
3344
3345         /* Set RX routing */
3346         if (rx_queues_count > 1)
3347                 stmmac_mac_config_rx_queues_routing(priv);
3348
3349         /* Receive Side Scaling */
3350         if (rx_queues_count > 1)
3351                 stmmac_mac_config_rss(priv);
3352 }
3353
3354 static void stmmac_safety_feat_configuration(struct stmmac_priv *priv)
3355 {
3356         if (priv->dma_cap.asp) {
3357                 netdev_info(priv->dev, "Enabling Safety Features\n");
3358                 stmmac_safety_feat_config(priv, priv->ioaddr, priv->dma_cap.asp,
3359                                           priv->plat->safety_feat_cfg);
3360         } else {
3361                 netdev_info(priv->dev, "No Safety Features support found\n");
3362         }
3363 }
3364
3365 static int stmmac_fpe_start_wq(struct stmmac_priv *priv)
3366 {
3367         char *name;
3368
3369         clear_bit(__FPE_TASK_SCHED, &priv->fpe_task_state);
3370         clear_bit(__FPE_REMOVING,  &priv->fpe_task_state);
3371
3372         name = priv->wq_name;
3373         sprintf(name, "%s-fpe", priv->dev->name);
3374
3375         priv->fpe_wq = create_singlethread_workqueue(name);
3376         if (!priv->fpe_wq) {
3377                 netdev_err(priv->dev, "%s: Failed to create workqueue\n", name);
3378
3379                 return -ENOMEM;
3380         }
3381         netdev_info(priv->dev, "FPE workqueue start\n");
3382
3383         return 0;
3384 }
3385
3386 /**
3387  * stmmac_hw_setup - setup mac in a usable state.
3388  *  @dev : pointer to the device structure.
3389  *  @ptp_register: register PTP if set
3390  *  Description:
3391  *  this is the main function to setup the HW in a usable state: the
3392  *  dma engine is reset, the core registers are configured (e.g. AXI,
3393  *  Checksum features, timers) and the DMA is ready to start receiving
3394  *  and transmitting.
3395  *  Return value:
3396  *  0 on success and an appropriate (-)ve integer as defined in errno.h
3397  *  file on failure.
3398  */
3399 static int stmmac_hw_setup(struct net_device *dev, bool ptp_register)
3400 {
3401         struct stmmac_priv *priv = netdev_priv(dev);
3402         u32 rx_cnt = priv->plat->rx_queues_to_use;
3403         u32 tx_cnt = priv->plat->tx_queues_to_use;
3404         bool sph_en;
3405         u32 chan;
3406         int ret;
3407
3408         /* DMA initialization and SW reset */
3409         ret = stmmac_init_dma_engine(priv);
3410         if (ret < 0) {
3411                 netdev_err(priv->dev, "%s: DMA engine initialization failed\n",
3412                            __func__);
3413                 return ret;
3414         }
3415
3416         /* Copy the MAC addr into the HW  */
3417         stmmac_set_umac_addr(priv, priv->hw, dev->dev_addr, 0);
3418
3419         /* PS and related bits will be programmed according to the speed */
3420         if (priv->hw->pcs) {
3421                 int speed = priv->plat->mac_port_sel_speed;
3422
3423                 if ((speed == SPEED_10) || (speed == SPEED_100) ||
3424                     (speed == SPEED_1000)) {
3425                         priv->hw->ps = speed;
3426                 } else {
3427                         dev_warn(priv->device, "invalid port speed\n");
3428                         priv->hw->ps = 0;
3429                 }
3430         }
3431
3432         /* Initialize the MAC Core */
3433         stmmac_core_init(priv, priv->hw, dev);
3434
3435         /* Initialize MTL */
3436         stmmac_mtl_configuration(priv);
3437
3438         /* Initialize Safety Features */
3439         stmmac_safety_feat_configuration(priv);
3440
3441         ret = stmmac_rx_ipc(priv, priv->hw);
3442         if (!ret) {
3443                 netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n");
3444                 priv->plat->rx_coe = STMMAC_RX_COE_NONE;
3445                 priv->hw->rx_csum = 0;
3446         }
3447
3448         /* Enable the MAC Rx/Tx */
3449         stmmac_mac_set(priv, priv->ioaddr, true);
3450
3451         /* Set the HW DMA mode and the COE */
3452         stmmac_dma_operation_mode(priv);
3453
3454         stmmac_mmc_setup(priv);
3455
3456         if (ptp_register) {
3457                 ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
3458                 if (ret < 0)
3459                         netdev_warn(priv->dev,
3460                                     "failed to enable PTP reference clock: %pe\n",
3461                                     ERR_PTR(ret));
3462         }
3463
3464         ret = stmmac_init_ptp(priv);
3465         if (ret == -EOPNOTSUPP)
3466                 netdev_info(priv->dev, "PTP not supported by HW\n");
3467         else if (ret)
3468                 netdev_warn(priv->dev, "PTP init failed\n");
3469         else if (ptp_register)
3470                 stmmac_ptp_register(priv);
3471
3472         priv->eee_tw_timer = STMMAC_DEFAULT_TWT_LS;
3473
3474         /* Convert the timer from msec to usec */
3475         if (!priv->tx_lpi_timer)
3476                 priv->tx_lpi_timer = eee_timer * 1000;
3477
3478         if (priv->use_riwt) {
3479                 u32 queue;
3480
3481                 for (queue = 0; queue < rx_cnt; queue++) {
3482                         if (!priv->rx_riwt[queue])
3483                                 priv->rx_riwt[queue] = DEF_DMA_RIWT;
3484
3485                         stmmac_rx_watchdog(priv, priv->ioaddr,
3486                                            priv->rx_riwt[queue], queue);
3487                 }
3488         }
3489
3490         if (priv->hw->pcs)
3491                 stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 1, priv->hw->ps, 0);
3492
3493         /* set TX and RX rings length */
3494         stmmac_set_rings_length(priv);
3495
3496         /* Enable TSO */
3497         if (priv->tso) {
3498                 for (chan = 0; chan < tx_cnt; chan++) {
3499                         struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3500
3501                         /* TSO and TBS cannot co-exist */
3502                         if (tx_q->tbs & STMMAC_TBS_AVAIL)
3503                                 continue;
3504
3505                         stmmac_enable_tso(priv, priv->ioaddr, 1, chan);
3506                 }
3507         }
3508
3509         /* Enable Split Header */
3510         sph_en = (priv->hw->rx_csum > 0) && priv->sph;
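             /* Split Header is only enabled when RX checksum offload is
              * active and the SPH feature is available.
              */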
3511         for (chan = 0; chan < rx_cnt; chan++)
3512                 stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
3513
3514
3515         /* VLAN Tag Insertion */
3516         if (priv->dma_cap.vlins)
3517                 stmmac_enable_vlan(priv, priv->hw, STMMAC_VLAN_INSERT);
3518
3519         /* Enable Time-Based Scheduling (TBS) on the queues where it is available */
3520         for (chan = 0; chan < tx_cnt; chan++) {
3521                 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3522                 int enable = tx_q->tbs & STMMAC_TBS_AVAIL;
3523
3524                 stmmac_enable_tbs(priv, priv->ioaddr, enable, chan);
3525         }
3526
3527         /* Configure real RX and TX queues */
3528         netif_set_real_num_rx_queues(dev, priv->plat->rx_queues_to_use);
3529         netif_set_real_num_tx_queues(dev, priv->plat->tx_queues_to_use);
3530
3531         /* Start the ball rolling... */
3532         stmmac_start_all_dma(priv);
3533
3534         stmmac_set_hw_vlan_mode(priv, priv->hw);
3535
3536         if (priv->dma_cap.fpesel) {
3537                 stmmac_fpe_start_wq(priv);
3538
3539                 if (priv->plat->fpe_cfg->enable)
3540                         stmmac_fpe_handshake(priv, true);
3541         }
3542
3543         return 0;
3544 }
3545
3546 static void stmmac_hw_teardown(struct net_device *dev)
3547 {
3548         struct stmmac_priv *priv = netdev_priv(dev);
3549
3550         clk_disable_unprepare(priv->plat->clk_ptp_ref);
3551 }
3552
3553 static void stmmac_free_irq(struct net_device *dev,
3554                             enum request_irq_err irq_err, int irq_idx)
3555 {
3556         struct stmmac_priv *priv = netdev_priv(dev);
3557         int j;
3558
3559         switch (irq_err) {
3560         case REQ_IRQ_ERR_ALL:
3561                 irq_idx = priv->plat->tx_queues_to_use;
3562                 fallthrough;
3563         case REQ_IRQ_ERR_TX:
3564                 for (j = irq_idx - 1; j >= 0; j--) {
3565                         if (priv->tx_irq[j] > 0) {
3566                                 irq_set_affinity_hint(priv->tx_irq[j], NULL);
3567                                 free_irq(priv->tx_irq[j], &priv->dma_conf.tx_queue[j]);
3568                         }
3569                 }
3570                 irq_idx = priv->plat->rx_queues_to_use;
3571                 fallthrough;
3572         case REQ_IRQ_ERR_RX:
3573                 for (j = irq_idx - 1; j >= 0; j--) {
3574                         if (priv->rx_irq[j] > 0) {
3575                                 irq_set_affinity_hint(priv->rx_irq[j], NULL);
3576                                 free_irq(priv->rx_irq[j], &priv->dma_conf.rx_queue[j]);
3577                         }
3578                 }
3579
3580                 if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq)
3581                         free_irq(priv->sfty_ue_irq, dev);
3582                 fallthrough;
3583         case REQ_IRQ_ERR_SFTY_UE:
3584                 if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq)
3585                         free_irq(priv->sfty_ce_irq, dev);
3586                 fallthrough;
3587         case REQ_IRQ_ERR_SFTY_CE:
3588                 if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq)
3589                         free_irq(priv->lpi_irq, dev);
3590                 fallthrough;
3591         case REQ_IRQ_ERR_LPI:
3592                 if (priv->wol_irq > 0 && priv->wol_irq != dev->irq)
3593                         free_irq(priv->wol_irq, dev);
3594                 fallthrough;
3595         case REQ_IRQ_ERR_WOL:
3596                 free_irq(dev->irq, dev);
3597                 fallthrough;
3598         case REQ_IRQ_ERR_MAC:
3599         case REQ_IRQ_ERR_NO:
3600                 /* If the MAC IRQ request failed, there are no more IRQs to free */
3601                 break;
3602         }
3603 }
3604
3605 static int stmmac_request_irq_multi_msi(struct net_device *dev)
3606 {
3607         struct stmmac_priv *priv = netdev_priv(dev);
3608         enum request_irq_err irq_err;
3609         cpumask_t cpu_mask;
3610         int irq_idx = 0;
3611         char *int_name;
3612         int ret;
3613         int i;
3614
3615         /* For common interrupt */
3616         int_name = priv->int_name_mac;
3617         sprintf(int_name, "%s:%s", dev->name, "mac");
3618         ret = request_irq(dev->irq, stmmac_mac_interrupt,
3619                           0, int_name, dev);
3620         if (unlikely(ret < 0)) {
3621                 netdev_err(priv->dev,
3622                            "%s: alloc mac MSI %d (error: %d)\n",
3623                            __func__, dev->irq, ret);
3624                 irq_err = REQ_IRQ_ERR_MAC;
3625                 goto irq_error;
3626         }
3627
3628         /* Request the Wake IRQ in case another line
3629          * is used for WoL
3630          */
3631         priv->wol_irq_disabled = true;
3632         if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
3633                 int_name = priv->int_name_wol;
3634                 sprintf(int_name, "%s:%s", dev->name, "wol");
3635                 ret = request_irq(priv->wol_irq,
3636                                   stmmac_mac_interrupt,
3637                                   0, int_name, dev);
3638                 if (unlikely(ret < 0)) {
3639                         netdev_err(priv->dev,
3640                                    "%s: alloc wol MSI %d (error: %d)\n",
3641                                    __func__, priv->wol_irq, ret);
3642                         irq_err = REQ_IRQ_ERR_WOL;
3643                         goto irq_error;
3644                 }
3645         }
3646
3647         /* Request the LPI IRQ in case another line
3648          * is used for LPI
3649          */
3650         if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) {
3651                 int_name = priv->int_name_lpi;
3652                 sprintf(int_name, "%s:%s", dev->name, "lpi");
3653                 ret = request_irq(priv->lpi_irq,
3654                                   stmmac_mac_interrupt,
3655                                   0, int_name, dev);
3656                 if (unlikely(ret < 0)) {
3657                         netdev_err(priv->dev,
3658                                    "%s: alloc lpi MSI %d (error: %d)\n",
3659                                    __func__, priv->lpi_irq, ret);
3660                         irq_err = REQ_IRQ_ERR_LPI;
3661                         goto irq_error;
3662                 }
3663         }
3664
3665         /* Request the Safety Feature Correctable Error line in
3666          * case another line is used
3667          */
3668         if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq) {
3669                 int_name = priv->int_name_sfty_ce;
3670                 sprintf(int_name, "%s:%s", dev->name, "safety-ce");
3671                 ret = request_irq(priv->sfty_ce_irq,
3672                                   stmmac_safety_interrupt,
3673                                   0, int_name, dev);
3674                 if (unlikely(ret < 0)) {
3675                         netdev_err(priv->dev,
3676                                    "%s: alloc sfty ce MSI %d (error: %d)\n",
3677                                    __func__, priv->sfty_ce_irq, ret);
3678                         irq_err = REQ_IRQ_ERR_SFTY_CE;
3679                         goto irq_error;
3680                 }
3681         }
3682
3683         /* Request the Safety Feature Uncorrectable Error line in
3684          * case another line is used
3685          */
3686         if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq) {
3687                 int_name = priv->int_name_sfty_ue;
3688                 sprintf(int_name, "%s:%s", dev->name, "safety-ue");
3689                 ret = request_irq(priv->sfty_ue_irq,
3690                                   stmmac_safety_interrupt,
3691                                   0, int_name, dev);
3692                 if (unlikely(ret < 0)) {
3693                         netdev_err(priv->dev,
3694                                    "%s: alloc sfty ue MSI %d (error: %d)\n",
3695                                    __func__, priv->sfty_ue_irq, ret);
3696                         irq_err = REQ_IRQ_ERR_SFTY_UE;
3697                         goto irq_error;
3698                 }
3699         }
3700
3701         /* Request Rx MSI irq */
3702         for (i = 0; i < priv->plat->rx_queues_to_use; i++) {
3703                 if (i >= MTL_MAX_RX_QUEUES)
3704                         break;
3705                 if (priv->rx_irq[i] == 0)
3706                         continue;
3707
3708                 int_name = priv->int_name_rx_irq[i];
3709                 sprintf(int_name, "%s:%s-%d", dev->name, "rx", i);
3710                 ret = request_irq(priv->rx_irq[i],
3711                                   stmmac_msi_intr_rx,
3712                                   0, int_name, &priv->dma_conf.rx_queue[i]);
3713                 if (unlikely(ret < 0)) {
3714                         netdev_err(priv->dev,
3715                                    "%s: alloc rx-%d  MSI %d (error: %d)\n",
3716                                    __func__, i, priv->rx_irq[i], ret);
3717                         irq_err = REQ_IRQ_ERR_RX;
3718                         irq_idx = i;
3719                         goto irq_error;
3720                 }
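                     /* Hint the affinity of each RX queue IRQ to a CPU,
                      * round-robin over the online CPUs.
                      */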
3721                 cpumask_clear(&cpu_mask);
3722                 cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
3723                 irq_set_affinity_hint(priv->rx_irq[i], &cpu_mask);
3724         }
3725
3726         /* Request Tx MSI irq */
3727         for (i = 0; i < priv->plat->tx_queues_to_use; i++) {
3728                 if (i >= MTL_MAX_TX_QUEUES)
3729                         break;
3730                 if (priv->tx_irq[i] == 0)
3731                         continue;
3732
3733                 int_name = priv->int_name_tx_irq[i];
3734                 sprintf(int_name, "%s:%s-%d", dev->name, "tx", i);
3735                 ret = request_irq(priv->tx_irq[i],
3736                                   stmmac_msi_intr_tx,
3737                                   0, int_name, &priv->dma_conf.tx_queue[i]);
3738                 if (unlikely(ret < 0)) {
3739                         netdev_err(priv->dev,
3740                                    "%s: alloc tx-%d  MSI %d (error: %d)\n",
3741                                    __func__, i, priv->tx_irq[i], ret);
3742                         irq_err = REQ_IRQ_ERR_TX;
3743                         irq_idx = i;
3744                         goto irq_error;
3745                 }
3746                 cpumask_clear(&cpu_mask);
3747                 cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
3748                 irq_set_affinity_hint(priv->tx_irq[i], &cpu_mask);
3749         }
3750
3751         return 0;
3752
3753 irq_error:
3754         stmmac_free_irq(dev, irq_err, irq_idx);
3755         return ret;
3756 }
3757
3758 static int stmmac_request_irq_single(struct net_device *dev)
3759 {
3760         struct stmmac_priv *priv = netdev_priv(dev);
3761         enum request_irq_err irq_err;
3762         int ret;
3763
3764         ret = request_irq(dev->irq, stmmac_interrupt,
3765                           IRQF_SHARED, dev->name, dev);
3766         if (unlikely(ret < 0)) {
3767                 netdev_err(priv->dev,
3768                            "%s: ERROR: allocating the IRQ %d (error: %d)\n",
3769                            __func__, dev->irq, ret);
3770                 irq_err = REQ_IRQ_ERR_MAC;
3771                 goto irq_error;
3772         }
3773
3774         /* Request the Wake IRQ in case another line
3775          * is used for WoL
3776          */
3777         if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
3778                 ret = request_irq(priv->wol_irq, stmmac_interrupt,
3779                                   IRQF_SHARED, dev->name, dev);
3780                 if (unlikely(ret < 0)) {
3781                         netdev_err(priv->dev,
3782                                    "%s: ERROR: allocating the WoL IRQ %d (%d)\n",
3783                                    __func__, priv->wol_irq, ret);
3784                         irq_err = REQ_IRQ_ERR_WOL;
3785                         goto irq_error;
3786                 }
3787         }
3788
3789         /* Request the LPI IRQ in case another line is used for LPI */
3790         if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) {
3791                 ret = request_irq(priv->lpi_irq, stmmac_interrupt,
3792                                   IRQF_SHARED, dev->name, dev);
3793                 if (unlikely(ret < 0)) {
3794                         netdev_err(priv->dev,
3795                                    "%s: ERROR: allocating the LPI IRQ %d (%d)\n",
3796                                    __func__, priv->lpi_irq, ret);
3797                         irq_err = REQ_IRQ_ERR_LPI;
3798                         goto irq_error;
3799                 }
3800         }
3801
3802         return 0;
3803
3804 irq_error:
3805         stmmac_free_irq(dev, irq_err, 0);
3806         return ret;
3807 }
3808
3809 static int stmmac_request_irq(struct net_device *dev)
3810 {
3811         struct stmmac_priv *priv = netdev_priv(dev);
3812         int ret;
3813
3814         /* Request the IRQ lines */
3815         if (priv->plat->flags & STMMAC_FLAG_MULTI_MSI_EN)
3816                 ret = stmmac_request_irq_multi_msi(dev);
3817         else
3818                 ret = stmmac_request_irq_single(dev);
3819
3820         return ret;
3821 }
3822
3823 /**
3824  *  stmmac_setup_dma_desc - Generate a dma_conf and allocate DMA queue
3825  *  @priv: driver private structure
3826  *  @mtu: MTU to setup the dma queue and buf with
3827  *  Description: Allocate and generate a dma_conf based on the provided MTU.
3828  *  Allocate the Tx/Rx DMA queues and initialize them.
3829  *  Return value:
3830  *  the allocated dma_conf struct on success and an appropriate ERR_PTR on failure.
3831  */
3832 static struct stmmac_dma_conf *
3833 stmmac_setup_dma_desc(struct stmmac_priv *priv, unsigned int mtu)
3834 {
3835         struct stmmac_dma_conf *dma_conf;
3836         int chan, bfsize, ret;
3837
3838         dma_conf = kzalloc(sizeof(*dma_conf), GFP_KERNEL);
3839         if (!dma_conf) {
3840                 netdev_err(priv->dev, "%s: DMA conf allocation failed\n",
3841                            __func__);
3842                 return ERR_PTR(-ENOMEM);
3843         }
3844
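             /* Pick the RX buffer size: a 16 KiB buffer is used when the MTU
              * and the descriptor mode allow it, otherwise the size is
              * derived from the MTU.
              */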
3845         bfsize = stmmac_set_16kib_bfsize(priv, mtu);
3846         if (bfsize < 0)
3847                 bfsize = 0;
3848
3849         if (bfsize < BUF_SIZE_16KiB)
3850                 bfsize = stmmac_set_bfsize(mtu, 0);
3851
3852         dma_conf->dma_buf_sz = bfsize;
3853         /* Choose the tx/rx size from the one already defined in the
3854          * priv struct (if defined).
3855          */
3856         dma_conf->dma_tx_size = priv->dma_conf.dma_tx_size;
3857         dma_conf->dma_rx_size = priv->dma_conf.dma_rx_size;
3858
3859         if (!dma_conf->dma_tx_size)
3860                 dma_conf->dma_tx_size = DMA_DEFAULT_TX_SIZE;
3861         if (!dma_conf->dma_rx_size)
3862                 dma_conf->dma_rx_size = DMA_DEFAULT_RX_SIZE;
3863
3864         /* Earlier check for TBS */
3865         for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) {
3866                 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[chan];
3867                 int tbs_en = priv->plat->tx_queues_cfg[chan].tbs_en;
3868
3869                 /* Setup per-TXQ tbs flag before TX descriptor alloc */
3870                 tx_q->tbs |= tbs_en ? STMMAC_TBS_AVAIL : 0;
3871         }
3872
3873         ret = alloc_dma_desc_resources(priv, dma_conf);
3874         if (ret < 0) {
3875                 netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
3876                            __func__);
3877                 goto alloc_error;
3878         }
3879
3880         ret = init_dma_desc_rings(priv->dev, dma_conf, GFP_KERNEL);
3881         if (ret < 0) {
3882                 netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
3883                            __func__);
3884                 goto init_error;
3885         }
3886
3887         return dma_conf;
3888
3889 init_error:
3890         free_dma_desc_resources(priv, dma_conf);
3891 alloc_error:
3892         kfree(dma_conf);
3893         return ERR_PTR(ret);
3894 }
3895
3896 /**
3897  *  __stmmac_open - open entry point of the driver
3898  *  @dev : pointer to the device structure.
3899  *  @dma_conf :  structure holding the DMA configuration to use
3900  *  Description:
3901  *  This function is the open entry point of the driver.
3902  *  Return value:
3903  *  0 on success and an appropriate (-)ve integer as defined in errno.h
3904  *  file on failure.
3905  */
3906 static int __stmmac_open(struct net_device *dev,
3907                          struct stmmac_dma_conf *dma_conf)
3908 {
3909         struct stmmac_priv *priv = netdev_priv(dev);
3910         int mode = priv->plat->phy_interface;
3911         u32 chan;
3912         int ret;
3913
3914         ret = pm_runtime_resume_and_get(priv->device);
3915         if (ret < 0)
3916                 return ret;
3917
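             /* Attach a PHY only when the link is not fully handled by a PCS
              * (TBI/RTBI, a C73-mode XPCS or a Lynx PCS).
              */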
3918         if (priv->hw->pcs != STMMAC_PCS_TBI &&
3919             priv->hw->pcs != STMMAC_PCS_RTBI &&
3920             (!priv->hw->xpcs ||
3921              xpcs_get_an_mode(priv->hw->xpcs, mode) != DW_AN_C73) &&
3922             !priv->hw->lynx_pcs) {
3923                 ret = stmmac_init_phy(dev);
3924                 if (ret) {
3925                         netdev_err(priv->dev,
3926                                    "%s: Cannot attach to PHY (error: %d)\n",
3927                                    __func__, ret);
3928                         goto init_phy_error;
3929                 }
3930         }
3931
3932         priv->rx_copybreak = STMMAC_RX_COPYBREAK;
3933
3934         buf_sz = dma_conf->dma_buf_sz;
3935         memcpy(&priv->dma_conf, dma_conf, sizeof(*dma_conf));
3936
3937         stmmac_reset_queues_param(priv);
3938
3939         if (!(priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
3940             priv->plat->serdes_powerup) {
3941                 ret = priv->plat->serdes_powerup(dev, priv->plat->bsp_priv);
3942                 if (ret < 0) {
3943                         netdev_err(priv->dev, "%s: Serdes powerup failed\n",
3944                                    __func__);
3945                         goto init_error;
3946                 }
3947         }
3948
3949         ret = stmmac_hw_setup(dev, true);
3950         if (ret < 0) {
3951                 netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
3952                 goto init_error;
3953         }
3954
3955         stmmac_init_coalesce(priv);
3956
3957         phylink_start(priv->phylink);
3958         /* We may have called phylink_speed_down before */
3959         phylink_speed_up(priv->phylink);
3960
3961         ret = stmmac_request_irq(dev);
3962         if (ret)
3963                 goto irq_error;
3964
3965         stmmac_enable_all_queues(priv);
3966         netif_tx_start_all_queues(priv->dev);
3967         stmmac_enable_all_dma_irq(priv);
3968
3969         return 0;
3970
3971 irq_error:
3972         phylink_stop(priv->phylink);
3973
3974         for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
3975                 hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
3976
3977         stmmac_hw_teardown(dev);
3978 init_error:
3979         phylink_disconnect_phy(priv->phylink);
3980 init_phy_error:
3981         pm_runtime_put(priv->device);
3982         return ret;
3983 }
3984
3985 static int stmmac_open(struct net_device *dev)
3986 {
3987         struct stmmac_priv *priv = netdev_priv(dev);
3988         struct stmmac_dma_conf *dma_conf;
3989         int ret;
3990
3991         dma_conf = stmmac_setup_dma_desc(priv, dev->mtu);
3992         if (IS_ERR(dma_conf))
3993                 return PTR_ERR(dma_conf);
3994
3995         ret = __stmmac_open(dev, dma_conf);
3996         if (ret)
3997                 free_dma_desc_resources(priv, dma_conf);
3998
3999         kfree(dma_conf);
4000         return ret;
4001 }
4002
4003 static void stmmac_fpe_stop_wq(struct stmmac_priv *priv)
4004 {
4005         set_bit(__FPE_REMOVING, &priv->fpe_task_state);
4006
4007         if (priv->fpe_wq)
4008                 destroy_workqueue(priv->fpe_wq);
4009
4010         netdev_info(priv->dev, "FPE workqueue stop\n");
4011 }
4012
4013 /**
4014  *  stmmac_release - close entry point of the driver
4015  *  @dev : device pointer.
4016  *  Description:
4017  *  This is the stop entry point of the driver.
4018  */
4019 static int stmmac_release(struct net_device *dev)
4020 {
4021         struct stmmac_priv *priv = netdev_priv(dev);
4022         u32 chan;
4023
4024         if (device_may_wakeup(priv->device))
4025                 phylink_speed_down(priv->phylink, false);
4026         /* Stop and disconnect the PHY */
4027         phylink_stop(priv->phylink);
4028         phylink_disconnect_phy(priv->phylink);
4029
4030         stmmac_disable_all_queues(priv);
4031
4032         for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
4033                 hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
4034
4035         netif_tx_disable(dev);
4036
4037         /* Free the IRQ lines */
4038         stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);
4039
4040         if (priv->eee_enabled) {
4041                 priv->tx_path_in_lpi_mode = false;
4042                 del_timer_sync(&priv->eee_ctrl_timer);
4043         }
4044
4045         /* Stop TX/RX DMA and clear the descriptors */
4046         stmmac_stop_all_dma(priv);
4047
4048         /* Release and free the Rx/Tx resources */
4049         free_dma_desc_resources(priv, &priv->dma_conf);
4050
4051         /* Disable the MAC Rx/Tx */
4052         stmmac_mac_set(priv, priv->ioaddr, false);
4053
4054         /* Power down the SerDes, if there is one */
4055         if (priv->plat->serdes_powerdown)
4056                 priv->plat->serdes_powerdown(dev, priv->plat->bsp_priv);
4057
4058         netif_carrier_off(dev);
4059
4060         stmmac_release_ptp(priv);
4061
4062         pm_runtime_put(priv->device);
4063
4064         if (priv->dma_cap.fpesel)
4065                 stmmac_fpe_stop_wq(priv);
4066
4067         return 0;
4068 }
4069
4070 static bool stmmac_vlan_insert(struct stmmac_priv *priv, struct sk_buff *skb,
4071                                struct stmmac_tx_queue *tx_q)
4072 {
4073         u16 tag = 0x0, inner_tag = 0x0;
4074         u32 inner_type = 0x0;
4075         struct dma_desc *p;
4076
4077         if (!priv->dma_cap.vlins)
4078                 return false;
4079         if (!skb_vlan_tag_present(skb))
4080                 return false;
4081         if (skb->vlan_proto == htons(ETH_P_8021AD)) {
4082                 inner_tag = skb_vlan_tag_get(skb);
4083                 inner_type = STMMAC_VLAN_INSERT;
4084         }
4085
4086         tag = skb_vlan_tag_get(skb);
4087
4088         if (tx_q->tbs & STMMAC_TBS_AVAIL)
4089                 p = &tx_q->dma_entx[tx_q->cur_tx].basic;
4090         else
4091                 p = &tx_q->dma_tx[tx_q->cur_tx];
4092
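             /* The VLAN tag is written into a context descriptor, which
              * consumes one entry of the TX ring.
              */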
4093         if (stmmac_set_desc_vlan_tag(priv, p, tag, inner_tag, inner_type))
4094                 return false;
4095
4096         stmmac_set_tx_owner(priv, p);
4097         tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
4098         return true;
4099 }
4100
4101 /**
4102  *  stmmac_tso_allocator - fill TX descriptors for a TSO buffer
4103  *  @priv: driver private structure
4104  *  @des: buffer start address
4105  *  @total_len: total length to fill in descriptors
4106  *  @last_segment: condition for the last descriptor
4107  *  @queue: TX queue index
4108  *  Description:
4109  *  This function fills the descriptors and requests new descriptors
4110  *  according to the buffer length to fill
4111  */
4112 static void stmmac_tso_allocator(struct stmmac_priv *priv, dma_addr_t des,
4113                                  int total_len, bool last_segment, u32 queue)
4114 {
4115         struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4116         struct dma_desc *desc;
4117         u32 buff_size;
4118         int tmp_len;
4119
4120         tmp_len = total_len;
4121
4122         while (tmp_len > 0) {
4123                 dma_addr_t curr_addr;
4124
4125                 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
4126                                                 priv->dma_conf.dma_tx_size);
4127                 WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
4128
4129                 if (tx_q->tbs & STMMAC_TBS_AVAIL)
4130                         desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4131                 else
4132                         desc = &tx_q->dma_tx[tx_q->cur_tx];
4133
4134                 curr_addr = des + (total_len - tmp_len);
4135                 if (priv->dma_cap.addr64 <= 32)
4136                         desc->des0 = cpu_to_le32(curr_addr);
4137                 else
4138                         stmmac_set_desc_addr(priv, desc, curr_addr);
4139
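                     /* Each data descriptor can carry at most
                      * TSO_MAX_BUFF_SIZE bytes of payload.
                      */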
4140                 buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
4141                             TSO_MAX_BUFF_SIZE : tmp_len;
4142
4143                 stmmac_prepare_tso_tx_desc(priv, desc, 0, buff_size,
4144                                 0, 1,
4145                                 (last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE),
4146                                 0, 0);
4147
4148                 tmp_len -= TSO_MAX_BUFF_SIZE;
4149         }
4150 }
4151
4152 static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue)
4153 {
4154         struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4155         int desc_size;
4156
4157         if (likely(priv->extend_desc))
4158                 desc_size = sizeof(struct dma_extended_desc);
4159         else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4160                 desc_size = sizeof(struct dma_edesc);
4161         else
4162                 desc_size = sizeof(struct dma_desc);
4163
4164         /* The own bit must be the latest setting done when preparing the
4165          * descriptor, and then a barrier is needed to make sure that
4166          * everything is coherent before granting control to the DMA engine.
4167          */
4168         wmb();
4169
4170         tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * desc_size);
4171         stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
4172 }
4173
4174 /**
4175  *  stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
4176  *  @skb : the socket buffer
4177  *  @dev : device pointer
4178  *  Description: this is the transmit function that is called on TSO frames
4179  *  (support available on GMAC4 and newer chips).
4180  *  The diagram below shows the ring programming in case of TSO frames:
4181  *
4182  *  First Descriptor
4183  *   --------
4184  *   | DES0 |---> buffer1 = L2/L3/L4 header
4185  *   | DES1 |---> TCP Payload (can continue on next descr...)
4186  *   | DES2 |---> buffer 1 and 2 len
4187  *   | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
4188  *   --------
4189  *      |
4190  *     ...
4191  *      |
4192  *   --------
4193  *   | DES0 | --| Split TCP Payload on Buffers 1 and 2
4194  *   | DES1 | --|
4195  *   | DES2 | --> buffer 1 and 2 len
4196  *   | DES3 |
4197  *   --------
4198  *
4199  * mss is fixed when TSO is enabled, so the TDES3 ctx field only needs to be programmed when the mss value changes.
4200  */
4201 static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
4202 {
4203         struct dma_desc *desc, *first, *mss_desc = NULL;
4204         struct stmmac_priv *priv = netdev_priv(dev);
4205         int nfrags = skb_shinfo(skb)->nr_frags;
4206         u32 queue = skb_get_queue_mapping(skb);
4207         unsigned int first_entry, tx_packets;
4208         struct stmmac_txq_stats *txq_stats;
4209         int tmp_pay_len = 0, first_tx;
4210         struct stmmac_tx_queue *tx_q;
4211         bool has_vlan, set_ic;
4212         u8 proto_hdr_len, hdr;
4213         unsigned long flags;
4214         u32 pay_len, mss;
4215         dma_addr_t des;
4216         int i;
4217
4218         tx_q = &priv->dma_conf.tx_queue[queue];
4219         txq_stats = &priv->xstats.txq_stats[queue];
4220         first_tx = tx_q->cur_tx;
4221
4222         /* Compute header lengths */
4223         if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
4224                 proto_hdr_len = skb_transport_offset(skb) + sizeof(struct udphdr);
4225                 hdr = sizeof(struct udphdr);
4226         } else {
4227                 proto_hdr_len = skb_tcp_all_headers(skb);
4228                 hdr = tcp_hdrlen(skb);
4229         }
4230
4231         /* Descriptor availability based on the threshold should be safe enough */
4232         if (unlikely(stmmac_tx_avail(priv, queue) <
4233                 (((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) {
4234                 if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
4235                         netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
4236                                                                 queue));
4237                         /* This is a hard error, log it. */
4238                         netdev_err(priv->dev,
4239                                    "%s: Tx Ring full when queue awake\n",
4240                                    __func__);
4241                 }
4242                 return NETDEV_TX_BUSY;
4243         }
4244
4245         pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */
4246
4247         mss = skb_shinfo(skb)->gso_size;
4248
4249         /* set new MSS value if needed */
4250         if (mss != tx_q->mss) {
4251                 if (tx_q->tbs & STMMAC_TBS_AVAIL)
4252                         mss_desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4253                 else
4254                         mss_desc = &tx_q->dma_tx[tx_q->cur_tx];
4255
4256                 stmmac_set_mss(priv, mss_desc, mss);
4257                 tx_q->mss = mss;
4258                 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
4259                                                 priv->dma_conf.dma_tx_size);
4260                 WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
4261         }
4262
4263         if (netif_msg_tx_queued(priv)) {
4264                 pr_info("%s: hdrlen %d, hdr_len %d, pay_len %d, mss %d\n",
4265                         __func__, hdr, proto_hdr_len, pay_len, mss);
4266                 pr_info("\tskb->len %d, skb->data_len %d\n", skb->len,
4267                         skb->data_len);
4268         }
4269
4270         /* Check if VLAN can be inserted by HW */
4271         has_vlan = stmmac_vlan_insert(priv, skb, tx_q);
4272
4273         first_entry = tx_q->cur_tx;
4274         WARN_ON(tx_q->tx_skbuff[first_entry]);
4275
4276         if (tx_q->tbs & STMMAC_TBS_AVAIL)
4277                 desc = &tx_q->dma_entx[first_entry].basic;
4278         else
4279                 desc = &tx_q->dma_tx[first_entry];
4280         first = desc;
4281
4282         if (has_vlan)
4283                 stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);
4284
4285         /* first descriptor: fill Headers on Buf1 */
4286         des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
4287                              DMA_TO_DEVICE);
4288         if (dma_mapping_error(priv->device, des))
4289                 goto dma_map_err;
4290
4291         tx_q->tx_skbuff_dma[first_entry].buf = des;
4292         tx_q->tx_skbuff_dma[first_entry].len = skb_headlen(skb);
4293         tx_q->tx_skbuff_dma[first_entry].map_as_page = false;
4294         tx_q->tx_skbuff_dma[first_entry].buf_type = STMMAC_TXBUF_T_SKB;
4295
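        /* With <= 32 bit DMA addressing, DES0 points at the headers and DES1
         * can already carry the start of the payload; anything beyond
         * TSO_MAX_BUFF_SIZE is handed to stmmac_tso_allocator(). With wider
         * addressing DES0/DES1 form a single buffer address, so the whole
         * payload goes through the allocator instead.
         */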
4296         if (priv->dma_cap.addr64 <= 32) {
4297                 first->des0 = cpu_to_le32(des);
4298
4299                 /* Fill start of payload in buff2 of first descriptor */
4300                 if (pay_len)
4301                         first->des1 = cpu_to_le32(des + proto_hdr_len);
4302
4303                 /* If needed take extra descriptors to fill the remaining payload */
4304                 tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;
4305         } else {
4306                 stmmac_set_desc_addr(priv, first, des);
4307                 tmp_pay_len = pay_len;
4308                 des += proto_hdr_len;
4309                 pay_len = 0;
4310         }
4311
4312         stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue);
4313
4314         /* Prepare fragments */
4315         for (i = 0; i < nfrags; i++) {
4316                 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4317
4318                 des = skb_frag_dma_map(priv->device, frag, 0,
4319                                        skb_frag_size(frag),
4320                                        DMA_TO_DEVICE);
4321                 if (dma_mapping_error(priv->device, des))
4322                         goto dma_map_err;
4323
4324                 stmmac_tso_allocator(priv, des, skb_frag_size(frag),
4325                                      (i == nfrags - 1), queue);
4326
4327                 tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
4328                 tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag);
4329                 tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true;
4330                 tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
4331         }
4332
4333         tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true;
4334
4335         /* Only the last descriptor gets to point to the skb. */
4336         tx_q->tx_skbuff[tx_q->cur_tx] = skb;
4337         tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
4338
4339         /* Manage tx mitigation */
4340         tx_packets = (tx_q->cur_tx + 1) - first_tx;
4341         tx_q->tx_count_frames += tx_packets;
4342
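        /* Decide whether this frame should raise a completion interrupt:
         * always when a HW timestamp has been requested, otherwise only when
         * the per-queue frame coalescing threshold is crossed. When the IC
         * bit is left cleared, the tx coalescing timer cleans up instead.
         */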
4343         if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
4344                 set_ic = true;
4345         else if (!priv->tx_coal_frames[queue])
4346                 set_ic = false;
4347         else if (tx_packets > priv->tx_coal_frames[queue])
4348                 set_ic = true;
4349         else if ((tx_q->tx_count_frames %
4350                   priv->tx_coal_frames[queue]) < tx_packets)
4351                 set_ic = true;
4352         else
4353                 set_ic = false;
4354
4355         if (set_ic) {
4356                 if (tx_q->tbs & STMMAC_TBS_AVAIL)
4357                         desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4358                 else
4359                         desc = &tx_q->dma_tx[tx_q->cur_tx];
4360
4361                 tx_q->tx_count_frames = 0;
4362                 stmmac_set_tx_ic(priv, desc);
4363         }
4364
4365         /* We've used all descriptors we need for this skb, however,
4366          * advance cur_tx so that it references a fresh descriptor.
4367          * ndo_start_xmit will fill this descriptor the next time it's
4368          * called and stmmac_tx_clean may clean up to this descriptor.
4369          */
4370         tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
4371
4372         if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
4373                 netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
4374                           __func__);
4375                 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
4376         }
4377
4378         flags = u64_stats_update_begin_irqsave(&txq_stats->syncp);
4379         txq_stats->tx_bytes += skb->len;
4380         txq_stats->tx_tso_frames++;
4381         txq_stats->tx_tso_nfrags += nfrags;
4382         if (set_ic)
4383                 txq_stats->tx_set_ic_bit++;
4384         u64_stats_update_end_irqrestore(&txq_stats->syncp, flags);
4385
4386         if (priv->sarc_type)
4387                 stmmac_set_desc_sarc(priv, first, priv->sarc_type);
4388
4389         skb_tx_timestamp(skb);
4390
4391         if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
4392                      priv->hwts_tx_en)) {
4393                 /* declare that device is doing timestamping */
4394                 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
4395                 stmmac_enable_tx_timestamp(priv, first);
4396         }
4397
4398         /* Complete the first descriptor before granting the DMA */
4399         stmmac_prepare_tso_tx_desc(priv, first, 1,
4400                         proto_hdr_len,
4401                         pay_len,
4402                         1, tx_q->tx_skbuff_dma[first_entry].last_segment,
4403                         hdr / 4, (skb->len - proto_hdr_len));
4404
4405         /* If context desc is used to change MSS */
4406         if (mss_desc) {
4407                 /* Make sure that the first descriptor has been completely
4408                  * written, including its own bit. This is because the MSS
4409                  * descriptor actually sits before the first descriptor, so
4410                  * its own bit must be the last thing written.
4411                  */
4412                 dma_wmb();
4413                 stmmac_set_tx_owner(priv, mss_desc);
4414         }
4415
4416         if (netif_msg_pktdata(priv)) {
4417                 pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
4418                         __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
4419                         tx_q->cur_tx, first, nfrags);
4420                 pr_info(">>> frame to be transmitted: ");
4421                 print_pkt(skb->data, skb_headlen(skb));
4422         }
4423
4424         netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
4425
4426         stmmac_flush_tx_descriptors(priv, queue);
4427         stmmac_tx_timer_arm(priv, queue);
4428
4429         return NETDEV_TX_OK;
4430
4431 dma_map_err:
4432         dev_err(priv->device, "Tx dma map failed\n");
4433         dev_kfree_skb(skb);
4434         priv->xstats.tx_dropped++;
4435         return NETDEV_TX_OK;
4436 }
4437
4438 /**
4439  * stmmac_has_ip_ethertype() - Check if packet has IP ethertype
4440  * @skb: socket buffer to check
4441  *
4442  * Check if a packet has an ethertype that will trigger the IP header checks
4443  * and IP/TCP checksum engine of the stmmac core.
4444  *
4445  * Return: true if the ethertype can trigger the checksum engine, false
4446  * otherwise
4447  */
4448 static bool stmmac_has_ip_ethertype(struct sk_buff *skb)
4449 {
4450         int depth = 0;
4451         __be16 proto;
4452
4453         proto = __vlan_get_protocol(skb, eth_header_parse_protocol(skb),
4454                                     &depth);
4455
4456         return (depth <= ETH_HLEN) &&
4457                 (proto == htons(ETH_P_IP) || proto == htons(ETH_P_IPV6));
4458 }
4459
4460 /**
4461  *  stmmac_xmit - Tx entry point of the driver
4462  *  @skb : the socket buffer
4463  *  @dev : device pointer
4464  *  Description: this is the tx entry point of the driver.
4465  *  It programs the chain or the ring and supports oversized frames
4466  *  and the SG feature.
4467  */
4468 static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
4469 {
4470         unsigned int first_entry, tx_packets, enh_desc;
4471         struct stmmac_priv *priv = netdev_priv(dev);
4472         unsigned int nopaged_len = skb_headlen(skb);
4473         int i, csum_insertion = 0, is_jumbo = 0;
4474         u32 queue = skb_get_queue_mapping(skb);
4475         int nfrags = skb_shinfo(skb)->nr_frags;
4476         int gso = skb_shinfo(skb)->gso_type;
4477         struct stmmac_txq_stats *txq_stats;
4478         struct dma_edesc *tbs_desc = NULL;
4479         struct dma_desc *desc, *first;
4480         struct stmmac_tx_queue *tx_q;
4481         bool has_vlan, set_ic;
4482         int entry, first_tx;
4483         unsigned long flags;
4484         dma_addr_t des;
4485
4486         tx_q = &priv->dma_conf.tx_queue[queue];
4487         txq_stats = &priv->xstats.txq_stats[queue];
4488         first_tx = tx_q->cur_tx;
4489
4490         if (priv->tx_path_in_lpi_mode && priv->eee_sw_timer_en)
4491                 stmmac_disable_eee_mode(priv);
4492
4493         /* Manage oversized TCP frames for GMAC4 device */
4494         if (skb_is_gso(skb) && priv->tso) {
4495                 if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
4496                         return stmmac_tso_xmit(skb, dev);
4497                 if (priv->plat->has_gmac4 && (gso & SKB_GSO_UDP_L4))
4498                         return stmmac_tso_xmit(skb, dev);
4499         }
4500
4501         if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
4502                 if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
4503                         netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
4504                                                                 queue));
4505                         /* This is a hard error, log it. */
4506                         netdev_err(priv->dev,
4507                                    "%s: Tx Ring full when queue awake\n",
4508                                    __func__);
4509                 }
4510                 return NETDEV_TX_BUSY;
4511         }
4512
4513         /* Check if VLAN can be inserted by HW */
4514         has_vlan = stmmac_vlan_insert(priv, skb, tx_q);
4515
4516         entry = tx_q->cur_tx;
4517         first_entry = entry;
4518         WARN_ON(tx_q->tx_skbuff[first_entry]);
4519
4520         csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
4521         /* DWMAC IPs can be synthesized to support tx coe only for a few tx
4522          * queues. In that case, checksum offloading for the queues that don't
4523          * support tx coe needs to fall back to software checksum calculation.
4524          *
4525          * Packets that won't trigger the COE, e.g. most DSA-tagged packets, will
4526          * also have to be checksummed in software.
4527          */
4528         if (csum_insertion &&
4529             (priv->plat->tx_queues_cfg[queue].coe_unsupported ||
4530              !stmmac_has_ip_ethertype(skb))) {
4531                 if (unlikely(skb_checksum_help(skb)))
4532                         goto dma_map_err;
4533                 csum_insertion = !csum_insertion;
4534         }
4535
4536         if (likely(priv->extend_desc))
4537                 desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4538         else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4539                 desc = &tx_q->dma_entx[entry].basic;
4540         else
4541                 desc = tx_q->dma_tx + entry;
4542
4543         first = desc;
4544
4545         if (has_vlan)
4546                 stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);
4547
4548         enh_desc = priv->plat->enh_desc;
4549         /* To program the descriptors according to the size of the frame */
4550         if (enh_desc)
4551                 is_jumbo = stmmac_is_jumbo_frm(priv, skb->len, enh_desc);
4552
4553         if (unlikely(is_jumbo)) {
4554                 entry = stmmac_jumbo_frm(priv, tx_q, skb, csum_insertion);
4555                 if (unlikely(entry < 0) && (entry != -EINVAL))
4556                         goto dma_map_err;
4557         }
4558
4559         for (i = 0; i < nfrags; i++) {
4560                 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4561                 int len = skb_frag_size(frag);
4562                 bool last_segment = (i == (nfrags - 1));
4563
4564                 entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
4565                 WARN_ON(tx_q->tx_skbuff[entry]);
4566
4567                 if (likely(priv->extend_desc))
4568                         desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4569                 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4570                         desc = &tx_q->dma_entx[entry].basic;
4571                 else
4572                         desc = tx_q->dma_tx + entry;
4573
4574                 des = skb_frag_dma_map(priv->device, frag, 0, len,
4575                                        DMA_TO_DEVICE);
4576                 if (dma_mapping_error(priv->device, des))
4577                         goto dma_map_err; /* should reuse desc w/o issues */
4578
4579                 tx_q->tx_skbuff_dma[entry].buf = des;
4580
4581                 stmmac_set_desc_addr(priv, desc, des);
4582
4583                 tx_q->tx_skbuff_dma[entry].map_as_page = true;
4584                 tx_q->tx_skbuff_dma[entry].len = len;
4585                 tx_q->tx_skbuff_dma[entry].last_segment = last_segment;
4586                 tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB;
4587
4588                 /* Prepare the descriptor and set the own bit too */
4589                 stmmac_prepare_tx_desc(priv, desc, 0, len, csum_insertion,
4590                                 priv->mode, 1, last_segment, skb->len);
4591         }
4592
4593         /* Only the last descriptor gets to point to the skb. */
4594         tx_q->tx_skbuff[entry] = skb;
4595         tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB;
4596
4597         /* According to the coalesce parameter, the IC bit for the latest
4598          * segment is reset and the timer re-started to clean the tx status.
4599          * This approach takes care of the fragments: desc is the first
4600          * element in the case of no SG.
4601          */
4602         tx_packets = (entry + 1) - first_tx;
4603         tx_q->tx_count_frames += tx_packets;
4604
4605         if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
4606                 set_ic = true;
4607         else if (!priv->tx_coal_frames[queue])
4608                 set_ic = false;
4609         else if (tx_packets > priv->tx_coal_frames[queue])
4610                 set_ic = true;
4611         else if ((tx_q->tx_count_frames %
4612                   priv->tx_coal_frames[queue]) < tx_packets)
4613                 set_ic = true;
4614         else
4615                 set_ic = false;
4616
4617         if (set_ic) {
4618                 if (likely(priv->extend_desc))
4619                         desc = &tx_q->dma_etx[entry].basic;
4620                 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4621                         desc = &tx_q->dma_entx[entry].basic;
4622                 else
4623                         desc = &tx_q->dma_tx[entry];
4624
4625                 tx_q->tx_count_frames = 0;
4626                 stmmac_set_tx_ic(priv, desc);
4627         }
4628
4629         /* We've used all descriptors we need for this skb, however,
4630          * advance cur_tx so that it references a fresh descriptor.
4631          * ndo_start_xmit will fill this descriptor the next time it's
4632          * called and stmmac_tx_clean may clean up to this descriptor.
4633          */
4634         entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
4635         tx_q->cur_tx = entry;
4636
4637         if (netif_msg_pktdata(priv)) {
4638                 netdev_dbg(priv->dev,
4639                            "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
4640                            __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
4641                            entry, first, nfrags);
4642
4643                 netdev_dbg(priv->dev, ">>> frame to be transmitted: ");
4644                 print_pkt(skb->data, skb->len);
4645         }
4646
4647         if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
4648                 netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
4649                           __func__);
4650                 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
4651         }
4652
4653         flags = u64_stats_update_begin_irqsave(&txq_stats->syncp);
4654         txq_stats->tx_bytes += skb->len;
4655         if (set_ic)
4656                 txq_stats->tx_set_ic_bit++;
4657         u64_stats_update_end_irqrestore(&txq_stats->syncp, flags);
4658
4659         if (priv->sarc_type)
4660                 stmmac_set_desc_sarc(priv, first, priv->sarc_type);
4661
4662         skb_tx_timestamp(skb);
4663
4664         /* Ready to fill the first descriptor and set the OWN bit w/o any
4665          * problems because all the descriptors are actually ready to be
4666          * passed to the DMA engine.
4667          */
4668         if (likely(!is_jumbo)) {
4669                 bool last_segment = (nfrags == 0);
4670
4671                 des = dma_map_single(priv->device, skb->data,
4672                                      nopaged_len, DMA_TO_DEVICE);
4673                 if (dma_mapping_error(priv->device, des))
4674                         goto dma_map_err;
4675
4676                 tx_q->tx_skbuff_dma[first_entry].buf = des;
4677                 tx_q->tx_skbuff_dma[first_entry].buf_type = STMMAC_TXBUF_T_SKB;
4678                 tx_q->tx_skbuff_dma[first_entry].map_as_page = false;
4679
4680                 stmmac_set_desc_addr(priv, first, des);
4681
4682                 tx_q->tx_skbuff_dma[first_entry].len = nopaged_len;
4683                 tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment;
4684
4685                 if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
4686                              priv->hwts_tx_en)) {
4687                         /* declare that device is doing timestamping */
4688                         skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
4689                         stmmac_enable_tx_timestamp(priv, first);
4690                 }
4691
4692                 /* Prepare the first descriptor setting the OWN bit too */
4693                 stmmac_prepare_tx_desc(priv, first, 1, nopaged_len,
4694                                 csum_insertion, priv->mode, 0, last_segment,
4695                                 skb->len);
4696         }
4697
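        /* With Time Based Scheduling enabled, skb->tstamp carries the
         * requested launch time (typically set by the ETF qdisc); program it
         * into the enhanced descriptor so the MAC releases the frame then.
         */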
4698         if (tx_q->tbs & STMMAC_TBS_EN) {
4699                 struct timespec64 ts = ns_to_timespec64(skb->tstamp);
4700
4701                 tbs_desc = &tx_q->dma_entx[first_entry];
4702                 stmmac_set_desc_tbs(priv, tbs_desc, ts.tv_sec, ts.tv_nsec);
4703         }
4704
4705         stmmac_set_tx_owner(priv, first);
4706
4707         netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
4708
4709         stmmac_enable_dma_transmission(priv, priv->ioaddr);
4710
4711         stmmac_flush_tx_descriptors(priv, queue);
4712         stmmac_tx_timer_arm(priv, queue);
4713
4714         return NETDEV_TX_OK;
4715
4716 dma_map_err:
4717         netdev_err(priv->dev, "Tx DMA map failed\n");
4718         dev_kfree_skb(skb);
4719         priv->xstats.tx_dropped++;
4720         return NETDEV_TX_OK;
4721 }
4722
4723 static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
4724 {
4725         struct vlan_ethhdr *veth = skb_vlan_eth_hdr(skb);
4726         __be16 vlan_proto = veth->h_vlan_proto;
4727         u16 vlanid;
4728
4729         if ((vlan_proto == htons(ETH_P_8021Q) &&
4730              dev->features & NETIF_F_HW_VLAN_CTAG_RX) ||
4731             (vlan_proto == htons(ETH_P_8021AD) &&
4732              dev->features & NETIF_F_HW_VLAN_STAG_RX)) {
4733                 /* pop the vlan tag */
4734                 vlanid = ntohs(veth->h_vlan_TCI);
4735                 memmove(skb->data + VLAN_HLEN, veth, ETH_ALEN * 2);
4736                 skb_pull(skb, VLAN_HLEN);
4737                 __vlan_hwaccel_put_tag(skb, vlan_proto, vlanid);
4738         }
4739 }
4740
4741 /**
4742  * stmmac_rx_refill - refill used skb preallocated buffers
4743  * @priv: driver private structure
4744  * @queue: RX queue index
4745  * Description: this reallocates the RX buffers for the reception process,
4746  * which is based on zero-copy.
4747  */
4748 static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
4749 {
4750         struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
4751         int dirty = stmmac_rx_dirty(priv, queue);
4752         unsigned int entry = rx_q->dirty_rx;
4753         gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
4754
4755         if (priv->dma_cap.host_dma_width <= 32)
4756                 gfp |= GFP_DMA32;
4757
4758         while (dirty-- > 0) {
4759                 struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
4760                 struct dma_desc *p;
4761                 bool use_rx_wd;
4762
4763                 if (priv->extend_desc)
4764                         p = (struct dma_desc *)(rx_q->dma_erx + entry);
4765                 else
4766                         p = rx_q->dma_rx + entry;
4767
4768                 if (!buf->page) {
4769                         buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
4770                         if (!buf->page)
4771                                 break;
4772                 }
4773
4774                 if (priv->sph && !buf->sec_page) {
4775                         buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
4776                         if (!buf->sec_page)
4777                                 break;
4778
4779                         buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
4780                 }
4781
4782                 buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;
4783
4784                 stmmac_set_desc_addr(priv, p, buf->addr);
4785                 if (priv->sph)
4786                         stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
4787                 else
4788                         stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
4789                 stmmac_refill_desc3(priv, rx_q, p);
4790
4791                 rx_q->rx_count_frames++;
4792                 rx_q->rx_count_frames += priv->rx_coal_frames[queue];
4793                 if (rx_q->rx_count_frames > priv->rx_coal_frames[queue])
4794                         rx_q->rx_count_frames = 0;
4795
4796                 use_rx_wd = !priv->rx_coal_frames[queue];
4797                 use_rx_wd |= rx_q->rx_count_frames > 0;
4798                 if (!priv->use_riwt)
4799                         use_rx_wd = false;
4800
4801                 dma_wmb();
4802                 stmmac_set_rx_owner(priv, p, use_rx_wd);
4803
4804                 entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size);
4805         }
4806         rx_q->dirty_rx = entry;
4807         rx_q->rx_tail_addr = rx_q->dma_rx_phy +
4808                             (rx_q->dirty_rx * sizeof(struct dma_desc));
4809         stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
4810 }
4811
4812 static unsigned int stmmac_rx_buf1_len(struct stmmac_priv *priv,
4813                                        struct dma_desc *p,
4814                                        int status, unsigned int len)
4815 {
4816         unsigned int plen = 0, hlen = 0;
4817         int coe = priv->hw->rx_csum;
4818
4819         /* Not first descriptor, buffer is always zero */
4820         if (priv->sph && len)
4821                 return 0;
4822
4823         /* First descriptor, get split header length */
4824         stmmac_get_rx_header_len(priv, p, &hlen);
4825         if (priv->sph && hlen) {
4826                 priv->xstats.rx_split_hdr_pkt_n++;
4827                 return hlen;
4828         }
4829
4830         /* First descriptor, not last descriptor and not split header */
4831         if (status & rx_not_ls)
4832                 return priv->dma_conf.dma_buf_sz;
4833
4834         plen = stmmac_get_rx_frame_len(priv, p, coe);
4835
4836         /* First descriptor and last descriptor and not split header */
4837         return min_t(unsigned int, priv->dma_conf.dma_buf_sz, plen);
4838 }
4839
4840 static unsigned int stmmac_rx_buf2_len(struct stmmac_priv *priv,
4841                                        struct dma_desc *p,
4842                                        int status, unsigned int len)
4843 {
4844         int coe = priv->hw->rx_csum;
4845         unsigned int plen = 0;
4846
4847         /* Not split header, buffer is not available */
4848         if (!priv->sph)
4849                 return 0;
4850
4851         /* Not last descriptor */
4852         if (status & rx_not_ls)
4853                 return priv->dma_conf.dma_buf_sz;
4854
4855         plen = stmmac_get_rx_frame_len(priv, p, coe);
4856
4857         /* Last descriptor */
4858         return plen - len;
4859 }
4860
4861 static int stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue,
4862                                 struct xdp_frame *xdpf, bool dma_map)
4863 {
4864         struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
4865         struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4866         unsigned int entry = tx_q->cur_tx;
4867         struct dma_desc *tx_desc;
4868         dma_addr_t dma_addr;
4869         bool set_ic;
4870
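        /* Keep some headroom in the ring for the regular xmit path: if less
         * than a quarter of the descriptors are free, report the frame as
         * consumed so the caller drops it.
         */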
4871         if (stmmac_tx_avail(priv, queue) < STMMAC_TX_THRESH(priv))
4872                 return STMMAC_XDP_CONSUMED;
4873
4874         if (likely(priv->extend_desc))
4875                 tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4876         else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4877                 tx_desc = &tx_q->dma_entx[entry].basic;
4878         else
4879                 tx_desc = tx_q->dma_tx + entry;
4880
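        /* When dma_map is set the frame does not come from this driver's
         * page_pool and must be freshly mapped; otherwise (XDP_TX of a frame
         * received on our own ring) the page is already DMA-mapped by the
         * pool and only a sync for the device is needed.
         */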
4881         if (dma_map) {
4882                 dma_addr = dma_map_single(priv->device, xdpf->data,
4883                                           xdpf->len, DMA_TO_DEVICE);
4884                 if (dma_mapping_error(priv->device, dma_addr))
4885                         return STMMAC_XDP_CONSUMED;
4886
4887                 tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_NDO;
4888         } else {
4889                 struct page *page = virt_to_page(xdpf->data);
4890
4891                 dma_addr = page_pool_get_dma_addr(page) + sizeof(*xdpf) +
4892                            xdpf->headroom;
4893                 dma_sync_single_for_device(priv->device, dma_addr,
4894                                            xdpf->len, DMA_BIDIRECTIONAL);
4895
4896                 tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_TX;
4897         }
4898
4899         tx_q->tx_skbuff_dma[entry].buf = dma_addr;
4900         tx_q->tx_skbuff_dma[entry].map_as_page = false;
4901         tx_q->tx_skbuff_dma[entry].len = xdpf->len;
4902         tx_q->tx_skbuff_dma[entry].last_segment = true;
4903         tx_q->tx_skbuff_dma[entry].is_jumbo = false;
4904
4905         tx_q->xdpf[entry] = xdpf;
4906
4907         stmmac_set_desc_addr(priv, tx_desc, dma_addr);
4908
4909         stmmac_prepare_tx_desc(priv, tx_desc, 1, xdpf->len,
4910                                true, priv->mode, true, true,
4911                                xdpf->len);
4912
4913         tx_q->tx_count_frames++;
4914
4915         if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0)
4916                 set_ic = true;
4917         else
4918                 set_ic = false;
4919
4920         if (set_ic) {
4921                 unsigned long flags;
4922                 tx_q->tx_count_frames = 0;
4923                 stmmac_set_tx_ic(priv, tx_desc);
4924                 flags = u64_stats_update_begin_irqsave(&txq_stats->syncp);
4925                 txq_stats->tx_set_ic_bit++;
4926                 u64_stats_update_end_irqrestore(&txq_stats->syncp, flags);
4927         }
4928
4929         stmmac_enable_dma_transmission(priv, priv->ioaddr);
4930
4931         entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
4932         tx_q->cur_tx = entry;
4933
4934         return STMMAC_XDP_TX;
4935 }
4936
4937 static int stmmac_xdp_get_tx_queue(struct stmmac_priv *priv,
4938                                    int cpu)
4939 {
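        /* Map the current CPU onto one of the TX queues in use, wrapping
         * around when there are more CPUs than queues.
         */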
4940         int index = cpu;
4941
4942         if (unlikely(index < 0))
4943                 index = 0;
4944
4945         while (index >= priv->plat->tx_queues_to_use)
4946                 index -= priv->plat->tx_queues_to_use;
4947
4948         return index;
4949 }
4950
4951 static int stmmac_xdp_xmit_back(struct stmmac_priv *priv,
4952                                 struct xdp_buff *xdp)
4953 {
4954         struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
4955         int cpu = smp_processor_id();
4956         struct netdev_queue *nq;
4957         int queue;
4958         int res;
4959
4960         if (unlikely(!xdpf))
4961                 return STMMAC_XDP_CONSUMED;
4962
4963         queue = stmmac_xdp_get_tx_queue(priv, cpu);
4964         nq = netdev_get_tx_queue(priv->dev, queue);
4965
4966         __netif_tx_lock(nq, cpu);
4967         /* Avoids TX time-out as we are sharing with slow path */
4968         txq_trans_cond_update(nq);
4969
4970         res = stmmac_xdp_xmit_xdpf(priv, queue, xdpf, false);
4971         if (res == STMMAC_XDP_TX)
4972                 stmmac_flush_tx_descriptors(priv, queue);
4973
4974         __netif_tx_unlock(nq);
4975
4976         return res;
4977 }
4978
4979 static int __stmmac_xdp_run_prog(struct stmmac_priv *priv,
4980                                  struct bpf_prog *prog,
4981                                  struct xdp_buff *xdp)
4982 {
4983         u32 act;
4984         int res;
4985
4986         act = bpf_prog_run_xdp(prog, xdp);
4987         switch (act) {
4988         case XDP_PASS:
4989                 res = STMMAC_XDP_PASS;
4990                 break;
4991         case XDP_TX:
4992                 res = stmmac_xdp_xmit_back(priv, xdp);
4993                 break;
4994         case XDP_REDIRECT:
4995                 if (xdp_do_redirect(priv->dev, xdp, prog) < 0)
4996                         res = STMMAC_XDP_CONSUMED;
4997                 else
4998                         res = STMMAC_XDP_REDIRECT;
4999                 break;
5000         default:
5001                 bpf_warn_invalid_xdp_action(priv->dev, prog, act);
5002                 fallthrough;
5003         case XDP_ABORTED:
5004                 trace_xdp_exception(priv->dev, prog, act);
5005                 fallthrough;
5006         case XDP_DROP:
5007                 res = STMMAC_XDP_CONSUMED;
5008                 break;
5009         }
5010
5011         return res;
5012 }
5013
5014 static struct sk_buff *stmmac_xdp_run_prog(struct stmmac_priv *priv,
5015                                            struct xdp_buff *xdp)
5016 {
5017         struct bpf_prog *prog;
5018         int res;
5019
5020         prog = READ_ONCE(priv->xdp_prog);
5021         if (!prog) {
5022                 res = STMMAC_XDP_PASS;
5023                 goto out;
5024         }
5025
5026         res = __stmmac_xdp_run_prog(priv, prog, xdp);
5027 out:
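        /* The verdict is returned as an ERR_PTR(): XDP_PASS (0) becomes NULL
         * so the caller builds an skb, any other verdict is recovered with
         * PTR_ERR() by the IS_ERR() handling in stmmac_rx().
         */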
5028         return ERR_PTR(-res);
5029 }
5030
5031 static void stmmac_finalize_xdp_rx(struct stmmac_priv *priv,
5032                                    int xdp_status)
5033 {
5034         int cpu = smp_processor_id();
5035         int queue;
5036
5037         queue = stmmac_xdp_get_tx_queue(priv, cpu);
5038
5039         if (xdp_status & STMMAC_XDP_TX)
5040                 stmmac_tx_timer_arm(priv, queue);
5041
5042         if (xdp_status & STMMAC_XDP_REDIRECT)
5043                 xdp_do_flush();
5044 }
5045
5046 static struct sk_buff *stmmac_construct_skb_zc(struct stmmac_channel *ch,
5047                                                struct xdp_buff *xdp)
5048 {
5049         unsigned int metasize = xdp->data - xdp->data_meta;
5050         unsigned int datasize = xdp->data_end - xdp->data;
5051         struct sk_buff *skb;
5052
5053         skb = __napi_alloc_skb(&ch->rxtx_napi,
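        /* The XSK buffer belongs to the pool and is freed right after this
         * call, so payload and metadata must be copied into a freshly
         * allocated skb before handing the frame to the stack.
         */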
5054                                xdp->data_end - xdp->data_hard_start,
5055                                GFP_ATOMIC | __GFP_NOWARN);
5056         if (unlikely(!skb))
5057                 return NULL;
5058
5059         skb_reserve(skb, xdp->data - xdp->data_hard_start);
5060         memcpy(__skb_put(skb, datasize), xdp->data, datasize);
5061         if (metasize)
5062                 skb_metadata_set(skb, metasize);
5063
5064         return skb;
5065 }
5066
5067 static void stmmac_dispatch_skb_zc(struct stmmac_priv *priv, u32 queue,
5068                                    struct dma_desc *p, struct dma_desc *np,
5069                                    struct xdp_buff *xdp)
5070 {
5071         struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
5072         struct stmmac_channel *ch = &priv->channel[queue];
5073         unsigned int len = xdp->data_end - xdp->data;
5074         enum pkt_hash_types hash_type;
5075         int coe = priv->hw->rx_csum;
5076         unsigned long flags;
5077         struct sk_buff *skb;
5078         u32 hash;
5079
5080         skb = stmmac_construct_skb_zc(ch, xdp);
5081         if (!skb) {
5082                 priv->xstats.rx_dropped++;
5083                 return;
5084         }
5085
5086         stmmac_get_rx_hwtstamp(priv, p, np, skb);
5087         if (priv->hw->hw_vlan_en)
5088                 /* MAC level stripping. */
5089                 stmmac_rx_hw_vlan(priv, priv->hw, p, skb);
5090         else
5091                 /* Driver level stripping. */
5092                 stmmac_rx_vlan(priv->dev, skb);
5093         skb->protocol = eth_type_trans(skb, priv->dev);
5094
5095         if (unlikely(!coe) || !stmmac_has_ip_ethertype(skb))
5096                 skb_checksum_none_assert(skb);
5097         else
5098                 skb->ip_summed = CHECKSUM_UNNECESSARY;
5099
5100         if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
5101                 skb_set_hash(skb, hash, hash_type);
5102
5103         skb_record_rx_queue(skb, queue);
5104         napi_gro_receive(&ch->rxtx_napi, skb);
5105
5106         flags = u64_stats_update_begin_irqsave(&rxq_stats->syncp);
5107         rxq_stats->rx_pkt_n++;
5108         rxq_stats->rx_bytes += len;
5109         u64_stats_update_end_irqrestore(&rxq_stats->syncp, flags);
5110 }
5111
5112 static bool stmmac_rx_refill_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
5113 {
5114         struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
5115         unsigned int entry = rx_q->dirty_rx;
5116         struct dma_desc *rx_desc = NULL;
5117         bool ret = true;
5118
5119         budget = min(budget, stmmac_rx_dirty(priv, queue));
5120
5121         while (budget-- > 0 && entry != rx_q->cur_rx) {
5122                 struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
5123                 dma_addr_t dma_addr;
5124                 bool use_rx_wd;
5125
5126                 if (!buf->xdp) {
5127                         buf->xdp = xsk_buff_alloc(rx_q->xsk_pool);
5128                         if (!buf->xdp) {
5129                                 ret = false;
5130                                 break;
5131                         }
5132                 }
5133
5134                 if (priv->extend_desc)
5135                         rx_desc = (struct dma_desc *)(rx_q->dma_erx + entry);
5136                 else
5137                         rx_desc = rx_q->dma_rx + entry;
5138
5139                 dma_addr = xsk_buff_xdp_get_dma(buf->xdp);
5140                 stmmac_set_desc_addr(priv, rx_desc, dma_addr);
5141                 stmmac_set_desc_sec_addr(priv, rx_desc, 0, false);
5142                 stmmac_refill_desc3(priv, rx_q, rx_desc);
5143
5144                 rx_q->rx_count_frames++;
5145                 rx_q->rx_count_frames += priv->rx_coal_frames[queue];
5146                 if (rx_q->rx_count_frames > priv->rx_coal_frames[queue])
5147                         rx_q->rx_count_frames = 0;
5148
5149                 use_rx_wd = !priv->rx_coal_frames[queue];
5150                 use_rx_wd |= rx_q->rx_count_frames > 0;
5151                 if (!priv->use_riwt)
5152                         use_rx_wd = false;
5153
5154                 dma_wmb();
5155                 stmmac_set_rx_owner(priv, rx_desc, use_rx_wd);
5156
5157                 entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size);
5158         }
5159
5160         if (rx_desc) {
5161                 rx_q->dirty_rx = entry;
5162                 rx_q->rx_tail_addr = rx_q->dma_rx_phy +
5163                                      (rx_q->dirty_rx * sizeof(struct dma_desc));
5164                 stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
5165         }
5166
5167         return ret;
5168 }
5169
5170 static struct stmmac_xdp_buff *xsk_buff_to_stmmac_ctx(struct xdp_buff *xdp)
5171 {
5172         /* In the XDP zero-copy data path, the xdp field in struct xdp_buff_xsk
5173          * represents the incoming packet, whereas the cb field in the same
5174          * structure stores driver-specific info. Thus, struct stmmac_xdp_buff
5175          * is laid on top of the xdp and cb fields of struct xdp_buff_xsk.
5176          */
5177         return (struct stmmac_xdp_buff *)xdp;
5178 }
5179
5180 static int stmmac_rx_zc(struct stmmac_priv *priv, int limit, u32 queue)
5181 {
5182         struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
5183         struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
5184         unsigned int count = 0, error = 0, len = 0;
5185         int dirty = stmmac_rx_dirty(priv, queue);
5186         unsigned int next_entry = rx_q->cur_rx;
5187         u32 rx_errors = 0, rx_dropped = 0;
5188         unsigned int desc_size;
5189         struct bpf_prog *prog;
5190         bool failure = false;
5191         unsigned long flags;
5192         int xdp_status = 0;
5193         int status = 0;
5194
5195         if (netif_msg_rx_status(priv)) {
5196                 void *rx_head;
5197
5198                 netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
5199                 if (priv->extend_desc) {
5200                         rx_head = (void *)rx_q->dma_erx;
5201                         desc_size = sizeof(struct dma_extended_desc);
5202                 } else {
5203                         rx_head = (void *)rx_q->dma_rx;
5204                         desc_size = sizeof(struct dma_desc);
5205                 }
5206
5207                 stmmac_display_ring(priv, rx_head, priv->dma_conf.dma_rx_size, true,
5208                                     rx_q->dma_rx_phy, desc_size);
5209         }
5210         while (count < limit) {
5211                 struct stmmac_rx_buffer *buf;
5212                 struct stmmac_xdp_buff *ctx;
5213                 unsigned int buf1_len = 0;
5214                 struct dma_desc *np, *p;
5215                 int entry;
5216                 int res;
5217
5218                 if (!count && rx_q->state_saved) {
5219                         error = rx_q->state.error;
5220                         len = rx_q->state.len;
5221                 } else {
5222                         rx_q->state_saved = false;
5223                         error = 0;
5224                         len = 0;
5225                 }
5226
5227                 if (count >= limit)
5228                         break;
5229
5230 read_again:
5231                 buf1_len = 0;
5232                 entry = next_entry;
5233                 buf = &rx_q->buf_pool[entry];
5234
5235                 if (dirty >= STMMAC_RX_FILL_BATCH) {
5236                         failure = failure ||
5237                                   !stmmac_rx_refill_zc(priv, queue, dirty);
5238                         dirty = 0;
5239                 }
5240
5241                 if (priv->extend_desc)
5242                         p = (struct dma_desc *)(rx_q->dma_erx + entry);
5243                 else
5244                         p = rx_q->dma_rx + entry;
5245
5246                 /* read the status of the incoming frame */
5247                 status = stmmac_rx_status(priv, &priv->xstats, p);
5248                 /* check if managed by the DMA, otherwise go ahead */
5249                 if (unlikely(status & dma_own))
5250                         break;
5251
5252                 /* Prefetch the next RX descriptor */
5253                 rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
5254                                                 priv->dma_conf.dma_rx_size);
5255                 next_entry = rx_q->cur_rx;
5256
5257                 if (priv->extend_desc)
5258                         np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
5259                 else
5260                         np = rx_q->dma_rx + next_entry;
5261
5262                 prefetch(np);
5263
5264                 /* Ensure a valid XSK buffer before proceeding */
5265                 if (!buf->xdp)
5266                         break;
5267
5268                 if (priv->extend_desc)
5269                         stmmac_rx_extended_status(priv, &priv->xstats,
5270                                                   rx_q->dma_erx + entry);
5271                 if (unlikely(status == discard_frame)) {
5272                         xsk_buff_free(buf->xdp);
5273                         buf->xdp = NULL;
5274                         dirty++;
5275                         error = 1;
5276                         if (!priv->hwts_rx_en)
5277                                 rx_errors++;
5278                 }
5279
5280                 if (unlikely(error && (status & rx_not_ls)))
5281                         goto read_again;
5282                 if (unlikely(error)) {
5283                         count++;
5284                         continue;
5285                 }
5286
5287                 /* XSK pool expects RX frame 1:1 mapped to XSK buffer */
5288                 if (likely(status & rx_not_ls)) {
5289                         xsk_buff_free(buf->xdp);
5290                         buf->xdp = NULL;
5291                         dirty++;
5292                         count++;
5293                         goto read_again;
5294                 }
5295
5296                 ctx = xsk_buff_to_stmmac_ctx(buf->xdp);
5297                 ctx->priv = priv;
5298                 ctx->desc = p;
5299                 ctx->ndesc = np;
5300
5301                 /* XDP ZC frames only support primary buffers for now */
5302                 buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
5303                 len += buf1_len;
5304
5305                 /* ACS is disabled; strip manually. */
5306                 if (likely(!(status & rx_not_ls))) {
5307                         buf1_len -= ETH_FCS_LEN;
5308                         len -= ETH_FCS_LEN;
5309                 }
5310
5311                 /* RX buffer is good and fits into an XSK pool buffer */
5312                 buf->xdp->data_end = buf->xdp->data + buf1_len;
5313                 xsk_buff_dma_sync_for_cpu(buf->xdp, rx_q->xsk_pool);
5314
5315                 prog = READ_ONCE(priv->xdp_prog);
5316                 res = __stmmac_xdp_run_prog(priv, prog, buf->xdp);
5317
5318                 switch (res) {
5319                 case STMMAC_XDP_PASS:
5320                         stmmac_dispatch_skb_zc(priv, queue, p, np, buf->xdp);
5321                         xsk_buff_free(buf->xdp);
5322                         break;
5323                 case STMMAC_XDP_CONSUMED:
5324                         xsk_buff_free(buf->xdp);
5325                         rx_dropped++;
5326                         break;
5327                 case STMMAC_XDP_TX:
5328                 case STMMAC_XDP_REDIRECT:
5329                         xdp_status |= res;
5330                         break;
5331                 }
5332
5333                 buf->xdp = NULL;
5334                 dirty++;
5335                 count++;
5336         }
5337
5338         if (status & rx_not_ls) {
5339                 rx_q->state_saved = true;
5340                 rx_q->state.error = error;
5341                 rx_q->state.len = len;
5342         }
5343
5344         stmmac_finalize_xdp_rx(priv, xdp_status);
5345
5346         flags = u64_stats_update_begin_irqsave(&rxq_stats->syncp);
5347         rxq_stats->rx_pkt_n += count;
5348         u64_stats_update_end_irqrestore(&rxq_stats->syncp, flags);
5349
5350         priv->xstats.rx_dropped += rx_dropped;
5351         priv->xstats.rx_errors += rx_errors;
5352
5353         if (xsk_uses_need_wakeup(rx_q->xsk_pool)) {
5354                 if (failure || stmmac_rx_dirty(priv, queue) > 0)
5355                         xsk_set_rx_need_wakeup(rx_q->xsk_pool);
5356                 else
5357                         xsk_clear_rx_need_wakeup(rx_q->xsk_pool);
5358
5359                 return (int)count;
5360         }
5361
5362         return failure ? limit : (int)count;
5363 }
5364
5365 /**
5366  * stmmac_rx - manage the receive process
5367  * @priv: driver private structure
5368  * @limit: napi budget
5369  * @queue: RX queue index.
5370  * Description: this is the function called by the napi poll method.
5371  * It gets all the frames inside the ring.
5372  */
5373 static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
5374 {
5375         u32 rx_errors = 0, rx_dropped = 0, rx_bytes = 0, rx_packets = 0;
5376         struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
5377         struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
5378         struct stmmac_channel *ch = &priv->channel[queue];
5379         unsigned int count = 0, error = 0, len = 0;
5380         int status = 0, coe = priv->hw->rx_csum;
5381         unsigned int next_entry = rx_q->cur_rx;
5382         enum dma_data_direction dma_dir;
5383         unsigned int desc_size;
5384         struct sk_buff *skb = NULL;
5385         struct stmmac_xdp_buff ctx;
5386         unsigned long flags;
5387         int xdp_status = 0;
5388         int buf_sz;
5389
5390         dma_dir = page_pool_get_dma_dir(rx_q->page_pool);
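        /* Round the buffer size up to a whole number of pages: this is the
         * frame size handed to xdp_init_buff() below.
         */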
5391         buf_sz = DIV_ROUND_UP(priv->dma_conf.dma_buf_sz, PAGE_SIZE) * PAGE_SIZE;
5392         limit = min(priv->dma_conf.dma_rx_size - 1, (unsigned int)limit);
5393
5394         if (netif_msg_rx_status(priv)) {
5395                 void *rx_head;
5396
5397                 netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
5398                 if (priv->extend_desc) {
5399                         rx_head = (void *)rx_q->dma_erx;
5400                         desc_size = sizeof(struct dma_extended_desc);
5401                 } else {
5402                         rx_head = (void *)rx_q->dma_rx;
5403                         desc_size = sizeof(struct dma_desc);
5404                 }
5405
5406                 stmmac_display_ring(priv, rx_head, priv->dma_conf.dma_rx_size, true,
5407                                     rx_q->dma_rx_phy, desc_size);
5408         }
5409         while (count < limit) {
5410                 unsigned int buf1_len = 0, buf2_len = 0;
5411                 enum pkt_hash_types hash_type;
5412                 struct stmmac_rx_buffer *buf;
5413                 struct dma_desc *np, *p;
5414                 int entry;
5415                 u32 hash;
5416
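                /* A frame can span several descriptors and the NAPI budget may
                 * run out in the middle of it: rx_q->state then keeps the
                 * partially built skb, the error flag and the running length
                 * so the next poll resumes where this one stopped.
                 */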
5417                 if (!count && rx_q->state_saved) {
5418                         skb = rx_q->state.skb;
5419                         error = rx_q->state.error;
5420                         len = rx_q->state.len;
5421                 } else {
5422                         rx_q->state_saved = false;
5423                         skb = NULL;
5424                         error = 0;
5425                         len = 0;
5426                 }
5427
5428 read_again:
5429                 if (count >= limit)
5430                         break;
5431
5432                 buf1_len = 0;
5433                 buf2_len = 0;
5434                 entry = next_entry;
5435                 buf = &rx_q->buf_pool[entry];
5436
5437                 if (priv->extend_desc)
5438                         p = (struct dma_desc *)(rx_q->dma_erx + entry);
5439                 else
5440                         p = rx_q->dma_rx + entry;
5441
5442                 /* read the status of the incoming frame */
5443                 status = stmmac_rx_status(priv, &priv->xstats, p);
5444                 /* check if managed by the DMA, otherwise go ahead */
5445                 if (unlikely(status & dma_own))
5446                         break;
5447
5448                 rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
5449                                                 priv->dma_conf.dma_rx_size);
5450                 next_entry = rx_q->cur_rx;
5451
5452                 if (priv->extend_desc)
5453                         np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
5454                 else
5455                         np = rx_q->dma_rx + next_entry;
5456
5457                 prefetch(np);
5458
5459                 if (priv->extend_desc)
5460                         stmmac_rx_extended_status(priv, &priv->xstats, rx_q->dma_erx + entry);
5461                 if (unlikely(status == discard_frame)) {
5462                         page_pool_recycle_direct(rx_q->page_pool, buf->page);
5463                         buf->page = NULL;
5464                         error = 1;
5465                         if (!priv->hwts_rx_en)
5466                                 rx_errors++;
5467                 }
5468
5469                 if (unlikely(error && (status & rx_not_ls)))
5470                         goto read_again;
5471                 if (unlikely(error)) {
5472                         dev_kfree_skb(skb);
5473                         skb = NULL;
5474                         count++;
5475                         continue;
5476                 }
5477
5478                 /* Buffer is good. Go on. */
5479
5480                 prefetch(page_address(buf->page) + buf->page_offset);
5481                 if (buf->sec_page)
5482                         prefetch(page_address(buf->sec_page));
5483
5484                 buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
5485                 len += buf1_len;
5486                 buf2_len = stmmac_rx_buf2_len(priv, p, status, len);
5487                 len += buf2_len;
5488
5489                 /* ACS is disabled; strip manually. */
5490                 if (likely(!(status & rx_not_ls))) {
5491                         if (buf2_len) {
5492                                 buf2_len -= ETH_FCS_LEN;
5493                                 len -= ETH_FCS_LEN;
5494                         } else if (buf1_len) {
5495                                 buf1_len -= ETH_FCS_LEN;
5496                                 len -= ETH_FCS_LEN;
5497                         }
5498                 }
5499
5500                 if (!skb) {
5501                         unsigned int pre_len, sync_len;
5502
5503                         dma_sync_single_for_cpu(priv->device, buf->addr,
5504                                                 buf1_len, dma_dir);
5505
5506                         xdp_init_buff(&ctx.xdp, buf_sz, &rx_q->xdp_rxq);
5507                         xdp_prepare_buff(&ctx.xdp, page_address(buf->page),
5508                                          buf->page_offset, buf1_len, true);
5509
5510                         pre_len = ctx.xdp.data_end - ctx.xdp.data_hard_start -
5511                                   buf->page_offset;
5512
5513                         ctx.priv = priv;
5514                         ctx.desc = p;
5515                         ctx.ndesc = np;
5516
5517                         skb = stmmac_xdp_run_prog(priv, &ctx.xdp);
5518                         /* Due to xdp_adjust_tail: the DMA sync for_device
5519                          * must cover the max length the CPU touched.
5520                          */
5521                         sync_len = ctx.xdp.data_end - ctx.xdp.data_hard_start -
5522                                    buf->page_offset;
5523                         sync_len = max(sync_len, pre_len);
5524
5525                         /* For a non-XDP_PASS verdict */
5526                         if (IS_ERR(skb)) {
5527                                 unsigned int xdp_res = -PTR_ERR(skb);
5528
5529                                 if (xdp_res & STMMAC_XDP_CONSUMED) {
5530                                         page_pool_put_page(rx_q->page_pool,
5531                                                            virt_to_head_page(ctx.xdp.data),
5532                                                            sync_len, true);
5533                                         buf->page = NULL;
5534                                         rx_dropped++;
5535
5536                                         /* Clear skb, as it carries the XDP
5537                                          * verdict status, not a real buffer.
5538                                          */
5539                                         skb = NULL;
5540
5541                                         if (unlikely((status & rx_not_ls)))
5542                                                 goto read_again;
5543
5544                                         count++;
5545                                         continue;
5546                                 } else if (xdp_res & (STMMAC_XDP_TX |
5547                                                       STMMAC_XDP_REDIRECT)) {
5548                                         xdp_status |= xdp_res;
5549                                         buf->page = NULL;
5550                                         skb = NULL;
5551                                         count++;
5552                                         continue;
5553                                 }
5554                         }
5555                 }
5556
5557                 if (!skb) {
5558                         /* XDP program may expand or reduce tail */
5559                         buf1_len = ctx.xdp.data_end - ctx.xdp.data;
5560
5561                         skb = napi_alloc_skb(&ch->rx_napi, buf1_len);
5562                         if (!skb) {
5563                                 rx_dropped++;
5564                                 count++;
5565                                 goto drain_data;
5566                         }
5567
5568                         /* XDP program may adjust header */
5569                         skb_copy_to_linear_data(skb, ctx.xdp.data, buf1_len);
5570                         skb_put(skb, buf1_len);
5571
5572                         /* Data payload copied into SKB, page ready for recycle */
5573                         page_pool_recycle_direct(rx_q->page_pool, buf->page);
5574                         buf->page = NULL;
5575                 } else if (buf1_len) {
5576                         dma_sync_single_for_cpu(priv->device, buf->addr,
5577                                                 buf1_len, dma_dir);
5578                         skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
5579                                         buf->page, buf->page_offset, buf1_len,
5580                                         priv->dma_conf.dma_buf_sz);
5581
5582                         /* Data payload appended into SKB */
5583                         skb_mark_for_recycle(skb);
5584                         buf->page = NULL;
5585                 }
5586
5587                 if (buf2_len) {
5588                         dma_sync_single_for_cpu(priv->device, buf->sec_addr,
5589                                                 buf2_len, dma_dir);
5590                         skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
5591                                         buf->sec_page, 0, buf2_len,
5592                                         priv->dma_conf.dma_buf_sz);
5593
5594                         /* Data payload appended into SKB */
5595                         skb_mark_for_recycle(skb);
5596                         buf->sec_page = NULL;
5597                 }
5598
5599 drain_data:
5600                 if (likely(status & rx_not_ls))
5601                         goto read_again;
5602                 if (!skb)
5603                         continue;
5604
5605                 /* Got entire packet into SKB. Finish it. */
5606
5607                 stmmac_get_rx_hwtstamp(priv, p, np, skb);
5608
5609                 if (priv->hw->hw_vlan_en)
5610                         /* MAC level stripping. */
5611                         stmmac_rx_hw_vlan(priv, priv->hw, p, skb);
5612                 else
5613                         /* Driver level stripping. */
5614                         stmmac_rx_vlan(priv->dev, skb);
5615
5616                 skb->protocol = eth_type_trans(skb, priv->dev);
5617
5618                 if (unlikely(!coe) || !stmmac_has_ip_ethertype(skb))
5619                         skb_checksum_none_assert(skb);
5620                 else
5621                         skb->ip_summed = CHECKSUM_UNNECESSARY;
5622
5623                 if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
5624                         skb_set_hash(skb, hash, hash_type);
5625
5626                 skb_record_rx_queue(skb, queue);
5627                 napi_gro_receive(&ch->rx_napi, skb);
5628                 skb = NULL;
5629
5630                 rx_packets++;
5631                 rx_bytes += len;
5632                 count++;
5633         }
5634
5635         if (status & rx_not_ls || skb) {
5636                 rx_q->state_saved = true;
5637                 rx_q->state.skb = skb;
5638                 rx_q->state.error = error;
5639                 rx_q->state.len = len;
5640         }
5641
5642         stmmac_finalize_xdp_rx(priv, xdp_status);
5643
5644         stmmac_rx_refill(priv, queue);
5645
5646         flags = u64_stats_update_begin_irqsave(&rxq_stats->syncp);
5647         rxq_stats->rx_packets += rx_packets;
5648         rxq_stats->rx_bytes += rx_bytes;
5649         rxq_stats->rx_pkt_n += count;
5650         u64_stats_update_end_irqrestore(&rxq_stats->syncp, flags);
5651
5652         priv->xstats.rx_dropped += rx_dropped;
5653         priv->xstats.rx_errors += rx_errors;
5654
5655         return count;
5656 }
5657
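/**
 *  stmmac_napi_poll_rx - RX NAPI poll callback
 *  @napi: pointer to the channel RX NAPI structure
 *  @budget: maximum number of packets the poll may process
 *  Description: processes received frames on the channel RX ring via
 *  stmmac_rx(). When less than @budget packets were handled, NAPI is
 *  completed and the RX DMA interrupt for the channel is re-enabled.
 */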
5658 static int stmmac_napi_poll_rx(struct napi_struct *napi, int budget)
5659 {
5660         struct stmmac_channel *ch =
5661                 container_of(napi, struct stmmac_channel, rx_napi);
5662         struct stmmac_priv *priv = ch->priv_data;
5663         struct stmmac_rxq_stats *rxq_stats;
5664         u32 chan = ch->index;
5665         unsigned long flags;
5666         int work_done;
5667
5668         rxq_stats = &priv->xstats.rxq_stats[chan];
5669         flags = u64_stats_update_begin_irqsave(&rxq_stats->syncp);
5670         rxq_stats->napi_poll++;
5671         u64_stats_update_end_irqrestore(&rxq_stats->syncp, flags);
5672
5673         work_done = stmmac_rx(priv, budget, chan);
5674         if (work_done < budget && napi_complete_done(napi, work_done)) {
5675                 unsigned long flags;
5676
5677                 spin_lock_irqsave(&ch->lock, flags);
5678                 stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
5679                 spin_unlock_irqrestore(&ch->lock, flags);
5680         }
5681
5682         return work_done;
5683 }
5684
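/**
 *  stmmac_napi_poll_tx - TX NAPI poll callback
 *  @napi: pointer to the channel TX NAPI structure
 *  @budget: maximum amount of TX clean work allowed
 *  Description: reclaims completed TX descriptors via stmmac_tx_clean().
 *  When the work is below @budget, NAPI is completed and the TX DMA
 *  interrupt is re-enabled; the TX coalescing timer is re-armed if
 *  packets are still pending.
 */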
5685 static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget)
5686 {
5687         struct stmmac_channel *ch =
5688                 container_of(napi, struct stmmac_channel, tx_napi);
5689         struct stmmac_priv *priv = ch->priv_data;
5690         struct stmmac_txq_stats *txq_stats;
5691         bool pending_packets = false;
5692         u32 chan = ch->index;
5693         unsigned long flags;
5694         int work_done;
5695
5696         txq_stats = &priv->xstats.txq_stats[chan];
5697         flags = u64_stats_update_begin_irqsave(&txq_stats->syncp);
5698         txq_stats->napi_poll++;
5699         u64_stats_update_end_irqrestore(&txq_stats->syncp, flags);
5700
5701         work_done = stmmac_tx_clean(priv, budget, chan, &pending_packets);
5702         work_done = min(work_done, budget);
5703
5704         if (work_done < budget && napi_complete_done(napi, work_done)) {
5705                 unsigned long flags;
5706
5707                 spin_lock_irqsave(&ch->lock, flags);
5708                 stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
5709                 spin_unlock_irqrestore(&ch->lock, flags);
5710         }
5711
5712         /* TX still has packets to handle, check if we need to arm the tx timer */
5713         if (pending_packets)
5714                 stmmac_tx_timer_arm(priv, chan);
5715
5716         return work_done;
5717 }
5718
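/**
 *  stmmac_napi_poll_rxtx - combined RX/TX NAPI poll callback (XDP zero-copy path)
 *  @napi: pointer to the channel rxtx NAPI structure
 *  @budget: maximum number of packets to process
 *  Description: reclaims TX descriptors, processes zero-copy RX via
 *  stmmac_rx_zc() and re-enables both DMA interrupts only once RX and TX
 *  work are both below @budget; the TX timer is re-armed if packets are
 *  still pending.
 */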
5719 static int stmmac_napi_poll_rxtx(struct napi_struct *napi, int budget)
5720 {
5721         struct stmmac_channel *ch =
5722                 container_of(napi, struct stmmac_channel, rxtx_napi);
5723         struct stmmac_priv *priv = ch->priv_data;
5724         bool tx_pending_packets = false;
5725         int rx_done, tx_done, rxtx_done;
5726         struct stmmac_rxq_stats *rxq_stats;
5727         struct stmmac_txq_stats *txq_stats;
5728         u32 chan = ch->index;
5729         unsigned long flags;
5730
5731         rxq_stats = &priv->xstats.rxq_stats[chan];
5732         flags = u64_stats_update_begin_irqsave(&rxq_stats->syncp);
5733         rxq_stats->napi_poll++;
5734         u64_stats_update_end_irqrestore(&rxq_stats->syncp, flags);
5735
5736         txq_stats = &priv->xstats.txq_stats[chan];
5737         flags = u64_stats_update_begin_irqsave(&txq_stats->syncp);
5738         txq_stats->napi_poll++;
5739         u64_stats_update_end_irqrestore(&txq_stats->syncp, flags);
5740
5741         tx_done = stmmac_tx_clean(priv, budget, chan, &tx_pending_packets);
5742         tx_done = min(tx_done, budget);
5743
5744         rx_done = stmmac_rx_zc(priv, budget, chan);
5745
5746         rxtx_done = max(tx_done, rx_done);
5747
5748         /* If either TX or RX work is not complete, return budget
5749                  * and keep polling
5750          */
5751         if (rxtx_done >= budget)
5752                 return budget;
5753
5754         /* all work done, exit the polling mode */
5755         if (napi_complete_done(napi, rxtx_done)) {
5756                 unsigned long flags;
5757
5758                 spin_lock_irqsave(&ch->lock, flags);
5759                 /* Both RX and TX work are complete,
5760                  * so enable both RX & TX IRQs.
5761                  */
5762                 stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
5763                 spin_unlock_irqrestore(&ch->lock, flags);
5764         }
5765
5766         /* TX still has packets to handle, check if we need to arm the tx timer */
5767         if (tx_pending_packets)
5768                 stmmac_tx_timer_arm(priv, chan);
5769
5770         return min(rxtx_done, budget - 1);
5771 }
5772
5773 /**
5774  *  stmmac_tx_timeout
5775  *  @dev : Pointer to net device structure
5776  *  @txqueue: the index of the hanging transmit queue
5777  *  Description: this function is called when a packet transmission fails to
5778  *   complete within a reasonable time. The driver will mark the error in the
5779  *   netdev structure and arrange for the device to be reset to a sane state
5780  *   in order to transmit a new packet.
5781  */
5782 static void stmmac_tx_timeout(struct net_device *dev, unsigned int txqueue)
5783 {
5784         struct stmmac_priv *priv = netdev_priv(dev);
5785
5786         stmmac_global_err(priv);
5787 }
5788
5789 /**
5790  *  stmmac_set_rx_mode - entry point for multicast addressing
5791  *  @dev : pointer to the device structure
5792  *  Description:
5793  *  This function is a driver entry point which gets called by the kernel
5794  *  whenever multicast addresses must be enabled/disabled.
5795  *  Return value:
5796  *  void.
5797  */
5798 static void stmmac_set_rx_mode(struct net_device *dev)
5799 {
5800         struct stmmac_priv *priv = netdev_priv(dev);
5801
5802         stmmac_set_filter(priv, priv->hw, dev);
5803 }
5804
5805 /**
5806  *  stmmac_change_mtu - entry point to change MTU size for the device.
5807  *  @dev : device pointer.
5808  *  @new_mtu : the new MTU size for the device.
5809  *  Description: the Maximum Transmission Unit (MTU) is used by the network layer
5810  *  to drive packet transmission. Ethernet has an MTU of 1500 octets
5811  *  (ETH_DATA_LEN). This value can be changed with ifconfig.
5812  *  Return value:
5813  *  0 on success and an appropriate (-)ve integer as defined in errno.h
5814  *  file on failure.
5815  */
5816 static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
5817 {
5818         struct stmmac_priv *priv = netdev_priv(dev);
5819         int txfifosz = priv->plat->tx_fifo_size;
5820         struct stmmac_dma_conf *dma_conf;
5821         const int mtu = new_mtu;
5822         int ret;
5823
5824         if (txfifosz == 0)
5825                 txfifosz = priv->dma_cap.tx_fifo_size;
5826
5827         txfifosz /= priv->plat->tx_queues_to_use;
5828
5829         if (stmmac_xdp_is_enabled(priv) && new_mtu > ETH_DATA_LEN) {
5830                 netdev_dbg(priv->dev, "Jumbo frames not supported for XDP\n");
5831                 return -EINVAL;
5832         }
5833
5834         new_mtu = STMMAC_ALIGN(new_mtu);
5835
5836         /* If this condition is true, the FIFO is too small or the MTU is too large */
5837         if ((txfifosz < new_mtu) || (new_mtu > BUF_SIZE_16KiB))
5838                 return -EINVAL;
5839
5840         if (netif_running(dev)) {
5841                 netdev_dbg(priv->dev, "restarting interface to change its MTU\n");
5842                 /* Try to allocate the new DMA conf with the new mtu */
5843                 dma_conf = stmmac_setup_dma_desc(priv, mtu);
5844                 if (IS_ERR(dma_conf)) {
5845                         netdev_err(priv->dev, "failed allocating new dma conf for new MTU %d\n",
5846                                    mtu);
5847                         return PTR_ERR(dma_conf);
5848                 }
5849
5850                 stmmac_release(dev);
5851
5852                 ret = __stmmac_open(dev, dma_conf);
5853                 if (ret) {
5854                         free_dma_desc_resources(priv, dma_conf);
5855                         kfree(dma_conf);
5856                         netdev_err(priv->dev, "failed reopening the interface after MTU change\n");
5857                         return ret;
5858                 }
5859
5860                 kfree(dma_conf);
5861
5862                 stmmac_set_rx_mode(dev);
5863         }
5864
5865         dev->mtu = mtu;
5866         netdev_update_features(dev);
5867
5868         return 0;
5869 }
5870
5871 static netdev_features_t stmmac_fix_features(struct net_device *dev,
5872                                              netdev_features_t features)
5873 {
5874         struct stmmac_priv *priv = netdev_priv(dev);
5875
5876         if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
5877                 features &= ~NETIF_F_RXCSUM;
5878
5879         if (!priv->plat->tx_coe)
5880                 features &= ~NETIF_F_CSUM_MASK;
5881
5882         /* Some GMAC devices have buggy Jumbo frame support that
5883          * requires the Tx COE to be disabled for oversized frames
5884          * (due to limited buffer sizes). In this case we disable
5885          * the TX csum insertion in the TDES and do not use SF.
5886          */
5887         if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
5888                 features &= ~NETIF_F_CSUM_MASK;
5889
5890         /* Disable tso if asked by ethtool */
5891         if ((priv->plat->flags & STMMAC_FLAG_TSO_EN) && (priv->dma_cap.tsoen)) {
5892                 if (features & NETIF_F_TSO)
5893                         priv->tso = true;
5894                 else
5895                         priv->tso = false;
5896         }
5897
5898         return features;
5899 }
5900
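/**
 *  stmmac_set_features - entry point to update the device features
 *  @netdev: device pointer
 *  @features: new feature set requested by the stack
 *  Description: updates RX checksum offload, Split Header and HW VLAN
 *  stripping according to @features and reprograms the MAC accordingly.
 *  Return value: always 0.
 */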
5901 static int stmmac_set_features(struct net_device *netdev,
5902                                netdev_features_t features)
5903 {
5904         struct stmmac_priv *priv = netdev_priv(netdev);
5905
5906         /* Keep the COE Type in case csum is supported */
5907         if (features & NETIF_F_RXCSUM)
5908                 priv->hw->rx_csum = priv->plat->rx_coe;
5909         else
5910                 priv->hw->rx_csum = 0;
5911         /* No check needed because rx_coe was set earlier and will be
5912          * fixed up if there is an issue.
5913          */
5914         stmmac_rx_ipc(priv, priv->hw);
5915
5916         if (priv->sph_cap) {
5917                 bool sph_en = (priv->hw->rx_csum > 0) && priv->sph;
5918                 u32 chan;
5919
5920                 for (chan = 0; chan < priv->plat->rx_queues_to_use; chan++)
5921                         stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
5922         }
5923
5924         if (features & NETIF_F_HW_VLAN_CTAG_RX)
5925                 priv->hw->hw_vlan_en = true;
5926         else
5927                 priv->hw->hw_vlan_en = false;
5928
5929         stmmac_set_hw_vlan_mode(priv, priv->hw);
5930
5931         return 0;
5932 }
5933
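/**
 *  stmmac_fpe_event_status - handle Frame Preemption (FPE) handshake events
 *  @priv: driver private structure
 *  @status: FPE event bits returned by stmmac_fpe_irq_status()
 *  Description: tracks the local and link-partner verify/response mPacket
 *  exchange, updates the FPE state machine and, when needed, schedules the
 *  FPE workqueue task to complete the handshake.
 */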
5934 static void stmmac_fpe_event_status(struct stmmac_priv *priv, int status)
5935 {
5936         struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
5937         enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
5938         enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
5939         bool *hs_enable = &fpe_cfg->hs_enable;
5940
5941         if (status == FPE_EVENT_UNKNOWN || !*hs_enable)
5942                 return;
5943
5944         /* If LP has sent verify mPacket, LP is FPE capable */
5945         if ((status & FPE_EVENT_RVER) == FPE_EVENT_RVER) {
5946                 if (*lp_state < FPE_STATE_CAPABLE)
5947                         *lp_state = FPE_STATE_CAPABLE;
5948
5949                 /* If the user has requested FPE enable, respond quickly */
5950                 if (*hs_enable)
5951                         stmmac_fpe_send_mpacket(priv, priv->ioaddr,
5952                                                 fpe_cfg,
5953                                                 MPACKET_RESPONSE);
5954         }
5955
5956         /* If Local has sent verify mPacket, Local is FPE capable */
5957         if ((status & FPE_EVENT_TVER) == FPE_EVENT_TVER) {
5958                 if (*lo_state < FPE_STATE_CAPABLE)
5959                         *lo_state = FPE_STATE_CAPABLE;
5960         }
5961
5962         /* If LP has sent response mPacket, LP is entering FPE ON */
5963         if ((status & FPE_EVENT_RRSP) == FPE_EVENT_RRSP)
5964                 *lp_state = FPE_STATE_ENTERING_ON;
5965
5966         /* If Local has sent response mPacket, Local is entering FPE ON */
5967         if ((status & FPE_EVENT_TRSP) == FPE_EVENT_TRSP)
5968                 *lo_state = FPE_STATE_ENTERING_ON;
5969
5970         if (!test_bit(__FPE_REMOVING, &priv->fpe_task_state) &&
5971             !test_and_set_bit(__FPE_TASK_SCHED, &priv->fpe_task_state) &&
5972             priv->fpe_wq) {
5973                 queue_work(priv->fpe_wq, &priv->fpe_task);
5974         }
5975 }
5976
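/**
 *  stmmac_common_interrupt - handle MAC core (non-DMA) interrupt sources
 *  @priv: driver private structure
 *  Description: services EST, FPE, LPI, MTL and PCS related interrupt
 *  status, reports wake-up events and handles the timestamp interrupt.
 *  Called from both the main ISR and the dedicated MAC IRQ handler.
 */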
5977 static void stmmac_common_interrupt(struct stmmac_priv *priv)
5978 {
5979         u32 rx_cnt = priv->plat->rx_queues_to_use;
5980         u32 tx_cnt = priv->plat->tx_queues_to_use;
5981         u32 queues_count;
5982         u32 queue;
5983         bool xmac;
5984
5985         xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
5986         queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt;
5987
5988         if (priv->irq_wake)
5989                 pm_wakeup_event(priv->device, 0);
5990
5991         if (priv->dma_cap.estsel)
5992                 stmmac_est_irq_status(priv, priv, priv->dev,
5993                                       &priv->xstats, tx_cnt);
5994
5995         if (priv->dma_cap.fpesel) {
5996                 int status = stmmac_fpe_irq_status(priv, priv->ioaddr,
5997                                                    priv->dev);
5998
5999                 stmmac_fpe_event_status(priv, status);
6000         }
6001
6002         /* Handle the GMAC's own interrupts */
6003         if ((priv->plat->has_gmac) || xmac) {
6004                 int status = stmmac_host_irq_status(priv, priv->hw, &priv->xstats);
6005
6006                 if (unlikely(status)) {
6007                         /* For LPI we need to save the tx status */
6008                         if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
6009                                 priv->tx_path_in_lpi_mode = true;
6010                         if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
6011                                 priv->tx_path_in_lpi_mode = false;
6012                 }
6013
6014                 for (queue = 0; queue < queues_count; queue++) {
6015                         status = stmmac_host_mtl_irq_status(priv, priv->hw,
6016                                                             queue);
6017                 }
6018
6019                 /* PCS link status */
6020                 if (priv->hw->pcs &&
6021                     !(priv->plat->flags & STMMAC_FLAG_HAS_INTEGRATED_PCS)) {
6022                         if (priv->xstats.pcs_link)
6023                                 netif_carrier_on(priv->dev);
6024                         else
6025                                 netif_carrier_off(priv->dev);
6026                 }
6027
6028                 stmmac_timestamp_interrupt(priv, priv);
6029         }
6030 }
6031
6032 /**
6033  *  stmmac_interrupt - main ISR
6034  *  @irq: interrupt number.
6035  *  @dev_id: to pass the net device pointer.
6036  *  Description: this is the main driver interrupt service routine.
6037  *  It can call:
6038  *  o DMA service routine (to manage incoming frame reception and transmission
6039  *    status)
6040  *  o Core interrupts to manage: remote wake-up, management counter, LPI
6041  *    interrupts.
6042  */
6043 static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
6044 {
6045         struct net_device *dev = (struct net_device *)dev_id;
6046         struct stmmac_priv *priv = netdev_priv(dev);
6047
6048         /* Check if adapter is up */
6049         if (test_bit(STMMAC_DOWN, &priv->state))
6050                 return IRQ_HANDLED;
6051
6052         /* Check if a fatal error happened */
6053         if (stmmac_safety_feat_interrupt(priv))
6054                 return IRQ_HANDLED;
6055
6056         /* To handle Common interrupts */
6057         stmmac_common_interrupt(priv);
6058
6059         /* To handle DMA interrupts */
6060         stmmac_dma_interrupt(priv);
6061
6062         return IRQ_HANDLED;
6063 }
6064
6065 static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id)
6066 {
6067         struct net_device *dev = (struct net_device *)dev_id;
6068         struct stmmac_priv *priv = netdev_priv(dev);
6069
6070         if (unlikely(!dev)) {
6071                 netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
6072                 return IRQ_NONE;
6073         }
6074
6075         /* Check if adapter is up */
6076         if (test_bit(STMMAC_DOWN, &priv->state))
6077                 return IRQ_HANDLED;
6078
6079         /* To handle Common interrupts */
6080         stmmac_common_interrupt(priv);
6081
6082         return IRQ_HANDLED;
6083 }
6084
6085 static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id)
6086 {
6087         struct net_device *dev = (struct net_device *)dev_id;
6088         struct stmmac_priv *priv = netdev_priv(dev);
6089
6090         if (unlikely(!dev)) {
6091                 netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
6092                 return IRQ_NONE;
6093         }
6094
6095         /* Check if adapter is up */
6096         if (test_bit(STMMAC_DOWN, &priv->state))
6097                 return IRQ_HANDLED;
6098
6099         /* Check if a fatal error happened */
6100         stmmac_safety_feat_interrupt(priv);
6101
6102         return IRQ_HANDLED;
6103 }
6104
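/**
 *  stmmac_msi_intr_tx - per-channel TX MSI interrupt handler
 *  @irq: interrupt number
 *  @data: pointer to the TX queue structure of the channel
 *  Description: runs stmmac_napi_check() for the TX direction of the
 *  channel and recovers from TX hard errors by bumping the DMA threshold
 *  or calling stmmac_tx_err().
 */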
6105 static irqreturn_t stmmac_msi_intr_tx(int irq, void *data)
6106 {
6107         struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)data;
6108         struct stmmac_dma_conf *dma_conf;
6109         int chan = tx_q->queue_index;
6110         struct stmmac_priv *priv;
6111         int status;
6112
6113         dma_conf = container_of(tx_q, struct stmmac_dma_conf, tx_queue[chan]);
6114         priv = container_of(dma_conf, struct stmmac_priv, dma_conf);
6115
6116         if (unlikely(!data)) {
6117                 netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
6118                 return IRQ_NONE;
6119         }
6120
6121         /* Check if adapter is up */
6122         if (test_bit(STMMAC_DOWN, &priv->state))
6123                 return IRQ_HANDLED;
6124
6125         status = stmmac_napi_check(priv, chan, DMA_DIR_TX);
6126
6127         if (unlikely(status & tx_hard_error_bump_tc)) {
6128                 /* Try to bump up the dma threshold on this failure */
6129                 stmmac_bump_dma_threshold(priv, chan);
6130         } else if (unlikely(status == tx_hard_error)) {
6131                 stmmac_tx_err(priv, chan);
6132         }
6133
6134         return IRQ_HANDLED;
6135 }
6136
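/**
 *  stmmac_msi_intr_rx - per-channel RX MSI interrupt handler
 *  @irq: interrupt number
 *  @data: pointer to the RX queue structure of the channel
 *  Description: runs stmmac_napi_check() for the RX direction of the channel.
 */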
6137 static irqreturn_t stmmac_msi_intr_rx(int irq, void *data)
6138 {
6139         struct stmmac_rx_queue *rx_q = (struct stmmac_rx_queue *)data;
6140         struct stmmac_dma_conf *dma_conf;
6141         int chan = rx_q->queue_index;
6142         struct stmmac_priv *priv;
6143
6144         dma_conf = container_of(rx_q, struct stmmac_dma_conf, rx_queue[chan]);
6145         priv = container_of(dma_conf, struct stmmac_priv, dma_conf);
6146
6147         if (unlikely(!data)) {
6148                 netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
6149                 return IRQ_NONE;
6150         }
6151
6152         /* Check if adapter is up */
6153         if (test_bit(STMMAC_DOWN, &priv->state))
6154                 return IRQ_HANDLED;
6155
6156         stmmac_napi_check(priv, chan, DMA_DIR_RX);
6157
6158         return IRQ_HANDLED;
6159 }
6160
6161 /**
6162  *  stmmac_ioctl - Entry point for the Ioctl
6163  *  @dev: Device pointer.
6164  *  @rq: An IOCTL specific structure that can contain a pointer to
6165  *  a proprietary structure used to pass information to the driver.
6166  *  @cmd: IOCTL command
6167  *  Description:
6168  *  Currently it supports the phy_mii_ioctl(...) and HW time stamping.
6169  */
6170 static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
6171 {
6172         struct stmmac_priv *priv = netdev_priv(dev);
6173         int ret = -EOPNOTSUPP;
6174
6175         if (!netif_running(dev))
6176                 return -EINVAL;
6177
6178         switch (cmd) {
6179         case SIOCGMIIPHY:
6180         case SIOCGMIIREG:
6181         case SIOCSMIIREG:
6182                 ret = phylink_mii_ioctl(priv->phylink, rq, cmd);
6183                 break;
6184         case SIOCSHWTSTAMP:
6185                 ret = stmmac_hwtstamp_set(dev, rq);
6186                 break;
6187         case SIOCGHWTSTAMP:
6188                 ret = stmmac_hwtstamp_get(dev, rq);
6189                 break;
6190         default:
6191                 break;
6192         }
6193
6194         return ret;
6195 }
6196
6197 static int stmmac_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
6198                                     void *cb_priv)
6199 {
6200         struct stmmac_priv *priv = cb_priv;
6201         int ret = -EOPNOTSUPP;
6202
6203         if (!tc_cls_can_offload_and_chain0(priv->dev, type_data))
6204                 return ret;
6205
6206         __stmmac_disable_all_queues(priv);
6207
6208         switch (type) {
6209         case TC_SETUP_CLSU32:
6210                 ret = stmmac_tc_setup_cls_u32(priv, priv, type_data);
6211                 break;
6212         case TC_SETUP_CLSFLOWER:
6213                 ret = stmmac_tc_setup_cls(priv, priv, type_data);
6214                 break;
6215         default:
6216                 break;
6217         }
6218
6219         stmmac_enable_all_queues(priv);
6220         return ret;
6221 }
6222
6223 static LIST_HEAD(stmmac_block_cb_list);
6224
6225 static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type,
6226                            void *type_data)
6227 {
6228         struct stmmac_priv *priv = netdev_priv(ndev);
6229
6230         switch (type) {
6231         case TC_QUERY_CAPS:
6232                 return stmmac_tc_query_caps(priv, priv, type_data);
6233         case TC_SETUP_BLOCK:
6234                 return flow_block_cb_setup_simple(type_data,
6235                                                   &stmmac_block_cb_list,
6236                                                   stmmac_setup_tc_block_cb,
6237                                                   priv, priv, true);
6238         case TC_SETUP_QDISC_CBS:
6239                 return stmmac_tc_setup_cbs(priv, priv, type_data);
6240         case TC_SETUP_QDISC_TAPRIO:
6241                 return stmmac_tc_setup_taprio(priv, priv, type_data);
6242         case TC_SETUP_QDISC_ETF:
6243                 return stmmac_tc_setup_etf(priv, priv, type_data);
6244         default:
6245                 return -EOPNOTSUPP;
6246         }
6247 }
6248
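/**
 *  stmmac_select_queue - ndo_select_queue callback
 *  @dev: device pointer
 *  @skb: buffer to transmit
 *  @sb_dev: subordinate device (unused)
 *  Description: steers TSO/USO frames to queue 0 (see the comment below)
 *  and lets netdev_pick_tx() select the queue for all other traffic.
 */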
6249 static u16 stmmac_select_queue(struct net_device *dev, struct sk_buff *skb,
6250                                struct net_device *sb_dev)
6251 {
6252         int gso = skb_shinfo(skb)->gso_type;
6253
6254         if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6 | SKB_GSO_UDP_L4)) {
6255                 /*
6256                  * There is no way to determine the number of TSO/USO
6257                  * capable Queues. Let's always use Queue 0
6258                  * because if TSO/USO is supported then at least this
6259                  * one will be capable.
6260                  */
6261                 return 0;
6262         }
6263
6264         return netdev_pick_tx(dev, skb, NULL) % dev->real_num_tx_queues;
6265 }
6266
6267 static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
6268 {
6269         struct stmmac_priv *priv = netdev_priv(ndev);
6270         int ret = 0;
6271
6272         ret = pm_runtime_resume_and_get(priv->device);
6273         if (ret < 0)
6274                 return ret;
6275
6276         ret = eth_mac_addr(ndev, addr);
6277         if (ret)
6278                 goto set_mac_error;
6279
6280         stmmac_set_umac_addr(priv, priv->hw, ndev->dev_addr, 0);
6281
6282 set_mac_error:
6283         pm_runtime_put(priv->device);
6284
6285         return ret;
6286 }
6287
6288 #ifdef CONFIG_DEBUG_FS
6289 static struct dentry *stmmac_fs_dir;
6290
6291 static void sysfs_display_ring(void *head, int size, int extend_desc,
6292                                struct seq_file *seq, dma_addr_t dma_phy_addr)
6293 {
6294         struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
6295         struct dma_desc *p = (struct dma_desc *)head;
6296         unsigned int desc_size;
6297         dma_addr_t dma_addr;
6298         int i;
6299
6300         desc_size = extend_desc ? sizeof(*ep) : sizeof(*p);
6301         for (i = 0; i < size; i++) {
6302                 dma_addr = dma_phy_addr + i * desc_size;
6303                 seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n",
6304                                 i, &dma_addr,
6305                                 le32_to_cpu(p->des0), le32_to_cpu(p->des1),
6306                                 le32_to_cpu(p->des2), le32_to_cpu(p->des3));
6307                 if (extend_desc)
6308                         p = &(++ep)->basic;
6309                 else
6310                         p++;
6311         }
6312 }
6313
6314 static int stmmac_rings_status_show(struct seq_file *seq, void *v)
6315 {
6316         struct net_device *dev = seq->private;
6317         struct stmmac_priv *priv = netdev_priv(dev);
6318         u32 rx_count = priv->plat->rx_queues_to_use;
6319         u32 tx_count = priv->plat->tx_queues_to_use;
6320         u32 queue;
6321
6322         if ((dev->flags & IFF_UP) == 0)
6323                 return 0;
6324
6325         for (queue = 0; queue < rx_count; queue++) {
6326                 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
6327
6328                 seq_printf(seq, "RX Queue %d:\n", queue);
6329
6330                 if (priv->extend_desc) {
6331                         seq_printf(seq, "Extended descriptor ring:\n");
6332                         sysfs_display_ring((void *)rx_q->dma_erx,
6333                                            priv->dma_conf.dma_rx_size, 1, seq, rx_q->dma_rx_phy);
6334                 } else {
6335                         seq_printf(seq, "Descriptor ring:\n");
6336                         sysfs_display_ring((void *)rx_q->dma_rx,
6337                                            priv->dma_conf.dma_rx_size, 0, seq, rx_q->dma_rx_phy);
6338                 }
6339         }
6340
6341         for (queue = 0; queue < tx_count; queue++) {
6342                 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
6343
6344                 seq_printf(seq, "TX Queue %d:\n", queue);
6345
6346                 if (priv->extend_desc) {
6347                         seq_printf(seq, "Extended descriptor ring:\n");
6348                         sysfs_display_ring((void *)tx_q->dma_etx,
6349                                            priv->dma_conf.dma_tx_size, 1, seq, tx_q->dma_tx_phy);
6350                 } else if (!(tx_q->tbs & STMMAC_TBS_AVAIL)) {
6351                         seq_printf(seq, "Descriptor ring:\n");
6352                         sysfs_display_ring((void *)tx_q->dma_tx,
6353                                            priv->dma_conf.dma_tx_size, 0, seq, tx_q->dma_tx_phy);
6354                 }
6355         }
6356
6357         return 0;
6358 }
6359 DEFINE_SHOW_ATTRIBUTE(stmmac_rings_status);
6360
6361 static int stmmac_dma_cap_show(struct seq_file *seq, void *v)
6362 {
6363         static const char * const dwxgmac_timestamp_source[] = {
6364                 "None",
6365                 "Internal",
6366                 "External",
6367                 "Both",
6368         };
6369         static const char * const dwxgmac_safety_feature_desc[] = {
6370                 "No",
6371                 "All Safety Features with ECC and Parity",
6372                 "All Safety Features without ECC or Parity",
6373                 "All Safety Features with Parity Only",
6374                 "ECC Only",
6375                 "UNDEFINED",
6376                 "UNDEFINED",
6377                 "UNDEFINED",
6378         };
6379         struct net_device *dev = seq->private;
6380         struct stmmac_priv *priv = netdev_priv(dev);
6381
6382         if (!priv->hw_cap_support) {
6383                 seq_printf(seq, "DMA HW features not supported\n");
6384                 return 0;
6385         }
6386
6387         seq_printf(seq, "==============================\n");
6388         seq_printf(seq, "\tDMA HW features\n");
6389         seq_printf(seq, "==============================\n");
6390
6391         seq_printf(seq, "\t10/100 Mbps: %s\n",
6392                    (priv->dma_cap.mbps_10_100) ? "Y" : "N");
6393         seq_printf(seq, "\t1000 Mbps: %s\n",
6394                    (priv->dma_cap.mbps_1000) ? "Y" : "N");
6395         seq_printf(seq, "\tHalf duplex: %s\n",
6396                    (priv->dma_cap.half_duplex) ? "Y" : "N");
6397         if (priv->plat->has_xgmac) {
6398                 seq_printf(seq,
6399                            "\tNumber of Additional MAC address registers: %d\n",
6400                            priv->dma_cap.multi_addr);
6401         } else {
6402                 seq_printf(seq, "\tHash Filter: %s\n",
6403                            (priv->dma_cap.hash_filter) ? "Y" : "N");
6404                 seq_printf(seq, "\tMultiple MAC address registers: %s\n",
6405                            (priv->dma_cap.multi_addr) ? "Y" : "N");
6406         }
6407         seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n",
6408                    (priv->dma_cap.pcs) ? "Y" : "N");
6409         seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
6410                    (priv->dma_cap.sma_mdio) ? "Y" : "N");
6411         seq_printf(seq, "\tPMT Remote wake up: %s\n",
6412                    (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N");
6413         seq_printf(seq, "\tPMT Magic Frame: %s\n",
6414                    (priv->dma_cap.pmt_magic_frame) ? "Y" : "N");
6415         seq_printf(seq, "\tRMON module: %s\n",
6416                    (priv->dma_cap.rmon) ? "Y" : "N");
6417         seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
6418                    (priv->dma_cap.time_stamp) ? "Y" : "N");
6419         seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
6420                    (priv->dma_cap.atime_stamp) ? "Y" : "N");
6421         if (priv->plat->has_xgmac)
6422                 seq_printf(seq, "\tTimestamp System Time Source: %s\n",
6423                            dwxgmac_timestamp_source[priv->dma_cap.tssrc]);
6424         seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n",
6425                    (priv->dma_cap.eee) ? "Y" : "N");
6426         seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
6427         seq_printf(seq, "\tChecksum Offload in TX: %s\n",
6428                    (priv->dma_cap.tx_coe) ? "Y" : "N");
6429         if (priv->synopsys_id >= DWMAC_CORE_4_00 ||
6430             priv->plat->has_xgmac) {
6431                 seq_printf(seq, "\tIP Checksum Offload in RX: %s\n",
6432                            (priv->dma_cap.rx_coe) ? "Y" : "N");
6433         } else {
6434                 seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
6435                            (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
6436                 seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
6437                            (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
6438                 seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
6439                            (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
6440         }
6441         seq_printf(seq, "\tNumber of Additional RX channel: %d\n",
6442                    priv->dma_cap.number_rx_channel);
6443         seq_printf(seq, "\tNumber of Additional TX channel: %d\n",
6444                    priv->dma_cap.number_tx_channel);
6445         seq_printf(seq, "\tNumber of Additional RX queues: %d\n",
6446                    priv->dma_cap.number_rx_queues);
6447         seq_printf(seq, "\tNumber of Additional TX queues: %d\n",
6448                    priv->dma_cap.number_tx_queues);
6449         seq_printf(seq, "\tEnhanced descriptors: %s\n",
6450                    (priv->dma_cap.enh_desc) ? "Y" : "N");
6451         seq_printf(seq, "\tTX Fifo Size: %d\n", priv->dma_cap.tx_fifo_size);
6452         seq_printf(seq, "\tRX Fifo Size: %d\n", priv->dma_cap.rx_fifo_size);
6453         seq_printf(seq, "\tHash Table Size: %lu\n", priv->dma_cap.hash_tb_sz ?
6454                    (BIT(priv->dma_cap.hash_tb_sz) << 5) : 0);
6455         seq_printf(seq, "\tTSO: %s\n", priv->dma_cap.tsoen ? "Y" : "N");
6456         seq_printf(seq, "\tNumber of PPS Outputs: %d\n",
6457                    priv->dma_cap.pps_out_num);
6458         seq_printf(seq, "\tSafety Features: %s\n",
6459                    dwxgmac_safety_feature_desc[priv->dma_cap.asp]);
6460         seq_printf(seq, "\tFlexible RX Parser: %s\n",
6461                    priv->dma_cap.frpsel ? "Y" : "N");
6462         seq_printf(seq, "\tEnhanced Addressing: %d\n",
6463                    priv->dma_cap.host_dma_width);
6464         seq_printf(seq, "\tReceive Side Scaling: %s\n",
6465                    priv->dma_cap.rssen ? "Y" : "N");
6466         seq_printf(seq, "\tVLAN Hash Filtering: %s\n",
6467                    priv->dma_cap.vlhash ? "Y" : "N");
6468         seq_printf(seq, "\tSplit Header: %s\n",
6469                    priv->dma_cap.sphen ? "Y" : "N");
6470         seq_printf(seq, "\tVLAN TX Insertion: %s\n",
6471                    priv->dma_cap.vlins ? "Y" : "N");
6472         seq_printf(seq, "\tDouble VLAN: %s\n",
6473                    priv->dma_cap.dvlan ? "Y" : "N");
6474         seq_printf(seq, "\tNumber of L3/L4 Filters: %d\n",
6475                    priv->dma_cap.l3l4fnum);
6476         seq_printf(seq, "\tARP Offloading: %s\n",
6477                    priv->dma_cap.arpoffsel ? "Y" : "N");
6478         seq_printf(seq, "\tEnhancements to Scheduled Traffic (EST): %s\n",
6479                    priv->dma_cap.estsel ? "Y" : "N");
6480         seq_printf(seq, "\tFrame Preemption (FPE): %s\n",
6481                    priv->dma_cap.fpesel ? "Y" : "N");
6482         seq_printf(seq, "\tTime-Based Scheduling (TBS): %s\n",
6483                    priv->dma_cap.tbssel ? "Y" : "N");
6484         seq_printf(seq, "\tNumber of DMA Channels Enabled for TBS: %d\n",
6485                    priv->dma_cap.tbs_ch_num);
6486         seq_printf(seq, "\tPer-Stream Filtering: %s\n",
6487                    priv->dma_cap.sgfsel ? "Y" : "N");
6488         seq_printf(seq, "\tTX Timestamp FIFO Depth: %lu\n",
6489                    BIT(priv->dma_cap.ttsfd) >> 1);
6490         seq_printf(seq, "\tNumber of Traffic Classes: %d\n",
6491                    priv->dma_cap.numtc);
6492         seq_printf(seq, "\tDCB Feature: %s\n",
6493                    priv->dma_cap.dcben ? "Y" : "N");
6494         seq_printf(seq, "\tIEEE 1588 High Word Register: %s\n",
6495                    priv->dma_cap.advthword ? "Y" : "N");
6496         seq_printf(seq, "\tPTP Offload: %s\n",
6497                    priv->dma_cap.ptoen ? "Y" : "N");
6498         seq_printf(seq, "\tOne-Step Timestamping: %s\n",
6499                    priv->dma_cap.osten ? "Y" : "N");
6500         seq_printf(seq, "\tPriority-Based Flow Control: %s\n",
6501                    priv->dma_cap.pfcen ? "Y" : "N");
6502         seq_printf(seq, "\tNumber of Flexible RX Parser Instructions: %lu\n",
6503                    BIT(priv->dma_cap.frpes) << 6);
6504         seq_printf(seq, "\tNumber of Flexible RX Parser Parsable Bytes: %lu\n",
6505                    BIT(priv->dma_cap.frpbs) << 6);
6506         seq_printf(seq, "\tParallel Instruction Processor Engines: %d\n",
6507                    priv->dma_cap.frppipe_num);
6508         seq_printf(seq, "\tNumber of Extended VLAN Tag Filters: %lu\n",
6509                    priv->dma_cap.nrvf_num ?
6510                    (BIT(priv->dma_cap.nrvf_num) << 1) : 0);
6511         seq_printf(seq, "\tWidth of the Time Interval Field in GCL: %d\n",
6512                    priv->dma_cap.estwid ? 4 * priv->dma_cap.estwid + 12 : 0);
6513         seq_printf(seq, "\tDepth of GCL: %lu\n",
6514                    priv->dma_cap.estdep ? (BIT(priv->dma_cap.estdep) << 5) : 0);
6515         seq_printf(seq, "\tQueue/Channel-Based VLAN Tag Insertion on TX: %s\n",
6516                    priv->dma_cap.cbtisel ? "Y" : "N");
6517         seq_printf(seq, "\tNumber of Auxiliary Snapshot Inputs: %d\n",
6518                    priv->dma_cap.aux_snapshot_n);
6519         seq_printf(seq, "\tOne-Step Timestamping for PTP over UDP/IP: %s\n",
6520                    priv->dma_cap.pou_ost_en ? "Y" : "N");
6521         seq_printf(seq, "\tEnhanced DMA: %s\n",
6522                    priv->dma_cap.edma ? "Y" : "N");
6523         seq_printf(seq, "\tDifferent Descriptor Cache: %s\n",
6524                    priv->dma_cap.ediffc ? "Y" : "N");
6525         seq_printf(seq, "\tVxLAN/NVGRE: %s\n",
6526                    priv->dma_cap.vxn ? "Y" : "N");
6527         seq_printf(seq, "\tDebug Memory Interface: %s\n",
6528                    priv->dma_cap.dbgmem ? "Y" : "N");
6529         seq_printf(seq, "\tNumber of Policing Counters: %lu\n",
6530                    priv->dma_cap.pcsel ? BIT(priv->dma_cap.pcsel + 3) : 0);
6531         return 0;
6532 }
6533 DEFINE_SHOW_ATTRIBUTE(stmmac_dma_cap);
6534
6535 /* Use network device events to rename debugfs file entries.
6536  */
6537 static int stmmac_device_event(struct notifier_block *unused,
6538                                unsigned long event, void *ptr)
6539 {
6540         struct net_device *dev = netdev_notifier_info_to_dev(ptr);
6541         struct stmmac_priv *priv = netdev_priv(dev);
6542
6543         if (dev->netdev_ops != &stmmac_netdev_ops)
6544                 goto done;
6545
6546         switch (event) {
6547         case NETDEV_CHANGENAME:
6548                 if (priv->dbgfs_dir)
6549                         priv->dbgfs_dir = debugfs_rename(stmmac_fs_dir,
6550                                                          priv->dbgfs_dir,
6551                                                          stmmac_fs_dir,
6552                                                          dev->name);
6553                 break;
6554         }
6555 done:
6556         return NOTIFY_DONE;
6557 }
6558
6559 static struct notifier_block stmmac_notifier = {
6560         .notifier_call = stmmac_device_event,
6561 };
6562
6563 static void stmmac_init_fs(struct net_device *dev)
6564 {
6565         struct stmmac_priv *priv = netdev_priv(dev);
6566
6567         rtnl_lock();
6568
6569         /* Create per netdev entries */
6570         priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);
6571
6572         /* Entry to report DMA RX/TX rings */
6573         debugfs_create_file("descriptors_status", 0444, priv->dbgfs_dir, dev,
6574                             &stmmac_rings_status_fops);
6575
6576         /* Entry to report the DMA HW features */
6577         debugfs_create_file("dma_cap", 0444, priv->dbgfs_dir, dev,
6578                             &stmmac_dma_cap_fops);
6579
6580         rtnl_unlock();
6581 }
6582
6583 static void stmmac_exit_fs(struct net_device *dev)
6584 {
6585         struct stmmac_priv *priv = netdev_priv(dev);
6586
6587         debugfs_remove_recursive(priv->dbgfs_dir);
6588 }
6589 #endif /* CONFIG_DEBUG_FS */
6590
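/**
 *  stmmac_vid_crc32_le - CRC32 of a little-endian VLAN ID
 *  @vid_le: VLAN ID in little-endian order
 *  Description: bit-serial CRC32 (polynomial 0xedb88320) over the 12 VID
 *  bits, used by stmmac_vlan_update() to select a bin of the HW VLAN
 *  hash filter.
 */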
6591 static u32 stmmac_vid_crc32_le(__le16 vid_le)
6592 {
6593         unsigned char *data = (unsigned char *)&vid_le;
6594         unsigned char data_byte = 0;
6595         u32 crc = ~0x0;
6596         u32 temp = 0;
6597         int i, bits;
6598
6599         bits = get_bitmask_order(VLAN_VID_MASK);
6600         for (i = 0; i < bits; i++) {
6601                 if ((i % 8) == 0)
6602                         data_byte = data[i / 8];
6603
6604                 temp = ((crc & 1) ^ data_byte) & 1;
6605                 crc >>= 1;
6606                 data_byte >>= 1;
6607
6608                 if (temp)
6609                         crc ^= 0xedb88320;
6610         }
6611
6612         return crc;
6613 }
6614
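/**
 *  stmmac_vlan_update - reprogram the HW VLAN filter
 *  @priv: driver private structure
 *  @is_double: true for 802.1AD (double tagged) traffic
 *  Description: rebuilds the 16-bit VLAN hash from the active VID bitmap,
 *  one bit per CRC32-derived bin. Without the VLAN hash capability it
 *  falls back to a single perfect-match VID and returns -EOPNOTSUPP when
 *  more than one VID (besides VID 0) is configured.
 */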
6615 static int stmmac_vlan_update(struct stmmac_priv *priv, bool is_double)
6616 {
6617         u32 crc, hash = 0;
6618         __le16 pmatch = 0;
6619         int count = 0;
6620         u16 vid = 0;
6621
6622         for_each_set_bit(vid, priv->active_vlans, VLAN_N_VID) {
6623                 __le16 vid_le = cpu_to_le16(vid);
6624                 crc = bitrev32(~stmmac_vid_crc32_le(vid_le)) >> 28;
6625                 hash |= (1 << crc);
6626                 count++;
6627         }
6628
6629         if (!priv->dma_cap.vlhash) {
6630                 if (count > 2) /* VID = 0 always passes filter */
6631                         return -EOPNOTSUPP;
6632
6633                 pmatch = cpu_to_le16(vid);
6634                 hash = 0;
6635         }
6636
6637         return stmmac_update_vlan_hash(priv, priv->hw, hash, pmatch, is_double);
6638 }
6639
6640 static int stmmac_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
6641 {
6642         struct stmmac_priv *priv = netdev_priv(ndev);
6643         bool is_double = false;
6644         int ret;
6645
6646         ret = pm_runtime_resume_and_get(priv->device);
6647         if (ret < 0)
6648                 return ret;
6649
6650         if (be16_to_cpu(proto) == ETH_P_8021AD)
6651                 is_double = true;
6652
6653         set_bit(vid, priv->active_vlans);
6654         ret = stmmac_vlan_update(priv, is_double);
6655         if (ret) {
6656                 clear_bit(vid, priv->active_vlans);
6657                 goto err_pm_put;
6658         }
6659
6660         if (priv->hw->num_vlan) {
6661                 ret = stmmac_add_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
6662                 if (ret)
6663                         goto err_pm_put;
6664         }
6665 err_pm_put:
6666         pm_runtime_put(priv->device);
6667
6668         return ret;
6669 }
6670
6671 static int stmmac_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
6672 {
6673         struct stmmac_priv *priv = netdev_priv(ndev);
6674         bool is_double = false;
6675         int ret;
6676
6677         ret = pm_runtime_resume_and_get(priv->device);
6678         if (ret < 0)
6679                 return ret;
6680
6681         if (be16_to_cpu(proto) == ETH_P_8021AD)
6682                 is_double = true;
6683
6684         clear_bit(vid, priv->active_vlans);
6685
6686         if (priv->hw->num_vlan) {
6687                 ret = stmmac_del_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
6688                 if (ret)
6689                         goto del_vlan_error;
6690         }
6691
6692         ret = stmmac_vlan_update(priv, is_double);
6693
6694 del_vlan_error:
6695         pm_runtime_put(priv->device);
6696
6697         return ret;
6698 }
6699
6700 static int stmmac_bpf(struct net_device *dev, struct netdev_bpf *bpf)
6701 {
6702         struct stmmac_priv *priv = netdev_priv(dev);
6703
6704         switch (bpf->command) {
6705         case XDP_SETUP_PROG:
6706                 return stmmac_xdp_set_prog(priv, bpf->prog, bpf->extack);
6707         case XDP_SETUP_XSK_POOL:
6708                 return stmmac_xdp_setup_pool(priv, bpf->xsk.pool,
6709                                              bpf->xsk.queue_id);
6710         default:
6711                 return -EOPNOTSUPP;
6712         }
6713 }
6714
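/**
 *  stmmac_xdp_xmit - ndo_xdp_xmit callback
 *  @dev: device pointer
 *  @num_frames: number of XDP frames to transmit
 *  @frames: array of XDP frames
 *  @flags: XDP_XMIT_* flags
 *  Description: transmits redirected XDP frames on the per-CPU XDP TX
 *  queue, sharing the queue lock with the slow path. Returns the number
 *  of frames actually queued.
 */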
6715 static int stmmac_xdp_xmit(struct net_device *dev, int num_frames,
6716                            struct xdp_frame **frames, u32 flags)
6717 {
6718         struct stmmac_priv *priv = netdev_priv(dev);
6719         int cpu = smp_processor_id();
6720         struct netdev_queue *nq;
6721         int i, nxmit = 0;
6722         int queue;
6723
6724         if (unlikely(test_bit(STMMAC_DOWN, &priv->state)))
6725                 return -ENETDOWN;
6726
6727         if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
6728                 return -EINVAL;
6729
6730         queue = stmmac_xdp_get_tx_queue(priv, cpu);
6731         nq = netdev_get_tx_queue(priv->dev, queue);
6732
6733         __netif_tx_lock(nq, cpu);
6734         /* Avoids TX time-out as we are sharing with slow path */
6735         txq_trans_cond_update(nq);
6736
6737         for (i = 0; i < num_frames; i++) {
6738                 int res;
6739
6740                 res = stmmac_xdp_xmit_xdpf(priv, queue, frames[i], true);
6741                 if (res == STMMAC_XDP_CONSUMED)
6742                         break;
6743
6744                 nxmit++;
6745         }
6746
6747         if (flags & XDP_XMIT_FLUSH) {
6748                 stmmac_flush_tx_descriptors(priv, queue);
6749                 stmmac_tx_timer_arm(priv, queue);
6750         }
6751
6752         __netif_tx_unlock(nq);
6753
6754         return nxmit;
6755 }
6756
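/**
 *  stmmac_disable_rx_queue - tear down a single RX queue
 *  @priv: driver private structure
 *  @queue: RX queue index
 *  Description: masks the RX DMA interrupt for the channel, stops the RX
 *  DMA and frees the queue descriptor resources;
 *  stmmac_enable_rx_queue() rebuilds and restarts the queue.
 */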
6757 void stmmac_disable_rx_queue(struct stmmac_priv *priv, u32 queue)
6758 {
6759         struct stmmac_channel *ch = &priv->channel[queue];
6760         unsigned long flags;
6761
6762         spin_lock_irqsave(&ch->lock, flags);
6763         stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 1, 0);
6764         spin_unlock_irqrestore(&ch->lock, flags);
6765
6766         stmmac_stop_rx_dma(priv, queue);
6767         __free_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6768 }
6769
6770 void stmmac_enable_rx_queue(struct stmmac_priv *priv, u32 queue)
6771 {
6772         struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
6773         struct stmmac_channel *ch = &priv->channel[queue];
6774         unsigned long flags;
6775         u32 buf_size;
6776         int ret;
6777
6778         ret = __alloc_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6779         if (ret) {
6780                 netdev_err(priv->dev, "Failed to alloc RX desc.\n");
6781                 return;
6782         }
6783
6784         ret = __init_dma_rx_desc_rings(priv, &priv->dma_conf, queue, GFP_KERNEL);
6785         if (ret) {
6786                 __free_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6787                 netdev_err(priv->dev, "Failed to init RX desc.\n");
6788                 return;
6789         }
6790
6791         stmmac_reset_rx_queue(priv, queue);
6792         stmmac_clear_rx_descriptors(priv, &priv->dma_conf, queue);
6793
6794         stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6795                             rx_q->dma_rx_phy, rx_q->queue_index);
6796
6797         rx_q->rx_tail_addr = rx_q->dma_rx_phy + (rx_q->buf_alloc_num *
6798                              sizeof(struct dma_desc));
6799         stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
6800                                rx_q->rx_tail_addr, rx_q->queue_index);
6801
6802         if (rx_q->xsk_pool && rx_q->buf_alloc_num) {
6803                 buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
6804                 stmmac_set_dma_bfsize(priv, priv->ioaddr,
6805                                       buf_size,
6806                                       rx_q->queue_index);
6807         } else {
6808                 stmmac_set_dma_bfsize(priv, priv->ioaddr,
6809                                       priv->dma_conf.dma_buf_sz,
6810                                       rx_q->queue_index);
6811         }
6812
6813         stmmac_start_rx_dma(priv, queue);
6814
6815         spin_lock_irqsave(&ch->lock, flags);
6816         stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 1, 0);
6817         spin_unlock_irqrestore(&ch->lock, flags);
6818 }
6819
6820 void stmmac_disable_tx_queue(struct stmmac_priv *priv, u32 queue)
6821 {
6822         struct stmmac_channel *ch = &priv->channel[queue];
6823         unsigned long flags;
6824
6825         spin_lock_irqsave(&ch->lock, flags);
6826         stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 0, 1);
6827         spin_unlock_irqrestore(&ch->lock, flags);
6828
6829         stmmac_stop_tx_dma(priv, queue);
6830         __free_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6831 }
6832
6833 void stmmac_enable_tx_queue(struct stmmac_priv *priv, u32 queue)
6834 {
6835         struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
6836         struct stmmac_channel *ch = &priv->channel[queue];
6837         unsigned long flags;
6838         int ret;
6839
6840         ret = __alloc_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6841         if (ret) {
6842                 netdev_err(priv->dev, "Failed to alloc TX desc.\n");
6843                 return;
6844         }
6845
6846         ret = __init_dma_tx_desc_rings(priv, &priv->dma_conf, queue);
6847         if (ret) {
6848                 __free_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6849                 netdev_err(priv->dev, "Failed to init TX desc.\n");
6850                 return;
6851         }
6852
6853         stmmac_reset_tx_queue(priv, queue);
6854         stmmac_clear_tx_descriptors(priv, &priv->dma_conf, queue);
6855
6856         stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6857                             tx_q->dma_tx_phy, tx_q->queue_index);
6858
6859         if (tx_q->tbs & STMMAC_TBS_AVAIL)
6860                 stmmac_enable_tbs(priv, priv->ioaddr, 1, tx_q->queue_index);
6861
6862         tx_q->tx_tail_addr = tx_q->dma_tx_phy;
6863         stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
6864                                tx_q->tx_tail_addr, tx_q->queue_index);
6865
6866         stmmac_start_tx_dma(priv, queue);
6867
6868         spin_lock_irqsave(&ch->lock, flags);
6869         stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 0, 1);
6870         spin_unlock_irqrestore(&ch->lock, flags);
6871 }
6872
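/**
 *  stmmac_xdp_release - release the data path for an XDP reconfiguration
 *  @dev: device pointer
 *  Description: stops TX, NAPI, the DMA channels and the MAC, frees the
 *  IRQ lines and descriptor resources so that the XDP setup code can
 *  rebuild them.
 */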
6873 void stmmac_xdp_release(struct net_device *dev)
6874 {
6875         struct stmmac_priv *priv = netdev_priv(dev);
6876         u32 chan;
6877
6878         /* Ensure tx function is not running */
6879         netif_tx_disable(dev);
6880
6881         /* Disable NAPI process */
6882         stmmac_disable_all_queues(priv);
6883
6884         for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
6885                 hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
6886
6887         /* Free the IRQ lines */
6888         stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);
6889
6890         /* Stop TX/RX DMA channels */
6891         stmmac_stop_all_dma(priv);
6892
6893         /* Release and free the Rx/Tx resources */
6894         free_dma_desc_resources(priv, &priv->dma_conf);
6895
6896         /* Disable the MAC Rx/Tx */
6897         stmmac_mac_set(priv, priv->ioaddr, false);
6898
6899         /* set trans_start so we don't get spurious
6900          * watchdogs during reset
6901          */
6902         netif_trans_update(dev);
6903         netif_carrier_off(dev);
6904 }
6905
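/**
 *  stmmac_xdp_open - bring the data path back up after an XDP change
 *  @dev: device pointer
 *  Description: re-allocates and re-initializes the descriptor rings,
 *  reprograms the RX/TX DMA channels (using the XSK pool frame size when
 *  a pool is attached), requests the IRQs and restarts the MAC, DMA and
 *  NAPI instances.
 */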
6906 int stmmac_xdp_open(struct net_device *dev)
6907 {
6908         struct stmmac_priv *priv = netdev_priv(dev);
6909         u32 rx_cnt = priv->plat->rx_queues_to_use;
6910         u32 tx_cnt = priv->plat->tx_queues_to_use;
6911         u32 dma_csr_ch = max(rx_cnt, tx_cnt);
6912         struct stmmac_rx_queue *rx_q;
6913         struct stmmac_tx_queue *tx_q;
6914         u32 buf_size;
6915         bool sph_en;
6916         u32 chan;
6917         int ret;
6918
6919         ret = alloc_dma_desc_resources(priv, &priv->dma_conf);
6920         if (ret < 0) {
6921                 netdev_err(dev, "%s: DMA descriptors allocation failed\n",
6922                            __func__);
6923                 goto dma_desc_error;
6924         }
6925
6926         ret = init_dma_desc_rings(dev, &priv->dma_conf, GFP_KERNEL);
6927         if (ret < 0) {
6928                 netdev_err(dev, "%s: DMA descriptors initialization failed\n",
6929                            __func__);
6930                 goto init_error;
6931         }
6932
6933         stmmac_reset_queues_param(priv);
6934
6935         /* DMA CSR Channel configuration */
6936         for (chan = 0; chan < dma_csr_ch; chan++) {
6937                 stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
6938                 stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
6939         }
6940
6941         /* Adjust Split header */
6942         sph_en = (priv->hw->rx_csum > 0) && priv->sph;
6943
6944         /* DMA RX Channel Configuration */
6945         for (chan = 0; chan < rx_cnt; chan++) {
6946                 rx_q = &priv->dma_conf.rx_queue[chan];
6947
6948                 stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6949                                     rx_q->dma_rx_phy, chan);
6950
6951                 rx_q->rx_tail_addr = rx_q->dma_rx_phy +
6952                                      (rx_q->buf_alloc_num *
6953                                       sizeof(struct dma_desc));
6954                 stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
6955                                        rx_q->rx_tail_addr, chan);
6956
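                     /* With an XSK pool attached and buffers allocated, the
                      * DMA buffer size is taken from the pool's frame size;
                      * otherwise the default buffer size is used.
                      */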
6957                 if (rx_q->xsk_pool && rx_q->buf_alloc_num) {
6958                         buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
6959                         stmmac_set_dma_bfsize(priv, priv->ioaddr,
6960                                               buf_size,
6961                                               rx_q->queue_index);
6962                 } else {
6963                         stmmac_set_dma_bfsize(priv, priv->ioaddr,
6964                                               priv->dma_conf.dma_buf_sz,
6965                                               rx_q->queue_index);
6966                 }
6967
6968                 stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
6969         }
6970
6971         /* DMA TX Channel Configuration */
6972         for (chan = 0; chan < tx_cnt; chan++) {
6973                 tx_q = &priv->dma_conf.tx_queue[chan];
6974
6975                 stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6976                                     tx_q->dma_tx_phy, chan);
6977
6978                 tx_q->tx_tail_addr = tx_q->dma_tx_phy;
6979                 stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
6980                                        tx_q->tx_tail_addr, chan);
6981
6982                 hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
6983                 tx_q->txtimer.function = stmmac_tx_timer;
6984         }
6985
6986         /* Enable the MAC Rx/Tx */
6987         stmmac_mac_set(priv, priv->ioaddr, true);
6988
6989         /* Start Rx & Tx DMA Channels */
6990         stmmac_start_all_dma(priv);
6991
6992         ret = stmmac_request_irq(dev);
6993         if (ret)
6994                 goto irq_error;
6995
6996         /* Enable NAPI process*/
6997         stmmac_enable_all_queues(priv);
6998         netif_carrier_on(dev);
6999         netif_tx_start_all_queues(dev);
7000         stmmac_enable_all_dma_irq(priv);
7001
7002         return 0;
7003
7004 irq_error:
7005         for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
7006                 hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
7007
7008         stmmac_hw_teardown(dev);
7009 init_error:
7010         free_dma_desc_resources(priv, &priv->dma_conf);
7011 dma_desc_error:
7012         return ret;
7013 }
7014
7015 int stmmac_xsk_wakeup(struct net_device *dev, u32 queue, u32 flags)
7016 {
7017         struct stmmac_priv *priv = netdev_priv(dev);
7018         struct stmmac_rx_queue *rx_q;
7019         struct stmmac_tx_queue *tx_q;
7020         struct stmmac_channel *ch;
7021
7022         if (test_bit(STMMAC_DOWN, &priv->state) ||
7023             !netif_carrier_ok(priv->dev))
7024                 return -ENETDOWN;
7025
7026         if (!stmmac_xdp_is_enabled(priv))
7027                 return -EINVAL;
7028
7029         if (queue >= priv->plat->rx_queues_to_use ||
7030             queue >= priv->plat->tx_queues_to_use)
7031                 return -EINVAL;
7032
7033         rx_q = &priv->dma_conf.rx_queue[queue];
7034         tx_q = &priv->dma_conf.tx_queue[queue];
7035         ch = &priv->channel[queue];
7036
7037         if (!rx_q->xsk_pool && !tx_q->xsk_pool)
7038                 return -EINVAL;
7039
7040         if (!napi_if_scheduled_mark_missed(&ch->rxtx_napi)) {
7041                 /* EQoS does not have a per-DMA channel SW interrupt,
7042                  * so schedule the rxtx NAPI straight away.
7043                  */
7044                 if (likely(napi_schedule_prep(&ch->rxtx_napi)))
7045                         __napi_schedule(&ch->rxtx_napi);
7046         }
7047
7048         return 0;
7049 }
7050
7051 static void stmmac_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
7052 {
7053         struct stmmac_priv *priv = netdev_priv(dev);
7054         u32 tx_cnt = priv->plat->tx_queues_to_use;
7055         u32 rx_cnt = priv->plat->rx_queues_to_use;
7056         unsigned int start;
7057         int q;
7058
7059         for (q = 0; q < tx_cnt; q++) {
7060                 struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[q];
7061                 u64 tx_packets;
7062                 u64 tx_bytes;
7063
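                     /* Snapshot the per-queue counters under the u64_stats
                      * seqcount so 64-bit reads are consistent on 32-bit
                      * systems; retry if a writer updated them meanwhile.
                      */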
7064                 do {
7065                         start = u64_stats_fetch_begin(&txq_stats->syncp);
7066                         tx_packets = txq_stats->tx_packets;
7067                         tx_bytes   = txq_stats->tx_bytes;
7068                 } while (u64_stats_fetch_retry(&txq_stats->syncp, start));
7069
7070                 stats->tx_packets += tx_packets;
7071                 stats->tx_bytes += tx_bytes;
7072         }
7073
7074         for (q = 0; q < rx_cnt; q++) {
7075                 struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[q];
7076                 u64 rx_packets;
7077                 u64 rx_bytes;
7078
7079                 do {
7080                         start = u64_stats_fetch_begin(&rxq_stats->syncp);
7081                         rx_packets = rxq_stats->rx_packets;
7082                         rx_bytes   = rxq_stats->rx_bytes;
7083                 } while (u64_stats_fetch_retry(&rxq_stats->syncp, start));
7084
7085                 stats->rx_packets += rx_packets;
7086                 stats->rx_bytes += rx_bytes;
7087         }
7088
7089         stats->rx_dropped = priv->xstats.rx_dropped;
7090         stats->rx_errors = priv->xstats.rx_errors;
7091         stats->tx_dropped = priv->xstats.tx_dropped;
7092         stats->tx_errors = priv->xstats.tx_errors;
7093         stats->tx_carrier_errors = priv->xstats.tx_losscarrier + priv->xstats.tx_carrier;
7094         stats->collisions = priv->xstats.tx_collision + priv->xstats.rx_collision;
7095         stats->rx_length_errors = priv->xstats.rx_length;
7096         stats->rx_crc_errors = priv->xstats.rx_crc_errors;
7097         stats->rx_over_errors = priv->xstats.rx_overflow_cntr;
7098         stats->rx_missed_errors = priv->xstats.rx_missed_cntr;
7099 }
7100
7101 static const struct net_device_ops stmmac_netdev_ops = {
7102         .ndo_open = stmmac_open,
7103         .ndo_start_xmit = stmmac_xmit,
7104         .ndo_stop = stmmac_release,
7105         .ndo_change_mtu = stmmac_change_mtu,
7106         .ndo_fix_features = stmmac_fix_features,
7107         .ndo_set_features = stmmac_set_features,
7108         .ndo_set_rx_mode = stmmac_set_rx_mode,
7109         .ndo_tx_timeout = stmmac_tx_timeout,
7110         .ndo_eth_ioctl = stmmac_ioctl,
7111         .ndo_get_stats64 = stmmac_get_stats64,
7112         .ndo_setup_tc = stmmac_setup_tc,
7113         .ndo_select_queue = stmmac_select_queue,
7114         .ndo_set_mac_address = stmmac_set_mac_address,
7115         .ndo_vlan_rx_add_vid = stmmac_vlan_rx_add_vid,
7116         .ndo_vlan_rx_kill_vid = stmmac_vlan_rx_kill_vid,
7117         .ndo_bpf = stmmac_bpf,
7118         .ndo_xdp_xmit = stmmac_xdp_xmit,
7119         .ndo_xsk_wakeup = stmmac_xsk_wakeup,
7120 };
7121
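     /* Descriptive note: the service-task reset path serialises against other
      * resets and, unless the interface is already going down, bounces the
      * device with dev_close()/dev_open() under the RTNL lock.
      */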
7122 static void stmmac_reset_subtask(struct stmmac_priv *priv)
7123 {
7124         if (!test_and_clear_bit(STMMAC_RESET_REQUESTED, &priv->state))
7125                 return;
7126         if (test_bit(STMMAC_DOWN, &priv->state))
7127                 return;
7128
7129         netdev_err(priv->dev, "Reset adapter.\n");
7130
7131         rtnl_lock();
7132         netif_trans_update(priv->dev);
7133         while (test_and_set_bit(STMMAC_RESETING, &priv->state))
7134                 usleep_range(1000, 2000);
7135
7136         set_bit(STMMAC_DOWN, &priv->state);
7137         dev_close(priv->dev);
7138         dev_open(priv->dev, NULL);
7139         clear_bit(STMMAC_DOWN, &priv->state);
7140         clear_bit(STMMAC_RESETING, &priv->state);
7141         rtnl_unlock();
7142 }
7143
7144 static void stmmac_service_task(struct work_struct *work)
7145 {
7146         struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
7147                         service_task);
7148
7149         stmmac_reset_subtask(priv);
7150         clear_bit(STMMAC_SERVICE_SCHED, &priv->state);
7151 }
7152
7153 /**
7154  *  stmmac_hw_init - Init the MAC device
7155  *  @priv: driver private structure
7156  *  Description: this function configures the MAC device according to
7157  *  platform parameters or the HW capability register. It prepares the
7158  *  driver to use either ring or chain mode and to set up either enhanced or
7159  *  normal descriptors.
7160  */
7161 static int stmmac_hw_init(struct stmmac_priv *priv)
7162 {
7163         int ret;
7164
7165         /* dwmac-sun8i only works in chain mode */
7166         if (priv->plat->flags & STMMAC_FLAG_HAS_SUN8I)
7167                 chain_mode = 1;
7168         priv->chain_mode = chain_mode;
7169
7170         /* Initialize HW Interface */
7171         ret = stmmac_hwif_init(priv);
7172         if (ret)
7173                 return ret;
7174
7175         /* Get the HW capabilities (available on GMAC cores newer than 3.50a) */
7176         priv->hw_cap_support = stmmac_get_hw_features(priv);
7177         if (priv->hw_cap_support) {
7178                 dev_info(priv->device, "DMA HW capability register supported\n");
7179
7180                 /* Some gmac/dma configuration fields (e.g. enh_desc,
7181                  * tx_coe) that are passed through the platform can be
7182                  * overridden with the values from the HW capability
7183                  * register, if supported.
7184                  */
7185                 priv->plat->enh_desc = priv->dma_cap.enh_desc;
7186                 priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up &&
7187                                 !(priv->plat->flags & STMMAC_FLAG_USE_PHY_WOL);
7188                 priv->hw->pmt = priv->plat->pmt;
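                     /* hash_tb_sz encodes the MAC hash filter size: BIT(n) << 5
                      * yields 64/128/256 multicast filter bins for n = 1/2/3.
                      */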
7189                 if (priv->dma_cap.hash_tb_sz) {
7190                         priv->hw->multicast_filter_bins =
7191                                         (BIT(priv->dma_cap.hash_tb_sz) << 5);
7192                         priv->hw->mcast_bits_log2 =
7193                                         ilog2(priv->hw->multicast_filter_bins);
7194                 }
7195
7196                 /* TXCOE doesn't work in thresh DMA mode */
7197                 if (priv->plat->force_thresh_dma_mode)
7198                         priv->plat->tx_coe = 0;
7199                 else
7200                         priv->plat->tx_coe = priv->dma_cap.tx_coe;
7201
7202                 /* In case of GMAC4 rx_coe is from HW cap register. */
7203                 priv->plat->rx_coe = priv->dma_cap.rx_coe;
7204
7205                 if (priv->dma_cap.rx_coe_type2)
7206                         priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
7207                 else if (priv->dma_cap.rx_coe_type1)
7208                         priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;
7209
7210         } else {
7211                 dev_info(priv->device, "No HW DMA feature register supported\n");
7212         }
7213
7214         if (priv->plat->rx_coe) {
7215                 priv->hw->rx_csum = priv->plat->rx_coe;
7216                 dev_info(priv->device, "RX Checksum Offload Engine supported\n");
7217                 if (priv->synopsys_id < DWMAC_CORE_4_00)
7218                         dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum);
7219         }
7220         if (priv->plat->tx_coe)
7221                 dev_info(priv->device, "TX Checksum insertion supported\n");
7222
7223         if (priv->plat->pmt) {
7224                 dev_info(priv->device, "Wake-Up On Lan supported\n");
7225                 device_set_wakeup_capable(priv->device, 1);
7226         }
7227
7228         if (priv->dma_cap.tsoen)
7229                 dev_info(priv->device, "TSO supported\n");
7230
7231         priv->hw->vlan_fail_q_en =
7232                 (priv->plat->flags & STMMAC_FLAG_VLAN_FAIL_Q_EN);
7233         priv->hw->vlan_fail_q = priv->plat->vlan_fail_q;
7234
7235         /* Run HW quirks, if any */
7236         if (priv->hwif_quirks) {
7237                 ret = priv->hwif_quirks(priv);
7238                 if (ret)
7239                         return ret;
7240         }
7241
7242         /* Rx Watchdog is available in cores newer than 3.40.
7243          * In some cases, for example on buggy HW, this feature
7244          * has to be disabled; this can be done by passing the
7245          * riwt_off field from the platform.
7246          */
7247         if (((priv->synopsys_id >= DWMAC_CORE_3_50) ||
7248             (priv->plat->has_xgmac)) && (!priv->plat->riwt_off)) {
7249                 priv->use_riwt = 1;
7250                 dev_info(priv->device,
7251                          "Enable RX Mitigation via HW Watchdog Timer\n");
7252         }
7253
7254         return 0;
7255 }
7256
7257 static void stmmac_napi_add(struct net_device *dev)
7258 {
7259         struct stmmac_priv *priv = netdev_priv(dev);
7260         u32 queue, maxq;
7261
7262         maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
7263
7264         for (queue = 0; queue < maxq; queue++) {
7265                 struct stmmac_channel *ch = &priv->channel[queue];
7266
7267                 ch->priv_data = priv;
7268                 ch->index = queue;
7269                 spin_lock_init(&ch->lock);
7270
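                     /* Each channel gets dedicated RX and TX NAPI contexts; a
                      * combined rxtx NAPI is also registered for channels that
                      * serve both directions (used by the XSK zero-copy path).
                      */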
7271                 if (queue < priv->plat->rx_queues_to_use) {
7272                         netif_napi_add(dev, &ch->rx_napi, stmmac_napi_poll_rx);
7273                 }
7274                 if (queue < priv->plat->tx_queues_to_use) {
7275                         netif_napi_add_tx(dev, &ch->tx_napi,
7276                                           stmmac_napi_poll_tx);
7277                 }
7278                 if (queue < priv->plat->rx_queues_to_use &&
7279                     queue < priv->plat->tx_queues_to_use) {
7280                         netif_napi_add(dev, &ch->rxtx_napi,
7281                                        stmmac_napi_poll_rxtx);
7282                 }
7283         }
7284 }
7285
7286 static void stmmac_napi_del(struct net_device *dev)
7287 {
7288         struct stmmac_priv *priv = netdev_priv(dev);
7289         u32 queue, maxq;
7290
7291         maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
7292
7293         for (queue = 0; queue < maxq; queue++) {
7294                 struct stmmac_channel *ch = &priv->channel[queue];
7295
7296                 if (queue < priv->plat->rx_queues_to_use)
7297                         netif_napi_del(&ch->rx_napi);
7298                 if (queue < priv->plat->tx_queues_to_use)
7299                         netif_napi_del(&ch->tx_napi);
7300                 if (queue < priv->plat->rx_queues_to_use &&
7301                     queue < priv->plat->tx_queues_to_use) {
7302                         netif_napi_del(&ch->rxtx_napi);
7303                 }
7304         }
7305 }
7306
7307 int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt)
7308 {
7309         struct stmmac_priv *priv = netdev_priv(dev);
7310         int ret = 0, i;
7311
7312         if (netif_running(dev))
7313                 stmmac_release(dev);
7314
7315         stmmac_napi_del(dev);
7316
7317         priv->plat->rx_queues_to_use = rx_cnt;
7318         priv->plat->tx_queues_to_use = tx_cnt;
7319         if (!netif_is_rxfh_configured(dev))
7320                 for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
7321                         priv->rss.table[i] = ethtool_rxfh_indir_default(i,
7322                                                                         rx_cnt);
7323
7324         stmmac_set_half_duplex(priv);
7325         stmmac_napi_add(dev);
7326
7327         if (netif_running(dev))
7328                 ret = stmmac_open(dev);
7329
7330         return ret;
7331 }
7332
7333 int stmmac_reinit_ringparam(struct net_device *dev, u32 rx_size, u32 tx_size)
7334 {
7335         struct stmmac_priv *priv = netdev_priv(dev);
7336         int ret = 0;
7337
7338         if (netif_running(dev))
7339                 stmmac_release(dev);
7340
7341         priv->dma_conf.dma_rx_size = rx_size;
7342         priv->dma_conf.dma_tx_size = tx_size;
7343
7344         if (netif_running(dev))
7345                 ret = stmmac_open(dev);
7346
7347         return ret;
7348 }
7349
7350 #define SEND_VERIFY_MPACKET_FMT "Send Verify mPacket lo_state=%d lp_state=%d\n"
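     /* Descriptive note: the FPE link-partner handshake worker re-sends verify
      * mPackets while either side is still negotiating and programs FPE in
      * hardware once both the local and link-partner state machines report
      * ENTERING_ON.
      */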
7351 static void stmmac_fpe_lp_task(struct work_struct *work)
7352 {
7353         struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
7354                                                 fpe_task);
7355         struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
7356         enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
7357         enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
7358         bool *hs_enable = &fpe_cfg->hs_enable;
7359         bool *enable = &fpe_cfg->enable;
7360         int retries = 20;
7361
7362         while (retries-- > 0) {
7363                 /* Bail out immediately if FPE handshake is OFF */
7364                 if (*lo_state == FPE_STATE_OFF || !*hs_enable)
7365                         break;
7366
7367                 if (*lo_state == FPE_STATE_ENTERING_ON &&
7368                     *lp_state == FPE_STATE_ENTERING_ON) {
7369                         stmmac_fpe_configure(priv, priv->ioaddr,
7370                                              fpe_cfg,
7371                                              priv->plat->tx_queues_to_use,
7372                                              priv->plat->rx_queues_to_use,
7373                                              *enable);
7374
7375                         netdev_info(priv->dev, "configured FPE\n");
7376
7377                         *lo_state = FPE_STATE_ON;
7378                         *lp_state = FPE_STATE_ON;
7379                         netdev_info(priv->dev, "!!! BOTH FPE stations ON\n");
7380                         break;
7381                 }
7382
7383                 if ((*lo_state == FPE_STATE_CAPABLE ||
7384                      *lo_state == FPE_STATE_ENTERING_ON) &&
7385                      *lp_state != FPE_STATE_ON) {
7386                         netdev_info(priv->dev, SEND_VERIFY_MPACKET_FMT,
7387                                     *lo_state, *lp_state);
7388                         stmmac_fpe_send_mpacket(priv, priv->ioaddr,
7389                                                 fpe_cfg,
7390                                                 MPACKET_VERIFY);
7391                 }
7392                 /* Sleep then retry */
7393                 msleep(500);
7394         }
7395
7396         clear_bit(__FPE_TASK_SCHED, &priv->fpe_task_state);
7397 }
7398
7399 void stmmac_fpe_handshake(struct stmmac_priv *priv, bool enable)
7400 {
7401         if (priv->plat->fpe_cfg->hs_enable != enable) {
7402                 if (enable) {
7403                         stmmac_fpe_send_mpacket(priv, priv->ioaddr,
7404                                                 priv->plat->fpe_cfg,
7405                                                 MPACKET_VERIFY);
7406                 } else {
7407                         priv->plat->fpe_cfg->lo_fpe_state = FPE_STATE_OFF;
7408                         priv->plat->fpe_cfg->lp_fpe_state = FPE_STATE_OFF;
7409                 }
7410
7411                 priv->plat->fpe_cfg->hs_enable = enable;
7412         }
7413 }
7414
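     /* XDP RX metadata hook: lets XDP programs query the hardware RX timestamp
      * of the current frame through the xdp_metadata_ops kfunc interface.
      */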
7415 static int stmmac_xdp_rx_timestamp(const struct xdp_md *_ctx, u64 *timestamp)
7416 {
7417         const struct stmmac_xdp_buff *ctx = (void *)_ctx;
7418         struct dma_desc *desc_contains_ts = ctx->desc;
7419         struct stmmac_priv *priv = ctx->priv;
7420         struct dma_desc *ndesc = ctx->ndesc;
7421         struct dma_desc *desc = ctx->desc;
7422         u64 ns = 0;
7423
7424         if (!priv->hwts_rx_en)
7425                 return -ENODATA;
7426
7427         /* For GMAC4, the valid timestamp comes from the context (CTX) descriptor that follows. */
7428         if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
7429                 desc_contains_ts = ndesc;
7430
7431         /* Check if timestamp is available */
7432         if (stmmac_get_rx_timestamp_status(priv, desc, ndesc, priv->adv_ts)) {
7433                 stmmac_get_timestamp(priv, desc_contains_ts, priv->adv_ts, &ns);
7434                 ns -= priv->plat->cdc_error_adj;
7435                 *timestamp = ns_to_ktime(ns);
7436                 return 0;
7437         }
7438
7439         return -ENODATA;
7440 }
7441
7442 static const struct xdp_metadata_ops stmmac_xdp_metadata_ops = {
7443         .xmo_rx_timestamp               = stmmac_xdp_rx_timestamp,
7444 };
7445
7446 /**
7447  * stmmac_dvr_probe
7448  * @device: device pointer
7449  * @plat_dat: platform data pointer
7450  * @res: stmmac resource pointer
7451  * Description: this is the main probe function, used to
7452  * call alloc_etherdev and allocate the priv structure.
7453  * Return:
7454  * returns 0 on success, otherwise a negative errno.
7455  */
7456 int stmmac_dvr_probe(struct device *device,
7457                      struct plat_stmmacenet_data *plat_dat,
7458                      struct stmmac_resources *res)
7459 {
7460         struct net_device *ndev = NULL;
7461         struct stmmac_priv *priv;
7462         u32 rxq;
7463         int i, ret = 0;
7464
7465         ndev = devm_alloc_etherdev_mqs(device, sizeof(struct stmmac_priv),
7466                                        MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES);
7467         if (!ndev)
7468                 return -ENOMEM;
7469
7470         SET_NETDEV_DEV(ndev, device);
7471
7472         priv = netdev_priv(ndev);
7473         priv->device = device;
7474         priv->dev = ndev;
7475
7476         for (i = 0; i < MTL_MAX_RX_QUEUES; i++)
7477                 u64_stats_init(&priv->xstats.rxq_stats[i].syncp);
7478         for (i = 0; i < MTL_MAX_TX_QUEUES; i++)
7479                 u64_stats_init(&priv->xstats.txq_stats[i].syncp);
7480
7481         stmmac_set_ethtool_ops(ndev);
7482         priv->pause = pause;
7483         priv->plat = plat_dat;
7484         priv->ioaddr = res->addr;
7485         priv->dev->base_addr = (unsigned long)res->addr;
7486         priv->plat->dma_cfg->multi_msi_en =
7487                 (priv->plat->flags & STMMAC_FLAG_MULTI_MSI_EN);
7488
7489         priv->dev->irq = res->irq;
7490         priv->wol_irq = res->wol_irq;
7491         priv->lpi_irq = res->lpi_irq;
7492         priv->sfty_ce_irq = res->sfty_ce_irq;
7493         priv->sfty_ue_irq = res->sfty_ue_irq;
7494         for (i = 0; i < MTL_MAX_RX_QUEUES; i++)
7495                 priv->rx_irq[i] = res->rx_irq[i];
7496         for (i = 0; i < MTL_MAX_TX_QUEUES; i++)
7497                 priv->tx_irq[i] = res->tx_irq[i];
7498
7499         if (!is_zero_ether_addr(res->mac))
7500                 eth_hw_addr_set(priv->dev, res->mac);
7501
7502         dev_set_drvdata(device, priv->dev);
7503
7504         /* Verify driver arguments */
7505         stmmac_verify_args();
7506
7507         priv->af_xdp_zc_qps = bitmap_zalloc(MTL_MAX_TX_QUEUES, GFP_KERNEL);
7508         if (!priv->af_xdp_zc_qps)
7509                 return -ENOMEM;
7510
7511         /* Allocate workqueue */
7512         priv->wq = create_singlethread_workqueue("stmmac_wq");
7513         if (!priv->wq) {
7514                 dev_err(priv->device, "failed to create workqueue\n");
7515                 ret = -ENOMEM;
7516                 goto error_wq_init;
7517         }
7518
7519         INIT_WORK(&priv->service_task, stmmac_service_task);
7520
7521         /* Initialize Link Partner FPE workqueue */
7522         INIT_WORK(&priv->fpe_task, stmmac_fpe_lp_task);
7523
7524         /* Override with kernel parameters if supplied XXX CRS XXX
7525          * this needs to have multiple instances
7526          */
7527         if ((phyaddr >= 0) && (phyaddr <= 31))
7528                 priv->plat->phy_addr = phyaddr;
7529
7530         if (priv->plat->stmmac_rst) {
7531                 ret = reset_control_assert(priv->plat->stmmac_rst);
7532                 reset_control_deassert(priv->plat->stmmac_rst);
7533                 /* Some reset controllers provide only a reset callback
7534                  * instead of an assert + deassert callback pair.
7535                  */
7536                 if (ret == -ENOTSUPP)
7537                         reset_control_reset(priv->plat->stmmac_rst);
7538         }
7539
7540         ret = reset_control_deassert(priv->plat->stmmac_ahb_rst);
7541         if (ret == -ENOTSUPP)
7542                 dev_err(priv->device, "unable to bring out of ahb reset: %pe\n",
7543                         ERR_PTR(ret));
7544
7545         /* Wait a bit for the reset to take effect */
7546         udelay(10);
7547
7548         /* Init MAC and get the capabilities */
7549         ret = stmmac_hw_init(priv);
7550         if (ret)
7551                 goto error_hw_init;
7552
7553         /* Only DWMAC core version 5.20 onwards supports HW descriptor prefetch.
7554          */
7555         if (priv->synopsys_id < DWMAC_CORE_5_20)
7556                 priv->plat->dma_cfg->dche = false;
7557
7558         stmmac_check_ether_addr(priv);
7559
7560         ndev->netdev_ops = &stmmac_netdev_ops;
7561
7562         ndev->xdp_metadata_ops = &stmmac_xdp_metadata_ops;
7563         ndev->xsk_tx_metadata_ops = &stmmac_xsk_tx_metadata_ops;
7564
7565         ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
7566                             NETIF_F_RXCSUM;
7567         ndev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
7568                              NETDEV_XDP_ACT_XSK_ZEROCOPY;
7569
7570         ret = stmmac_tc_init(priv, priv);
7571         if (!ret) {
7572                 ndev->hw_features |= NETIF_F_HW_TC;
7573         }
7574
7575         if ((priv->plat->flags & STMMAC_FLAG_TSO_EN) && (priv->dma_cap.tsoen)) {
7576                 ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
7577                 if (priv->plat->has_gmac4)
7578                         ndev->hw_features |= NETIF_F_GSO_UDP_L4;
7579                 priv->tso = true;
7580                 dev_info(priv->device, "TSO feature enabled\n");
7581         }
7582
7583         if (priv->dma_cap.sphen &&
7584             !(priv->plat->flags & STMMAC_FLAG_SPH_DISABLE)) {
7585                 ndev->hw_features |= NETIF_F_GRO;
7586                 priv->sph_cap = true;
7587                 priv->sph = priv->sph_cap;
7588                 dev_info(priv->device, "SPH feature enabled\n");
7589         }
7590
7591         /* Ideally our host DMA address width is the same as for the
7592          * device. However, it may differ and then we have to use our
7593          * host DMA width for allocation and the device DMA width for
7594          * register handling.
7595          */
7596         if (priv->plat->host_dma_width)
7597                 priv->dma_cap.host_dma_width = priv->plat->host_dma_width;
7598         else
7599                 priv->dma_cap.host_dma_width = priv->dma_cap.addr64;
7600
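             /* Program the DMA mask from the chosen width; on failure, fall
              * back to a 32-bit DMA mask.
              */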
7601         if (priv->dma_cap.host_dma_width) {
7602                 ret = dma_set_mask_and_coherent(device,
7603                                 DMA_BIT_MASK(priv->dma_cap.host_dma_width));
7604                 if (!ret) {
7605                         dev_info(priv->device, "Using %d/%d bits DMA host/device width\n",
7606                                  priv->dma_cap.host_dma_width, priv->dma_cap.addr64);
7607
7608                         /*
7609                          * If more than 32 bits can be addressed, make sure to
7610                          * enable enhanced addressing mode.
7611                          */
7612                         if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT))
7613                                 priv->plat->dma_cfg->eame = true;
7614                 } else {
7615                         ret = dma_set_mask_and_coherent(device, DMA_BIT_MASK(32));
7616                         if (ret) {
7617                                 dev_err(priv->device, "Failed to set DMA Mask\n");
7618                                 goto error_hw_init;
7619                         }
7620
7621                         priv->dma_cap.host_dma_width = 32;
7622                 }
7623         }
7624
7625         ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
7626         ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
7627 #ifdef STMMAC_VLAN_TAG_USED
7628         /* Both mac100 and gmac support receive VLAN tag detection */
7629         ndev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX;
7630         ndev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
7631         priv->hw->hw_vlan_en = true;
7632
7633         if (priv->dma_cap.vlhash) {
7634                 ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
7635                 ndev->features |= NETIF_F_HW_VLAN_STAG_FILTER;
7636         }
7637         if (priv->dma_cap.vlins) {
7638                 ndev->features |= NETIF_F_HW_VLAN_CTAG_TX;
7639                 if (priv->dma_cap.dvlan)
7640                         ndev->features |= NETIF_F_HW_VLAN_STAG_TX;
7641         }
7642 #endif
7643         priv->msg_enable = netif_msg_init(debug, default_msg_level);
7644
7645         priv->xstats.threshold = tc;
7646
7647         /* Initialize RSS */
7648         rxq = priv->plat->rx_queues_to_use;
7649         netdev_rss_key_fill(priv->rss.key, sizeof(priv->rss.key));
7650         for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
7651                 priv->rss.table[i] = ethtool_rxfh_indir_default(i, rxq);
7652
7653         if (priv->dma_cap.rssen && priv->plat->rss_en)
7654                 ndev->features |= NETIF_F_RXHASH;
7655
7656         ndev->vlan_features |= ndev->features;
7657         /* TSO doesn't work on VLANs yet */
7658         ndev->vlan_features &= ~NETIF_F_TSO;
7659
7660         /* MTU range: 46 - hw-specific max */
7661         ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
7662         if (priv->plat->has_xgmac)
7663                 ndev->max_mtu = XGMAC_JUMBO_LEN;
7664         else if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
7665                 ndev->max_mtu = JUMBO_LEN;
7666         else
7667                 ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
7668         /* Do not overwrite ndev->max_mtu if plat->maxmtu > ndev->max_mtu,
7669          * or if plat->maxmtu < ndev->min_mtu, which is an invalid range.
7670          */
7671         if ((priv->plat->maxmtu < ndev->max_mtu) &&
7672             (priv->plat->maxmtu >= ndev->min_mtu))
7673                 ndev->max_mtu = priv->plat->maxmtu;
7674         else if (priv->plat->maxmtu < ndev->min_mtu)
7675                 dev_warn(priv->device,
7676                          "%s: warning: maxmtu having invalid value (%d)\n",
7677                          __func__, priv->plat->maxmtu);
7678
7679         if (flow_ctrl)
7680                 priv->flow_ctrl = FLOW_AUTO;    /* RX/TX pause on */
7681
7682         ndev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
7683
7684         /* Setup channels NAPI */
7685         stmmac_napi_add(ndev);
7686
7687         mutex_init(&priv->lock);
7688
7689         /* If a specific clk_csr value is passed from the platform,
7690          * this means that the CSR Clock Range selection cannot be
7691          * changed at run-time and is fixed. Otherwise the driver
7692          * tries to set the MDC clock dynamically according to the
7693          * actual CSR clock input.
7694          */
7695         if (priv->plat->clk_csr >= 0)
7696                 priv->clk_csr = priv->plat->clk_csr;
7697         else
7698                 stmmac_clk_csr_set(priv);
7699
7700         stmmac_check_pcs_mode(priv);
7701
7702         pm_runtime_get_noresume(device);
7703         pm_runtime_set_active(device);
7704         if (!pm_runtime_enabled(device))
7705                 pm_runtime_enable(device);
7706
7707         if (priv->hw->pcs != STMMAC_PCS_TBI &&
7708             priv->hw->pcs != STMMAC_PCS_RTBI) {
7709                 /* MDIO bus Registration */
7710                 ret = stmmac_mdio_register(ndev);
7711                 if (ret < 0) {
7712                         dev_err_probe(priv->device, ret,
7713                                       "%s: MDIO bus (id: %d) registration failed\n",
7714                                       __func__, priv->plat->bus_id);
7715                         goto error_mdio_register;
7716                 }
7717         }
7718
7719         if (priv->plat->speed_mode_2500)
7720                 priv->plat->speed_mode_2500(ndev, priv->plat->bsp_priv);
7721
7722         if (priv->plat->mdio_bus_data && priv->plat->mdio_bus_data->has_xpcs) {
7723                 ret = stmmac_xpcs_setup(priv->mii);
7724                 if (ret)
7725                         goto error_xpcs_setup;
7726         }
7727
7728         ret = stmmac_phy_setup(priv);
7729         if (ret) {
7730                 netdev_err(ndev, "failed to setup phy (%d)\n", ret);
7731                 goto error_phy_setup;
7732         }
7733
7734         ret = register_netdev(ndev);
7735         if (ret) {
7736                 dev_err(priv->device, "%s: ERROR %i registering the device\n",
7737                         __func__, ret);
7738                 goto error_netdev_register;
7739         }
7740
7741 #ifdef CONFIG_DEBUG_FS
7742         stmmac_init_fs(ndev);
7743 #endif
7744
7745         if (priv->plat->dump_debug_regs)
7746                 priv->plat->dump_debug_regs(priv->plat->bsp_priv);
7747
7748         /* Let pm_runtime_put() disable the clocks.
7749          * If CONFIG_PM is not enabled, the clocks will stay powered.
7750          */
7751         pm_runtime_put(device);
7752
7753         return ret;
7754
7755 error_netdev_register:
7756         phylink_destroy(priv->phylink);
7757 error_xpcs_setup:
7758 error_phy_setup:
7759         if (priv->hw->pcs != STMMAC_PCS_TBI &&
7760             priv->hw->pcs != STMMAC_PCS_RTBI)
7761                 stmmac_mdio_unregister(ndev);
7762 error_mdio_register:
7763         stmmac_napi_del(ndev);
7764 error_hw_init:
7765         destroy_workqueue(priv->wq);
7766 error_wq_init:
7767         bitmap_free(priv->af_xdp_zc_qps);
7768
7769         return ret;
7770 }
7771 EXPORT_SYMBOL_GPL(stmmac_dvr_probe);
7772
7773 /**
7774  * stmmac_dvr_remove
7775  * @dev: device pointer
7776  * Description: this function resets the TX/RX processes, disables the MAC RX/TX,
7777  * changes the link status and releases the DMA descriptor rings.
7778  */
7779 void stmmac_dvr_remove(struct device *dev)
7780 {
7781         struct net_device *ndev = dev_get_drvdata(dev);
7782         struct stmmac_priv *priv = netdev_priv(ndev);
7783
7784         netdev_info(priv->dev, "%s: removing driver\n", __func__);
7785
7786         pm_runtime_get_sync(dev);
7787
7788         stmmac_stop_all_dma(priv);
7789         stmmac_mac_set(priv, priv->ioaddr, false);
7790         netif_carrier_off(ndev);
7791         unregister_netdev(ndev);
7792
7793 #ifdef CONFIG_DEBUG_FS
7794         stmmac_exit_fs(ndev);
7795 #endif
7796         phylink_destroy(priv->phylink);
7797         if (priv->plat->stmmac_rst)
7798                 reset_control_assert(priv->plat->stmmac_rst);
7799         reset_control_assert(priv->plat->stmmac_ahb_rst);
7800         if (priv->hw->pcs != STMMAC_PCS_TBI &&
7801             priv->hw->pcs != STMMAC_PCS_RTBI)
7802                 stmmac_mdio_unregister(ndev);
7803         destroy_workqueue(priv->wq);
7804         mutex_destroy(&priv->lock);
7805         bitmap_free(priv->af_xdp_zc_qps);
7806
7807         pm_runtime_disable(dev);
7808         pm_runtime_put_noidle(dev);
7809 }
7810 EXPORT_SYMBOL_GPL(stmmac_dvr_remove);
7811
7812 /**
7813  * stmmac_suspend - suspend callback
7814  * @dev: device pointer
7815  * Description: this function suspends the device; it is called by the
7816  * platform driver to stop the network queue, release the resources,
7817  * program the PMT register (for WoL) and clean up driver state.
7818  */
7819 int stmmac_suspend(struct device *dev)
7820 {
7821         struct net_device *ndev = dev_get_drvdata(dev);
7822         struct stmmac_priv *priv = netdev_priv(ndev);
7823         u32 chan;
7824
7825         if (!ndev || !netif_running(ndev))
7826                 return 0;
7827
7828         mutex_lock(&priv->lock);
7829
7830         netif_device_detach(ndev);
7831
7832         stmmac_disable_all_queues(priv);
7833
7834         for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
7835                 hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
7836
7837         if (priv->eee_enabled) {
7838                 priv->tx_path_in_lpi_mode = false;
7839                 del_timer_sync(&priv->eee_ctrl_timer);
7840         }
7841
7842         /* Stop TX/RX DMA */
7843         stmmac_stop_all_dma(priv);
7844
7845         if (priv->plat->serdes_powerdown)
7846                 priv->plat->serdes_powerdown(ndev, priv->plat->bsp_priv);
7847
7848         /* Enable Power down mode by programming the PMT regs */
7849         if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7850                 stmmac_pmt(priv, priv->hw, priv->wolopts);
7851                 priv->irq_wake = 1;
7852         } else {
7853                 stmmac_mac_set(priv, priv->ioaddr, false);
7854                 pinctrl_pm_select_sleep_state(priv->device);
7855         }
7856
7857         mutex_unlock(&priv->lock);
7858
7859         rtnl_lock();
7860         if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7861                 phylink_suspend(priv->phylink, true);
7862         } else {
7863                 if (device_may_wakeup(priv->device))
7864                         phylink_speed_down(priv->phylink, false);
7865                 phylink_suspend(priv->phylink, false);
7866         }
7867         rtnl_unlock();
7868
7869         if (priv->dma_cap.fpesel) {
7870                 /* Disable FPE */
7871                 stmmac_fpe_configure(priv, priv->ioaddr,
7872                                      priv->plat->fpe_cfg,
7873                                      priv->plat->tx_queues_to_use,
7874                                      priv->plat->rx_queues_to_use, false);
7875
7876                 stmmac_fpe_handshake(priv, false);
7877                 stmmac_fpe_stop_wq(priv);
7878         }
7879
7880         priv->speed = SPEED_UNKNOWN;
7881         return 0;
7882 }
7883 EXPORT_SYMBOL_GPL(stmmac_suspend);
7884
7885 static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue)
7886 {
7887         struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
7888
7889         rx_q->cur_rx = 0;
7890         rx_q->dirty_rx = 0;
7891 }
7892
7893 static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue)
7894 {
7895         struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
7896
7897         tx_q->cur_tx = 0;
7898         tx_q->dirty_tx = 0;
7899         tx_q->mss = 0;
7900
7901         netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
7902 }
7903
7904 /**
7905  * stmmac_reset_queues_param - reset queue parameters
7906  * @priv: driver private structure
7907  */
7908 static void stmmac_reset_queues_param(struct stmmac_priv *priv)
7909 {
7910         u32 rx_cnt = priv->plat->rx_queues_to_use;
7911         u32 tx_cnt = priv->plat->tx_queues_to_use;
7912         u32 queue;
7913
7914         for (queue = 0; queue < rx_cnt; queue++)
7915                 stmmac_reset_rx_queue(priv, queue);
7916
7917         for (queue = 0; queue < tx_cnt; queue++)
7918                 stmmac_reset_tx_queue(priv, queue);
7919 }
7920
7921 /**
7922  * stmmac_resume - resume callback
7923  * @dev: device pointer
7924  * Description: on resume, this function is invoked to set up the DMA and core
7925  * in a usable state.
7926  */
7927 int stmmac_resume(struct device *dev)
7928 {
7929         struct net_device *ndev = dev_get_drvdata(dev);
7930         struct stmmac_priv *priv = netdev_priv(ndev);
7931         int ret;
7932
7933         if (!netif_running(ndev))
7934                 return 0;
7935
7936         /* The Power Down bit in the PMT register is cleared
7937          * automatically as soon as a magic packet or a Wake-up frame
7938          * is received. Nevertheless, it's better to manually clear
7939          * this bit because it can generate problems while resuming
7940          * from other devices (e.g. serial console).
7941          */
7942         if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7943                 mutex_lock(&priv->lock);
7944                 stmmac_pmt(priv, priv->hw, 0);
7945                 mutex_unlock(&priv->lock);
7946                 priv->irq_wake = 0;
7947         } else {
7948                 pinctrl_pm_select_default_state(priv->device);
7949                 /* reset the phy so that it's ready */
7950                 if (priv->mii)
7951                         stmmac_mdio_reset(priv->mii);
7952         }
7953
7954         if (!(priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
7955             priv->plat->serdes_powerup) {
7956                 ret = priv->plat->serdes_powerup(ndev,
7957                                                  priv->plat->bsp_priv);
7958
7959                 if (ret < 0)
7960                         return ret;
7961         }
7962
7963         rtnl_lock();
7964         if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7965                 phylink_resume(priv->phylink);
7966         } else {
7967                 phylink_resume(priv->phylink);
7968                 if (device_may_wakeup(priv->device))
7969                         phylink_speed_up(priv->phylink);
7970         }
7971         rtnl_unlock();
7972
7973         rtnl_lock();
7974         mutex_lock(&priv->lock);
7975
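             /* Restore software queue state and descriptors before
              * reprogramming the hardware, then re-enable NAPI and the DMA
              * interrupts.
              */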
7976         stmmac_reset_queues_param(priv);
7977
7978         stmmac_free_tx_skbufs(priv);
7979         stmmac_clear_descriptors(priv, &priv->dma_conf);
7980
7981         stmmac_hw_setup(ndev, false);
7982         stmmac_init_coalesce(priv);
7983         stmmac_set_rx_mode(ndev);
7984
7985         stmmac_restore_hw_vlan_rx_fltr(priv, ndev, priv->hw);
7986
7987         stmmac_enable_all_queues(priv);
7988         stmmac_enable_all_dma_irq(priv);
7989
7990         mutex_unlock(&priv->lock);
7991         rtnl_unlock();
7992
7993         netif_device_attach(ndev);
7994
7995         return 0;
7996 }
7997 EXPORT_SYMBOL_GPL(stmmac_resume);
7998
7999 #ifndef MODULE
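     /* Parse the "stmmaceth=" boot argument: a comma-separated list of
      * "name:value" pairs that mirror the module parameters, e.g.
      * (illustrative):
      *
      *     stmmaceth=debug:16,phyaddr:1,watchdog:4000
      */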
8000 static int __init stmmac_cmdline_opt(char *str)
8001 {
8002         char *opt;
8003
8004         if (!str || !*str)
8005                 return 1;
8006         while ((opt = strsep(&str, ",")) != NULL) {
8007                 if (!strncmp(opt, "debug:", 6)) {
8008                         if (kstrtoint(opt + 6, 0, &debug))
8009                                 goto err;
8010                 } else if (!strncmp(opt, "phyaddr:", 8)) {
8011                         if (kstrtoint(opt + 8, 0, &phyaddr))
8012                                 goto err;
8013                 } else if (!strncmp(opt, "buf_sz:", 7)) {
8014                         if (kstrtoint(opt + 7, 0, &buf_sz))
8015                                 goto err;
8016                 } else if (!strncmp(opt, "tc:", 3)) {
8017                         if (kstrtoint(opt + 3, 0, &tc))
8018                                 goto err;
8019                 } else if (!strncmp(opt, "watchdog:", 9)) {
8020                         if (kstrtoint(opt + 9, 0, &watchdog))
8021                                 goto err;
8022                 } else if (!strncmp(opt, "flow_ctrl:", 10)) {
8023                         if (kstrtoint(opt + 10, 0, &flow_ctrl))
8024                                 goto err;
8025                 } else if (!strncmp(opt, "pause:", 6)) {
8026                         if (kstrtoint(opt + 6, 0, &pause))
8027                                 goto err;
8028                 } else if (!strncmp(opt, "eee_timer:", 10)) {
8029                         if (kstrtoint(opt + 10, 0, &eee_timer))
8030                                 goto err;
8031                 } else if (!strncmp(opt, "chain_mode:", 11)) {
8032                         if (kstrtoint(opt + 11, 0, &chain_mode))
8033                                 goto err;
8034                 }
8035         }
8036         return 1;
8037
8038 err:
8039         pr_err("%s: ERROR broken module parameter conversion\n", __func__);
8040         return 1;
8041 }
8042
8043 __setup("stmmaceth=", stmmac_cmdline_opt);
8044 #endif /* MODULE */
8045
8046 static int __init stmmac_init(void)
8047 {
8048 #ifdef CONFIG_DEBUG_FS
8049         /* Create debugfs main directory if it doesn't exist yet */
8050         if (!stmmac_fs_dir)
8051                 stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
8052         register_netdevice_notifier(&stmmac_notifier);
8053 #endif
8054
8055         return 0;
8056 }
8057
8058 static void __exit stmmac_exit(void)
8059 {
8060 #ifdef CONFIG_DEBUG_FS
8061         unregister_netdevice_notifier(&stmmac_notifier);
8062         debugfs_remove_recursive(stmmac_fs_dir);
8063 #endif
8064 }
8065
8066 module_init(stmmac_init)
8067 module_exit(stmmac_exit)
8068
8069 MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
8070 MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
8071 MODULE_LICENSE("GPL");