net: stmmac: Use correct values in TQS/RQS fields
linux-2.6-block.git: drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
1 /*******************************************************************************
2   This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
3   ST Ethernet IPs are built around a Synopsys IP Core.
4
5         Copyright(C) 2007-2011 STMicroelectronics Ltd
6
7   This program is free software; you can redistribute it and/or modify it
8   under the terms and conditions of the GNU General Public License,
9   version 2, as published by the Free Software Foundation.
10
11   This program is distributed in the hope it will be useful, but WITHOUT
12   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
14   more details.
15
16   The full GNU General Public License is included in this distribution in
17   the file called "COPYING".
18
19   Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
20
21   Documentation available at:
22         http://www.stlinux.com
23   Support available at:
24         https://bugzilla.stlinux.com/
25 *******************************************************************************/
26
27 #include <linux/clk.h>
28 #include <linux/kernel.h>
29 #include <linux/interrupt.h>
30 #include <linux/ip.h>
31 #include <linux/tcp.h>
32 #include <linux/skbuff.h>
33 #include <linux/ethtool.h>
34 #include <linux/if_ether.h>
35 #include <linux/crc32.h>
36 #include <linux/mii.h>
37 #include <linux/if.h>
38 #include <linux/if_vlan.h>
39 #include <linux/dma-mapping.h>
40 #include <linux/slab.h>
41 #include <linux/prefetch.h>
42 #include <linux/pinctrl/consumer.h>
43 #ifdef CONFIG_DEBUG_FS
44 #include <linux/debugfs.h>
45 #include <linux/seq_file.h>
46 #endif /* CONFIG_DEBUG_FS */
47 #include <linux/net_tstamp.h>
48 #include "stmmac_ptp.h"
49 #include "stmmac.h"
50 #include <linux/reset.h>
51 #include <linux/of_mdio.h>
52 #include "dwmac1000.h"
53
54 #define STMMAC_ALIGN(x) L1_CACHE_ALIGN(x)
55 #define TSO_MAX_BUFF_SIZE       (SZ_16K - 1)
56
57 /* Module parameters */
58 #define TX_TIMEO        5000
59 static int watchdog = TX_TIMEO;
60 module_param(watchdog, int, S_IRUGO | S_IWUSR);
61 MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)");
62
63 static int debug = -1;
64 module_param(debug, int, S_IRUGO | S_IWUSR);
65 MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");
66
67 static int phyaddr = -1;
68 module_param(phyaddr, int, S_IRUGO);
69 MODULE_PARM_DESC(phyaddr, "Physical device address");
70
71 #define STMMAC_TX_THRESH        (DMA_TX_SIZE / 4)
72 #define STMMAC_RX_THRESH        (DMA_RX_SIZE / 4)
73
74 static int flow_ctrl = FLOW_OFF;
75 module_param(flow_ctrl, int, S_IRUGO | S_IWUSR);
76 MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");
77
78 static int pause = PAUSE_TIME;
79 module_param(pause, int, S_IRUGO | S_IWUSR);
80 MODULE_PARM_DESC(pause, "Flow Control Pause Time");
81
82 #define TC_DEFAULT 64
83 static int tc = TC_DEFAULT;
84 module_param(tc, int, S_IRUGO | S_IWUSR);
85 MODULE_PARM_DESC(tc, "DMA threshold control value");
86
87 #define DEFAULT_BUFSIZE 1536
88 static int buf_sz = DEFAULT_BUFSIZE;
89 module_param(buf_sz, int, S_IRUGO | S_IWUSR);
90 MODULE_PARM_DESC(buf_sz, "DMA buffer size");
91
92 #define STMMAC_RX_COPYBREAK     256
93
94 static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
95                                       NETIF_MSG_LINK | NETIF_MSG_IFUP |
96                                       NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);
97
98 #define STMMAC_DEFAULT_LPI_TIMER        1000
99 static int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
100 module_param(eee_timer, int, S_IRUGO | S_IWUSR);
101 MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
102 #define STMMAC_LPI_T(x) (jiffies + msecs_to_jiffies(x))
103
104 /* By default the driver will use the ring mode to manage tx and rx descriptors,
105  * but allows the user to force the chain mode instead of the ring mode.
106  */
107 static unsigned int chain_mode;
108 module_param(chain_mode, int, S_IRUGO);
109 MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");
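/* The parameters above can be overridden at module load time, e.g.
 * (hypothetical invocation, assuming the driver is built as a module
 * named stmmac):
 *
 *   modprobe stmmac buf_sz=4096 debug=16 chain_mode=1
 *
 * or, when the driver is built in, on the kernel command line as
 * "stmmac.buf_sz=4096".
 */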
110
111 static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
112
113 #ifdef CONFIG_DEBUG_FS
114 static int stmmac_init_fs(struct net_device *dev);
115 static void stmmac_exit_fs(struct net_device *dev);
116 #endif
117
118 #define STMMAC_COAL_TIMER(x) (jiffies + usecs_to_jiffies(x))
119
120 /**
121  * stmmac_verify_args - verify the driver parameters.
122  * Description: it checks the driver parameters and sets a default in case of
123  * errors.
124  */
125 static void stmmac_verify_args(void)
126 {
127         if (unlikely(watchdog < 0))
128                 watchdog = TX_TIMEO;
129         if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB)))
130                 buf_sz = DEFAULT_BUFSIZE;
131         if (unlikely(flow_ctrl > 1))
132                 flow_ctrl = FLOW_AUTO;
133         else if (likely(flow_ctrl < 0))
134                 flow_ctrl = FLOW_OFF;
135         if (unlikely((pause < 0) || (pause > 0xffff)))
136                 pause = PAUSE_TIME;
137         if (eee_timer < 0)
138                 eee_timer = STMMAC_DEFAULT_LPI_TIMER;
139 }
140
141 /**
142  * stmmac_disable_all_queues - Disable all queues
143  * @priv: driver private structure
144  */
145 static void stmmac_disable_all_queues(struct stmmac_priv *priv)
146 {
147         u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
148         u32 queue;
149
150         for (queue = 0; queue < rx_queues_cnt; queue++) {
151                 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
152
153                 napi_disable(&rx_q->napi);
154         }
155 }
156
157 /**
158  * stmmac_enable_all_queues - Enable all queues
159  * @priv: driver private structure
160  */
161 static void stmmac_enable_all_queues(struct stmmac_priv *priv)
162 {
163         u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
164         u32 queue;
165
166         for (queue = 0; queue < rx_queues_cnt; queue++) {
167                 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
168
169                 napi_enable(&rx_q->napi);
170         }
171 }
172
173 /**
174  * stmmac_stop_all_queues - Stop all queues
175  * @priv: driver private structure
176  */
177 static void stmmac_stop_all_queues(struct stmmac_priv *priv)
178 {
179         u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
180         u32 queue;
181
182         for (queue = 0; queue < tx_queues_cnt; queue++)
183                 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
184 }
185
186 /**
187  * stmmac_start_all_queues - Start all queues
188  * @priv: driver private structure
189  */
190 static void stmmac_start_all_queues(struct stmmac_priv *priv)
191 {
192         u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
193         u32 queue;
194
195         for (queue = 0; queue < tx_queues_cnt; queue++)
196                 netif_tx_start_queue(netdev_get_tx_queue(priv->dev, queue));
197 }
198
199 /**
200  * stmmac_clk_csr_set - dynamically set the MDC clock
201  * @priv: driver private structure
202  * Description: this is to dynamically set the MDC clock according to the csr
203  * clock input.
204  * Note:
205  *      If a specific clk_csr value is passed from the platform
206  *      this means that the CSR Clock Range selection cannot be
207  *      changed at run-time and it is fixed (as reported in the driver
208  *      documentation). Otherwise, the driver will try to set the MDC
209  *      clock dynamically according to the actual clock input.
210  */
211 static void stmmac_clk_csr_set(struct stmmac_priv *priv)
212 {
213         u32 clk_rate;
214
215         clk_rate = clk_get_rate(priv->plat->stmmac_clk);
216
217         /* The platform-provided default clk_csr is assumed to be valid
218          * for all other cases except the ones mentioned below. For values
219          * higher than the IEEE 802.3 specified frequency we cannot
220          * estimate the proper divider because the frequency of clk_csr_i
221          * is not known. So we do not change the default
222          * divider.
223          */
224         if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
225                 if (clk_rate < CSR_F_35M)
226                         priv->clk_csr = STMMAC_CSR_20_35M;
227                 else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M))
228                         priv->clk_csr = STMMAC_CSR_35_60M;
229                 else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M))
230                         priv->clk_csr = STMMAC_CSR_60_100M;
231                 else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M))
232                         priv->clk_csr = STMMAC_CSR_100_150M;
233                 else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
234                         priv->clk_csr = STMMAC_CSR_150_250M;
235                 else if ((clk_rate >= CSR_F_250M) && (clk_rate < CSR_F_300M))
236                         priv->clk_csr = STMMAC_CSR_250_300M;
237         }
238
239         if (priv->plat->has_sun8i) {
240                 if (clk_rate > 160000000)
241                         priv->clk_csr = 0x03;
242                 else if (clk_rate > 80000000)
243                         priv->clk_csr = 0x02;
244                 else if (clk_rate > 40000000)
245                         priv->clk_csr = 0x01;
246                 else
247                         priv->clk_csr = 0;
248         }
249 }
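/*
 * Example of the selection above (hypothetical clock rate): with a csr
 * clock of 125 MHz the first block picks STMMAC_CSR_100_150M (125 MHz
 * falls in the 100-150 MHz range); on a sun8i platform the second block
 * then overrides it with 0x02 (above 80 MHz but not above 160 MHz).
 */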
250
251 static void print_pkt(unsigned char *buf, int len)
252 {
253         pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf);
254         print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
255 }
256
257 static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
258 {
259         struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
260         u32 avail;
261
262         if (tx_q->dirty_tx > tx_q->cur_tx)
263                 avail = tx_q->dirty_tx - tx_q->cur_tx - 1;
264         else
265                 avail = DMA_TX_SIZE - tx_q->cur_tx + tx_q->dirty_tx - 1;
266
267         return avail;
268 }
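/*
 * Example of the ring arithmetic above (hypothetical indices, assuming
 * DMA_TX_SIZE == 256): when dirty_tx == 40 and cur_tx == 20,
 * avail = 40 - 20 - 1 = 19; when dirty_tx == 10 and cur_tx == 250,
 * avail = 256 - 250 + 10 - 1 = 15.
 */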
269
270 /**
271  * stmmac_rx_dirty - Get RX queue dirty
272  * @priv: driver private structure
273  * @queue: RX queue index
274  */
275 static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
276 {
277         struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
278         u32 dirty;
279
280         if (rx_q->dirty_rx <= rx_q->cur_rx)
281                 dirty = rx_q->cur_rx - rx_q->dirty_rx;
282         else
283                 dirty = DMA_RX_SIZE - rx_q->dirty_rx + rx_q->cur_rx;
284
285         return dirty;
286 }
287
288 /**
289  * stmmac_hw_fix_mac_speed - callback for speed selection
290  * @priv: driver private structure
291  * Description: on some platforms (e.g. ST), some HW system configuration
292  * registers have to be set according to the link speed negotiated.
293  */
294 static inline void stmmac_hw_fix_mac_speed(struct stmmac_priv *priv)
295 {
296         struct net_device *ndev = priv->dev;
297         struct phy_device *phydev = ndev->phydev;
298
299         if (likely(priv->plat->fix_mac_speed))
300                 priv->plat->fix_mac_speed(priv->plat->bsp_priv, phydev->speed);
301 }
302
303 /**
304  * stmmac_enable_eee_mode - check and enter LPI mode
305  * @priv: driver private structure
306  * Description: this function checks that all TX queues are idle and, if so,
307  * enters LPI mode when EEE is enabled.
308  */
309 static void stmmac_enable_eee_mode(struct stmmac_priv *priv)
310 {
311         u32 tx_cnt = priv->plat->tx_queues_to_use;
312         u32 queue;
313
314         /* check if all TX queues have the work finished */
315         for (queue = 0; queue < tx_cnt; queue++) {
316                 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
317
318                 if (tx_q->dirty_tx != tx_q->cur_tx)
319                         return; /* still unfinished work */
320         }
321
322         /* Check and enter in LPI mode */
323         if (!priv->tx_path_in_lpi_mode)
324                 priv->hw->mac->set_eee_mode(priv->hw,
325                                             priv->plat->en_tx_lpi_clockgating);
326 }
327
328 /**
329  * stmmac_disable_eee_mode - disable and exit from LPI mode
330  * @priv: driver private structure
331  * Description: this function exits LPI mode and disables EEE when the TX
332  * path is in the LPI state. It is called from the xmit path.
333  */
334 void stmmac_disable_eee_mode(struct stmmac_priv *priv)
335 {
336         priv->hw->mac->reset_eee_mode(priv->hw);
337         del_timer_sync(&priv->eee_ctrl_timer);
338         priv->tx_path_in_lpi_mode = false;
339 }
340
341 /**
342  * stmmac_eee_ctrl_timer - EEE TX SW timer.
343  * @arg : data hook
344  * Description:
345  *  if there is no data transfer and we are not already in the LPI state,
346  *  then the MAC transmitter can be moved to the LPI state.
347  */
348 static void stmmac_eee_ctrl_timer(unsigned long arg)
349 {
350         struct stmmac_priv *priv = (struct stmmac_priv *)arg;
351
352         stmmac_enable_eee_mode(priv);
353         mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
354 }
355
356 /**
357  * stmmac_eee_init - init EEE
358  * @priv: driver private structure
359  * Description:
360  *  if the GMAC supports the EEE (from the HW cap reg) and the phy device
361  *  can also manage EEE, this function enables the LPI state and starts the
362  *  related timer.
363  */
364 bool stmmac_eee_init(struct stmmac_priv *priv)
365 {
366         struct net_device *ndev = priv->dev;
367         unsigned long flags;
368         bool ret = false;
369
370         /* When using the PCS we cannot access the phy registers at this stage,
371          * so we do not support extra features such as EEE.
372          */
373         if ((priv->hw->pcs == STMMAC_PCS_RGMII) ||
374             (priv->hw->pcs == STMMAC_PCS_TBI) ||
375             (priv->hw->pcs == STMMAC_PCS_RTBI))
376                 goto out;
377
378         /* MAC core supports the EEE feature. */
379         if (priv->dma_cap.eee) {
380                 int tx_lpi_timer = priv->tx_lpi_timer;
381
382                 /* Check if the PHY supports EEE */
383                 if (phy_init_eee(ndev->phydev, 1)) {
384                         /* Manage at run time the case where EEE can no
385                          * longer be supported (for example because the link
386                          * partner capabilities have changed).
387                          * In that case the driver disables its own timers.
388                          */
389                         spin_lock_irqsave(&priv->lock, flags);
390                         if (priv->eee_active) {
391                                 netdev_dbg(priv->dev, "disable EEE\n");
392                                 del_timer_sync(&priv->eee_ctrl_timer);
393                                 priv->hw->mac->set_eee_timer(priv->hw, 0,
394                                                              tx_lpi_timer);
395                         }
396                         priv->eee_active = 0;
397                         spin_unlock_irqrestore(&priv->lock, flags);
398                         goto out;
399                 }
400                 /* Activate the EEE and start timers */
401                 spin_lock_irqsave(&priv->lock, flags);
402                 if (!priv->eee_active) {
403                         priv->eee_active = 1;
404                         setup_timer(&priv->eee_ctrl_timer,
405                                     stmmac_eee_ctrl_timer,
406                                     (unsigned long)priv);
407                         mod_timer(&priv->eee_ctrl_timer,
408                                   STMMAC_LPI_T(eee_timer));
409
410                         priv->hw->mac->set_eee_timer(priv->hw,
411                                                      STMMAC_DEFAULT_LIT_LS,
412                                                      tx_lpi_timer);
413                 }
414                 /* Set HW EEE according to the speed */
415                 priv->hw->mac->set_eee_pls(priv->hw, ndev->phydev->link);
416
417                 ret = true;
418                 spin_unlock_irqrestore(&priv->lock, flags);
419
420                 netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
421         }
422 out:
423         return ret;
424 }
425
426 /* stmmac_get_tx_hwtstamp - get HW TX timestamps
427  * @priv: driver private structure
428  * @p : descriptor pointer
429  * @skb : the socket buffer
430  * Description :
431  * This function reads the timestamp from the descriptor, performs some
432  * sanity checks, and passes it to the stack.
433  */
434 static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
435                                    struct dma_desc *p, struct sk_buff *skb)
436 {
437         struct skb_shared_hwtstamps shhwtstamp;
438         u64 ns;
439
440         if (!priv->hwts_tx_en)
441                 return;
442
443         /* exit if skb doesn't support hw tstamp */
444         if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
445                 return;
446
447         /* check tx tstamp status */
448         if (priv->hw->desc->get_tx_timestamp_status(p)) {
449                 /* get the valid tstamp */
450                 ns = priv->hw->desc->get_timestamp(p, priv->adv_ts);
451
452                 memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
453                 shhwtstamp.hwtstamp = ns_to_ktime(ns);
454
455                 netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns);
456                 /* pass tstamp to stack */
457                 skb_tstamp_tx(skb, &shhwtstamp);
458         }
459
460         return;
461 }
462
463 /* stmmac_get_rx_hwtstamp - get HW RX timestamps
464  * @priv: driver private structure
465  * @p : descriptor pointer
466  * @np : next descriptor pointer
467  * @skb : the socket buffer
468  * Description :
469  * This function reads the received packet's timestamp from the descriptor,
470  * passes it to the stack, and also performs some sanity checks.
471  */
472 static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
473                                    struct dma_desc *np, struct sk_buff *skb)
474 {
475         struct skb_shared_hwtstamps *shhwtstamp = NULL;
476         u64 ns;
477
478         if (!priv->hwts_rx_en)
479                 return;
480
481         /* Check if timestamp is available */
482         if (priv->hw->desc->get_rx_timestamp_status(p, priv->adv_ts)) {
483                 /* For GMAC4, the valid timestamp is from CTX next desc. */
484                 if (priv->plat->has_gmac4)
485                         ns = priv->hw->desc->get_timestamp(np, priv->adv_ts);
486                 else
487                         ns = priv->hw->desc->get_timestamp(p, priv->adv_ts);
488
489                 netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
490                 shhwtstamp = skb_hwtstamps(skb);
491                 memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
492                 shhwtstamp->hwtstamp = ns_to_ktime(ns);
493         } else  {
494                 netdev_dbg(priv->dev, "cannot get RX hw timestamp\n");
495         }
496 }
497
498 /**
499  *  stmmac_hwtstamp_ioctl - control hardware timestamping.
500  *  @dev: device pointer.
501  *  @ifr: An IOCTL specific structure, that can contain a pointer to
502  *  a proprietary structure used to pass information to the driver.
503  *  Description:
504  *  This function configures the MAC to enable/disable both outgoing (TX)
505  *  and incoming (RX) packet timestamping based on user input.
506  *  Return Value:
507  *  0 on success and an appropriate -ve integer on failure.
508  */
509 static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
510 {
511         struct stmmac_priv *priv = netdev_priv(dev);
512         struct hwtstamp_config config;
513         struct timespec64 now;
514         u64 temp = 0;
515         u32 ptp_v2 = 0;
516         u32 tstamp_all = 0;
517         u32 ptp_over_ipv4_udp = 0;
518         u32 ptp_over_ipv6_udp = 0;
519         u32 ptp_over_ethernet = 0;
520         u32 snap_type_sel = 0;
521         u32 ts_master_en = 0;
522         u32 ts_event_en = 0;
523         u32 value = 0;
524         u32 sec_inc;
525
526         if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
527                 netdev_alert(priv->dev, "No support for HW time stamping\n");
528                 priv->hwts_tx_en = 0;
529                 priv->hwts_rx_en = 0;
530
531                 return -EOPNOTSUPP;
532         }
533
534         if (copy_from_user(&config, ifr->ifr_data,
535                            sizeof(struct hwtstamp_config)))
536                 return -EFAULT;
537
538         netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
539                    __func__, config.flags, config.tx_type, config.rx_filter);
540
541         /* reserved for future extensions */
542         if (config.flags)
543                 return -EINVAL;
544
545         if (config.tx_type != HWTSTAMP_TX_OFF &&
546             config.tx_type != HWTSTAMP_TX_ON)
547                 return -ERANGE;
548
549         if (priv->adv_ts) {
550                 switch (config.rx_filter) {
551                 case HWTSTAMP_FILTER_NONE:
552                         /* time stamp no incoming packet at all */
553                         config.rx_filter = HWTSTAMP_FILTER_NONE;
554                         break;
555
556                 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
557                         /* PTP v1, UDP, any kind of event packet */
558                         config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
559                         /* take time stamp for all event messages */
560                         if (priv->plat->has_gmac4)
561                                 snap_type_sel = PTP_GMAC4_TCR_SNAPTYPSEL_1;
562                         else
563                                 snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
564
565                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
566                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
567                         break;
568
569                 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
570                         /* PTP v1, UDP, Sync packet */
571                         config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
572                         /* take time stamp for SYNC messages only */
573                         ts_event_en = PTP_TCR_TSEVNTENA;
574
575                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
576                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
577                         break;
578
579                 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
580                         /* PTP v1, UDP, Delay_req packet */
581                         config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
582                         /* take time stamp for Delay_Req messages only */
583                         ts_master_en = PTP_TCR_TSMSTRENA;
584                         ts_event_en = PTP_TCR_TSEVNTENA;
585
586                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
587                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
588                         break;
589
590                 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
591                         /* PTP v2, UDP, any kind of event packet */
592                         config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
593                         ptp_v2 = PTP_TCR_TSVER2ENA;
594                         /* take time stamp for all event messages */
595                         if (priv->plat->has_gmac4)
596                                 snap_type_sel = PTP_GMAC4_TCR_SNAPTYPSEL_1;
597                         else
598                                 snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
599
600                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
601                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
602                         break;
603
604                 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
605                         /* PTP v2, UDP, Sync packet */
606                         config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
607                         ptp_v2 = PTP_TCR_TSVER2ENA;
608                         /* take time stamp for SYNC messages only */
609                         ts_event_en = PTP_TCR_TSEVNTENA;
610
611                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
612                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
613                         break;
614
615                 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
616                         /* PTP v2, UDP, Delay_req packet */
617                         config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
618                         ptp_v2 = PTP_TCR_TSVER2ENA;
619                         /* take time stamp for Delay_Req messages only */
620                         ts_master_en = PTP_TCR_TSMSTRENA;
621                         ts_event_en = PTP_TCR_TSEVNTENA;
622
623                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
624                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
625                         break;
626
627                 case HWTSTAMP_FILTER_PTP_V2_EVENT:
628                         /* PTP v2/802.AS1 any layer, any kind of event packet */
629                         config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
630                         ptp_v2 = PTP_TCR_TSVER2ENA;
631                         /* take time stamp for all event messages */
632                         if (priv->plat->has_gmac4)
633                                 snap_type_sel = PTP_GMAC4_TCR_SNAPTYPSEL_1;
634                         else
635                                 snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
636
637                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
638                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
639                         ptp_over_ethernet = PTP_TCR_TSIPENA;
640                         break;
641
642                 case HWTSTAMP_FILTER_PTP_V2_SYNC:
643                         /* PTP v2/802.AS1, any layer, Sync packet */
644                         config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
645                         ptp_v2 = PTP_TCR_TSVER2ENA;
646                         /* take time stamp for SYNC messages only */
647                         ts_event_en = PTP_TCR_TSEVNTENA;
648
649                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
650                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
651                         ptp_over_ethernet = PTP_TCR_TSIPENA;
652                         break;
653
654                 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
655                         /* PTP v2/802.AS1, any layer, Delay_req packet */
656                         config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
657                         ptp_v2 = PTP_TCR_TSVER2ENA;
658                         /* take time stamp for Delay_Req messages only */
659                         ts_master_en = PTP_TCR_TSMSTRENA;
660                         ts_event_en = PTP_TCR_TSEVNTENA;
661
662                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
663                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
664                         ptp_over_ethernet = PTP_TCR_TSIPENA;
665                         break;
666
667                 case HWTSTAMP_FILTER_NTP_ALL:
668                 case HWTSTAMP_FILTER_ALL:
669                         /* time stamp any incoming packet */
670                         config.rx_filter = HWTSTAMP_FILTER_ALL;
671                         tstamp_all = PTP_TCR_TSENALL;
672                         break;
673
674                 default:
675                         return -ERANGE;
676                 }
677         } else {
678                 switch (config.rx_filter) {
679                 case HWTSTAMP_FILTER_NONE:
680                         config.rx_filter = HWTSTAMP_FILTER_NONE;
681                         break;
682                 default:
683                         /* PTP v1, UDP, any kind of event packet */
684                         config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
685                         break;
686                 }
687         }
688         priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
689         priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;
690
691         if (!priv->hwts_tx_en && !priv->hwts_rx_en)
692                 priv->hw->ptp->config_hw_tstamping(priv->ptpaddr, 0);
693         else {
694                 value = (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | PTP_TCR_TSCTRLSSR |
695                          tstamp_all | ptp_v2 | ptp_over_ethernet |
696                          ptp_over_ipv6_udp | ptp_over_ipv4_udp | ts_event_en |
697                          ts_master_en | snap_type_sel);
698                 priv->hw->ptp->config_hw_tstamping(priv->ptpaddr, value);
699
700                 /* program Sub Second Increment reg */
701                 sec_inc = priv->hw->ptp->config_sub_second_increment(
702                         priv->ptpaddr, priv->plat->clk_ptp_rate,
703                         priv->plat->has_gmac4);
704                 temp = div_u64(1000000000ULL, sec_inc);
705
706                 /* calculate the default addend value:
707                  * the formula is:
708                  * addend = (2^32) / freq_div_ratio;
709                  * where freq_div_ratio = 1e9ns / sec_inc
710                  */
711                 temp = (u64)(temp << 32);
712                 priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
713                 priv->hw->ptp->config_addend(priv->ptpaddr,
714                                              priv->default_addend);
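                /*
                 * Worked example of the computation above (hypothetical
                 * rates): with clk_ptp_rate = 62.5 MHz and sec_inc = 20 ns,
                 * freq_div_ratio = 1e9 / 20 = 50,000,000 and
                 * addend = (50,000,000 << 32) / 62,500,000
                 *        = 0.8 * 2^32 = 3,435,973,836,
                 * so the 32-bit accumulator overflows on 80% of the PTP clock
                 * cycles, i.e. at an effective 50 MHz, matching the 20 ns
                 * sub-second increment.
                 */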
715
716                 /* initialize system time */
717                 ktime_get_real_ts64(&now);
718
719                 /* lower 32 bits of tv_sec are safe until y2106 */
720                 priv->hw->ptp->init_systime(priv->ptpaddr, (u32)now.tv_sec,
721                                             now.tv_nsec);
722         }
723
724         return copy_to_user(ifr->ifr_data, &config,
725                             sizeof(struct hwtstamp_config)) ? -EFAULT : 0;
726 }
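/*
 * For reference, a minimal user-space sketch (hypothetical, not part of this
 * file) that requests timestamping of all packets through the standard
 * SIOCSHWTSTAMP ioctl, which lands in the handler above:
 *
 *   #include <string.h>
 *   #include <sys/ioctl.h>
 *   #include <net/if.h>
 *   #include <linux/net_tstamp.h>
 *   #include <linux/sockios.h>
 *
 *   struct hwtstamp_config cfg = {
 *           .tx_type   = HWTSTAMP_TX_ON,
 *           .rx_filter = HWTSTAMP_FILTER_ALL,
 *   };
 *   struct ifreq ifr;
 *
 *   memset(&ifr, 0, sizeof(ifr));
 *   strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *   ifr.ifr_data = (void *)&cfg;
 *   ioctl(sock_fd, SIOCSHWTSTAMP, &ifr);
 *
 * Here "eth0" and sock_fd (an already-open AF_INET datagram socket) are
 * placeholders.
 */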
727
728 /**
729  * stmmac_init_ptp - init PTP
730  * @priv: driver private structure
731  * Description: this is to verify if the HW supports the PTPv1 or PTPv2.
732  * This is done by looking at the HW cap. register.
733  * This function also registers the ptp driver.
734  */
735 static int stmmac_init_ptp(struct stmmac_priv *priv)
736 {
737         if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
738                 return -EOPNOTSUPP;
739
740         priv->adv_ts = 0;
741         /* Check if adv_ts can be enabled for dwmac 4.x core */
742         if (priv->plat->has_gmac4 && priv->dma_cap.atime_stamp)
743                 priv->adv_ts = 1;
744         /* Dwmac 3.x core with extend_desc can support adv_ts */
745         else if (priv->extend_desc && priv->dma_cap.atime_stamp)
746                 priv->adv_ts = 1;
747
748         if (priv->dma_cap.time_stamp)
749                 netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n");
750
751         if (priv->adv_ts)
752                 netdev_info(priv->dev,
753                             "IEEE 1588-2008 Advanced Timestamp supported\n");
754
755         priv->hw->ptp = &stmmac_ptp;
756         priv->hwts_tx_en = 0;
757         priv->hwts_rx_en = 0;
758
759         stmmac_ptp_register(priv);
760
761         return 0;
762 }
763
764 static void stmmac_release_ptp(struct stmmac_priv *priv)
765 {
766         if (priv->plat->clk_ptp_ref)
767                 clk_disable_unprepare(priv->plat->clk_ptp_ref);
768         stmmac_ptp_unregister(priv);
769 }
770
771 /**
772  *  stmmac_mac_flow_ctrl - Configure flow control in all queues
773  *  @priv: driver private structure
774  *  Description: It is used for configuring the flow control in all queues
775  */
776 static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex)
777 {
778         u32 tx_cnt = priv->plat->tx_queues_to_use;
779
780         priv->hw->mac->flow_ctrl(priv->hw, duplex, priv->flow_ctrl,
781                                  priv->pause, tx_cnt);
782 }
783
784 /**
785  * stmmac_adjust_link - adjusts the link parameters
786  * @dev: net device structure
787  * Description: this is the helper called by the PHY abstraction layer
788  * drivers to communicate the phy link status. According to the speed and
789  * duplex, this driver can invoke the registered glue-logic as well.
790  * It also invokes the EEE initialization because it could be needed when
791  * switching between different (EEE capable) networks.
792  */
793 static void stmmac_adjust_link(struct net_device *dev)
794 {
795         struct stmmac_priv *priv = netdev_priv(dev);
796         struct phy_device *phydev = dev->phydev;
797         unsigned long flags;
798         bool new_state = false;
799
800         if (!phydev)
801                 return;
802
803         spin_lock_irqsave(&priv->lock, flags);
804
805         if (phydev->link) {
806                 u32 ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
807
808                 /* Now we make sure that we can be in full duplex mode.
809                  * If not, we operate in half-duplex mode. */
810                 if (phydev->duplex != priv->oldduplex) {
811                         new_state = true;
812                         if (!phydev->duplex)
813                                 ctrl &= ~priv->hw->link.duplex;
814                         else
815                                 ctrl |= priv->hw->link.duplex;
816                         priv->oldduplex = phydev->duplex;
817                 }
818                 /* Flow Control operation */
819                 if (phydev->pause)
820                         stmmac_mac_flow_ctrl(priv, phydev->duplex);
821
822                 if (phydev->speed != priv->speed) {
823                         new_state = true;
824                         ctrl &= ~priv->hw->link.speed_mask;
825                         switch (phydev->speed) {
826                         case SPEED_1000:
827                                 ctrl |= priv->hw->link.speed1000;
828                                 break;
829                         case SPEED_100:
830                                 ctrl |= priv->hw->link.speed100;
831                                 break;
832                         case SPEED_10:
833                                 ctrl |= priv->hw->link.speed10;
834                                 break;
835                         default:
836                                 netif_warn(priv, link, priv->dev,
837                                            "broken speed: %d\n", phydev->speed);
838                                 phydev->speed = SPEED_UNKNOWN;
839                                 break;
840                         }
841                         if (phydev->speed != SPEED_UNKNOWN)
842                                 stmmac_hw_fix_mac_speed(priv);
843                         priv->speed = phydev->speed;
844                 }
845
846                 writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
847
848                 if (!priv->oldlink) {
849                         new_state = true;
850                         priv->oldlink = true;
851                 }
852         } else if (priv->oldlink) {
853                 new_state = true;
854                 priv->oldlink = false;
855                 priv->speed = SPEED_UNKNOWN;
856                 priv->oldduplex = DUPLEX_UNKNOWN;
857         }
858
859         if (new_state && netif_msg_link(priv))
860                 phy_print_status(phydev);
861
862         spin_unlock_irqrestore(&priv->lock, flags);
863
864         if (phydev->is_pseudo_fixed_link)
865                 /* Stop the PHY layer from calling the hook to adjust the link
866                  * in case a switch is attached to the stmmac driver.
867                  */
868                 phydev->irq = PHY_IGNORE_INTERRUPT;
869         else
870                 /* At this stage, init the EEE if supported.
871                  * Never called in case of fixed_link.
872                  */
873                 priv->eee_enabled = stmmac_eee_init(priv);
874 }
875
876 /**
877  * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
878  * @priv: driver private structure
879  * Description: this is to verify if the HW supports the PCS.
880  * The Physical Coding Sublayer (PCS) interface can be used when the MAC is
881  * configured for the TBI, RTBI, or SGMII PHY interface.
882  */
883 static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
884 {
885         int interface = priv->plat->interface;
886
887         if (priv->dma_cap.pcs) {
888                 if ((interface == PHY_INTERFACE_MODE_RGMII) ||
889                     (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
890                     (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
891                     (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
892                         netdev_dbg(priv->dev, "PCS RGMII support enabled\n");
893                         priv->hw->pcs = STMMAC_PCS_RGMII;
894                 } else if (interface == PHY_INTERFACE_MODE_SGMII) {
895                         netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
896                         priv->hw->pcs = STMMAC_PCS_SGMII;
897                 }
898         }
899 }
900
901 /**
902  * stmmac_init_phy - PHY initialization
903  * @dev: net device structure
904  * Description: it initializes the driver's PHY state, and attaches the PHY
905  * to the mac driver.
906  *  Return value:
907  *  0 on success
908  */
909 static int stmmac_init_phy(struct net_device *dev)
910 {
911         struct stmmac_priv *priv = netdev_priv(dev);
912         struct phy_device *phydev;
913         char phy_id_fmt[MII_BUS_ID_SIZE + 3];
914         char bus_id[MII_BUS_ID_SIZE];
915         int interface = priv->plat->interface;
916         int max_speed = priv->plat->max_speed;
917         priv->oldlink = false;
918         priv->speed = SPEED_UNKNOWN;
919         priv->oldduplex = DUPLEX_UNKNOWN;
920
921         if (priv->plat->phy_node) {
922                 phydev = of_phy_connect(dev, priv->plat->phy_node,
923                                         &stmmac_adjust_link, 0, interface);
924         } else {
925                 snprintf(bus_id, MII_BUS_ID_SIZE, "stmmac-%x",
926                          priv->plat->bus_id);
927
928                 snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id,
929                          priv->plat->phy_addr);
930                 netdev_dbg(priv->dev, "%s: trying to attach to %s\n", __func__,
931                            phy_id_fmt);
932
933                 phydev = phy_connect(dev, phy_id_fmt, &stmmac_adjust_link,
934                                      interface);
935         }
936
937         if (IS_ERR_OR_NULL(phydev)) {
938                 netdev_err(priv->dev, "Could not attach to PHY\n");
939                 if (!phydev)
940                         return -ENODEV;
941
942                 return PTR_ERR(phydev);
943         }
944
945         /* Stop advertising 1000BASE-T capability if the interface is not GMII */
946         if ((interface == PHY_INTERFACE_MODE_MII) ||
947             (interface == PHY_INTERFACE_MODE_RMII) ||
948                 (max_speed < 1000 && max_speed > 0))
949                 phydev->advertising &= ~(SUPPORTED_1000baseT_Half |
950                                          SUPPORTED_1000baseT_Full);
951
952         /*
953          * Broken HW is sometimes missing the pull-up resistor on the
954          * MDIO line, which results in reads to non-existent devices returning
955          * 0 rather than 0xffff. Catch this here and treat 0 as a non-existent
956          * device as well.
957          * Note: phydev->phy_id is the result of reading the UID PHY registers.
958          */
959         if (!priv->plat->phy_node && phydev->phy_id == 0) {
960                 phy_disconnect(phydev);
961                 return -ENODEV;
962         }
963
964         /* stmmac_adjust_link will change this to PHY_IGNORE_INTERRUPT to avoid
965          * subsequent PHY polling; make sure we force a link transition if
966          * we have an UP/DOWN/UP transition
967          */
968         if (phydev->is_pseudo_fixed_link)
969                 phydev->irq = PHY_POLL;
970
971         phy_attached_info(phydev);
972         return 0;
973 }
974
975 static void stmmac_display_rx_rings(struct stmmac_priv *priv)
976 {
977         u32 rx_cnt = priv->plat->rx_queues_to_use;
978         void *head_rx;
979         u32 queue;
980
981         /* Display RX rings */
982         for (queue = 0; queue < rx_cnt; queue++) {
983                 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
984
985                 pr_info("\tRX Queue %u rings\n", queue);
986
987                 if (priv->extend_desc)
988                         head_rx = (void *)rx_q->dma_erx;
989                 else
990                         head_rx = (void *)rx_q->dma_rx;
991
992                 /* Display RX ring */
993                 priv->hw->desc->display_ring(head_rx, DMA_RX_SIZE, true);
994         }
995 }
996
997 static void stmmac_display_tx_rings(struct stmmac_priv *priv)
998 {
999         u32 tx_cnt = priv->plat->tx_queues_to_use;
1000         void *head_tx;
1001         u32 queue;
1002
1003         /* Display TX rings */
1004         for (queue = 0; queue < tx_cnt; queue++) {
1005                 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1006
1007                 pr_info("\tTX Queue %d rings\n", queue);
1008
1009                 if (priv->extend_desc)
1010                         head_tx = (void *)tx_q->dma_etx;
1011                 else
1012                         head_tx = (void *)tx_q->dma_tx;
1013
1014                 priv->hw->desc->display_ring(head_tx, DMA_TX_SIZE, false);
1015         }
1016 }
1017
1018 static void stmmac_display_rings(struct stmmac_priv *priv)
1019 {
1020         /* Display RX ring */
1021         stmmac_display_rx_rings(priv);
1022
1023         /* Display TX ring */
1024         stmmac_display_tx_rings(priv);
1025 }
1026
1027 static int stmmac_set_bfsize(int mtu, int bufsize)
1028 {
1029         int ret = bufsize;
1030
1031         if (mtu >= BUF_SIZE_4KiB)
1032                 ret = BUF_SIZE_8KiB;
1033         else if (mtu >= BUF_SIZE_2KiB)
1034                 ret = BUF_SIZE_4KiB;
1035         else if (mtu > DEFAULT_BUFSIZE)
1036                 ret = BUF_SIZE_2KiB;
1037         else
1038                 ret = DEFAULT_BUFSIZE;
1039
1040         return ret;
1041 }
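/*
 * Example of the mapping above (values follow directly from the constants
 * used): a standard 1500-byte MTU keeps DEFAULT_BUFSIZE (1536 bytes), an
 * MTU of 3000 selects BUF_SIZE_4KiB, and an MTU of 5000 selects
 * BUF_SIZE_8KiB.
 */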
1042
1043 /**
1044  * stmmac_clear_rx_descriptors - clear RX descriptors
1045  * @priv: driver private structure
1046  * @queue: RX queue index
1047  * Description: this function is called to clear the RX descriptors
1048  * whether basic or extended descriptors are used.
1049  */
1050 static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv, u32 queue)
1051 {
1052         struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1053         int i;
1054
1055         /* Clear the RX descriptors */
1056         for (i = 0; i < DMA_RX_SIZE; i++)
1057                 if (priv->extend_desc)
1058                         priv->hw->desc->init_rx_desc(&rx_q->dma_erx[i].basic,
1059                                                      priv->use_riwt, priv->mode,
1060                                                      (i == DMA_RX_SIZE - 1));
1061                 else
1062                         priv->hw->desc->init_rx_desc(&rx_q->dma_rx[i],
1063                                                      priv->use_riwt, priv->mode,
1064                                                      (i == DMA_RX_SIZE - 1));
1065 }
1066
1067 /**
1068  * stmmac_clear_tx_descriptors - clear tx descriptors
1069  * @priv: driver private structure
1070  * @queue: TX queue index.
1071  * Description: this function is called to clear the TX descriptors
1072  * whether basic or extended descriptors are used.
1073  */
1074 static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv, u32 queue)
1075 {
1076         struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1077         int i;
1078
1079         /* Clear the TX descriptors */
1080         for (i = 0; i < DMA_TX_SIZE; i++)
1081                 if (priv->extend_desc)
1082                         priv->hw->desc->init_tx_desc(&tx_q->dma_etx[i].basic,
1083                                                      priv->mode,
1084                                                      (i == DMA_TX_SIZE - 1));
1085                 else
1086                         priv->hw->desc->init_tx_desc(&tx_q->dma_tx[i],
1087                                                      priv->mode,
1088                                                      (i == DMA_TX_SIZE - 1));
1089 }
1090
1091 /**
1092  * stmmac_clear_descriptors - clear descriptors
1093  * @priv: driver private structure
1094  * Description: this function is called to clear the TX and RX descriptors
1095  * whether basic or extended descriptors are used.
1096  */
1097 static void stmmac_clear_descriptors(struct stmmac_priv *priv)
1098 {
1099         u32 rx_queue_cnt = priv->plat->rx_queues_to_use;
1100         u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1101         u32 queue;
1102
1103         /* Clear the RX descriptors */
1104         for (queue = 0; queue < rx_queue_cnt; queue++)
1105                 stmmac_clear_rx_descriptors(priv, queue);
1106
1107         /* Clear the TX descriptors */
1108         for (queue = 0; queue < tx_queue_cnt; queue++)
1109                 stmmac_clear_tx_descriptors(priv, queue);
1110 }
1111
1112 /**
1113  * stmmac_init_rx_buffers - init the RX descriptor buffer.
1114  * @priv: driver private structure
1115  * @p: descriptor pointer
1116  * @i: descriptor index
1117  * @flags: gfp flag
1118  * @queue: RX queue index
1119  * Description: this function is called to allocate a receive buffer, perform
1120  * the DMA mapping and init the descriptor.
1121  */
1122 static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
1123                                   int i, gfp_t flags, u32 queue)
1124 {
1125         struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1126         struct sk_buff *skb;
1127
1128         skb = __netdev_alloc_skb_ip_align(priv->dev, priv->dma_buf_sz, flags);
1129         if (!skb) {
1130                 netdev_err(priv->dev,
1131                            "%s: Rx init fails; skb is NULL\n", __func__);
1132                 return -ENOMEM;
1133         }
1134         rx_q->rx_skbuff[i] = skb;
1135         rx_q->rx_skbuff_dma[i] = dma_map_single(priv->device, skb->data,
1136                                                 priv->dma_buf_sz,
1137                                                 DMA_FROM_DEVICE);
1138         if (dma_mapping_error(priv->device, rx_q->rx_skbuff_dma[i])) {
1139                 netdev_err(priv->dev, "%s: DMA mapping error\n", __func__);
1140                 dev_kfree_skb_any(skb);
1141                 return -EINVAL;
1142         }
1143
1144         if (priv->synopsys_id >= DWMAC_CORE_4_00)
1145                 p->des0 = cpu_to_le32(rx_q->rx_skbuff_dma[i]);
1146         else
1147                 p->des2 = cpu_to_le32(rx_q->rx_skbuff_dma[i]);
1148
1149         if ((priv->hw->mode->init_desc3) &&
1150             (priv->dma_buf_sz == BUF_SIZE_16KiB))
1151                 priv->hw->mode->init_desc3(p);
1152
1153         return 0;
1154 }
1155
1156 /**
1157  * stmmac_free_rx_buffer - free RX dma buffers
1158  * @priv: private structure
1159  * @queue: RX queue index
1160  * @i: buffer index.
1161  */
1162 static void stmmac_free_rx_buffer(struct stmmac_priv *priv, u32 queue, int i)
1163 {
1164         struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1165
1166         if (rx_q->rx_skbuff[i]) {
1167                 dma_unmap_single(priv->device, rx_q->rx_skbuff_dma[i],
1168                                  priv->dma_buf_sz, DMA_FROM_DEVICE);
1169                 dev_kfree_skb_any(rx_q->rx_skbuff[i]);
1170         }
1171         rx_q->rx_skbuff[i] = NULL;
1172 }
1173
1174 /**
1175  * stmmac_free_tx_buffer - free TX dma buffers
1176  * @priv: private structure
1177  * @queue: TX queue index
1178  * @i: buffer index.
1179  */
1180 static void stmmac_free_tx_buffer(struct stmmac_priv *priv, u32 queue, int i)
1181 {
1182         struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1183
1184         if (tx_q->tx_skbuff_dma[i].buf) {
1185                 if (tx_q->tx_skbuff_dma[i].map_as_page)
1186                         dma_unmap_page(priv->device,
1187                                        tx_q->tx_skbuff_dma[i].buf,
1188                                        tx_q->tx_skbuff_dma[i].len,
1189                                        DMA_TO_DEVICE);
1190                 else
1191                         dma_unmap_single(priv->device,
1192                                          tx_q->tx_skbuff_dma[i].buf,
1193                                          tx_q->tx_skbuff_dma[i].len,
1194                                          DMA_TO_DEVICE);
1195         }
1196
1197         if (tx_q->tx_skbuff[i]) {
1198                 dev_kfree_skb_any(tx_q->tx_skbuff[i]);
1199                 tx_q->tx_skbuff[i] = NULL;
1200                 tx_q->tx_skbuff_dma[i].buf = 0;
1201                 tx_q->tx_skbuff_dma[i].map_as_page = false;
1202         }
1203 }
1204
1205 /**
1206  * init_dma_rx_desc_rings - init the RX descriptor rings
1207  * @dev: net device structure
1208  * @flags: gfp flag.
1209  * Description: this function initializes the DMA RX descriptors
1210  * and allocates the socket buffers. It supports the chained and ring
1211  * modes.
1212  */
1213 static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags)
1214 {
1215         struct stmmac_priv *priv = netdev_priv(dev);
1216         u32 rx_count = priv->plat->rx_queues_to_use;
1217         unsigned int bfsize = 0;
1218         int ret = -ENOMEM;
1219         int queue;
1220         int i;
1221
1222         if (priv->hw->mode->set_16kib_bfsize)
1223                 bfsize = priv->hw->mode->set_16kib_bfsize(dev->mtu);
1224
1225         if (bfsize < BUF_SIZE_16KiB)
1226                 bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz);
1227
1228         priv->dma_buf_sz = bfsize;
1229
1230         /* RX INITIALIZATION */
1231         netif_dbg(priv, probe, priv->dev,
1232                   "SKB addresses:\nskb\t\tskb data\tdma data\n");
1233
1234         for (queue = 0; queue < rx_count; queue++) {
1235                 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1236
1237                 netif_dbg(priv, probe, priv->dev,
1238                           "(%s) dma_rx_phy=0x%08x\n", __func__,
1239                           (u32)rx_q->dma_rx_phy);
1240
1241                 for (i = 0; i < DMA_RX_SIZE; i++) {
1242                         struct dma_desc *p;
1243
1244                         if (priv->extend_desc)
1245                                 p = &((rx_q->dma_erx + i)->basic);
1246                         else
1247                                 p = rx_q->dma_rx + i;
1248
1249                         ret = stmmac_init_rx_buffers(priv, p, i, flags,
1250                                                      queue);
1251                         if (ret)
1252                                 goto err_init_rx_buffers;
1253
1254                         netif_dbg(priv, probe, priv->dev, "[%p]\t[%p]\t[%x]\n",
1255                                   rx_q->rx_skbuff[i], rx_q->rx_skbuff[i]->data,
1256                                   (unsigned int)rx_q->rx_skbuff_dma[i]);
1257                 }
1258
1259                 rx_q->cur_rx = 0;
1260                 rx_q->dirty_rx = (unsigned int)(i - DMA_RX_SIZE);
1261
1262                 stmmac_clear_rx_descriptors(priv, queue);
1263
1264                 /* Setup the chained descriptor addresses */
1265                 if (priv->mode == STMMAC_CHAIN_MODE) {
1266                         if (priv->extend_desc)
1267                                 priv->hw->mode->init(rx_q->dma_erx,
1268                                                      rx_q->dma_rx_phy,
1269                                                      DMA_RX_SIZE, 1);
1270                         else
1271                                 priv->hw->mode->init(rx_q->dma_rx,
1272                                                      rx_q->dma_rx_phy,
1273                                                      DMA_RX_SIZE, 0);
1274                 }
1275         }
1276
1277         buf_sz = bfsize;
1278
1279         return 0;
1280
1281 err_init_rx_buffers:
1282         while (queue >= 0) {
1283                 while (--i >= 0)
1284                         stmmac_free_rx_buffer(priv, queue, i);
1285
1286                 if (queue == 0)
1287                         break;
1288
1289                 i = DMA_RX_SIZE;
1290                 queue--;
1291         }
1292
1293         return ret;
1294 }
1295
1296 /**
1297  * init_dma_tx_desc_rings - init the TX descriptor rings
1298  * @dev: net device structure.
1299  * Description: this function initializes the DMA TX descriptors
1300  * and resets the TX buffer bookkeeping. It supports the chained and ring
1301  * modes.
1302  */
1303 static int init_dma_tx_desc_rings(struct net_device *dev)
1304 {
1305         struct stmmac_priv *priv = netdev_priv(dev);
1306         u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1307         u32 queue;
1308         int i;
1309
1310         for (queue = 0; queue < tx_queue_cnt; queue++) {
1311                 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1312
1313                 netif_dbg(priv, probe, priv->dev,
1314                           "(%s) dma_tx_phy=0x%08x\n", __func__,
1315                          (u32)tx_q->dma_tx_phy);
1316
1317                 /* Setup the chained descriptor addresses */
1318                 if (priv->mode == STMMAC_CHAIN_MODE) {
1319                         if (priv->extend_desc)
1320                                 priv->hw->mode->init(tx_q->dma_etx,
1321                                                      tx_q->dma_tx_phy,
1322                                                      DMA_TX_SIZE, 1);
1323                         else
1324                                 priv->hw->mode->init(tx_q->dma_tx,
1325                                                      tx_q->dma_tx_phy,
1326                                                      DMA_TX_SIZE, 0);
1327                 }
1328
1329                 for (i = 0; i < DMA_TX_SIZE; i++) {
1330                         struct dma_desc *p;
1331                         if (priv->extend_desc)
1332                                 p = &((tx_q->dma_etx + i)->basic);
1333                         else
1334                                 p = tx_q->dma_tx + i;
1335
1336                         if (priv->synopsys_id >= DWMAC_CORE_4_00) {
1337                                 p->des0 = 0;
1338                                 p->des1 = 0;
1339                                 p->des2 = 0;
1340                                 p->des3 = 0;
1341                         } else {
1342                                 p->des2 = 0;
1343                         }
1344
1345                         tx_q->tx_skbuff_dma[i].buf = 0;
1346                         tx_q->tx_skbuff_dma[i].map_as_page = false;
1347                         tx_q->tx_skbuff_dma[i].len = 0;
1348                         tx_q->tx_skbuff_dma[i].last_segment = false;
1349                         tx_q->tx_skbuff[i] = NULL;
1350                 }
1351
1352                 tx_q->dirty_tx = 0;
1353                 tx_q->cur_tx = 0;
1354
1355                 netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
1356         }
1357
1358         return 0;
1359 }
1360
1361 /**
1362  * init_dma_desc_rings - init the RX/TX descriptor rings
1363  * @dev: net device structure
1364  * @flags: gfp flag.
1365  * Description: this function initializes the DMA RX/TX descriptors
1366  * and allocates the socket buffers. It supports the chained and ring
1367  * modes.
1368  */
1369 static int init_dma_desc_rings(struct net_device *dev, gfp_t flags)
1370 {
1371         struct stmmac_priv *priv = netdev_priv(dev);
1372         int ret;
1373
1374         ret = init_dma_rx_desc_rings(dev, flags);
1375         if (ret)
1376                 return ret;
1377
1378         ret = init_dma_tx_desc_rings(dev);
1379
1380         stmmac_clear_descriptors(priv);
1381
1382         if (netif_msg_hw(priv))
1383                 stmmac_display_rings(priv);
1384
1385         return ret;
1386 }
1387
1388 /**
1389  * dma_free_rx_skbufs - free RX dma buffers
1390  * @priv: private structure
1391  * @queue: RX queue index
1392  */
1393 static void dma_free_rx_skbufs(struct stmmac_priv *priv, u32 queue)
1394 {
1395         int i;
1396
1397         for (i = 0; i < DMA_RX_SIZE; i++)
1398                 stmmac_free_rx_buffer(priv, queue, i);
1399 }
1400
1401 /**
1402  * dma_free_tx_skbufs - free TX dma buffers
1403  * @priv: private structure
1404  * @queue: TX queue index
1405  */
1406 static void dma_free_tx_skbufs(struct stmmac_priv *priv, u32 queue)
1407 {
1408         int i;
1409
1410         for (i = 0; i < DMA_TX_SIZE; i++)
1411                 stmmac_free_tx_buffer(priv, queue, i);
1412 }
1413
1414 /**
1415  * free_dma_rx_desc_resources - free RX dma desc resources
1416  * @priv: private structure
1417  */
1418 static void free_dma_rx_desc_resources(struct stmmac_priv *priv)
1419 {
1420         u32 rx_count = priv->plat->rx_queues_to_use;
1421         u32 queue;
1422
1423         /* Free RX queue resources */
1424         for (queue = 0; queue < rx_count; queue++) {
1425                 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1426
1427                 /* Release the DMA RX socket buffers */
1428                 dma_free_rx_skbufs(priv, queue);
1429
1430                 /* Free DMA regions of consistent memory previously allocated */
1431                 if (!priv->extend_desc)
1432                         dma_free_coherent(priv->device,
1433                                           DMA_RX_SIZE * sizeof(struct dma_desc),
1434                                           rx_q->dma_rx, rx_q->dma_rx_phy);
1435                 else
1436                         dma_free_coherent(priv->device, DMA_RX_SIZE *
1437                                           sizeof(struct dma_extended_desc),
1438                                           rx_q->dma_erx, rx_q->dma_rx_phy);
1439
1440                 kfree(rx_q->rx_skbuff_dma);
1441                 kfree(rx_q->rx_skbuff);
1442         }
1443 }
1444
1445 /**
1446  * free_dma_tx_desc_resources - free TX dma desc resources
1447  * @priv: private structure
1448  */
1449 static void free_dma_tx_desc_resources(struct stmmac_priv *priv)
1450 {
1451         u32 tx_count = priv->plat->tx_queues_to_use;
1452         u32 queue;
1453
1454         /* Free TX queue resources */
1455         for (queue = 0; queue < tx_count; queue++) {
1456                 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1457
1458                 /* Release the DMA TX socket buffers */
1459                 dma_free_tx_skbufs(priv, queue);
1460
1461                 /* Free DMA regions of consistent memory previously allocated */
1462                 if (!priv->extend_desc)
1463                         dma_free_coherent(priv->device,
1464                                           DMA_TX_SIZE * sizeof(struct dma_desc),
1465                                           tx_q->dma_tx, tx_q->dma_tx_phy);
1466                 else
1467                         dma_free_coherent(priv->device, DMA_TX_SIZE *
1468                                           sizeof(struct dma_extended_desc),
1469                                           tx_q->dma_etx, tx_q->dma_tx_phy);
1470
1471                 kfree(tx_q->tx_skbuff_dma);
1472                 kfree(tx_q->tx_skbuff);
1473         }
1474 }
1475
1476 /**
1477  * alloc_dma_rx_desc_resources - alloc RX resources.
1478  * @priv: private structure
1479  * Description: according to which descriptor can be used (extend or basic)
1480  * this function allocates the resources for the RX path: the descriptor
1481  * rings and the arrays used to track the receive socket buffers and their
1482  * DMA mappings.
1483  */
1484 static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv)
1485 {
1486         u32 rx_count = priv->plat->rx_queues_to_use;
1487         int ret = -ENOMEM;
1488         u32 queue;
1489
1490         /* RX queues buffers and DMA */
1491         for (queue = 0; queue < rx_count; queue++) {
1492                 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1493
1494                 rx_q->queue_index = queue;
1495                 rx_q->priv_data = priv;
1496
1497                 rx_q->rx_skbuff_dma = kmalloc_array(DMA_RX_SIZE,
1498                                                     sizeof(dma_addr_t),
1499                                                     GFP_KERNEL);
1500                 if (!rx_q->rx_skbuff_dma)
1501                         goto err_dma;
1502
1503                 rx_q->rx_skbuff = kmalloc_array(DMA_RX_SIZE,
1504                                                 sizeof(struct sk_buff *),
1505                                                 GFP_KERNEL);
1506                 if (!rx_q->rx_skbuff)
1507                         goto err_dma;
1508
1509                 if (priv->extend_desc) {
1510                         rx_q->dma_erx = dma_zalloc_coherent(priv->device,
1511                                                             DMA_RX_SIZE *
1512                                                             sizeof(struct
1513                                                             dma_extended_desc),
1514                                                             &rx_q->dma_rx_phy,
1515                                                             GFP_KERNEL);
1516                         if (!rx_q->dma_erx)
1517                                 goto err_dma;
1518
1519                 } else {
1520                         rx_q->dma_rx = dma_zalloc_coherent(priv->device,
1521                                                            DMA_RX_SIZE *
1522                                                            sizeof(struct
1523                                                            dma_desc),
1524                                                            &rx_q->dma_rx_phy,
1525                                                            GFP_KERNEL);
1526                         if (!rx_q->dma_rx)
1527                                 goto err_dma;
1528                 }
1529         }
1530
1531         return 0;
1532
1533 err_dma:
1534         free_dma_rx_desc_resources(priv);
1535
1536         return ret;
1537 }
1538
1539 /**
1540  * alloc_dma_tx_desc_resources - alloc TX resources.
1541  * @priv: private structure
1542  * Description: according to which descriptor can be used (extend or basic)
1543  * this function allocates the resources for the TX path: the descriptor
1544  * rings and the arrays used to track the transmit socket buffers and their
1545  * DMA mappings.
1546  */
1547 static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv)
1548 {
1549         u32 tx_count = priv->plat->tx_queues_to_use;
1550         int ret = -ENOMEM;
1551         u32 queue;
1552
1553         /* TX queues buffers and DMA */
1554         for (queue = 0; queue < tx_count; queue++) {
1555                 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1556
1557                 tx_q->queue_index = queue;
1558                 tx_q->priv_data = priv;
1559
1560                 tx_q->tx_skbuff_dma = kmalloc_array(DMA_TX_SIZE,
1561                                                     sizeof(*tx_q->tx_skbuff_dma),
1562                                                     GFP_KERNEL);
1563                 if (!tx_q->tx_skbuff_dma)
1564                         goto err_dma;
1565
1566                 tx_q->tx_skbuff = kmalloc_array(DMA_TX_SIZE,
1567                                                 sizeof(struct sk_buff *),
1568                                                 GFP_KERNEL);
1569                 if (!tx_q->tx_skbuff)
1570                         goto err_dma;
1571
1572                 if (priv->extend_desc) {
1573                         tx_q->dma_etx = dma_zalloc_coherent(priv->device,
1574                                                             DMA_TX_SIZE *
1575                                                             sizeof(struct
1576                                                             dma_extended_desc),
1577                                                             &tx_q->dma_tx_phy,
1578                                                             GFP_KERNEL);
1579                         if (!tx_q->dma_etx)
1580                                 goto err_dma;
1581                 } else {
1582                         tx_q->dma_tx = dma_zalloc_coherent(priv->device,
1583                                                            DMA_TX_SIZE *
1584                                                            sizeof(struct
1585                                                                   dma_desc),
1586                                                            &tx_q->dma_tx_phy,
1587                                                            GFP_KERNEL);
1588                         if (!tx_q->dma_tx)
1589                                 goto err_dma;
1590                 }
1591         }
1592
1593         return 0;
1594
1595 err_dma:
1596         free_dma_tx_desc_resources(priv);
1597
1598         return ret;
1599 }
1600
1601 /**
1602  * alloc_dma_desc_resources - alloc TX/RX resources.
1603  * @priv: private structure
1604  * Description: according to which descriptor can be used (extend or basic)
1605  * this function allocates the resources for TX and RX paths. In case of
1606  * reception, for example, it pre-allocates the RX socket buffers in order
1607  * to allow the zero-copy mechanism.
1608  */
1609 static int alloc_dma_desc_resources(struct stmmac_priv *priv)
1610 {
1611         /* RX Allocation */
1612         int ret = alloc_dma_rx_desc_resources(priv);
1613
1614         if (ret)
1615                 return ret;
1616
1617         ret = alloc_dma_tx_desc_resources(priv);
1618
1619         return ret;
1620 }
1621
1622 /**
1623  * free_dma_desc_resources - free dma desc resources
1624  * @priv: private structure
1625  */
1626 static void free_dma_desc_resources(struct stmmac_priv *priv)
1627 {
1628         /* Release the DMA RX socket buffers */
1629         free_dma_rx_desc_resources(priv);
1630
1631         /* Release the DMA TX socket buffers */
1632         free_dma_tx_desc_resources(priv);
1633 }
1634
1635 /**
1636  *  stmmac_mac_enable_rx_queues - Enable MAC rx queues
1637  *  @priv: driver private structure
1638  *  Description: It is used for enabling the rx queues in the MAC
1639  */
1640 static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
1641 {
1642         u32 rx_queues_count = priv->plat->rx_queues_to_use;
1643         int queue;
1644         u8 mode;
1645
1646         for (queue = 0; queue < rx_queues_count; queue++) {
1647                 mode = priv->plat->rx_queues_cfg[queue].mode_to_use;
1648                 priv->hw->mac->rx_queue_enable(priv->hw, mode, queue);
1649         }
1650 }
1651
1652 /**
1653  * stmmac_start_rx_dma - start RX DMA channel
1654  * @priv: driver private structure
1655  * @chan: RX channel index
1656  * Description:
1657  * This starts a RX DMA channel
1658  */
1659 static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan)
1660 {
1661         netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan);
1662         priv->hw->dma->start_rx(priv->ioaddr, chan);
1663 }
1664
1665 /**
1666  * stmmac_start_tx_dma - start TX DMA channel
1667  * @priv: driver private structure
1668  * @chan: TX channel index
1669  * Description:
1670  * This starts a TX DMA channel
1671  */
1672 static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan)
1673 {
1674         netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan);
1675         priv->hw->dma->start_tx(priv->ioaddr, chan);
1676 }
1677
1678 /**
1679  * stmmac_stop_rx_dma - stop RX DMA channel
1680  * @priv: driver private structure
1681  * @chan: RX channel index
1682  * Description:
1683  * This stops a RX DMA channel
1684  */
1685 static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan)
1686 {
1687         netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan);
1688         priv->hw->dma->stop_rx(priv->ioaddr, chan);
1689 }
1690
1691 /**
1692  * stmmac_stop_tx_dma - stop TX DMA channel
1693  * @priv: driver private structure
1694  * @chan: TX channel index
1695  * Description:
1696  * This stops a TX DMA channel
1697  */
1698 static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan)
1699 {
1700         netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan);
1701         priv->hw->dma->stop_tx(priv->ioaddr, chan);
1702 }
1703
1704 /**
1705  * stmmac_start_all_dma - start all RX and TX DMA channels
1706  * @priv: driver private structure
1707  * Description:
1708  * This starts all the RX and TX DMA channels
1709  */
1710 static void stmmac_start_all_dma(struct stmmac_priv *priv)
1711 {
1712         u32 rx_channels_count = priv->plat->rx_queues_to_use;
1713         u32 tx_channels_count = priv->plat->tx_queues_to_use;
1714         u32 chan = 0;
1715
1716         for (chan = 0; chan < rx_channels_count; chan++)
1717                 stmmac_start_rx_dma(priv, chan);
1718
1719         for (chan = 0; chan < tx_channels_count; chan++)
1720                 stmmac_start_tx_dma(priv, chan);
1721 }
1722
1723 /**
1724  * stmmac_stop_all_dma - stop all RX and TX DMA channels
1725  * @priv: driver private structure
1726  * Description:
1727  * This stops the RX and TX DMA channels
1728  */
1729 static void stmmac_stop_all_dma(struct stmmac_priv *priv)
1730 {
1731         u32 rx_channels_count = priv->plat->rx_queues_to_use;
1732         u32 tx_channels_count = priv->plat->tx_queues_to_use;
1733         u32 chan = 0;
1734
1735         for (chan = 0; chan < rx_channels_count; chan++)
1736                 stmmac_stop_rx_dma(priv, chan);
1737
1738         for (chan = 0; chan < tx_channels_count; chan++)
1739                 stmmac_stop_tx_dma(priv, chan);
1740 }
1741
1742 /**
1743  *  stmmac_dma_operation_mode - HW DMA operation mode
1744  *  @priv: driver private structure
1745  *  Description: it is used for configuring the DMA operation mode register in
1746  *  order to program the tx/rx DMA thresholds or Store-And-Forward mode.
1747  */
1748 static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
1749 {
1750         u32 rx_channels_count = priv->plat->rx_queues_to_use;
1751         u32 tx_channels_count = priv->plat->tx_queues_to_use;
1752         int rxfifosz = priv->plat->rx_fifo_size;
1753         int txfifosz = priv->plat->tx_fifo_size;
1754         u32 txmode = 0;
1755         u32 rxmode = 0;
1756         u32 chan = 0;
1757
1758         if (rxfifosz == 0)
1759                 rxfifosz = priv->dma_cap.rx_fifo_size;
1760         if (txfifosz == 0)
1761                 txfifosz = priv->dma_cap.tx_fifo_size;
1762
1763         /* Adjust for real per queue fifo size */
1764         rxfifosz /= rx_channels_count;
1765         txfifosz /= tx_channels_count;
1766
1767         if (priv->plat->force_thresh_dma_mode) {
1768                 txmode = tc;
1769                 rxmode = tc;
1770         } else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
1771                 /*
1772                  * In case of GMAC, SF mode can be enabled
1773                  * to perform the TX COE in HW. This depends on:
1774                  * 1) TX COE being actually supported;
1775                  * 2) there being no buggy Jumbo frame support
1776                  *    that requires not inserting the csum in the TDES.
1777                  */
1778                 txmode = SF_DMA_MODE;
1779                 rxmode = SF_DMA_MODE;
1780                 priv->xstats.threshold = SF_DMA_MODE;
1781         } else {
1782                 txmode = tc;
1783                 rxmode = SF_DMA_MODE;
1784         }
1785
1786         /* configure all channels */
1787         if (priv->synopsys_id >= DWMAC_CORE_4_00) {
1788                 for (chan = 0; chan < rx_channels_count; chan++)
1789                         priv->hw->dma->dma_rx_mode(priv->ioaddr, rxmode, chan,
1790                                                    rxfifosz);
1791
1792                 for (chan = 0; chan < tx_channels_count; chan++)
1793                         priv->hw->dma->dma_tx_mode(priv->ioaddr, txmode, chan,
1794                                                    txfifosz);
1795         } else {
1796                 priv->hw->dma->dma_mode(priv->ioaddr, txmode, rxmode,
1797                                         rxfifosz);
1798         }
1799 }
1800
1801 /**
1802  * stmmac_tx_clean - to manage the transmission completion
1803  * @priv: driver private structure
1804  * @queue: TX queue index
1805  * Description: it reclaims the transmit resources after transmission completes.
1806  */
1807 static void stmmac_tx_clean(struct stmmac_priv *priv, u32 queue)
1808 {
1809         struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1810         unsigned int bytes_compl = 0, pkts_compl = 0;
1811         unsigned int entry = tx_q->dirty_tx;
1812
1813         netif_tx_lock(priv->dev);
1814
1815         priv->xstats.tx_clean++;
1816
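        /* Walk the ring from the last cleaned entry up to the one currently
         * being queued, releasing buffers for descriptors the DMA has
         * completed.
         */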
1817         while (entry != tx_q->cur_tx) {
1818                 struct sk_buff *skb = tx_q->tx_skbuff[entry];
1819                 struct dma_desc *p;
1820                 int status;
1821
1822                 if (priv->extend_desc)
1823                         p = (struct dma_desc *)(tx_q->dma_etx + entry);
1824                 else
1825                         p = tx_q->dma_tx + entry;
1826
1827                 status = priv->hw->desc->tx_status(&priv->dev->stats,
1828                                                       &priv->xstats, p,
1829                                                       priv->ioaddr);
1830                 /* Check if the descriptor is owned by the DMA */
1831                 if (unlikely(status & tx_dma_own))
1832                         break;
1833
1834                 /* Just consider the last segment and ...*/
1835                 if (likely(!(status & tx_not_ls))) {
1836                         /* ... verify the status error condition */
1837                         if (unlikely(status & tx_err)) {
1838                                 priv->dev->stats.tx_errors++;
1839                         } else {
1840                                 priv->dev->stats.tx_packets++;
1841                                 priv->xstats.tx_pkt_n++;
1842                         }
1843                         stmmac_get_tx_hwtstamp(priv, p, skb);
1844                 }
1845
1846                 if (likely(tx_q->tx_skbuff_dma[entry].buf)) {
1847                         if (tx_q->tx_skbuff_dma[entry].map_as_page)
1848                                 dma_unmap_page(priv->device,
1849                                                tx_q->tx_skbuff_dma[entry].buf,
1850                                                tx_q->tx_skbuff_dma[entry].len,
1851                                                DMA_TO_DEVICE);
1852                         else
1853                                 dma_unmap_single(priv->device,
1854                                                  tx_q->tx_skbuff_dma[entry].buf,
1855                                                  tx_q->tx_skbuff_dma[entry].len,
1856                                                  DMA_TO_DEVICE);
1857                         tx_q->tx_skbuff_dma[entry].buf = 0;
1858                         tx_q->tx_skbuff_dma[entry].len = 0;
1859                         tx_q->tx_skbuff_dma[entry].map_as_page = false;
1860                 }
1861
1862                 if (priv->hw->mode->clean_desc3)
1863                         priv->hw->mode->clean_desc3(tx_q, p);
1864
1865                 tx_q->tx_skbuff_dma[entry].last_segment = false;
1866                 tx_q->tx_skbuff_dma[entry].is_jumbo = false;
1867
1868                 if (likely(skb != NULL)) {
1869                         pkts_compl++;
1870                         bytes_compl += skb->len;
1871                         dev_consume_skb_any(skb);
1872                         tx_q->tx_skbuff[entry] = NULL;
1873                 }
1874
1875                 priv->hw->desc->release_tx_desc(p, priv->mode);
1876
1877                 entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
1878         }
1879         tx_q->dirty_tx = entry;
1880
1881         netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue),
1882                                   pkts_compl, bytes_compl);
1883
1884         if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev,
1885                                                                 queue))) &&
1886             stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH) {
1887
1888                 netif_dbg(priv, tx_done, priv->dev,
1889                           "%s: restart transmit\n", __func__);
1890                 netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue));
1891         }
1892
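        /* With some TX resources reclaimed, try to (re)enter the EEE
         * low-power idle state and re-arm its control timer.
         */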
1893         if ((priv->eee_enabled) && (!priv->tx_path_in_lpi_mode)) {
1894                 stmmac_enable_eee_mode(priv);
1895                 mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
1896         }
1897         netif_tx_unlock(priv->dev);
1898 }
1899
1900 static inline void stmmac_enable_dma_irq(struct stmmac_priv *priv, u32 chan)
1901 {
1902         priv->hw->dma->enable_dma_irq(priv->ioaddr, chan);
1903 }
1904
1905 static inline void stmmac_disable_dma_irq(struct stmmac_priv *priv, u32 chan)
1906 {
1907         priv->hw->dma->disable_dma_irq(priv->ioaddr, chan);
1908 }
1909
1910 /**
1911  * stmmac_tx_err - to manage the tx error
1912  * @priv: driver private structure
1913  * @chan: channel index
1914  * Description: it cleans the descriptors and restarts the transmission
1915  * in case of transmission errors.
1916  */
1917 static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
1918 {
1919         struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
1920         int i;
1921
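        /* Stop the queue and its DMA channel, drop any pending buffers,
         * reinitialize the descriptor ring and restart the DMA so the
         * queue can make progress again.
         */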
1922         netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan));
1923
1924         stmmac_stop_tx_dma(priv, chan);
1925         dma_free_tx_skbufs(priv, chan);
1926         for (i = 0; i < DMA_TX_SIZE; i++)
1927                 if (priv->extend_desc)
1928                         priv->hw->desc->init_tx_desc(&tx_q->dma_etx[i].basic,
1929                                                      priv->mode,
1930                                                      (i == DMA_TX_SIZE - 1));
1931                 else
1932                         priv->hw->desc->init_tx_desc(&tx_q->dma_tx[i],
1933                                                      priv->mode,
1934                                                      (i == DMA_TX_SIZE - 1));
1935         tx_q->dirty_tx = 0;
1936         tx_q->cur_tx = 0;
1937         netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, chan));
1938         stmmac_start_tx_dma(priv, chan);
1939
1940         priv->dev->stats.tx_errors++;
1941         netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan));
1942 }
1943
1944 /**
1945  *  stmmac_set_dma_operation_mode - Set DMA operation mode by channel
1946  *  @priv: driver private structure
1947  *  @txmode: TX operating mode
1948  *  @rxmode: RX operating mode
1949  *  @chan: channel index
1950  *  Description: it is used for configuring the DMA operation mode at
1951  *  runtime in order to program the tx/rx DMA thresholds or Store-And-Forward
1952  *  mode.
1953  */
1954 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
1955                                           u32 rxmode, u32 chan)
1956 {
1957         u32 rx_channels_count = priv->plat->rx_queues_to_use;
1958         u32 tx_channels_count = priv->plat->tx_queues_to_use;
1959         int rxfifosz = priv->plat->rx_fifo_size;
1960         int txfifosz = priv->plat->tx_fifo_size;
1961
1962         if (rxfifosz == 0)
1963                 rxfifosz = priv->dma_cap.rx_fifo_size;
1964         if (txfifosz == 0)
1965                 txfifosz = priv->dma_cap.tx_fifo_size;
1966
1967         /* Adjust for real per queue fifo size */
1968         rxfifosz /= rx_channels_count;
1969         txfifosz /= tx_channels_count;
1970
1971         if (priv->synopsys_id >= DWMAC_CORE_4_00) {
1972                 priv->hw->dma->dma_rx_mode(priv->ioaddr, rxmode, chan,
1973                                            rxfifosz);
1974                 priv->hw->dma->dma_tx_mode(priv->ioaddr, txmode, chan,
1975                                            txfifosz);
1976         } else {
1977                 priv->hw->dma->dma_mode(priv->ioaddr, txmode, rxmode,
1978                                         rxfifosz);
1979         }
1980 }
1981
1982 /**
1983  * stmmac_dma_interrupt - DMA ISR
1984  * @priv: driver private structure
1985  * Description: this is the DMA ISR. It is called by the main ISR.
1986  * It calls the dwmac dma routine and schedules the poll method when there
1987  * is work to be done.
1988  */
1989 static void stmmac_dma_interrupt(struct stmmac_priv *priv)
1990 {
1991         u32 tx_channel_count = priv->plat->tx_queues_to_use;
1992         int status;
1993         u32 chan;
1994
1995         for (chan = 0; chan < tx_channel_count; chan++) {
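                /* RX and TX channel 'chan' share the same NAPI instance,
                 * which is hosted in the RX queue structure.
                 */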
1996                 struct stmmac_rx_queue *rx_q = &priv->rx_queue[chan];
1997
1998                 status = priv->hw->dma->dma_interrupt(priv->ioaddr,
1999                                                       &priv->xstats, chan);
2000                 if (likely((status & handle_rx)) || (status & handle_tx)) {
2001                         if (likely(napi_schedule_prep(&rx_q->napi))) {
2002                                 stmmac_disable_dma_irq(priv, chan);
2003                                 __napi_schedule(&rx_q->napi);
2004                         }
2005                 }
2006
2007                 if (unlikely(status & tx_hard_error_bump_tc)) {
2008                         /* Try to bump up the dma threshold on this failure */
2009                         if (unlikely(priv->xstats.threshold != SF_DMA_MODE) &&
2010                             (tc <= 256)) {
2011                                 tc += 64;
2012                                 if (priv->plat->force_thresh_dma_mode)
2013                                         stmmac_set_dma_operation_mode(priv,
2014                                                                       tc,
2015                                                                       tc,
2016                                                                       chan);
2017                                 else
2018                                         stmmac_set_dma_operation_mode(priv,
2019                                                                     tc,
2020                                                                     SF_DMA_MODE,
2021                                                                     chan);
2022                                 priv->xstats.threshold = tc;
2023                         }
2024                 } else if (unlikely(status == tx_hard_error)) {
2025                         stmmac_tx_err(priv, chan);
2026                 }
2027         }
2028 }
2029
2030 /**
2031  * stmmac_mmc_setup - setup the Mac Management Counters (MMC)
2032  * @priv: driver private structure
2033  * Description: this masks the MMC irq since the counters are managed in SW.
2034  */
2035 static void stmmac_mmc_setup(struct stmmac_priv *priv)
2036 {
2037         unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
2038                             MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
2039
2040         if (priv->synopsys_id >= DWMAC_CORE_4_00) {
2041                 priv->ptpaddr = priv->ioaddr + PTP_GMAC4_OFFSET;
2042                 priv->mmcaddr = priv->ioaddr + MMC_GMAC4_OFFSET;
2043         } else {
2044                 priv->ptpaddr = priv->ioaddr + PTP_GMAC3_X_OFFSET;
2045                 priv->mmcaddr = priv->ioaddr + MMC_GMAC3_X_OFFSET;
2046         }
2047
2048         dwmac_mmc_intr_all_mask(priv->mmcaddr);
2049
2050         if (priv->dma_cap.rmon) {
2051                 dwmac_mmc_ctrl(priv->mmcaddr, mode);
2052                 memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
2053         } else
2054                 netdev_info(priv->dev, "No MAC Management Counters available\n");
2055 }
2056
2057 /**
2058  * stmmac_selec_desc_mode - to select among: normal/alternate/extended descriptors
2059  * @priv: driver private structure
2060  * Description: select the Enhanced/Alternate or Normal descriptors.
2061  * In case of Enhanced/Alternate, it checks if the extended descriptors are
2062  * supported by the HW capability register.
2063  */
2064 static void stmmac_selec_desc_mode(struct stmmac_priv *priv)
2065 {
2066         if (priv->plat->enh_desc) {
2067                 dev_info(priv->device, "Enhanced/Alternate descriptors\n");
2068
2069                 /* GMAC older than 3.50 has no extended descriptors */
2070                 if (priv->synopsys_id >= DWMAC_CORE_3_50) {
2071                         dev_info(priv->device, "Enabled extended descriptors\n");
2072                         priv->extend_desc = 1;
2073                 } else
2074                         dev_warn(priv->device, "Extended descriptors not supported\n");
2075
2076                 priv->hw->desc = &enh_desc_ops;
2077         } else {
2078                 dev_info(priv->device, "Normal descriptors\n");
2079                 priv->hw->desc = &ndesc_ops;
2080         }
2081 }
2082
2083 /**
2084  * stmmac_get_hw_features - get MAC capabilities from the HW cap. register.
2085  * @priv: driver private structure
2086  * Description:
2087  *  new GMAC chip generations have a register to indicate the
2088  *  presence of the optional features/functions.
2089  *  This can also be used to override the values passed through the
2090  *  platform, which is necessary for old MAC10/100 and GMAC chips.
2091  */
2092 static int stmmac_get_hw_features(struct stmmac_priv *priv)
2093 {
2094         u32 ret = 0;
2095
2096         if (priv->hw->dma->get_hw_feature) {
2097                 priv->hw->dma->get_hw_feature(priv->ioaddr,
2098                                               &priv->dma_cap);
2099                 ret = 1;
2100         }
2101
2102         return ret;
2103 }
2104
2105 /**
2106  * stmmac_check_ether_addr - check if the MAC addr is valid
2107  * @priv: driver private structure
2108  * Description:
2109  * it verifies that the MAC address is valid; in case of failure it
2110  * generates a random MAC address
2111  */
2112 static void stmmac_check_ether_addr(struct stmmac_priv *priv)
2113 {
2114         if (!is_valid_ether_addr(priv->dev->dev_addr)) {
2115                 priv->hw->mac->get_umac_addr(priv->hw,
2116                                              priv->dev->dev_addr, 0);
2117                 if (!is_valid_ether_addr(priv->dev->dev_addr))
2118                         eth_hw_addr_random(priv->dev);
2119                 netdev_info(priv->dev, "device MAC address %pM\n",
2120                             priv->dev->dev_addr);
2121         }
2122 }
2123
2124 /**
2125  * stmmac_init_dma_engine - DMA init.
2126  * @priv: driver private structure
2127  * Description:
2128  * It inits the DMA invoking the specific MAC/GMAC callback.
2129  * Some DMA parameters can be passed from the platform;
2130  * if these are not passed, a default is kept for the MAC or GMAC.
2131  */
2132 static int stmmac_init_dma_engine(struct stmmac_priv *priv)
2133 {
2134         u32 rx_channels_count = priv->plat->rx_queues_to_use;
2135         u32 tx_channels_count = priv->plat->tx_queues_to_use;
2136         struct stmmac_rx_queue *rx_q;
2137         struct stmmac_tx_queue *tx_q;
2138         u32 dummy_dma_rx_phy = 0;
2139         u32 dummy_dma_tx_phy = 0;
2140         u32 chan = 0;
2141         int atds = 0;
2142         int ret = 0;
2143
2144         if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) {
2145                 dev_err(priv->device, "Invalid DMA configuration\n");
2146                 return -EINVAL;
2147         }
2148
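        /* Use the Alternate (extended) Descriptor Size when extended
         * descriptors are used in ring mode.
         */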
2149         if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
2150                 atds = 1;
2151
2152         ret = priv->hw->dma->reset(priv->ioaddr);
2153         if (ret) {
2154                 dev_err(priv->device, "Failed to reset the dma\n");
2155                 return ret;
2156         }
2157
2158         if (priv->synopsys_id >= DWMAC_CORE_4_00) {
2159                 /* DMA Configuration */
2160                 priv->hw->dma->init(priv->ioaddr, priv->plat->dma_cfg,
2161                                     dummy_dma_tx_phy, dummy_dma_rx_phy, atds);
2162
2163                 /* DMA RX Channel Configuration */
2164                 for (chan = 0; chan < rx_channels_count; chan++) {
2165                         rx_q = &priv->rx_queue[chan];
2166
2167                         priv->hw->dma->init_rx_chan(priv->ioaddr,
2168                                                     priv->plat->dma_cfg,
2169                                                     rx_q->dma_rx_phy, chan);
2170
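                        /* Point the RX tail pointer at the end of the ring so
                         * the DMA may use every descriptor.
                         */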
2171                         rx_q->rx_tail_addr = rx_q->dma_rx_phy +
2172                                     (DMA_RX_SIZE * sizeof(struct dma_desc));
2173                         priv->hw->dma->set_rx_tail_ptr(priv->ioaddr,
2174                                                        rx_q->rx_tail_addr,
2175                                                        chan);
2176                 }
2177
2178                 /* DMA TX Channel Configuration */
2179                 for (chan = 0; chan < tx_channels_count; chan++) {
2180                         tx_q = &priv->tx_queue[chan];
2181
2182                         priv->hw->dma->init_chan(priv->ioaddr,
2183                                                  priv->plat->dma_cfg,
2184                                                  chan);
2185
2186                         priv->hw->dma->init_tx_chan(priv->ioaddr,
2187                                                     priv->plat->dma_cfg,
2188                                                     tx_q->dma_tx_phy, chan);
2189
2190                         tx_q->tx_tail_addr = tx_q->dma_tx_phy +
2191                                     (DMA_TX_SIZE * sizeof(struct dma_desc));
2192                         priv->hw->dma->set_tx_tail_ptr(priv->ioaddr,
2193                                                        tx_q->tx_tail_addr,
2194                                                        chan);
2195                 }
2196         } else {
2197                 rx_q = &priv->rx_queue[chan];
2198                 tx_q = &priv->tx_queue[chan];
2199                 priv->hw->dma->init(priv->ioaddr, priv->plat->dma_cfg,
2200                                     tx_q->dma_tx_phy, rx_q->dma_rx_phy, atds);
2201         }
2202
2203         if (priv->plat->axi && priv->hw->dma->axi)
2204                 priv->hw->dma->axi(priv->ioaddr, priv->plat->axi);
2205
2206         return ret;
2207 }
2208
2209 /**
2210  * stmmac_tx_timer - mitigation sw timer for tx.
2211  * @data: data pointer
2212  * Description:
2213  * This is the timer handler to directly invoke the stmmac_tx_clean.
2214  */
2215 static void stmmac_tx_timer(unsigned long data)
2216 {
2217         struct stmmac_priv *priv = (struct stmmac_priv *)data;
2218         u32 tx_queues_count = priv->plat->tx_queues_to_use;
2219         u32 queue;
2220
2221         /* let's scan all the tx queues */
2222         for (queue = 0; queue < tx_queues_count; queue++)
2223                 stmmac_tx_clean(priv, queue);
2224 }
2225
2226 /**
2227  * stmmac_init_tx_coalesce - init tx mitigation options.
2228  * @priv: driver private structure
2229  * Description:
2230  * This inits the transmit coalesce parameters: i.e. timer rate,
2231  * timer handler and default threshold used for enabling the
2232  * interrupt on completion bit.
2233  */
2234 static void stmmac_init_tx_coalesce(struct stmmac_priv *priv)
2235 {
2236         priv->tx_coal_frames = STMMAC_TX_FRAMES;
2237         priv->tx_coal_timer = STMMAC_COAL_TX_TIMER;
2238         setup_timer(&priv->txtimer, stmmac_tx_timer, (unsigned long)priv);
2239         priv->txtimer.expires = STMMAC_COAL_TIMER(priv->tx_coal_timer);
2240         add_timer(&priv->txtimer);
2241 }
2242
2243 static void stmmac_set_rings_length(struct stmmac_priv *priv)
2244 {
2245         u32 rx_channels_count = priv->plat->rx_queues_to_use;
2246         u32 tx_channels_count = priv->plat->tx_queues_to_use;
2247         u32 chan;
2248
2249         /* set TX ring length */
2250         if (priv->hw->dma->set_tx_ring_len) {
2251                 for (chan = 0; chan < tx_channels_count; chan++)
2252                         priv->hw->dma->set_tx_ring_len(priv->ioaddr,
2253                                                        (DMA_TX_SIZE - 1), chan);
2254         }
2255
2256         /* set RX ring length */
2257         if (priv->hw->dma->set_rx_ring_len) {
2258                 for (chan = 0; chan < rx_channels_count; chan++)
2259                         priv->hw->dma->set_rx_ring_len(priv->ioaddr,
2260                                                        (DMA_RX_SIZE - 1), chan);
2261         }
2262 }
2263
2264 /**
2265  *  stmmac_set_tx_queue_weight - Set TX queue weight
2266  *  @priv: driver private structure
2267  *  Description: It is used for setting the TX queue weights
2268  */
2269 static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv)
2270 {
2271         u32 tx_queues_count = priv->plat->tx_queues_to_use;
2272         u32 weight;
2273         u32 queue;
2274
2275         for (queue = 0; queue < tx_queues_count; queue++) {
2276                 weight = priv->plat->tx_queues_cfg[queue].weight;
2277                 priv->hw->mac->set_mtl_tx_queue_weight(priv->hw, weight, queue);
2278         }
2279 }
2280
2281 /**
2282  *  stmmac_configure_cbs - Configure CBS in TX queue
2283  *  @priv: driver private structure
2284  *  Description: It is used for configuring CBS in AVB TX queues
2285  */
2286 static void stmmac_configure_cbs(struct stmmac_priv *priv)
2287 {
2288         u32 tx_queues_count = priv->plat->tx_queues_to_use;
2289         u32 mode_to_use;
2290         u32 queue;
2291
2292         /* queue 0 is reserved for legacy traffic */
2293         for (queue = 1; queue < tx_queues_count; queue++) {
2294                 mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
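                /* CBS is only configured for AVB queues; DCB queues keep
                 * the default scheduling.
                 */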
2295                 if (mode_to_use == MTL_QUEUE_DCB)
2296                         continue;
2297
2298                 priv->hw->mac->config_cbs(priv->hw,
2299                                 priv->plat->tx_queues_cfg[queue].send_slope,
2300                                 priv->plat->tx_queues_cfg[queue].idle_slope,
2301                                 priv->plat->tx_queues_cfg[queue].high_credit,
2302                                 priv->plat->tx_queues_cfg[queue].low_credit,
2303                                 queue);
2304         }
2305 }
2306
2307 /**
2308  *  stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel
2309  *  @priv: driver private structure
2310  *  Description: It is used for mapping RX queues to RX dma channels
2311  */
2312 static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv)
2313 {
2314         u32 rx_queues_count = priv->plat->rx_queues_to_use;
2315         u32 queue;
2316         u32 chan;
2317
2318         for (queue = 0; queue < rx_queues_count; queue++) {
2319                 chan = priv->plat->rx_queues_cfg[queue].chan;
2320                 priv->hw->mac->map_mtl_to_dma(priv->hw, queue, chan);
2321         }
2322 }
2323
2324 /**
2325  *  stmmac_mac_config_rx_queues_prio - Configure RX Queue priority
2326  *  @priv: driver private structure
2327  *  Description: It is used for configuring the RX Queue Priority
2328  */
2329 static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv)
2330 {
2331         u32 rx_queues_count = priv->plat->rx_queues_to_use;
2332         u32 queue;
2333         u32 prio;
2334
2335         for (queue = 0; queue < rx_queues_count; queue++) {
2336                 if (!priv->plat->rx_queues_cfg[queue].use_prio)
2337                         continue;
2338
2339                 prio = priv->plat->rx_queues_cfg[queue].prio;
2340                 priv->hw->mac->rx_queue_prio(priv->hw, prio, queue);
2341         }
2342 }
2343
2344 /**
2345  *  stmmac_mac_config_tx_queues_prio - Configure TX Queue priority
2346  *  @priv: driver private structure
2347  *  Description: It is used for configuring the TX Queue Priority
2348  */
2349 static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv)
2350 {
2351         u32 tx_queues_count = priv->plat->tx_queues_to_use;
2352         u32 queue;
2353         u32 prio;
2354
2355         for (queue = 0; queue < tx_queues_count; queue++) {
2356                 if (!priv->plat->tx_queues_cfg[queue].use_prio)
2357                         continue;
2358
2359                 prio = priv->plat->tx_queues_cfg[queue].prio;
2360                 priv->hw->mac->tx_queue_prio(priv->hw, prio, queue);
2361         }
2362 }
2363
2364 /**
2365  *  stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing
2366  *  @priv: driver private structure
2367  *  Description: It is used for configuring the RX queue routing
2368  */
2369 static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv)
2370 {
2371         u32 rx_queues_count = priv->plat->rx_queues_to_use;
2372         u32 queue;
2373         u8 packet;
2374
2375         for (queue = 0; queue < rx_queues_count; queue++) {
2376                 /* no specific packet type routing specified for the queue */
2377                 if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0)
2378                         continue;
2379
2380                 packet = priv->plat->rx_queues_cfg[queue].pkt_route;
2381                 priv->hw->mac->rx_queue_routing(priv->hw, packet, queue);
2382         }
2383 }
2384
2385 /**
2386  *  stmmac_mtl_configuration - Configure MTL
2387  *  @priv: driver private structure
2388  *  Description: It is used for configuring MTL
2389  */
2390 static void stmmac_mtl_configuration(struct stmmac_priv *priv)
2391 {
2392         u32 rx_queues_count = priv->plat->rx_queues_to_use;
2393         u32 tx_queues_count = priv->plat->tx_queues_to_use;
2394
2395         if (tx_queues_count > 1 && priv->hw->mac->set_mtl_tx_queue_weight)
2396                 stmmac_set_tx_queue_weight(priv);
2397
2398         /* Configure MTL RX algorithms */
2399         if (rx_queues_count > 1 && priv->hw->mac->prog_mtl_rx_algorithms)
2400                 priv->hw->mac->prog_mtl_rx_algorithms(priv->hw,
2401                                                 priv->plat->rx_sched_algorithm);
2402
2403         /* Configure MTL TX algorithms */
2404         if (tx_queues_count > 1 && priv->hw->mac->prog_mtl_tx_algorithms)
2405                 priv->hw->mac->prog_mtl_tx_algorithms(priv->hw,
2406                                                 priv->plat->tx_sched_algorithm);
2407
2408         /* Configure CBS in AVB TX queues */
2409         if (tx_queues_count > 1 && priv->hw->mac->config_cbs)
2410                 stmmac_configure_cbs(priv);
2411
2412         /* Map RX MTL to DMA channels */
2413         if (priv->hw->mac->map_mtl_to_dma)
2414                 stmmac_rx_queue_dma_chan_map(priv);
2415
2416         /* Enable MAC RX Queues */
2417         if (priv->hw->mac->rx_queue_enable)
2418                 stmmac_mac_enable_rx_queues(priv);
2419
2420         /* Set RX priorities */
2421         if (rx_queues_count > 1 && priv->hw->mac->rx_queue_prio)
2422                 stmmac_mac_config_rx_queues_prio(priv);
2423
2424         /* Set TX priorities */
2425         if (tx_queues_count > 1 && priv->hw->mac->tx_queue_prio)
2426                 stmmac_mac_config_tx_queues_prio(priv);
2427
2428         /* Set RX routing */
2429         if (rx_queues_count > 1 && priv->hw->mac->rx_queue_routing)
2430                 stmmac_mac_config_rx_queues_routing(priv);
2431 }
2432
2433 /**
2434  * stmmac_hw_setup - setup mac in a usable state.
2435  *  @dev : pointer to the device structure.
2436  *  Description:
2437  *  this is the main function to setup the HW in a usable state: the
2438  *  dma engine is reset, the core registers are configured (e.g. AXI,
2439  *  Checksum features, timers). The DMA is ready to start receiving and
2440  *  transmitting.
2441  *  Return value:
2442  *  0 on success and an appropriate (-)ve integer as defined in errno.h
2443  *  file on failure.
2444  */
2445 static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
2446 {
2447         struct stmmac_priv *priv = netdev_priv(dev);
2448         u32 rx_cnt = priv->plat->rx_queues_to_use;
2449         u32 tx_cnt = priv->plat->tx_queues_to_use;
2450         u32 chan;
2451         int ret;
2452
2453         /* DMA initialization and SW reset */
2454         ret = stmmac_init_dma_engine(priv);
2455         if (ret < 0) {
2456                 netdev_err(priv->dev, "%s: DMA engine initialization failed\n",
2457                            __func__);
2458                 return ret;
2459         }
2460
2461         /* Copy the MAC addr into the HW  */
2462         priv->hw->mac->set_umac_addr(priv->hw, dev->dev_addr, 0);
2463
2464         /* PS and related bits will be programmed according to the speed */
2465         if (priv->hw->pcs) {
2466                 int speed = priv->plat->mac_port_sel_speed;
2467
2468                 if ((speed == SPEED_10) || (speed == SPEED_100) ||
2469                     (speed == SPEED_1000)) {
2470                         priv->hw->ps = speed;
2471                 } else {
2472                         dev_warn(priv->device, "invalid port speed\n");
2473                         priv->hw->ps = 0;
2474                 }
2475         }
2476
2477         /* Initialize the MAC Core */
2478         priv->hw->mac->core_init(priv->hw, dev->mtu);
2479
2480         /* Initialize MTL*/
2481         if (priv->synopsys_id >= DWMAC_CORE_4_00)
2482                 stmmac_mtl_configuration(priv);
2483
2484         ret = priv->hw->mac->rx_ipc(priv->hw);
2485         if (!ret) {
2486                 netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n");
2487                 priv->plat->rx_coe = STMMAC_RX_COE_NONE;
2488                 priv->hw->rx_csum = 0;
2489         }
2490
2491         /* Enable the MAC Rx/Tx */
2492         priv->hw->mac->set_mac(priv->ioaddr, true);
2493
2494         /* Set the HW DMA mode and the COE */
2495         stmmac_dma_operation_mode(priv);
2496
2497         stmmac_mmc_setup(priv);
2498
2499         if (init_ptp) {
2500                 ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
2501                 if (ret < 0)
2502                         netdev_warn(priv->dev, "failed to enable PTP reference clock: %d\n", ret);
2503
2504                 ret = stmmac_init_ptp(priv);
2505                 if (ret == -EOPNOTSUPP)
2506                         netdev_warn(priv->dev, "PTP not supported by HW\n");
2507                 else if (ret)
2508                         netdev_warn(priv->dev, "PTP init failed\n");
2509         }
2510
2511 #ifdef CONFIG_DEBUG_FS
2512         ret = stmmac_init_fs(dev);
2513         if (ret < 0)
2514                 netdev_warn(priv->dev, "%s: failed debugFS registration\n",
2515                             __func__);
2516 #endif
2517         /* Start the ball rolling... */
2518         stmmac_start_all_dma(priv);
2519
2520         priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS;
2521
2522         if ((priv->use_riwt) && (priv->hw->dma->rx_watchdog)) {
2523                 priv->rx_riwt = MAX_DMA_RIWT;
2524                 priv->hw->dma->rx_watchdog(priv->ioaddr, MAX_DMA_RIWT, rx_cnt);
2525         }
2526
2527         if (priv->hw->pcs && priv->hw->mac->pcs_ctrl_ane)
2528                 priv->hw->mac->pcs_ctrl_ane(priv->hw, 1, priv->hw->ps, 0);
2529
2530         /* set TX and RX rings length */
2531         stmmac_set_rings_length(priv);
2532
2533         /* Enable TSO */
2534         if (priv->tso) {
2535                 for (chan = 0; chan < tx_cnt; chan++)
2536                         priv->hw->dma->enable_tso(priv->ioaddr, 1, chan);
2537         }
2538
2539         return 0;
2540 }
2541
2542 static void stmmac_hw_teardown(struct net_device *dev)
2543 {
2544         struct stmmac_priv *priv = netdev_priv(dev);
2545
2546         clk_disable_unprepare(priv->plat->clk_ptp_ref);
2547 }
2548
2549 /**
2550  *  stmmac_open - open entry point of the driver
2551  *  @dev : pointer to the device structure.
2552  *  Description:
2553  *  This function is the open entry point of the driver.
2554  *  Return value:
2555  *  0 on success and an appropriate (-)ve integer as defined in errno.h
2556  *  file on failure.
2557  */
2558 static int stmmac_open(struct net_device *dev)
2559 {
2560         struct stmmac_priv *priv = netdev_priv(dev);
2561         int ret;
2562
2563         stmmac_check_ether_addr(priv);
2564
2565         if (priv->hw->pcs != STMMAC_PCS_RGMII &&
2566             priv->hw->pcs != STMMAC_PCS_TBI &&
2567             priv->hw->pcs != STMMAC_PCS_RTBI) {
2568                 ret = stmmac_init_phy(dev);
2569                 if (ret) {
2570                         netdev_err(priv->dev,
2571                                    "%s: Cannot attach to PHY (error: %d)\n",
2572                                    __func__, ret);
2573                         return ret;
2574                 }
2575         }
2576
2577         /* Extra statistics */
2578         memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats));
2579         priv->xstats.threshold = tc;
2580
2581         priv->dma_buf_sz = STMMAC_ALIGN(buf_sz);
2582         priv->rx_copybreak = STMMAC_RX_COPYBREAK;
2583
2584         ret = alloc_dma_desc_resources(priv);
2585         if (ret < 0) {
2586                 netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
2587                            __func__);
2588                 goto dma_desc_error;
2589         }
2590
2591         ret = init_dma_desc_rings(dev, GFP_KERNEL);
2592         if (ret < 0) {
2593                 netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
2594                            __func__);
2595                 goto init_error;
2596         }
2597
2598         ret = stmmac_hw_setup(dev, true);
2599         if (ret < 0) {
2600                 netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
2601                 goto init_error;
2602         }
2603
2604         stmmac_init_tx_coalesce(priv);
2605
2606         if (dev->phydev)
2607                 phy_start(dev->phydev);
2608
2609         /* Request the IRQ lines */
2610         ret = request_irq(dev->irq, stmmac_interrupt,
2611                           IRQF_SHARED, dev->name, dev);
2612         if (unlikely(ret < 0)) {
2613                 netdev_err(priv->dev,
2614                            "%s: ERROR: allocating the IRQ %d (error: %d)\n",
2615                            __func__, dev->irq, ret);
2616                 goto irq_error;
2617         }
2618
2619         /* Request the Wake IRQ in case another line is used for WoL */
2620         if (priv->wol_irq != dev->irq) {
2621                 ret = request_irq(priv->wol_irq, stmmac_interrupt,
2622                                   IRQF_SHARED, dev->name, dev);
2623                 if (unlikely(ret < 0)) {
2624                         netdev_err(priv->dev,
2625                                    "%s: ERROR: allocating the WoL IRQ %d (%d)\n",
2626                                    __func__, priv->wol_irq, ret);
2627                         goto wolirq_error;
2628                 }
2629         }
2630
2631         /* Request the LPI IRQ in case a separate line is used for LPI */
2632         if (priv->lpi_irq > 0) {
2633                 ret = request_irq(priv->lpi_irq, stmmac_interrupt, IRQF_SHARED,
2634                                   dev->name, dev);
2635                 if (unlikely(ret < 0)) {
2636                         netdev_err(priv->dev,
2637                                    "%s: ERROR: allocating the LPI IRQ %d (%d)\n",
2638                                    __func__, priv->lpi_irq, ret);
2639                         goto lpiirq_error;
2640                 }
2641         }
2642
2643         stmmac_enable_all_queues(priv);
2644         stmmac_start_all_queues(priv);
2645
2646         return 0;
2647
2648 lpiirq_error:
2649         if (priv->wol_irq != dev->irq)
2650                 free_irq(priv->wol_irq, dev);
2651 wolirq_error:
2652         free_irq(dev->irq, dev);
2653 irq_error:
2654         if (dev->phydev)
2655                 phy_stop(dev->phydev);
2656
2657         del_timer_sync(&priv->txtimer);
2658         stmmac_hw_teardown(dev);
2659 init_error:
2660         free_dma_desc_resources(priv);
2661 dma_desc_error:
2662         if (dev->phydev)
2663                 phy_disconnect(dev->phydev);
2664
2665         return ret;
2666 }
2667
2668 /**
2669  *  stmmac_release - close entry point of the driver
2670  *  @dev : device pointer.
2671  *  Description:
2672  *  This is the stop entry point of the driver.
2673  */
2674 static int stmmac_release(struct net_device *dev)
2675 {
2676         struct stmmac_priv *priv = netdev_priv(dev);
2677
2678         if (priv->eee_enabled)
2679                 del_timer_sync(&priv->eee_ctrl_timer);
2680
2681         /* Stop and disconnect the PHY */
2682         if (dev->phydev) {
2683                 phy_stop(dev->phydev);
2684                 phy_disconnect(dev->phydev);
2685         }
2686
2687         stmmac_stop_all_queues(priv);
2688
2689         stmmac_disable_all_queues(priv);
2690
2691         del_timer_sync(&priv->txtimer);
2692
2693         /* Free the IRQ lines */
2694         free_irq(dev->irq, dev);
2695         if (priv->wol_irq != dev->irq)
2696                 free_irq(priv->wol_irq, dev);
2697         if (priv->lpi_irq > 0)
2698                 free_irq(priv->lpi_irq, dev);
2699
2700         /* Stop TX/RX DMA and clear the descriptors */
2701         stmmac_stop_all_dma(priv);
2702
2703         /* Release and free the Rx/Tx resources */
2704         free_dma_desc_resources(priv);
2705
2706         /* Disable the MAC Rx/Tx */
2707         priv->hw->mac->set_mac(priv->ioaddr, false);
2708
2709         netif_carrier_off(dev);
2710
2711 #ifdef CONFIG_DEBUG_FS
2712         stmmac_exit_fs(dev);
2713 #endif
2714
2715         stmmac_release_ptp(priv);
2716
2717         return 0;
2718 }
2719
2720 /**
2721  *  stmmac_tso_allocator - allocate and fill TX descriptors for a TSO payload
2722  *  @priv: driver private structure
2723  *  @des: buffer start address
2724  *  @total_len: total length to fill in descriptors
2725  *  @last_segment: condition for the last descriptor
2726  *  @queue: TX queue index
2727  *  Description:
2728  *  This function fills the descriptors and requests new descriptors
2729  *  according to the buffer length to fill
2730  */
2731 static void stmmac_tso_allocator(struct stmmac_priv *priv, unsigned int des,
2732                                  int total_len, bool last_segment, u32 queue)
2733 {
2734         struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
2735         struct dma_desc *desc;
2736         u32 buff_size;
2737         int tmp_len;
2738
2739         tmp_len = total_len;
2740
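        /* Each descriptor can carry at most TSO_MAX_BUFF_SIZE bytes, so the
         * payload is split across as many descriptors as needed.
         */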
2741         while (tmp_len > 0) {
2742                 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
2743                 desc = tx_q->dma_tx + tx_q->cur_tx;
2744
2745                 desc->des0 = cpu_to_le32(des + (total_len - tmp_len));
2746                 buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
2747                             TSO_MAX_BUFF_SIZE : tmp_len;
2748
2749                 priv->hw->desc->prepare_tso_tx_desc(desc, 0, buff_size,
2750                         0, 1,
2751                         (last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE),
2752                         0, 0);
2753
2754                 tmp_len -= TSO_MAX_BUFF_SIZE;
2755         }
2756 }
2757
2758 /**
2759  *  stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
2760  *  @skb : the socket buffer
2761  *  @dev : device pointer
2762  *  Description: this is the transmit function that is called on TSO frames
2763  *  (support available on GMAC4 and newer chips).
2764  *  The diagram below shows the ring programming in case of TSO frames:
2765  *
2766  *  First Descriptor
2767  *   --------
2768  *   | DES0 |---> buffer1 = L2/L3/L4 header
2769  *   | DES1 |---> TCP Payload (can continue on next descr...)
2770  *   | DES2 |---> buffer 1 and 2 len
2771  *   | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
2772  *   --------
2773  *      |
2774  *     ...
2775  *      |
2776  *   --------
2777  *   | DES0 | --| Split TCP Payload on Buffers 1 and 2
2778  *   | DES1 | --|
2779  *   | DES2 | --> buffer 1 and 2 len
2780  *   | DES3 |
2781  *   --------
2782  *
2783  * The MSS is fixed while TSO is enabled, so the TDES3 ctx field is only programmed when it changes.
2784  */
2785 static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
2786 {
2787         struct dma_desc *desc, *first, *mss_desc = NULL;
2788         struct stmmac_priv *priv = netdev_priv(dev);
2789         int nfrags = skb_shinfo(skb)->nr_frags;
2790         u32 queue = skb_get_queue_mapping(skb);
2791         unsigned int first_entry, des;
2792         struct stmmac_tx_queue *tx_q;
2793         int tmp_pay_len = 0;
2794         u32 pay_len, mss;
2795         u8 proto_hdr_len;
2796         int i;
2797
2798         tx_q = &priv->tx_queue[queue];
2799
2800         /* Compute header lengths */
2801         proto_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
2802
2803         /* Checking desc availability against this threshold should be safe enough */
2804         if (unlikely(stmmac_tx_avail(priv, queue) <
2805                 (((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) {
2806                 if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
2807                         netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
2808                                                                 queue));
2809                         /* This is a hard error, log it. */
2810                         netdev_err(priv->dev,
2811                                    "%s: Tx Ring full when queue awake\n",
2812                                    __func__);
2813                 }
2814                 return NETDEV_TX_BUSY;
2815         }
2816
2817         pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */
2818
2819         mss = skb_shinfo(skb)->gso_size;
2820
2821         /* set new MSS value if needed */
2822         if (mss != priv->mss) {
2823                 mss_desc = tx_q->dma_tx + tx_q->cur_tx;
2824                 priv->hw->desc->set_mss(mss_desc, mss);
2825                 priv->mss = mss;
2826                 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
2827         }
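        /* Note: the OWN bit of the context descriptor above (if any) is only
         * set once all data descriptors are ready, via set_tx_owner() below,
         * so the DMA cannot fetch it too early.
         */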
2828
2829         if (netif_msg_tx_queued(priv)) {
2830                 pr_info("%s: tcphdrlen %d, hdr_len %d, pay_len %d, mss %d\n",
2831                         __func__, tcp_hdrlen(skb), proto_hdr_len, pay_len, mss);
2832                 pr_info("\tskb->len %d, skb->data_len %d\n", skb->len,
2833                         skb->data_len);
2834         }
2835
2836         first_entry = tx_q->cur_tx;
2837
2838         desc = tx_q->dma_tx + first_entry;
2839         first = desc;
2840
2841         /* first descriptor: fill Headers on Buf1 */
2842         des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
2843                              DMA_TO_DEVICE);
2844         if (dma_mapping_error(priv->device, des))
2845                 goto dma_map_err;
2846
2847         tx_q->tx_skbuff_dma[first_entry].buf = des;
2848         tx_q->tx_skbuff_dma[first_entry].len = skb_headlen(skb);
2849
2850         first->des0 = cpu_to_le32(des);
2851
2852         /* Fill start of payload in buff2 of first descriptor */
2853         if (pay_len)
2854                 first->des1 = cpu_to_le32(des + proto_hdr_len);
2855
2856         /* If needed take extra descriptors to fill the remaining payload */
2857         tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;
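        /* The first descriptor's buffer 2 is assumed to carry up to
         * TSO_MAX_BUFF_SIZE of payload; anything beyond that is spread over
         * extra descriptors by stmmac_tso_allocator().
         */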
2858
2859         stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue);
2860
2861         /* Prepare fragments */
2862         for (i = 0; i < nfrags; i++) {
2863                 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2864
2865                 des = skb_frag_dma_map(priv->device, frag, 0,
2866                                        skb_frag_size(frag),
2867                                        DMA_TO_DEVICE);
2868                 if (dma_mapping_error(priv->device, des))
2869                         goto dma_map_err;
2870
2871                 stmmac_tso_allocator(priv, des, skb_frag_size(frag),
2872                                      (i == nfrags - 1), queue);
2873
2874                 tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
2875                 tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag);
2876                 tx_q->tx_skbuff[tx_q->cur_tx] = NULL;
2877                 tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true;
2878         }
2879
2880         tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true;
2881
2882         /* Only the last descriptor gets to point to the skb. */
2883         tx_q->tx_skbuff[tx_q->cur_tx] = skb;
2884
2885         /* We've used all descriptors we need for this skb, however,
2886          * advance cur_tx so that it references a fresh descriptor.
2887          * ndo_start_xmit will fill this descriptor the next time it's
2888          * called and stmmac_tx_clean may clean up to this descriptor.
2889          */
2890         tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
2891
2892         if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
2893                 netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
2894                           __func__);
2895                 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
2896         }
2897
2898         dev->stats.tx_bytes += skb->len;
2899         priv->xstats.tx_tso_frames++;
2900         priv->xstats.tx_tso_nfrags += nfrags;
2901
2902         /* Manage tx mitigation */
2903         priv->tx_count_frames += nfrags + 1;
2904         if (likely(priv->tx_coal_frames > priv->tx_count_frames)) {
2905                 mod_timer(&priv->txtimer,
2906                           STMMAC_COAL_TIMER(priv->tx_coal_timer));
2907         } else {
2908                 priv->tx_count_frames = 0;
2909                 priv->hw->desc->set_tx_ic(desc);
2910                 priv->xstats.tx_set_ic_bit++;
2911         }
2912
2913         skb_tx_timestamp(skb);
2914
2915         if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
2916                      priv->hwts_tx_en)) {
2917                 /* declare that device is doing timestamping */
2918                 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
2919                 priv->hw->desc->enable_tx_timestamp(first);
2920         }
2921
2922         /* Complete the first descriptor before granting the DMA */
2923         priv->hw->desc->prepare_tso_tx_desc(first, 1,
2924                         proto_hdr_len,
2925                         pay_len,
2926                         1, tx_q->tx_skbuff_dma[first_entry].last_segment,
2927                         tcp_hdrlen(skb) / 4, (skb->len - proto_hdr_len));
2928
2929         /* If context desc is used to change MSS */
2930         if (mss_desc)
2931                 priv->hw->desc->set_tx_owner(mss_desc);
2932
2933         /* The own bit must be the latest setting done when preparing the
2934          * descriptor, and then a barrier is needed to make sure that
2935          * everything is coherent before granting the DMA engine.
2936          */
2937         dma_wmb();
2938
2939         if (netif_msg_pktdata(priv)) {
2940                 pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
2941                         __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
2942                         tx_q->cur_tx, first, nfrags);
2943
2944                 priv->hw->desc->display_ring((void *)tx_q->dma_tx, DMA_TX_SIZE,
2945                                              0);
2946
2947                 pr_info(">>> frame to be transmitted: ");
2948                 print_pkt(skb->data, skb_headlen(skb));
2949         }
2950
2951         netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
2952
2953         priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, tx_q->tx_tail_addr,
2954                                        queue);
2955
2956         return NETDEV_TX_OK;
2957
2958 dma_map_err:
2959         dev_err(priv->device, "Tx dma map failed\n");
2960         dev_kfree_skb(skb);
2961         priv->dev->stats.tx_dropped++;
2962         return NETDEV_TX_OK;
2963 }
2964
2965 /**
2966  *  stmmac_xmit - Tx entry point of the driver
2967  *  @skb : the socket buffer
2968  *  @dev : device pointer
2969  *  Description : this is the tx entry point of the driver.
2970  *  It programs the chain or the ring and supports oversized frames
2971  *  and SG feature.
2972  */
2973 static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
2974 {
2975         struct stmmac_priv *priv = netdev_priv(dev);
2976         unsigned int nopaged_len = skb_headlen(skb);
2977         int i, csum_insertion = 0, is_jumbo = 0;
2978         u32 queue = skb_get_queue_mapping(skb);
2979         int nfrags = skb_shinfo(skb)->nr_frags;
2980         int entry;
2981         unsigned int first_entry;
2982         struct dma_desc *desc, *first;
2983         struct stmmac_tx_queue *tx_q;
2984         unsigned int enh_desc;
2985         unsigned int des;
2986
2987         tx_q = &priv->tx_queue[queue];
2988
2989         /* Manage oversized TCP frames for GMAC4 device */
2990         if (skb_is_gso(skb) && priv->tso) {
2991                 if (skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
2992                         return stmmac_tso_xmit(skb, dev);
2993         }
2994
2995         if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
2996                 if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
2997                         netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
2998                                                                 queue));
2999                         /* This is a hard error, log it. */
3000                         netdev_err(priv->dev,
3001                                    "%s: Tx Ring full when queue awake\n",
3002                                    __func__);
3003                 }
3004                 return NETDEV_TX_BUSY;
3005         }
3006
3007         if (priv->tx_path_in_lpi_mode)
3008                 stmmac_disable_eee_mode(priv);
3009
3010         entry = tx_q->cur_tx;
3011         first_entry = entry;
3012
3013         csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
3014
3015         if (likely(priv->extend_desc))
3016                 desc = (struct dma_desc *)(tx_q->dma_etx + entry);
3017         else
3018                 desc = tx_q->dma_tx + entry;
3019
3020         first = desc;
3021
3022         enh_desc = priv->plat->enh_desc;
3023         /* To program the descriptors according to the size of the frame */
3024         if (enh_desc)
3025                 is_jumbo = priv->hw->mode->is_jumbo_frm(skb->len, enh_desc);
3026
3027         if (unlikely(is_jumbo) && likely(priv->synopsys_id <
3028                                          DWMAC_CORE_4_00)) {
3029                 entry = priv->hw->mode->jumbo_frm(tx_q, skb, csum_insertion);
3030                 if (unlikely(entry < 0))
3031                         goto dma_map_err;
3032         }
3033
3034         for (i = 0; i < nfrags; i++) {
3035                 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3036                 int len = skb_frag_size(frag);
3037                 bool last_segment = (i == (nfrags - 1));
3038
3039                 entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
3040
3041                 if (likely(priv->extend_desc))
3042                         desc = (struct dma_desc *)(tx_q->dma_etx + entry);
3043                 else
3044                         desc = tx_q->dma_tx + entry;
3045
3046                 des = skb_frag_dma_map(priv->device, frag, 0, len,
3047                                        DMA_TO_DEVICE);
3048                 if (dma_mapping_error(priv->device, des))
3049                         goto dma_map_err; /* should reuse desc w/o issues */
3050
3051                 tx_q->tx_skbuff[entry] = NULL;
3052
3053                 tx_q->tx_skbuff_dma[entry].buf = des;
3054                 if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
3055                         desc->des0 = cpu_to_le32(des);
3056                 else
3057                         desc->des2 = cpu_to_le32(des);
3058
3059                 tx_q->tx_skbuff_dma[entry].map_as_page = true;
3060                 tx_q->tx_skbuff_dma[entry].len = len;
3061                 tx_q->tx_skbuff_dma[entry].last_segment = last_segment;
3062
3063                 /* Prepare the descriptor and set the own bit too */
3064                 priv->hw->desc->prepare_tx_desc(desc, 0, len, csum_insertion,
3065                                                 priv->mode, 1, last_segment,
3066                                                 skb->len);
3067         }
3068
3069         /* Only the last descriptor gets to point to the skb. */
3070         tx_q->tx_skbuff[entry] = skb;
3071
3072         /* We've used all descriptors we need for this skb, however,
3073          * advance cur_tx so that it references a fresh descriptor.
3074          * ndo_start_xmit will fill this descriptor the next time it's
3075          * called and stmmac_tx_clean may clean up to this descriptor.
3076          */
3077         entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
3078         tx_q->cur_tx = entry;
3079
3080         if (netif_msg_pktdata(priv)) {
3081                 void *tx_head;
3082
3083                 netdev_dbg(priv->dev,
3084                            "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
3085                            __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
3086                            entry, first, nfrags);
3087
3088                 if (priv->extend_desc)
3089                         tx_head = (void *)tx_q->dma_etx;
3090                 else
3091                         tx_head = (void *)tx_q->dma_tx;
3092
3093                 priv->hw->desc->display_ring(tx_head, DMA_TX_SIZE, false);
3094
3095                 netdev_dbg(priv->dev, ">>> frame to be transmitted: ");
3096                 print_pkt(skb->data, skb->len);
3097         }
3098
3099         if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
3100                 netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
3101                           __func__);
3102                 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
3103         }
3104
3105         dev->stats.tx_bytes += skb->len;
3106
3107         /* According to the coalesce parameter, the IC bit for the latest
3108          * segment is cleared and the timer is re-started to clean the tx status.
3109          * This approach takes care of the fragments: desc is the first
3110          * element in case of no SG.
3111          */
3112         priv->tx_count_frames += nfrags + 1;
3113         if (likely(priv->tx_coal_frames > priv->tx_count_frames)) {
3114                 mod_timer(&priv->txtimer,
3115                           STMMAC_COAL_TIMER(priv->tx_coal_timer));
3116         } else {
3117                 priv->tx_count_frames = 0;
3118                 priv->hw->desc->set_tx_ic(desc);
3119                 priv->xstats.tx_set_ic_bit++;
3120         }
3121
3122         skb_tx_timestamp(skb);
3123
3124         /* Ready to fill the first descriptor and set the OWN bit w/o any
3125          * problems because all the descriptors are actually ready to be
3126          * passed to the DMA engine.
3127          */
3128         if (likely(!is_jumbo)) {
3129                 bool last_segment = (nfrags == 0);
3130
3131                 des = dma_map_single(priv->device, skb->data,
3132                                      nopaged_len, DMA_TO_DEVICE);
3133                 if (dma_mapping_error(priv->device, des))
3134                         goto dma_map_err;
3135
3136                 tx_q->tx_skbuff_dma[first_entry].buf = des;
3137                 if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
3138                         first->des0 = cpu_to_le32(des);
3139                 else
3140                         first->des2 = cpu_to_le32(des);
3141
3142                 tx_q->tx_skbuff_dma[first_entry].len = nopaged_len;
3143                 tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment;
3144
3145                 if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
3146                              priv->hwts_tx_en)) {
3147                         /* declare that device is doing timestamping */
3148                         skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
3149                         priv->hw->desc->enable_tx_timestamp(first);
3150                 }
3151
3152                 /* Prepare the first descriptor setting the OWN bit too */
3153                 priv->hw->desc->prepare_tx_desc(first, 1, nopaged_len,
3154                                                 csum_insertion, priv->mode, 1,
3155                                                 last_segment, skb->len);
3156
3157                 /* The own bit must be the latest setting done when preparing the
3158                  * descriptor, and then a barrier is needed to make sure that
3159                  * everything is coherent before granting the DMA engine.
3160                  */
3161                 dma_wmb();
3162         }
3163
3164         netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
3165
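        /* Kick the TX DMA: pre-4.00 cores use a transmit poll demand write,
         * while GMAC4 and newer advance the ring tail pointer.
         */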
3166         if (priv->synopsys_id < DWMAC_CORE_4_00)
3167                 priv->hw->dma->enable_dma_transmission(priv->ioaddr);
3168         else
3169                 priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, tx_q->tx_tail_addr,
3170                                                queue);
3171
3172         return NETDEV_TX_OK;
3173
3174 dma_map_err:
3175         netdev_err(priv->dev, "Tx DMA map failed\n");
3176         dev_kfree_skb(skb);
3177         priv->dev->stats.tx_dropped++;
3178         return NETDEV_TX_OK;
3179 }
3180
3181 static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
3182 {
3183         struct ethhdr *ehdr;
3184         u16 vlanid;
3185
3186         if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) ==
3187             NETIF_F_HW_VLAN_CTAG_RX &&
3188             !__vlan_get_tag(skb, &vlanid)) {
3189                 /* pop the vlan tag */
3190                 ehdr = (struct ethhdr *)skb->data;
3191                 memmove(skb->data + VLAN_HLEN, ehdr, ETH_ALEN * 2);
3192                 skb_pull(skb, VLAN_HLEN);
3193                 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlanid);
3194         }
3195 }
3196
3197
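/* Returns 1 when rx_zeroc_thresh has reached STMMAC_RX_THRESH, in which case
 * stmmac_rx() copies received frames instead of using the preallocated
 * zero-copy buffers.
 */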
3198 static inline int stmmac_rx_threshold_count(struct stmmac_rx_queue *rx_q)
3199 {
3200         if (rx_q->rx_zeroc_thresh < STMMAC_RX_THRESH)
3201                 return 0;
3202
3203         return 1;
3204 }
3205
3206 /**
3207  * stmmac_rx_refill - refill used skb preallocated buffers
3208  * @priv: driver private structure
3209  * @queue: RX queue index
3210  * Description : this is to reallocate the skb for the reception process
3211  * that is based on zero-copy.
3212  */
3213 static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
3214 {
3215         struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3216         int dirty = stmmac_rx_dirty(priv, queue);
3217         unsigned int entry = rx_q->dirty_rx;
3218
3219         int bfsize = priv->dma_buf_sz;
3220
3221         while (dirty-- > 0) {
3222                 struct dma_desc *p;
3223
3224                 if (priv->extend_desc)
3225                         p = (struct dma_desc *)(rx_q->dma_erx + entry);
3226                 else
3227                         p = rx_q->dma_rx + entry;
3228
3229                 if (likely(!rx_q->rx_skbuff[entry])) {
3230                         struct sk_buff *skb;
3231
3232                         skb = netdev_alloc_skb_ip_align(priv->dev, bfsize);
3233                         if (unlikely(!skb)) {
3234                                 /* so for a while no zero-copy! */
3235                                 rx_q->rx_zeroc_thresh = STMMAC_RX_THRESH;
3236                                 if (unlikely(net_ratelimit()))
3237                                         dev_err(priv->device,
3238                                                 "fail to alloc skb entry %d\n",
3239                                                 entry);
3240                                 break;
3241                         }
3242
3243                         rx_q->rx_skbuff[entry] = skb;
3244                         rx_q->rx_skbuff_dma[entry] =
3245                             dma_map_single(priv->device, skb->data, bfsize,
3246                                            DMA_FROM_DEVICE);
3247                         if (dma_mapping_error(priv->device,
3248                                               rx_q->rx_skbuff_dma[entry])) {
3249                                 netdev_err(priv->dev, "Rx DMA map failed\n");
3250                                 dev_kfree_skb(skb);
3251                                 break;
3252                         }
3253
3254                         if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00)) {
3255                                 p->des0 = cpu_to_le32(rx_q->rx_skbuff_dma[entry]);
3256                                 p->des1 = 0;
3257                         } else {
3258                                 p->des2 = cpu_to_le32(rx_q->rx_skbuff_dma[entry]);
3259                         }
3260                         if (priv->hw->mode->refill_desc3)
3261                                 priv->hw->mode->refill_desc3(rx_q, p);
3262
3263                         if (rx_q->rx_zeroc_thresh > 0)
3264                                 rx_q->rx_zeroc_thresh--;
3265
3266                         netif_dbg(priv, rx_status, priv->dev,
3267                                   "refill entry #%d\n", entry);
3268                 }
3269                 dma_wmb();
3270
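                /* The barrier above makes the new buffer address visible to
                 * the device before the descriptor is handed back below by
                 * setting the OWN bit (re-initialized on GMAC4).
                 */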
3271                 if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
3272                         priv->hw->desc->init_rx_desc(p, priv->use_riwt, 0, 0);
3273                 else
3274                         priv->hw->desc->set_rx_owner(p);
3275
3276                 dma_wmb();
3277
3278                 entry = STMMAC_GET_ENTRY(entry, DMA_RX_SIZE);
3279         }
3280         rx_q->dirty_rx = entry;
3281 }
3282
3283 /**
3284  * stmmac_rx - manage the receive process
3285  * @priv: driver private structure
3286  * @limit: napi budget
3287  * @queue: RX queue index.
3288  * Description : this is the function called by the napi poll method.
3289  * It gets all the frames inside the ring.
3290  */
3291 static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
3292 {
3293         struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3294         unsigned int entry = rx_q->cur_rx;
3295         int coe = priv->hw->rx_csum;
3296         unsigned int next_entry;
3297         unsigned int count = 0;
3298
3299         if (netif_msg_rx_status(priv)) {
3300                 void *rx_head;
3301
3302                 netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
3303                 if (priv->extend_desc)
3304                         rx_head = (void *)rx_q->dma_erx;
3305                 else
3306                         rx_head = (void *)rx_q->dma_rx;
3307
3308                 priv->hw->desc->display_ring(rx_head, DMA_RX_SIZE, true);
3309         }
3310         while (count < limit) {
3311                 int status;
3312                 struct dma_desc *p;
3313                 struct dma_desc *np;
3314
3315                 if (priv->extend_desc)
3316                         p = (struct dma_desc *)(rx_q->dma_erx + entry);
3317                 else
3318                         p = rx_q->dma_rx + entry;
3319
3320                 /* read the status of the incoming frame */
3321                 status = priv->hw->desc->rx_status(&priv->dev->stats,
3322                                                    &priv->xstats, p);
3323                 /* check if managed by the DMA otherwise go ahead */
3324                 if (unlikely(status & dma_own))
3325                         break;
3326
3327                 count++;
3328
3329                 rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx, DMA_RX_SIZE);
3330                 next_entry = rx_q->cur_rx;
3331
3332                 if (priv->extend_desc)
3333                         np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
3334                 else
3335                         np = rx_q->dma_rx + next_entry;
3336
3337                 prefetch(np);
3338
3339                 if ((priv->extend_desc) && (priv->hw->desc->rx_extended_status))
3340                         priv->hw->desc->rx_extended_status(&priv->dev->stats,
3341                                                            &priv->xstats,
3342                                                            rx_q->dma_erx +
3343                                                            entry);
3344                 if (unlikely(status == discard_frame)) {
3345                         priv->dev->stats.rx_errors++;
3346                         if (priv->hwts_rx_en && !priv->extend_desc) {
3347                                 /* DESC2 & DESC3 will be overwritten by device
3348                                  * with timestamp value, hence reinitialize
3349                                  * them in stmmac_rx_refill() function so that
3350                                  * device can reuse it.
3351                                  */
3352                                 rx_q->rx_skbuff[entry] = NULL;
3353                                 dma_unmap_single(priv->device,
3354                                                  rx_q->rx_skbuff_dma[entry],
3355                                                  priv->dma_buf_sz,
3356                                                  DMA_FROM_DEVICE);
3357                         }
3358                 } else {
3359                         struct sk_buff *skb;
3360                         int frame_len;
3361                         unsigned int des;
3362
3363                         if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
3364                                 des = le32_to_cpu(p->des0);
3365                         else
3366                                 des = le32_to_cpu(p->des2);
3367
3368                         frame_len = priv->hw->desc->get_rx_frame_len(p, coe);
3369
3370                         /*  If frame length is greater than skb buffer size
3371                          *  (preallocated during init) then the packet is
3372                          *  ignored
3373                          */
3374                         if (frame_len > priv->dma_buf_sz) {
3375                                 netdev_err(priv->dev,
3376                                            "len %d larger than size (%d)\n",
3377                                            frame_len, priv->dma_buf_sz);
3378                                 priv->dev->stats.rx_length_errors++;
3379                                 break;
3380                         }
3381
3382                         /* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
3383                          * Type frames (LLC/LLC-SNAP)
3384                          */
3385                         if (unlikely(status != llc_snap))
3386                                 frame_len -= ETH_FCS_LEN;
3387
3388                         if (netif_msg_rx_status(priv)) {
3389                                 netdev_dbg(priv->dev, "\tdesc: %p [entry %d] buff=0x%x\n",
3390                                            p, entry, des);
3391                                 if (frame_len > ETH_FRAME_LEN)
3392                                         netdev_dbg(priv->dev, "frame size %d, COE: %d\n",
3393                                                    frame_len, status);
3394                         }
3395
3396                         /* Zero-copy is always used for all sizes on
3397                          * GMAC4 because the used descriptors always
3398                          * need to be refilled.
3399                          */
3400                         if (unlikely(!priv->plat->has_gmac4 &&
3401                                      ((frame_len < priv->rx_copybreak) ||
3402                                      stmmac_rx_threshold_count(rx_q)))) {
3403                                 skb = netdev_alloc_skb_ip_align(priv->dev,
3404                                                                 frame_len);
3405                                 if (unlikely(!skb)) {
3406                                         if (net_ratelimit())
3407                                                 dev_warn(priv->device,
3408                                                          "packet dropped\n");
3409                                         priv->dev->stats.rx_dropped++;
3410                                         break;
3411                                 }
3412
3413                                 dma_sync_single_for_cpu(priv->device,
3414                                                         rx_q->rx_skbuff_dma
3415                                                         [entry], frame_len,
3416                                                         DMA_FROM_DEVICE);
3417                                 skb_copy_to_linear_data(skb,
3418                                                         rx_q->
3419                                                         rx_skbuff[entry]->data,
3420                                                         frame_len);
3421
3422                                 skb_put(skb, frame_len);
3423                                 dma_sync_single_for_device(priv->device,
3424                                                            rx_q->rx_skbuff_dma
3425                                                            [entry], frame_len,
3426                                                            DMA_FROM_DEVICE);
3427                         } else {
3428                                 skb = rx_q->rx_skbuff[entry];
3429                                 if (unlikely(!skb)) {
3430                                         netdev_err(priv->dev,
3431                                                    "%s: Inconsistent Rx chain\n",
3432                                                    priv->dev->name);
3433                                         priv->dev->stats.rx_dropped++;
3434                                         break;
3435                                 }
3436                                 prefetch(skb->data - NET_IP_ALIGN);
3437                                 rx_q->rx_skbuff[entry] = NULL;
3438                                 rx_q->rx_zeroc_thresh++;
3439
3440                                 skb_put(skb, frame_len);
3441                                 dma_unmap_single(priv->device,
3442                                                  rx_q->rx_skbuff_dma[entry],
3443                                                  priv->dma_buf_sz,
3444                                                  DMA_FROM_DEVICE);
3445                         }
3446
3447                         if (netif_msg_pktdata(priv)) {
3448                                 netdev_dbg(priv->dev, "frame received (%dbytes)",
3449                                            frame_len);
3450                                 print_pkt(skb->data, frame_len);
3451                         }
3452
3453                         stmmac_get_rx_hwtstamp(priv, p, np, skb);
3454
3455                         stmmac_rx_vlan(priv->dev, skb);
3456
3457                         skb->protocol = eth_type_trans(skb, priv->dev);
3458
3459                         if (unlikely(!coe))
3460                                 skb_checksum_none_assert(skb);
3461                         else
3462                                 skb->ip_summed = CHECKSUM_UNNECESSARY;
3463
3464                         napi_gro_receive(&rx_q->napi, skb);
3465
3466                         priv->dev->stats.rx_packets++;
3467                         priv->dev->stats.rx_bytes += frame_len;
3468                 }
3469                 entry = next_entry;
3470         }
3471
3472         stmmac_rx_refill(priv, queue);
3473
3474         priv->xstats.rx_pkt_n += count;
3475
3476         return count;
3477 }
3478
3479 /**
3480  *  stmmac_poll - stmmac poll method (NAPI)
3481  *  @napi : pointer to the napi structure.
3482  *  @budget : maximum number of packets that the current CPU can receive from
3483  *            all interfaces.
3484  *  Description :
3485  *  To look at the incoming frames and clear the tx resources.
3486  */
3487 static int stmmac_poll(struct napi_struct *napi, int budget)
3488 {
3489         struct stmmac_rx_queue *rx_q =
3490                 container_of(napi, struct stmmac_rx_queue, napi);
3491         struct stmmac_priv *priv = rx_q->priv_data;
3492         u32 tx_count = priv->plat->tx_queues_to_use;
3493         u32 chan = rx_q->queue_index;
3494         int work_done = 0;
3495         u32 queue;
3496
3497         priv->xstats.napi_poll++;
3498
3499         /* Clean all the TX queues */
3500         for (queue = 0; queue < tx_count; queue++)
3501                 stmmac_tx_clean(priv, queue);
3502
3503         work_done = stmmac_rx(priv, budget, rx_q->queue_index);
3504         if (work_done < budget) {
3505                 napi_complete_done(napi, work_done);
3506                 stmmac_enable_dma_irq(priv, chan);
3507         }
3508         return work_done;
3509 }
3510
3511 /**
3512  *  stmmac_tx_timeout
3513  *  @dev : Pointer to net device structure
3514  *  Description: this function is called when a packet transmission fails to
3515  *   complete within a reasonable time. The driver will mark the error in the
3516  *   netdev structure and arrange for the device to be reset to a sane state
3517  *   in order to transmit a new packet.
3518  */
3519 static void stmmac_tx_timeout(struct net_device *dev)
3520 {
3521         struct stmmac_priv *priv = netdev_priv(dev);
3522         u32 tx_count = priv->plat->tx_queues_to_use;
3523         u32 chan;
3524
3525         /* Clear Tx resources and restart transmitting again */
3526         for (chan = 0; chan < tx_count; chan++)
3527                 stmmac_tx_err(priv, chan);
3528 }
3529
3530 /**
3531  *  stmmac_set_rx_mode - entry point for multicast addressing
3532  *  @dev : pointer to the device structure
3533  *  Description:
3534  *  This function is a driver entry point which gets called by the kernel
3535  *  whenever multicast addresses must be enabled/disabled.
3536  *  Return value:
3537  *  void.
3538  */
3539 static void stmmac_set_rx_mode(struct net_device *dev)
3540 {
3541         struct stmmac_priv *priv = netdev_priv(dev);
3542
3543         priv->hw->mac->set_filter(priv->hw, dev);
3544 }
3545
3546 /**
3547  *  stmmac_change_mtu - entry point to change MTU size for the device.
3548  *  @dev : device pointer.
3549  *  @new_mtu : the new MTU size for the device.
3550  *  Description: the Maximum Transfer Unit (MTU) is used by the network layer
3551  *  to drive packet transmission. Ethernet has an MTU of 1500 octets
3552  *  (ETH_DATA_LEN). This value can be changed with ifconfig.
3553  *  Return value:
3554  *  0 on success and an appropriate (-)ve integer as defined in errno.h
3555  *  file on failure.
3556  */
3557 static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
3558 {
3559         struct stmmac_priv *priv = netdev_priv(dev);
3560
3561         if (netif_running(dev)) {
3562                 netdev_err(priv->dev, "must be stopped to change its MTU\n");
3563                 return -EBUSY;
3564         }
3565
3566         dev->mtu = new_mtu;
3567
3568         netdev_update_features(dev);
3569
3570         return 0;
3571 }
3572
3573 static netdev_features_t stmmac_fix_features(struct net_device *dev,
3574                                              netdev_features_t features)
3575 {
3576         struct stmmac_priv *priv = netdev_priv(dev);
3577
3578         if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
3579                 features &= ~NETIF_F_RXCSUM;
3580
3581         if (!priv->plat->tx_coe)
3582                 features &= ~NETIF_F_CSUM_MASK;
3583
3584         /* Some GMAC devices have buggy Jumbo frame support that
3585          * requires the Tx COE to be disabled for oversized frames
3586          * (due to limited buffer sizes). In this case we disable
3587          * the TX csum insertion in the TDES and do not use SF.
3588          */
3589         if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
3590                 features &= ~NETIF_F_CSUM_MASK;
3591
3592         /* Enable or disable TSO according to the ethtool request */
3593         if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
3594                 if (features & NETIF_F_TSO)
3595                         priv->tso = true;
3596                 else
3597                         priv->tso = false;
3598         }
3599
3600         return features;
3601 }
3602
3603 static int stmmac_set_features(struct net_device *netdev,
3604                                netdev_features_t features)
3605 {
3606         struct stmmac_priv *priv = netdev_priv(netdev);
3607
3608         /* Keep the COE Type if RX checksum offload is supported */
3609         if (features & NETIF_F_RXCSUM)
3610                 priv->hw->rx_csum = priv->plat->rx_coe;
3611         else
3612                 priv->hw->rx_csum = 0;
3613         /* No check is needed because rx_coe has already been set and any issue
3614          * will have been fixed up before this point.
3615          */
3616         priv->hw->mac->rx_ipc(priv->hw);
3617
3618         return 0;
3619 }
3620
3621 /**
3622  *  stmmac_interrupt - main ISR
3623  *  @irq: interrupt number.
3624  *  @dev_id: to pass the net device pointer.
3625  *  Description: this is the main driver interrupt service routine.
3626  *  It can call:
3627  *  o DMA service routine (to manage incoming frame reception and transmission
3628  *    status)
3629  *  o Core interrupts to manage: remote wake-up, management counter, LPI
3630  *    interrupts.
3631  */
3632 static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
3633 {
3634         struct net_device *dev = (struct net_device *)dev_id;
3635         struct stmmac_priv *priv = netdev_priv(dev);
3636         u32 rx_cnt = priv->plat->rx_queues_to_use;
3637         u32 tx_cnt = priv->plat->tx_queues_to_use;
3638         u32 queues_count;
3639         u32 queue;
3640
3641         queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt;
3642
3643         if (priv->irq_wake)
3644                 pm_wakeup_event(priv->device, 0);
3645
3646         if (unlikely(!dev)) {
3647                 netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
3648                 return IRQ_NONE;
3649         }
3650
3651         /* To handle GMAC own interrupts */
3652         if ((priv->plat->has_gmac) || (priv->plat->has_gmac4)) {
3653                 int status = priv->hw->mac->host_irq_status(priv->hw,
3654                                                             &priv->xstats);
3655
3656                 if (unlikely(status)) {
3657                         /* For LPI we need to save the tx status */
3658                         if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
3659                                 priv->tx_path_in_lpi_mode = true;
3660                         if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
3661                                 priv->tx_path_in_lpi_mode = false;
3662                 }
3663
3664                 if (priv->synopsys_id >= DWMAC_CORE_4_00) {
3665                         for (queue = 0; queue < queues_count; queue++) {
3666                                 struct stmmac_rx_queue *rx_q =
3667                                 &priv->rx_queue[queue];
3668
3669                                 status |=
3670                                 priv->hw->mac->host_mtl_irq_status(priv->hw,
3671                                                                    queue);
3672
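                                /* On an MTL RX FIFO overflow, writing the RX
                                 * tail pointer kicks the DMA to resume
                                 * fetching receive descriptors.
                                 */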
3673                                 if (status & CORE_IRQ_MTL_RX_OVERFLOW &&
3674                                     priv->hw->dma->set_rx_tail_ptr)
3675                                         priv->hw->dma->set_rx_tail_ptr(priv->ioaddr,
3676                                                                 rx_q->rx_tail_addr,
3677                                                                 queue);
3678                         }
3679                 }
3680
3681                 /* PCS link status */
3682                 if (priv->hw->pcs) {
3683                         if (priv->xstats.pcs_link)
3684                                 netif_carrier_on(dev);
3685                         else
3686                                 netif_carrier_off(dev);
3687                 }
3688         }
3689
3690         /* To handle DMA interrupts */
3691         stmmac_dma_interrupt(priv);
3692
3693         return IRQ_HANDLED;
3694 }
3695
3696 #ifdef CONFIG_NET_POLL_CONTROLLER
3697 /* Polling receive - used by NETCONSOLE and other diagnostic tools
3698  * to allow network I/O with interrupts disabled.
3699  */
3700 static void stmmac_poll_controller(struct net_device *dev)
3701 {
3702         disable_irq(dev->irq);
3703         stmmac_interrupt(dev->irq, dev);
3704         enable_irq(dev->irq);
3705 }
3706 #endif
3707
3708 /**
3709  *  stmmac_ioctl - Entry point for the Ioctl
3710  *  @dev: Device pointer.
3711  *  @rq: An IOCTL specific structure, that can contain a pointer to
3712  *  a proprietary structure used to pass information to the driver.
3713  *  @cmd: IOCTL command
3714  *  Description:
3715  *  Currently it supports the phy_mii_ioctl(...) and HW time stamping.
3716  */
3717 static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
3718 {
3719         int ret = -EOPNOTSUPP;
3720
3721         if (!netif_running(dev))
3722                 return -EINVAL;
3723
3724         switch (cmd) {
3725         case SIOCGMIIPHY:
3726         case SIOCGMIIREG:
3727         case SIOCSMIIREG:
3728                 if (!dev->phydev)
3729                         return -EINVAL;
3730                 ret = phy_mii_ioctl(dev->phydev, rq, cmd);
3731                 break;
3732         case SIOCSHWTSTAMP:
3733                 ret = stmmac_hwtstamp_ioctl(dev, rq);
3734                 break;
3735         default:
3736                 break;
3737         }
3738
3739         return ret;
3740 }
3741
3742 #ifdef CONFIG_DEBUG_FS
3743 static struct dentry *stmmac_fs_dir;
3744
3745 static void sysfs_display_ring(void *head, int size, int extend_desc,
3746                                struct seq_file *seq)
3747 {
3748         int i;
3749         struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
3750         struct dma_desc *p = (struct dma_desc *)head;
3751
3752         for (i = 0; i < size; i++) {
3753                 if (extend_desc) {
3754                         seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
3755                                    i, (unsigned int)virt_to_phys(ep),
3756                                    le32_to_cpu(ep->basic.des0),
3757                                    le32_to_cpu(ep->basic.des1),
3758                                    le32_to_cpu(ep->basic.des2),
3759                                    le32_to_cpu(ep->basic.des3));
3760                         ep++;
3761                 } else {
3762                         seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
3763                                    i, (unsigned int)virt_to_phys(p),
3764                                    le32_to_cpu(p->des0), le32_to_cpu(p->des1),
3765                                    le32_to_cpu(p->des2), le32_to_cpu(p->des3));
3766                         p++;
3767                 }
3768                 seq_printf(seq, "\n");
3769         }
3770 }
3771
3772 static int stmmac_sysfs_ring_read(struct seq_file *seq, void *v)
3773 {
3774         struct net_device *dev = seq->private;
3775         struct stmmac_priv *priv = netdev_priv(dev);
3776         u32 rx_count = priv->plat->rx_queues_to_use;
3777         u32 tx_count = priv->plat->tx_queues_to_use;
3778         u32 queue;
3779
3780         for (queue = 0; queue < rx_count; queue++) {
3781                 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3782
3783                 seq_printf(seq, "RX Queue %d:\n", queue);
3784
3785                 if (priv->extend_desc) {
3786                         seq_printf(seq, "Extended descriptor ring:\n");
3787                         sysfs_display_ring((void *)rx_q->dma_erx,
3788                                            DMA_RX_SIZE, 1, seq);
3789                 } else {
3790                         seq_printf(seq, "Descriptor ring:\n");
3791                         sysfs_display_ring((void *)rx_q->dma_rx,
3792                                            DMA_RX_SIZE, 0, seq);
3793                 }
3794         }
3795
3796         for (queue = 0; queue < tx_count; queue++) {
3797                 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
3798
3799                 seq_printf(seq, "TX Queue %d:\n", queue);
3800
3801                 if (priv->extend_desc) {
3802                         seq_printf(seq, "Extended descriptor ring:\n");
3803                         sysfs_display_ring((void *)tx_q->dma_etx,
3804                                            DMA_TX_SIZE, 1, seq);
3805                 } else {
3806                         seq_printf(seq, "Descriptor ring:\n");
3807                         sysfs_display_ring((void *)tx_q->dma_tx,
3808                                            DMA_TX_SIZE, 0, seq);
3809                 }
3810         }
3811
3812         return 0;
3813 }
3814
3815 static int stmmac_sysfs_ring_open(struct inode *inode, struct file *file)
3816 {
3817         return single_open(file, stmmac_sysfs_ring_read, inode->i_private);
3818 }
3819
3820 /* Debugfs files, should appear in /sys/kernel/debug/stmmaceth/eth0 */
3821
3822 static const struct file_operations stmmac_rings_status_fops = {
3823         .owner = THIS_MODULE,
3824         .open = stmmac_sysfs_ring_open,
3825         .read = seq_read,
3826         .llseek = seq_lseek,
3827         .release = single_release,
3828 };
3829
3830 static int stmmac_sysfs_dma_cap_read(struct seq_file *seq, void *v)
3831 {
3832         struct net_device *dev = seq->private;
3833         struct stmmac_priv *priv = netdev_priv(dev);
3834
3835         if (!priv->hw_cap_support) {
3836                 seq_printf(seq, "DMA HW features not supported\n");
3837                 return 0;
3838         }
3839
3840         seq_printf(seq, "==============================\n");
3841         seq_printf(seq, "\tDMA HW features\n");
3842         seq_printf(seq, "==============================\n");
3843
3844         seq_printf(seq, "\t10/100 Mbps: %s\n",
3845                    (priv->dma_cap.mbps_10_100) ? "Y" : "N");
3846         seq_printf(seq, "\t1000 Mbps: %s\n",
3847                    (priv->dma_cap.mbps_1000) ? "Y" : "N");
3848         seq_printf(seq, "\tHalf duplex: %s\n",
3849                    (priv->dma_cap.half_duplex) ? "Y" : "N");
3850         seq_printf(seq, "\tHash Filter: %s\n",
3851                    (priv->dma_cap.hash_filter) ? "Y" : "N");
3852         seq_printf(seq, "\tMultiple MAC address registers: %s\n",
3853                    (priv->dma_cap.multi_addr) ? "Y" : "N");
3854         seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n",
3855                    (priv->dma_cap.pcs) ? "Y" : "N");
3856         seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
3857                    (priv->dma_cap.sma_mdio) ? "Y" : "N");
3858         seq_printf(seq, "\tPMT Remote wake up: %s\n",
3859                    (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N");
3860         seq_printf(seq, "\tPMT Magic Frame: %s\n",
3861                    (priv->dma_cap.pmt_magic_frame) ? "Y" : "N");
3862         seq_printf(seq, "\tRMON module: %s\n",
3863                    (priv->dma_cap.rmon) ? "Y" : "N");
3864         seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
3865                    (priv->dma_cap.time_stamp) ? "Y" : "N");
3866         seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
3867                    (priv->dma_cap.atime_stamp) ? "Y" : "N");
3868         seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n",
3869                    (priv->dma_cap.eee) ? "Y" : "N");
3870         seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
3871         seq_printf(seq, "\tChecksum Offload in TX: %s\n",
3872                    (priv->dma_cap.tx_coe) ? "Y" : "N");
3873         if (priv->synopsys_id >= DWMAC_CORE_4_00) {
3874                 seq_printf(seq, "\tIP Checksum Offload in RX: %s\n",
3875                            (priv->dma_cap.rx_coe) ? "Y" : "N");
3876         } else {
3877                 seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
3878                            (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
3879                 seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
3880                            (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
3881         }
3882         seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
3883                    (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
3884         seq_printf(seq, "\tNumber of Additional RX channel: %d\n",
3885                    priv->dma_cap.number_rx_channel);
3886         seq_printf(seq, "\tNumber of Additional TX channel: %d\n",
3887                    priv->dma_cap.number_tx_channel);
3888         seq_printf(seq, "\tEnhanced descriptors: %s\n",
3889                    (priv->dma_cap.enh_desc) ? "Y" : "N");
3890
3891         return 0;
3892 }
3893
3894 static int stmmac_sysfs_dma_cap_open(struct inode *inode, struct file *file)
3895 {
3896         return single_open(file, stmmac_sysfs_dma_cap_read, inode->i_private);
3897 }
3898
3899 static const struct file_operations stmmac_dma_cap_fops = {
3900         .owner = THIS_MODULE,
3901         .open = stmmac_sysfs_dma_cap_open,
3902         .read = seq_read,
3903         .llseek = seq_lseek,
3904         .release = single_release,
3905 };
3906
3907 static int stmmac_init_fs(struct net_device *dev)
3908 {
3909         struct stmmac_priv *priv = netdev_priv(dev);
3910
3911         /* Create per netdev entries */
3912         priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);
3913
3914         if (!priv->dbgfs_dir || IS_ERR(priv->dbgfs_dir)) {
3915                 netdev_err(priv->dev, "ERROR failed to create debugfs directory\n");
3916
3917                 return -ENOMEM;
3918         }
3919
3920         /* Entry to report DMA RX/TX rings */
3921         priv->dbgfs_rings_status =
3922                 debugfs_create_file("descriptors_status", S_IRUGO,
3923                                     priv->dbgfs_dir, dev,
3924                                     &stmmac_rings_status_fops);
3925
3926         if (!priv->dbgfs_rings_status || IS_ERR(priv->dbgfs_rings_status)) {
3927                 netdev_err(priv->dev, "ERROR creating stmmac ring debugfs file\n");
3928                 debugfs_remove_recursive(priv->dbgfs_dir);
3929
3930                 return -ENOMEM;
3931         }
3932
3933         /* Entry to report the DMA HW features */
3934         priv->dbgfs_dma_cap = debugfs_create_file("dma_cap", S_IRUGO,
3935                                             priv->dbgfs_dir,
3936                                             dev, &stmmac_dma_cap_fops);
3937
3938         if (!priv->dbgfs_dma_cap || IS_ERR(priv->dbgfs_dma_cap)) {
3939                 netdev_err(priv->dev, "ERROR creating stmmac MMC debugfs file\n");
3940                 debugfs_remove_recursive(priv->dbgfs_dir);
3941
3942                 return -ENOMEM;
3943         }
3944
3945         return 0;
3946 }
3947
3948 static void stmmac_exit_fs(struct net_device *dev)
3949 {
3950         struct stmmac_priv *priv = netdev_priv(dev);
3951
3952         debugfs_remove_recursive(priv->dbgfs_dir);
3953 }
3954 #endif /* CONFIG_DEBUG_FS */
3955
3956 static const struct net_device_ops stmmac_netdev_ops = {
3957         .ndo_open = stmmac_open,
3958         .ndo_start_xmit = stmmac_xmit,
3959         .ndo_stop = stmmac_release,
3960         .ndo_change_mtu = stmmac_change_mtu,
3961         .ndo_fix_features = stmmac_fix_features,
3962         .ndo_set_features = stmmac_set_features,
3963         .ndo_set_rx_mode = stmmac_set_rx_mode,
3964         .ndo_tx_timeout = stmmac_tx_timeout,
3965         .ndo_do_ioctl = stmmac_ioctl,
3966 #ifdef CONFIG_NET_POLL_CONTROLLER
3967         .ndo_poll_controller = stmmac_poll_controller,
3968 #endif
3969         .ndo_set_mac_address = eth_mac_addr,
3970 };
3971
3972 /**
3973  *  stmmac_hw_init - Init the MAC device
3974  *  @priv: driver private structure
3975  *  Description: this function is to configure the MAC device according to
3976  *  some platform parameters or the HW capability register. It prepares the
3977  *  driver to use either ring or chain modes and to setup either enhanced or
3978  *  normal descriptors.
3979  */
3980 static int stmmac_hw_init(struct stmmac_priv *priv)
3981 {
3982         struct mac_device_info *mac;
3983
3984         /* Identify the MAC HW device */
3985         if (priv->plat->setup) {
3986                 mac = priv->plat->setup(priv);
3987         } else if (priv->plat->has_gmac) {
3988                 priv->dev->priv_flags |= IFF_UNICAST_FLT;
3989                 mac = dwmac1000_setup(priv->ioaddr,
3990                                       priv->plat->multicast_filter_bins,
3991                                       priv->plat->unicast_filter_entries,
3992                                       &priv->synopsys_id);
3993         } else if (priv->plat->has_gmac4) {
3994                 priv->dev->priv_flags |= IFF_UNICAST_FLT;
3995                 mac = dwmac4_setup(priv->ioaddr,
3996                                    priv->plat->multicast_filter_bins,
3997                                    priv->plat->unicast_filter_entries,
3998                                    &priv->synopsys_id);
3999         } else {
4000                 mac = dwmac100_setup(priv->ioaddr, &priv->synopsys_id);
4001         }
4002         if (!mac)
4003                 return -ENOMEM;
4004
4005         priv->hw = mac;
4006
4007         /* dwmac-sun8i only works in chain mode */
4008         if (priv->plat->has_sun8i)
4009                 chain_mode = 1;
4010
4011         /* To use the chained or ring mode */
4012         if (priv->synopsys_id >= DWMAC_CORE_4_00) {
4013                 priv->hw->mode = &dwmac4_ring_mode_ops;
4014         } else {
4015                 if (chain_mode) {
4016                         priv->hw->mode = &chain_mode_ops;
4017                         dev_info(priv->device, "Chain mode enabled\n");
4018                         priv->mode = STMMAC_CHAIN_MODE;
4019                 } else {
4020                         priv->hw->mode = &ring_mode_ops;
4021                         dev_info(priv->device, "Ring mode enabled\n");
4022                         priv->mode = STMMAC_RING_MODE;
4023                 }
4024         }
4025
4026         /* Get the HW capability (new GMAC newer than 3.50a) */
4027         priv->hw_cap_support = stmmac_get_hw_features(priv);
4028         if (priv->hw_cap_support) {
4029                 dev_info(priv->device, "DMA HW capability register supported\n");
4030
4031                 /* We can override some gmac/dma configuration fields (e.g.
4032                  * enh_desc, tx_coe) that are passed through the
4033                  * platform with the values from the HW capability
4034                  * register (if supported).
4035                  */
4036                 priv->plat->enh_desc = priv->dma_cap.enh_desc;
4037                 priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up;
4038                 priv->hw->pmt = priv->plat->pmt;
4039
4040                 /* TXCOE doesn't work in thresh DMA mode */
4041                 if (priv->plat->force_thresh_dma_mode)
4042                         priv->plat->tx_coe = 0;
4043                 else
4044                         priv->plat->tx_coe = priv->dma_cap.tx_coe;
4045
4046                 /* In case of GMAC4 rx_coe is from HW cap register. */
4047                 priv->plat->rx_coe = priv->dma_cap.rx_coe;
4048
4049                 if (priv->dma_cap.rx_coe_type2)
4050                         priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
4051                 else if (priv->dma_cap.rx_coe_type1)
4052                         priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;
4053
4054         } else {
4055                 dev_info(priv->device, "No HW DMA feature register supported\n");
4056         }
4057
4058         /* To use alternate (extended), normal or GMAC4 descriptor structures */
4059         if (priv->synopsys_id >= DWMAC_CORE_4_00)
4060                 priv->hw->desc = &dwmac4_desc_ops;
4061         else
4062                 stmmac_selec_desc_mode(priv);
4063
4064         if (priv->plat->rx_coe) {
4065                 priv->hw->rx_csum = priv->plat->rx_coe;
4066                 dev_info(priv->device, "RX Checksum Offload Engine supported\n");
4067                 if (priv->synopsys_id < DWMAC_CORE_4_00)
4068                         dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum);
4069         }
4070         if (priv->plat->tx_coe)
4071                 dev_info(priv->device, "TX Checksum insertion supported\n");
4072
4073         if (priv->plat->pmt) {
4074                 dev_info(priv->device, "Wake-Up On Lan supported\n");
4075                 device_set_wakeup_capable(priv->device, 1);
4076         }
4077
4078         if (priv->dma_cap.tsoen)
4079                 dev_info(priv->device, "TSO supported\n");
4080
4081         return 0;
4082 }
4083
4084 /**
4085  * stmmac_dvr_probe
4086  * @device: device pointer
4087  * @plat_dat: platform data pointer
4088  * @res: stmmac resource pointer
4089  * Description: this is the main probe function: it allocates the
4090  * net_device, sets up the private structure and registers the device.
4091  * Return:
4092  * 0 on success, otherwise a negative errno.
4093  */
4094 int stmmac_dvr_probe(struct device *device,
4095                      struct plat_stmmacenet_data *plat_dat,
4096                      struct stmmac_resources *res)
4097 {
4098         struct net_device *ndev = NULL;
4099         struct stmmac_priv *priv;
4100         int ret = 0;
4101         u32 queue;
4102
4103         ndev = alloc_etherdev_mqs(sizeof(struct stmmac_priv),
4104                                   MTL_MAX_TX_QUEUES,
4105                                   MTL_MAX_RX_QUEUES);
4106         if (!ndev)
4107                 return -ENOMEM;
4108
4109         SET_NETDEV_DEV(ndev, device);
4110
4111         priv = netdev_priv(ndev);
4112         priv->device = device;
4113         priv->dev = ndev;
4114
4115         stmmac_set_ethtool_ops(ndev);
4116         priv->pause = pause;
4117         priv->plat = plat_dat;
4118         priv->ioaddr = res->addr;
4119         priv->dev->base_addr = (unsigned long)res->addr;
4120
4121         priv->dev->irq = res->irq;
4122         priv->wol_irq = res->wol_irq;
4123         priv->lpi_irq = res->lpi_irq;
4124
4125         if (res->mac)
4126                 memcpy(priv->dev->dev_addr, res->mac, ETH_ALEN);
4127
4128         dev_set_drvdata(device, priv->dev);
4129
4130         /* Verify driver arguments */
4131         stmmac_verify_args();
4132
4133         /* Override with kernel parameters if supplied XXX CRS XXX
4134          * this needs to have multiple instances
4135          */
4136         if ((phyaddr >= 0) && (phyaddr <= 31))
4137                 priv->plat->phy_addr = phyaddr;
4138
4139         if (priv->plat->stmmac_rst) {
4140                 ret = reset_control_assert(priv->plat->stmmac_rst);
4141                 reset_control_deassert(priv->plat->stmmac_rst);
4142                 /* Some reset controllers provide only a reset callback
4143                  * instead of an assert + deassert callback pair.
4144                  */
4145                 if (ret == -ENOTSUPP)
4146                         reset_control_reset(priv->plat->stmmac_rst);
4147         }
4148
4149         /* Init MAC and get the capabilities */
4150         ret = stmmac_hw_init(priv);
4151         if (ret)
4152                 goto error_hw_init;
4153
4154         /* Configure real RX and TX queues */
4155         netif_set_real_num_rx_queues(ndev, priv->plat->rx_queues_to_use);
4156         netif_set_real_num_tx_queues(ndev, priv->plat->tx_queues_to_use);
4157
4158         ndev->netdev_ops = &stmmac_netdev_ops;
4159
4160         ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
4161                             NETIF_F_RXCSUM;
4162
4163         if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
4164                 ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
4165                 priv->tso = true;
4166                 dev_info(priv->device, "TSO feature enabled\n");
4167         }
4168         ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
4169         ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
4170 #ifdef STMMAC_VLAN_TAG_USED
4171         /* Both mac100 and gmac support receive VLAN tag detection */
4172         ndev->features |= NETIF_F_HW_VLAN_CTAG_RX;
4173 #endif
4174         priv->msg_enable = netif_msg_init(debug, default_msg_level);
4175
4176         /* MTU range: 46 - hw-specific max */
4177         ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
4178         if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
4179                 ndev->max_mtu = JUMBO_LEN;
4180         else
4181                 ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
4182         /* Do not overwrite ndev->max_mtu if plat->maxmtu is greater than
4183          * ndev->max_mtu or lower than ndev->min_mtu, which is an invalid range.
4184          */
4185         if ((priv->plat->maxmtu < ndev->max_mtu) &&
4186             (priv->plat->maxmtu >= ndev->min_mtu))
4187                 ndev->max_mtu = priv->plat->maxmtu;
4188         else if (priv->plat->maxmtu < ndev->min_mtu)
4189                 dev_warn(priv->device,
4190                          "%s: warning: maxmtu having invalid value (%d)\n",
4191                          __func__, priv->plat->maxmtu);
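	/* Worked example (values for illustration only): with enh_desc set,
	 * max_mtu starts at JUMBO_LEN; a platform maxmtu of 1500 then lowers
	 * it to 1500, while a maxmtu above JUMBO_LEN or below min_mtu (46)
	 * is ignored, the latter with the warning above.
	 */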
4192
4193         if (flow_ctrl)
4194                 priv->flow_ctrl = FLOW_AUTO;    /* RX/TX pause on */
4195
4196         /* Rx Watchdog is available in cores newer than 3.40.
4197          * In some cases, for example on buggy HW, this feature
4198          * has to be disabled and this can be done by passing the
4199          * riwt_off field from the platform.
4200          */
4201         if ((priv->synopsys_id >= DWMAC_CORE_3_50) && (!priv->plat->riwt_off)) {
4202                 priv->use_riwt = 1;
4203                 dev_info(priv->device,
4204                          "Enable RX Mitigation via HW Watchdog Timer\n");
4205         }
4206
4207         for (queue = 0; queue < priv->plat->rx_queues_to_use; queue++) {
4208                 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
4209
4210                 netif_napi_add(ndev, &rx_q->napi, stmmac_poll,
4211                                (8 * priv->plat->rx_queues_to_use));
4212         }
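	/* One NAPI context is registered per RX queue; the poll weight passed
	 * above scales with the number of RX queues in use (8 * rx_queues_to_use,
	 * e.g. a weight of 32 with four RX queues).
	 */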
4213
4214         spin_lock_init(&priv->lock);
4215
4216         /* If a specific clk_csr value is passed from the platform,
4217          * the CSR Clock Range selection cannot be changed at run-time
4218          * and it is fixed. Otherwise the driver will try to set the
4219          * MDC clock dynamically according to the actual CSR clock
4220          * input.
4221          */
4222         if (!priv->plat->clk_csr)
4223                 stmmac_clk_csr_set(priv);
4224         else
4225                 priv->clk_csr = priv->plat->clk_csr;
4226
4227         stmmac_check_pcs_mode(priv);
4228
4229         if (priv->hw->pcs != STMMAC_PCS_RGMII  &&
4230             priv->hw->pcs != STMMAC_PCS_TBI &&
4231             priv->hw->pcs != STMMAC_PCS_RTBI) {
4232                 /* MDIO bus Registration */
4233                 ret = stmmac_mdio_register(ndev);
4234                 if (ret < 0) {
4235                         dev_err(priv->device,
4236                                 "%s: MDIO bus (id: %d) registration failed",
4237                                 __func__, priv->plat->bus_id);
4238                         goto error_mdio_register;
4239                 }
4240         }
4241
4242         ret = register_netdev(ndev);
4243         if (ret) {
4244                 dev_err(priv->device, "%s: ERROR %i registering the device\n",
4245                         __func__, ret);
4246                 goto error_netdev_register;
4247         }
4248
4249         return ret;
4250
4251 error_netdev_register:
4252         if (priv->hw->pcs != STMMAC_PCS_RGMII &&
4253             priv->hw->pcs != STMMAC_PCS_TBI &&
4254             priv->hw->pcs != STMMAC_PCS_RTBI)
4255                 stmmac_mdio_unregister(ndev);
4256 error_mdio_register:
4257         for (queue = 0; queue < priv->plat->rx_queues_to_use; queue++) {
4258                 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
4259
4260                 netif_napi_del(&rx_q->napi);
4261         }
4262 error_hw_init:
4263         free_netdev(ndev);
4264
4265         return ret;
4266 }
4267 EXPORT_SYMBOL_GPL(stmmac_dvr_probe);
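/*
 * Illustrative sketch (not part of the driver): how a hypothetical platform
 * glue driver could fill a struct stmmac_resources and hand control over to
 * stmmac_dvr_probe().  The device name, resource indices and the use of
 * stmmac_probe_config_dt() are assumptions chosen for the example, not
 * requirements of this interface.
 *
 *	static int foo_dwmac_probe(struct platform_device *pdev)
 *	{
 *		struct plat_stmmacenet_data *plat_dat;
 *		struct stmmac_resources stmmac_res;
 *		struct resource *res;
 *
 *		memset(&stmmac_res, 0, sizeof(stmmac_res));
 *
 *		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 *		stmmac_res.addr = devm_ioremap_resource(&pdev->dev, res);
 *		if (IS_ERR(stmmac_res.addr))
 *			return PTR_ERR(stmmac_res.addr);
 *
 *		stmmac_res.irq = platform_get_irq(pdev, 0);
 *		if (stmmac_res.irq < 0)
 *			return stmmac_res.irq;
 *
 *		plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac);
 *		if (IS_ERR(plat_dat))
 *			return PTR_ERR(plat_dat);
 *
 *		return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
 *	}
 */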
4268
4269 /**
4270  * stmmac_dvr_remove
4271  * @dev: device pointer
4272  * Description: this function resets the TX/RX processes, disables the MAC RX/TX,
4273  * changes the link status and releases the DMA descriptor rings.
4274  */
4275 int stmmac_dvr_remove(struct device *dev)
4276 {
4277         struct net_device *ndev = dev_get_drvdata(dev);
4278         struct stmmac_priv *priv = netdev_priv(ndev);
4279
4280         netdev_info(priv->dev, "%s: removing driver", __func__);
4281
4282         stmmac_stop_all_dma(priv);
4283
4284         priv->hw->mac->set_mac(priv->ioaddr, false);
4285         netif_carrier_off(ndev);
4286         unregister_netdev(ndev);
4287         if (priv->plat->stmmac_rst)
4288                 reset_control_assert(priv->plat->stmmac_rst);
4289         clk_disable_unprepare(priv->plat->pclk);
4290         clk_disable_unprepare(priv->plat->stmmac_clk);
4291         if (priv->hw->pcs != STMMAC_PCS_RGMII &&
4292             priv->hw->pcs != STMMAC_PCS_TBI &&
4293             priv->hw->pcs != STMMAC_PCS_RTBI)
4294                 stmmac_mdio_unregister(ndev);
4295         free_netdev(ndev);
4296
4297         return 0;
4298 }
4299 EXPORT_SYMBOL_GPL(stmmac_dvr_remove);
4300
4301 /**
4302  * stmmac_suspend - suspend callback
4303  * @dev: device pointer
4304  * Description: this is the function to suspend the device and it is called
4305  * by the platform driver to stop the network queue, release the resources,
4306  * program the PMT register (for WoL), clean and release driver resources.
4307  */
4308 int stmmac_suspend(struct device *dev)
4309 {
4310         struct net_device *ndev = dev_get_drvdata(dev);
4311         struct stmmac_priv *priv = netdev_priv(ndev);
4312         unsigned long flags;
4313
4314         if (!ndev || !netif_running(ndev))
4315                 return 0;
4316
4317         if (ndev->phydev)
4318                 phy_stop(ndev->phydev);
4319
4320         spin_lock_irqsave(&priv->lock, flags);
4321
4322         netif_device_detach(ndev);
4323         stmmac_stop_all_queues(priv);
4324
4325         stmmac_disable_all_queues(priv);
4326
4327         /* Stop TX/RX DMA */
4328         stmmac_stop_all_dma(priv);
4329
4330         /* Enable Power down mode by programming the PMT regs */
4331         if (device_may_wakeup(priv->device)) {
4332                 priv->hw->mac->pmt(priv->hw, priv->wolopts);
4333                 priv->irq_wake = 1;
4334         } else {
4335                 priv->hw->mac->set_mac(priv->ioaddr, false);
4336                 pinctrl_pm_select_sleep_state(priv->device);
4337                 /* Disable the clocks as wakeup (PMT) is not used */
4338                 clk_disable(priv->plat->pclk);
4339                 clk_disable(priv->plat->stmmac_clk);
4340         }
4341         spin_unlock_irqrestore(&priv->lock, flags);
4342
4343         priv->oldlink = false;
4344         priv->speed = SPEED_UNKNOWN;
4345         priv->oldduplex = DUPLEX_UNKNOWN;
4346         return 0;
4347 }
4348 EXPORT_SYMBOL_GPL(stmmac_suspend);
4349
4350 /**
4351  * stmmac_reset_queues_param - reset queue parameters
4352  * @priv: driver private structure
4353  */
4354 static void stmmac_reset_queues_param(struct stmmac_priv *priv)
4355 {
4356         u32 rx_cnt = priv->plat->rx_queues_to_use;
4357         u32 tx_cnt = priv->plat->tx_queues_to_use;
4358         u32 queue;
4359
4360         for (queue = 0; queue < rx_cnt; queue++) {
4361                 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
4362
4363                 rx_q->cur_rx = 0;
4364                 rx_q->dirty_rx = 0;
4365         }
4366
4367         for (queue = 0; queue < tx_cnt; queue++) {
4368                 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
4369
4370                 tx_q->cur_tx = 0;
4371                 tx_q->dirty_tx = 0;
4372         }
4373 }
4374
4375 /**
4376  * stmmac_resume - resume callback
4377  * @dev: device pointer
4378  * Description: when resuming, this function is invoked to set up the DMA
4379  * and CORE in a usable state.
4380  */
4381 int stmmac_resume(struct device *dev)
4382 {
4383         struct net_device *ndev = dev_get_drvdata(dev);
4384         struct stmmac_priv *priv = netdev_priv(ndev);
4385         unsigned long flags;
4386
4387         if (!netif_running(ndev))
4388                 return 0;
4389
4390         /* The Power Down bit in the PMT register is cleared
4391          * automatically as soon as a magic packet or a Wake-up frame
4392          * is received. Nevertheless, it is better to clear this bit
4393          * manually because it can cause problems when resuming
4394          * from other devices (e.g. a serial console).
4395          */
4396         if (device_may_wakeup(priv->device)) {
4397                 spin_lock_irqsave(&priv->lock, flags);
4398                 priv->hw->mac->pmt(priv->hw, 0);
4399                 spin_unlock_irqrestore(&priv->lock, flags);
4400                 priv->irq_wake = 0;
4401         } else {
4402                 pinctrl_pm_select_default_state(priv->device);
4403                 /* enable the clocks previously disabled */
4404                 clk_enable(priv->plat->stmmac_clk);
4405                 clk_enable(priv->plat->pclk);
4406                 /* reset the phy so that it's ready */
4407                 if (priv->mii)
4408                         stmmac_mdio_reset(priv->mii);
4409         }
4410
4411         netif_device_attach(ndev);
4412
4413         spin_lock_irqsave(&priv->lock, flags);
4414
4415         stmmac_reset_queues_param(priv);
4416
4417         /* Reset the private MSS value to force MSS context settings at
4418          * the next TSO xmit (only used by GMAC4).
4419          */
4420         priv->mss = 0;
4421
4422         stmmac_clear_descriptors(priv);
4423
4424         stmmac_hw_setup(ndev, false);
4425         stmmac_init_tx_coalesce(priv);
4426         stmmac_set_rx_mode(ndev);
4427
4428         stmmac_enable_all_queues(priv);
4429
4430         stmmac_start_all_queues(priv);
4431
4432         spin_unlock_irqrestore(&priv->lock, flags);
4433
4434         if (ndev->phydev)
4435                 phy_start(ndev->phydev);
4436
4437         return 0;
4438 }
4439 EXPORT_SYMBOL_GPL(stmmac_resume);
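/*
 * Illustrative sketch (not part of the driver): stmmac_suspend() and
 * stmmac_resume() match the dev_pm_ops callback signatures, so a
 * hypothetical glue driver could wire them up directly; the names below
 * are assumptions for the example.
 *
 *	static SIMPLE_DEV_PM_OPS(foo_dwmac_pm_ops, stmmac_suspend, stmmac_resume);
 *
 *	static struct platform_driver foo_dwmac_driver = {
 *		.probe	= foo_dwmac_probe,
 *		.driver	= {
 *			.name	= "foo-dwmac",
 *			.pm	= &foo_dwmac_pm_ops,
 *		},
 *	};
 */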
4440
4441 #ifndef MODULE
4442 static int __init stmmac_cmdline_opt(char *str)
4443 {
4444         char *opt;
4445
4446         if (!str || !*str)
4447                 return -EINVAL;
4448         while ((opt = strsep(&str, ",")) != NULL) {
4449                 if (!strncmp(opt, "debug:", 6)) {
4450                         if (kstrtoint(opt + 6, 0, &debug))
4451                                 goto err;
4452                 } else if (!strncmp(opt, "phyaddr:", 8)) {
4453                         if (kstrtoint(opt + 8, 0, &phyaddr))
4454                                 goto err;
4455                 } else if (!strncmp(opt, "buf_sz:", 7)) {
4456                         if (kstrtoint(opt + 7, 0, &buf_sz))
4457                                 goto err;
4458                 } else if (!strncmp(opt, "tc:", 3)) {
4459                         if (kstrtoint(opt + 3, 0, &tc))
4460                                 goto err;
4461                 } else if (!strncmp(opt, "watchdog:", 9)) {
4462                         if (kstrtoint(opt + 9, 0, &watchdog))
4463                                 goto err;
4464                 } else if (!strncmp(opt, "flow_ctrl:", 10)) {
4465                         if (kstrtoint(opt + 10, 0, &flow_ctrl))
4466                                 goto err;
4467                 } else if (!strncmp(opt, "pause:", 6)) {
4468                         if (kstrtoint(opt + 6, 0, &pause))
4469                                 goto err;
4470                 } else if (!strncmp(opt, "eee_timer:", 10)) {
4471                         if (kstrtoint(opt + 10, 0, &eee_timer))
4472                                 goto err;
4473                 } else if (!strncmp(opt, "chain_mode:", 11)) {
4474                         if (kstrtoint(opt + 11, 0, &chain_mode))
4475                                 goto err;
4476                 }
4477         }
4478         return 0;
4479
4480 err:
4481         pr_err("%s: ERROR broken module parameter conversion", __func__);
4482         return -EINVAL;
4483 }
4484
4485 __setup("stmmaceth=", stmmac_cmdline_opt);
4486 #endif /* MODULE */
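/*
 * Usage example for the parser above: with the driver built into the kernel,
 * options can be passed on the kernel command line, e.g.
 *
 *	stmmaceth=debug:16,phyaddr:1,watchdog:4000,chain_mode:1
 *
 * Each "name:value" token is converted with kstrtoint(); a malformed value
 * aborts the parse and returns -EINVAL.
 */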
4487
4488 static int __init stmmac_init(void)
4489 {
4490 #ifdef CONFIG_DEBUG_FS
4491         /* Create debugfs main directory if it doesn't exist yet */
4492         if (!stmmac_fs_dir) {
4493                 stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
4494
4495                 if (!stmmac_fs_dir || IS_ERR(stmmac_fs_dir)) {
4496                         pr_err("ERROR %s, debugfs create directory failed\n",
4497                                STMMAC_RESOURCE_NAME);
4498
4499                         return -ENOMEM;
4500                 }
4501         }
4502 #endif
4503
4504         return 0;
4505 }
4506
4507 static void __exit stmmac_exit(void)
4508 {
4509 #ifdef CONFIG_DEBUG_FS
4510         debugfs_remove_recursive(stmmac_fs_dir);
4511 #endif
4512 }
4513
4514 module_init(stmmac_init)
4515 module_exit(stmmac_exit)
4516
4517 MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
4518 MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
4519 MODULE_LICENSE("GPL");