net: stmmac: enable TSO for IPv6
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
/*******************************************************************************
  This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
  ST Ethernet IPs are built around a Synopsys IP Core.

        Copyright(C) 2007-2011 STMicroelectronics Ltd

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>

  Documentation available at:
        http://www.stlinux.com
  Support available at:
        https://bugzilla.stlinux.com/
*******************************************************************************/
26
27 #include <linux/clk.h>
28 #include <linux/kernel.h>
29 #include <linux/interrupt.h>
30 #include <linux/ip.h>
31 #include <linux/tcp.h>
32 #include <linux/skbuff.h>
33 #include <linux/ethtool.h>
34 #include <linux/if_ether.h>
35 #include <linux/crc32.h>
36 #include <linux/mii.h>
37 #include <linux/if.h>
38 #include <linux/if_vlan.h>
39 #include <linux/dma-mapping.h>
40 #include <linux/slab.h>
41 #include <linux/prefetch.h>
42 #include <linux/pinctrl/consumer.h>
43 #ifdef CONFIG_DEBUG_FS
44 #include <linux/debugfs.h>
45 #include <linux/seq_file.h>
46 #endif /* CONFIG_DEBUG_FS */
47 #include <linux/net_tstamp.h>
48 #include "stmmac_ptp.h"
49 #include "stmmac.h"
50 #include <linux/reset.h>
51 #include <linux/of_mdio.h>
52 #include "dwmac1000.h"
53
54 #define STMMAC_ALIGN(x) L1_CACHE_ALIGN(x)
55 #define TSO_MAX_BUFF_SIZE       (SZ_16K - 1)
56
/* Module parameters */
#define TX_TIMEO        5000
static int watchdog = TX_TIMEO;
module_param(watchdog, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)");

static int debug = -1;
module_param(debug, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");

static int phyaddr = -1;
module_param(phyaddr, int, S_IRUGO);
MODULE_PARM_DESC(phyaddr, "Physical device address");

#define STMMAC_TX_THRESH        (DMA_TX_SIZE / 4)
#define STMMAC_RX_THRESH        (DMA_RX_SIZE / 4)

static int flow_ctrl = FLOW_OFF;
module_param(flow_ctrl, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");

static int pause = PAUSE_TIME;
module_param(pause, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(pause, "Flow Control Pause Time");

#define TC_DEFAULT 64
static int tc = TC_DEFAULT;
module_param(tc, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(tc, "DMA threshold control value");

#define DEFAULT_BUFSIZE 1536
static int buf_sz = DEFAULT_BUFSIZE;
module_param(buf_sz, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(buf_sz, "DMA buffer size");

#define STMMAC_RX_COPYBREAK     256

static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
                                      NETIF_MSG_LINK | NETIF_MSG_IFUP |
                                      NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);

#define STMMAC_DEFAULT_LPI_TIMER        1000
static int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
module_param(eee_timer, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
#define STMMAC_LPI_T(x) (jiffies + msecs_to_jiffies(x))

/* By default the driver will use ring mode to manage tx and rx descriptors,
 * but allow the user to force the use of chain mode instead.
 */
static int chain_mode;
module_param(chain_mode, int, S_IRUGO);
MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");

static irqreturn_t stmmac_interrupt(int irq, void *dev_id);

#ifdef CONFIG_DEBUG_FS
static int stmmac_init_fs(struct net_device *dev);
static void stmmac_exit_fs(struct net_device *dev);
#endif

#define STMMAC_COAL_TIMER(x) (jiffies + usecs_to_jiffies(x))

/**
 * stmmac_verify_args - verify the driver parameters.
 * Description: it checks the driver parameters and sets defaults in case of
 * errors.
 */
static void stmmac_verify_args(void)
{
        if (unlikely(watchdog < 0))
                watchdog = TX_TIMEO;
        if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB)))
                buf_sz = DEFAULT_BUFSIZE;
        if (unlikely(flow_ctrl > 1))
                flow_ctrl = FLOW_AUTO;
        else if (likely(flow_ctrl < 0))
                flow_ctrl = FLOW_OFF;
        if (unlikely((pause < 0) || (pause > 0xffff)))
                pause = PAUSE_TIME;
        if (eee_timer < 0)
                eee_timer = STMMAC_DEFAULT_LPI_TIMER;
}
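
/*
 * Usage note (illustrative; not from the original source): the writable
 * parameters above (S_IWUSR) can also be tuned at run time via sysfs,
 * e.g., assuming the driver is built as the "stmmac" module:
 *
 *      # modprobe stmmac buf_sz=4096 chain_mode=1
 *      # echo 8000 > /sys/module/stmmac/parameters/watchdog
 *
 * Out-of-range values are clamped back to the defaults by
 * stmmac_verify_args() above.
 */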

/**
 * stmmac_disable_all_queues - Disable all queues
 * @priv: driver private structure
 */
static void stmmac_disable_all_queues(struct stmmac_priv *priv)
{
        u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
        u32 queue;

        for (queue = 0; queue < rx_queues_cnt; queue++) {
                struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];

                napi_disable(&rx_q->napi);
        }
}

/**
 * stmmac_enable_all_queues - Enable all queues
 * @priv: driver private structure
 */
static void stmmac_enable_all_queues(struct stmmac_priv *priv)
{
        u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
        u32 queue;

        for (queue = 0; queue < rx_queues_cnt; queue++) {
                struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];

                napi_enable(&rx_q->napi);
        }
}

/**
 * stmmac_stop_all_queues - Stop all queues
 * @priv: driver private structure
 */
static void stmmac_stop_all_queues(struct stmmac_priv *priv)
{
        u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
        u32 queue;

        for (queue = 0; queue < tx_queues_cnt; queue++)
                netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
}

/**
 * stmmac_start_all_queues - Start all queues
 * @priv: driver private structure
 */
static void stmmac_start_all_queues(struct stmmac_priv *priv)
{
        u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
        u32 queue;

        for (queue = 0; queue < tx_queues_cnt; queue++)
                netif_tx_start_queue(netdev_get_tx_queue(priv->dev, queue));
}

/**
 * stmmac_clk_csr_set - dynamically set the MDC clock
 * @priv: driver private structure
 * Description: this is to dynamically set the MDC clock according to the csr
 * clock input.
 * Note:
 *      If a specific clk_csr value is passed from the platform
 *      this means that the CSR Clock Range selection cannot be
 *      changed at run-time and it is fixed (as reported in the driver
 *      documentation). Otherwise the driver will try to set the MDC
 *      clock dynamically according to the actual clock input.
 */
static void stmmac_clk_csr_set(struct stmmac_priv *priv)
{
        u32 clk_rate;

        clk_rate = clk_get_rate(priv->plat->stmmac_clk);

        /* The platform-provided default clk_csr is assumed valid in all
         * cases except the ones handled below. For clock rates above the
         * highest IEEE 802.3 specified frequency we cannot estimate the
         * proper divider, as the frequency of clk_csr_i is not known, so
         * the default divider is left unchanged.
         */
        if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
                if (clk_rate < CSR_F_35M)
                        priv->clk_csr = STMMAC_CSR_20_35M;
                else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M))
                        priv->clk_csr = STMMAC_CSR_35_60M;
                else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M))
                        priv->clk_csr = STMMAC_CSR_60_100M;
                else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M))
                        priv->clk_csr = STMMAC_CSR_100_150M;
                else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
                        priv->clk_csr = STMMAC_CSR_150_250M;
                else if ((clk_rate >= CSR_F_250M) && (clk_rate < CSR_F_300M))
                        priv->clk_csr = STMMAC_CSR_250_300M;
        }

        if (priv->plat->has_sun8i) {
                if (clk_rate > 160000000)
                        priv->clk_csr = 0x03;
                else if (clk_rate > 80000000)
                        priv->clk_csr = 0x02;
                else if (clk_rate > 40000000)
                        priv->clk_csr = 0x01;
                else
                        priv->clk_csr = 0;
        }
}
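
/*
 * Example (hedged; the divider values come from the Synopsys databook
 * and should be double-checked against the core in use): with a 75 MHz
 * csr clock, clk_rate falls in [CSR_F_60M, CSR_F_100M), so
 * STMMAC_CSR_60_100M is selected. That range uses a divide-by-42 MDC
 * divider, giving 75 MHz / 42 ~= 1.79 MHz, below the 2.5 MHz maximum
 * that IEEE 802.3 allows for MDC.
 */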

static void print_pkt(unsigned char *buf, int len)
{
        pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf);
        print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
}

static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
{
        struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
        u32 avail;

        if (tx_q->dirty_tx > tx_q->cur_tx)
                avail = tx_q->dirty_tx - tx_q->cur_tx - 1;
        else
                avail = DMA_TX_SIZE - tx_q->cur_tx + tx_q->dirty_tx - 1;

        return avail;
}
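
/*
 * Illustration (hypothetical values): with DMA_TX_SIZE = 512,
 * cur_tx = 10 and dirty_tx = 5, the producer is ahead of the cleaner,
 * so avail = 512 - 10 + 5 - 1 = 506 free slots. With cur_tx = 5 and
 * dirty_tx = 10 (cur_tx has wrapped), avail = 10 - 5 - 1 = 4. One slot
 * is always kept unused so that a full ring can be told apart from an
 * empty one.
 */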

/**
 * stmmac_rx_dirty - Get RX queue dirty
 * @priv: driver private structure
 * @queue: RX queue index
 */
static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
{
        struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
        u32 dirty;

        if (rx_q->dirty_rx <= rx_q->cur_rx)
                dirty = rx_q->cur_rx - rx_q->dirty_rx;
        else
                dirty = DMA_RX_SIZE - rx_q->dirty_rx + rx_q->cur_rx;

        return dirty;
}
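
/*
 * Illustration (hypothetical values): "dirty" counts RX descriptors the
 * driver has consumed but not yet re-armed with fresh buffers. With
 * cur_rx = 7 and dirty_rx = 3 there are 7 - 3 = 4 entries waiting to be
 * refilled before the DMA can use them again.
 */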

/**
 * stmmac_hw_fix_mac_speed - callback for speed selection
 * @priv: driver private structure
 * Description: on some platforms (e.g. ST), some HW system configuration
 * registers have to be set according to the link speed negotiated.
 */
static inline void stmmac_hw_fix_mac_speed(struct stmmac_priv *priv)
{
        struct net_device *ndev = priv->dev;
        struct phy_device *phydev = ndev->phydev;

        if (likely(priv->plat->fix_mac_speed))
                priv->plat->fix_mac_speed(priv->plat->bsp_priv, phydev->speed);
}

/**
 * stmmac_enable_eee_mode - check and enter in LPI mode
 * @priv: driver private structure
 * Description: this function verifies that all TX queues are idle and, if
 * so, enters the LPI mode in case of EEE.
 */
static void stmmac_enable_eee_mode(struct stmmac_priv *priv)
{
        u32 tx_cnt = priv->plat->tx_queues_to_use;
        u32 queue;

        /* check if all TX queues have the work finished */
        for (queue = 0; queue < tx_cnt; queue++) {
                struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];

                if (tx_q->dirty_tx != tx_q->cur_tx)
                        return; /* still unfinished work */
        }

        /* Check and enter in LPI mode */
        if (!priv->tx_path_in_lpi_mode)
                priv->hw->mac->set_eee_mode(priv->hw,
                                            priv->plat->en_tx_lpi_clockgating);
}

/**
 * stmmac_disable_eee_mode - disable and exit from LPI mode
 * @priv: driver private structure
 * Description: this function disables EEE and exits the LPI state if it is
 * active. It is called from the xmit path.
 */
void stmmac_disable_eee_mode(struct stmmac_priv *priv)
{
        priv->hw->mac->reset_eee_mode(priv->hw);
        del_timer_sync(&priv->eee_ctrl_timer);
        priv->tx_path_in_lpi_mode = false;
}

/**
 * stmmac_eee_ctrl_timer - EEE TX SW timer.
 * @arg : data hook
 * Description:
 *  if there is no data transfer and if we are not in LPI state,
 *  then the MAC Transmitter can be moved to the LPI state.
 */
static void stmmac_eee_ctrl_timer(unsigned long arg)
{
        struct stmmac_priv *priv = (struct stmmac_priv *)arg;

        stmmac_enable_eee_mode(priv);
        mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
}

/**
 * stmmac_eee_init - init EEE
 * @priv: driver private structure
 * Description:
 *  if the GMAC supports the EEE (from the HW cap reg) and the phy device
 *  can also manage EEE, this function enables the LPI state and starts the
 *  related timer.
 */
bool stmmac_eee_init(struct stmmac_priv *priv)
{
        struct net_device *ndev = priv->dev;
        unsigned long flags;
        bool ret = false;

        /* Using PCS we cannot deal with the phy registers at this stage
         * so we do not support extra features like EEE.
         */
        if ((priv->hw->pcs == STMMAC_PCS_RGMII) ||
            (priv->hw->pcs == STMMAC_PCS_TBI) ||
            (priv->hw->pcs == STMMAC_PCS_RTBI))
                goto out;

        /* MAC core supports the EEE feature. */
        if (priv->dma_cap.eee) {
                int tx_lpi_timer = priv->tx_lpi_timer;

                /* Check if the PHY supports EEE */
                if (phy_init_eee(ndev->phydev, 1)) {
                        /* To manage at run-time if the EEE cannot be supported
                         * anymore (for example because the lp caps have been
                         * changed).
                         * In that case the driver disables its own timers.
                         */
                        spin_lock_irqsave(&priv->lock, flags);
                        if (priv->eee_active) {
                                netdev_dbg(priv->dev, "disable EEE\n");
                                del_timer_sync(&priv->eee_ctrl_timer);
                                priv->hw->mac->set_eee_timer(priv->hw, 0,
                                                             tx_lpi_timer);
                        }
                        priv->eee_active = 0;
                        spin_unlock_irqrestore(&priv->lock, flags);
                        goto out;
                }
                /* Activate the EEE and start timers */
                spin_lock_irqsave(&priv->lock, flags);
                if (!priv->eee_active) {
                        priv->eee_active = 1;
                        setup_timer(&priv->eee_ctrl_timer,
                                    stmmac_eee_ctrl_timer,
                                    (unsigned long)priv);
                        mod_timer(&priv->eee_ctrl_timer,
                                  STMMAC_LPI_T(eee_timer));

                        priv->hw->mac->set_eee_timer(priv->hw,
                                                     STMMAC_DEFAULT_LIT_LS,
                                                     tx_lpi_timer);
                }
                /* Set HW EEE according to the speed */
                priv->hw->mac->set_eee_pls(priv->hw, ndev->phydev->link);

                ret = true;
                spin_unlock_irqrestore(&priv->lock, flags);

                netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
        }
out:
        return ret;
}

/* stmmac_get_tx_hwtstamp - get HW TX timestamps
 * @priv: driver private structure
 * @p : descriptor pointer
 * @skb : the socket buffer
 * Description :
 * This function reads the timestamp from the descriptor, performs some
 * sanity checks, and passes it to the stack.
 */
static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
                                   struct dma_desc *p, struct sk_buff *skb)
{
        struct skb_shared_hwtstamps shhwtstamp;
        u64 ns;

        if (!priv->hwts_tx_en)
                return;

        /* exit if skb doesn't support hw tstamp */
        if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
                return;

        /* check tx tstamp status */
        if (priv->hw->desc->get_tx_timestamp_status(p)) {
                /* get the valid tstamp */
                ns = priv->hw->desc->get_timestamp(p, priv->adv_ts);

                memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
                shhwtstamp.hwtstamp = ns_to_ktime(ns);

                netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns);
                /* pass tstamp to stack */
                skb_tstamp_tx(skb, &shhwtstamp);
        }
}

/* stmmac_get_rx_hwtstamp - get HW RX timestamps
 * @priv: driver private structure
 * @p : descriptor pointer
 * @np : next descriptor pointer
 * @skb : the socket buffer
 * Description :
 * This function reads the received packet's timestamp from the descriptor,
 * performs some sanity checks, and passes it to the stack.
 */
static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
                                   struct dma_desc *np, struct sk_buff *skb)
{
        struct skb_shared_hwtstamps *shhwtstamp = NULL;
        u64 ns;

        if (!priv->hwts_rx_en)
                return;

        /* Check if timestamp is available */
        if (priv->hw->desc->get_rx_timestamp_status(p, priv->adv_ts)) {
                /* For GMAC4, the valid timestamp is from CTX next desc. */
                if (priv->plat->has_gmac4)
                        ns = priv->hw->desc->get_timestamp(np, priv->adv_ts);
                else
                        ns = priv->hw->desc->get_timestamp(p, priv->adv_ts);

                netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
                shhwtstamp = skb_hwtstamps(skb);
                memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
                shhwtstamp->hwtstamp = ns_to_ktime(ns);
        } else {
                netdev_dbg(priv->dev, "cannot get RX hw timestamp\n");
        }
}

/**
 *  stmmac_hwtstamp_ioctl - control hardware timestamping.
 *  @dev: device pointer.
 *  @ifr: An IOCTL specific structure, that can contain a pointer to
 *  a proprietary structure used to pass information to the driver.
 *  Description:
 *  This function configures the MAC to enable/disable both outgoing(TX)
 *  and incoming(RX) packets time stamping based on user input.
 *  Return Value:
 *  0 on success and a negative errno on failure.
 */
static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
{
        struct stmmac_priv *priv = netdev_priv(dev);
        struct hwtstamp_config config;
        struct timespec64 now;
        u64 temp = 0;
        u32 ptp_v2 = 0;
        u32 tstamp_all = 0;
        u32 ptp_over_ipv4_udp = 0;
        u32 ptp_over_ipv6_udp = 0;
        u32 ptp_over_ethernet = 0;
        u32 snap_type_sel = 0;
        u32 ts_master_en = 0;
        u32 ts_event_en = 0;
        u32 value = 0;
        u32 sec_inc;

        if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
                netdev_alert(priv->dev, "No support for HW time stamping\n");
                priv->hwts_tx_en = 0;
                priv->hwts_rx_en = 0;

                return -EOPNOTSUPP;
        }

        if (copy_from_user(&config, ifr->ifr_data,
                           sizeof(struct hwtstamp_config)))
                return -EFAULT;

        netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
                   __func__, config.flags, config.tx_type, config.rx_filter);

        /* reserved for future extensions */
        if (config.flags)
                return -EINVAL;

        if (config.tx_type != HWTSTAMP_TX_OFF &&
            config.tx_type != HWTSTAMP_TX_ON)
                return -ERANGE;

        if (priv->adv_ts) {
                switch (config.rx_filter) {
                case HWTSTAMP_FILTER_NONE:
                        /* time stamp no incoming packet at all */
                        config.rx_filter = HWTSTAMP_FILTER_NONE;
                        break;

                case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
                        /* PTP v1, UDP, any kind of event packet */
                        config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
                        /* take time stamp for all event messages */
                        if (priv->plat->has_gmac4)
                                snap_type_sel = PTP_GMAC4_TCR_SNAPTYPSEL_1;
                        else
                                snap_type_sel = PTP_TCR_SNAPTYPSEL_1;

                        ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
                        ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
                        break;

                case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
                        /* PTP v1, UDP, Sync packet */
                        config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
                        /* take time stamp for SYNC messages only */
                        ts_event_en = PTP_TCR_TSEVNTENA;

                        ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
                        ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
                        break;

                case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
                        /* PTP v1, UDP, Delay_req packet */
                        config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
                        /* take time stamp for Delay_Req messages only */
                        ts_master_en = PTP_TCR_TSMSTRENA;
                        ts_event_en = PTP_TCR_TSEVNTENA;

                        ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
                        ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
                        break;

                case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
                        /* PTP v2, UDP, any kind of event packet */
                        config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
                        ptp_v2 = PTP_TCR_TSVER2ENA;
                        /* take time stamp for all event messages */
                        if (priv->plat->has_gmac4)
                                snap_type_sel = PTP_GMAC4_TCR_SNAPTYPSEL_1;
                        else
                                snap_type_sel = PTP_TCR_SNAPTYPSEL_1;

                        ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
                        ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
                        break;

                case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
                        /* PTP v2, UDP, Sync packet */
                        config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
                        ptp_v2 = PTP_TCR_TSVER2ENA;
                        /* take time stamp for SYNC messages only */
                        ts_event_en = PTP_TCR_TSEVNTENA;

                        ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
                        ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
                        break;

                case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
                        /* PTP v2, UDP, Delay_req packet */
                        config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
                        ptp_v2 = PTP_TCR_TSVER2ENA;
                        /* take time stamp for Delay_Req messages only */
                        ts_master_en = PTP_TCR_TSMSTRENA;
                        ts_event_en = PTP_TCR_TSEVNTENA;

                        ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
                        ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
                        break;

                case HWTSTAMP_FILTER_PTP_V2_EVENT:
                        /* PTP v2/802.1AS, any layer, any kind of event packet */
                        config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
                        ptp_v2 = PTP_TCR_TSVER2ENA;
                        /* take time stamp for all event messages */
                        if (priv->plat->has_gmac4)
                                snap_type_sel = PTP_GMAC4_TCR_SNAPTYPSEL_1;
                        else
                                snap_type_sel = PTP_TCR_SNAPTYPSEL_1;

                        ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
                        ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
                        ptp_over_ethernet = PTP_TCR_TSIPENA;
                        break;

                case HWTSTAMP_FILTER_PTP_V2_SYNC:
                        /* PTP v2/802.1AS, any layer, Sync packet */
                        config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
                        ptp_v2 = PTP_TCR_TSVER2ENA;
                        /* take time stamp for SYNC messages only */
                        ts_event_en = PTP_TCR_TSEVNTENA;

                        ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
                        ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
                        ptp_over_ethernet = PTP_TCR_TSIPENA;
                        break;

                case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
                        /* PTP v2/802.1AS, any layer, Delay_req packet */
                        config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
                        ptp_v2 = PTP_TCR_TSVER2ENA;
                        /* take time stamp for Delay_Req messages only */
                        ts_master_en = PTP_TCR_TSMSTRENA;
                        ts_event_en = PTP_TCR_TSEVNTENA;

                        ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
                        ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
                        ptp_over_ethernet = PTP_TCR_TSIPENA;
                        break;

                case HWTSTAMP_FILTER_NTP_ALL:
                case HWTSTAMP_FILTER_ALL:
                        /* time stamp any incoming packet */
                        config.rx_filter = HWTSTAMP_FILTER_ALL;
                        tstamp_all = PTP_TCR_TSENALL;
                        break;

                default:
                        return -ERANGE;
                }
        } else {
                switch (config.rx_filter) {
                case HWTSTAMP_FILTER_NONE:
                        config.rx_filter = HWTSTAMP_FILTER_NONE;
                        break;
                default:
                        /* PTP v1, UDP, any kind of event packet */
                        config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
                        break;
                }
        }
        priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
        priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;

        if (!priv->hwts_tx_en && !priv->hwts_rx_en)
                priv->hw->ptp->config_hw_tstamping(priv->ptpaddr, 0);
        else {
                value = (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | PTP_TCR_TSCTRLSSR |
                         tstamp_all | ptp_v2 | ptp_over_ethernet |
                         ptp_over_ipv6_udp | ptp_over_ipv4_udp | ts_event_en |
                         ts_master_en | snap_type_sel);
                priv->hw->ptp->config_hw_tstamping(priv->ptpaddr, value);

                /* program Sub Second Increment reg */
                sec_inc = priv->hw->ptp->config_sub_second_increment(
                        priv->ptpaddr, priv->plat->clk_ptp_rate,
                        priv->plat->has_gmac4);
                temp = div_u64(1000000000ULL, sec_inc);

                /* calculate the default addend:
                 * addend = (2^32) / freq_div_ratio,
                 * where freq_div_ratio = 1e9 ns / sec_inc
                 */
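                /* Worked example (hypothetical rates): with sec_inc = 10 ns
                 * and clk_ptp_rate = 125 MHz, temp = 1e9 / 10 = 1e8, so
                 * addend = (1e8 << 32) / 125000000 = 0.8 * 2^32
                 * = 3435973836 (0xcccccccc), which fits in the u32 addend
                 * register.
                 */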
                temp = (u64)(temp << 32);
                priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
                priv->hw->ptp->config_addend(priv->ptpaddr,
                                             priv->default_addend);

                /* initialize system time */
                ktime_get_real_ts64(&now);

                /* lower 32 bits of tv_sec are safe until y2106 */
                priv->hw->ptp->init_systime(priv->ptpaddr, (u32)now.tv_sec,
                                            now.tv_nsec);
        }

        return copy_to_user(ifr->ifr_data, &config,
                            sizeof(struct hwtstamp_config)) ? -EFAULT : 0;
}
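
/*
 * Illustrative userspace sketch (not part of the driver): a hedged
 * example of how the standard SIOCSHWTSTAMP ioctl reaches
 * stmmac_hwtstamp_ioctl() above. "eth0" and fd are placeholders; fd is
 * any open AF_INET/SOCK_DGRAM socket.
 *
 *      #include <stdio.h>
 *      #include <string.h>
 *      #include <sys/ioctl.h>
 *      #include <net/if.h>
 *      #include <linux/net_tstamp.h>
 *      #include <linux/sockios.h>
 *
 *      struct hwtstamp_config cfg = {
 *              .tx_type   = HWTSTAMP_TX_ON,
 *              .rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
 *      };
 *      struct ifreq ifr;
 *
 *      memset(&ifr, 0, sizeof(ifr));
 *      strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *      ifr.ifr_data = (void *)&cfg;
 *      if (ioctl(fd, SIOCSHWTSTAMP, &ifr) < 0)
 *              perror("SIOCSHWTSTAMP");
 */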

/**
 * stmmac_init_ptp - init PTP
 * @priv: driver private structure
 * Description: this is to verify if the HW supports the PTPv1 or PTPv2.
 * This is done by looking at the HW cap. register.
 * This function also registers the ptp driver.
 */
static int stmmac_init_ptp(struct stmmac_priv *priv)
{
        if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
                return -EOPNOTSUPP;

        priv->adv_ts = 0;
        /* Check if adv_ts can be enabled for dwmac 4.x core */
        if (priv->plat->has_gmac4 && priv->dma_cap.atime_stamp)
                priv->adv_ts = 1;
        /* Dwmac 3.x core with extend_desc can support adv_ts */
        else if (priv->extend_desc && priv->dma_cap.atime_stamp)
                priv->adv_ts = 1;

        if (priv->dma_cap.time_stamp)
                netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n");

        if (priv->adv_ts)
                netdev_info(priv->dev,
                            "IEEE 1588-2008 Advanced Timestamp supported\n");

        priv->hw->ptp = &stmmac_ptp;
        priv->hwts_tx_en = 0;
        priv->hwts_rx_en = 0;

        stmmac_ptp_register(priv);

        return 0;
}

static void stmmac_release_ptp(struct stmmac_priv *priv)
{
        if (priv->plat->clk_ptp_ref)
                clk_disable_unprepare(priv->plat->clk_ptp_ref);
        stmmac_ptp_unregister(priv);
}

/**
 *  stmmac_mac_flow_ctrl - Configure flow control in all queues
 *  @priv: driver private structure
 *  @duplex: duplex mode negotiated on the link
 *  Description: It is used for configuring the flow control in all queues
 */
static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex)
{
        u32 tx_cnt = priv->plat->tx_queues_to_use;

        priv->hw->mac->flow_ctrl(priv->hw, duplex, priv->flow_ctrl,
                                 priv->pause, tx_cnt);
}

/**
 * stmmac_adjust_link - adjusts the link parameters
 * @dev: net device structure
 * Description: this is the helper called by the physical abstraction layer
 * drivers to communicate the phy link status. According to the speed and
 * duplex this driver can invoke registered glue-logic as well.
 * It also invokes the EEE initialization because this could happen when
 * switching between networks (that are EEE capable).
 */
static void stmmac_adjust_link(struct net_device *dev)
{
        struct stmmac_priv *priv = netdev_priv(dev);
        struct phy_device *phydev = dev->phydev;
        unsigned long flags;
        bool new_state = false;

        if (!phydev)
                return;

        spin_lock_irqsave(&priv->lock, flags);

        if (phydev->link) {
                u32 ctrl = readl(priv->ioaddr + MAC_CTRL_REG);

                /* Now we make sure that we can be in full duplex mode.
                 * If not, we operate in half-duplex mode.
                 */
                if (phydev->duplex != priv->oldduplex) {
                        new_state = true;
                        if (!phydev->duplex)
                                ctrl &= ~priv->hw->link.duplex;
                        else
                                ctrl |= priv->hw->link.duplex;
                        priv->oldduplex = phydev->duplex;
                }
                /* Flow Control operation */
                if (phydev->pause)
                        stmmac_mac_flow_ctrl(priv, phydev->duplex);

                if (phydev->speed != priv->speed) {
                        new_state = true;
                        ctrl &= ~priv->hw->link.speed_mask;
                        switch (phydev->speed) {
                        case SPEED_1000:
                                ctrl |= priv->hw->link.speed1000;
                                break;
                        case SPEED_100:
                                ctrl |= priv->hw->link.speed100;
                                break;
                        case SPEED_10:
                                ctrl |= priv->hw->link.speed10;
                                break;
                        default:
                                netif_warn(priv, link, priv->dev,
                                           "broken speed: %d\n", phydev->speed);
                                phydev->speed = SPEED_UNKNOWN;
                                break;
                        }
                        if (phydev->speed != SPEED_UNKNOWN)
                                stmmac_hw_fix_mac_speed(priv);
                        priv->speed = phydev->speed;
                }

                writel(ctrl, priv->ioaddr + MAC_CTRL_REG);

                if (!priv->oldlink) {
                        new_state = true;
                        priv->oldlink = true;
                }
        } else if (priv->oldlink) {
                new_state = true;
                priv->oldlink = false;
                priv->speed = SPEED_UNKNOWN;
                priv->oldduplex = DUPLEX_UNKNOWN;
        }

        if (new_state && netif_msg_link(priv))
                phy_print_status(phydev);

        spin_unlock_irqrestore(&priv->lock, flags);

        if (phydev->is_pseudo_fixed_link)
                /* Stop the PHY layer from calling the adjust-link hook when
                 * a switch is attached to the stmmac driver.
                 */
                phydev->irq = PHY_IGNORE_INTERRUPT;
        else
                /* At this stage, init the EEE if supported.
                 * Never called in case of fixed_link.
                 */
                priv->eee_enabled = stmmac_eee_init(priv);
}

/**
 * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
 * @priv: driver private structure
 * Description: this is to verify if the HW supports the PCS.
 * Physical Coding Sublayer (PCS) interface that can be used when the MAC is
 * configured for the TBI, RTBI, or SGMII PHY interface.
 */
static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
{
        int interface = priv->plat->interface;

        if (priv->dma_cap.pcs) {
                if ((interface == PHY_INTERFACE_MODE_RGMII) ||
                    (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
                    (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
                    (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
                        netdev_dbg(priv->dev, "PCS RGMII support enabled\n");
                        priv->hw->pcs = STMMAC_PCS_RGMII;
                } else if (interface == PHY_INTERFACE_MODE_SGMII) {
                        netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
                        priv->hw->pcs = STMMAC_PCS_SGMII;
                }
        }
}

/**
 * stmmac_init_phy - PHY initialization
 * @dev: net device structure
 * Description: it initializes the driver's PHY state, and attaches the PHY
 * to the mac driver.
 *  Return value:
 *  0 on success
 */
static int stmmac_init_phy(struct net_device *dev)
{
        struct stmmac_priv *priv = netdev_priv(dev);
        struct phy_device *phydev;
        char phy_id_fmt[MII_BUS_ID_SIZE + 3];
        char bus_id[MII_BUS_ID_SIZE];
        int interface = priv->plat->interface;
        int max_speed = priv->plat->max_speed;

        priv->oldlink = false;
        priv->speed = SPEED_UNKNOWN;
        priv->oldduplex = DUPLEX_UNKNOWN;

        if (priv->plat->phy_node) {
                phydev = of_phy_connect(dev, priv->plat->phy_node,
                                        &stmmac_adjust_link, 0, interface);
        } else {
                snprintf(bus_id, MII_BUS_ID_SIZE, "stmmac-%x",
                         priv->plat->bus_id);

                snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id,
                         priv->plat->phy_addr);
                netdev_dbg(priv->dev, "%s: trying to attach to %s\n", __func__,
                           phy_id_fmt);

                phydev = phy_connect(dev, phy_id_fmt, &stmmac_adjust_link,
                                     interface);
        }

        if (IS_ERR_OR_NULL(phydev)) {
                netdev_err(priv->dev, "Could not attach to PHY\n");
                if (!phydev)
                        return -ENODEV;

                return PTR_ERR(phydev);
        }

        /* Stop Advertising 1000BASE Capability if interface is not GMII */
        if ((interface == PHY_INTERFACE_MODE_MII) ||
            (interface == PHY_INTERFACE_MODE_RMII) ||
            (max_speed < 1000 && max_speed > 0))
                phydev->advertising &= ~(SUPPORTED_1000baseT_Half |
                                         SUPPORTED_1000baseT_Full);

        /*
         * Broken HW is sometimes missing the pull-up resistor on the
         * MDIO line, which results in reads to non-existent devices returning
         * 0 rather than 0xffff. Catch this here and treat 0 as a non-existent
         * device as well.
         * Note: phydev->phy_id is the result of reading the UID PHY registers.
         */
        if (!priv->plat->phy_node && phydev->phy_id == 0) {
                phy_disconnect(phydev);
                return -ENODEV;
        }

        /* stmmac_adjust_link will change this to PHY_IGNORE_INTERRUPT to avoid
         * subsequent PHY polling, make sure we force a link transition if
         * we have a UP/DOWN/UP transition
         */
        if (phydev->is_pseudo_fixed_link)
                phydev->irq = PHY_POLL;

        phy_attached_info(phydev);
        return 0;
}

static void stmmac_display_rx_rings(struct stmmac_priv *priv)
{
        u32 rx_cnt = priv->plat->rx_queues_to_use;
        void *head_rx;
        u32 queue;

        /* Display RX rings */
        for (queue = 0; queue < rx_cnt; queue++) {
                struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];

                pr_info("\tRX Queue %u rings\n", queue);

                if (priv->extend_desc)
                        head_rx = (void *)rx_q->dma_erx;
                else
                        head_rx = (void *)rx_q->dma_rx;

                /* Display RX ring */
                priv->hw->desc->display_ring(head_rx, DMA_RX_SIZE, true);
        }
}

static void stmmac_display_tx_rings(struct stmmac_priv *priv)
{
        u32 tx_cnt = priv->plat->tx_queues_to_use;
        void *head_tx;
        u32 queue;

        /* Display TX rings */
        for (queue = 0; queue < tx_cnt; queue++) {
                struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];

                pr_info("\tTX Queue %d rings\n", queue);

                if (priv->extend_desc)
                        head_tx = (void *)tx_q->dma_etx;
                else
                        head_tx = (void *)tx_q->dma_tx;

                priv->hw->desc->display_ring(head_tx, DMA_TX_SIZE, false);
        }
}

static void stmmac_display_rings(struct stmmac_priv *priv)
{
        /* Display RX ring */
        stmmac_display_rx_rings(priv);

        /* Display TX ring */
        stmmac_display_tx_rings(priv);
}

static int stmmac_set_bfsize(int mtu, int bufsize)
{
        int ret = bufsize;

        if (mtu >= BUF_SIZE_4KiB)
                ret = BUF_SIZE_8KiB;
        else if (mtu >= BUF_SIZE_2KiB)
                ret = BUF_SIZE_4KiB;
        else if (mtu > DEFAULT_BUFSIZE)
                ret = BUF_SIZE_2KiB;
        else
                ret = DEFAULT_BUFSIZE;

        return ret;
}
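
/*
 * Illustration (values follow from the constants above): an MTU of 1500
 * keeps the DEFAULT_BUFSIZE of 1536 bytes, an MTU of 1600 bumps the
 * buffer to BUF_SIZE_2KiB, 3000 selects BUF_SIZE_4KiB, and anything at
 * or above 4096 selects BUF_SIZE_8KiB.
 */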

/**
 * stmmac_clear_rx_descriptors - clear RX descriptors
 * @priv: driver private structure
 * @queue: RX queue index
 * Description: this function is called to clear the RX descriptors,
 * whether basic or extended descriptors are used.
 */
static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv, u32 queue)
{
        struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
        int i;

        /* Clear the RX descriptors */
        for (i = 0; i < DMA_RX_SIZE; i++)
                if (priv->extend_desc)
                        priv->hw->desc->init_rx_desc(&rx_q->dma_erx[i].basic,
                                                     priv->use_riwt, priv->mode,
                                                     (i == DMA_RX_SIZE - 1));
                else
                        priv->hw->desc->init_rx_desc(&rx_q->dma_rx[i],
                                                     priv->use_riwt, priv->mode,
                                                     (i == DMA_RX_SIZE - 1));
}

/**
 * stmmac_clear_tx_descriptors - clear tx descriptors
 * @priv: driver private structure
 * @queue: TX queue index.
 * Description: this function is called to clear the TX descriptors,
 * whether basic or extended descriptors are used.
 */
static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv, u32 queue)
{
        struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
        int i;

        /* Clear the TX descriptors */
        for (i = 0; i < DMA_TX_SIZE; i++)
                if (priv->extend_desc)
                        priv->hw->desc->init_tx_desc(&tx_q->dma_etx[i].basic,
                                                     priv->mode,
                                                     (i == DMA_TX_SIZE - 1));
                else
                        priv->hw->desc->init_tx_desc(&tx_q->dma_tx[i],
                                                     priv->mode,
                                                     (i == DMA_TX_SIZE - 1));
}

/**
 * stmmac_clear_descriptors - clear descriptors
 * @priv: driver private structure
 * Description: this function is called to clear the TX and RX descriptors,
 * whether basic or extended descriptors are used.
 */
static void stmmac_clear_descriptors(struct stmmac_priv *priv)
{
        u32 rx_queue_cnt = priv->plat->rx_queues_to_use;
        u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
        u32 queue;

        /* Clear the RX descriptors */
        for (queue = 0; queue < rx_queue_cnt; queue++)
                stmmac_clear_rx_descriptors(priv, queue);

        /* Clear the TX descriptors */
        for (queue = 0; queue < tx_queue_cnt; queue++)
                stmmac_clear_tx_descriptors(priv, queue);
}

/**
 * stmmac_init_rx_buffers - init the RX descriptor buffer.
 * @priv: driver private structure
 * @p: descriptor pointer
 * @i: descriptor index
 * @flags: gfp flag
 * @queue: RX queue index
 * Description: this function is called to allocate a receive buffer, perform
 * the DMA mapping and init the descriptor.
 */
static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
                                  int i, gfp_t flags, u32 queue)
{
        struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
        struct sk_buff *skb;

        skb = __netdev_alloc_skb_ip_align(priv->dev, priv->dma_buf_sz, flags);
        if (!skb) {
                netdev_err(priv->dev,
                           "%s: Rx init fails; skb is NULL\n", __func__);
                return -ENOMEM;
        }
        rx_q->rx_skbuff[i] = skb;
        rx_q->rx_skbuff_dma[i] = dma_map_single(priv->device, skb->data,
                                                priv->dma_buf_sz,
                                                DMA_FROM_DEVICE);
        if (dma_mapping_error(priv->device, rx_q->rx_skbuff_dma[i])) {
                netdev_err(priv->dev, "%s: DMA mapping error\n", __func__);
                dev_kfree_skb_any(skb);
                return -EINVAL;
        }

        if (priv->synopsys_id >= DWMAC_CORE_4_00)
                p->des0 = cpu_to_le32(rx_q->rx_skbuff_dma[i]);
        else
                p->des2 = cpu_to_le32(rx_q->rx_skbuff_dma[i]);

        if ((priv->hw->mode->init_desc3) &&
            (priv->dma_buf_sz == BUF_SIZE_16KiB))
                priv->hw->mode->init_desc3(p);

        return 0;
}

/**
 * stmmac_free_rx_buffer - free RX dma buffers
 * @priv: private structure
 * @queue: RX queue index
 * @i: buffer index.
 */
static void stmmac_free_rx_buffer(struct stmmac_priv *priv, u32 queue, int i)
{
        struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];

        if (rx_q->rx_skbuff[i]) {
                dma_unmap_single(priv->device, rx_q->rx_skbuff_dma[i],
                                 priv->dma_buf_sz, DMA_FROM_DEVICE);
                dev_kfree_skb_any(rx_q->rx_skbuff[i]);
        }
        rx_q->rx_skbuff[i] = NULL;
}

/**
 * stmmac_free_tx_buffer - free TX dma buffers
 * @priv: private structure
 * @queue: TX queue index
 * @i: buffer index.
 */
static void stmmac_free_tx_buffer(struct stmmac_priv *priv, u32 queue, int i)
{
        struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];

        if (tx_q->tx_skbuff_dma[i].buf) {
                if (tx_q->tx_skbuff_dma[i].map_as_page)
                        dma_unmap_page(priv->device,
                                       tx_q->tx_skbuff_dma[i].buf,
                                       tx_q->tx_skbuff_dma[i].len,
                                       DMA_TO_DEVICE);
                else
                        dma_unmap_single(priv->device,
                                         tx_q->tx_skbuff_dma[i].buf,
                                         tx_q->tx_skbuff_dma[i].len,
                                         DMA_TO_DEVICE);
        }

        if (tx_q->tx_skbuff[i]) {
                dev_kfree_skb_any(tx_q->tx_skbuff[i]);
                tx_q->tx_skbuff[i] = NULL;
                tx_q->tx_skbuff_dma[i].buf = 0;
                tx_q->tx_skbuff_dma[i].map_as_page = false;
        }
}

/**
 * init_dma_rx_desc_rings - init the RX descriptor rings
 * @dev: net device structure
 * @flags: gfp flag.
 * Description: this function initializes the DMA RX descriptors
 * and allocates the socket buffers. It supports the chained and ring
 * modes.
 */
static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags)
{
        struct stmmac_priv *priv = netdev_priv(dev);
        u32 rx_count = priv->plat->rx_queues_to_use;
        unsigned int bfsize = 0;
        int ret = -ENOMEM;
        int queue;
        int i;

        if (priv->hw->mode->set_16kib_bfsize)
                bfsize = priv->hw->mode->set_16kib_bfsize(dev->mtu);

        if (bfsize < BUF_SIZE_16KiB)
                bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz);

        priv->dma_buf_sz = bfsize;

        /* RX INITIALIZATION */
        netif_dbg(priv, probe, priv->dev,
                  "SKB addresses:\nskb\t\tskb data\tdma data\n");

        for (queue = 0; queue < rx_count; queue++) {
                struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];

                netif_dbg(priv, probe, priv->dev,
                          "(%s) dma_rx_phy=0x%08x\n", __func__,
                          (u32)rx_q->dma_rx_phy);

                for (i = 0; i < DMA_RX_SIZE; i++) {
                        struct dma_desc *p;

                        if (priv->extend_desc)
                                p = &((rx_q->dma_erx + i)->basic);
                        else
                                p = rx_q->dma_rx + i;

                        ret = stmmac_init_rx_buffers(priv, p, i, flags,
                                                     queue);
                        if (ret)
                                goto err_init_rx_buffers;

                        netif_dbg(priv, probe, priv->dev, "[%p]\t[%p]\t[%x]\n",
                                  rx_q->rx_skbuff[i], rx_q->rx_skbuff[i]->data,
                                  (unsigned int)rx_q->rx_skbuff_dma[i]);
                }

                rx_q->cur_rx = 0;
                rx_q->dirty_rx = (unsigned int)(i - DMA_RX_SIZE);

                stmmac_clear_rx_descriptors(priv, queue);

                /* Setup the chained descriptor addresses */
                if (priv->mode == STMMAC_CHAIN_MODE) {
                        if (priv->extend_desc)
                                priv->hw->mode->init(rx_q->dma_erx,
                                                     rx_q->dma_rx_phy,
                                                     DMA_RX_SIZE, 1);
                        else
                                priv->hw->mode->init(rx_q->dma_rx,
                                                     rx_q->dma_rx_phy,
                                                     DMA_RX_SIZE, 0);
                }
        }

        buf_sz = bfsize;

        return 0;

err_init_rx_buffers:
        while (queue >= 0) {
                while (--i >= 0)
                        stmmac_free_rx_buffer(priv, queue, i);

                if (queue == 0)
                        break;

                i = DMA_RX_SIZE;
                queue--;
        }

        return ret;
}

/**
 * init_dma_tx_desc_rings - init the TX descriptor rings
 * @dev: net device structure.
 * Description: this function initializes the DMA TX descriptors
 * and allocates the socket buffers. It supports the chained and ring
 * modes.
 */
static int init_dma_tx_desc_rings(struct net_device *dev)
{
        struct stmmac_priv *priv = netdev_priv(dev);
        u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
        u32 queue;
        int i;

        for (queue = 0; queue < tx_queue_cnt; queue++) {
                struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];

                netif_dbg(priv, probe, priv->dev,
                          "(%s) dma_tx_phy=0x%08x\n", __func__,
                          (u32)tx_q->dma_tx_phy);

                /* Setup the chained descriptor addresses */
                if (priv->mode == STMMAC_CHAIN_MODE) {
                        if (priv->extend_desc)
                                priv->hw->mode->init(tx_q->dma_etx,
                                                     tx_q->dma_tx_phy,
                                                     DMA_TX_SIZE, 1);
                        else
                                priv->hw->mode->init(tx_q->dma_tx,
                                                     tx_q->dma_tx_phy,
                                                     DMA_TX_SIZE, 0);
                }

                for (i = 0; i < DMA_TX_SIZE; i++) {
                        struct dma_desc *p;

                        if (priv->extend_desc)
                                p = &((tx_q->dma_etx + i)->basic);
                        else
                                p = tx_q->dma_tx + i;

                        if (priv->synopsys_id >= DWMAC_CORE_4_00) {
                                p->des0 = 0;
                                p->des1 = 0;
                                p->des2 = 0;
                                p->des3 = 0;
                        } else {
                                p->des2 = 0;
                        }

                        tx_q->tx_skbuff_dma[i].buf = 0;
                        tx_q->tx_skbuff_dma[i].map_as_page = false;
                        tx_q->tx_skbuff_dma[i].len = 0;
                        tx_q->tx_skbuff_dma[i].last_segment = false;
                        tx_q->tx_skbuff[i] = NULL;
                }

                tx_q->dirty_tx = 0;
                tx_q->cur_tx = 0;

                netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
        }

        return 0;
}

/**
 * init_dma_desc_rings - init the RX/TX descriptor rings
 * @dev: net device structure
 * @flags: gfp flag.
 * Description: this function initializes the DMA RX/TX descriptors
 * and allocates the socket buffers. It supports the chained and ring
 * modes.
 */
static int init_dma_desc_rings(struct net_device *dev, gfp_t flags)
{
        struct stmmac_priv *priv = netdev_priv(dev);
        int ret;

        ret = init_dma_rx_desc_rings(dev, flags);
        if (ret)
                return ret;

        ret = init_dma_tx_desc_rings(dev);

        stmmac_clear_descriptors(priv);

        if (netif_msg_hw(priv))
                stmmac_display_rings(priv);

        return ret;
}

/**
 * dma_free_rx_skbufs - free RX dma buffers
 * @priv: private structure
 * @queue: RX queue index
 */
static void dma_free_rx_skbufs(struct stmmac_priv *priv, u32 queue)
{
        int i;

        for (i = 0; i < DMA_RX_SIZE; i++)
                stmmac_free_rx_buffer(priv, queue, i);
}

/**
 * dma_free_tx_skbufs - free TX dma buffers
 * @priv: private structure
 * @queue: TX queue index
 */
static void dma_free_tx_skbufs(struct stmmac_priv *priv, u32 queue)
{
        int i;

        for (i = 0; i < DMA_TX_SIZE; i++)
                stmmac_free_tx_buffer(priv, queue, i);
}
1413
1414 /**
1415  * free_dma_rx_desc_resources - free RX dma desc resources
1416  * @priv: private structure
1417  */
1418 static void free_dma_rx_desc_resources(struct stmmac_priv *priv)
1419 {
1420         u32 rx_count = priv->plat->rx_queues_to_use;
1421         u32 queue;
1422
1423         /* Free RX queue resources */
1424         for (queue = 0; queue < rx_count; queue++) {
1425                 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1426
1427                 /* Release the DMA RX socket buffers */
1428                 dma_free_rx_skbufs(priv, queue);
1429
1430                 /* Free DMA regions of consistent memory previously allocated */
1431                 if (!priv->extend_desc)
1432                         dma_free_coherent(priv->device,
1433                                           DMA_RX_SIZE * sizeof(struct dma_desc),
1434                                           rx_q->dma_rx, rx_q->dma_rx_phy);
1435                 else
1436                         dma_free_coherent(priv->device, DMA_RX_SIZE *
1437                                           sizeof(struct dma_extended_desc),
1438                                           rx_q->dma_erx, rx_q->dma_rx_phy);
1439
1440                 kfree(rx_q->rx_skbuff_dma);
1441                 kfree(rx_q->rx_skbuff);
1442         }
1443 }
1444
1445 /**
1446  * free_dma_tx_desc_resources - free TX dma desc resources
1447  * @priv: private structure
1448  */
1449 static void free_dma_tx_desc_resources(struct stmmac_priv *priv)
1450 {
1451         u32 tx_count = priv->plat->tx_queues_to_use;
1452         u32 queue = 0;
1453
1454         /* Free TX queue resources */
1455         for (queue = 0; queue < tx_count; queue++) {
1456                 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1457
1458                 /* Release the DMA TX socket buffers */
1459                 dma_free_tx_skbufs(priv, queue);
1460
1461                 /* Free DMA regions of consistent memory previously allocated */
1462                 if (!priv->extend_desc)
1463                         dma_free_coherent(priv->device,
1464                                           DMA_TX_SIZE * sizeof(struct dma_desc),
1465                                           tx_q->dma_tx, tx_q->dma_tx_phy);
1466                 else
1467                         dma_free_coherent(priv->device, DMA_TX_SIZE *
1468                                           sizeof(struct dma_extended_desc),
1469                                           tx_q->dma_etx, tx_q->dma_tx_phy);
1470
1471                 kfree(tx_q->tx_skbuff_dma);
1472                 kfree(tx_q->tx_skbuff);
1473         }
1474 }
1475
1476 /**
1477  * alloc_dma_rx_desc_resources - alloc RX resources.
1478  * @priv: private structure
1479  * Description: according to which descriptor can be used (extended or
1480  * basic), this function allocates the resources for the RX path. The
1481  * RX socket buffers are pre-allocated in order to allow the zero-copy
1482  * mechanism.
1483  */
1484 static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv)
1485 {
1486         u32 rx_count = priv->plat->rx_queues_to_use;
1487         int ret = -ENOMEM;
1488         u32 queue;
1489
1490         /* RX queues buffers and DMA */
1491         for (queue = 0; queue < rx_count; queue++) {
1492                 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1493
1494                 rx_q->queue_index = queue;
1495                 rx_q->priv_data = priv;
1496
1497                 rx_q->rx_skbuff_dma = kmalloc_array(DMA_RX_SIZE,
1498                                                     sizeof(dma_addr_t),
1499                                                     GFP_KERNEL);
1500                 if (!rx_q->rx_skbuff_dma)
1501                         return -ENOMEM;
1502
1503                 rx_q->rx_skbuff = kmalloc_array(DMA_RX_SIZE,
1504                                                 sizeof(struct sk_buff *),
1505                                                 GFP_KERNEL);
1506                 if (!rx_q->rx_skbuff)
1507                         goto err_dma;
1508
1509                 if (priv->extend_desc) {
1510                         rx_q->dma_erx = dma_zalloc_coherent(priv->device,
1511                                                             DMA_RX_SIZE *
1512                                                             sizeof(struct
1513                                                             dma_extended_desc),
1514                                                             &rx_q->dma_rx_phy,
1515                                                             GFP_KERNEL);
1516                         if (!rx_q->dma_erx)
1517                                 goto err_dma;
1518
1519                 } else {
1520                         rx_q->dma_rx = dma_zalloc_coherent(priv->device,
1521                                                            DMA_RX_SIZE *
1522                                                            sizeof(struct
1523                                                            dma_desc),
1524                                                            &rx_q->dma_rx_phy,
1525                                                            GFP_KERNEL);
1526                         if (!rx_q->dma_rx)
1527                                 goto err_dma;
1528                 }
1529         }
1530
1531         return 0;
1532
1533 err_dma:
1534         free_dma_rx_desc_resources(priv);
1535
1536         return ret;
1537 }
1538
1539 /**
1540  * alloc_dma_tx_desc_resources - alloc TX resources.
1541  * @priv: private structure
1542  * Description: according to which descriptor can be used (extended or
1543  * basic), this function allocates the resources for the TX path, i.e.
1544  * the TX descriptor rings and the per-entry bookkeeping used at
1545  * transmit time.
1546  */
1547 static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv)
1548 {
1549         u32 tx_count = priv->plat->tx_queues_to_use;
1550         int ret = -ENOMEM;
1551         u32 queue;
1552
1553         /* TX queues buffers and DMA */
1554         for (queue = 0; queue < tx_count; queue++) {
1555                 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1556
1557                 tx_q->queue_index = queue;
1558                 tx_q->priv_data = priv;
1559
1560                 tx_q->tx_skbuff_dma = kmalloc_array(DMA_TX_SIZE,
1561                                                     sizeof(*tx_q->tx_skbuff_dma),
1562                                                     GFP_KERNEL);
1563                 if (!tx_q->tx_skbuff_dma)
1564                         return -ENOMEM;
1565
1566                 tx_q->tx_skbuff = kmalloc_array(DMA_TX_SIZE,
1567                                                 sizeof(struct sk_buff *),
1568                                                 GFP_KERNEL);
1569                 if (!tx_q->tx_skbuff)
1570                         goto err_dma_buffers;
1571
1572                 if (priv->extend_desc) {
1573                         tx_q->dma_etx = dma_zalloc_coherent(priv->device,
1574                                                             DMA_TX_SIZE *
1575                                                             sizeof(struct
1576                                                             dma_extended_desc),
1577                                                             &tx_q->dma_tx_phy,
1578                                                             GFP_KERNEL);
1579                         if (!tx_q->dma_etx)
1580                                 goto err_dma_buffers;
1581                 } else {
1582                         tx_q->dma_tx = dma_zalloc_coherent(priv->device,
1583                                                            DMA_TX_SIZE *
1584                                                            sizeof(struct
1585                                                                   dma_desc),
1586                                                            &tx_q->dma_tx_phy,
1587                                                            GFP_KERNEL);
1588                         if (!tx_q->dma_tx)
1589                                 goto err_dma_buffers;
1590                 }
1591         }
1592
1593         return 0;
1594
1595 err_dma_buffers:
1596         free_dma_tx_desc_resources(priv);
1597
1598         return ret;
1599 }
1600
1601 /**
1602  * alloc_dma_desc_resources - alloc TX/RX resources.
1603  * @priv: private structure
1604  * Description: according to which descriptor can be used (extended or
1605  * basic) this function allocates the resources for the TX and RX paths.
1606  * For reception, for example, it pre-allocates the RX socket buffers
1607  * in order to allow the zero-copy mechanism.
1608  */
1609 static int alloc_dma_desc_resources(struct stmmac_priv *priv)
1610 {
1611         /* RX Allocation */
1612         int ret = alloc_dma_rx_desc_resources(priv);
1613
1614         if (ret)
1615                 return ret;
1616
1617         ret = alloc_dma_tx_desc_resources(priv);
1618
1619         return ret;
1620 }
1621
1622 /**
1623  * free_dma_desc_resources - free dma desc resources
1624  * @priv: private structure
1625  */
1626 static void free_dma_desc_resources(struct stmmac_priv *priv)
1627 {
1628         /* Release the DMA RX socket buffers */
1629         free_dma_rx_desc_resources(priv);
1630
1631         /* Release the DMA TX socket buffers */
1632         free_dma_tx_desc_resources(priv);
1633 }
1634
1635 /**
1636  *  stmmac_mac_enable_rx_queues - Enable MAC rx queues
1637  *  @priv: driver private structure
1638  *  Description: It is used for enabling the rx queues in the MAC
1639  */
1640 static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
1641 {
1642         u32 rx_queues_count = priv->plat->rx_queues_to_use;
1643         int queue;
1644         u8 mode;
1645
1646         for (queue = 0; queue < rx_queues_count; queue++) {
1647                 mode = priv->plat->rx_queues_cfg[queue].mode_to_use;
1648                 priv->hw->mac->rx_queue_enable(priv->hw, mode, queue);
1649         }
1650 }
1651
1652 /**
1653  * stmmac_start_rx_dma - start RX DMA channel
1654  * @priv: driver private structure
1655  * @chan: RX channel index
1656  * Description:
1657  * This starts a RX DMA channel
1658  */
1659 static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan)
1660 {
1661         netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan);
1662         priv->hw->dma->start_rx(priv->ioaddr, chan);
1663 }
1664
1665 /**
1666  * stmmac_start_tx_dma - start TX DMA channel
1667  * @priv: driver private structure
1668  * @chan: TX channel index
1669  * Description:
1670  * This starts a TX DMA channel
1671  */
1672 static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan)
1673 {
1674         netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan);
1675         priv->hw->dma->start_tx(priv->ioaddr, chan);
1676 }
1677
1678 /**
1679  * stmmac_stop_rx_dma - stop RX DMA channel
1680  * @priv: driver private structure
1681  * @chan: RX channel index
1682  * Description:
1683  * This stops a RX DMA channel
1684  */
1685 static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan)
1686 {
1687         netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan);
1688         priv->hw->dma->stop_rx(priv->ioaddr, chan);
1689 }
1690
1691 /**
1692  * stmmac_stop_tx_dma - stop TX DMA channel
1693  * @priv: driver private structure
1694  * @chan: TX channel index
1695  * Description:
1696  * This stops a TX DMA channel
1697  */
1698 static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan)
1699 {
1700         netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan);
1701         priv->hw->dma->stop_tx(priv->ioaddr, chan);
1702 }
1703
1704 /**
1705  * stmmac_start_all_dma - start all RX and TX DMA channels
1706  * @priv: driver private structure
1707  * Description:
1708  * This starts all the RX and TX DMA channels
1709  */
1710 static void stmmac_start_all_dma(struct stmmac_priv *priv)
1711 {
1712         u32 rx_channels_count = priv->plat->rx_queues_to_use;
1713         u32 tx_channels_count = priv->plat->tx_queues_to_use;
1714         u32 chan = 0;
1715
1716         for (chan = 0; chan < rx_channels_count; chan++)
1717                 stmmac_start_rx_dma(priv, chan);
1718
1719         for (chan = 0; chan < tx_channels_count; chan++)
1720                 stmmac_start_tx_dma(priv, chan);
1721 }
1722
1723 /**
1724  * stmmac_stop_all_dma - stop all RX and TX DMA channels
1725  * @priv: driver private structure
1726  * Description:
1727  * This stops the RX and TX DMA channels
1728  */
1729 static void stmmac_stop_all_dma(struct stmmac_priv *priv)
1730 {
1731         u32 rx_channels_count = priv->plat->rx_queues_to_use;
1732         u32 tx_channels_count = priv->plat->tx_queues_to_use;
1733         u32 chan = 0;
1734
1735         for (chan = 0; chan < rx_channels_count; chan++)
1736                 stmmac_stop_rx_dma(priv, chan);
1737
1738         for (chan = 0; chan < tx_channels_count; chan++)
1739                 stmmac_stop_tx_dma(priv, chan);
1740 }
1741
1742 /**
1743  *  stmmac_dma_operation_mode - HW DMA operation mode
1744  *  @priv: driver private structure
1745  *  Description: it is used for configuring the DMA operation mode register in
1746  *  order to program the tx/rx DMA thresholds or Store-And-Forward mode.
1747  */
1748 static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
1749 {
1750         u32 rx_channels_count = priv->plat->rx_queues_to_use;
1751         u32 tx_channels_count = priv->plat->tx_queues_to_use;
1752         int rxfifosz = priv->plat->rx_fifo_size;
1753         u32 txmode = 0;
1754         u32 rxmode = 0;
1755         u32 chan = 0;
1756
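             /* Fall back to the RX FIFO size reported by the HW capability
              * register when the platform does not provide one.
              */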
1757         if (rxfifosz == 0)
1758                 rxfifosz = priv->dma_cap.rx_fifo_size;
1759
1760         if (priv->plat->force_thresh_dma_mode) {
1761                 txmode = tc;
1762                 rxmode = tc;
1763         } else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
1764                 /*
1765                  * In case of GMAC, SF mode can be enabled
1766                  * to perform the TX COE in HW. This depends on:
1767                  * 1) TX COE being actually supported;
1768                  * 2) there being no buggy Jumbo frame support
1769                  *    that requires not inserting the csum in the TDES.
1770                  */
1771                 txmode = SF_DMA_MODE;
1772                 rxmode = SF_DMA_MODE;
1773                 priv->xstats.threshold = SF_DMA_MODE;
1774         } else {
1775                 txmode = tc;
1776                 rxmode = SF_DMA_MODE;
1777         }
1778
1779         /* configure all channels */
1780         if (priv->synopsys_id >= DWMAC_CORE_4_00) {
1781                 for (chan = 0; chan < rx_channels_count; chan++)
1782                         priv->hw->dma->dma_rx_mode(priv->ioaddr, rxmode, chan,
1783                                                    rxfifosz);
1784
1785                 for (chan = 0; chan < tx_channels_count; chan++)
1786                         priv->hw->dma->dma_tx_mode(priv->ioaddr, txmode, chan);
1787         } else {
1788                 priv->hw->dma->dma_mode(priv->ioaddr, txmode, rxmode,
1789                                         rxfifosz);
1790         }
1791 }
1792
1793 /**
1794  * stmmac_tx_clean - to manage the transmission completion
1795  * @priv: driver private structure
1796  * @queue: TX queue index
1797  * Description: it reclaims the transmit resources after transmission completes.
1798  */
1799 static void stmmac_tx_clean(struct stmmac_priv *priv, u32 queue)
1800 {
1801         struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1802         unsigned int bytes_compl = 0, pkts_compl = 0;
1803         unsigned int entry = tx_q->dirty_tx;
1804
1805         netif_tx_lock(priv->dev);
1806
1807         priv->xstats.tx_clean++;
1808
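             /* Walk the ring from the last cleaned entry (dirty_tx) up to
              * the producer index (cur_tx), releasing every descriptor the
              * DMA has finished with.
              */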
1809         while (entry != tx_q->cur_tx) {
1810                 struct sk_buff *skb = tx_q->tx_skbuff[entry];
1811                 struct dma_desc *p;
1812                 int status;
1813
1814                 if (priv->extend_desc)
1815                         p = (struct dma_desc *)(tx_q->dma_etx + entry);
1816                 else
1817                         p = tx_q->dma_tx + entry;
1818
1819                 status = priv->hw->desc->tx_status(&priv->dev->stats,
1820                                                       &priv->xstats, p,
1821                                                       priv->ioaddr);
1822                 /* Check if the descriptor is owned by the DMA */
1823                 if (unlikely(status & tx_dma_own))
1824                         break;
1825
1826                 /* Just consider the last segment and ...*/
1827                 if (likely(!(status & tx_not_ls))) {
1828                         /* ... verify the status error condition */
1829                         if (unlikely(status & tx_err)) {
1830                                 priv->dev->stats.tx_errors++;
1831                         } else {
1832                                 priv->dev->stats.tx_packets++;
1833                                 priv->xstats.tx_pkt_n++;
1834                         }
1835                         stmmac_get_tx_hwtstamp(priv, p, skb);
1836                 }
1837
1838                 if (likely(tx_q->tx_skbuff_dma[entry].buf)) {
1839                         if (tx_q->tx_skbuff_dma[entry].map_as_page)
1840                                 dma_unmap_page(priv->device,
1841                                                tx_q->tx_skbuff_dma[entry].buf,
1842                                                tx_q->tx_skbuff_dma[entry].len,
1843                                                DMA_TO_DEVICE);
1844                         else
1845                                 dma_unmap_single(priv->device,
1846                                                  tx_q->tx_skbuff_dma[entry].buf,
1847                                                  tx_q->tx_skbuff_dma[entry].len,
1848                                                  DMA_TO_DEVICE);
1849                         tx_q->tx_skbuff_dma[entry].buf = 0;
1850                         tx_q->tx_skbuff_dma[entry].len = 0;
1851                         tx_q->tx_skbuff_dma[entry].map_as_page = false;
1852                 }
1853
1854                 if (priv->hw->mode->clean_desc3)
1855                         priv->hw->mode->clean_desc3(tx_q, p);
1856
1857                 tx_q->tx_skbuff_dma[entry].last_segment = false;
1858                 tx_q->tx_skbuff_dma[entry].is_jumbo = false;
1859
1860                 if (likely(skb != NULL)) {
1861                         pkts_compl++;
1862                         bytes_compl += skb->len;
1863                         dev_consume_skb_any(skb);
1864                         tx_q->tx_skbuff[entry] = NULL;
1865                 }
1866
1867                 priv->hw->desc->release_tx_desc(p, priv->mode);
1868
1869                 entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
1870         }
1871         tx_q->dirty_tx = entry;
1872
1873         netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue),
1874                                   pkts_compl, bytes_compl);
1875
1876         if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev,
1877                                                                 queue))) &&
1878             stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH) {
1879
1880                 netif_dbg(priv, tx_done, priv->dev,
1881                           "%s: restart transmit\n", __func__);
1882                 netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue));
1883         }
1884
1885         if ((priv->eee_enabled) && (!priv->tx_path_in_lpi_mode)) {
1886                 stmmac_enable_eee_mode(priv);
1887                 mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
1888         }
1889         netif_tx_unlock(priv->dev);
1890 }
1891
1892 static inline void stmmac_enable_dma_irq(struct stmmac_priv *priv, u32 chan)
1893 {
1894         priv->hw->dma->enable_dma_irq(priv->ioaddr, chan);
1895 }
1896
1897 static inline void stmmac_disable_dma_irq(struct stmmac_priv *priv, u32 chan)
1898 {
1899         priv->hw->dma->disable_dma_irq(priv->ioaddr, chan);
1900 }
1901
1902 /**
1903  * stmmac_tx_err - to manage the tx error
1904  * @priv: driver private structure
1905  * @chan: channel index
1906  * Description: it cleans the descriptors and restarts the transmission
1907  * in case of transmission errors.
1908  */
1909 static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
1910 {
1911         struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
1912         int i;
1913
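             /* Quiesce the queue and its DMA channel, drop any pending
              * buffers, re-initialize a clean descriptor ring and restart
              * transmission.
              */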
1914         netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan));
1915
1916         stmmac_stop_tx_dma(priv, chan);
1917         dma_free_tx_skbufs(priv, chan);
1918         for (i = 0; i < DMA_TX_SIZE; i++)
1919                 if (priv->extend_desc)
1920                         priv->hw->desc->init_tx_desc(&tx_q->dma_etx[i].basic,
1921                                                      priv->mode,
1922                                                      (i == DMA_TX_SIZE - 1));
1923                 else
1924                         priv->hw->desc->init_tx_desc(&tx_q->dma_tx[i],
1925                                                      priv->mode,
1926                                                      (i == DMA_TX_SIZE - 1));
1927         tx_q->dirty_tx = 0;
1928         tx_q->cur_tx = 0;
1929         netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, chan));
1930         stmmac_start_tx_dma(priv, chan);
1931
1932         priv->dev->stats.tx_errors++;
1933         netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan));
1934 }
1935
1936 /**
1937  *  stmmac_set_dma_operation_mode - Set DMA operation mode by channel
1938  *  @priv: driver private structure
1939  *  @txmode: TX operating mode
1940  *  @rxmode: RX operating mode
1941  *  @chan: channel index
1942  *  Description: it is used for configuring the DMA operation mode at
1943  *  runtime in order to program the tx/rx DMA thresholds or
1944  *  Store-And-Forward mode.
1945  */
1946 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
1947                                           u32 rxmode, u32 chan)
1948 {
1949         int rxfifosz = priv->plat->rx_fifo_size;
1950
1951         if (rxfifosz == 0)
1952                 rxfifosz = priv->dma_cap.rx_fifo_size;
1953
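             /* GMAC4 programs the RX and TX operation modes per channel;
              * older cores use a single shared DMA operation mode register.
              */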
1954         if (priv->synopsys_id >= DWMAC_CORE_4_00) {
1955                 priv->hw->dma->dma_rx_mode(priv->ioaddr, rxmode, chan,
1956                                            rxfifosz);
1957                 priv->hw->dma->dma_tx_mode(priv->ioaddr, txmode, chan);
1958         } else {
1959                 priv->hw->dma->dma_mode(priv->ioaddr, txmode, rxmode,
1960                                         rxfifosz);
1961         }
1962 }
1963
1964 /**
1965  * stmmac_dma_interrupt - DMA ISR
1966  * @priv: driver private structure
1967  * Description: this is the DMA ISR. It is called by the main ISR.
1968  * It calls the dwmac dma routine and schedules the poll method when
1969  * there is work to do.
1970  */
1971 static void stmmac_dma_interrupt(struct stmmac_priv *priv)
1972 {
1973         u32 tx_channel_count = priv->plat->tx_queues_to_use;
1974         int status;
1975         u32 chan;
1976
1977         for (chan = 0; chan < tx_channel_count; chan++) {
1978                 struct stmmac_rx_queue *rx_q = &priv->rx_queue[chan];
1979
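                     /* NAPI is hosted in the RX queue structure, so TX work
                      * on this channel is also serviced from the matching
                      * RX queue's napi poll.
                      */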
1980                 status = priv->hw->dma->dma_interrupt(priv->ioaddr,
1981                                                       &priv->xstats, chan);
1982                 if (likely((status & handle_rx)) || (status & handle_tx)) {
1983                         if (likely(napi_schedule_prep(&rx_q->napi))) {
1984                                 stmmac_disable_dma_irq(priv, chan);
1985                                 __napi_schedule(&rx_q->napi);
1986                         }
1987                 }
1988
1989                 if (unlikely(status & tx_hard_error_bump_tc)) {
1990                         /* Try to bump up the dma threshold on this failure */
1991                         if (unlikely(priv->xstats.threshold != SF_DMA_MODE) &&
1992                             (tc <= 256)) {
1993                                 tc += 64;
1994                                 if (priv->plat->force_thresh_dma_mode)
1995                                         stmmac_set_dma_operation_mode(priv,
1996                                                                       tc,
1997                                                                       tc,
1998                                                                       chan);
1999                                 else
2000                                         stmmac_set_dma_operation_mode(priv,
2001                                                                     tc,
2002                                                                     SF_DMA_MODE,
2003                                                                     chan);
2004                                 priv->xstats.threshold = tc;
2005                         }
2006                 } else if (unlikely(status == tx_hard_error)) {
2007                         stmmac_tx_err(priv, chan);
2008                 }
2009         }
2010 }
2011
2012 /**
2013  * stmmac_mmc_setup - setup the MAC Management Counters (MMC)
2014  * @priv: driver private structure
2015  * Description: this masks the MMC irq; the counters are managed in SW.
2016  */
2017 static void stmmac_mmc_setup(struct stmmac_priv *priv)
2018 {
2019         unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
2020                             MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
2021
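             /* The MMC and PTP register blocks sit at different offsets on
              * GMAC4 compared with the 3.x cores.
              */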
2022         if (priv->synopsys_id >= DWMAC_CORE_4_00) {
2023                 priv->ptpaddr = priv->ioaddr + PTP_GMAC4_OFFSET;
2024                 priv->mmcaddr = priv->ioaddr + MMC_GMAC4_OFFSET;
2025         } else {
2026                 priv->ptpaddr = priv->ioaddr + PTP_GMAC3_X_OFFSET;
2027                 priv->mmcaddr = priv->ioaddr + MMC_GMAC3_X_OFFSET;
2028         }
2029
2030         dwmac_mmc_intr_all_mask(priv->mmcaddr);
2031
2032         if (priv->dma_cap.rmon) {
2033                 dwmac_mmc_ctrl(priv->mmcaddr, mode);
2034                 memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
2035         } else
2036                 netdev_info(priv->dev, "No MAC Management Counters available\n");
2037 }
2038
2039 /**
2040  * stmmac_selec_desc_mode - select among normal/alternate/extended descriptors
2041  * @priv: driver private structure
2042  * Description: select the Enhanced/Alternate or Normal descriptors.
2043  * In case of Enhanced/Alternate, it checks if the extended descriptors are
2044  * supported by the HW capability register.
2045  */
2046 static void stmmac_selec_desc_mode(struct stmmac_priv *priv)
2047 {
2048         if (priv->plat->enh_desc) {
2049                 dev_info(priv->device, "Enhanced/Alternate descriptors\n");
2050
2051                 /* GMAC older than 3.50 has no extended descriptors */
2052                 if (priv->synopsys_id >= DWMAC_CORE_3_50) {
2053                         dev_info(priv->device, "Enabled extended descriptors\n");
2054                         priv->extend_desc = 1;
2055                 } else
2056                         dev_warn(priv->device, "Extended descriptors not supported\n");
2057
2058                 priv->hw->desc = &enh_desc_ops;
2059         } else {
2060                 dev_info(priv->device, "Normal descriptors\n");
2061                 priv->hw->desc = &ndesc_ops;
2062         }
2063 }
2064
2065 /**
2066  * stmmac_get_hw_features - get MAC capabilities from the HW cap. register.
2067  * @priv: driver private structure
2068  * Description:
2069  *  newer GMAC chip generations have a register that indicates the
2070  *  presence of optional features/functions.
2071  *  It can also be used to override values passed through the platform,
2072  *  which is necessary for the old MAC10/100 and GMAC chips.
2073  */
2074 static int stmmac_get_hw_features(struct stmmac_priv *priv)
2075 {
2076         u32 ret = 0;
2077
2078         if (priv->hw->dma->get_hw_feature) {
2079                 priv->hw->dma->get_hw_feature(priv->ioaddr,
2080                                               &priv->dma_cap);
2081                 ret = 1;
2082         }
2083
2084         return ret;
2085 }
2086
2087 /**
2088  * stmmac_check_ether_addr - check if the MAC addr is valid
2089  * @priv: driver private structure
2090  * Description:
2091  * it verifies that the MAC address is valid; if not, it reads it from
2092  * the HW and, as a last resort, generates a random one.
2093  */
2094 static void stmmac_check_ether_addr(struct stmmac_priv *priv)
2095 {
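             /* Prefer the address already programmed into the MAC; only as
              * a last resort fall back to a random address.
              */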
2096         if (!is_valid_ether_addr(priv->dev->dev_addr)) {
2097                 priv->hw->mac->get_umac_addr(priv->hw,
2098                                              priv->dev->dev_addr, 0);
2099                 if (!is_valid_ether_addr(priv->dev->dev_addr))
2100                         eth_hw_addr_random(priv->dev);
2101                 netdev_info(priv->dev, "device MAC address %pM\n",
2102                             priv->dev->dev_addr);
2103         }
2104 }
2105
2106 /**
2107  * stmmac_init_dma_engine - DMA init.
2108  * @priv: driver private structure
2109  * Description:
2110  * It inits the DMA invoking the specific MAC/GMAC callback.
2111  * Some DMA parameters can be passed from the platform;
2112  * if they are not passed, a default is used for the MAC or GMAC.
2113  */
2114 static int stmmac_init_dma_engine(struct stmmac_priv *priv)
2115 {
2116         u32 rx_channels_count = priv->plat->rx_queues_to_use;
2117         u32 tx_channels_count = priv->plat->tx_queues_to_use;
2118         struct stmmac_rx_queue *rx_q;
2119         struct stmmac_tx_queue *tx_q;
2120         u32 dummy_dma_rx_phy = 0;
2121         u32 dummy_dma_tx_phy = 0;
2122         u32 chan = 0;
2123         int atds = 0;
2124         int ret = 0;
2125
2126         if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) {
2127                 dev_err(priv->device, "Invalid DMA configuration\n");
2128                 return -EINVAL;
2129         }
2130
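             /* The Alternate Descriptor Size (ATDS) bit must be set when
              * extended descriptors are used in ring mode.
              */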
2131         if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
2132                 atds = 1;
2133
2134         ret = priv->hw->dma->reset(priv->ioaddr);
2135         if (ret) {
2136                 dev_err(priv->device, "Failed to reset the dma\n");
2137                 return ret;
2138         }
2139
2140         if (priv->synopsys_id >= DWMAC_CORE_4_00) {
2141                 /* DMA Configuration */
2142                 priv->hw->dma->init(priv->ioaddr, priv->plat->dma_cfg,
2143                                     dummy_dma_tx_phy, dummy_dma_rx_phy, atds);
2144
2145                 /* DMA RX Channel Configuration */
2146                 for (chan = 0; chan < rx_channels_count; chan++) {
2147                         rx_q = &priv->rx_queue[chan];
2148
2149                         priv->hw->dma->init_rx_chan(priv->ioaddr,
2150                                                     priv->plat->dma_cfg,
2151                                                     rx_q->dma_rx_phy, chan);
2152
2153                         rx_q->rx_tail_addr = rx_q->dma_rx_phy +
2154                                     (DMA_RX_SIZE * sizeof(struct dma_desc));
2155                         priv->hw->dma->set_rx_tail_ptr(priv->ioaddr,
2156                                                        rx_q->rx_tail_addr,
2157                                                        chan);
2158                 }
2159
2160                 /* DMA TX Channel Configuration */
2161                 for (chan = 0; chan < tx_channels_count; chan++) {
2162                         tx_q = &priv->tx_queue[chan];
2163
2164                         priv->hw->dma->init_chan(priv->ioaddr,
2165                                                  priv->plat->dma_cfg,
2166                                                  chan);
2167
2168                         priv->hw->dma->init_tx_chan(priv->ioaddr,
2169                                                     priv->plat->dma_cfg,
2170                                                     tx_q->dma_tx_phy, chan);
2171
2172                         tx_q->tx_tail_addr = tx_q->dma_tx_phy +
2173                                     (DMA_TX_SIZE * sizeof(struct dma_desc));
2174                         priv->hw->dma->set_tx_tail_ptr(priv->ioaddr,
2175                                                        tx_q->tx_tail_addr,
2176                                                        chan);
2177                 }
2178         } else {
2179                 rx_q = &priv->rx_queue[chan];
2180                 tx_q = &priv->tx_queue[chan];
2181                 priv->hw->dma->init(priv->ioaddr, priv->plat->dma_cfg,
2182                                     tx_q->dma_tx_phy, rx_q->dma_rx_phy, atds);
2183         }
2184
2185         if (priv->plat->axi && priv->hw->dma->axi)
2186                 priv->hw->dma->axi(priv->ioaddr, priv->plat->axi);
2187
2188         return ret;
2189 }
2190
2191 /**
2192  * stmmac_tx_timer - mitigation sw timer for tx.
2193  * @data: data pointer
2194  * Description:
2195  * This is the timer handler to directly invoke the stmmac_tx_clean.
2196  */
2197 static void stmmac_tx_timer(unsigned long data)
2198 {
2199         struct stmmac_priv *priv = (struct stmmac_priv *)data;
2200         u32 tx_queues_count = priv->plat->tx_queues_to_use;
2201         u32 queue;
2202
2203         /* let's scan all the tx queues */
2204         for (queue = 0; queue < tx_queues_count; queue++)
2205                 stmmac_tx_clean(priv, queue);
2206 }
2207
2208 /**
2209  * stmmac_init_tx_coalesce - init tx mitigation options.
2210  * @priv: driver private structure
2211  * Description:
2212  * This inits the transmit coalesce parameters: i.e. timer rate,
2213  * timer handler and default threshold used for enabling the
2214  * interrupt on completion bit.
2215  */
2216 static void stmmac_init_tx_coalesce(struct stmmac_priv *priv)
2217 {
2218         priv->tx_coal_frames = STMMAC_TX_FRAMES;
2219         priv->tx_coal_timer = STMMAC_COAL_TX_TIMER;
2220         init_timer(&priv->txtimer);
2221         priv->txtimer.expires = STMMAC_COAL_TIMER(priv->tx_coal_timer);
2222         priv->txtimer.data = (unsigned long)priv;
2223         priv->txtimer.function = stmmac_tx_timer;
2224         add_timer(&priv->txtimer);
2225 }
2226
2227 static void stmmac_set_rings_length(struct stmmac_priv *priv)
2228 {
2229         u32 rx_channels_count = priv->plat->rx_queues_to_use;
2230         u32 tx_channels_count = priv->plat->tx_queues_to_use;
2231         u32 chan;
2232
2233         /* set TX ring length */
2234         if (priv->hw->dma->set_tx_ring_len) {
2235                 for (chan = 0; chan < tx_channels_count; chan++)
2236                         priv->hw->dma->set_tx_ring_len(priv->ioaddr,
2237                                                        (DMA_TX_SIZE - 1), chan);
2238         }
2239
2240         /* set RX ring length */
2241         if (priv->hw->dma->set_rx_ring_len) {
2242                 for (chan = 0; chan < rx_channels_count; chan++)
2243                         priv->hw->dma->set_rx_ring_len(priv->ioaddr,
2244                                                        (DMA_RX_SIZE - 1), chan);
2245         }
2246 }
2247
2248 /**
2249  *  stmmac_set_tx_queue_weight - Set TX queue weight
2250  *  @priv: driver private structure
2251  *  Description: It is used for setting the TX queue weights
2252  */
2253 static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv)
2254 {
2255         u32 tx_queues_count = priv->plat->tx_queues_to_use;
2256         u32 weight;
2257         u32 queue;
2258
2259         for (queue = 0; queue < tx_queues_count; queue++) {
2260                 weight = priv->plat->tx_queues_cfg[queue].weight;
2261                 priv->hw->mac->set_mtl_tx_queue_weight(priv->hw, weight, queue);
2262         }
2263 }
2264
2265 /**
2266  *  stmmac_configure_cbs - Configure CBS in TX queue
2267  *  @priv: driver private structure
2268  *  Description: It is used for configuring CBS in AVB TX queues
2269  */
2270 static void stmmac_configure_cbs(struct stmmac_priv *priv)
2271 {
2272         u32 tx_queues_count = priv->plat->tx_queues_to_use;
2273         u32 mode_to_use;
2274         u32 queue;
2275
2276         /* queue 0 is reserved for legacy traffic */
2277         for (queue = 1; queue < tx_queues_count; queue++) {
2278                 mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
2279                 if (mode_to_use == MTL_QUEUE_DCB)
2280                         continue;
2281
2282                 priv->hw->mac->config_cbs(priv->hw,
2283                                 priv->plat->tx_queues_cfg[queue].send_slope,
2284                                 priv->plat->tx_queues_cfg[queue].idle_slope,
2285                                 priv->plat->tx_queues_cfg[queue].high_credit,
2286                                 priv->plat->tx_queues_cfg[queue].low_credit,
2287                                 queue);
2288         }
2289 }
2290
2291 /**
2292  *  stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel
2293  *  @priv: driver private structure
2294  *  Description: It is used for mapping RX queues to RX dma channels
2295  */
2296 static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv)
2297 {
2298         u32 rx_queues_count = priv->plat->rx_queues_to_use;
2299         u32 queue;
2300         u32 chan;
2301
2302         for (queue = 0; queue < rx_queues_count; queue++) {
2303                 chan = priv->plat->rx_queues_cfg[queue].chan;
2304                 priv->hw->mac->map_mtl_to_dma(priv->hw, queue, chan);
2305         }
2306 }
2307
2308 /**
2309  *  stmmac_mac_config_rx_queues_prio - Configure RX Queue priority
2310  *  @priv: driver private structure
2311  *  Description: It is used for configuring the RX Queue Priority
2312  */
2313 static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv)
2314 {
2315         u32 rx_queues_count = priv->plat->rx_queues_to_use;
2316         u32 queue;
2317         u32 prio;
2318
2319         for (queue = 0; queue < rx_queues_count; queue++) {
2320                 if (!priv->plat->rx_queues_cfg[queue].use_prio)
2321                         continue;
2322
2323                 prio = priv->plat->rx_queues_cfg[queue].prio;
2324                 priv->hw->mac->rx_queue_prio(priv->hw, prio, queue);
2325         }
2326 }
2327
2328 /**
2329  *  stmmac_mac_config_tx_queues_prio - Configure TX Queue priority
2330  *  @priv: driver private structure
2331  *  Description: It is used for configuring the TX Queue Priority
2332  */
2333 static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv)
2334 {
2335         u32 tx_queues_count = priv->plat->tx_queues_to_use;
2336         u32 queue;
2337         u32 prio;
2338
2339         for (queue = 0; queue < tx_queues_count; queue++) {
2340                 if (!priv->plat->tx_queues_cfg[queue].use_prio)
2341                         continue;
2342
2343                 prio = priv->plat->tx_queues_cfg[queue].prio;
2344                 priv->hw->mac->tx_queue_prio(priv->hw, prio, queue);
2345         }
2346 }
2347
2348 /**
2349  *  stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing
2350  *  @priv: driver private structure
2351  *  Description: It is used for configuring the RX queue routing
2352  */
2353 static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv)
2354 {
2355         u32 rx_queues_count = priv->plat->rx_queues_to_use;
2356         u32 queue;
2357         u8 packet;
2358
2359         for (queue = 0; queue < rx_queues_count; queue++) {
2360                 /* no specific packet type routing specified for the queue */
2361                 if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0)
2362                         continue;
2363
2364                 packet = priv->plat->rx_queues_cfg[queue].pkt_route;
2365                 priv->hw->mac->rx_queue_routing(priv->hw, packet, queue);
2366         }
2367 }
2368
2369 /**
2370  *  stmmac_mtl_configuration - Configure MTL
2371  *  @priv: driver private structure
2372  *  Description: It is used for configuring the MTL
2373  */
2374 static void stmmac_mtl_configuration(struct stmmac_priv *priv)
2375 {
2376         u32 rx_queues_count = priv->plat->rx_queues_to_use;
2377         u32 tx_queues_count = priv->plat->tx_queues_to_use;
2378
2379         if (tx_queues_count > 1 && priv->hw->mac->set_mtl_tx_queue_weight)
2380                 stmmac_set_tx_queue_weight(priv);
2381
2382         /* Configure MTL RX algorithms */
2383         if (rx_queues_count > 1 && priv->hw->mac->prog_mtl_rx_algorithms)
2384                 priv->hw->mac->prog_mtl_rx_algorithms(priv->hw,
2385                                                 priv->plat->rx_sched_algorithm);
2386
2387         /* Configure MTL TX algorithms */
2388         if (tx_queues_count > 1 && priv->hw->mac->prog_mtl_tx_algorithms)
2389                 priv->hw->mac->prog_mtl_tx_algorithms(priv->hw,
2390                                                 priv->plat->tx_sched_algorithm);
2391
2392         /* Configure CBS in AVB TX queues */
2393         if (tx_queues_count > 1 && priv->hw->mac->config_cbs)
2394                 stmmac_configure_cbs(priv);
2395
2396         /* Map RX MTL to DMA channels */
2397         if (priv->hw->mac->map_mtl_to_dma)
2398                 stmmac_rx_queue_dma_chan_map(priv);
2399
2400         /* Enable MAC RX Queues */
2401         if (priv->hw->mac->rx_queue_enable)
2402                 stmmac_mac_enable_rx_queues(priv);
2403
2404         /* Set RX priorities */
2405         if (rx_queues_count > 1 && priv->hw->mac->rx_queue_prio)
2406                 stmmac_mac_config_rx_queues_prio(priv);
2407
2408         /* Set TX priorities */
2409         if (tx_queues_count > 1 && priv->hw->mac->tx_queue_prio)
2410                 stmmac_mac_config_tx_queues_prio(priv);
2411
2412         /* Set RX routing */
2413         if (rx_queues_count > 1 && priv->hw->mac->rx_queue_routing)
2414                 stmmac_mac_config_rx_queues_routing(priv);
2415 }
2416
2417 /**
2418  * stmmac_hw_setup - setup mac in a usable state.
2419  *  @dev : pointer to the device structure.
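      *  @init_ptp: if true, also initialize the PTP support.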
2420  *  Description:
2421  *  this is the main function to setup the HW in a usable state: the
2422  *  dma engine is reset, the core registers are configured (e.g. AXI,
2423  *  Checksum features, timers) and the DMA is made ready to start
2424  *  receiving and transmitting.
2425  *  Return value:
2426  *  0 on success and an appropriate (-)ve integer as defined in errno.h
2427  *  file on failure.
2428  */
2429 static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
2430 {
2431         struct stmmac_priv *priv = netdev_priv(dev);
2432         u32 rx_cnt = priv->plat->rx_queues_to_use;
2433         u32 tx_cnt = priv->plat->tx_queues_to_use;
2434         u32 chan;
2435         int ret;
2436
2437         /* DMA initialization and SW reset */
2438         ret = stmmac_init_dma_engine(priv);
2439         if (ret < 0) {
2440                 netdev_err(priv->dev, "%s: DMA engine initialization failed\n",
2441                            __func__);
2442                 return ret;
2443         }
2444
2445         /* Copy the MAC addr into the HW */
2446         priv->hw->mac->set_umac_addr(priv->hw, dev->dev_addr, 0);
2447
2448         /* PS and related bits will be programmed according to the speed */
2449         if (priv->hw->pcs) {
2450                 int speed = priv->plat->mac_port_sel_speed;
2451
2452                 if ((speed == SPEED_10) || (speed == SPEED_100) ||
2453                     (speed == SPEED_1000)) {
2454                         priv->hw->ps = speed;
2455                 } else {
2456                         dev_warn(priv->device, "invalid port speed\n");
2457                         priv->hw->ps = 0;
2458                 }
2459         }
2460
2461         /* Initialize the MAC Core */
2462         priv->hw->mac->core_init(priv->hw, dev->mtu);
2463
2464         /* Initialize MTL */
2465         if (priv->synopsys_id >= DWMAC_CORE_4_00)
2466                 stmmac_mtl_configuration(priv);
2467
2468         ret = priv->hw->mac->rx_ipc(priv->hw);
2469         if (!ret) {
2470                 netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n");
2471                 priv->plat->rx_coe = STMMAC_RX_COE_NONE;
2472                 priv->hw->rx_csum = 0;
2473         }
2474
2475         /* Enable the MAC Rx/Tx */
2476         priv->hw->mac->set_mac(priv->ioaddr, true);
2477
2478         /* Set the HW DMA mode and the COE */
2479         stmmac_dma_operation_mode(priv);
2480
2481         stmmac_mmc_setup(priv);
2482
2483         if (init_ptp) {
2484                 ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
2485                 if (ret < 0)
2486                         netdev_warn(priv->dev, "failed to enable PTP reference clock: %d\n", ret);
2487
2488                 ret = stmmac_init_ptp(priv);
2489                 if (ret == -EOPNOTSUPP)
2490                         netdev_warn(priv->dev, "PTP not supported by HW\n");
2491                 else if (ret)
2492                         netdev_warn(priv->dev, "PTP init failed\n");
2493         }
2494
2495 #ifdef CONFIG_DEBUG_FS
2496         ret = stmmac_init_fs(dev);
2497         if (ret < 0)
2498                 netdev_warn(priv->dev, "%s: failed debugFS registration\n",
2499                             __func__);
2500 #endif
2501         /* Start the ball rolling... */
2502         stmmac_start_all_dma(priv);
2503
2504         priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS;
2505
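             /* When RX interrupt coalescing (RIWT) is in use, program the
              * maximum watchdog timeout on the RX channels.
              */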
2506         if ((priv->use_riwt) && (priv->hw->dma->rx_watchdog)) {
2507                 priv->rx_riwt = MAX_DMA_RIWT;
2508                 priv->hw->dma->rx_watchdog(priv->ioaddr, MAX_DMA_RIWT, rx_cnt);
2509         }
2510
2511         if (priv->hw->pcs && priv->hw->mac->pcs_ctrl_ane)
2512                 priv->hw->mac->pcs_ctrl_ane(priv->hw, 1, priv->hw->ps, 0);
2513
2514         /* set TX and RX rings length */
2515         stmmac_set_rings_length(priv);
2516
2517         /* Enable TSO */
2518         if (priv->tso) {
2519                 for (chan = 0; chan < tx_cnt; chan++)
2520                         priv->hw->dma->enable_tso(priv->ioaddr, 1, chan);
2521         }
2522
2523         return 0;
2524 }
2525
2526 static void stmmac_hw_teardown(struct net_device *dev)
2527 {
2528         struct stmmac_priv *priv = netdev_priv(dev);
2529
2530         clk_disable_unprepare(priv->plat->clk_ptp_ref);
2531 }
2532
2533 /**
2534  *  stmmac_open - open entry point of the driver
2535  *  @dev : pointer to the device structure.
2536  *  Description:
2537  *  This function is the open entry point of the driver.
2538  *  Return value:
2539  *  0 on success and an appropriate (-)ve integer as defined in errno.h
2540  *  file on failure.
2541  */
2542 static int stmmac_open(struct net_device *dev)
2543 {
2544         struct stmmac_priv *priv = netdev_priv(dev);
2545         int ret;
2546
2547         stmmac_check_ether_addr(priv);
2548
2549         if (priv->hw->pcs != STMMAC_PCS_RGMII &&
2550             priv->hw->pcs != STMMAC_PCS_TBI &&
2551             priv->hw->pcs != STMMAC_PCS_RTBI) {
2552                 ret = stmmac_init_phy(dev);
2553                 if (ret) {
2554                         netdev_err(priv->dev,
2555                                    "%s: Cannot attach to PHY (error: %d)\n",
2556                                    __func__, ret);
2557                         return ret;
2558                 }
2559         }
2560
2561         /* Extra statistics */
2562         memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats));
2563         priv->xstats.threshold = tc;
2564
2565         priv->dma_buf_sz = STMMAC_ALIGN(buf_sz);
2566         priv->rx_copybreak = STMMAC_RX_COPYBREAK;
2567
2568         ret = alloc_dma_desc_resources(priv);
2569         if (ret < 0) {
2570                 netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
2571                            __func__);
2572                 goto dma_desc_error;
2573         }
2574
2575         ret = init_dma_desc_rings(dev, GFP_KERNEL);
2576         if (ret < 0) {
2577                 netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
2578                            __func__);
2579                 goto init_error;
2580         }
2581
2582         ret = stmmac_hw_setup(dev, true);
2583         if (ret < 0) {
2584                 netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
2585                 goto init_error;
2586         }
2587
2588         stmmac_init_tx_coalesce(priv);
2589
2590         if (dev->phydev)
2591                 phy_start(dev->phydev);
2592
2593         /* Request the IRQ lines */
2594         ret = request_irq(dev->irq, stmmac_interrupt,
2595                           IRQF_SHARED, dev->name, dev);
2596         if (unlikely(ret < 0)) {
2597                 netdev_err(priv->dev,
2598                            "%s: ERROR: allocating the IRQ %d (error: %d)\n",
2599                            __func__, dev->irq, ret);
2600                 goto irq_error;
2601         }
2602
2603         /* Request the Wake IRQ in case another line is used for WoL */
2604         if (priv->wol_irq != dev->irq) {
2605                 ret = request_irq(priv->wol_irq, stmmac_interrupt,
2606                                   IRQF_SHARED, dev->name, dev);
2607                 if (unlikely(ret < 0)) {
2608                         netdev_err(priv->dev,
2609                                    "%s: ERROR: allocating the WoL IRQ %d (%d)\n",
2610                                    __func__, priv->wol_irq, ret);
2611                         goto wolirq_error;
2612                 }
2613         }
2614
2615         /* Request the LPI IRQ in case a separate line is used */
2616         if (priv->lpi_irq > 0) {
2617                 ret = request_irq(priv->lpi_irq, stmmac_interrupt, IRQF_SHARED,
2618                                   dev->name, dev);
2619                 if (unlikely(ret < 0)) {
2620                         netdev_err(priv->dev,
2621                                    "%s: ERROR: allocating the LPI IRQ %d (%d)\n",
2622                                    __func__, priv->lpi_irq, ret);
2623                         goto lpiirq_error;
2624                 }
2625         }
2626
2627         stmmac_enable_all_queues(priv);
2628         stmmac_start_all_queues(priv);
2629
2630         return 0;
2631
2632 lpiirq_error:
2633         if (priv->wol_irq != dev->irq)
2634                 free_irq(priv->wol_irq, dev);
2635 wolirq_error:
2636         free_irq(dev->irq, dev);
2637 irq_error:
2638         if (dev->phydev)
2639                 phy_stop(dev->phydev);
2640
2641         del_timer_sync(&priv->txtimer);
2642         stmmac_hw_teardown(dev);
2643 init_error:
2644         free_dma_desc_resources(priv);
2645 dma_desc_error:
2646         if (dev->phydev)
2647                 phy_disconnect(dev->phydev);
2648
2649         return ret;
2650 }
2651
2652 /**
2653  *  stmmac_release - close entry point of the driver
2654  *  @dev : device pointer.
2655  *  Description:
2656  *  This is the stop entry point of the driver.
2657  */
2658 static int stmmac_release(struct net_device *dev)
2659 {
2660         struct stmmac_priv *priv = netdev_priv(dev);
2661
2662         if (priv->eee_enabled)
2663                 del_timer_sync(&priv->eee_ctrl_timer);
2664
2665         /* Stop and disconnect the PHY */
2666         if (dev->phydev) {
2667                 phy_stop(dev->phydev);
2668                 phy_disconnect(dev->phydev);
2669         }
2670
2671         stmmac_stop_all_queues(priv);
2672
2673         stmmac_disable_all_queues(priv);
2674
2675         del_timer_sync(&priv->txtimer);
2676
2677         /* Free the IRQ lines */
2678         free_irq(dev->irq, dev);
2679         if (priv->wol_irq != dev->irq)
2680                 free_irq(priv->wol_irq, dev);
2681         if (priv->lpi_irq > 0)
2682                 free_irq(priv->lpi_irq, dev);
2683
2684         /* Stop TX/RX DMA and clear the descriptors */
2685         stmmac_stop_all_dma(priv);
2686
2687         /* Release and free the Rx/Tx resources */
2688         free_dma_desc_resources(priv);
2689
2690         /* Disable the MAC Rx/Tx */
2691         priv->hw->mac->set_mac(priv->ioaddr, false);
2692
2693         netif_carrier_off(dev);
2694
2695 #ifdef CONFIG_DEBUG_FS
2696         stmmac_exit_fs(dev);
2697 #endif
2698
2699         stmmac_release_ptp(priv);
2700
2701         return 0;
2702 }
2703
2704 /**
2705  *  stmmac_tso_allocator - fill descriptors with a TSO payload
2706  *  @priv: driver private structure
2707  *  @des: buffer start address
2708  *  @total_len: total length to fill in descriptors
2709  *  @last_segment: condition for the last descriptor
2710  *  @queue: TX queue index
2711  *  Description:
2712  *  This function fills descriptors and requests new ones according to
2713  *  the buffer length to fill.
2714  */
2715 static void stmmac_tso_allocator(struct stmmac_priv *priv, unsigned int des,
2716                                  int total_len, bool last_segment, u32 queue)
2717 {
2718         struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
2719         struct dma_desc *desc;
2720         u32 buff_size;
2721         int tmp_len;
2722
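             /* Consume one descriptor per TSO_MAX_BUFF_SIZE chunk until the
              * whole buffer has been described.
              */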
2723         tmp_len = total_len;
2724
2725         while (tmp_len > 0) {
2726                 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
2727                 desc = tx_q->dma_tx + tx_q->cur_tx;
2728
2729                 desc->des0 = cpu_to_le32(des + (total_len - tmp_len));
2730                 buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
2731                             TSO_MAX_BUFF_SIZE : tmp_len;
2732
2733                 priv->hw->desc->prepare_tso_tx_desc(desc, 0, buff_size,
2734                         0, 1,
2735                         (last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE),
2736                         0, 0);
2737
2738                 tmp_len -= TSO_MAX_BUFF_SIZE;
2739         }
2740 }
2741
2742 /**
2743  *  stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
2744  *  @skb : the socket buffer
2745  *  @dev : device pointer
2746  *  Description: this is the transmit function that is called on TSO frames
2747  *  (support available on GMAC4 and newer chips).
2748  *  Diagram below show the ring programming in case of TSO frames:
2749  *
2750  *  First Descriptor
2751  *   --------
2752  *   | DES0 |---> buffer1 = L2/L3/L4 header
2753  *   | DES1 |---> TCP Payload (can continue on next descr...)
2754  *   | DES2 |---> buffer 1 and 2 len
2755  *   | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
2756  *   --------
2757  *      |
2758  *     ...
2759  *      |
2760  *   --------
2761  *   | DES0 | --| Split TCP Payload on Buffers 1 and 2
2762  *   | DES1 | --|
2763  *   | DES2 | --> buffer 1 and 2 len
2764  *   | DES3 |
2765  *   --------
2766  *
2767  * The MSS is fixed while TSO is enabled, so the TDES3 ctx field is set only when it changes.
2768  */
2769 static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
2770 {
2771         struct dma_desc *desc, *first, *mss_desc = NULL;
2772         struct stmmac_priv *priv = netdev_priv(dev);
2773         int nfrags = skb_shinfo(skb)->nr_frags;
2774         u32 queue = skb_get_queue_mapping(skb);
2775         unsigned int first_entry, des;
2776         struct stmmac_tx_queue *tx_q;
2777         int tmp_pay_len = 0;
2778         u32 pay_len, mss;
2779         u8 proto_hdr_len;
2780         int i;
2781
2782         tx_q = &priv->tx_queue[queue];
2783
2784         /* Compute header lengths */
2785         proto_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
2786
2787         /* Descriptor availability based on the threshold should be safe enough */
2788         if (unlikely(stmmac_tx_avail(priv, queue) <
2789                 (((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) {
2790                 if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
2791                         netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
2792                                                                 queue));
2793                         /* This is a hard error, log it. */
2794                         netdev_err(priv->dev,
2795                                    "%s: Tx Ring full when queue awake\n",
2796                                    __func__);
2797                 }
2798                 return NETDEV_TX_BUSY;
2799         }
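        /* Note: the availability test above reserves roughly one descriptor
         * per TSO_MAX_BUFF_SIZE chunk of payload plus one for the header,
         * which matches the worst case consumed by stmmac_tso_allocator()
         * for the linear part of the skb.
         */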
2800
2801         pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */
2802
2803         mss = skb_shinfo(skb)->gso_size;
2804
2805         /* set new MSS value if needed */
2806         if (mss != priv->mss) {
2807                 mss_desc = tx_q->dma_tx + tx_q->cur_tx;
2808                 priv->hw->desc->set_mss(mss_desc, mss);
2809                 priv->mss = mss;
2810                 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
2811         }
2812
2813         if (netif_msg_tx_queued(priv)) {
2814                 pr_info("%s: tcphdrlen %d, hdr_len %d, pay_len %d, mss %d\n",
2815                         __func__, tcp_hdrlen(skb), proto_hdr_len, pay_len, mss);
2816                 pr_info("\tskb->len %d, skb->data_len %d\n", skb->len,
2817                         skb->data_len);
2818         }
2819
2820         first_entry = tx_q->cur_tx;
2821
2822         desc = tx_q->dma_tx + first_entry;
2823         first = desc;
2824
2825         /* first descriptor: fill Headers on Buf1 */
2826         des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
2827                              DMA_TO_DEVICE);
2828         if (dma_mapping_error(priv->device, des))
2829                 goto dma_map_err;
2830
2831         tx_q->tx_skbuff_dma[first_entry].buf = des;
2832         tx_q->tx_skbuff_dma[first_entry].len = skb_headlen(skb);
2833         tx_q->tx_skbuff[first_entry] = skb;
2834
2835         first->des0 = cpu_to_le32(des);
2836
2837         /* Fill start of payload in buff2 of first descriptor */
2838         if (pay_len)
2839                 first->des1 = cpu_to_le32(des + proto_hdr_len);
2840
2841         /* Buffer 2 of the first descriptor holds up to TSO_MAX_BUFF_SIZE bytes of payload; if needed, take extra descriptors to fill the remainder */
2842         tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;
2843
2844         stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue);
2845
2846         /* Prepare fragments */
2847         for (i = 0; i < nfrags; i++) {
2848                 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2849
2850                 des = skb_frag_dma_map(priv->device, frag, 0,
2851                                        skb_frag_size(frag),
2852                                        DMA_TO_DEVICE);
2853                 if (dma_mapping_error(priv->device, des))
2854                         goto dma_map_err;
2855
2856                 stmmac_tso_allocator(priv, des, skb_frag_size(frag),
2857                                      (i == nfrags - 1), queue);
2858
2859                 tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
2860                 tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag);
2861                 tx_q->tx_skbuff[tx_q->cur_tx] = NULL;
2862                 tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true;
2863         }
2864
2865         tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true;
2866
2867         tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
2868
2869         if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
2870                 netif_dbg(priv, hw, priv->dev, "%s: stop transmitting packets\n",
2871                           __func__);
2872                 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
2873         }
2874
2875         dev->stats.tx_bytes += skb->len;
2876         priv->xstats.tx_tso_frames++;
2877         priv->xstats.tx_tso_nfrags += nfrags;
2878
2879         /* Manage tx mitigation */
2880         priv->tx_count_frames += nfrags + 1;
2881         if (likely(priv->tx_coal_frames > priv->tx_count_frames)) {
2882                 mod_timer(&priv->txtimer,
2883                           STMMAC_COAL_TIMER(priv->tx_coal_timer));
2884         } else {
2885                 priv->tx_count_frames = 0;
2886                 priv->hw->desc->set_tx_ic(desc);
2887                 priv->xstats.tx_set_ic_bit++;
2888         }
2889
2890         skb_tx_timestamp(skb);
2891
2892         if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
2893                      priv->hwts_tx_en)) {
2894                 /* declare that device is doing timestamping */
2895                 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
2896                 priv->hw->desc->enable_tx_timestamp(first);
2897         }
2898
2899         /* Complete the first descriptor before granting the DMA */
2900         priv->hw->desc->prepare_tso_tx_desc(first, 1,
2901                         proto_hdr_len,
2902                         pay_len,
2903                         1, tx_q->tx_skbuff_dma[first_entry].last_segment,
2904                         tcp_hdrlen(skb) / 4, (skb->len - proto_hdr_len));
2905
2906         /* If context desc is used to change MSS */
2907         if (mss_desc)
2908                 priv->hw->desc->set_tx_owner(mss_desc);
2909
2910         /* The own bit must be the latest setting done when preparing the
2911          * descriptor and then a barrier is needed to make sure that
2912          * all is coherent before granting the DMA engine.
2913          */
2914         dma_wmb();
2915
2916         if (netif_msg_pktdata(priv)) {
2917                 pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
2918                         __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
2919                         tx_q->cur_tx, first, nfrags);
2920
2921                 priv->hw->desc->display_ring((void *)tx_q->dma_tx, DMA_TX_SIZE,
2922                                              0);
2923
2924                 pr_info(">>> frame to be transmitted: ");
2925                 print_pkt(skb->data, skb_headlen(skb));
2926         }
2927
2928         netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
2929
2930         priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, tx_q->tx_tail_addr,
2931                                        queue);
2932
2933         return NETDEV_TX_OK;
2934
2935 dma_map_err:
2936         dev_err(priv->device, "Tx dma map failed\n");
2937         dev_kfree_skb(skb);
2938         priv->dev->stats.tx_dropped++;
2939         return NETDEV_TX_OK;
2940 }
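/* Illustrative sketch (assumption, not driver code): with TSO the hardware
 * reuses the MSS programmed through the context descriptor to cut the
 * payload into segments, so the wire carries roughly:
 *
 *	nr_segs = DIV_ROUND_UP(skb->len - proto_hdr_len, mss);
 *
 * segments, each prefixed with a copy of the L2/L3/L4 header taken from
 * buffer 1 of the first descriptor. For a 64 KiB TCP payload and
 * mss = 1448 that is 46 segments.
 */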
2941
2942 /**
2943  *  stmmac_xmit - Tx entry point of the driver
2944  *  @skb : the socket buffer
2945  *  @dev : device pointer
2946  *  Description : this is the tx entry point of the driver.
2947  *  It programs the chain or the ring and supports oversized frames
2948  *  and SG feature.
2949  */
2950 static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
2951 {
2952         struct stmmac_priv *priv = netdev_priv(dev);
2953         unsigned int nopaged_len = skb_headlen(skb);
2954         int i, csum_insertion = 0, is_jumbo = 0;
2955         u32 queue = skb_get_queue_mapping(skb);
2956         int nfrags = skb_shinfo(skb)->nr_frags;
2957         int entry;
2958         unsigned int first_entry;
2959         struct dma_desc *desc, *first;
2960         struct stmmac_tx_queue *tx_q;
2961         unsigned int enh_desc;
2962         unsigned int des;
2963
2964         tx_q = &priv->tx_queue[queue];
2965
2966         /* Manage oversized TCP frames for GMAC4 device */
2967         if (skb_is_gso(skb) && priv->tso) {
2968                 if (skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
2969                         return stmmac_tso_xmit(skb, dev);
2970         }
2971
2972         if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
2973                 if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
2974                         netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
2975                                                                 queue));
2976                         /* This is a hard error, log it. */
2977                         netdev_err(priv->dev,
2978                                    "%s: Tx Ring full when queue awake\n",
2979                                    __func__);
2980                 }
2981                 return NETDEV_TX_BUSY;
2982         }
2983
2984         if (priv->tx_path_in_lpi_mode)
2985                 stmmac_disable_eee_mode(priv);
2986
2987         entry = tx_q->cur_tx;
2988         first_entry = entry;
2989
2990         csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
2991
2992         if (likely(priv->extend_desc))
2993                 desc = (struct dma_desc *)(tx_q->dma_etx + entry);
2994         else
2995                 desc = tx_q->dma_tx + entry;
2996
2997         first = desc;
2998
2999         tx_q->tx_skbuff[first_entry] = skb;
3000
3001         enh_desc = priv->plat->enh_desc;
3002         /* To program the descriptors according to the size of the frame */
3003         if (enh_desc)
3004                 is_jumbo = priv->hw->mode->is_jumbo_frm(skb->len, enh_desc);
3005
3006         if (unlikely(is_jumbo) && likely(priv->synopsys_id <
3007                                          DWMAC_CORE_4_00)) {
3008                 entry = priv->hw->mode->jumbo_frm(tx_q, skb, csum_insertion);
3009                 if (unlikely(entry < 0))
3010                         goto dma_map_err;
3011         }
3012
3013         for (i = 0; i < nfrags; i++) {
3014                 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3015                 int len = skb_frag_size(frag);
3016                 bool last_segment = (i == (nfrags - 1));
3017
3018                 entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
3019
3020                 if (likely(priv->extend_desc))
3021                         desc = (struct dma_desc *)(tx_q->dma_etx + entry);
3022                 else
3023                         desc = tx_q->dma_tx + entry;
3024
3025                 des = skb_frag_dma_map(priv->device, frag, 0, len,
3026                                        DMA_TO_DEVICE);
3027                 if (dma_mapping_error(priv->device, des))
3028                         goto dma_map_err; /* should reuse desc w/o issues */
3029
3030                 tx_q->tx_skbuff[entry] = NULL;
3031
3032                 tx_q->tx_skbuff_dma[entry].buf = des;
3033                 if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
3034                         desc->des0 = cpu_to_le32(des);
3035                 else
3036                         desc->des2 = cpu_to_le32(des);
3037
3038                 tx_q->tx_skbuff_dma[entry].map_as_page = true;
3039                 tx_q->tx_skbuff_dma[entry].len = len;
3040                 tx_q->tx_skbuff_dma[entry].last_segment = last_segment;
3041
3042                 /* Prepare the descriptor and set the own bit too */
3043                 priv->hw->desc->prepare_tx_desc(desc, 0, len, csum_insertion,
3044                                                 priv->mode, 1, last_segment,
3045                                                 skb->len);
3046         }
3047
3048         entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
3049
3050         tx_q->cur_tx = entry;
3051
3052         if (netif_msg_pktdata(priv)) {
3053                 void *tx_head;
3054
3055                 netdev_dbg(priv->dev,
3056                            "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
3057                            __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
3058                            entry, first, nfrags);
3059
3060                 if (priv->extend_desc)
3061                         tx_head = (void *)tx_q->dma_etx;
3062                 else
3063                         tx_head = (void *)tx_q->dma_tx;
3064
3065                 priv->hw->desc->display_ring(tx_head, DMA_TX_SIZE, false);
3066
3067                 netdev_dbg(priv->dev, ">>> frame to be transmitted: ");
3068                 print_pkt(skb->data, skb->len);
3069         }
3070
3071         if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
3072                 netif_dbg(priv, hw, priv->dev, "%s: stop transmitting packets\n",
3073                           __func__);
3074                 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
3075         }
3076
3077         dev->stats.tx_bytes += skb->len;
3078
3079         /* According to the coalesce parameter the IC bit for the latest
3080          * segment is reset and the timer re-started to clean the tx status.
3081          * This approach takes care of the fragments: desc is the first
3082          * element in case of no SG.
3083          */
3084         priv->tx_count_frames += nfrags + 1;
3085         if (likely(priv->tx_coal_frames > priv->tx_count_frames)) {
3086                 mod_timer(&priv->txtimer,
3087                           STMMAC_COAL_TIMER(priv->tx_coal_timer));
3088         } else {
3089                 priv->tx_count_frames = 0;
3090                 priv->hw->desc->set_tx_ic(desc);
3091                 priv->xstats.tx_set_ic_bit++;
3092         }
3093
3094         skb_tx_timestamp(skb);
3095
3096         /* Ready to fill the first descriptor and set the OWN bit w/o any
3097          * problems because all the descriptors are actually ready to be
3098          * passed to the DMA engine.
3099          */
3100         if (likely(!is_jumbo)) {
3101                 bool last_segment = (nfrags == 0);
3102
3103                 des = dma_map_single(priv->device, skb->data,
3104                                      nopaged_len, DMA_TO_DEVICE);
3105                 if (dma_mapping_error(priv->device, des))
3106                         goto dma_map_err;
3107
3108                 tx_q->tx_skbuff_dma[first_entry].buf = des;
3109                 if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
3110                         first->des0 = cpu_to_le32(des);
3111                 else
3112                         first->des2 = cpu_to_le32(des);
3113
3114                 tx_q->tx_skbuff_dma[first_entry].len = nopaged_len;
3115                 tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment;
3116
3117                 if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
3118                              priv->hwts_tx_en)) {
3119                         /* declare that device is doing timestamping */
3120                         skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
3121                         priv->hw->desc->enable_tx_timestamp(first);
3122                 }
3123
3124                 /* Prepare the first descriptor setting the OWN bit too */
3125                 priv->hw->desc->prepare_tx_desc(first, 1, nopaged_len,
3126                                                 csum_insertion, priv->mode, 1,
3127                                                 last_segment, skb->len);
3128
3129                 /* The own bit must be the latest setting done when preparing the
3130                  * descriptor and then a barrier is needed to make sure that
3131                  * all is coherent before granting the DMA engine.
3132                  */
3133                 dma_wmb();
3134         }
3135
3136         netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
3137
3138         if (priv->synopsys_id < DWMAC_CORE_4_00)
3139                 priv->hw->dma->enable_dma_transmission(priv->ioaddr);
3140         else
3141                 priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, tx_q->tx_tail_addr,
3142                                                queue);
3143
3144         return NETDEV_TX_OK;
3145
3146 dma_map_err:
3147         netdev_err(priv->dev, "Tx DMA map failed\n");
3148         dev_kfree_skb(skb);
3149         priv->dev->stats.tx_dropped++;
3150         return NETDEV_TX_OK;
3151 }
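/* Illustrative sketch (assumption, not driver code): the TX handover order
 * used above is (1) fill every descriptor, writing the OWN bit of the first
 * descriptor last, (2) dma_wmb() so the engine can never observe a
 * half-written ring, (3) ring the doorbell:
 *
 *	prepare_all_descriptors(tx_q);	// hypothetical shorthand
 *	dma_wmb();
 *	kick_tx_dma(priv, queue);	// hypothetical shorthand for the tail
 *					// pointer write (GMAC4) or
 *					// enable_dma_transmission() (older)
 */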
3152
3153 static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
3154 {
3155         struct ethhdr *ehdr;
3156         u16 vlanid;
3157
3158         if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) ==
3159             NETIF_F_HW_VLAN_CTAG_RX &&
3160             !__vlan_get_tag(skb, &vlanid)) {
3161                 /* pop the vlan tag */
3162                 ehdr = (struct ethhdr *)skb->data;
3163                 memmove(skb->data + VLAN_HLEN, ehdr, ETH_ALEN * 2);
3164                 skb_pull(skb, VLAN_HLEN);
3165                 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlanid);
3166         }
3167 }
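/* Illustrative sketch (assumption, not driver code): popping the 802.1Q tag
 * above moves the two MAC addresses (2 * ETH_ALEN bytes) forward over the
 * 4-byte VLAN header and keeps the TCI for the stack:
 *
 *	before: | dst | src | 0x8100 | TCI | type | payload |
 *	after:          | dst | src  | type | payload |   (TCI in skb meta)
 */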
3168
3169
3170 static inline int stmmac_rx_threshold_count(struct stmmac_rx_queue *rx_q)
3171 {
3172         if (rx_q->rx_zeroc_thresh < STMMAC_RX_THRESH)
3173                 return 0;
3174
3175         return 1;
3176 }
3177
3178 /**
3179  * stmmac_rx_refill - refill used skb preallocated buffers
3180  * @priv: driver private structure
3181  * @queue: RX queue index
3182  * Description : this reallocates the skbs for the zero-copy based
3183  * reception process.
3184  */
3185 static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
3186 {
3187         struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3188         int dirty = stmmac_rx_dirty(priv, queue);
3189         unsigned int entry = rx_q->dirty_rx;
3190
3191         int bfsize = priv->dma_buf_sz;
3192
3193         while (dirty-- > 0) {
3194                 struct dma_desc *p;
3195
3196                 if (priv->extend_desc)
3197                         p = (struct dma_desc *)(rx_q->dma_erx + entry);
3198                 else
3199                         p = rx_q->dma_rx + entry;
3200
3201                 if (likely(!rx_q->rx_skbuff[entry])) {
3202                         struct sk_buff *skb;
3203
3204                         skb = netdev_alloc_skb_ip_align(priv->dev, bfsize);
3205                         if (unlikely(!skb)) {
3206                                 /* so for a while no zero-copy! */
3207                                 rx_q->rx_zeroc_thresh = STMMAC_RX_THRESH;
3208                                 if (unlikely(net_ratelimit()))
3209                                         dev_err(priv->device,
3210                                                 "fail to alloc skb entry %d\n",
3211                                                 entry);
3212                                 break;
3213                         }
3214
3215                         rx_q->rx_skbuff[entry] = skb;
3216                         rx_q->rx_skbuff_dma[entry] =
3217                             dma_map_single(priv->device, skb->data, bfsize,
3218                                            DMA_FROM_DEVICE);
3219                         if (dma_mapping_error(priv->device,
3220                                               rx_q->rx_skbuff_dma[entry])) {
3221                                 netdev_err(priv->dev, "Rx DMA map failed\n");
3222                                 dev_kfree_skb(skb);
3223                                 break;
3224                         }
3225
3226                         if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00)) {
3227                                 p->des0 = cpu_to_le32(rx_q->rx_skbuff_dma[entry]);
3228                                 p->des1 = 0;
3229                         } else {
3230                                 p->des2 = cpu_to_le32(rx_q->rx_skbuff_dma[entry]);
3231                         }
3232                         if (priv->hw->mode->refill_desc3)
3233                                 priv->hw->mode->refill_desc3(rx_q, p);
3234
3235                         if (rx_q->rx_zeroc_thresh > 0)
3236                                 rx_q->rx_zeroc_thresh--;
3237
3238                         netif_dbg(priv, rx_status, priv->dev,
3239                                   "refill entry #%d\n", entry);
3240                 }
3241                 dma_wmb();
3242
3243                 if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
3244                         priv->hw->desc->init_rx_desc(p, priv->use_riwt, 0, 0);
3245                 else
3246                         priv->hw->desc->set_rx_owner(p);
3247
3248                 dma_wmb();
3249
3250                 entry = STMMAC_GET_ENTRY(entry, DMA_RX_SIZE);
3251         }
3252         rx_q->dirty_rx = entry;
3253 }
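/* Illustrative sketch (assumption, not driver code): dirty_rx chases cur_rx
 * around the ring, so the number of entries to refill is the modular
 * distance between them; assuming a power-of-two ring size:
 *
 *	dirty = (rx_q->cur_rx - rx_q->dirty_rx) & (DMA_RX_SIZE - 1);
 *
 * stmmac_rx_dirty() encapsulates this computation in the driver.
 */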
3254
3255 /**
3256  * stmmac_rx - manage the receive process
3257  * @priv: driver private structure
3258  * @limit: napi budget
3259  * @queue: RX queue index.
3260  * Description :  this is the function called by the napi poll method.
3261  * It gets all the frames inside the ring.
3262  */
3263 static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
3264 {
3265         struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3266         unsigned int entry = rx_q->cur_rx;
3267         int coe = priv->hw->rx_csum;
3268         unsigned int next_entry;
3269         unsigned int count = 0;
3270
3271         if (netif_msg_rx_status(priv)) {
3272                 void *rx_head;
3273
3274                 netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
3275                 if (priv->extend_desc)
3276                         rx_head = (void *)rx_q->dma_erx;
3277                 else
3278                         rx_head = (void *)rx_q->dma_rx;
3279
3280                 priv->hw->desc->display_ring(rx_head, DMA_RX_SIZE, true);
3281         }
3282         while (count < limit) {
3283                 int status;
3284                 struct dma_desc *p;
3285                 struct dma_desc *np;
3286
3287                 if (priv->extend_desc)
3288                         p = (struct dma_desc *)(rx_q->dma_erx + entry);
3289                 else
3290                         p = rx_q->dma_rx + entry;
3291
3292                 /* read the status of the incoming frame */
3293                 status = priv->hw->desc->rx_status(&priv->dev->stats,
3294                                                    &priv->xstats, p);
3295                 /* check if managed by the DMA otherwise go ahead */
3296                 if (unlikely(status & dma_own))
3297                         break;
3298
3299                 count++;
3300
3301                 rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx, DMA_RX_SIZE);
3302                 next_entry = rx_q->cur_rx;
3303
3304                 if (priv->extend_desc)
3305                         np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
3306                 else
3307                         np = rx_q->dma_rx + next_entry;
3308
3309                 prefetch(np);
3310
3311                 if ((priv->extend_desc) && (priv->hw->desc->rx_extended_status))
3312                         priv->hw->desc->rx_extended_status(&priv->dev->stats,
3313                                                            &priv->xstats,
3314                                                            rx_q->dma_erx +
3315                                                            entry);
3316                 if (unlikely(status == discard_frame)) {
3317                         priv->dev->stats.rx_errors++;
3318                         if (priv->hwts_rx_en && !priv->extend_desc) {
3319                                 /* DESC2 & DESC3 will be overwritten by device
3320                                  * with timestamp value, hence reinitialize
3321                                  * them in stmmac_rx_refill() function so that
3322                                  * device can reuse it.
3323                                  */
3324                                 rx_q->rx_skbuff[entry] = NULL;
3325                                 dma_unmap_single(priv->device,
3326                                                  rx_q->rx_skbuff_dma[entry],
3327                                                  priv->dma_buf_sz,
3328                                                  DMA_FROM_DEVICE);
3329                         }
3330                 } else {
3331                         struct sk_buff *skb;
3332                         int frame_len;
3333                         unsigned int des;
3334
3335                         if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
3336                                 des = le32_to_cpu(p->des0);
3337                         else
3338                                 des = le32_to_cpu(p->des2);
3339
3340                         frame_len = priv->hw->desc->get_rx_frame_len(p, coe);
3341
3342                         /*  If frame length is greater than skb buffer size
3343                          *  (preallocated during init) then the packet is
3344                          *  ignored
3345                          */
3346                         if (frame_len > priv->dma_buf_sz) {
3347                                 netdev_err(priv->dev,
3348                                            "len %d larger than size (%d)\n",
3349                                            frame_len, priv->dma_buf_sz);
3350                                 priv->dev->stats.rx_length_errors++;
3351                                 break;
3352                         }
3353
3354                         /* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
3355                          * Type frames (LLC/LLC-SNAP)
3356                          */
3357                         if (unlikely(status != llc_snap))
3358                                 frame_len -= ETH_FCS_LEN;
3359
3360                         if (netif_msg_rx_status(priv)) {
3361                                 netdev_dbg(priv->dev, "\tdesc: %p [entry %d] buff=0x%x\n",
3362                                            p, entry, des);
3363                                 if (frame_len > ETH_FRAME_LEN)
3364                                         netdev_dbg(priv->dev, "frame size %d, COE: %d\n",
3365                                                    frame_len, status);
3366                         }
3367
3368                         /* In case of GMAC4, zero-copy is always used,
3369                          * whatever the frame size, because the used
3370                          * descriptors always need to be refilled.
3371                          */
3372                         if (unlikely(!priv->plat->has_gmac4 &&
3373                                      ((frame_len < priv->rx_copybreak) ||
3374                                      stmmac_rx_threshold_count(rx_q)))) {
3375                                 skb = netdev_alloc_skb_ip_align(priv->dev,
3376                                                                 frame_len);
3377                                 if (unlikely(!skb)) {
3378                                         if (net_ratelimit())
3379                                                 dev_warn(priv->device,
3380                                                          "packet dropped\n");
3381                                         priv->dev->stats.rx_dropped++;
3382                                         break;
3383                                 }
3384
3385                                 dma_sync_single_for_cpu(priv->device,
3386                                                         rx_q->rx_skbuff_dma
3387                                                         [entry], frame_len,
3388                                                         DMA_FROM_DEVICE);
3389                                 skb_copy_to_linear_data(skb,
3390                                                         rx_q->
3391                                                         rx_skbuff[entry]->data,
3392                                                         frame_len);
3393
3394                                 skb_put(skb, frame_len);
3395                                 dma_sync_single_for_device(priv->device,
3396                                                            rx_q->rx_skbuff_dma
3397                                                            [entry], frame_len,
3398                                                            DMA_FROM_DEVICE);
3399                         } else {
3400                                 skb = rx_q->rx_skbuff[entry];
3401                                 if (unlikely(!skb)) {
3402                                         netdev_err(priv->dev,
3403                                                    "%s: Inconsistent Rx chain\n",
3404                                                    priv->dev->name);
3405                                         priv->dev->stats.rx_dropped++;
3406                                         break;
3407                                 }
3408                                 prefetch(skb->data - NET_IP_ALIGN);
3409                                 rx_q->rx_skbuff[entry] = NULL;
3410                                 rx_q->rx_zeroc_thresh++;
3411
3412                                 skb_put(skb, frame_len);
3413                                 dma_unmap_single(priv->device,
3414                                                  rx_q->rx_skbuff_dma[entry],
3415                                                  priv->dma_buf_sz,
3416                                                  DMA_FROM_DEVICE);
3417                         }
3418
3419                         if (netif_msg_pktdata(priv)) {
3420                                 netdev_dbg(priv->dev, "frame received (%dbytes)",
3421                                            frame_len);
3422                                 print_pkt(skb->data, frame_len);
3423                         }
3424
3425                         stmmac_get_rx_hwtstamp(priv, p, np, skb);
3426
3427                         stmmac_rx_vlan(priv->dev, skb);
3428
3429                         skb->protocol = eth_type_trans(skb, priv->dev);
3430
3431                         if (unlikely(!coe))
3432                                 skb_checksum_none_assert(skb);
3433                         else
3434                                 skb->ip_summed = CHECKSUM_UNNECESSARY;
3435
3436                         napi_gro_receive(&rx_q->napi, skb);
3437
3438                         priv->dev->stats.rx_packets++;
3439                         priv->dev->stats.rx_bytes += frame_len;
3440                 }
3441                 entry = next_entry;
3442         }
3443
3444         stmmac_rx_refill(priv, queue);
3445
3446         priv->xstats.rx_pkt_n += count;
3447
3448         return count;
3449 }
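/* Illustrative sketch (assumption, not driver code): on pre-GMAC4 cores the
 * rx_copybreak heuristic above trades one memcpy for keeping the
 * already-mapped buffer on the ring:
 *
 *	if (frame_len < priv->rx_copybreak)
 *		copy_into_fresh_skb();		// small frame: ring skb stays mapped
 *	else
 *		pass_ring_skb_upstack();	// zero-copy: slot refilled later
 *
 * (the helper names are hypothetical shorthand for the two branches).
 */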
3450
3451 /**
3452  *  stmmac_poll - stmmac poll method (NAPI)
3453  *  @napi : pointer to the napi structure.
3454  *  @budget : maximum number of packets that the current CPU can receive from
3455  *            all interfaces.
3456  *  Description :
3457  *  To look at the incoming frames and clear the tx resources.
3458  */
3459 static int stmmac_poll(struct napi_struct *napi, int budget)
3460 {
3461         struct stmmac_rx_queue *rx_q =
3462                 container_of(napi, struct stmmac_rx_queue, napi);
3463         struct stmmac_priv *priv = rx_q->priv_data;
3464         u32 tx_count = priv->plat->tx_queues_to_use;
3465         u32 chan = rx_q->queue_index;
3466         int work_done = 0;
3467         u32 queue;
3468
3469         priv->xstats.napi_poll++;
3470
3471         /* check all the queues */
3472         for (queue = 0; queue < tx_count; queue++)
3473                 stmmac_tx_clean(priv, queue);
3474
3475         work_done = stmmac_rx(priv, budget, rx_q->queue_index);
3476         if (work_done < budget) {
3477                 napi_complete_done(napi, work_done);
3478                 stmmac_enable_dma_irq(priv, chan);
3479         }
3480         return work_done;
3481 }
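/* Illustrative sketch (assumption, not driver code): the NAPI contract
 * implemented above is "consume up to budget, and only re-enable the device
 * interrupt once the ring is drained":
 *
 *	work = stmmac_rx(priv, budget, queue);
 *	if (work < budget) {
 *		napi_complete_done(napi, work);
 *		stmmac_enable_dma_irq(priv, chan);
 *	}
 */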
3482
3483 /**
3484  *  stmmac_tx_timeout
3485  *  @dev : Pointer to net device structure
3486  *  Description: this function is called when a packet transmission fails to
3487  *   complete within a reasonable time. The driver will mark the error in the
3488  *   netdev structure and arrange for the device to be reset to a sane state
3489  *   in order to transmit a new packet.
3490  */
3491 static void stmmac_tx_timeout(struct net_device *dev)
3492 {
3493         struct stmmac_priv *priv = netdev_priv(dev);
3494         u32 tx_count = priv->plat->tx_queues_to_use;
3495         u32 chan;
3496
3497         /* Clear Tx resources and restart transmission */
3498         for (chan = 0; chan < tx_count; chan++)
3499                 stmmac_tx_err(priv, chan);
3500 }
3501
3502 /**
3503  *  stmmac_set_rx_mode - entry point for multicast addressing
3504  *  @dev : pointer to the device structure
3505  *  Description:
3506  *  This function is a driver entry point which gets called by the kernel
3507  *  whenever multicast addresses must be enabled/disabled.
3508  *  Return value:
3509  *  void.
3510  */
3511 static void stmmac_set_rx_mode(struct net_device *dev)
3512 {
3513         struct stmmac_priv *priv = netdev_priv(dev);
3514
3515         priv->hw->mac->set_filter(priv->hw, dev);
3516 }
3517
3518 /**
3519  *  stmmac_change_mtu - entry point to change MTU size for the device.
3520  *  @dev : device pointer.
3521  *  @new_mtu : the new MTU size for the device.
3522  *  Description: the Maximum Transmission Unit (MTU) is used by the network layer
3523  *  to drive packet transmission. Ethernet has an MTU of 1500 octets
3524  *  (ETH_DATA_LEN). This value can be changed with ifconfig.
3525  *  Return value:
3526  *  0 on success and an appropriate (-)ve integer as defined in errno.h
3527  *  file on failure.
3528  */
3529 static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
3530 {
3531         struct stmmac_priv *priv = netdev_priv(dev);
3532
3533         if (netif_running(dev)) {
3534                 netdev_err(priv->dev, "must be stopped to change its MTU\n");
3535                 return -EBUSY;
3536         }
3537
3538         dev->mtu = new_mtu;
3539
3540         netdev_update_features(dev);
3541
3542         return 0;
3543 }
3544
3545 static netdev_features_t stmmac_fix_features(struct net_device *dev,
3546                                              netdev_features_t features)
3547 {
3548         struct stmmac_priv *priv = netdev_priv(dev);
3549
3550         if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
3551                 features &= ~NETIF_F_RXCSUM;
3552
3553         if (!priv->plat->tx_coe)
3554                 features &= ~NETIF_F_CSUM_MASK;
3555
3556         /* Some GMAC devices have buggy Jumbo frame support that
3557          * needs to have the Tx COE disabled for oversized frames
3558          * (due to limited buffer sizes). In this case we disable
3559          * the TX csum insertion in the TDES and do not use SF mode.
3560          */
3561         if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
3562                 features &= ~NETIF_F_CSUM_MASK;
3563
3564         /* Enable or disable TSO as requested by ethtool */
3565         if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
3566                 if (features & NETIF_F_TSO)
3567                         priv->tso = true;
3568                 else
3569                         priv->tso = false;
3570         }
3571
3572         return features;
3573 }
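/* Usage note (not driver code): the TSO toggle above is driven from
 * userspace through ethtool, e.g.:
 *
 *	ethtool -K eth0 tso off		# clears NETIF_F_TSO -> priv->tso = false
 *	ethtool -K eth0 tso on		# sets NETIF_F_TSO -> priv->tso = true
 */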
3574
3575 static int stmmac_set_features(struct net_device *netdev,
3576                                netdev_features_t features)
3577 {
3578         struct stmmac_priv *priv = netdev_priv(netdev);
3579
3580         /* Keep the COE Type if checksum offload is supported */
3581         if (features & NETIF_F_RXCSUM)
3582                 priv->hw->rx_csum = priv->plat->rx_coe;
3583         else
3584                 priv->hw->rx_csum = 0;
3585         /* No check is needed because rx_coe has been set before and it will
3586          * be fixed in case of an issue.
3587          */
3588         priv->hw->mac->rx_ipc(priv->hw);
3589
3590         return 0;
3591 }
3592
3593 /**
3594  *  stmmac_interrupt - main ISR
3595  *  @irq: interrupt number.
3596  *  @dev_id: to pass the net device pointer.
3597  *  Description: this is the main driver interrupt service routine.
3598  *  It can call:
3599  *  o DMA service routine (to manage incoming frame reception and transmission
3600  *    status)
3601  *  o Core interrupts to manage: remote wake-up, management counter, LPI
3602  *    interrupts.
3603  */
3604 static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
3605 {
3606         struct net_device *dev = (struct net_device *)dev_id;
3607         struct stmmac_priv *priv;
3608         u32 rx_cnt, tx_cnt, queues_count, queue;
3609
3610         if (unlikely(!dev)) {
3611                 pr_err("%s: invalid dev pointer\n", __func__);
3612                 return IRQ_NONE;
3613         }
3614
3615         priv = netdev_priv(dev);
3616         rx_cnt = priv->plat->rx_queues_to_use;
3617         tx_cnt = priv->plat->tx_queues_to_use;
3618         queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt;
3619
3620         if (priv->irq_wake)
3621                 pm_wakeup_event(priv->device, 0);
3622
3623         /* To handle the GMAC's own interrupts */
3624         if ((priv->plat->has_gmac) || (priv->plat->has_gmac4)) {
3625                 int status = priv->hw->mac->host_irq_status(priv->hw,
3626                                                             &priv->xstats);
3627
3628                 if (unlikely(status)) {
3629                         /* For LPI we need to save the tx status */
3630                         if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
3631                                 priv->tx_path_in_lpi_mode = true;
3632                         if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
3633                                 priv->tx_path_in_lpi_mode = false;
3634                 }
3635
3636                 if (priv->synopsys_id >= DWMAC_CORE_4_00) {
3637                         for (queue = 0; queue < queues_count; queue++) {
3638                                 struct stmmac_rx_queue *rx_q =
3639                                 &priv->rx_queue[queue];
3640
3641                                 status |=
3642                                 priv->hw->mac->host_mtl_irq_status(priv->hw,
3643                                                                    queue);
3644
3645                                 if (status & CORE_IRQ_MTL_RX_OVERFLOW &&
3646                                     priv->hw->dma->set_rx_tail_ptr)
3647                                         priv->hw->dma->set_rx_tail_ptr(priv->ioaddr,
3648                                                                 rx_q->rx_tail_addr,
3649                                                                 queue);
3650                         }
3651                 }
3652
3653                 /* PCS link status */
3654                 if (priv->hw->pcs) {
3655                         if (priv->xstats.pcs_link)
3656                                 netif_carrier_on(dev);
3657                         else
3658                                 netif_carrier_off(dev);
3659                 }
3660         }
3661
3662         /* To handle DMA interrupts */
3663         stmmac_dma_interrupt(priv);
3664
3665         return IRQ_HANDLED;
3666 }
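/* Illustrative sketch (assumption, not driver code): this handler is
 * registered at open time with the classic shared-IRQ pattern, passing the
 * net_device as dev_id (which is why the handler recovers priv through
 * netdev_priv()):
 *
 *	ret = request_irq(dev->irq, stmmac_interrupt,
 *			  IRQF_SHARED, dev->name, dev);
 */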
3667
3668 #ifdef CONFIG_NET_POLL_CONTROLLER
3669 /* Polling receive - used by NETCONSOLE and other diagnostic tools
3670  * to allow network I/O with interrupts disabled.
3671  */
3672 static void stmmac_poll_controller(struct net_device *dev)
3673 {
3674         disable_irq(dev->irq);
3675         stmmac_interrupt(dev->irq, dev);
3676         enable_irq(dev->irq);
3677 }
3678 #endif
3679
3680 /**
3681  *  stmmac_ioctl - Entry point for the Ioctl
3682  *  @dev: Device pointer.
3683  *  @rq: An IOCTL-specific structure that can contain a pointer to
3684  *  a proprietary structure used to pass information to the driver.
3685  *  @cmd: IOCTL command
3686  *  Description:
3687  *  Currently it supports the phy_mii_ioctl(...) and HW time stamping.
3688  */
3689 static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
3690 {
3691         int ret = -EOPNOTSUPP;
3692
3693         if (!netif_running(dev))
3694                 return -EINVAL;
3695
3696         switch (cmd) {
3697         case SIOCGMIIPHY:
3698         case SIOCGMIIREG:
3699         case SIOCSMIIREG:
3700                 if (!dev->phydev)
3701                         return -EINVAL;
3702                 ret = phy_mii_ioctl(dev->phydev, rq, cmd);
3703                 break;
3704         case SIOCSHWTSTAMP:
3705                 ret = stmmac_hwtstamp_ioctl(dev, rq);
3706                 break;
3707         default:
3708                 break;
3709         }
3710
3711         return ret;
3712 }
3713
3714 #ifdef CONFIG_DEBUG_FS
3715 static struct dentry *stmmac_fs_dir;
3716
3717 static void sysfs_display_ring(void *head, int size, int extend_desc,
3718                                struct seq_file *seq)
3719 {
3720         int i;
3721         struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
3722         struct dma_desc *p = (struct dma_desc *)head;
3723
3724         for (i = 0; i < size; i++) {
3725                 if (extend_desc) {
3726                         seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
3727                                    i, (unsigned int)virt_to_phys(ep),
3728                                    le32_to_cpu(ep->basic.des0),
3729                                    le32_to_cpu(ep->basic.des1),
3730                                    le32_to_cpu(ep->basic.des2),
3731                                    le32_to_cpu(ep->basic.des3));
3732                         ep++;
3733                 } else {
3734                         seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
3735                                    i, (unsigned int)virt_to_phys(p),
3736                                    le32_to_cpu(p->des0), le32_to_cpu(p->des1),
3737                                    le32_to_cpu(p->des2), le32_to_cpu(p->des3));
3738                         p++;
3739                 }
3740                 seq_printf(seq, "\n");
3741         }
3742 }
3743
3744 static int stmmac_sysfs_ring_read(struct seq_file *seq, void *v)
3745 {
3746         struct net_device *dev = seq->private;
3747         struct stmmac_priv *priv = netdev_priv(dev);
3748         u32 rx_count = priv->plat->rx_queues_to_use;
3749         u32 tx_count = priv->plat->tx_queues_to_use;
3750         u32 queue;
3751
3752         for (queue = 0; queue < rx_count; queue++) {
3753                 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3754
3755                 seq_printf(seq, "RX Queue %d:\n", queue);
3756
3757                 if (priv->extend_desc) {
3758                         seq_printf(seq, "Extended descriptor ring:\n");
3759                         sysfs_display_ring((void *)rx_q->dma_erx,
3760                                            DMA_RX_SIZE, 1, seq);
3761                 } else {
3762                         seq_printf(seq, "Descriptor ring:\n");
3763                         sysfs_display_ring((void *)rx_q->dma_rx,
3764                                            DMA_RX_SIZE, 0, seq);
3765                 }
3766         }
3767
3768         for (queue = 0; queue < tx_count; queue++) {
3769                 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
3770
3771                 seq_printf(seq, "TX Queue %d:\n", queue);
3772
3773                 if (priv->extend_desc) {
3774                         seq_printf(seq, "Extended descriptor ring:\n");
3775                         sysfs_display_ring((void *)tx_q->dma_etx,
3776                                            DMA_TX_SIZE, 1, seq);
3777                 } else {
3778                         seq_printf(seq, "Descriptor ring:\n");
3779                         sysfs_display_ring((void *)tx_q->dma_tx,
3780                                            DMA_TX_SIZE, 0, seq);
3781                 }
3782         }
3783
3784         return 0;
3785 }
3786
3787 static int stmmac_sysfs_ring_open(struct inode *inode, struct file *file)
3788 {
3789         return single_open(file, stmmac_sysfs_ring_read, inode->i_private);
3790 }
3791
3792 /* Debugfs files, should appear in /sys/kernel/debug/stmmaceth/eth0 */
3793
3794 static const struct file_operations stmmac_rings_status_fops = {
3795         .owner = THIS_MODULE,
3796         .open = stmmac_sysfs_ring_open,
3797         .read = seq_read,
3798         .llseek = seq_lseek,
3799         .release = single_release,
3800 };
3801
3802 static int stmmac_sysfs_dma_cap_read(struct seq_file *seq, void *v)
3803 {
3804         struct net_device *dev = seq->private;
3805         struct stmmac_priv *priv = netdev_priv(dev);
3806
3807         if (!priv->hw_cap_support) {
3808                 seq_printf(seq, "DMA HW features not supported\n");
3809                 return 0;
3810         }
3811
3812         seq_printf(seq, "==============================\n");
3813         seq_printf(seq, "\tDMA HW features\n");
3814         seq_printf(seq, "==============================\n");
3815
3816         seq_printf(seq, "\t10/100 Mbps: %s\n",
3817                    (priv->dma_cap.mbps_10_100) ? "Y" : "N");
3818         seq_printf(seq, "\t1000 Mbps: %s\n",
3819                    (priv->dma_cap.mbps_1000) ? "Y" : "N");
3820         seq_printf(seq, "\tHalf duplex: %s\n",
3821                    (priv->dma_cap.half_duplex) ? "Y" : "N");
3822         seq_printf(seq, "\tHash Filter: %s\n",
3823                    (priv->dma_cap.hash_filter) ? "Y" : "N");
3824         seq_printf(seq, "\tMultiple MAC address registers: %s\n",
3825                    (priv->dma_cap.multi_addr) ? "Y" : "N");
3826         seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n",
3827                    (priv->dma_cap.pcs) ? "Y" : "N");
3828         seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
3829                    (priv->dma_cap.sma_mdio) ? "Y" : "N");
3830         seq_printf(seq, "\tPMT Remote wake up: %s\n",
3831                    (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N");
3832         seq_printf(seq, "\tPMT Magic Frame: %s\n",
3833                    (priv->dma_cap.pmt_magic_frame) ? "Y" : "N");
3834         seq_printf(seq, "\tRMON module: %s\n",
3835                    (priv->dma_cap.rmon) ? "Y" : "N");
3836         seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
3837                    (priv->dma_cap.time_stamp) ? "Y" : "N");
3838         seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
3839                    (priv->dma_cap.atime_stamp) ? "Y" : "N");
3840         seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n",
3841                    (priv->dma_cap.eee) ? "Y" : "N");
3842         seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
3843         seq_printf(seq, "\tChecksum Offload in TX: %s\n",
3844                    (priv->dma_cap.tx_coe) ? "Y" : "N");
3845         if (priv->synopsys_id >= DWMAC_CORE_4_00) {
3846                 seq_printf(seq, "\tIP Checksum Offload in RX: %s\n",
3847                            (priv->dma_cap.rx_coe) ? "Y" : "N");
3848         } else {
3849                 seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
3850                            (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
3851                 seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
3852                            (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
3853         }
3854         seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
3855                    (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
3856         seq_printf(seq, "\tNumber of Additional RX channels: %d\n",
3857                    priv->dma_cap.number_rx_channel);
3858         seq_printf(seq, "\tNumber of Additional TX channels: %d\n",
3859                    priv->dma_cap.number_tx_channel);
3860         seq_printf(seq, "\tEnhanced descriptors: %s\n",
3861                    (priv->dma_cap.enh_desc) ? "Y" : "N");
3862
3863         return 0;
3864 }
3865
3866 static int stmmac_sysfs_dma_cap_open(struct inode *inode, struct file *file)
3867 {
3868         return single_open(file, stmmac_sysfs_dma_cap_read, inode->i_private);
3869 }
3870
3871 static const struct file_operations stmmac_dma_cap_fops = {
3872         .owner = THIS_MODULE,
3873         .open = stmmac_sysfs_dma_cap_open,
3874         .read = seq_read,
3875         .llseek = seq_lseek,
3876         .release = single_release,
3877 };
3878
3879 static int stmmac_init_fs(struct net_device *dev)
3880 {
3881         struct stmmac_priv *priv = netdev_priv(dev);
3882
3883         /* Create per netdev entries */
3884         priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);
3885
3886         if (!priv->dbgfs_dir || IS_ERR(priv->dbgfs_dir)) {
3887                 netdev_err(priv->dev, "ERROR failed to create debugfs directory\n");
3888
3889                 return -ENOMEM;
3890         }
3891
3892         /* Entry to report DMA RX/TX rings */
3893         priv->dbgfs_rings_status =
3894                 debugfs_create_file("descriptors_status", S_IRUGO,
3895                                     priv->dbgfs_dir, dev,
3896                                     &stmmac_rings_status_fops);
3897
3898         if (!priv->dbgfs_rings_status || IS_ERR(priv->dbgfs_rings_status)) {
3899                 netdev_err(priv->dev, "ERROR creating stmmac ring debugfs file\n");
3900                 debugfs_remove_recursive(priv->dbgfs_dir);
3901
3902                 return -ENOMEM;
3903         }
3904
3905         /* Entry to report the DMA HW features */
3906         priv->dbgfs_dma_cap = debugfs_create_file("dma_cap", S_IRUGO,
3907                                             priv->dbgfs_dir,
3908                                             dev, &stmmac_dma_cap_fops);
3909
3910         if (!priv->dbgfs_dma_cap || IS_ERR(priv->dbgfs_dma_cap)) {
3911                 netdev_err(priv->dev, "ERROR creating stmmac MMC debugfs file\n");
3912                 debugfs_remove_recursive(priv->dbgfs_dir);
3913
3914                 return -ENOMEM;
3915         }
3916
3917         return 0;
3918 }
3919
3920 static void stmmac_exit_fs(struct net_device *dev)
3921 {
3922         struct stmmac_priv *priv = netdev_priv(dev);
3923
3924         debugfs_remove_recursive(priv->dbgfs_dir);
3925 }
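/* Usage note (not driver code): with CONFIG_DEBUG_FS enabled, the entries
 * created above can be read from userspace, e.g.:
 *
 *	cat /sys/kernel/debug/stmmaceth/eth0/descriptors_status
 *	cat /sys/kernel/debug/stmmaceth/eth0/dma_cap
 */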
3926 #endif /* CONFIG_DEBUG_FS */
3927
3928 static const struct net_device_ops stmmac_netdev_ops = {
3929         .ndo_open = stmmac_open,
3930         .ndo_start_xmit = stmmac_xmit,
3931         .ndo_stop = stmmac_release,
3932         .ndo_change_mtu = stmmac_change_mtu,
3933         .ndo_fix_features = stmmac_fix_features,
3934         .ndo_set_features = stmmac_set_features,
3935         .ndo_set_rx_mode = stmmac_set_rx_mode,
3936         .ndo_tx_timeout = stmmac_tx_timeout,
3937         .ndo_do_ioctl = stmmac_ioctl,
3938 #ifdef CONFIG_NET_POLL_CONTROLLER
3939         .ndo_poll_controller = stmmac_poll_controller,
3940 #endif
3941         .ndo_set_mac_address = eth_mac_addr,
3942 };
3943
3944 /**
3945  *  stmmac_hw_init - Init the MAC device
3946  *  @priv: driver private structure
3947  *  Description: this function is to configure the MAC device according to
3948  *  some platform parameters or the HW capability register. It prepares the
3949  *  driver to use either ring or chain modes and to setup either enhanced or
3950  *  normal descriptors.
3951  */
3952 static int stmmac_hw_init(struct stmmac_priv *priv)
3953 {
3954         struct mac_device_info *mac;
3955
3956         /* Identify the MAC HW device */
3957         if (priv->plat->setup) {
3958                 mac = priv->plat->setup(priv);
3959         } else if (priv->plat->has_gmac) {
3960                 priv->dev->priv_flags |= IFF_UNICAST_FLT;
3961                 mac = dwmac1000_setup(priv->ioaddr,
3962                                       priv->plat->multicast_filter_bins,
3963                                       priv->plat->unicast_filter_entries,
3964                                       &priv->synopsys_id);
3965         } else if (priv->plat->has_gmac4) {
3966                 priv->dev->priv_flags |= IFF_UNICAST_FLT;
3967                 mac = dwmac4_setup(priv->ioaddr,
3968                                    priv->plat->multicast_filter_bins,
3969                                    priv->plat->unicast_filter_entries,
3970                                    &priv->synopsys_id);
3971         } else {
3972                 mac = dwmac100_setup(priv->ioaddr, &priv->synopsys_id);
3973         }
3974         if (!mac)
3975                 return -ENOMEM;
3976
3977         priv->hw = mac;
3978
3979         /* dwmac-sun8i only works in chain mode */
3980         if (priv->plat->has_sun8i)
3981                 chain_mode = 1;
3982
3983         /* To use the chained or ring mode */
3984         if (priv->synopsys_id >= DWMAC_CORE_4_00) {
3985                 priv->hw->mode = &dwmac4_ring_mode_ops;
3986         } else {
3987                 if (chain_mode) {
3988                         priv->hw->mode = &chain_mode_ops;
3989                         dev_info(priv->device, "Chain mode enabled\n");
3990                         priv->mode = STMMAC_CHAIN_MODE;
3991                 } else {
3992                         priv->hw->mode = &ring_mode_ops;
3993                         dev_info(priv->device, "Ring mode enabled\n");
3994                         priv->mode = STMMAC_RING_MODE;
3995                 }
3996         }
3997
3998         /* Get the HW capability (GMAC cores newer than 3.50a) */
3999         priv->hw_cap_support = stmmac_get_hw_features(priv);
4000         if (priv->hw_cap_support) {
4001                 dev_info(priv->device, "DMA HW capability register supported\n");
4002
4003                 /* We can override some gmac/dma configuration fields that
4004                  * are passed through the platform (e.g. enh_desc, tx_coe)
4005                  * with the values from the HW capability register
4006                  * (if supported).
4007                  */
4008                 priv->plat->enh_desc = priv->dma_cap.enh_desc;
4009                 priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up;
4010                 priv->hw->pmt = priv->plat->pmt;
4011
4012                 /* TXCOE doesn't work in thresh DMA mode */
4013                 if (priv->plat->force_thresh_dma_mode)
4014                         priv->plat->tx_coe = 0;
4015                 else
4016                         priv->plat->tx_coe = priv->dma_cap.tx_coe;
4017
4018                 /* In case of GMAC4 rx_coe is from HW cap register. */
4019                 priv->plat->rx_coe = priv->dma_cap.rx_coe;
4020
4021                 if (priv->dma_cap.rx_coe_type2)
4022                         priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
4023                 else if (priv->dma_cap.rx_coe_type1)
4024                         priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;
4025
4026         } else {
4027                 dev_info(priv->device, "No HW DMA feature register supported\n");
4028         }
4029
4030         /* To use alternate (extended), normal or GMAC4 descriptor structures */
4031         if (priv->synopsys_id >= DWMAC_CORE_4_00)
4032                 priv->hw->desc = &dwmac4_desc_ops;
4033         else
4034                 stmmac_selec_desc_mode(priv);
4035
4036         if (priv->plat->rx_coe) {
4037                 priv->hw->rx_csum = priv->plat->rx_coe;
4038                 dev_info(priv->device, "RX Checksum Offload Engine supported\n");
4039                 if (priv->synopsys_id < DWMAC_CORE_4_00)
4040                         dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum);
4041         }
4042         if (priv->plat->tx_coe)
4043                 dev_info(priv->device, "TX Checksum insertion supported\n");
4044
4045         if (priv->plat->pmt) {
4046                 dev_info(priv->device, "Wake-Up On Lan supported\n");
4047                 device_set_wakeup_capable(priv->device, 1);
4048         }
4049
4050         if (priv->dma_cap.tsoen)
4051                 dev_info(priv->device, "TSO supported\n");
4052
4053         return 0;
4054 }
4055
4056 /**
4057  * stmmac_dvr_probe
4058  * @device: device pointer
4059  * @plat_dat: platform data pointer
4060  * @res: stmmac resource pointer
4061  * Description: this is the main probe function used to
4062  * call the alloc_etherdev, allocate the priv structure.
4063  * Return:
4064  * returns 0 on success, otherwise errno.
4065  */
4066 int stmmac_dvr_probe(struct device *device,
4067                      struct plat_stmmacenet_data *plat_dat,
4068                      struct stmmac_resources *res)
4069 {
4070         struct net_device *ndev = NULL;
4071         struct stmmac_priv *priv;
4072         int ret = 0;
4073         u32 queue;
4074
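             /* Allocate the net_device together with the driver private area
              * and enough queue structures for the maximum MTL TX/RX queue
              * counts; the real queue counts are set later in this function.
              */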
4075         ndev = alloc_etherdev_mqs(sizeof(struct stmmac_priv),
4076                                   MTL_MAX_TX_QUEUES,
4077                                   MTL_MAX_RX_QUEUES);
4078         if (!ndev)
4079                 return -ENOMEM;
4080
4081         SET_NETDEV_DEV(ndev, device);
4082
4083         priv = netdev_priv(ndev);
4084         priv->device = device;
4085         priv->dev = ndev;
4086
4087         stmmac_set_ethtool_ops(ndev);
4088         priv->pause = pause;
4089         priv->plat = plat_dat;
4090         priv->ioaddr = res->addr;
4091         priv->dev->base_addr = (unsigned long)res->addr;
4092
4093         priv->dev->irq = res->irq;
4094         priv->wol_irq = res->wol_irq;
4095         priv->lpi_irq = res->lpi_irq;
4096
4097         if (res->mac)
4098                 memcpy(priv->dev->dev_addr, res->mac, ETH_ALEN);
4099
4100         dev_set_drvdata(device, priv->dev);
4101
4102         /* Verify driver arguments */
4103         stmmac_verify_args();
4104
4105         /* Override with kernel parameters if supplied. XXX CRS XXX:
4106          * this needs to support multiple instances.
4107          */
4108         if ((phyaddr >= 0) && (phyaddr <= 31))
4109                 priv->plat->phy_addr = phyaddr;
4110
4111         if (priv->plat->stmmac_rst)
4112                 reset_control_deassert(priv->plat->stmmac_rst);
4113
4114         /* Init MAC and get the capabilities */
4115         ret = stmmac_hw_init(priv);
4116         if (ret)
4117                 goto error_hw_init;
4118
4119         /* Configure real RX and TX queues */
4120         netif_set_real_num_rx_queues(ndev, priv->plat->rx_queues_to_use);
4121         netif_set_real_num_tx_queues(ndev, priv->plat->tx_queues_to_use);
4122
4123         ndev->netdev_ops = &stmmac_netdev_ops;
4124
4125         ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
4126                             NETIF_F_RXCSUM;
4127
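             /* Advertise TSO for both IPv4 (NETIF_F_TSO) and IPv6
              * (NETIF_F_TSO6) when the platform enables it and the core
              * reports TSO support.
              */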
4128         if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
4129                 ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
4130                 priv->tso = true;
4131                 dev_info(priv->device, "TSO feature enabled\n");
4132         }
4133         ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
4134         ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
4135 #ifdef STMMAC_VLAN_TAG_USED
4136         /* Both mac100 and gmac support receive VLAN tag detection */
4137         ndev->features |= NETIF_F_HW_VLAN_CTAG_RX;
4138 #endif
4139         priv->msg_enable = netif_msg_init(debug, default_msg_level);
4140
4141         /* MTU range: 46 - hw-specific max */
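             /* ETH_ZLEN (60) - ETH_HLEN (14) = 46, the minimum payload size */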
4142         ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
4143         if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
4144                 ndev->max_mtu = JUMBO_LEN;
4145         else
4146                 ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
4147         /* Do not overwrite ndev->max_mtu if plat->maxmtu > ndev->max_mtu,
4148          * or if plat->maxmtu < ndev->min_mtu, which is an invalid range.
4149          */
4150         if ((priv->plat->maxmtu < ndev->max_mtu) &&
4151             (priv->plat->maxmtu >= ndev->min_mtu))
4152                 ndev->max_mtu = priv->plat->maxmtu;
4153         else if (priv->plat->maxmtu < ndev->min_mtu)
4154                 dev_warn(priv->device,
4155                          "%s: warning: maxmtu has an invalid value (%d)\n",
4156                          __func__, priv->plat->maxmtu);
4157
4158         if (flow_ctrl)
4159                 priv->flow_ctrl = FLOW_AUTO;    /* RX/TX pause on */
4160
4161         /* The RX watchdog is available on cores newer than 3.40.
4162          * In some cases, for example on buggy HW, this feature
4163          * has to be disabled; this can be done by passing the
4164          * riwt_off field from the platform.
4165          */
4166         if ((priv->synopsys_id >= DWMAC_CORE_3_50) && (!priv->plat->riwt_off)) {
4167                 priv->use_riwt = 1;
4168                 dev_info(priv->device,
4169                          "Enable RX Mitigation via HW Watchdog Timer\n");
4170         }
4171
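             /* One NAPI context per RX queue; the poll weight is scaled by
              * the number of RX queues in use.
              */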
4172         for (queue = 0; queue < priv->plat->rx_queues_to_use; queue++) {
4173                 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
4174
4175                 netif_napi_add(ndev, &rx_q->napi, stmmac_poll,
4176                                (8 * priv->plat->rx_queues_to_use));
4177         }
4178
4179         spin_lock_init(&priv->lock);
4180
4181         /* If a specific clk_csr value is passed from the platform,
4182          * this means that the CSR Clock Range selection cannot be
4183          * changed at run-time and is fixed. Otherwise the driver
4184          * will try to set the MDC clock dynamically according to
4185          * the actual CSR input clock.
4186          */
4187         if (!priv->plat->clk_csr)
4188                 stmmac_clk_csr_set(priv);
4189         else
4190                 priv->clk_csr = priv->plat->clk_csr;
4191
4192         stmmac_check_pcs_mode(priv);
4193
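             /* When the link is handled through the RGMII/TBI/RTBI PCS, no
              * MDIO-attached PHY is used, so the MDIO bus registration is
              * skipped.
              */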
4194         if (priv->hw->pcs != STMMAC_PCS_RGMII  &&
4195             priv->hw->pcs != STMMAC_PCS_TBI &&
4196             priv->hw->pcs != STMMAC_PCS_RTBI) {
4197                 /* MDIO bus Registration */
4198                 ret = stmmac_mdio_register(ndev);
4199                 if (ret < 0) {
4200                         dev_err(priv->device,
4201                                 "%s: MDIO bus (id: %d) registration failed\n",
4202                                 __func__, priv->plat->bus_id);
4203                         goto error_mdio_register;
4204                 }
4205         }
4206
4207         ret = register_netdev(ndev);
4208         if (ret) {
4209                 dev_err(priv->device, "%s: ERROR %i registering the device\n",
4210                         __func__, ret);
4211                 goto error_netdev_register;
4212         }
4213
4214         return ret;
4215
4216 error_netdev_register:
4217         if (priv->hw->pcs != STMMAC_PCS_RGMII &&
4218             priv->hw->pcs != STMMAC_PCS_TBI &&
4219             priv->hw->pcs != STMMAC_PCS_RTBI)
4220                 stmmac_mdio_unregister(ndev);
4221 error_mdio_register:
4222         for (queue = 0; queue < priv->plat->rx_queues_to_use; queue++) {
4223                 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
4224
4225                 netif_napi_del(&rx_q->napi);
4226         }
4227 error_hw_init:
4228         free_netdev(ndev);
4229
4230         return ret;
4231 }
4232 EXPORT_SYMBOL_GPL(stmmac_dvr_probe);
4233
4234 /**
4235  * stmmac_dvr_remove - remove the driver
4236  * @dev: device pointer
4237  * Description: this function resets the TX/RX processes, disables the MAC
4238  * RX/TX, changes the link status and releases the DMA descriptor rings.
4239  */
4240 int stmmac_dvr_remove(struct device *dev)
4241 {
4242         struct net_device *ndev = dev_get_drvdata(dev);
4243         struct stmmac_priv *priv = netdev_priv(ndev);
4244
4245         netdev_info(priv->dev, "%s: removing driver\n", __func__);
4246
4247         stmmac_stop_all_dma(priv);
4248
4249         priv->hw->mac->set_mac(priv->ioaddr, false);
4250         netif_carrier_off(ndev);
4251         unregister_netdev(ndev);
4252         if (priv->plat->stmmac_rst)
4253                 reset_control_assert(priv->plat->stmmac_rst);
4254         clk_disable_unprepare(priv->plat->pclk);
4255         clk_disable_unprepare(priv->plat->stmmac_clk);
4256         if (priv->hw->pcs != STMMAC_PCS_RGMII &&
4257             priv->hw->pcs != STMMAC_PCS_TBI &&
4258             priv->hw->pcs != STMMAC_PCS_RTBI)
4259                 stmmac_mdio_unregister(ndev);
4260         free_netdev(ndev);
4261
4262         return 0;
4263 }
4264 EXPORT_SYMBOL_GPL(stmmac_dvr_remove);
4265
4266 /**
4267  * stmmac_suspend - suspend callback
4268  * @dev: device pointer
4269  * Description: this is the function to suspend the device and it is called
4270  * by the platform driver to stop the network queue, release the resources,
4271  * program the PMT register (for WoL), clean and release driver resources.
4272  */
4273 int stmmac_suspend(struct device *dev)
4274 {
4275         struct net_device *ndev = dev_get_drvdata(dev);
4276         struct stmmac_priv *priv = netdev_priv(ndev);
4277         unsigned long flags;
4278
4279         if (!ndev || !netif_running(ndev))
4280                 return 0;
4281
4282         if (ndev->phydev)
4283                 phy_stop(ndev->phydev);
4284
4285         spin_lock_irqsave(&priv->lock, flags);
4286
4287         netif_device_detach(ndev);
4288         stmmac_stop_all_queues(priv);
4289
4290         stmmac_disable_all_queues(priv);
4291
4292         /* Stop TX/RX DMA */
4293         stmmac_stop_all_dma(priv);
4294
4295         /* Enable Power down mode by programming the PMT regs */
4296         if (device_may_wakeup(priv->device)) {
4297                 priv->hw->mac->pmt(priv->hw, priv->wolopts);
4298                 priv->irq_wake = 1;
4299         } else {
4300                 priv->hw->mac->set_mac(priv->ioaddr, false);
4301                 pinctrl_pm_select_sleep_state(priv->device);
4302                 /* Disable the clocks since PMT wakeup is not in use */
4303                 clk_disable(priv->plat->pclk);
4304                 clk_disable(priv->plat->stmmac_clk);
4305         }
4306         spin_unlock_irqrestore(&priv->lock, flags);
4307
4308         priv->oldlink = false;
4309         priv->speed = SPEED_UNKNOWN;
4310         priv->oldduplex = DUPLEX_UNKNOWN;
4311         return 0;
4312 }
4313 EXPORT_SYMBOL_GPL(stmmac_suspend);
4314
4315 /**
4316  * stmmac_reset_queues_param - reset queue parameters
4317  * @priv: driver private structure
4318  */
4319 static void stmmac_reset_queues_param(struct stmmac_priv *priv)
4320 {
4321         u32 rx_cnt = priv->plat->rx_queues_to_use;
4322         u32 tx_cnt = priv->plat->tx_queues_to_use;
4323         u32 queue;
4324
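             /* Rewind the producer/consumer indices of every ring so they
              * match the descriptors that are re-initialized on resume.
              */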
4325         for (queue = 0; queue < rx_cnt; queue++) {
4326                 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
4327
4328                 rx_q->cur_rx = 0;
4329                 rx_q->dirty_rx = 0;
4330         }
4331
4332         for (queue = 0; queue < tx_cnt; queue++) {
4333                 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
4334
4335                 tx_q->cur_tx = 0;
4336                 tx_q->dirty_tx = 0;
4337         }
4338 }
4339
4340 /**
4341  * stmmac_resume - resume callback
4342  * @dev: device pointer
4343  * Description: when resume this function is invoked to setup the DMA and CORE
4344  * in a usable state.
4345  */
4346 int stmmac_resume(struct device *dev)
4347 {
4348         struct net_device *ndev = dev_get_drvdata(dev);
4349         struct stmmac_priv *priv = netdev_priv(ndev);
4350         unsigned long flags;
4351
4352         if (!netif_running(ndev))
4353                 return 0;
4354
4355         /* The Power Down bit in the PMT register is cleared
4356          * automatically as soon as a magic packet or a wake-up frame
4357          * is received. Even so, it's better to clear this bit
4358          * manually because it can cause problems when resuming
4359          * from other devices (e.g. a serial console).
4360          */
4361         if (device_may_wakeup(priv->device)) {
4362                 spin_lock_irqsave(&priv->lock, flags);
4363                 priv->hw->mac->pmt(priv->hw, 0);
4364                 spin_unlock_irqrestore(&priv->lock, flags);
4365                 priv->irq_wake = 0;
4366         } else {
4367                 pinctrl_pm_select_default_state(priv->device);
4368                 /* enable the clocks previously disabled */
4369                 clk_enable(priv->plat->stmmac_clk);
4370                 clk_enable(priv->plat->pclk);
4371                 /* reset the phy so that it's ready */
4372                 if (priv->mii)
4373                         stmmac_mdio_reset(priv->mii);
4374         }
4375
4376         netif_device_attach(ndev);
4377
4378         spin_lock_irqsave(&priv->lock, flags);
4379
4380         stmmac_reset_queues_param(priv);
4381
4382         /* Reset the private MSS value to force the MSS context settings
4383          * at the next TSO xmit (only used by GMAC4).
4384          */
4385         priv->mss = 0;
4386
4387         stmmac_clear_descriptors(priv);
4388
4389         stmmac_hw_setup(ndev, false);
4390         stmmac_init_tx_coalesce(priv);
4391         stmmac_set_rx_mode(ndev);
4392
4393         stmmac_enable_all_queues(priv);
4394
4395         stmmac_start_all_queues(priv);
4396
4397         spin_unlock_irqrestore(&priv->lock, flags);
4398
4399         if (ndev->phydev)
4400                 phy_start(ndev->phydev);
4401
4402         return 0;
4403 }
4404 EXPORT_SYMBOL_GPL(stmmac_resume);
4405
4406 #ifndef MODULE
4407 static int __init stmmac_cmdline_opt(char *str)
4408 {
4409         char *opt;
4410
4411         if (!str || !*str)
4412                 return -EINVAL;
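
             /* Parse a comma-separated list of "name:value" options, e.g.
              * stmmaceth=debug:16,watchdog:4000 (values here are purely
              * illustrative).
              */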
4413         while ((opt = strsep(&str, ",")) != NULL) {
4414                 if (!strncmp(opt, "debug:", 6)) {
4415                         if (kstrtoint(opt + 6, 0, &debug))
4416                                 goto err;
4417                 } else if (!strncmp(opt, "phyaddr:", 8)) {
4418                         if (kstrtoint(opt + 8, 0, &phyaddr))
4419                                 goto err;
4420                 } else if (!strncmp(opt, "buf_sz:", 7)) {
4421                         if (kstrtoint(opt + 7, 0, &buf_sz))
4422                                 goto err;
4423                 } else if (!strncmp(opt, "tc:", 3)) {
4424                         if (kstrtoint(opt + 3, 0, &tc))
4425                                 goto err;
4426                 } else if (!strncmp(opt, "watchdog:", 9)) {
4427                         if (kstrtoint(opt + 9, 0, &watchdog))
4428                                 goto err;
4429                 } else if (!strncmp(opt, "flow_ctrl:", 10)) {
4430                         if (kstrtoint(opt + 10, 0, &flow_ctrl))
4431                                 goto err;
4432                 } else if (!strncmp(opt, "pause:", 6)) {
4433                         if (kstrtoint(opt + 6, 0, &pause))
4434                                 goto err;
4435                 } else if (!strncmp(opt, "eee_timer:", 10)) {
4436                         if (kstrtoint(opt + 10, 0, &eee_timer))
4437                                 goto err;
4438                 } else if (!strncmp(opt, "chain_mode:", 11)) {
4439                         if (kstrtoint(opt + 11, 0, &chain_mode))
4440                                 goto err;
4441                 }
4442         }
4443         return 0;
4444
4445 err:
4446         pr_err("%s: ERROR broken module parameter conversion\n", __func__);
4447         return -EINVAL;
4448 }
4449
4450 __setup("stmmaceth=", stmmac_cmdline_opt);
4451 #endif /* MODULE */
4452
4453 static int __init stmmac_init(void)
4454 {
4455 #ifdef CONFIG_DEBUG_FS
4456         /* Create debugfs main directory if it doesn't exist yet */
4457         if (!stmmac_fs_dir) {
4458                 stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
4459
4460                 if (!stmmac_fs_dir || IS_ERR(stmmac_fs_dir)) {
4461                         pr_err("ERROR %s, debugfs create directory failed\n",
4462                                STMMAC_RESOURCE_NAME);
4463
4464                         return -ENOMEM;
4465                 }
4466         }
4467 #endif
4468
4469         return 0;
4470 }
4471
4472 static void __exit stmmac_exit(void)
4473 {
4474 #ifdef CONFIG_DEBUG_FS
4475         debugfs_remove_recursive(stmmac_fs_dir);
4476 #endif
4477 }
4478
4479 module_init(stmmac_init)
4480 module_exit(stmmac_exit)
4481
4482 MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
4483 MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
4484 MODULE_LICENSE("GPL");