net: stmmac: fix cbs configuration
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
/*******************************************************************************
  This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
  ST Ethernet IPs are built around a Synopsys IP Core.

	Copyright(C) 2007-2011 STMicroelectronics Ltd

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>

  Documentation available at:
	http://www.stlinux.com
  Support available at:
	https://bugzilla.stlinux.com/
*******************************************************************************/

#include <linux/clk.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/if_ether.h>
#include <linux/crc32.h>
#include <linux/mii.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/prefetch.h>
#include <linux/pinctrl/consumer.h>
#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#endif /* CONFIG_DEBUG_FS */
#include <linux/net_tstamp.h>
#include "stmmac_ptp.h"
#include "stmmac.h"
#include <linux/reset.h>
#include <linux/of_mdio.h>
#include "dwmac1000.h"

#define STMMAC_ALIGN(x) L1_CACHE_ALIGN(x)
#define TSO_MAX_BUFF_SIZE       (SZ_16K - 1)

/* Module parameters */
#define TX_TIMEO        5000
static int watchdog = TX_TIMEO;
module_param(watchdog, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)");

static int debug = -1;
module_param(debug, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");

static int phyaddr = -1;
module_param(phyaddr, int, S_IRUGO);
MODULE_PARM_DESC(phyaddr, "Physical device address");

#define STMMAC_TX_THRESH        (DMA_TX_SIZE / 4)
#define STMMAC_RX_THRESH        (DMA_RX_SIZE / 4)

static int flow_ctrl = FLOW_OFF;
module_param(flow_ctrl, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");

static int pause = PAUSE_TIME;
module_param(pause, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(pause, "Flow Control Pause Time");

#define TC_DEFAULT 64
static int tc = TC_DEFAULT;
module_param(tc, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(tc, "DMA threshold control value");

#define DEFAULT_BUFSIZE 1536
static int buf_sz = DEFAULT_BUFSIZE;
module_param(buf_sz, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(buf_sz, "DMA buffer size");

#define STMMAC_RX_COPYBREAK     256

static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
                                      NETIF_MSG_LINK | NETIF_MSG_IFUP |
                                      NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);

#define STMMAC_DEFAULT_LPI_TIMER        1000
static int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
module_param(eee_timer, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
#define STMMAC_LPI_T(x) (jiffies + msecs_to_jiffies(x))
/* By default the driver uses ring mode to manage the tx and rx descriptors,
 * but the user can force use of chain mode instead.
 */
static unsigned int chain_mode;
module_param(chain_mode, int, S_IRUGO);
MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");
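
/* Usage sketch for the parameters above (assumption: the driver is loaded as
 * a module named "stmmac"; when built in, the same knobs can be given on the
 * kernel command line as stmmac.<param>=<value>):
 *
 *   modprobe stmmac buf_sz=4096 chain_mode=1
 *
 * would select 4 KiB DMA buffers and chain mode at load time.
 */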

static irqreturn_t stmmac_interrupt(int irq, void *dev_id);

#ifdef CONFIG_DEBUG_FS
static int stmmac_init_fs(struct net_device *dev);
static void stmmac_exit_fs(struct net_device *dev);
#endif

#define STMMAC_COAL_TIMER(x) (jiffies + usecs_to_jiffies(x))

/**
 * stmmac_verify_args - verify the driver parameters.
 * Description: it checks the driver parameters and sets default values in
 * case of errors.
 */
static void stmmac_verify_args(void)
{
        if (unlikely(watchdog < 0))
                watchdog = TX_TIMEO;
        if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB)))
                buf_sz = DEFAULT_BUFSIZE;
        if (unlikely(flow_ctrl > 1))
                flow_ctrl = FLOW_AUTO;
        else if (likely(flow_ctrl < 0))
                flow_ctrl = FLOW_OFF;
        if (unlikely((pause < 0) || (pause > 0xffff)))
                pause = PAUSE_TIME;
        if (eee_timer < 0)
                eee_timer = STMMAC_DEFAULT_LPI_TIMER;
}

/**
 * stmmac_clk_csr_set - dynamically set the MDC clock
 * @priv: driver private structure
 * Description: this is to dynamically set the MDC clock according to the csr
 * clock input.
 * Note:
 *      If a specific clk_csr value is passed from the platform
 *      this means that the CSR Clock Range selection cannot be
 *      changed at run-time and it is fixed (as reported in the driver
 *      documentation). Otherwise the driver will try to set the MDC
 *      clock dynamically according to the actual clock input.
 */
static void stmmac_clk_csr_set(struct stmmac_priv *priv)
{
        u32 clk_rate;

        clk_rate = clk_get_rate(priv->plat->stmmac_clk);

        /* Platform provided default clk_csr would be assumed valid
         * for all other cases except for the below mentioned ones.
         * For values higher than the IEEE 802.3 specified frequency
         * we cannot estimate the proper divider as the frequency of
         * clk_csr_i is not known. So we do not change the default
         * divider.
         */
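        /* For example (illustrative value): a 50 MHz clk_csr_i falls in the
         * 35-60 MHz band below and selects STMMAC_CSR_35_60M.
         */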
        if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
                if (clk_rate < CSR_F_35M)
                        priv->clk_csr = STMMAC_CSR_20_35M;
                else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M))
                        priv->clk_csr = STMMAC_CSR_35_60M;
                else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M))
                        priv->clk_csr = STMMAC_CSR_60_100M;
                else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M))
                        priv->clk_csr = STMMAC_CSR_100_150M;
                else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
                        priv->clk_csr = STMMAC_CSR_150_250M;
                else if ((clk_rate >= CSR_F_250M) && (clk_rate < CSR_F_300M))
                        priv->clk_csr = STMMAC_CSR_250_300M;
        }
}

static void print_pkt(unsigned char *buf, int len)
{
        pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf);
        print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
}

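/* stmmac_tx_avail - number of free descriptors in the TX ring.
 * dirty_tx chases cur_tx around the circular ring; one slot is always kept
 * unused so that a completely full ring can be distinguished from an empty
 * one (hence the "- 1" in both branches below).
 */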
static inline u32 stmmac_tx_avail(struct stmmac_priv *priv)
{
        u32 avail;

        if (priv->dirty_tx > priv->cur_tx)
                avail = priv->dirty_tx - priv->cur_tx - 1;
        else
                avail = DMA_TX_SIZE - priv->cur_tx + priv->dirty_tx - 1;

        return avail;
}

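/* stmmac_rx_dirty - number of RX ring entries consumed by the hardware and
 * not yet refilled with fresh buffers by the driver.
 */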
static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv)
{
        u32 dirty;

        if (priv->dirty_rx <= priv->cur_rx)
                dirty = priv->cur_rx - priv->dirty_rx;
        else
                dirty = DMA_RX_SIZE - priv->dirty_rx + priv->cur_rx;

        return dirty;
}

/**
 * stmmac_hw_fix_mac_speed - callback for speed selection
 * @priv: driver private structure
 * Description: on some platforms (e.g. ST), some HW system configuration
 * registers have to be set according to the link speed negotiated.
 */
static inline void stmmac_hw_fix_mac_speed(struct stmmac_priv *priv)
{
        struct net_device *ndev = priv->dev;
        struct phy_device *phydev = ndev->phydev;

        if (likely(priv->plat->fix_mac_speed))
                priv->plat->fix_mac_speed(priv->plat->bsp_priv, phydev->speed);
}

/**
 * stmmac_enable_eee_mode - check and enter in LPI mode
 * @priv: driver private structure
 * Description: this function verifies that the TX path is idle and, if so,
 * enters LPI mode (EEE).
 */
static void stmmac_enable_eee_mode(struct stmmac_priv *priv)
{
        /* Check and enter in LPI mode */
        if ((priv->dirty_tx == priv->cur_tx) &&
            (priv->tx_path_in_lpi_mode == false))
                priv->hw->mac->set_eee_mode(priv->hw,
                                            priv->plat->en_tx_lpi_clockgating);
}

/**
 * stmmac_disable_eee_mode - disable and exit from LPI mode
 * @priv: driver private structure
 * Description: this function disables EEE and exits the LPI state if it is
 * active. It is called from the xmit path.
 */
void stmmac_disable_eee_mode(struct stmmac_priv *priv)
{
        priv->hw->mac->reset_eee_mode(priv->hw);
        del_timer_sync(&priv->eee_ctrl_timer);
        priv->tx_path_in_lpi_mode = false;
}

/**
 * stmmac_eee_ctrl_timer - EEE TX SW timer.
 * @arg : data hook
 * Description:
 *  if there is no data transfer and if we are not in LPI state,
 *  then the MAC Transmitter can be moved to LPI state.
 */
static void stmmac_eee_ctrl_timer(unsigned long arg)
{
        struct stmmac_priv *priv = (struct stmmac_priv *)arg;

        stmmac_enable_eee_mode(priv);
        mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
}

/**
 * stmmac_eee_init - init EEE
 * @priv: driver private structure
 * Description:
 *  if the GMAC supports the EEE (from the HW cap reg) and the phy device
 *  can also manage EEE, this function enables the LPI state and starts the
 *  related timer.
 */
bool stmmac_eee_init(struct stmmac_priv *priv)
{
        struct net_device *ndev = priv->dev;
        unsigned long flags;
        bool ret = false;

        /* Using PCS we cannot deal with the phy registers at this stage
         * so we do not support extra features like EEE.
         */
        if ((priv->hw->pcs == STMMAC_PCS_RGMII) ||
            (priv->hw->pcs == STMMAC_PCS_TBI) ||
            (priv->hw->pcs == STMMAC_PCS_RTBI))
                goto out;

        /* MAC core supports the EEE feature. */
        if (priv->dma_cap.eee) {
                int tx_lpi_timer = priv->tx_lpi_timer;

                /* Check if the PHY supports EEE */
                if (phy_init_eee(ndev->phydev, 1)) {
                        /* Handle the case where EEE can no longer be
                         * supported at run-time (for example because the
                         * link partner caps have changed).
                         * In that case the driver disables its own timers.
                         */
                        spin_lock_irqsave(&priv->lock, flags);
                        if (priv->eee_active) {
                                netdev_dbg(priv->dev, "disable EEE\n");
                                del_timer_sync(&priv->eee_ctrl_timer);
                                priv->hw->mac->set_eee_timer(priv->hw, 0,
                                                             tx_lpi_timer);
                        }
                        priv->eee_active = 0;
                        spin_unlock_irqrestore(&priv->lock, flags);
                        goto out;
                }
                /* Activate the EEE and start timers */
                spin_lock_irqsave(&priv->lock, flags);
                if (!priv->eee_active) {
                        priv->eee_active = 1;
                        setup_timer(&priv->eee_ctrl_timer,
                                    stmmac_eee_ctrl_timer,
                                    (unsigned long)priv);
                        mod_timer(&priv->eee_ctrl_timer,
                                  STMMAC_LPI_T(eee_timer));

                        priv->hw->mac->set_eee_timer(priv->hw,
                                                     STMMAC_DEFAULT_LIT_LS,
                                                     tx_lpi_timer);
                }
                /* Set HW EEE according to the speed */
                priv->hw->mac->set_eee_pls(priv->hw, ndev->phydev->link);

                ret = true;
                spin_unlock_irqrestore(&priv->lock, flags);

                netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
        }
out:
        return ret;
}

/* stmmac_get_tx_hwtstamp - get HW TX timestamps
 * @priv: driver private structure
 * @p : descriptor pointer
 * @skb : the socket buffer
 * Description :
 * This function reads the timestamp from the descriptor, performs some
 * sanity checks and then passes it to the stack.
 */
static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
                                   struct dma_desc *p, struct sk_buff *skb)
{
        struct skb_shared_hwtstamps shhwtstamp;
        u64 ns;

        if (!priv->hwts_tx_en)
                return;

        /* exit if skb doesn't support hw tstamp */
        if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
                return;

        /* check tx tstamp status */
        if (!priv->hw->desc->get_tx_timestamp_status(p)) {
                /* get the valid tstamp */
                ns = priv->hw->desc->get_timestamp(p, priv->adv_ts);

                memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
                shhwtstamp.hwtstamp = ns_to_ktime(ns);

                netdev_info(priv->dev, "get valid TX hw timestamp %llu\n", ns);
                /* pass tstamp to stack */
                skb_tstamp_tx(skb, &shhwtstamp);
        }
}

/* stmmac_get_rx_hwtstamp - get HW RX timestamps
 * @priv: driver private structure
 * @p : descriptor pointer
 * @np : next descriptor pointer
 * @skb : the socket buffer
 * Description :
 * This function reads the received packet's timestamp from the descriptor
 * and passes it to the stack. It also performs some sanity checks.
 */
static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
                                   struct dma_desc *np, struct sk_buff *skb)
{
        struct skb_shared_hwtstamps *shhwtstamp = NULL;
        u64 ns;

        if (!priv->hwts_rx_en)
                return;

        /* Check if timestamp is available */
        if (!priv->hw->desc->get_rx_timestamp_status(p, priv->adv_ts)) {
                /* For GMAC4, the valid timestamp is from CTX next desc. */
                if (priv->plat->has_gmac4)
                        ns = priv->hw->desc->get_timestamp(np, priv->adv_ts);
                else
                        ns = priv->hw->desc->get_timestamp(p, priv->adv_ts);

                netdev_info(priv->dev, "get valid RX hw timestamp %llu\n", ns);
                shhwtstamp = skb_hwtstamps(skb);
                memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
                shhwtstamp->hwtstamp = ns_to_ktime(ns);
        } else {
                netdev_err(priv->dev, "cannot get RX hw timestamp\n");
        }
}

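/* A minimal userspace sketch of how the handler below is reached (assuming
 * the standard SIOCSHWTSTAMP flow; "eth0" and the filter choice are
 * illustrative):
 *
 *   struct hwtstamp_config cfg = {
 *           .tx_type = HWTSTAMP_TX_ON,
 *           .rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
 *   };
 *   struct ifreq ifr;
 *   strncpy(ifr.ifr_name, "eth0", IFNAMSIZ);
 *   ifr.ifr_data = (void *)&cfg;
 *   ioctl(sock_fd, SIOCSHWTSTAMP, &ifr);
 */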
/**
 *  stmmac_hwtstamp_ioctl - control hardware timestamping.
 *  @dev: device pointer.
 *  @ifr: An IOCTL specific structure, that can contain a pointer to
 *  a proprietary structure used to pass information to the driver.
 *  Description:
 *  This function configures the MAC to enable/disable both outgoing(TX)
 *  and incoming(RX) packets time stamping based on user input.
 *  Return Value:
 *  0 on success and an appropriate negative error code on failure.
 */
static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
{
        struct stmmac_priv *priv = netdev_priv(dev);
        struct hwtstamp_config config;
        struct timespec64 now;
        u64 temp = 0;
        u32 ptp_v2 = 0;
        u32 tstamp_all = 0;
        u32 ptp_over_ipv4_udp = 0;
        u32 ptp_over_ipv6_udp = 0;
        u32 ptp_over_ethernet = 0;
        u32 snap_type_sel = 0;
        u32 ts_master_en = 0;
        u32 ts_event_en = 0;
        u32 value = 0;
        u32 sec_inc;

        if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
                netdev_alert(priv->dev, "No support for HW time stamping\n");
                priv->hwts_tx_en = 0;
                priv->hwts_rx_en = 0;

                return -EOPNOTSUPP;
        }

        if (copy_from_user(&config, ifr->ifr_data,
                           sizeof(struct hwtstamp_config)))
                return -EFAULT;

        netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
                   __func__, config.flags, config.tx_type, config.rx_filter);

        /* reserved for future extensions */
        if (config.flags)
                return -EINVAL;

        if (config.tx_type != HWTSTAMP_TX_OFF &&
            config.tx_type != HWTSTAMP_TX_ON)
                return -ERANGE;

        if (priv->adv_ts) {
                switch (config.rx_filter) {
                case HWTSTAMP_FILTER_NONE:
                        /* time stamp no incoming packet at all */
                        config.rx_filter = HWTSTAMP_FILTER_NONE;
                        break;

                case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
                        /* PTP v1, UDP, any kind of event packet */
                        config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
                        /* take time stamp for all event messages */
                        snap_type_sel = PTP_TCR_SNAPTYPSEL_1;

                        ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
                        ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
                        break;

                case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
                        /* PTP v1, UDP, Sync packet */
                        config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
                        /* take time stamp for SYNC messages only */
                        ts_event_en = PTP_TCR_TSEVNTENA;

                        ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
                        ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
                        break;

                case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
                        /* PTP v1, UDP, Delay_req packet */
                        config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
                        /* take time stamp for Delay_Req messages only */
                        ts_master_en = PTP_TCR_TSMSTRENA;
                        ts_event_en = PTP_TCR_TSEVNTENA;

                        ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
                        ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
                        break;

                case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
                        /* PTP v2, UDP, any kind of event packet */
                        config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
                        ptp_v2 = PTP_TCR_TSVER2ENA;
                        /* take time stamp for all event messages */
                        snap_type_sel = PTP_TCR_SNAPTYPSEL_1;

                        ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
                        ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
                        break;

                case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
                        /* PTP v2, UDP, Sync packet */
                        config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
                        ptp_v2 = PTP_TCR_TSVER2ENA;
                        /* take time stamp for SYNC messages only */
                        ts_event_en = PTP_TCR_TSEVNTENA;

                        ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
                        ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
                        break;

                case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
                        /* PTP v2, UDP, Delay_req packet */
                        config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
                        ptp_v2 = PTP_TCR_TSVER2ENA;
                        /* take time stamp for Delay_Req messages only */
                        ts_master_en = PTP_TCR_TSMSTRENA;
                        ts_event_en = PTP_TCR_TSEVNTENA;

                        ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
                        ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
                        break;

                case HWTSTAMP_FILTER_PTP_V2_EVENT:
                        /* PTP v2/802.AS1, any layer, any kind of event packet */
                        config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
                        ptp_v2 = PTP_TCR_TSVER2ENA;
                        /* take time stamp for all event messages */
                        snap_type_sel = PTP_TCR_SNAPTYPSEL_1;

                        ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
                        ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
                        ptp_over_ethernet = PTP_TCR_TSIPENA;
                        break;

                case HWTSTAMP_FILTER_PTP_V2_SYNC:
                        /* PTP v2/802.AS1, any layer, Sync packet */
                        config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
                        ptp_v2 = PTP_TCR_TSVER2ENA;
                        /* take time stamp for SYNC messages only */
                        ts_event_en = PTP_TCR_TSEVNTENA;

                        ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
                        ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
                        ptp_over_ethernet = PTP_TCR_TSIPENA;
                        break;

                case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
                        /* PTP v2/802.AS1, any layer, Delay_req packet */
                        config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
                        ptp_v2 = PTP_TCR_TSVER2ENA;
                        /* take time stamp for Delay_Req messages only */
                        ts_master_en = PTP_TCR_TSMSTRENA;
                        ts_event_en = PTP_TCR_TSEVNTENA;

                        ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
                        ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
                        ptp_over_ethernet = PTP_TCR_TSIPENA;
                        break;

                case HWTSTAMP_FILTER_ALL:
                        /* time stamp any incoming packet */
                        config.rx_filter = HWTSTAMP_FILTER_ALL;
                        tstamp_all = PTP_TCR_TSENALL;
                        break;

                default:
                        return -ERANGE;
                }
        } else {
                switch (config.rx_filter) {
                case HWTSTAMP_FILTER_NONE:
                        config.rx_filter = HWTSTAMP_FILTER_NONE;
                        break;
                default:
                        /* PTP v1, UDP, any kind of event packet */
                        config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
                        break;
                }
        }
        priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
        priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;

        if (!priv->hwts_tx_en && !priv->hwts_rx_en)
                priv->hw->ptp->config_hw_tstamping(priv->ptpaddr, 0);
        else {
                value = (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | PTP_TCR_TSCTRLSSR |
                         tstamp_all | ptp_v2 | ptp_over_ethernet |
                         ptp_over_ipv6_udp | ptp_over_ipv4_udp | ts_event_en |
                         ts_master_en | snap_type_sel);
                priv->hw->ptp->config_hw_tstamping(priv->ptpaddr, value);

                /* program Sub Second Increment reg */
                sec_inc = priv->hw->ptp->config_sub_second_increment(
                        priv->ptpaddr, priv->plat->clk_ptp_rate,
                        priv->plat->has_gmac4);
                temp = div_u64(1000000000ULL, sec_inc);

                /* calculate the default addend value:
                 * formula is: addend = (2^32) / freq_div_ratio,
                 * where freq_div_ratio = 1e9 ns / sec_inc
                 */
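                /* Worked example with illustrative numbers: a 62.5 MHz
                 * clk_ptp_rate and a 20 ns sec_inc give freq_div_ratio =
                 * 50000000, so addend = 2^32 * 50000000 / 62500000
                 * ~= 0xCCCCCCCC, i.e. the accumulator overflows (and the
                 * sub-second counter advances) on 4 of every 5 clock cycles.
                 */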
                temp = (u64)(temp << 32);
                priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
                priv->hw->ptp->config_addend(priv->ptpaddr,
                                             priv->default_addend);

                /* initialize system time */
                ktime_get_real_ts64(&now);

                /* lower 32 bits of tv_sec are safe until y2106 */
                priv->hw->ptp->init_systime(priv->ptpaddr, (u32)now.tv_sec,
                                            now.tv_nsec);
        }

        return copy_to_user(ifr->ifr_data, &config,
                            sizeof(struct hwtstamp_config)) ? -EFAULT : 0;
}

/**
 * stmmac_init_ptp - init PTP
 * @priv: driver private structure
 * Description: this is to verify if the HW supports the PTPv1 or PTPv2.
 * This is done by looking at the HW cap. register.
 * This function also registers the ptp driver.
 */
static int stmmac_init_ptp(struct stmmac_priv *priv)
{
        if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
                return -EOPNOTSUPP;

        priv->adv_ts = 0;
        /* Check if adv_ts can be enabled for dwmac 4.x core */
        if (priv->plat->has_gmac4 && priv->dma_cap.atime_stamp)
                priv->adv_ts = 1;
        /* Dwmac 3.x core with extend_desc can support adv_ts */
        else if (priv->extend_desc && priv->dma_cap.atime_stamp)
                priv->adv_ts = 1;

        if (priv->dma_cap.time_stamp)
                netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n");

        if (priv->adv_ts)
                netdev_info(priv->dev,
                            "IEEE 1588-2008 Advanced Timestamp supported\n");

        priv->hw->ptp = &stmmac_ptp;
        priv->hwts_tx_en = 0;
        priv->hwts_rx_en = 0;

        stmmac_ptp_register(priv);

        return 0;
}

static void stmmac_release_ptp(struct stmmac_priv *priv)
{
        if (priv->plat->clk_ptp_ref)
                clk_disable_unprepare(priv->plat->clk_ptp_ref);
        stmmac_ptp_unregister(priv);
}

/**
 *  stmmac_mac_flow_ctrl - Configure flow control in all queues
 *  @priv: driver private structure
 *  @duplex: duplex mode negotiated on the link
 *  Description: It is used for configuring the flow control in all queues
 */
static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex)
{
        u32 tx_cnt = priv->plat->tx_queues_to_use;

        priv->hw->mac->flow_ctrl(priv->hw, duplex, priv->flow_ctrl,
                                 priv->pause, tx_cnt);
}

/**
 * stmmac_adjust_link - adjusts the link parameters
 * @dev: net device structure
 * Description: this is the helper called by the physical abstraction layer
 * drivers to communicate the phy link status. According to the speed and
 * duplex this driver can invoke registered glue-logic as well.
 * It also invokes the EEE initialization because this could be needed when
 * switching between different (EEE-capable) networks.
 */
static void stmmac_adjust_link(struct net_device *dev)
{
        struct stmmac_priv *priv = netdev_priv(dev);
        struct phy_device *phydev = dev->phydev;
        unsigned long flags;
        int new_state = 0;

        if (!phydev)
                return;

        spin_lock_irqsave(&priv->lock, flags);

        if (phydev->link) {
                u32 ctrl = readl(priv->ioaddr + MAC_CTRL_REG);

                /* Now we make sure that we can be in full duplex mode.
                 * If not, we operate in half-duplex mode.
                 */
                if (phydev->duplex != priv->oldduplex) {
                        new_state = 1;
                        if (!(phydev->duplex))
                                ctrl &= ~priv->hw->link.duplex;
                        else
                                ctrl |= priv->hw->link.duplex;
                        priv->oldduplex = phydev->duplex;
                }
                /* Flow Control operation */
                if (phydev->pause)
                        stmmac_mac_flow_ctrl(priv, phydev->duplex);

                if (phydev->speed != priv->speed) {
                        new_state = 1;
                        switch (phydev->speed) {
                        case 1000:
                                if (priv->plat->has_gmac ||
                                    priv->plat->has_gmac4)
                                        ctrl &= ~priv->hw->link.port;
                                break;
                        case 100:
                                if (priv->plat->has_gmac ||
                                    priv->plat->has_gmac4) {
                                        ctrl |= priv->hw->link.port;
                                        ctrl |= priv->hw->link.speed;
                                } else {
                                        ctrl &= ~priv->hw->link.port;
                                }
                                break;
                        case 10:
                                if (priv->plat->has_gmac ||
                                    priv->plat->has_gmac4) {
                                        ctrl |= priv->hw->link.port;
                                        ctrl &= ~(priv->hw->link.speed);
                                } else {
                                        ctrl &= ~priv->hw->link.port;
                                }
                                break;
                        default:
                                netif_warn(priv, link, priv->dev,
                                           "broken speed: %d\n", phydev->speed);
                                phydev->speed = SPEED_UNKNOWN;
                                break;
                        }
                        if (phydev->speed != SPEED_UNKNOWN)
                                stmmac_hw_fix_mac_speed(priv);
                        priv->speed = phydev->speed;
                }

                writel(ctrl, priv->ioaddr + MAC_CTRL_REG);

                if (!priv->oldlink) {
                        new_state = 1;
                        priv->oldlink = 1;
                }
        } else if (priv->oldlink) {
                new_state = 1;
                priv->oldlink = 0;
                priv->speed = SPEED_UNKNOWN;
                priv->oldduplex = DUPLEX_UNKNOWN;
        }

        if (new_state && netif_msg_link(priv))
                phy_print_status(phydev);

        spin_unlock_irqrestore(&priv->lock, flags);

        if (phydev->is_pseudo_fixed_link)
                /* Stop the PHY layer from calling the hook to adjust the
                 * link in case a switch is attached to the stmmac driver.
                 */
                phydev->irq = PHY_IGNORE_INTERRUPT;
        else
                /* At this stage, init the EEE if supported.
                 * Never called in case of fixed_link.
                 */
                priv->eee_enabled = stmmac_eee_init(priv);
}

/**
 * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
 * @priv: driver private structure
 * Description: this is to verify if the HW supports the PCS.
 * Physical Coding Sublayer (PCS) interface that can be used when the MAC is
 * configured for the TBI, RTBI, or SGMII PHY interface.
 */
static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
{
        int interface = priv->plat->interface;

        if (priv->dma_cap.pcs) {
                if ((interface == PHY_INTERFACE_MODE_RGMII) ||
                    (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
                    (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
                    (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
                        netdev_dbg(priv->dev, "PCS RGMII support enabled\n");
                        priv->hw->pcs = STMMAC_PCS_RGMII;
                } else if (interface == PHY_INTERFACE_MODE_SGMII) {
                        netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
                        priv->hw->pcs = STMMAC_PCS_SGMII;
                }
        }
}

/**
 * stmmac_init_phy - PHY initialization
 * @dev: net device structure
 * Description: it initializes the driver's PHY state, and attaches the PHY
 * to the mac driver.
 *  Return value:
 *  0 on success
 */
static int stmmac_init_phy(struct net_device *dev)
{
        struct stmmac_priv *priv = netdev_priv(dev);
        struct phy_device *phydev;
        char phy_id_fmt[MII_BUS_ID_SIZE + 3];
        char bus_id[MII_BUS_ID_SIZE];
        int interface = priv->plat->interface;
        int max_speed = priv->plat->max_speed;

        priv->oldlink = 0;
        priv->speed = SPEED_UNKNOWN;
        priv->oldduplex = DUPLEX_UNKNOWN;

        if (priv->plat->phy_node) {
                phydev = of_phy_connect(dev, priv->plat->phy_node,
                                        &stmmac_adjust_link, 0, interface);
        } else {
                snprintf(bus_id, MII_BUS_ID_SIZE, "stmmac-%x",
                         priv->plat->bus_id);

                snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id,
                         priv->plat->phy_addr);
                netdev_dbg(priv->dev, "%s: trying to attach to %s\n", __func__,
                           phy_id_fmt);

                phydev = phy_connect(dev, phy_id_fmt, &stmmac_adjust_link,
                                     interface);
        }

        if (IS_ERR_OR_NULL(phydev)) {
                netdev_err(priv->dev, "Could not attach to PHY\n");
                if (!phydev)
                        return -ENODEV;

                return PTR_ERR(phydev);
        }

        /* Stop Advertising 1000BASE Capability if interface is not GMII */
        if ((interface == PHY_INTERFACE_MODE_MII) ||
            (interface == PHY_INTERFACE_MODE_RMII) ||
            (max_speed < 1000 && max_speed > 0))
                phydev->advertising &= ~(SUPPORTED_1000baseT_Half |
                                         SUPPORTED_1000baseT_Full);

        /*
         * Broken HW is sometimes missing the pull-up resistor on the
         * MDIO line, which results in reads to non-existent devices returning
         * 0 rather than 0xffff. Catch this here and treat 0 as a non-existent
         * device as well.
         * Note: phydev->phy_id is the result of reading the UID PHY registers.
         */
        if (!priv->plat->phy_node && phydev->phy_id == 0) {
                phy_disconnect(phydev);
                return -ENODEV;
        }

        /* stmmac_adjust_link will change this to PHY_IGNORE_INTERRUPT to avoid
         * subsequent PHY polling, make sure we force a link transition if
         * we have a UP/DOWN/UP transition
         */
        if (phydev->is_pseudo_fixed_link)
                phydev->irq = PHY_POLL;

        phy_attached_info(phydev);
        return 0;
}

static void stmmac_display_rings(struct stmmac_priv *priv)
{
        void *head_rx, *head_tx;

        if (priv->extend_desc) {
                head_rx = (void *)priv->dma_erx;
                head_tx = (void *)priv->dma_etx;
        } else {
                head_rx = (void *)priv->dma_rx;
                head_tx = (void *)priv->dma_tx;
        }

        /* Display Rx ring */
        priv->hw->desc->display_ring(head_rx, DMA_RX_SIZE, true);
        /* Display Tx ring */
        priv->hw->desc->display_ring(head_tx, DMA_TX_SIZE, false);
}

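/* Map the MTU to one of the DMA buffer-size buckets; e.g. the default
 * 1500-byte MTU fits in DEFAULT_BUFSIZE, while a 3000-byte MTU
 * (>= BUF_SIZE_2KiB) needs BUF_SIZE_4KiB.
 */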
static int stmmac_set_bfsize(int mtu, int bufsize)
{
        int ret = bufsize;

        if (mtu >= BUF_SIZE_4KiB)
                ret = BUF_SIZE_8KiB;
        else if (mtu >= BUF_SIZE_2KiB)
                ret = BUF_SIZE_4KiB;
        else if (mtu > DEFAULT_BUFSIZE)
                ret = BUF_SIZE_2KiB;
        else
                ret = DEFAULT_BUFSIZE;

        return ret;
}

/**
 * stmmac_clear_descriptors - clear descriptors
 * @priv: driver private structure
 * Description: this function is called to clear the tx and rx descriptors,
 * whether basic or extended descriptors are in use.
 */
static void stmmac_clear_descriptors(struct stmmac_priv *priv)
{
        int i;

        /* Clear the Rx/Tx descriptors */
        for (i = 0; i < DMA_RX_SIZE; i++)
                if (priv->extend_desc)
                        priv->hw->desc->init_rx_desc(&priv->dma_erx[i].basic,
                                                     priv->use_riwt, priv->mode,
                                                     (i == DMA_RX_SIZE - 1));
                else
                        priv->hw->desc->init_rx_desc(&priv->dma_rx[i],
                                                     priv->use_riwt, priv->mode,
                                                     (i == DMA_RX_SIZE - 1));
        for (i = 0; i < DMA_TX_SIZE; i++)
                if (priv->extend_desc)
                        priv->hw->desc->init_tx_desc(&priv->dma_etx[i].basic,
                                                     priv->mode,
                                                     (i == DMA_TX_SIZE - 1));
                else
                        priv->hw->desc->init_tx_desc(&priv->dma_tx[i],
                                                     priv->mode,
                                                     (i == DMA_TX_SIZE - 1));
}

/**
 * stmmac_init_rx_buffers - init the RX descriptor buffer.
 * @priv: driver private structure
 * @p: descriptor pointer
 * @i: descriptor index
 * @flags: gfp flag.
 * Description: this function is called to allocate a receive buffer, perform
 * the DMA mapping and init the descriptor.
 */
static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
                                  int i, gfp_t flags)
{
        struct sk_buff *skb;

        skb = __netdev_alloc_skb_ip_align(priv->dev, priv->dma_buf_sz, flags);
        if (!skb) {
                netdev_err(priv->dev,
                           "%s: Rx init fails; skb is NULL\n", __func__);
                return -ENOMEM;
        }
        priv->rx_skbuff[i] = skb;
        priv->rx_skbuff_dma[i] = dma_map_single(priv->device, skb->data,
                                                priv->dma_buf_sz,
                                                DMA_FROM_DEVICE);
        if (dma_mapping_error(priv->device, priv->rx_skbuff_dma[i])) {
                netdev_err(priv->dev, "%s: DMA mapping error\n", __func__);
                dev_kfree_skb_any(skb);
                return -EINVAL;
        }

        if (priv->synopsys_id >= DWMAC_CORE_4_00)
                p->des0 = cpu_to_le32(priv->rx_skbuff_dma[i]);
        else
                p->des2 = cpu_to_le32(priv->rx_skbuff_dma[i]);

        if ((priv->hw->mode->init_desc3) &&
            (priv->dma_buf_sz == BUF_SIZE_16KiB))
                priv->hw->mode->init_desc3(p);

        return 0;
}

static void stmmac_free_rx_buffers(struct stmmac_priv *priv, int i)
{
        if (priv->rx_skbuff[i]) {
                dma_unmap_single(priv->device, priv->rx_skbuff_dma[i],
                                 priv->dma_buf_sz, DMA_FROM_DEVICE);
                dev_kfree_skb_any(priv->rx_skbuff[i]);
        }
        priv->rx_skbuff[i] = NULL;
}

/**
 * init_dma_desc_rings - init the RX/TX descriptor rings
 * @dev: net device structure
 * @flags: gfp flag.
 * Description: this function initializes the DMA RX/TX descriptors
 * and allocates the socket buffers. It supports the chained and ring
 * modes.
 */
static int init_dma_desc_rings(struct net_device *dev, gfp_t flags)
{
        int i;
        struct stmmac_priv *priv = netdev_priv(dev);
        unsigned int bfsize = 0;
        int ret = -ENOMEM;

        if (priv->hw->mode->set_16kib_bfsize)
                bfsize = priv->hw->mode->set_16kib_bfsize(dev->mtu);

        if (bfsize < BUF_SIZE_16KiB)
                bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz);

        priv->dma_buf_sz = bfsize;

        netif_dbg(priv, probe, priv->dev,
                  "(%s) dma_rx_phy=0x%08x dma_tx_phy=0x%08x\n",
                  __func__, (u32)priv->dma_rx_phy, (u32)priv->dma_tx_phy);

        /* RX INITIALIZATION */
        netif_dbg(priv, probe, priv->dev,
                  "SKB addresses:\nskb\t\tskb data\tdma data\n");

        for (i = 0; i < DMA_RX_SIZE; i++) {
                struct dma_desc *p;

                if (priv->extend_desc)
                        p = &((priv->dma_erx + i)->basic);
                else
                        p = priv->dma_rx + i;

                ret = stmmac_init_rx_buffers(priv, p, i, flags);
                if (ret)
                        goto err_init_rx_buffers;

                netif_dbg(priv, probe, priv->dev, "[%p]\t[%p]\t[%x]\n",
                          priv->rx_skbuff[i], priv->rx_skbuff[i]->data,
                          (unsigned int)priv->rx_skbuff_dma[i]);
        }
        priv->cur_rx = 0;
        priv->dirty_rx = (unsigned int)(i - DMA_RX_SIZE);
        buf_sz = bfsize;

        /* Setup the chained descriptor addresses */
        if (priv->mode == STMMAC_CHAIN_MODE) {
                if (priv->extend_desc) {
                        priv->hw->mode->init(priv->dma_erx, priv->dma_rx_phy,
                                             DMA_RX_SIZE, 1);
                        priv->hw->mode->init(priv->dma_etx, priv->dma_tx_phy,
                                             DMA_TX_SIZE, 1);
                } else {
                        priv->hw->mode->init(priv->dma_rx, priv->dma_rx_phy,
                                             DMA_RX_SIZE, 0);
                        priv->hw->mode->init(priv->dma_tx, priv->dma_tx_phy,
                                             DMA_TX_SIZE, 0);
                }
        }

        /* TX INITIALIZATION */
        for (i = 0; i < DMA_TX_SIZE; i++) {
                struct dma_desc *p;

                if (priv->extend_desc)
                        p = &((priv->dma_etx + i)->basic);
                else
                        p = priv->dma_tx + i;

                if (priv->synopsys_id >= DWMAC_CORE_4_00) {
                        p->des0 = 0;
                        p->des1 = 0;
                        p->des2 = 0;
                        p->des3 = 0;
                } else {
                        p->des2 = 0;
                }

                priv->tx_skbuff_dma[i].buf = 0;
                priv->tx_skbuff_dma[i].map_as_page = false;
                priv->tx_skbuff_dma[i].len = 0;
                priv->tx_skbuff_dma[i].last_segment = false;
                priv->tx_skbuff[i] = NULL;
        }

        priv->dirty_tx = 0;
        priv->cur_tx = 0;
        netdev_reset_queue(priv->dev);

        stmmac_clear_descriptors(priv);

        if (netif_msg_hw(priv))
                stmmac_display_rings(priv);

        return 0;
err_init_rx_buffers:
        while (--i >= 0)
                stmmac_free_rx_buffers(priv, i);
        return ret;
}

static void dma_free_rx_skbufs(struct stmmac_priv *priv)
{
        int i;

        for (i = 0; i < DMA_RX_SIZE; i++)
                stmmac_free_rx_buffers(priv, i);
}

static void dma_free_tx_skbufs(struct stmmac_priv *priv)
{
        int i;

        for (i = 0; i < DMA_TX_SIZE; i++) {
                if (priv->tx_skbuff_dma[i].buf) {
                        if (priv->tx_skbuff_dma[i].map_as_page)
                                dma_unmap_page(priv->device,
                                               priv->tx_skbuff_dma[i].buf,
                                               priv->tx_skbuff_dma[i].len,
                                               DMA_TO_DEVICE);
                        else
                                dma_unmap_single(priv->device,
                                                 priv->tx_skbuff_dma[i].buf,
                                                 priv->tx_skbuff_dma[i].len,
                                                 DMA_TO_DEVICE);
                }

                if (priv->tx_skbuff[i]) {
                        dev_kfree_skb_any(priv->tx_skbuff[i]);
                        priv->tx_skbuff[i] = NULL;
                        priv->tx_skbuff_dma[i].buf = 0;
                        priv->tx_skbuff_dma[i].map_as_page = false;
                }
        }
}

/**
 * alloc_dma_desc_resources - alloc TX/RX resources.
 * @priv: private structure
 * Description: according to which descriptor can be used (extended or basic)
 * this function allocates the resources for TX and RX paths. In case of
 * reception, for example, it pre-allocates the RX socket buffers in order to
 * allow the zero-copy mechanism.
 */
static int alloc_dma_desc_resources(struct stmmac_priv *priv)
{
        int ret = -ENOMEM;

        priv->rx_skbuff_dma = kmalloc_array(DMA_RX_SIZE, sizeof(dma_addr_t),
                                            GFP_KERNEL);
        if (!priv->rx_skbuff_dma)
                return -ENOMEM;

        priv->rx_skbuff = kmalloc_array(DMA_RX_SIZE, sizeof(struct sk_buff *),
                                        GFP_KERNEL);
        if (!priv->rx_skbuff)
                goto err_rx_skbuff;

        priv->tx_skbuff_dma = kmalloc_array(DMA_TX_SIZE,
                                            sizeof(*priv->tx_skbuff_dma),
                                            GFP_KERNEL);
        if (!priv->tx_skbuff_dma)
                goto err_tx_skbuff_dma;

        priv->tx_skbuff = kmalloc_array(DMA_TX_SIZE, sizeof(struct sk_buff *),
                                        GFP_KERNEL);
        if (!priv->tx_skbuff)
                goto err_tx_skbuff;

        if (priv->extend_desc) {
                priv->dma_erx = dma_zalloc_coherent(priv->device, DMA_RX_SIZE *
                                                    sizeof(struct
                                                           dma_extended_desc),
                                                    &priv->dma_rx_phy,
                                                    GFP_KERNEL);
                if (!priv->dma_erx)
                        goto err_dma;

                priv->dma_etx = dma_zalloc_coherent(priv->device, DMA_TX_SIZE *
                                                    sizeof(struct
                                                           dma_extended_desc),
                                                    &priv->dma_tx_phy,
                                                    GFP_KERNEL);
                if (!priv->dma_etx) {
                        dma_free_coherent(priv->device, DMA_RX_SIZE *
                                          sizeof(struct dma_extended_desc),
                                          priv->dma_erx, priv->dma_rx_phy);
                        goto err_dma;
                }
        } else {
                priv->dma_rx = dma_zalloc_coherent(priv->device, DMA_RX_SIZE *
                                                   sizeof(struct dma_desc),
                                                   &priv->dma_rx_phy,
                                                   GFP_KERNEL);
                if (!priv->dma_rx)
                        goto err_dma;

                priv->dma_tx = dma_zalloc_coherent(priv->device, DMA_TX_SIZE *
                                                   sizeof(struct dma_desc),
                                                   &priv->dma_tx_phy,
                                                   GFP_KERNEL);
                if (!priv->dma_tx) {
                        dma_free_coherent(priv->device, DMA_RX_SIZE *
                                          sizeof(struct dma_desc),
                                          priv->dma_rx, priv->dma_rx_phy);
                        goto err_dma;
                }
        }

        return 0;

err_dma:
        kfree(priv->tx_skbuff);
err_tx_skbuff:
        kfree(priv->tx_skbuff_dma);
err_tx_skbuff_dma:
        kfree(priv->rx_skbuff);
err_rx_skbuff:
        kfree(priv->rx_skbuff_dma);
        return ret;
}

static void free_dma_desc_resources(struct stmmac_priv *priv)
{
        /* Release the DMA TX/RX socket buffers */
        dma_free_rx_skbufs(priv);
        dma_free_tx_skbufs(priv);

        /* Free DMA regions of consistent memory previously allocated */
        if (!priv->extend_desc) {
                dma_free_coherent(priv->device,
                                  DMA_TX_SIZE * sizeof(struct dma_desc),
                                  priv->dma_tx, priv->dma_tx_phy);
                dma_free_coherent(priv->device,
                                  DMA_RX_SIZE * sizeof(struct dma_desc),
                                  priv->dma_rx, priv->dma_rx_phy);
        } else {
                dma_free_coherent(priv->device, DMA_TX_SIZE *
                                  sizeof(struct dma_extended_desc),
                                  priv->dma_etx, priv->dma_tx_phy);
                dma_free_coherent(priv->device, DMA_RX_SIZE *
                                  sizeof(struct dma_extended_desc),
                                  priv->dma_erx, priv->dma_rx_phy);
        }
        kfree(priv->rx_skbuff_dma);
        kfree(priv->rx_skbuff);
        kfree(priv->tx_skbuff_dma);
        kfree(priv->tx_skbuff);
}

/**
 *  stmmac_mac_enable_rx_queues - Enable MAC rx queues
 *  @priv: driver private structure
 *  Description: It is used for enabling the rx queues in the MAC
 */
static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
{
        u32 rx_queues_count = priv->plat->rx_queues_to_use;
        int queue;
        u8 mode;

        for (queue = 0; queue < rx_queues_count; queue++) {
                mode = priv->plat->rx_queues_cfg[queue].mode_to_use;
                priv->hw->mac->rx_queue_enable(priv->hw, mode, queue);
        }
}

/**
 * stmmac_start_rx_dma - start RX DMA channel
 * @priv: driver private structure
 * @chan: RX channel index
 * Description:
 * This starts a RX DMA channel
 */
static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan)
{
        netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan);
        priv->hw->dma->start_rx(priv->ioaddr, chan);
}

/**
 * stmmac_start_tx_dma - start TX DMA channel
 * @priv: driver private structure
 * @chan: TX channel index
 * Description:
 * This starts a TX DMA channel
 */
static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan)
{
        netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan);
        priv->hw->dma->start_tx(priv->ioaddr, chan);
}

/**
 * stmmac_stop_rx_dma - stop RX DMA channel
 * @priv: driver private structure
 * @chan: RX channel index
 * Description:
 * This stops a RX DMA channel
 */
static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan)
{
        netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan);
        priv->hw->dma->stop_rx(priv->ioaddr, chan);
}

/**
 * stmmac_stop_tx_dma - stop TX DMA channel
 * @priv: driver private structure
 * @chan: TX channel index
 * Description:
 * This stops a TX DMA channel
 */
static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan)
{
        netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan);
        priv->hw->dma->stop_tx(priv->ioaddr, chan);
}

/**
 * stmmac_start_all_dma - start all RX and TX DMA channels
 * @priv: driver private structure
 * Description:
 * This starts all the RX and TX DMA channels
 */
static void stmmac_start_all_dma(struct stmmac_priv *priv)
{
        u32 rx_channels_count = priv->plat->rx_queues_to_use;
        u32 tx_channels_count = priv->plat->tx_queues_to_use;
        u32 chan = 0;

        for (chan = 0; chan < rx_channels_count; chan++)
                stmmac_start_rx_dma(priv, chan);

        for (chan = 0; chan < tx_channels_count; chan++)
                stmmac_start_tx_dma(priv, chan);
}

/**
 * stmmac_stop_all_dma - stop all RX and TX DMA channels
 * @priv: driver private structure
 * Description:
 * This stops the RX and TX DMA channels
 */
static void stmmac_stop_all_dma(struct stmmac_priv *priv)
{
        u32 rx_channels_count = priv->plat->rx_queues_to_use;
        u32 tx_channels_count = priv->plat->tx_queues_to_use;
        u32 chan = 0;

        for (chan = 0; chan < rx_channels_count; chan++)
                stmmac_stop_rx_dma(priv, chan);

        for (chan = 0; chan < tx_channels_count; chan++)
                stmmac_stop_tx_dma(priv, chan);
}
1369
1370 /**
1371  *  stmmac_dma_operation_mode - HW DMA operation mode
1372  *  @priv: driver private structure
1373  *  Description: it is used for configuring the DMA operation mode register in
1374  *  order to program the tx/rx DMA thresholds or Store-And-Forward mode.
1375  */
1376 static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
1377 {
1378         u32 rx_channels_count = priv->plat->rx_queues_to_use;
1379         u32 tx_channels_count = priv->plat->tx_queues_to_use;
1380         int rxfifosz = priv->plat->rx_fifo_size;
1381         u32 txmode = 0;
1382         u32 rxmode = 0;
1383         u32 chan = 0;
1384
1385         if (rxfifosz == 0)
1386                 rxfifosz = priv->dma_cap.rx_fifo_size;
1387
1388         if (priv->plat->force_thresh_dma_mode) {
1389                 txmode = tc;
1390                 rxmode = tc;
1391         } else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
1392                 /*
1393                  * In case of GMAC, SF mode can be enabled
1394                  * to perform the TX COE in HW. This depends on:
1395                  * 1) TX COE actually being supported
1396                  * 2) there being no buggy Jumbo frame support
1397                  *    that requires the csum not be inserted in the TDES.
1398                  */
1399                 txmode = SF_DMA_MODE;
1400                 rxmode = SF_DMA_MODE;
1401                 priv->xstats.threshold = SF_DMA_MODE;
1402         } else {
1403                 txmode = tc;
1404                 rxmode = SF_DMA_MODE;
1405         }
1406
1407         /* configure all channels */
1408         if (priv->synopsys_id >= DWMAC_CORE_4_00) {
1409                 for (chan = 0; chan < rx_channels_count; chan++)
1410                         priv->hw->dma->dma_rx_mode(priv->ioaddr, rxmode, chan,
1411                                                    rxfifosz);
1412
1413                 for (chan = 0; chan < tx_channels_count; chan++)
1414                         priv->hw->dma->dma_tx_mode(priv->ioaddr, txmode, chan);
1415         } else {
1416                 priv->hw->dma->dma_mode(priv->ioaddr, txmode, rxmode,
1417                                         rxfifosz);
1418         }
1419 }
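
/*
 * Decision sketch (illustrative only): the (txmode, rxmode) pair chosen
 * above as a function of the platform flags, with "tc" being the module
 * parameter threshold (default 64):
 *
 *	force_thresh_dma_mode:        txmode = tc,          rxmode = tc
 *	force_sf_dma_mode or tx_coe:  txmode = SF_DMA_MODE, rxmode = SF_DMA_MODE
 *	otherwise:                    txmode = tc,          rxmode = SF_DMA_MODE
 *
 * On DWMAC_CORE_4_00 and newer the modes are programmed per channel;
 * older cores use a single global DMA mode register.
 */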
1420
1421 /**
1422  * stmmac_tx_clean - to manage the transmission completion
1423  * @priv: driver private structure
1424  * Description: it reclaims the transmit resources after transmission completes.
1425  */
1426 static void stmmac_tx_clean(struct stmmac_priv *priv)
1427 {
1428         unsigned int bytes_compl = 0, pkts_compl = 0;
1429         unsigned int entry = priv->dirty_tx;
1430
1431         netif_tx_lock(priv->dev);
1432
1433         priv->xstats.tx_clean++;
1434
1435         while (entry != priv->cur_tx) {
1436                 struct sk_buff *skb = priv->tx_skbuff[entry];
1437                 struct dma_desc *p;
1438                 int status;
1439
1440                 if (priv->extend_desc)
1441                         p = (struct dma_desc *)(priv->dma_etx + entry);
1442                 else
1443                         p = priv->dma_tx + entry;
1444
1445                 status = priv->hw->desc->tx_status(&priv->dev->stats,
1446                                                       &priv->xstats, p,
1447                                                       priv->ioaddr);
1448                 /* Check if the descriptor is owned by the DMA */
1449                 if (unlikely(status & tx_dma_own))
1450                         break;
1451
1452                 /* Just consider the last segment and ...*/
1453                 if (likely(!(status & tx_not_ls))) {
1454                         /* ... verify the status error condition */
1455                         if (unlikely(status & tx_err)) {
1456                                 priv->dev->stats.tx_errors++;
1457                         } else {
1458                                 priv->dev->stats.tx_packets++;
1459                                 priv->xstats.tx_pkt_n++;
1460                         }
1461                         stmmac_get_tx_hwtstamp(priv, p, skb);
1462                 }
1463
1464                 if (likely(priv->tx_skbuff_dma[entry].buf)) {
1465                         if (priv->tx_skbuff_dma[entry].map_as_page)
1466                                 dma_unmap_page(priv->device,
1467                                                priv->tx_skbuff_dma[entry].buf,
1468                                                priv->tx_skbuff_dma[entry].len,
1469                                                DMA_TO_DEVICE);
1470                         else
1471                                 dma_unmap_single(priv->device,
1472                                                  priv->tx_skbuff_dma[entry].buf,
1473                                                  priv->tx_skbuff_dma[entry].len,
1474                                                  DMA_TO_DEVICE);
1475                         priv->tx_skbuff_dma[entry].buf = 0;
1476                         priv->tx_skbuff_dma[entry].len = 0;
1477                         priv->tx_skbuff_dma[entry].map_as_page = false;
1478                 }
1479
1480                 if (priv->hw->mode->clean_desc3)
1481                         priv->hw->mode->clean_desc3(priv, p);
1482
1483                 priv->tx_skbuff_dma[entry].last_segment = false;
1484                 priv->tx_skbuff_dma[entry].is_jumbo = false;
1485
1486                 if (likely(skb != NULL)) {
1487                         pkts_compl++;
1488                         bytes_compl += skb->len;
1489                         dev_consume_skb_any(skb);
1490                         priv->tx_skbuff[entry] = NULL;
1491                 }
1492
1493                 priv->hw->desc->release_tx_desc(p, priv->mode);
1494
1495                 entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
1496         }
1497         priv->dirty_tx = entry;
1498
1499         netdev_completed_queue(priv->dev, pkts_compl, bytes_compl);
1500
1501         if (unlikely(netif_queue_stopped(priv->dev) &&
1502             stmmac_tx_avail(priv) > STMMAC_TX_THRESH)) {
1503                 netif_dbg(priv, tx_done, priv->dev,
1504                           "%s: restart transmit\n", __func__);
1505                 netif_wake_queue(priv->dev);
1506         }
1507
1508         if ((priv->eee_enabled) && (!priv->tx_path_in_lpi_mode)) {
1509                 stmmac_enable_eee_mode(priv);
1510                 mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
1511         }
1512         netif_tx_unlock(priv->dev);
1513 }
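
/*
 * Ring-index arithmetic sketch (illustrative, mirroring the helpers used
 * above): cur_tx is the producer index, dirty_tx the consumer index, and
 * STMMAC_GET_ENTRY() advances an index modulo the ring size:
 *
 *	entry = (entry + 1) & (DMA_TX_SIZE - 1);
 *
 * so the free-slot count compared against STMMAC_TX_THRESH is roughly:
 *
 *	avail = (dirty_tx > cur_tx) ?
 *		dirty_tx - cur_tx - 1 :
 *		DMA_TX_SIZE - cur_tx + dirty_tx - 1;
 */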
1514
1515 static inline void stmmac_enable_dma_irq(struct stmmac_priv *priv, u32 chan)
1516 {
1517         priv->hw->dma->enable_dma_irq(priv->ioaddr, chan);
1518 }
1519
1520 static inline void stmmac_disable_dma_irq(struct stmmac_priv *priv, u32 chan)
1521 {
1522         priv->hw->dma->disable_dma_irq(priv->ioaddr, chan);
1523 }
1524
1525 /**
1526  * stmmac_tx_err - to manage the tx error
1527  * @priv: driver private structure
1528  * @chan: channel index
1529  * Description: it cleans the descriptors and restarts the transmission
1530  * in case of transmission errors.
1531  */
1532 static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
1533 {
1534         int i;

1535         netif_stop_queue(priv->dev);
1536
1537         stmmac_stop_tx_dma(priv, chan);
1538         dma_free_tx_skbufs(priv);
1539         for (i = 0; i < DMA_TX_SIZE; i++)
1540                 if (priv->extend_desc)
1541                         priv->hw->desc->init_tx_desc(&priv->dma_etx[i].basic,
1542                                                      priv->mode,
1543                                                      (i == DMA_TX_SIZE - 1));
1544                 else
1545                         priv->hw->desc->init_tx_desc(&priv->dma_tx[i],
1546                                                      priv->mode,
1547                                                      (i == DMA_TX_SIZE - 1));
1548         priv->dirty_tx = 0;
1549         priv->cur_tx = 0;
1550         netdev_reset_queue(priv->dev);
1551         stmmac_start_tx_dma(priv, chan);
1552
1553         priv->dev->stats.tx_errors++;
1554         netif_wake_queue(priv->dev);
1555 }
1556
1557 /**
1558  *  stmmac_set_dma_operation_mode - Set DMA operation mode by channel
1559  *  @priv: driver private structure
1560  *  @txmode: TX operating mode
1561  *  @rxmode: RX operating mode
1562  *  @chan: channel index
1563  *  Description: it is used for configuring the DMA operation mode at
1564  *  runtime in order to program the tx/rx DMA thresholds or Store-And-Forward
1565  *  mode.
1566  */
1567 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
1568                                           u32 rxmode, u32 chan)
1569 {
1570         int rxfifosz = priv->plat->rx_fifo_size;
1571
1572         if (rxfifosz == 0)
1573                 rxfifosz = priv->dma_cap.rx_fifo_size;
1574
1575         if (priv->synopsys_id >= DWMAC_CORE_4_00) {
1576                 priv->hw->dma->dma_rx_mode(priv->ioaddr, rxmode, chan,
1577                                            rxfifosz);
1578                 priv->hw->dma->dma_tx_mode(priv->ioaddr, txmode, chan);
1579         } else {
1580                 priv->hw->dma->dma_mode(priv->ioaddr, txmode, rxmode,
1581                                         rxfifosz);
1582         }
1583 }
1584
1585 /**
1586  * stmmac_dma_interrupt - DMA ISR
1587  * @priv: driver private structure
1588  * Description: this is the DMA ISR. It is called by the main ISR.
1589  * It invokes the dwmac dma routine and schedules the NAPI poll method if
1590  * there is work to be done.
1591  */
1592 static void stmmac_dma_interrupt(struct stmmac_priv *priv)
1593 {
1594         u32 tx_channel_count = priv->plat->tx_queues_to_use;
1595         int status;
1596         u32 chan;
1597
1598         for (chan = 0; chan < tx_channel_count; chan++) {
1599                 status = priv->hw->dma->dma_interrupt(priv->ioaddr,
1600                                                       &priv->xstats, chan);
1601                 if (likely((status & handle_rx)) || (status & handle_tx)) {
1602                         if (likely(napi_schedule_prep(&priv->napi))) {
1603                                 stmmac_disable_dma_irq(priv, chan);
1604                                 __napi_schedule(&priv->napi);
1605                         }
1606                 }
1607
1608                 if (unlikely(status & tx_hard_error_bump_tc)) {
1609                         /* Try to bump up the dma threshold on this failure */
1610                         if (unlikely(priv->xstats.threshold != SF_DMA_MODE) &&
1611                             (tc <= 256)) {
1612                                 tc += 64;
1613                                 if (priv->plat->force_thresh_dma_mode)
1614                                         stmmac_set_dma_operation_mode(priv,
1615                                                                       tc,
1616                                                                       tc,
1617                                                                       chan);
1618                                 else
1619                                         stmmac_set_dma_operation_mode(priv,
1620                                                                     tc,
1621                                                                     SF_DMA_MODE,
1622                                                                     chan);
1623                                 priv->xstats.threshold = tc;
1624                         }
1625                 } else if (unlikely(status == tx_hard_error)) {
1626                         stmmac_tx_err(priv, chan);
1627                 }
1628         }
1629 }
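
/*
 * Worked example (illustrative): with the default module parameter
 * tc = 64, repeated tx_hard_error_bump_tc events walk the threshold
 * 64 -> 128 -> 192 -> 256 -> 320, after which the (tc <= 256) guard
 * above stops further bumps.  Each step reprograms the TX threshold via
 * stmmac_set_dma_operation_mode() and records it in xstats.threshold.
 */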
1630
1631 /**
1632  * stmmac_mmc_setup - setup the Mac Management Counters (MMC)
1633  * @priv: driver private structure
1634  * Description: this masks the MMC irq because the counters are managed in SW.
1635  */
1636 static void stmmac_mmc_setup(struct stmmac_priv *priv)
1637 {
1638         unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
1639                             MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
1640
1641         if (priv->synopsys_id >= DWMAC_CORE_4_00) {
1642                 priv->ptpaddr = priv->ioaddr + PTP_GMAC4_OFFSET;
1643                 priv->mmcaddr = priv->ioaddr + MMC_GMAC4_OFFSET;
1644         } else {
1645                 priv->ptpaddr = priv->ioaddr + PTP_GMAC3_X_OFFSET;
1646                 priv->mmcaddr = priv->ioaddr + MMC_GMAC3_X_OFFSET;
1647         }
1648
1649         dwmac_mmc_intr_all_mask(priv->mmcaddr);
1650
1651         if (priv->dma_cap.rmon) {
1652                 dwmac_mmc_ctrl(priv->mmcaddr, mode);
1653                 memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
1654         } else {
1655                 netdev_info(priv->dev, "No MAC Management Counters available\n");
        }
1656 }
1657
1658 /**
1659  * stmmac_selec_desc_mode - to select among normal/alternate/extended descriptors
1660  * @priv: driver private structure
1661  * Description: select the Enhanced/Alternate or Normal descriptors.
1662  * In case of Enhanced/Alternate, it checks if the extended descriptors are
1663  * supported by the HW capability register.
1664  */
1665 static void stmmac_selec_desc_mode(struct stmmac_priv *priv)
1666 {
1667         if (priv->plat->enh_desc) {
1668                 dev_info(priv->device, "Enhanced/Alternate descriptors\n");
1669
1670                 /* GMAC older than 3.50 has no extended descriptors */
1671                 if (priv->synopsys_id >= DWMAC_CORE_3_50) {
1672                         dev_info(priv->device, "Enabled extended descriptors\n");
1673                         priv->extend_desc = 1;
1674                 } else {
1675                         dev_warn(priv->device, "Extended descriptors not supported\n");
                }
1676
1677                 priv->hw->desc = &enh_desc_ops;
1678         } else {
1679                 dev_info(priv->device, "Normal descriptors\n");
1680                 priv->hw->desc = &ndesc_ops;
1681         }
1682 }
1683
1684 /**
1685  * stmmac_get_hw_features - get MAC capabilities from the HW cap. register.
1686  * @priv: driver private structure
1687  * Description:
1688  *  newer GMAC chip generations have a register to indicate the
1689  *  presence of optional features/functions.
1690  *  This can also be used to override the values passed through the
1691  *  platform, which is necessary for old MAC10/100 and GMAC chips.
1692  */
1693 static int stmmac_get_hw_features(struct stmmac_priv *priv)
1694 {
1695         u32 ret = 0;
1696
1697         if (priv->hw->dma->get_hw_feature) {
1698                 priv->hw->dma->get_hw_feature(priv->ioaddr,
1699                                               &priv->dma_cap);
1700                 ret = 1;
1701         }
1702
1703         return ret;
1704 }
1705
1706 /**
1707  * stmmac_check_ether_addr - check if the MAC addr is valid
1708  * @priv: driver private structure
1709  * Description:
1710  * it verifies that the MAC address is valid; in case of failure, it
1711  * generates a random MAC address
1712  */
1713 static void stmmac_check_ether_addr(struct stmmac_priv *priv)
1714 {
1715         if (!is_valid_ether_addr(priv->dev->dev_addr)) {
1716                 priv->hw->mac->get_umac_addr(priv->hw,
1717                                              priv->dev->dev_addr, 0);
1718                 if (!is_valid_ether_addr(priv->dev->dev_addr))
1719                         eth_hw_addr_random(priv->dev);
1720                 netdev_info(priv->dev, "device MAC address %pM\n",
1721                             priv->dev->dev_addr);
1722         }
1723 }
1724
1725 /**
1726  * stmmac_init_dma_engine - DMA init.
1727  * @priv: driver private structure
1728  * Description:
1729  * It inits the DMA invoking the specific MAC/GMAC callback.
1730  * Some DMA parameters can be passed from the platform;
1731  * if these are not passed, a default is kept for the MAC or GMAC.
1732  */
1733 static int stmmac_init_dma_engine(struct stmmac_priv *priv)
1734 {
1735         u32 rx_channels_count = priv->plat->rx_queues_to_use;
1736         u32 tx_channels_count = priv->plat->tx_queues_to_use;
1737         u32 dummy_dma_rx_phy = 0;
1738         u32 dummy_dma_tx_phy = 0;
1739         u32 chan = 0;
1740         int atds = 0;
1741         int ret = 0;
1742
1743         if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) {
1744                 dev_err(priv->device, "Invalid DMA configuration\n");
1745                 return -EINVAL;
1746         }
1747
1748         if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
1749                 atds = 1;
1750
1751         ret = priv->hw->dma->reset(priv->ioaddr);
1752         if (ret) {
1753                 dev_err(priv->device, "Failed to reset the dma\n");
1754                 return ret;
1755         }
1756
1757         if (priv->synopsys_id >= DWMAC_CORE_4_00) {
1758                 /* DMA Configuration */
1759                 priv->hw->dma->init(priv->ioaddr, priv->plat->dma_cfg,
1760                                     dummy_dma_tx_phy, dummy_dma_rx_phy, atds);
1761
1762                 /* DMA RX Channel Configuration */
1763                 for (chan = 0; chan < rx_channels_count; chan++) {
1764                         priv->hw->dma->init_rx_chan(priv->ioaddr,
1765                                                     priv->plat->dma_cfg,
1766                                                     priv->dma_rx_phy, chan);
1767
1768                         priv->rx_tail_addr = priv->dma_rx_phy +
1769                                     (DMA_RX_SIZE * sizeof(struct dma_desc));
1770                         priv->hw->dma->set_rx_tail_ptr(priv->ioaddr,
1771                                                        priv->rx_tail_addr,
1772                                                        chan);
1773                 }
1774
1775                 /* DMA TX Channel Configuration */
1776                 for (chan = 0; chan < tx_channels_count; chan++) {
1777                         priv->hw->dma->init_chan(priv->ioaddr,
1778                                                         priv->plat->dma_cfg,
1779                                                         chan);
1780
1781                         priv->hw->dma->init_tx_chan(priv->ioaddr,
1782                                                     priv->plat->dma_cfg,
1783                                                     priv->dma_tx_phy, chan);
1784
1785                         priv->tx_tail_addr = priv->dma_tx_phy +
1786                                     (DMA_TX_SIZE * sizeof(struct dma_desc));
1787                         priv->hw->dma->set_tx_tail_ptr(priv->ioaddr,
1788                                                        priv->tx_tail_addr,
1789                                                        chan);
1790                 }
1791         } else {
1792                 priv->hw->dma->init(priv->ioaddr, priv->plat->dma_cfg,
1793                                     priv->dma_tx_phy, priv->dma_rx_phy, atds);
1794         }
1795
1796         if (priv->plat->axi && priv->hw->dma->axi)
1797                 priv->hw->dma->axi(priv->ioaddr, priv->plat->axi);
1798
1799         return ret;
1800 }
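
/*
 * Tail-pointer arithmetic sketch (illustrative): on GMAC4 each channel
 * tail pointer is programmed one descriptor past the end of its ring.
 * Assuming the default DMA_RX_SIZE of 512 and 16-byte normal descriptors:
 *
 *	rx_tail_addr = dma_rx_phy + 512 * sizeof(struct dma_desc)
 *	             = dma_rx_phy + 8192
 *
 * The DMA engine fetches descriptors up to the programmed tail address.
 */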
1801
1802 /**
1803  * stmmac_tx_timer - mitigation sw timer for tx.
1804  * @data: data pointer
1805  * Description:
1806  * This is the timer handler to directly invoke the stmmac_tx_clean.
1807  */
1808 static void stmmac_tx_timer(unsigned long data)
1809 {
1810         struct stmmac_priv *priv = (struct stmmac_priv *)data;
1811
1812         stmmac_tx_clean(priv);
1813 }
1814
1815 /**
1816  * stmmac_init_tx_coalesce - init tx mitigation options.
1817  * @priv: driver private structure
1818  * Description:
1819  * This inits the transmit coalesce parameters: i.e. timer rate,
1820  * timer handler and default threshold used for enabling the
1821  * interrupt on completion bit.
1822  */
1823 static void stmmac_init_tx_coalesce(struct stmmac_priv *priv)
1824 {
1825         priv->tx_coal_frames = STMMAC_TX_FRAMES;
1826         priv->tx_coal_timer = STMMAC_COAL_TX_TIMER;
1827         init_timer(&priv->txtimer);
1828         priv->txtimer.expires = STMMAC_COAL_TIMER(priv->tx_coal_timer);
1829         priv->txtimer.data = (unsigned long)priv;
1830         priv->txtimer.function = stmmac_tx_timer;
1831         add_timer(&priv->txtimer);
1832 }
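
/*
 * Interaction sketch (illustrative): TX completion interrupts are
 * mitigated two ways.  The xmit paths set the IC (interrupt on
 * completion) bit only once every tx_coal_frames frames; in between,
 * this sw timer fires after tx_coal_timer usecs and invokes
 * stmmac_tx_clean() directly:
 *
 *	if (priv->tx_coal_frames > priv->tx_count_frames)
 *		mod_timer(&priv->txtimer,
 *			  STMMAC_COAL_TIMER(priv->tx_coal_timer));
 *	else
 *		priv->hw->desc->set_tx_ic(desc);  (and the count is reset)
 */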
1833
1834 static void stmmac_set_rings_length(struct stmmac_priv *priv)
1835 {
1836         u32 rx_channels_count = priv->plat->rx_queues_to_use;
1837         u32 tx_channels_count = priv->plat->tx_queues_to_use;
1838         u32 chan;
1839
1840         /* set TX ring length */
1841         if (priv->hw->dma->set_tx_ring_len) {
1842                 for (chan = 0; chan < tx_channels_count; chan++)
1843                         priv->hw->dma->set_tx_ring_len(priv->ioaddr,
1844                                                        (DMA_TX_SIZE - 1), chan);
1845         }
1846
1847         /* set RX ring length */
1848         if (priv->hw->dma->set_rx_ring_len) {
1849                 for (chan = 0; chan < rx_channels_count; chan++)
1850                         priv->hw->dma->set_rx_ring_len(priv->ioaddr,
1851                                                        (DMA_RX_SIZE - 1), chan);
1852         }
1853 }
1854
1855 /**
1856  *  stmmac_set_tx_queue_weight - Set TX queue weight
1857  *  @priv: driver private structure
1858  *  Description: It is used for setting the TX queue weights
1859  */
1860 static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv)
1861 {
1862         u32 tx_queues_count = priv->plat->tx_queues_to_use;
1863         u32 weight;
1864         u32 queue;
1865
1866         for (queue = 0; queue < tx_queues_count; queue++) {
1867                 weight = priv->plat->tx_queues_cfg[queue].weight;
1868                 priv->hw->mac->set_mtl_tx_queue_weight(priv->hw, weight, queue);
1869         }
1870 }
1871
1872 /**
1873  *  stmmac_configure_cbs - Configure CBS in TX queue
1874  *  @priv: driver private structure
1875  *  Description: It is used for configuring CBS in AVB TX queues
1876  */
1877 static void stmmac_configure_cbs(struct stmmac_priv *priv)
1878 {
1879         u32 tx_queues_count = priv->plat->tx_queues_to_use;
1880         u32 mode_to_use;
1881         u32 queue;
1882
1883         /* queue 0 is reserved for legacy traffic */
1884         for (queue = 1; queue < tx_queues_count; queue++) {
1885                 mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
1886                 if (mode_to_use == MTL_QUEUE_DCB)
1887                         continue;
1888
1889                 priv->hw->mac->config_cbs(priv->hw,
1890                                 priv->plat->tx_queues_cfg[queue].send_slope,
1891                                 priv->plat->tx_queues_cfg[queue].idle_slope,
1892                                 priv->plat->tx_queues_cfg[queue].high_credit,
1893                                 priv->plat->tx_queues_cfg[queue].low_credit,
1894                                 queue);
1895         }
1896 }
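
/*
 * Illustrative platform configuration (placeholder values): a TX queue
 * takes part in CBS when its mode_to_use selects AVB, and the four CBS
 * parameters are raw register encodings whose scaling is core-specific
 * (see the config_cbs callback of the MAC in use):
 *
 *	plat->tx_queues_cfg[1].mode_to_use = MTL_QUEUE_AVB;
 *	plat->tx_queues_cfg[1].send_slope  = 0x0;
 *	plat->tx_queues_cfg[1].idle_slope  = 0x0;
 *	plat->tx_queues_cfg[1].high_credit = 0x0;
 *	plat->tx_queues_cfg[1].low_credit  = 0x0;
 *
 * Queue 0 is always left to legacy (DCB) traffic, which is why the loop
 * above starts from queue 1.
 */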
1897
1898 /**
1899  *  stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel
1900  *  @priv: driver private structure
1901  *  Description: It is used for mapping RX queues to RX dma channels
1902  */
1903 static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv)
1904 {
1905         u32 rx_queues_count = priv->plat->rx_queues_to_use;
1906         u32 queue;
1907         u32 chan;
1908
1909         for (queue = 0; queue < rx_queues_count; queue++) {
1910                 chan = priv->plat->rx_queues_cfg[queue].chan;
1911                 priv->hw->mac->map_mtl_to_dma(priv->hw, queue, chan);
1912         }
1913 }
1914
1915 /**
1916  *  stmmac_mac_config_rx_queues_prio - Configure RX Queue priority
1917  *  @priv: driver private structure
1918  *  Description: It is used for configuring the RX Queue Priority
1919  */
1920 static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv)
1921 {
1922         u32 rx_queues_count = priv->plat->rx_queues_to_use;
1923         u32 queue;
1924         u32 prio;
1925
1926         for (queue = 0; queue < rx_queues_count; queue++) {
1927                 if (!priv->plat->rx_queues_cfg[queue].use_prio)
1928                         continue;
1929
1930                 prio = priv->plat->rx_queues_cfg[queue].prio;
1931                 priv->hw->mac->rx_queue_prio(priv->hw, prio, queue);
1932         }
1933 }
1934
1935 /**
1936  *  stmmac_mac_config_tx_queues_prio - Configure TX Queue priority
1937  *  @priv: driver private structure
1938  *  Description: It is used for configuring the TX Queue Priority
1939  */
1940 static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv)
1941 {
1942         u32 tx_queues_count = priv->plat->tx_queues_to_use;
1943         u32 queue;
1944         u32 prio;
1945
1946         for (queue = 0; queue < tx_queues_count; queue++) {
1947                 if (!priv->plat->tx_queues_cfg[queue].use_prio)
1948                         continue;
1949
1950                 prio = priv->plat->tx_queues_cfg[queue].prio;
1951                 priv->hw->mac->tx_queue_prio(priv->hw, prio, queue);
1952         }
1953 }
1954
1955 /**
1956  *  stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing
1957  *  @priv: driver private structure
1958  *  Description: It is used for configuring the RX queue routing
1959  */
1960 static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv)
1961 {
1962         u32 rx_queues_count = priv->plat->rx_queues_to_use;
1963         u32 queue;
1964         u8 packet;
1965
1966         for (queue = 0; queue < rx_queues_count; queue++) {
1967                 /* no specific packet type routing specified for the queue */
1968                 if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0)
1969                         continue;
1970
1971                 packet = priv->plat->rx_queues_cfg[queue].pkt_route;
1972                 priv->hw->mac->rx_queue_routing(priv->hw, packet, queue);
1973         }
1974 }
1975
1976 /**
1977  *  stmmac_mtl_configuration - Configure MTL
1978  *  @priv: driver private structure
1979  *  Description: It is used for configuring MTL
1980  */
1981 static void stmmac_mtl_configuration(struct stmmac_priv *priv)
1982 {
1983         u32 rx_queues_count = priv->plat->rx_queues_to_use;
1984         u32 tx_queues_count = priv->plat->tx_queues_to_use;
1985
1986         if (tx_queues_count > 1 && priv->hw->mac->set_mtl_tx_queue_weight)
1987                 stmmac_set_tx_queue_weight(priv);
1988
1989         /* Configure MTL RX algorithms */
1990         if (rx_queues_count > 1 && priv->hw->mac->prog_mtl_rx_algorithms)
1991                 priv->hw->mac->prog_mtl_rx_algorithms(priv->hw,
1992                                                 priv->plat->rx_sched_algorithm);
1993
1994         /* Configure MTL TX algorithms */
1995         if (tx_queues_count > 1 && priv->hw->mac->prog_mtl_tx_algorithms)
1996                 priv->hw->mac->prog_mtl_tx_algorithms(priv->hw,
1997                                                 priv->plat->tx_sched_algorithm);
1998
1999         /* Configure CBS in AVB TX queues */
2000         if (tx_queues_count > 1 && priv->hw->mac->config_cbs)
2001                 stmmac_configure_cbs(priv);
2002
2003         /* Map RX MTL to DMA channels */
2004         if (rx_queues_count > 1 && priv->hw->mac->map_mtl_to_dma)
2005                 stmmac_rx_queue_dma_chan_map(priv);
2006
2007         /* Enable MAC RX Queues */
2008         if (priv->hw->mac->rx_queue_enable)
2009                 stmmac_mac_enable_rx_queues(priv);
2010
2011         /* Set RX priorities */
2012         if (rx_queues_count > 1 && priv->hw->mac->rx_queue_prio)
2013                 stmmac_mac_config_rx_queues_prio(priv);
2014
2015         /* Set TX priorities */
2016         if (tx_queues_count > 1 && priv->hw->mac->tx_queue_prio)
2017                 stmmac_mac_config_tx_queues_prio(priv);
2018
2019         /* Set RX routing */
2020         if (rx_queues_count > 1 && priv->hw->mac->rx_queue_routing)
2021                 stmmac_mac_config_rx_queues_routing(priv);
2022 }
2023
2024 /**
2025  * stmmac_hw_setup - setup mac in a usable state.
2026  *  @dev : pointer to the device structure.
 *  @init_ptp: initialize PTP if set
2027  *  Description:
2028  *  this is the main function to setup the HW in a usable state: the dma
2029  *  engine is reset, the core registers are configured (e.g. AXI,
2030  *  Checksum features, timers) and the DMA is ready to start receiving
2031  *  and transmitting.
2032  *  Return value:
2033  *  0 on success and an appropriate (-)ve integer as defined in errno.h
2034  *  file on failure.
2035  */
2036 static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
2037 {
2038         struct stmmac_priv *priv = netdev_priv(dev);
2039         u32 rx_cnt = priv->plat->rx_queues_to_use;
2040         u32 tx_cnt = priv->plat->tx_queues_to_use;
2041         u32 chan;
2042         int ret;
2043
2044         /* DMA initialization and SW reset */
2045         ret = stmmac_init_dma_engine(priv);
2046         if (ret < 0) {
2047                 netdev_err(priv->dev, "%s: DMA engine initialization failed\n",
2048                            __func__);
2049                 return ret;
2050         }
2051
2052         /* Copy the MAC addr into the HW  */
2053         priv->hw->mac->set_umac_addr(priv->hw, dev->dev_addr, 0);
2054
2055         /* PS and related bits will be programmed according to the speed */
2056         if (priv->hw->pcs) {
2057                 int speed = priv->plat->mac_port_sel_speed;
2058
2059                 if ((speed == SPEED_10) || (speed == SPEED_100) ||
2060                     (speed == SPEED_1000)) {
2061                         priv->hw->ps = speed;
2062                 } else {
2063                         dev_warn(priv->device, "invalid port speed\n");
2064                         priv->hw->ps = 0;
2065                 }
2066         }
2067
2068         /* Initialize the MAC Core */
2069         priv->hw->mac->core_init(priv->hw, dev->mtu);
2070
2071         /* Initialize MTL */
2072         if (priv->synopsys_id >= DWMAC_CORE_4_00)
2073                 stmmac_mtl_configuration(priv);
2074
2075         ret = priv->hw->mac->rx_ipc(priv->hw);
2076         if (!ret) {
2077                 netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n");
2078                 priv->plat->rx_coe = STMMAC_RX_COE_NONE;
2079                 priv->hw->rx_csum = 0;
2080         }
2081
2082         /* Enable the MAC Rx/Tx */
2083         priv->hw->mac->set_mac(priv->ioaddr, true);
2084
2085         /* Set the HW DMA mode and the COE */
2086         stmmac_dma_operation_mode(priv);
2087
2088         stmmac_mmc_setup(priv);
2089
2090         if (init_ptp) {
2091                 ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
2092                 if (ret < 0)
2093                         netdev_warn(priv->dev, "failed to enable PTP reference clock: %d\n", ret);
2094
2095                 ret = stmmac_init_ptp(priv);
2096                 if (ret == -EOPNOTSUPP)
2097                         netdev_warn(priv->dev, "PTP not supported by HW\n");
2098                 else if (ret)
2099                         netdev_warn(priv->dev, "PTP init failed\n");
2100         }
2101
2102 #ifdef CONFIG_DEBUG_FS
2103         ret = stmmac_init_fs(dev);
2104         if (ret < 0)
2105                 netdev_warn(priv->dev, "%s: failed debugFS registration\n",
2106                             __func__);
2107 #endif
2108         /* Start the ball rolling... */
2109         stmmac_start_all_dma(priv);
2110
2111         priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS;
2112
2113         if ((priv->use_riwt) && (priv->hw->dma->rx_watchdog)) {
2114                 priv->rx_riwt = MAX_DMA_RIWT;
2115                 priv->hw->dma->rx_watchdog(priv->ioaddr, MAX_DMA_RIWT, rx_cnt);
2116         }
2117
2118         if (priv->hw->pcs && priv->hw->mac->pcs_ctrl_ane)
2119                 priv->hw->mac->pcs_ctrl_ane(priv->hw, 1, priv->hw->ps, 0);
2120
2121         /* set TX and RX rings length */
2122         stmmac_set_rings_length(priv);
2123
2124         /* Enable TSO */
2125         if (priv->tso) {
2126                 for (chan = 0; chan < tx_cnt; chan++)
2127                         priv->hw->dma->enable_tso(priv->ioaddr, 1, chan);
2128         }
2129
2130         return 0;
2131 }
2132
2133 static void stmmac_hw_teardown(struct net_device *dev)
2134 {
2135         struct stmmac_priv *priv = netdev_priv(dev);
2136
2137         clk_disable_unprepare(priv->plat->clk_ptp_ref);
2138 }
2139
2140 /**
2141  *  stmmac_open - open entry point of the driver
2142  *  @dev : pointer to the device structure.
2143  *  Description:
2144  *  This function is the open entry point of the driver.
2145  *  Return value:
2146  *  0 on success and an appropriate (-)ve integer as defined in errno.h
2147  *  file on failure.
2148  */
2149 static int stmmac_open(struct net_device *dev)
2150 {
2151         struct stmmac_priv *priv = netdev_priv(dev);
2152         int ret;
2153
2154         stmmac_check_ether_addr(priv);
2155
2156         if (priv->hw->pcs != STMMAC_PCS_RGMII &&
2157             priv->hw->pcs != STMMAC_PCS_TBI &&
2158             priv->hw->pcs != STMMAC_PCS_RTBI) {
2159                 ret = stmmac_init_phy(dev);
2160                 if (ret) {
2161                         netdev_err(priv->dev,
2162                                    "%s: Cannot attach to PHY (error: %d)\n",
2163                                    __func__, ret);
2164                         return ret;
2165                 }
2166         }
2167
2168         /* Extra statistics */
2169         memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats));
2170         priv->xstats.threshold = tc;
2171
2172         priv->dma_buf_sz = STMMAC_ALIGN(buf_sz);
2173         priv->rx_copybreak = STMMAC_RX_COPYBREAK;
2174
2175         ret = alloc_dma_desc_resources(priv);
2176         if (ret < 0) {
2177                 netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
2178                            __func__);
2179                 goto dma_desc_error;
2180         }
2181
2182         ret = init_dma_desc_rings(dev, GFP_KERNEL);
2183         if (ret < 0) {
2184                 netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
2185                            __func__);
2186                 goto init_error;
2187         }
2188
2189         ret = stmmac_hw_setup(dev, true);
2190         if (ret < 0) {
2191                 netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
2192                 goto init_error;
2193         }
2194
2195         stmmac_init_tx_coalesce(priv);
2196
2197         if (dev->phydev)
2198                 phy_start(dev->phydev);
2199
2200         /* Request the IRQ lines */
2201         ret = request_irq(dev->irq, stmmac_interrupt,
2202                           IRQF_SHARED, dev->name, dev);
2203         if (unlikely(ret < 0)) {
2204                 netdev_err(priv->dev,
2205                            "%s: ERROR: allocating the IRQ %d (error: %d)\n",
2206                            __func__, dev->irq, ret);
2207                 goto irq_error;
2208         }
2209
2210         /* Request the Wake IRQ in case another line is used for WoL */
2211         if (priv->wol_irq != dev->irq) {
2212                 ret = request_irq(priv->wol_irq, stmmac_interrupt,
2213                                   IRQF_SHARED, dev->name, dev);
2214                 if (unlikely(ret < 0)) {
2215                         netdev_err(priv->dev,
2216                                    "%s: ERROR: allocating the WoL IRQ %d (%d)\n",
2217                                    __func__, priv->wol_irq, ret);
2218                         goto wolirq_error;
2219                 }
2220         }
2221
2222         /* Request the LPI IRQ */
2223         if (priv->lpi_irq > 0) {
2224                 ret = request_irq(priv->lpi_irq, stmmac_interrupt, IRQF_SHARED,
2225                                   dev->name, dev);
2226                 if (unlikely(ret < 0)) {
2227                         netdev_err(priv->dev,
2228                                    "%s: ERROR: allocating the LPI IRQ %d (%d)\n",
2229                                    __func__, priv->lpi_irq, ret);
2230                         goto lpiirq_error;
2231                 }
2232         }
2233
2234         napi_enable(&priv->napi);
2235         netif_start_queue(dev);
2236
2237         return 0;
2238
2239 lpiirq_error:
2240         if (priv->wol_irq != dev->irq)
2241                 free_irq(priv->wol_irq, dev);
2242 wolirq_error:
2243         free_irq(dev->irq, dev);
2244 irq_error:
2245         if (dev->phydev)
2246                 phy_stop(dev->phydev);
2247
2248         del_timer_sync(&priv->txtimer);
2249         stmmac_hw_teardown(dev);
2250 init_error:
2251         free_dma_desc_resources(priv);
2252 dma_desc_error:
2253         if (dev->phydev)
2254                 phy_disconnect(dev->phydev);
2255
2256         return ret;
2257 }
2258
2259 /**
2260  *  stmmac_release - close entry point of the driver
2261  *  @dev : device pointer.
2262  *  Description:
2263  *  This is the stop entry point of the driver.
2264  */
2265 static int stmmac_release(struct net_device *dev)
2266 {
2267         struct stmmac_priv *priv = netdev_priv(dev);
2268
2269         if (priv->eee_enabled)
2270                 del_timer_sync(&priv->eee_ctrl_timer);
2271
2272         /* Stop and disconnect the PHY */
2273         if (dev->phydev) {
2274                 phy_stop(dev->phydev);
2275                 phy_disconnect(dev->phydev);
2276         }
2277
2278         netif_stop_queue(dev);
2279
2280         napi_disable(&priv->napi);
2281
2282         del_timer_sync(&priv->txtimer);
2283
2284         /* Free the IRQ lines */
2285         free_irq(dev->irq, dev);
2286         if (priv->wol_irq != dev->irq)
2287                 free_irq(priv->wol_irq, dev);
2288         if (priv->lpi_irq > 0)
2289                 free_irq(priv->lpi_irq, dev);
2290
2291         /* Stop TX/RX DMA and clear the descriptors */
2292         stmmac_stop_all_dma(priv);
2293
2294         /* Release and free the Rx/Tx resources */
2295         free_dma_desc_resources(priv);
2296
2297         /* Disable the MAC Rx/Tx */
2298         priv->hw->mac->set_mac(priv->ioaddr, false);
2299
2300         netif_carrier_off(dev);
2301
2302 #ifdef CONFIG_DEBUG_FS
2303         stmmac_exit_fs(dev);
2304 #endif
2305
2306         stmmac_release_ptp(priv);
2307
2308         return 0;
2309 }
2310
2311 /**
2312  *  stmmac_tso_allocator - fill TX descriptors with a TSO payload
2313  *  @priv: driver private structure
2314  *  @des: buffer start address
2315  *  @total_len: total length to fill in descriptors
2316  *  @last_segment: condition for the last descriptor
2317  *  Description:
2318  *  This function fills the descriptors and requests new descriptors
2319  *  according to the buffer length to fill
2320  */
2321 static void stmmac_tso_allocator(struct stmmac_priv *priv, unsigned int des,
2322                                  int total_len, bool last_segment)
2323 {
2324         struct dma_desc *desc;
2325         int tmp_len;
2326         u32 buff_size;
2327
2328         tmp_len = total_len;
2329
2330         while (tmp_len > 0) {
2331                 priv->cur_tx = STMMAC_GET_ENTRY(priv->cur_tx, DMA_TX_SIZE);
2332                 desc = priv->dma_tx + priv->cur_tx;
2333
2334                 desc->des0 = cpu_to_le32(des + (total_len - tmp_len));
2335                 buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
2336                             TSO_MAX_BUFF_SIZE : tmp_len;
2337
2338                 priv->hw->desc->prepare_tso_tx_desc(desc, 0, buff_size,
2339                         0, 1,
2340                         (last_segment) && (buff_size < TSO_MAX_BUFF_SIZE),
2341                         0, 0);
2342
2343                 tmp_len -= TSO_MAX_BUFF_SIZE;
2344         }
2345 }
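
/*
 * Worked example (illustrative): with TSO_MAX_BUFF_SIZE = SZ_16K - 1 =
 * 16383 bytes, a remaining payload of tmp_len = 40000 bytes is spread
 * over three descriptors by the loop above:
 *
 *	desc 0: 16383 bytes  (tmp_len 40000 -> 23617)
 *	desc 1: 16383 bytes  (tmp_len 23617 -> 7234)
 *	desc 2:  7234 bytes  (tmp_len goes negative, loop exits)
 *
 * Only the final descriptor can carry the last-segment flag, as it is
 * the only one with buff_size < TSO_MAX_BUFF_SIZE.
 */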
2346
2347 /**
2348  *  stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
2349  *  @skb : the socket buffer
2350  *  @dev : device pointer
2351  *  Description: this is the transmit function that is called on TSO frames
2352  *  (support available on GMAC4 and newer chips).
2353  *  The diagram below shows the ring programming in case of TSO frames:
2354  *
2355  *  First Descriptor
2356  *   --------
2357  *   | DES0 |---> buffer1 = L2/L3/L4 header
2358  *   | DES1 |---> TCP Payload (can continue on next descr...)
2359  *   | DES2 |---> buffer 1 and 2 len
2360  *   | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
2361  *   --------
2362  *      |
2363  *     ...
2364  *      |
2365  *   --------
2366  *   | DES0 | --| Split TCP Payload on Buffers 1 and 2
2367  *   | DES1 | --|
2368  *   | DES2 | --> buffer 1 and 2 len
2369  *   | DES3 |
2370  *   --------
2371  *
2372  * MSS is fixed while TSO is enabled, so the TDES3 ctx field is only set on MSS changes.
2373  */
2374 static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
2375 {
2376         u32 pay_len, mss;
2377         int tmp_pay_len = 0;
2378         struct stmmac_priv *priv = netdev_priv(dev);
2379         int nfrags = skb_shinfo(skb)->nr_frags;
2380         unsigned int first_entry, des;
2381         struct dma_desc *desc, *first, *mss_desc = NULL;
2382         u8 proto_hdr_len;
2383         int i;
2384
2385         /* Compute header lengths */
2386         proto_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
2387
2388         /* Desc availability based on threshold should be safe enough */
2389         if (unlikely(stmmac_tx_avail(priv) <
2390                 (((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) {
2391                 if (!netif_queue_stopped(dev)) {
2392                         netif_stop_queue(dev);
2393                         /* This is a hard error, log it. */
2394                         netdev_err(priv->dev,
2395                                    "%s: Tx Ring full when queue awake\n",
2396                                    __func__);
2397                 }
2398                 return NETDEV_TX_BUSY;
2399         }
2400
2401         pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */
2402
2403         mss = skb_shinfo(skb)->gso_size;
2404
2405         /* set new MSS value if needed */
2406         if (mss != priv->mss) {
2407                 mss_desc = priv->dma_tx + priv->cur_tx;
2408                 priv->hw->desc->set_mss(mss_desc, mss);
2409                 priv->mss = mss;
2410                 priv->cur_tx = STMMAC_GET_ENTRY(priv->cur_tx, DMA_TX_SIZE);
2411         }
2412
2413         if (netif_msg_tx_queued(priv)) {
2414                 pr_info("%s: tcphdrlen %d, hdr_len %d, pay_len %d, mss %d\n",
2415                         __func__, tcp_hdrlen(skb), proto_hdr_len, pay_len, mss);
2416                 pr_info("\tskb->len %d, skb->data_len %d\n", skb->len,
2417                         skb->data_len);
2418         }
2419
2420         first_entry = priv->cur_tx;
2421
2422         desc = priv->dma_tx + first_entry;
2423         first = desc;
2424
2425         /* first descriptor: fill Headers on Buf1 */
2426         des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
2427                              DMA_TO_DEVICE);
2428         if (dma_mapping_error(priv->device, des))
2429                 goto dma_map_err;
2430
2431         priv->tx_skbuff_dma[first_entry].buf = des;
2432         priv->tx_skbuff_dma[first_entry].len = skb_headlen(skb);
2433         priv->tx_skbuff[first_entry] = skb;
2434
2435         first->des0 = cpu_to_le32(des);
2436
2437         /* Fill start of payload in buff2 of first descriptor */
2438         if (pay_len)
2439                 first->des1 = cpu_to_le32(des + proto_hdr_len);
2440
2441         /* If needed take extra descriptors to fill the remaining payload */
2442         tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;
2443
2444         stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0));
2445
2446         /* Prepare fragments */
2447         for (i = 0; i < nfrags; i++) {
2448                 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2449
2450                 des = skb_frag_dma_map(priv->device, frag, 0,
2451                                        skb_frag_size(frag),
2452                                        DMA_TO_DEVICE);
2453                 if (dma_mapping_error(priv->device, des))
2454                         goto dma_map_err;
2455
2456                 stmmac_tso_allocator(priv, des, skb_frag_size(frag),
2457                                      (i == nfrags - 1));
2458
2459                 priv->tx_skbuff_dma[priv->cur_tx].buf = des;
2460                 priv->tx_skbuff_dma[priv->cur_tx].len = skb_frag_size(frag);
2461                 priv->tx_skbuff[priv->cur_tx] = NULL;
2462                 priv->tx_skbuff_dma[priv->cur_tx].map_as_page = true;
2463         }
2464
2465         priv->tx_skbuff_dma[priv->cur_tx].last_segment = true;
2466
2467         priv->cur_tx = STMMAC_GET_ENTRY(priv->cur_tx, DMA_TX_SIZE);
2468
2469         if (unlikely(stmmac_tx_avail(priv) <= (MAX_SKB_FRAGS + 1))) {
2470                 netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
2471                           __func__);
2472                 netif_stop_queue(dev);
2473         }
2474
2475         dev->stats.tx_bytes += skb->len;
2476         priv->xstats.tx_tso_frames++;
2477         priv->xstats.tx_tso_nfrags += nfrags;
2478
2479         /* Manage tx mitigation */
2480         priv->tx_count_frames += nfrags + 1;
2481         if (likely(priv->tx_coal_frames > priv->tx_count_frames)) {
2482                 mod_timer(&priv->txtimer,
2483                           STMMAC_COAL_TIMER(priv->tx_coal_timer));
2484         } else {
2485                 priv->tx_count_frames = 0;
2486                 priv->hw->desc->set_tx_ic(desc);
2487                 priv->xstats.tx_set_ic_bit++;
2488         }
2489
2490         if (!priv->hwts_tx_en)
2491                 skb_tx_timestamp(skb);
2492
2493         if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
2494                      priv->hwts_tx_en)) {
2495                 /* declare that device is doing timestamping */
2496                 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
2497                 priv->hw->desc->enable_tx_timestamp(first);
2498         }
2499
2500         /* Complete the first descriptor before granting the DMA */
2501         priv->hw->desc->prepare_tso_tx_desc(first, 1,
2502                         proto_hdr_len,
2503                         pay_len,
2504                         1, priv->tx_skbuff_dma[first_entry].last_segment,
2505                         tcp_hdrlen(skb) / 4, (skb->len - proto_hdr_len));
2506
2507         /* If context desc is used to change MSS */
2508         if (mss_desc)
2509                 priv->hw->desc->set_tx_owner(mss_desc);
2510
2511         /* The own bit must be the latest setting done when prepare the
2512          * descriptor and then barrier is needed to make sure that
2513          * all is coherent before granting the DMA engine.
2514          */
2515         dma_wmb();
2516
2517         if (netif_msg_pktdata(priv)) {
2518                 pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
2519                         __func__, priv->cur_tx, priv->dirty_tx, first_entry,
2520                         priv->cur_tx, first, nfrags);
2521
2522                 priv->hw->desc->display_ring((void *)priv->dma_tx, DMA_TX_SIZE,
2523                                              0);
2524
2525                 pr_info(">>> frame to be transmitted: ");
2526                 print_pkt(skb->data, skb_headlen(skb));
2527         }
2528
2529         netdev_sent_queue(dev, skb->len);
2530
2531         priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, priv->tx_tail_addr,
2532                                        STMMAC_CHAN0);
2533
2534         return NETDEV_TX_OK;
2535
2536 dma_map_err:
2537         dev_err(priv->device, "Tx dma map failed\n");
2538         dev_kfree_skb(skb);
2539         priv->dev->stats.tx_dropped++;
2540         return NETDEV_TX_OK;
2541 }
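
/*
 * Availability-check sketch (illustrative): the threshold test at the
 * top of stmmac_tso_xmit() requires at least
 * (skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1 free descriptors
 * before proceeding.  E.g. for a 64 KiB GSO skb with a 54-byte header:
 *
 *	(65536 - 54) / 16383 + 1 = 3 + 1 = 4 descriptors
 *
 * plus one extra context descriptor taken from the ring whenever the
 * MSS value changes.
 */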
2542
2543 /**
2544  *  stmmac_xmit - Tx entry point of the driver
2545  *  @skb : the socket buffer
2546  *  @dev : device pointer
2547  *  Description : this is the tx entry point of the driver.
2548  *  It programs the chain or the ring and supports oversized frames
2549  *  and SG feature.
2550  */
2551 static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
2552 {
2553         struct stmmac_priv *priv = netdev_priv(dev);
2554         unsigned int nopaged_len = skb_headlen(skb);
2555         int i, csum_insertion = 0, is_jumbo = 0;
2556         int nfrags = skb_shinfo(skb)->nr_frags;
2557         unsigned int entry, first_entry;
2558         struct dma_desc *desc, *first;
2559         unsigned int enh_desc;
2560         unsigned int des;
2561
2562         /* Manage oversized TCP frames for GMAC4 device */
2563         if (skb_is_gso(skb) && priv->tso) {
2564                 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
2565                         return stmmac_tso_xmit(skb, dev);
2566         }
2567
2568         if (unlikely(stmmac_tx_avail(priv) < nfrags + 1)) {
2569                 if (!netif_queue_stopped(dev)) {
2570                         netif_stop_queue(dev);
2571                         /* This is a hard error, log it. */
2572                         netdev_err(priv->dev,
2573                                    "%s: Tx Ring full when queue awake\n",
2574                                    __func__);
2575                 }
2576                 return NETDEV_TX_BUSY;
2577         }
2578
2579         if (priv->tx_path_in_lpi_mode)
2580                 stmmac_disable_eee_mode(priv);
2581
2582         entry = priv->cur_tx;
2583         first_entry = entry;
2584
2585         csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
2586
2587         if (likely(priv->extend_desc))
2588                 desc = (struct dma_desc *)(priv->dma_etx + entry);
2589         else
2590                 desc = priv->dma_tx + entry;
2591
2592         first = desc;
2593
2594         priv->tx_skbuff[first_entry] = skb;
2595
2596         enh_desc = priv->plat->enh_desc;
2597         /* To program the descriptors according to the size of the frame */
2598         if (enh_desc)
2599                 is_jumbo = priv->hw->mode->is_jumbo_frm(skb->len, enh_desc);
2600
2601         if (unlikely(is_jumbo) && likely(priv->synopsys_id <
2602                                          DWMAC_CORE_4_00)) {
2603                 entry = priv->hw->mode->jumbo_frm(priv, skb, csum_insertion);
2604                 if (unlikely((int)entry < 0))
2605                         goto dma_map_err;
2606         }
2607
2608         for (i = 0; i < nfrags; i++) {
2609                 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2610                 int len = skb_frag_size(frag);
2611                 bool last_segment = (i == (nfrags - 1));
2612
2613                 entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
2614
2615                 if (likely(priv->extend_desc))
2616                         desc = (struct dma_desc *)(priv->dma_etx + entry);
2617                 else
2618                         desc = priv->dma_tx + entry;
2619
2620                 des = skb_frag_dma_map(priv->device, frag, 0, len,
2621                                        DMA_TO_DEVICE);
2622                 if (dma_mapping_error(priv->device, des))
2623                         goto dma_map_err; /* should reuse desc w/o issues */
2624
2625                 priv->tx_skbuff[entry] = NULL;
2626
2627                 priv->tx_skbuff_dma[entry].buf = des;
2628                 if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
2629                         desc->des0 = cpu_to_le32(des);
2630                 else
2631                         desc->des2 = cpu_to_le32(des);
2632
2633                 priv->tx_skbuff_dma[entry].map_as_page = true;
2634                 priv->tx_skbuff_dma[entry].len = len;
2635                 priv->tx_skbuff_dma[entry].last_segment = last_segment;
2636
2637                 /* Prepare the descriptor and set the own bit too */
2638                 priv->hw->desc->prepare_tx_desc(desc, 0, len, csum_insertion,
2639                                                 priv->mode, 1, last_segment);
2640         }
2641
2642         entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
2643
2644         priv->cur_tx = entry;
2645
2646         if (netif_msg_pktdata(priv)) {
2647                 void *tx_head;
2648
2649                 netdev_dbg(priv->dev,
2650                            "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
2651                            __func__, priv->cur_tx, priv->dirty_tx, first_entry,
2652                            entry, first, nfrags);
2653
2654                 if (priv->extend_desc)
2655                         tx_head = (void *)priv->dma_etx;
2656                 else
2657                         tx_head = (void *)priv->dma_tx;
2658
2659                 priv->hw->desc->display_ring(tx_head, DMA_TX_SIZE, false);
2660
2661                 netdev_dbg(priv->dev, ">>> frame to be transmitted: ");
2662                 print_pkt(skb->data, skb->len);
2663         }
2664
2665         if (unlikely(stmmac_tx_avail(priv) <= (MAX_SKB_FRAGS + 1))) {
2666                 netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
2667                           __func__);
2668                 netif_stop_queue(dev);
2669         }
2670
2671         dev->stats.tx_bytes += skb->len;
2672
2673         /* According to the coalesce parameter the IC bit for the latest
2674          * segment is reset and the timer re-started to clean the tx status.
2675          * This approach takes care about the fragments: desc is the first
2676          * element in case of no SG.
2677          */
2678         priv->tx_count_frames += nfrags + 1;
2679         if (likely(priv->tx_coal_frames > priv->tx_count_frames)) {
2680                 mod_timer(&priv->txtimer,
2681                           STMMAC_COAL_TIMER(priv->tx_coal_timer));
2682         } else {
2683                 priv->tx_count_frames = 0;
2684                 priv->hw->desc->set_tx_ic(desc);
2685                 priv->xstats.tx_set_ic_bit++;
2686         }
2687
2688         if (!priv->hwts_tx_en)
2689                 skb_tx_timestamp(skb);
2690
2691         /* Ready to fill the first descriptor and set the OWN bit w/o any
2692          * problems because all the descriptors are actually ready to be
2693          * passed to the DMA engine.
2694          */
2695         if (likely(!is_jumbo)) {
2696                 bool last_segment = (nfrags == 0);
2697
2698                 des = dma_map_single(priv->device, skb->data,
2699                                      nopaged_len, DMA_TO_DEVICE);
2700                 if (dma_mapping_error(priv->device, des))
2701                         goto dma_map_err;
2702
2703                 priv->tx_skbuff_dma[first_entry].buf = des;
2704                 if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
2705                         first->des0 = cpu_to_le32(des);
2706                 else
2707                         first->des2 = cpu_to_le32(des);
2708
2709                 priv->tx_skbuff_dma[first_entry].len = nopaged_len;
2710                 priv->tx_skbuff_dma[first_entry].last_segment = last_segment;
2711
2712                 if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
2713                              priv->hwts_tx_en)) {
2714                         /* declare that device is doing timestamping */
2715                         skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
2716                         priv->hw->desc->enable_tx_timestamp(first);
2717                 }
2718
2719                 /* Prepare the first descriptor setting the OWN bit too */
2720                 priv->hw->desc->prepare_tx_desc(first, 1, nopaged_len,
2721                                                 csum_insertion, priv->mode, 1,
2722                                                 last_segment);
2723
2724                 /* The own bit must be the latest setting done when preparing the
2725                  * descriptor, and then a barrier is needed to make sure that
2726                  * everything is coherent before handing control to the DMA engine.
2727                  */
2728                 dma_wmb();
2729         }
2730
2731         netdev_sent_queue(dev, skb->len);
2732
2733         if (priv->synopsys_id < DWMAC_CORE_4_00)
2734                 priv->hw->dma->enable_dma_transmission(priv->ioaddr);
2735         else
2736                 priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, priv->tx_tail_addr,
2737                                                STMMAC_CHAN0);
2738
2739         return NETDEV_TX_OK;
2740
2741 dma_map_err:
2742         netdev_err(priv->dev, "Tx DMA map failed\n");
2743         dev_kfree_skb(skb);
2744         priv->dev->stats.tx_dropped++;
2745         return NETDEV_TX_OK;
2746 }
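
/* Editor's sketch (hypothetical, not driver code): a standalone model of
 * the Tx interrupt-coalescing decision made in stmmac_xmit() above.  Every
 * queued packet accounts for nfrags + 1 descriptors; while the running
 * count stays below the tx_coal_frames threshold, the completion IRQ is
 * deferred and the cleanup timer is re-armed instead.
 */
#if 0
#include <stdbool.h>

struct coal_model {
        unsigned int count;     /* frames queued since the last IC bit */
        unsigned int thresh;    /* plays the role of priv->tx_coal_frames */
};

/* Return true when the last descriptor of this packet should set IC. */
static bool coal_want_irq(struct coal_model *m, unsigned int nfrags)
{
        m->count += nfrags + 1;
        if (m->thresh > m->count)
                return false;   /* defer: the coalescing timer cleans up */

        m->count = 0;
        return true;            /* raise a completion interrupt */
}
#endif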
2747
2748 static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
2749 {
2750         struct ethhdr *ehdr;
2751         u16 vlanid;
2752
2753         if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) ==
2754             NETIF_F_HW_VLAN_CTAG_RX &&
2755             !__vlan_get_tag(skb, &vlanid)) {
2756                 /* pop the vlan tag */
2757                 ehdr = (struct ethhdr *)skb->data;
2758                 memmove(skb->data + VLAN_HLEN, ehdr, ETH_ALEN * 2);
2759                 skb_pull(skb, VLAN_HLEN);
2760                 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlanid);
2761         }
2762 }
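
/* Editor's note (illustrative, not driver code): stmmac_rx_vlan() above
 * pops an 802.1Q header in place.  A tagged frame starts as
 *      [dst MAC (6)][src MAC (6)][TPID (2)][TCI (2)][ethertype ...]
 * so memmove() shifts the twelve address bytes up by VLAN_HLEN, skb_pull()
 * drops the now-unused leading four bytes, and the extracted TCI is handed
 * to the stack via __vlan_hwaccel_put_tag().  The same move on a raw
 * buffer:
 */
#if 0
#include <string.h>

#define EX_ETH_ALEN     6
#define EX_VLAN_HLEN    4

/* Strip the 802.1Q tag from buf in place; returns the new frame start. */
static unsigned char *ex_pop_vlan_tag(unsigned char *buf)
{
        memmove(buf + EX_VLAN_HLEN, buf, EX_ETH_ALEN * 2);
        return buf + EX_VLAN_HLEN;
}
#endif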
2763
2765 static inline int stmmac_rx_threshold_count(struct stmmac_priv *priv)
2766 {
2767         if (priv->rx_zeroc_thresh < STMMAC_RX_THRESH)
2768                 return 0;
2769
2770         return 1;
2771 }
2772
2773 /**
2774  * stmmac_rx_refill - refill used skb preallocated buffers
2775  * @priv: driver private structure
2776  * Description: this reallocates the skbs for the reception process,
2777  * which is based on zero-copy.
2778  */
2779 static inline void stmmac_rx_refill(struct stmmac_priv *priv)
2780 {
2781         int bfsize = priv->dma_buf_sz;
2782         unsigned int entry = priv->dirty_rx;
2783         int dirty = stmmac_rx_dirty(priv);
2784
2785         while (dirty-- > 0) {
2786                 struct dma_desc *p;
2787
2788                 if (priv->extend_desc)
2789                         p = (struct dma_desc *)(priv->dma_erx + entry);
2790                 else
2791                         p = priv->dma_rx + entry;
2792
2793                 if (likely(priv->rx_skbuff[entry] == NULL)) {
2794                         struct sk_buff *skb;
2795
2796                         skb = netdev_alloc_skb_ip_align(priv->dev, bfsize);
2797                         if (unlikely(!skb)) {
2798                                 /* disable zero-copy for a while */
2799                                 priv->rx_zeroc_thresh = STMMAC_RX_THRESH;
2800                                 if (unlikely(net_ratelimit()))
2801                                         dev_err(priv->device,
2802                                                 "fail to alloc skb entry %d\n",
2803                                                 entry);
2804                                 break;
2805                         }
2806
2807                         priv->rx_skbuff[entry] = skb;
2808                         priv->rx_skbuff_dma[entry] =
2809                             dma_map_single(priv->device, skb->data, bfsize,
2810                                            DMA_FROM_DEVICE);
2811                         if (dma_mapping_error(priv->device,
2812                                               priv->rx_skbuff_dma[entry])) {
2813                                 netdev_err(priv->dev, "Rx DMA map failed\n");
2814                                 dev_kfree_skb(skb);
2815                                 break;
2816                         }
2817
2818                         if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00)) {
2819                                 p->des0 = cpu_to_le32(priv->rx_skbuff_dma[entry]);
2820                                 p->des1 = 0;
2821                         } else {
2822                                 p->des2 = cpu_to_le32(priv->rx_skbuff_dma[entry]);
2823                         }
2824                         if (priv->hw->mode->refill_desc3)
2825                                 priv->hw->mode->refill_desc3(priv, p);
2826
2827                         if (priv->rx_zeroc_thresh > 0)
2828                                 priv->rx_zeroc_thresh--;
2829
2830                         netif_dbg(priv, rx_status, priv->dev,
2831                                   "refill entry #%d\n", entry);
2832                 }
2833                 dma_wmb();
2834
2835                 if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
2836                         priv->hw->desc->init_rx_desc(p, priv->use_riwt, 0, 0);
2837                 else
2838                         priv->hw->desc->set_rx_owner(p);
2839
2840                 dma_wmb();
2841
2842                 entry = STMMAC_GET_ENTRY(entry, DMA_RX_SIZE);
2843         }
2844         priv->dirty_rx = entry;
2845 }
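
/* Editor's sketch (illustrative only): the refill loop above is a classic
 * producer/consumer ring.  cur_rx points at the next entry the DMA will
 * complete, dirty_rx at the first entry still to be re-armed by the CPU,
 * and stmmac_rx_dirty() is the distance between them.  Assuming the ring
 * size is a power of two, advancing an index is a masked increment, which
 * is what STMMAC_GET_ENTRY() is expected to do:
 */
#if 0
static unsigned int ex_ring_next(unsigned int entry, unsigned int size)
{
        /* size must be a power of two for the mask to work */
        return (entry + 1) & (size - 1);
}
#endif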
2846
2847 /**
2848  * stmmac_rx - manage the receive process
2849  * @priv: driver private structure
2850  * @limit: napi budget.
2851  * Description: this is the function called by the napi poll method.
2852  * It gets all the frames from the ring.
2853  */
2854 static int stmmac_rx(struct stmmac_priv *priv, int limit)
2855 {
2856         unsigned int entry = priv->cur_rx;
2857         unsigned int next_entry;
2858         unsigned int count = 0;
2859         int coe = priv->hw->rx_csum;
2860
2861         if (netif_msg_rx_status(priv)) {
2862                 void *rx_head;
2863
2864                 netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
2865                 if (priv->extend_desc)
2866                         rx_head = (void *)priv->dma_erx;
2867                 else
2868                         rx_head = (void *)priv->dma_rx;
2869
2870                 priv->hw->desc->display_ring(rx_head, DMA_RX_SIZE, true);
2871         }
2872         while (count < limit) {
2873                 int status;
2874                 struct dma_desc *p;
2875                 struct dma_desc *np;
2876
2877                 if (priv->extend_desc)
2878                         p = (struct dma_desc *)(priv->dma_erx + entry);
2879                 else
2880                         p = priv->dma_rx + entry;
2881
2882                 /* read the status of the incoming frame */
2883                 status = priv->hw->desc->rx_status(&priv->dev->stats,
2884                                                    &priv->xstats, p);
2885                 /* check if managed by the DMA otherwise go ahead */
2886                 if (unlikely(status & dma_own))
2887                         break;
2888
2889                 count++;
2890
2891                 priv->cur_rx = STMMAC_GET_ENTRY(priv->cur_rx, DMA_RX_SIZE);
2892                 next_entry = priv->cur_rx;
2893
2894                 if (priv->extend_desc)
2895                         np = (struct dma_desc *)(priv->dma_erx + next_entry);
2896                 else
2897                         np = priv->dma_rx + next_entry;
2898
2899                 prefetch(np);
2900
2901                 if ((priv->extend_desc) && (priv->hw->desc->rx_extended_status))
2902                         priv->hw->desc->rx_extended_status(&priv->dev->stats,
2903                                                            &priv->xstats,
2904                                                            priv->dma_erx +
2905                                                            entry);
2906                 if (unlikely(status == discard_frame)) {
2907                         priv->dev->stats.rx_errors++;
2908                         if (priv->hwts_rx_en && !priv->extend_desc) {
2909                                 /* DESC2 & DESC3 will be overwritten by device
2910                                  * with timestamp value, hence reinitialize
2911                                  * them in stmmac_rx_refill() function so that
2912                                  * device can reuse it.
2913                                  */
2914                                 priv->rx_skbuff[entry] = NULL;
2915                                 dma_unmap_single(priv->device,
2916                                                  priv->rx_skbuff_dma[entry],
2917                                                  priv->dma_buf_sz,
2918                                                  DMA_FROM_DEVICE);
2919                         }
2920                 } else {
2921                         struct sk_buff *skb;
2922                         int frame_len;
2923                         unsigned int des;
2924
2925                         if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
2926                                 des = le32_to_cpu(p->des0);
2927                         else
2928                                 des = le32_to_cpu(p->des2);
2929
2930                         frame_len = priv->hw->desc->get_rx_frame_len(p, coe);
2931
2932                         /*  If frame length is greater than skb buffer size
2933                          *  (preallocated during init) then the packet is
2934                          *  ignored
2935                          */
2936                         if (frame_len > priv->dma_buf_sz) {
2937                                 netdev_err(priv->dev,
2938                                            "len %d larger than size (%d)\n",
2939                                            frame_len, priv->dma_buf_sz);
2940                                 priv->dev->stats.rx_length_errors++;
2941                                 break;
2942                         }
2943
2944                         /* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
2945                          * Type frames (LLC/LLC-SNAP)
2946                          */
2947                         if (unlikely(status != llc_snap))
2948                                 frame_len -= ETH_FCS_LEN;
2949
2950                         if (netif_msg_rx_status(priv)) {
2951                                 netdev_dbg(priv->dev, "\tdesc: %p [entry %d] buff=0x%x\n",
2952                                            p, entry, des);
2953                                 if (frame_len > ETH_FRAME_LEN)
2954                                         netdev_dbg(priv->dev, "frame size %d, COE: %d\n",
2955                                                    frame_len, status);
2956                         }
2957
2958                         /* Zero-copy is always used, for all frame sizes,
2959                          * in the case of GMAC4 because its used
2960                          * descriptors always need to be refilled.
2961                          */
2962                         if (unlikely(!priv->plat->has_gmac4 &&
2963                                      ((frame_len < priv->rx_copybreak) ||
2964                                      stmmac_rx_threshold_count(priv)))) {
2965                                 skb = netdev_alloc_skb_ip_align(priv->dev,
2966                                                                 frame_len);
2967                                 if (unlikely(!skb)) {
2968                                         if (net_ratelimit())
2969                                                 dev_warn(priv->device,
2970                                                          "packet dropped\n");
2971                                         priv->dev->stats.rx_dropped++;
2972                                         break;
2973                                 }
2974
2975                                 dma_sync_single_for_cpu(priv->device,
2976                                                         priv->rx_skbuff_dma[entry],
2977                                                         frame_len,
2978                                                         DMA_FROM_DEVICE);
2979                                 skb_copy_to_linear_data(skb,
2980                                                         priv->rx_skbuff[entry]->data,
2981                                                         frame_len);
2982
2983                                 skb_put(skb, frame_len);
2984
2985                                 dma_sync_single_for_device(priv->device,
2986                                                            priv->rx_skbuff_dma[entry],
2987                                                            frame_len,
2988                                                            DMA_FROM_DEVICE);
2989                         } else {
2990                                 skb = priv->rx_skbuff[entry];
2991                                 if (unlikely(!skb)) {
2992                                         netdev_err(priv->dev,
2993                                                    "%s: Inconsistent Rx chain\n",
2994                                                    priv->dev->name);
2995                                         priv->dev->stats.rx_dropped++;
2996                                         break;
2997                                 }
2998                                 prefetch(skb->data - NET_IP_ALIGN);
2999                                 priv->rx_skbuff[entry] = NULL;
3000                                 priv->rx_zeroc_thresh++;
3001
3002                                 skb_put(skb, frame_len);
3003                                 dma_unmap_single(priv->device,
3004                                                  priv->rx_skbuff_dma[entry],
3005                                                  priv->dma_buf_sz,
3006                                                  DMA_FROM_DEVICE);
3007                         }
3008
3009                         if (netif_msg_pktdata(priv)) {
3010                                 netdev_dbg(priv->dev, "frame received (%dbytes)",
3011                                            frame_len);
3012                                 print_pkt(skb->data, frame_len);
3013                         }
3014
3015                         stmmac_get_rx_hwtstamp(priv, p, np, skb);
3016
3017                         stmmac_rx_vlan(priv->dev, skb);
3018
3019                         skb->protocol = eth_type_trans(skb, priv->dev);
3020
3021                         if (unlikely(!coe))
3022                                 skb_checksum_none_assert(skb);
3023                         else
3024                                 skb->ip_summed = CHECKSUM_UNNECESSARY;
3025
3026                         napi_gro_receive(&priv->napi, skb);
3027
3028                         priv->dev->stats.rx_packets++;
3029                         priv->dev->stats.rx_bytes += frame_len;
3030                 }
3031                 entry = next_entry;
3032         }
3033
3034         stmmac_rx_refill(priv);
3035
3036         priv->xstats.rx_pkt_n += count;
3037
3038         return count;
3039 }
3040
3041 /**
3042  *  stmmac_poll - stmmac poll method (NAPI)
3043  *  @napi : pointer to the napi structure.
3044  *  @budget : maximum number of packets that the current CPU can receive from
3045  *            all interfaces.
3046  *  Description :
3047  *  To look at the incoming frames and clear the tx resources.
3048  */
3049 static int stmmac_poll(struct napi_struct *napi, int budget)
3050 {
3051         struct stmmac_priv *priv = container_of(napi, struct stmmac_priv, napi);
3052         int work_done = 0;
3053         u32 chan = STMMAC_CHAN0;
3054
3055         priv->xstats.napi_poll++;
3056         stmmac_tx_clean(priv);
3057
3058         work_done = stmmac_rx(priv, budget);
3059         if (work_done < budget) {
3060                 napi_complete_done(napi, work_done);
3061                 stmmac_enable_dma_irq(priv, chan);
3062         }
3063         return work_done;
3064 }
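
/* Editor's note: the NAPI contract in stmmac_poll() above is that at most
 * @budget Rx packets are consumed per call.  Returning less than @budget
 * means the ring is drained, so the poll is completed with
 * napi_complete_done() and the DMA interrupt is re-enabled; returning
 * @budget keeps the softirq polling with the device interrupt still off.
 */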
3065
3066 /**
3067  *  stmmac_tx_timeout
3068  *  @dev : Pointer to net device structure
3069  *  Description: this function is called when a packet transmission fails to
3070  *   complete within a reasonable time. The driver will mark the error in the
3071  *   netdev structure and arrange for the device to be reset to a sane state
3072  *   in order to transmit a new packet.
3073  */
3074 static void stmmac_tx_timeout(struct net_device *dev)
3075 {
3076         struct stmmac_priv *priv = netdev_priv(dev);
3077         u32 chan = STMMAC_CHAN0;
3078
3079         /* Clear Tx resources and restart transmitting again */
3080         stmmac_tx_err(priv, chan);
3081 }
3082
3083 /**
3084  *  stmmac_set_rx_mode - entry point for multicast addressing
3085  *  @dev : pointer to the device structure
3086  *  Description:
3087  *  This function is a driver entry point which gets called by the kernel
3088  *  whenever multicast addresses must be enabled/disabled.
3089  *  Return value:
3090  *  void.
3091  */
3092 static void stmmac_set_rx_mode(struct net_device *dev)
3093 {
3094         struct stmmac_priv *priv = netdev_priv(dev);
3095
3096         priv->hw->mac->set_filter(priv->hw, dev);
3097 }
3098
3099 /**
3100  *  stmmac_change_mtu - entry point to change MTU size for the device.
3101  *  @dev : device pointer.
3102  *  @new_mtu : the new MTU size for the device.
3103  * Description: the Maximum Transmission Unit (MTU) is used by the network layer
3104  *  to drive packet transmission. Ethernet has an MTU of 1500 octets
3105  *  (ETH_DATA_LEN). This value can be changed with ifconfig.
3106  *  Return value:
3107  *  0 on success and an appropriate (-)ve integer as defined in errno.h
3108  *  file on failure.
3109  */
3110 static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
3111 {
3112         struct stmmac_priv *priv = netdev_priv(dev);
3113
3114         if (netif_running(dev)) {
3115                 netdev_err(priv->dev, "must be stopped to change its MTU\n");
3116                 return -EBUSY;
3117         }
3118
3119         dev->mtu = new_mtu;
3120
3121         netdev_update_features(dev);
3122
3123         return 0;
3124 }
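
/* Editor's example (hypothetical userspace code, not part of the driver):
 * because stmmac_change_mtu() returns -EBUSY while the interface is
 * running, the link must be brought down before the MTU can be changed.
 * A minimal SIOCSIFMTU call that ends up in ndo_change_mtu():
 */
#if 0
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>

static int ex_set_mtu(const char *ifname, int mtu)
{
        struct ifreq ifr;
        int ret, fd = socket(AF_INET, SOCK_DGRAM, 0);

        if (fd < 0)
                return -1;

        memset(&ifr, 0, sizeof(ifr));
        strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
        ifr.ifr_mtu = mtu;

        ret = ioctl(fd, SIOCSIFMTU, &ifr);  /* fails with EBUSY if up */
        close(fd);
        return ret;
}
#endif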
3125
3126 static netdev_features_t stmmac_fix_features(struct net_device *dev,
3127                                              netdev_features_t features)
3128 {
3129         struct stmmac_priv *priv = netdev_priv(dev);
3130
3131         if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
3132                 features &= ~NETIF_F_RXCSUM;
3133
3134         if (!priv->plat->tx_coe)
3135                 features &= ~NETIF_F_CSUM_MASK;
3136
3137         /* Some GMAC devices have buggy Jumbo frame support that
3138          * needs to have the Tx COE disabled for oversized frames
3139          * (due to limited buffer sizes). In this case we disable
3140          * the TX csum insertion in the TDES and don't use SF.
3141          */
3142         if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
3143                 features &= ~NETIF_F_CSUM_MASK;
3144
3145         /* Disable tso if asked by ethtool */
3146         if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
3147                 if (features & NETIF_F_TSO)
3148                         priv->tso = true;
3149                 else
3150                         priv->tso = false;
3151         }
3152
3153         return features;
3154 }
3155
3156 static int stmmac_set_features(struct net_device *netdev,
3157                                netdev_features_t features)
3158 {
3159         struct stmmac_priv *priv = netdev_priv(netdev);
3160
3161         /* Keep the COE Type if RX csum is supported */
3162         if (features & NETIF_F_RXCSUM)
3163                 priv->hw->rx_csum = priv->plat->rx_coe;
3164         else
3165                 priv->hw->rx_csum = 0;
3166         /* No check is needed because rx_coe has already been set and will
3167          * be fixed up if there is an issue.
3168          */
3169         priv->hw->mac->rx_ipc(priv->hw);
3170
3171         return 0;
3172 }
3173
3174 /**
3175  *  stmmac_interrupt - main ISR
3176  *  @irq: interrupt number.
3177  *  @dev_id: to pass the net device pointer.
3178  *  Description: this is the main driver interrupt service routine.
3179  *  It can call:
3180  *  o DMA service routine (to manage incoming frame reception and transmission
3181  *    status)
3182  *  o Core interrupts to manage: remote wake-up, management counter, LPI
3183  *    interrupts.
3184  */
3185 static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
3186 {
3187         struct net_device *dev = (struct net_device *)dev_id;
3188         struct stmmac_priv *priv;
3189         u32 rx_cnt, tx_cnt, queues_count;
3190         u32 queue;
3191
3192         if (unlikely(!dev)) {
3193                 pr_err("%s: invalid dev pointer\n", __func__);
3194                 return IRQ_NONE;
3195         }
3196
3197         priv = netdev_priv(dev);
3198         rx_cnt = priv->plat->rx_queues_to_use;
3199         tx_cnt = priv->plat->tx_queues_to_use;
3200         queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt;
3201
3202         if (priv->irq_wake)
3203                 pm_wakeup_event(priv->device, 0);
3203
3204         /* To handle GMAC own interrupts */
3205         if ((priv->plat->has_gmac) || (priv->plat->has_gmac4)) {
3206                 int status = priv->hw->mac->host_irq_status(priv->hw,
3207                                                             &priv->xstats);
3208
3209                 if (unlikely(status)) {
3210                         /* For LPI we need to save the tx status */
3211                         if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
3212                                 priv->tx_path_in_lpi_mode = true;
3213                         if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
3214                                 priv->tx_path_in_lpi_mode = false;
3215                 }
3216
3217                 if (priv->synopsys_id >= DWMAC_CORE_4_00) {
3218                         for (queue = 0; queue < queues_count; queue++) {
3219                                 status |=
3220                                 priv->hw->mac->host_mtl_irq_status(priv->hw,
3221                                                                    queue);
3222
3223                                 if (status & CORE_IRQ_MTL_RX_OVERFLOW &&
3224                                     priv->hw->dma->set_rx_tail_ptr)
3225                                         priv->hw->dma->set_rx_tail_ptr(priv->ioaddr,
3226                                                                 priv->rx_tail_addr,
3227                                                                 queue);
3228                         }
3229                 }
3230
3231                 /* PCS link status */
3232                 if (priv->hw->pcs) {
3233                         if (priv->xstats.pcs_link)
3234                                 netif_carrier_on(dev);
3235                         else
3236                                 netif_carrier_off(dev);
3237                 }
3238         }
3239
3240         /* To handle DMA interrupts */
3241         stmmac_dma_interrupt(priv);
3242
3243         return IRQ_HANDLED;
3244 }
3245
3246 #ifdef CONFIG_NET_POLL_CONTROLLER
3247 /* Polling receive - used by NETCONSOLE and other diagnostic tools
3248  * to allow network I/O with interrupts disabled.
3249  */
3250 static void stmmac_poll_controller(struct net_device *dev)
3251 {
3252         disable_irq(dev->irq);
3253         stmmac_interrupt(dev->irq, dev);
3254         enable_irq(dev->irq);
3255 }
3256 #endif
3257
3258 /**
3259  *  stmmac_ioctl - Entry point for the Ioctl
3260  *  @dev: Device pointer.
3261  *  @rq: An IOCTL-specific structure that can contain a pointer to
3262  *  a proprietary structure used to pass information to the driver.
3263  *  @cmd: IOCTL command
3264  *  Description:
3265  *  Currently it supports the phy_mii_ioctl(...) and HW time stamping.
3266  */
3267 static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
3268 {
3269         int ret = -EOPNOTSUPP;
3270
3271         if (!netif_running(dev))
3272                 return -EINVAL;
3273
3274         switch (cmd) {
3275         case SIOCGMIIPHY:
3276         case SIOCGMIIREG:
3277         case SIOCSMIIREG:
3278                 if (!dev->phydev)
3279                         return -EINVAL;
3280                 ret = phy_mii_ioctl(dev->phydev, rq, cmd);
3281                 break;
3282         case SIOCSHWTSTAMP:
3283                 ret = stmmac_hwtstamp_ioctl(dev, rq);
3284                 break;
3285         default:
3286                 break;
3287         }
3288
3289         return ret;
3290 }
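
/* Editor's example (hypothetical userspace code, not part of the driver):
 * the SIOCSHWTSTAMP case above is reached through a standard
 * hwtstamp_config request, e.g. to enable hardware timestamping on both
 * the Tx and Rx paths:
 */
#if 0
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

static int ex_enable_hw_tstamp(int fd, const char *ifname)
{
        struct hwtstamp_config cfg;
        struct ifreq ifr;

        memset(&cfg, 0, sizeof(cfg));
        cfg.tx_type = HWTSTAMP_TX_ON;           /* timestamp outgoing packets */
        cfg.rx_filter = HWTSTAMP_FILTER_ALL;    /* timestamp all incoming ones */

        memset(&ifr, 0, sizeof(ifr));
        strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
        ifr.ifr_data = (void *)&cfg;

        return ioctl(fd, SIOCSHWTSTAMP, &ifr);
}
#endif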
3291
3292 #ifdef CONFIG_DEBUG_FS
3293 static struct dentry *stmmac_fs_dir;
3294
3295 static void sysfs_display_ring(void *head, int size, int extend_desc,
3296                                struct seq_file *seq)
3297 {
3298         int i;
3299         struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
3300         struct dma_desc *p = (struct dma_desc *)head;
3301
3302         for (i = 0; i < size; i++) {
3303                 if (extend_desc) {
3304                         seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
3305                                    i, (unsigned int)virt_to_phys(ep),
3306                                    le32_to_cpu(ep->basic.des0),
3307                                    le32_to_cpu(ep->basic.des1),
3308                                    le32_to_cpu(ep->basic.des2),
3309                                    le32_to_cpu(ep->basic.des3));
3310                         ep++;
3311                 } else {
3312                         seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
3313                                    i, (unsigned int)virt_to_phys(p),
3314                                    le32_to_cpu(p->des0), le32_to_cpu(p->des1),
3315                                    le32_to_cpu(p->des2), le32_to_cpu(p->des3));
3316                         p++;
3317                 }
3318                 seq_printf(seq, "\n");
3319         }
3320 }
3321
3322 static int stmmac_sysfs_ring_read(struct seq_file *seq, void *v)
3323 {
3324         struct net_device *dev = seq->private;
3325         struct stmmac_priv *priv = netdev_priv(dev);
3326
3327         if (priv->extend_desc) {
3328                 seq_printf(seq, "Extended RX descriptor ring:\n");
3329                 sysfs_display_ring((void *)priv->dma_erx, DMA_RX_SIZE, 1, seq);
3330                 seq_printf(seq, "Extended TX descriptor ring:\n");
3331                 sysfs_display_ring((void *)priv->dma_etx, DMA_TX_SIZE, 1, seq);
3332         } else {
3333                 seq_printf(seq, "RX descriptor ring:\n");
3334                 sysfs_display_ring((void *)priv->dma_rx, DMA_RX_SIZE, 0, seq);
3335                 seq_printf(seq, "TX descriptor ring:\n");
3336                 sysfs_display_ring((void *)priv->dma_tx, DMA_TX_SIZE, 0, seq);
3337         }
3338
3339         return 0;
3340 }
3341
3342 static int stmmac_sysfs_ring_open(struct inode *inode, struct file *file)
3343 {
3344         return single_open(file, stmmac_sysfs_ring_read, inode->i_private);
3345 }
3346
3347 /* Debugfs files, should appear in /sys/kernel/debug/stmmaceth/eth0 */
3348
3349 static const struct file_operations stmmac_rings_status_fops = {
3350         .owner = THIS_MODULE,
3351         .open = stmmac_sysfs_ring_open,
3352         .read = seq_read,
3353         .llseek = seq_lseek,
3354         .release = single_release,
3355 };
3356
3357 static int stmmac_sysfs_dma_cap_read(struct seq_file *seq, void *v)
3358 {
3359         struct net_device *dev = seq->private;
3360         struct stmmac_priv *priv = netdev_priv(dev);
3361
3362         if (!priv->hw_cap_support) {
3363                 seq_printf(seq, "DMA HW features not supported\n");
3364                 return 0;
3365         }
3366
3367         seq_printf(seq, "==============================\n");
3368         seq_printf(seq, "\tDMA HW features\n");
3369         seq_printf(seq, "==============================\n");
3370
3371         seq_printf(seq, "\t10/100 Mbps: %s\n",
3372                    (priv->dma_cap.mbps_10_100) ? "Y" : "N");
3373         seq_printf(seq, "\t1000 Mbps: %s\n",
3374                    (priv->dma_cap.mbps_1000) ? "Y" : "N");
3375         seq_printf(seq, "\tHalf duplex: %s\n",
3376                    (priv->dma_cap.half_duplex) ? "Y" : "N");
3377         seq_printf(seq, "\tHash Filter: %s\n",
3378                    (priv->dma_cap.hash_filter) ? "Y" : "N");
3379         seq_printf(seq, "\tMultiple MAC address registers: %s\n",
3380                    (priv->dma_cap.multi_addr) ? "Y" : "N");
3381         seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n",
3382                    (priv->dma_cap.pcs) ? "Y" : "N");
3383         seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
3384                    (priv->dma_cap.sma_mdio) ? "Y" : "N");
3385         seq_printf(seq, "\tPMT Remote wake up: %s\n",
3386                    (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N");
3387         seq_printf(seq, "\tPMT Magic Frame: %s\n",
3388                    (priv->dma_cap.pmt_magic_frame) ? "Y" : "N");
3389         seq_printf(seq, "\tRMON module: %s\n",
3390                    (priv->dma_cap.rmon) ? "Y" : "N");
3391         seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
3392                    (priv->dma_cap.time_stamp) ? "Y" : "N");
3393         seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
3394                    (priv->dma_cap.atime_stamp) ? "Y" : "N");
3395         seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n",
3396                    (priv->dma_cap.eee) ? "Y" : "N");
3397         seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
3398         seq_printf(seq, "\tChecksum Offload in TX: %s\n",
3399                    (priv->dma_cap.tx_coe) ? "Y" : "N");
3400         if (priv->synopsys_id >= DWMAC_CORE_4_00) {
3401                 seq_printf(seq, "\tIP Checksum Offload in RX: %s\n",
3402                            (priv->dma_cap.rx_coe) ? "Y" : "N");
3403         } else {
3404                 seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
3405                            (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
3406                 seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
3407                            (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
3408         }
3409         seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
3410                    (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
3411         seq_printf(seq, "\tNumber of Additional RX channels: %d\n",
3412                    priv->dma_cap.number_rx_channel);
3413         seq_printf(seq, "\tNumber of Additional TX channels: %d\n",
3414                    priv->dma_cap.number_tx_channel);
3415         seq_printf(seq, "\tEnhanced descriptors: %s\n",
3416                    (priv->dma_cap.enh_desc) ? "Y" : "N");
3417
3418         return 0;
3419 }
3420
3421 static int stmmac_sysfs_dma_cap_open(struct inode *inode, struct file *file)
3422 {
3423         return single_open(file, stmmac_sysfs_dma_cap_read, inode->i_private);
3424 }
3425
3426 static const struct file_operations stmmac_dma_cap_fops = {
3427         .owner = THIS_MODULE,
3428         .open = stmmac_sysfs_dma_cap_open,
3429         .read = seq_read,
3430         .llseek = seq_lseek,
3431         .release = single_release,
3432 };
3433
3434 static int stmmac_init_fs(struct net_device *dev)
3435 {
3436         struct stmmac_priv *priv = netdev_priv(dev);
3437
3438         /* Create per netdev entries */
3439         priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);
3440
3441         if (!priv->dbgfs_dir || IS_ERR(priv->dbgfs_dir)) {
3442                 netdev_err(priv->dev, "ERROR failed to create debugfs directory\n");
3443
3444                 return -ENOMEM;
3445         }
3446
3447         /* Entry to report DMA RX/TX rings */
3448         priv->dbgfs_rings_status =
3449                 debugfs_create_file("descriptors_status", S_IRUGO,
3450                                     priv->dbgfs_dir, dev,
3451                                     &stmmac_rings_status_fops);
3452
3453         if (!priv->dbgfs_rings_status || IS_ERR(priv->dbgfs_rings_status)) {
3454                 netdev_err(priv->dev, "ERROR creating stmmac ring debugfs file\n");
3455                 debugfs_remove_recursive(priv->dbgfs_dir);
3456
3457                 return -ENOMEM;
3458         }
3459
3460         /* Entry to report the DMA HW features */
3461         priv->dbgfs_dma_cap = debugfs_create_file("dma_cap", S_IRUGO,
3462                                             priv->dbgfs_dir,
3463                                             dev, &stmmac_dma_cap_fops);
3464
3465         if (!priv->dbgfs_dma_cap || IS_ERR(priv->dbgfs_dma_cap)) {
3466                 netdev_err(priv->dev, "ERROR creating stmmac MMC debugfs file\n");
3467                 debugfs_remove_recursive(priv->dbgfs_dir);
3468
3469                 return -ENOMEM;
3470         }
3471
3472         return 0;
3473 }
3474
3475 static void stmmac_exit_fs(struct net_device *dev)
3476 {
3477         struct stmmac_priv *priv = netdev_priv(dev);
3478
3479         debugfs_remove_recursive(priv->dbgfs_dir);
3480 }
3481 #endif /* CONFIG_DEBUG_FS */
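
/* Editor's usage note: with CONFIG_DEBUG_FS enabled, the entries created
 * above can be read directly (paths assume a device named eth0 and that
 * STMMAC_RESOURCE_NAME expands to "stmmaceth", as the comment above the
 * file_operations suggests):
 *
 *      cat /sys/kernel/debug/stmmaceth/eth0/descriptors_status
 *      cat /sys/kernel/debug/stmmaceth/eth0/dma_cap
 */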
3482
3483 static const struct net_device_ops stmmac_netdev_ops = {
3484         .ndo_open = stmmac_open,
3485         .ndo_start_xmit = stmmac_xmit,
3486         .ndo_stop = stmmac_release,
3487         .ndo_change_mtu = stmmac_change_mtu,
3488         .ndo_fix_features = stmmac_fix_features,
3489         .ndo_set_features = stmmac_set_features,
3490         .ndo_set_rx_mode = stmmac_set_rx_mode,
3491         .ndo_tx_timeout = stmmac_tx_timeout,
3492         .ndo_do_ioctl = stmmac_ioctl,
3493 #ifdef CONFIG_NET_POLL_CONTROLLER
3494         .ndo_poll_controller = stmmac_poll_controller,
3495 #endif
3496         .ndo_set_mac_address = eth_mac_addr,
3497 };
3498
3499 /**
3500  *  stmmac_hw_init - Init the MAC device
3501  *  @priv: driver private structure
3502  *  Description: this function is to configure the MAC device according to
3503  *  some platform parameters or the HW capability register. It prepares the
3504  *  driver to use either ring or chain modes and to setup either enhanced or
3505  *  normal descriptors.
3506  */
3507 static int stmmac_hw_init(struct stmmac_priv *priv)
3508 {
3509         struct mac_device_info *mac;
3510
3511         /* Identify the MAC HW device */
3512         if (priv->plat->has_gmac) {
3513                 priv->dev->priv_flags |= IFF_UNICAST_FLT;
3514                 mac = dwmac1000_setup(priv->ioaddr,
3515                                       priv->plat->multicast_filter_bins,
3516                                       priv->plat->unicast_filter_entries,
3517                                       &priv->synopsys_id);
3518         } else if (priv->plat->has_gmac4) {
3519                 priv->dev->priv_flags |= IFF_UNICAST_FLT;
3520                 mac = dwmac4_setup(priv->ioaddr,
3521                                    priv->plat->multicast_filter_bins,
3522                                    priv->plat->unicast_filter_entries,
3523                                    &priv->synopsys_id);
3524         } else {
3525                 mac = dwmac100_setup(priv->ioaddr, &priv->synopsys_id);
3526         }
3527         if (!mac)
3528                 return -ENOMEM;
3529
3530         priv->hw = mac;
3531
3532         /* To use the chained or ring mode */
3533         if (priv->synopsys_id >= DWMAC_CORE_4_00) {
3534                 priv->hw->mode = &dwmac4_ring_mode_ops;
3535         } else {
3536                 if (chain_mode) {
3537                         priv->hw->mode = &chain_mode_ops;
3538                         dev_info(priv->device, "Chain mode enabled\n");
3539                         priv->mode = STMMAC_CHAIN_MODE;
3540                 } else {
3541                         priv->hw->mode = &ring_mode_ops;
3542                         dev_info(priv->device, "Ring mode enabled\n");
3543                         priv->mode = STMMAC_RING_MODE;
3544                 }
3545         }
3546
3547         /* Get the HW capabilities (GMAC cores newer than 3.50a) */
3548         priv->hw_cap_support = stmmac_get_hw_features(priv);
3549         if (priv->hw_cap_support) {
3550                 dev_info(priv->device, "DMA HW capability register supported\n");
3551
3552                 /* We can override some gmac/dma configuration fields
3553                  * passed through the platform (e.g. enh_desc, tx_coe)
3554                  * with the values from the HW capability register
3555                  * (if supported).
3556                  */
3557                 priv->plat->enh_desc = priv->dma_cap.enh_desc;
3558                 priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up;
3559                 priv->hw->pmt = priv->plat->pmt;
3560
3561                 /* TXCOE doesn't work in thresh DMA mode */
3562                 if (priv->plat->force_thresh_dma_mode)
3563                         priv->plat->tx_coe = 0;
3564                 else
3565                         priv->plat->tx_coe = priv->dma_cap.tx_coe;
3566
3567                 /* In case of GMAC4 rx_coe is from HW cap register. */
3568                 priv->plat->rx_coe = priv->dma_cap.rx_coe;
3569
3570                 if (priv->dma_cap.rx_coe_type2)
3571                         priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
3572                 else if (priv->dma_cap.rx_coe_type1)
3573                         priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;
3574
3575         } else {
3576                 dev_info(priv->device, "No HW DMA feature register supported\n");
3577         }
3578
3579         /* To use alternate (extended), normal or GMAC4 descriptor structures */
3580         if (priv->synopsys_id >= DWMAC_CORE_4_00)
3581                 priv->hw->desc = &dwmac4_desc_ops;
3582         else
3583                 stmmac_selec_desc_mode(priv);
3584
3585         if (priv->plat->rx_coe) {
3586                 priv->hw->rx_csum = priv->plat->rx_coe;
3587                 dev_info(priv->device, "RX Checksum Offload Engine supported\n");
3588                 if (priv->synopsys_id < DWMAC_CORE_4_00)
3589                         dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum);
3590         }
3591         if (priv->plat->tx_coe)
3592                 dev_info(priv->device, "TX Checksum insertion supported\n");
3593
3594         if (priv->plat->pmt) {
3595                 dev_info(priv->device, "Wake-Up On LAN supported\n");
3596                 device_set_wakeup_capable(priv->device, 1);
3597         }
3598
3599         if (priv->dma_cap.tsoen)
3600                 dev_info(priv->device, "TSO supported\n");
3601
3602         return 0;
3603 }
3604
3605 /**
3606  * stmmac_dvr_probe
3607  * @device: device pointer
3608  * @plat_dat: platform data pointer
3609  * @res: stmmac resource pointer
3610  * Description: this is the main probe function, used to
3611  * call alloc_etherdev and allocate the priv structure.
3612  * Return:
3613  * returns 0 on success, otherwise errno.
3614  */
3615 int stmmac_dvr_probe(struct device *device,
3616                      struct plat_stmmacenet_data *plat_dat,
3617                      struct stmmac_resources *res)
3618 {
3619         int ret = 0;
3620         struct net_device *ndev = NULL;
3621         struct stmmac_priv *priv;
3622
3623         ndev = alloc_etherdev(sizeof(struct stmmac_priv));
3624         if (!ndev)
3625                 return -ENOMEM;
3626
3627         SET_NETDEV_DEV(ndev, device);
3628
3629         priv = netdev_priv(ndev);
3630         priv->device = device;
3631         priv->dev = ndev;
3632
3633         stmmac_set_ethtool_ops(ndev);
3634         priv->pause = pause;
3635         priv->plat = plat_dat;
3636         priv->ioaddr = res->addr;
3637         priv->dev->base_addr = (unsigned long)res->addr;
3638
3639         priv->dev->irq = res->irq;
3640         priv->wol_irq = res->wol_irq;
3641         priv->lpi_irq = res->lpi_irq;
3642
3643         if (res->mac)
3644                 memcpy(priv->dev->dev_addr, res->mac, ETH_ALEN);
3645
3646         dev_set_drvdata(device, priv->dev);
3647
3648         /* Verify driver arguments */
3649         stmmac_verify_args();
3650
3651         /* Override with kernel parameters if supplied XXX CRS XXX
3652          * this needs to have multiple instances
3653          */
3654         if ((phyaddr >= 0) && (phyaddr <= 31))
3655                 priv->plat->phy_addr = phyaddr;
3656
3657         if (priv->plat->stmmac_rst)
3658                 reset_control_deassert(priv->plat->stmmac_rst);
3659
3660         /* Init MAC and get the capabilities */
3661         ret = stmmac_hw_init(priv);
3662         if (ret)
3663                 goto error_hw_init;
3664
3665         ndev->netdev_ops = &stmmac_netdev_ops;
3666
3667         ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
3668                             NETIF_F_RXCSUM;
3669
3670         if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
3671                 ndev->hw_features |= NETIF_F_TSO;
3672                 priv->tso = true;
3673                 dev_info(priv->device, "TSO feature enabled\n");
3674         }
3675         ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
3676         ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
3677 #ifdef STMMAC_VLAN_TAG_USED
3678         /* Both mac100 and gmac support receive VLAN tag detection */
3679         ndev->features |= NETIF_F_HW_VLAN_CTAG_RX;
3680 #endif
3681         priv->msg_enable = netif_msg_init(debug, default_msg_level);
3682
3683         /* MTU range: 46 - hw-specific max */
3684         ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
3685         if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
3686                 ndev->max_mtu = JUMBO_LEN;
3687         else
3688                 ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
3689         /* Do not overwrite ndev->max_mtu if plat->maxmtu > ndev->max_mtu,
3690          * nor if plat->maxmtu < ndev->min_mtu, which is an invalid range.
3691          */
3692         if ((priv->plat->maxmtu < ndev->max_mtu) &&
3693             (priv->plat->maxmtu >= ndev->min_mtu))
3694                 ndev->max_mtu = priv->plat->maxmtu;
3695         else if (priv->plat->maxmtu < ndev->min_mtu)
3696                 dev_warn(priv->device,
3697                          "%s: warning: maxmtu has an invalid value (%d)\n",
3698                          __func__, priv->plat->maxmtu);
3699
3700         if (flow_ctrl)
3701                 priv->flow_ctrl = FLOW_AUTO;    /* RX/TX pause on */
3702
3703         /* The Rx Watchdog is available in cores newer than 3.40.
3704          * In some cases, for example on buggy HW, this feature
3705          * has to be disabled; this can be done by passing the
3706          * riwt_off field from the platform.
3707          */
3708         if ((priv->synopsys_id >= DWMAC_CORE_3_50) && (!priv->plat->riwt_off)) {
3709                 priv->use_riwt = 1;
3710                 dev_info(priv->device,
3711                          "Enable RX Mitigation via HW Watchdog Timer\n");
3712         }
3713
3714         netif_napi_add(ndev, &priv->napi, stmmac_poll, 64);
3715
3716         spin_lock_init(&priv->lock);
3717
3718         /* If a specific clk_csr value is passed from the platform
3719          * this means that the CSR Clock Range selection cannot be
3720          * changed at run-time and it is fixed. Otherwise the driver will
3721          * try to set the MDC clock dynamically according to the actual
3722          * csr clock input.
3723          */
3724         if (!priv->plat->clk_csr)
3725                 stmmac_clk_csr_set(priv);
3726         else
3727                 priv->clk_csr = priv->plat->clk_csr;
3728
3729         stmmac_check_pcs_mode(priv);
3730
3731         if (priv->hw->pcs != STMMAC_PCS_RGMII  &&
3732             priv->hw->pcs != STMMAC_PCS_TBI &&
3733             priv->hw->pcs != STMMAC_PCS_RTBI) {
3734                 /* MDIO bus Registration */
3735                 ret = stmmac_mdio_register(ndev);
3736                 if (ret < 0) {
3737                         dev_err(priv->device,
3738                                 "%s: MDIO bus (id: %d) registration failed",
3739                                 __func__, priv->plat->bus_id);
3740                         goto error_mdio_register;
3741                 }
3742         }
3743
3744         ret = register_netdev(ndev);
3745         if (ret) {
3746                 dev_err(priv->device, "%s: ERROR %i registering the device\n",
3747                         __func__, ret);
3748                 goto error_netdev_register;
3749         }
3750
3751         return ret;
3752
3753 error_netdev_register:
3754         if (priv->hw->pcs != STMMAC_PCS_RGMII &&
3755             priv->hw->pcs != STMMAC_PCS_TBI &&
3756             priv->hw->pcs != STMMAC_PCS_RTBI)
3757                 stmmac_mdio_unregister(ndev);
3758 error_mdio_register:
3759         netif_napi_del(&priv->napi);
3760 error_hw_init:
3761         free_netdev(ndev);
3762
3763         return ret;
3764 }
3765 EXPORT_SYMBOL_GPL(stmmac_dvr_probe);
3766
3767 /**
3768  * stmmac_dvr_remove
3769  * @dev: device pointer
3770  * Description: this function resets the TX/RX processes, disables the MAC
3771  * RX/TX, changes the link status and releases the DMA descriptor rings.
3772  */
3773 int stmmac_dvr_remove(struct device *dev)
3774 {
3775         struct net_device *ndev = dev_get_drvdata(dev);
3776         struct stmmac_priv *priv = netdev_priv(ndev);
3777
3778         netdev_info(priv->dev, "%s: removing driver\n", __func__);
3779
3780         stmmac_stop_all_dma(priv);
3781
3782         priv->hw->mac->set_mac(priv->ioaddr, false);
3783         netif_carrier_off(ndev);
3784         unregister_netdev(ndev);
3785         if (priv->plat->stmmac_rst)
3786                 reset_control_assert(priv->plat->stmmac_rst);
3787         clk_disable_unprepare(priv->plat->pclk);
3788         clk_disable_unprepare(priv->plat->stmmac_clk);
3789         if (priv->hw->pcs != STMMAC_PCS_RGMII &&
3790             priv->hw->pcs != STMMAC_PCS_TBI &&
3791             priv->hw->pcs != STMMAC_PCS_RTBI)
3792                 stmmac_mdio_unregister(ndev);
3793         free_netdev(ndev);
3794
3795         return 0;
3796 }
3797 EXPORT_SYMBOL_GPL(stmmac_dvr_remove);
3798
3799 /**
3800  * stmmac_suspend - suspend callback
3801  * @dev: device pointer
3802  * Description: this is the function to suspend the device; it is called
3803  * by the platform driver to stop the network queue, release the resources,
3804  * program the PMT register (for WoL) and clean up driver resources.
3805  */
3806 int stmmac_suspend(struct device *dev)
3807 {
3808         struct net_device *ndev = dev_get_drvdata(dev);
3809         struct stmmac_priv *priv = netdev_priv(ndev);
3810         unsigned long flags;
3811
3812         if (!ndev || !netif_running(ndev))
3813                 return 0;
3814
3815         if (ndev->phydev)
3816                 phy_stop(ndev->phydev);
3817
3818         spin_lock_irqsave(&priv->lock, flags);
3819
3820         netif_device_detach(ndev);
3821         netif_stop_queue(ndev);
3822
3823         napi_disable(&priv->napi);
3824
3825         /* Stop TX/RX DMA */
3826         stmmac_stop_all_dma(priv);
3827
3828         /* Enable Power down mode by programming the PMT regs */
3829         if (device_may_wakeup(priv->device)) {
3830                 priv->hw->mac->pmt(priv->hw, priv->wolopts);
3831                 priv->irq_wake = 1;
3832         } else {
3833                 priv->hw->mac->set_mac(priv->ioaddr, false);
3834                 pinctrl_pm_select_sleep_state(priv->device);
3835                 /* Disable clocks in case PWM is off */
3836                 clk_disable(priv->plat->pclk);
3837                 clk_disable(priv->plat->stmmac_clk);
3838         }
3839         spin_unlock_irqrestore(&priv->lock, flags);
3840
3841         priv->oldlink = 0;
3842         priv->speed = SPEED_UNKNOWN;
3843         priv->oldduplex = DUPLEX_UNKNOWN;
3844         return 0;
3845 }
3846 EXPORT_SYMBOL_GPL(stmmac_suspend);
3847
3848 /**
3849  * stmmac_resume - resume callback
3850  * @dev: device pointer
3851  * Description: on resume, this function is invoked to set up the DMA and
3852  * CORE in a usable state.
3853  */
3854 int stmmac_resume(struct device *dev)
3855 {
3856         struct net_device *ndev = dev_get_drvdata(dev);
3857         struct stmmac_priv *priv = netdev_priv(ndev);
3858         unsigned long flags;
3859
3860         if (!netif_running(ndev))
3861                 return 0;
3862
3863         /* The Power Down bit in the PM register is cleared
3864          * automatically as soon as a magic packet or a Wake-up frame
3865          * is received. Anyway, it's better to manually clear
3866          * this bit because it can generate problems while resuming
3867          * from other devices (e.g. serial console).
3868          */
3869         if (device_may_wakeup(priv->device)) {
3870                 spin_lock_irqsave(&priv->lock, flags);
3871                 priv->hw->mac->pmt(priv->hw, 0);
3872                 spin_unlock_irqrestore(&priv->lock, flags);
3873                 priv->irq_wake = 0;
3874         } else {
3875                 pinctrl_pm_select_default_state(priv->device);
3876                 /* enable the clk previously disabled */
3877                 clk_enable(priv->plat->stmmac_clk);
3878                 clk_enable(priv->plat->pclk);
3879                 /* reset the phy so that it's ready */
3880                 if (priv->mii)
3881                         stmmac_mdio_reset(priv->mii);
3882         }
3883
3884         netif_device_attach(ndev);
3885
3886         spin_lock_irqsave(&priv->lock, flags);
3887
3888         priv->cur_rx = 0;
3889         priv->dirty_rx = 0;
3890         priv->dirty_tx = 0;
3891         priv->cur_tx = 0;
3892         /* reset private mss value to force mss context settings at
3893          * next tso xmit (only used for gmac4).
3894          */
3895         priv->mss = 0;
3896
3897         stmmac_clear_descriptors(priv);
3898
3899         stmmac_hw_setup(ndev, false);
3900         stmmac_init_tx_coalesce(priv);
3901         stmmac_set_rx_mode(ndev);
3902
3903         napi_enable(&priv->napi);
3904
3905         netif_start_queue(ndev);
3906
3907         spin_unlock_irqrestore(&priv->lock, flags);
3908
3909         if (ndev->phydev)
3910                 phy_start(ndev->phydev);
3911
3912         return 0;
3913 }
3914 EXPORT_SYMBOL_GPL(stmmac_resume);
3915
3916 #ifndef MODULE
3917 static int __init stmmac_cmdline_opt(char *str)
3918 {
3919         char *opt;
3920
3921         if (!str || !*str)
3922                 return -EINVAL;
3923         while ((opt = strsep(&str, ",")) != NULL) {
3924                 if (!strncmp(opt, "debug:", 6)) {
3925                         if (kstrtoint(opt + 6, 0, &debug))
3926                                 goto err;
3927                 } else if (!strncmp(opt, "phyaddr:", 8)) {
3928                         if (kstrtoint(opt + 8, 0, &phyaddr))
3929                                 goto err;
3930                 } else if (!strncmp(opt, "buf_sz:", 7)) {
3931                         if (kstrtoint(opt + 7, 0, &buf_sz))
3932                                 goto err;
3933                 } else if (!strncmp(opt, "tc:", 3)) {
3934                         if (kstrtoint(opt + 3, 0, &tc))
3935                                 goto err;
3936                 } else if (!strncmp(opt, "watchdog:", 9)) {
3937                         if (kstrtoint(opt + 9, 0, &watchdog))
3938                                 goto err;
3939                 } else if (!strncmp(opt, "flow_ctrl:", 10)) {
3940                         if (kstrtoint(opt + 10, 0, &flow_ctrl))
3941                                 goto err;
3942                 } else if (!strncmp(opt, "pause:", 6)) {
3943                         if (kstrtoint(opt + 6, 0, &pause))
3944                                 goto err;
3945                 } else if (!strncmp(opt, "eee_timer:", 10)) {
3946                         if (kstrtoint(opt + 10, 0, &eee_timer))
3947                                 goto err;
3948                 } else if (!strncmp(opt, "chain_mode:", 11)) {
3949                         if (kstrtoint(opt + 11, 0, &chain_mode))
3950                                 goto err;
3951                 }
3952         }
3953         return 0;
3954
3955 err:
3956         pr_err("%s: ERROR broken module parameter conversion\n", __func__);
3957         return -EINVAL;
3958 }
3959
3960 __setup("stmmaceth=", stmmac_cmdline_opt);
3961 #endif /* MODULE */
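
/* Editor's usage note: the __setup() hook above parses a comma-separated
 * option string from the kernel command line, one "key:value" pair per
 * option, e.g.:
 *
 *      stmmaceth=debug:16,phyaddr:1,watchdog:4000,chain_mode:1
 *
 * Each value is converted with kstrtoint() into the matching module
 * parameter; a malformed value aborts parsing with -EINVAL.
 */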
3962
3963 static int __init stmmac_init(void)
3964 {
3965 #ifdef CONFIG_DEBUG_FS
3966         /* Create debugfs main directory if it doesn't exist yet */
3967         if (!stmmac_fs_dir) {
3968                 stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
3969
3970                 if (!stmmac_fs_dir || IS_ERR(stmmac_fs_dir)) {
3971                         pr_err("ERROR %s, debugfs create directory failed\n",
3972                                STMMAC_RESOURCE_NAME);
3973
3974                         return -ENOMEM;
3975                 }
3976         }
3977 #endif
3978
3979         return 0;
3980 }
3981
3982 static void __exit stmmac_exit(void)
3983 {
3984 #ifdef CONFIG_DEBUG_FS
3985         debugfs_remove_recursive(stmmac_fs_dir);
3986 #endif
3987 }
3988
3989 module_init(stmmac_init)
3990 module_exit(stmmac_exit)
3991
3992 MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
3993 MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
3994 MODULE_LICENSE("GPL");