/*
 * AMD 10Gb Ethernet driver
 *
 * This file is available to you under your choice of the following two
 * licenses:
 *
 * License 1: GPLv2
 *
 * Copyright (c) 2014 Advanced Micro Devices, Inc.
 *
 * This file is free software; you may copy, redistribute and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 2 of the License, or (at
 * your option) any later version.
 *
 * This file is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * This file incorporates work covered by the following copyright and
 * permission notice:
 *     The Synopsys DWC ETHER XGMAC Software Driver and documentation
 *     (hereinafter "Software") is an unsupported proprietary work of Synopsys,
 *     Inc. unless otherwise expressly agreed to in writing between Synopsys
 *     and you.
 *
 *     The Software IS NOT an item of Licensed Software or Licensed Product
 *     under any End User Software License Agreement or Agreement for Licensed
 *     Product with Synopsys or any supplement thereto.  Permission is hereby
 *     granted, free of charge, to any person obtaining a copy of this software
 *     annotated with this license and the Software, to deal in the Software
 *     without restriction, including without limitation the rights to use,
 *     copy, modify, merge, publish, distribute, sublicense, and/or sell copies
 *     of the Software, and to permit persons to whom the Software is furnished
 *     to do so, subject to the following conditions:
 *
 *     The above copyright notice and this permission notice shall be included
 *     in all copies or substantial portions of the Software.
 *
 *     THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
 *     BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 *     TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
 *     PARTICULAR PURPOSE ARE HEREBY DISCLAIMED.  IN NO EVENT SHALL SYNOPSYS
 *     BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *     CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *     SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *     INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *     CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *     ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 *     THE POSSIBILITY OF SUCH DAMAGE.
 *
 *
 * License 2: Modified BSD
 *
 * Copyright (c) 2014 Advanced Micro Devices, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in the
 *       documentation and/or other materials provided with the distribution.
 *     * Neither the name of Advanced Micro Devices, Inc. nor the
 *       names of its contributors may be used to endorse or promote products
 *       derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * This file incorporates work covered by the following copyright and
 * permission notice:
 *     The Synopsys DWC ETHER XGMAC Software Driver and documentation
 *     (hereinafter "Software") is an unsupported proprietary work of Synopsys,
 *     Inc. unless otherwise expressly agreed to in writing between Synopsys
 *     and you.
 *
 *     The Software IS NOT an item of Licensed Software or Licensed Product
 *     under any End User Software License Agreement or Agreement for Licensed
 *     Product with Synopsys or any supplement thereto.  Permission is hereby
 *     granted, free of charge, to any person obtaining a copy of this software
 *     annotated with this license and the Software, to deal in the Software
 *     without restriction, including without limitation the rights to use,
 *     copy, modify, merge, publish, distribute, sublicense, and/or sell copies
 *     of the Software, and to permit persons to whom the Software is furnished
 *     to do so, subject to the following conditions:
 *
 *     The above copyright notice and this permission notice shall be included
 *     in all copies or substantial portions of the Software.
 *
 *     THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
 *     BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 *     TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
 *     PARTICULAR PURPOSE ARE HEREBY DISCLAIMED.  IN NO EVENT SHALL SYNOPSYS
 *     BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *     CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *     SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *     INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *     CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *     ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 *     THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/phy.h>
#include <linux/clk.h>
#include <linux/bitrev.h>
#include <linux/crc32.h>

#include "xgbe.h"
#include "xgbe-common.h"

static unsigned int xgbe_usec_to_riwt(struct xgbe_prv_data *pdata,
                                      unsigned int usec)
{
        unsigned long rate;
        unsigned int ret;

        DBGPR("-->xgbe_usec_to_riwt\n");

        rate = clk_get_rate(pdata->sysclk);

        /* Convert the input usec value to the watchdog timer value.  Each
         * watchdog timer value is equivalent to 256 clock cycles.
         * Calculate the required value as:
         *   ( usec * ( system_clock_hz / 10^6 ) ) / 256
         */
        ret = (usec * (rate / 1000000)) / 256;

        DBGPR("<--xgbe_usec_to_riwt\n");

        return ret;
}

static unsigned int xgbe_riwt_to_usec(struct xgbe_prv_data *pdata,
                                      unsigned int riwt)
{
        unsigned long rate;
        unsigned int ret;

        DBGPR("-->xgbe_riwt_to_usec\n");

        rate = clk_get_rate(pdata->sysclk);

        /* Convert the input watchdog timer value to the usec value.  Each
         * watchdog timer value is equivalent to 256 clock cycles.
         * Calculate the required value as:
         *   ( riwt * 256 ) / ( system_clock_hz / 10^6 )
         */
        ret = (riwt * 256) / (rate / 1000000);

        DBGPR("<--xgbe_riwt_to_usec\n");

        return ret;
}

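/* Worked example of the two conversions above (illustrative clock rate, not
 * a value taken from any particular board): with sysclk at 125 MHz,
 * rate / 10^6 = 125 clock ticks per usec.  A requested coalescing time of
 * 30 usec then programs riwt = (30 * 125) / 256 = 14, and converting back
 * gives (14 * 256) / 125 = 28 usec -- integer division makes the round trip
 * lossy, which is why ethtool may report a slightly smaller value than was
 * requested.
 */
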
static int xgbe_config_pblx8(struct xgbe_prv_data *pdata)
{
        struct xgbe_channel *channel;
        unsigned int i;

        channel = pdata->channel;
        for (i = 0; i < pdata->channel_count; i++, channel++)
                XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_CR, PBLX8,
                                       pdata->pblx8);

        return 0;
}

static int xgbe_get_tx_pbl_val(struct xgbe_prv_data *pdata)
{
        return XGMAC_DMA_IOREAD_BITS(pdata->channel, DMA_CH_TCR, PBL);
}

static int xgbe_config_tx_pbl_val(struct xgbe_prv_data *pdata)
{
        struct xgbe_channel *channel;
        unsigned int i;

        channel = pdata->channel;
        for (i = 0; i < pdata->channel_count; i++, channel++) {
                if (!channel->tx_ring)
                        break;

                XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_TCR, PBL,
                                       pdata->tx_pbl);
        }

        return 0;
}

static int xgbe_get_rx_pbl_val(struct xgbe_prv_data *pdata)
{
        return XGMAC_DMA_IOREAD_BITS(pdata->channel, DMA_CH_RCR, PBL);
}

static int xgbe_config_rx_pbl_val(struct xgbe_prv_data *pdata)
{
        struct xgbe_channel *channel;
        unsigned int i;

        channel = pdata->channel;
        for (i = 0; i < pdata->channel_count; i++, channel++) {
                if (!channel->rx_ring)
                        break;

                XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_RCR, PBL,
                                       pdata->rx_pbl);
        }

        return 0;
}

static int xgbe_config_osp_mode(struct xgbe_prv_data *pdata)
{
        struct xgbe_channel *channel;
        unsigned int i;

        channel = pdata->channel;
        for (i = 0; i < pdata->channel_count; i++, channel++) {
                if (!channel->tx_ring)
                        break;

                XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_TCR, OSP,
                                       pdata->tx_osp_mode);
        }

        return 0;
}

static int xgbe_config_rsf_mode(struct xgbe_prv_data *pdata, unsigned int val)
{
        unsigned int i;

        for (i = 0; i < pdata->rx_q_count; i++)
                XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RSF, val);

        return 0;
}

static int xgbe_config_tsf_mode(struct xgbe_prv_data *pdata, unsigned int val)
{
        unsigned int i;

        for (i = 0; i < pdata->tx_q_count; i++)
                XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TSF, val);

        return 0;
}

static int xgbe_config_rx_threshold(struct xgbe_prv_data *pdata,
                                    unsigned int val)
{
        unsigned int i;

        for (i = 0; i < pdata->rx_q_count; i++)
                XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RTC, val);

        return 0;
}

static int xgbe_config_tx_threshold(struct xgbe_prv_data *pdata,
                                    unsigned int val)
{
        unsigned int i;

        for (i = 0; i < pdata->tx_q_count; i++)
                XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TTC, val);

        return 0;
}

static int xgbe_config_rx_coalesce(struct xgbe_prv_data *pdata)
{
        struct xgbe_channel *channel;
        unsigned int i;

        channel = pdata->channel;
        for (i = 0; i < pdata->channel_count; i++, channel++) {
                if (!channel->rx_ring)
                        break;

                XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_RIWT, RWT,
                                       pdata->rx_riwt);
        }

        return 0;
}

static int xgbe_config_tx_coalesce(struct xgbe_prv_data *pdata)
{
        return 0;
}

static void xgbe_config_rx_buffer_size(struct xgbe_prv_data *pdata)
{
        struct xgbe_channel *channel;
        unsigned int i;

        channel = pdata->channel;
        for (i = 0; i < pdata->channel_count; i++, channel++) {
                if (!channel->rx_ring)
                        break;

                XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_RCR, RBSZ,
                                       pdata->rx_buf_size);
        }
}

static void xgbe_config_tso_mode(struct xgbe_prv_data *pdata)
{
        struct xgbe_channel *channel;
        unsigned int i;

        channel = pdata->channel;
        for (i = 0; i < pdata->channel_count; i++, channel++) {
                if (!channel->tx_ring)
                        break;

                XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_TCR, TSE, 1);
        }
}

static void xgbe_config_sph_mode(struct xgbe_prv_data *pdata)
{
        struct xgbe_channel *channel;
        unsigned int i;

        channel = pdata->channel;
        for (i = 0; i < pdata->channel_count; i++, channel++) {
                if (!channel->rx_ring)
                        break;

                XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_CR, SPH, 1);
        }

        XGMAC_IOWRITE_BITS(pdata, MAC_RCR, HDSMS, XGBE_SPH_HDSMS_SIZE);
}

static int xgbe_write_rss_reg(struct xgbe_prv_data *pdata, unsigned int type,
                              unsigned int index, unsigned int val)
{
        unsigned int wait;
        int ret = 0;

        mutex_lock(&pdata->rss_mutex);

        if (XGMAC_IOREAD_BITS(pdata, MAC_RSSAR, OB)) {
                ret = -EBUSY;
                goto unlock;
        }

        XGMAC_IOWRITE(pdata, MAC_RSSDR, val);

        XGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, RSSIA, index);
        XGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, ADDRT, type);
        XGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, CT, 0);
        XGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, OB, 1);

        wait = 1000;
        while (wait--) {
                if (!XGMAC_IOREAD_BITS(pdata, MAC_RSSAR, OB))
                        goto unlock;

                usleep_range(1000, 1500);
        }

        ret = -EBUSY;

unlock:
        mutex_unlock(&pdata->rss_mutex);

        return ret;
}

static int xgbe_write_rss_hash_key(struct xgbe_prv_data *pdata)
{
        unsigned int key_regs = sizeof(pdata->rss_key) / sizeof(u32);
        unsigned int *key = (unsigned int *)&pdata->rss_key;
        int ret;

        while (key_regs--) {
                ret = xgbe_write_rss_reg(pdata, XGBE_RSS_HASH_KEY_TYPE,
                                         key_regs, *key++);
                if (ret)
                        return ret;
        }

        return 0;
}

static int xgbe_write_rss_lookup_table(struct xgbe_prv_data *pdata)
{
        unsigned int i;
        int ret;

        for (i = 0; i < ARRAY_SIZE(pdata->rss_table); i++) {
                ret = xgbe_write_rss_reg(pdata,
                                         XGBE_RSS_LOOKUP_TABLE_TYPE, i,
                                         pdata->rss_table[i]);
                if (ret)
                        return ret;
        }

        return 0;
}

static int xgbe_set_rss_hash_key(struct xgbe_prv_data *pdata, const u8 *key)
{
        memcpy(pdata->rss_key, key, sizeof(pdata->rss_key));

        return xgbe_write_rss_hash_key(pdata);
}

static int xgbe_set_rss_lookup_table(struct xgbe_prv_data *pdata,
                                     const u32 *table)
{
        unsigned int i;

        for (i = 0; i < ARRAY_SIZE(pdata->rss_table); i++)
                XGMAC_SET_BITS(pdata->rss_table[i], MAC_RSSDR, DMCH, table[i]);

        return xgbe_write_rss_lookup_table(pdata);
}

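/* Minimal usage sketch for the two setters above (illustrative values,
 * mirroring what an ethtool --set-rxfh style caller might pass in): spread
 * hash buckets round-robin over four Rx DMA channels and install a randomly
 * generated key.  The array sizes assume the XGBE_RSS_* defines from xgbe.h.
 *
 *   u32 table[XGBE_RSS_MAX_TABLE_SIZE];
 *   u8 key[XGBE_RSS_HASH_KEY_SIZE];
 *   unsigned int i;
 *
 *   netdev_rss_key_fill(key, sizeof(key));
 *   for (i = 0; i < ARRAY_SIZE(table); i++)
 *           table[i] = i % 4;            // DMA channel for hash bucket i
 *
 *   xgbe_set_rss_hash_key(pdata, key);
 *   xgbe_set_rss_lookup_table(pdata, table);
 */
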
static int xgbe_enable_rss(struct xgbe_prv_data *pdata)
{
        int ret;

        if (!pdata->hw_feat.rss)
                return -EOPNOTSUPP;

        /* Program the hash key */
        ret = xgbe_write_rss_hash_key(pdata);
        if (ret)
                return ret;

        /* Program the lookup table */
        ret = xgbe_write_rss_lookup_table(pdata);
        if (ret)
                return ret;

        /* Set the RSS options */
        XGMAC_IOWRITE(pdata, MAC_RSSCR, pdata->rss_options);

        /* Enable RSS */
        XGMAC_IOWRITE_BITS(pdata, MAC_RSSCR, RSSE, 1);

        return 0;
}

static int xgbe_disable_rss(struct xgbe_prv_data *pdata)
{
        if (!pdata->hw_feat.rss)
                return -EOPNOTSUPP;

        XGMAC_IOWRITE_BITS(pdata, MAC_RSSCR, RSSE, 0);

        return 0;
}

static void xgbe_config_rss(struct xgbe_prv_data *pdata)
{
        int ret;

        if (!pdata->hw_feat.rss)
                return;

        if (pdata->netdev->features & NETIF_F_RXHASH)
                ret = xgbe_enable_rss(pdata);
        else
                ret = xgbe_disable_rss(pdata);

        if (ret)
                netdev_err(pdata->netdev,
                           "error configuring RSS, RSS disabled\n");
}

static int xgbe_disable_tx_flow_control(struct xgbe_prv_data *pdata)
{
        unsigned int max_q_count, q_count;
        unsigned int reg, reg_val;
        unsigned int i;

        /* Clear MTL flow control */
        for (i = 0; i < pdata->rx_q_count; i++)
                XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, EHFC, 0);

        /* Clear MAC flow control */
        max_q_count = XGMAC_MAX_FLOW_CONTROL_QUEUES;
        q_count = min_t(unsigned int, pdata->tx_q_count, max_q_count);
        reg = MAC_Q0TFCR;
        for (i = 0; i < q_count; i++) {
                reg_val = XGMAC_IOREAD(pdata, reg);
                XGMAC_SET_BITS(reg_val, MAC_Q0TFCR, TFE, 0);
                XGMAC_IOWRITE(pdata, reg, reg_val);

                reg += MAC_QTFCR_INC;
        }

        return 0;
}

static int xgbe_enable_tx_flow_control(struct xgbe_prv_data *pdata)
{
        unsigned int max_q_count, q_count;
        unsigned int reg, reg_val;
        unsigned int i;

        /* Set MTL flow control */
        for (i = 0; i < pdata->rx_q_count; i++)
                XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, EHFC, 1);

        /* Set MAC flow control */
        max_q_count = XGMAC_MAX_FLOW_CONTROL_QUEUES;
        q_count = min_t(unsigned int, pdata->tx_q_count, max_q_count);
        reg = MAC_Q0TFCR;
        for (i = 0; i < q_count; i++) {
                reg_val = XGMAC_IOREAD(pdata, reg);

                /* Enable transmit flow control */
                XGMAC_SET_BITS(reg_val, MAC_Q0TFCR, TFE, 1);

                /* Set pause time */
                XGMAC_SET_BITS(reg_val, MAC_Q0TFCR, PT, 0xffff);

                XGMAC_IOWRITE(pdata, reg, reg_val);

                reg += MAC_QTFCR_INC;
        }

        return 0;
}

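/* The pause time written above is in pause "quanta"; one quantum is 512 bit
 * times per IEEE 802.3 Annex 31B.  At 10 Gb/s a quantum is 51.2 ns, so the
 * maximum value of 0xffff programmed here asks the link partner to hold off
 * for roughly 65535 * 51.2 ns ~= 3.4 ms per pause frame.
 */
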
static int xgbe_disable_rx_flow_control(struct xgbe_prv_data *pdata)
{
        XGMAC_IOWRITE_BITS(pdata, MAC_RFCR, RFE, 0);

        return 0;
}

static int xgbe_enable_rx_flow_control(struct xgbe_prv_data *pdata)
{
        XGMAC_IOWRITE_BITS(pdata, MAC_RFCR, RFE, 1);

        return 0;
}

static int xgbe_config_tx_flow_control(struct xgbe_prv_data *pdata)
{
        struct ieee_pfc *pfc = pdata->pfc;

        if (pdata->tx_pause || (pfc && pfc->pfc_en))
                xgbe_enable_tx_flow_control(pdata);
        else
                xgbe_disable_tx_flow_control(pdata);

        return 0;
}

static int xgbe_config_rx_flow_control(struct xgbe_prv_data *pdata)
{
        struct ieee_pfc *pfc = pdata->pfc;

        if (pdata->rx_pause || (pfc && pfc->pfc_en))
                xgbe_enable_rx_flow_control(pdata);
        else
                xgbe_disable_rx_flow_control(pdata);

        return 0;
}

static void xgbe_config_flow_control(struct xgbe_prv_data *pdata)
{
        struct ieee_pfc *pfc = pdata->pfc;

        xgbe_config_tx_flow_control(pdata);
        xgbe_config_rx_flow_control(pdata);

        XGMAC_IOWRITE_BITS(pdata, MAC_RFCR, PFCE,
                           (pfc && pfc->pfc_en) ? 1 : 0);
}

static void xgbe_enable_dma_interrupts(struct xgbe_prv_data *pdata)
{
        struct xgbe_channel *channel;
        unsigned int dma_ch_isr, dma_ch_ier;
        unsigned int i;

        channel = pdata->channel;
        for (i = 0; i < pdata->channel_count; i++, channel++) {
                /* Clear all the interrupts which are set */
                dma_ch_isr = XGMAC_DMA_IOREAD(channel, DMA_CH_SR);
                XGMAC_DMA_IOWRITE(channel, DMA_CH_SR, dma_ch_isr);

                /* Clear all interrupt enable bits */
                dma_ch_ier = 0;

                /* Enable following interrupts
                 *   NIE  - Normal Interrupt Summary Enable
                 *   AIE  - Abnormal Interrupt Summary Enable
                 *   FBEE - Fatal Bus Error Enable
                 */
                XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, NIE, 1);
                XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, AIE, 1);
                XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, FBEE, 1);

                if (channel->tx_ring) {
                        /* Enable the following Tx interrupts
                         *   TIE - Transmit Interrupt Enable (unless using
                         *         per channel interrupts)
                         */
                        if (!pdata->per_channel_irq)
                                XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TIE, 1);
                }
                if (channel->rx_ring) {
                        /* Enable following Rx interrupts
                         *   RBUE - Receive Buffer Unavailable Enable
                         *   RIE  - Receive Interrupt Enable (unless using
                         *          per channel interrupts)
                         */
                        XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RBUE, 1);
                        if (!pdata->per_channel_irq)
                                XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RIE, 1);
                }

                XGMAC_DMA_IOWRITE(channel, DMA_CH_IER, dma_ch_ier);
        }
}

static void xgbe_enable_mtl_interrupts(struct xgbe_prv_data *pdata)
{
        unsigned int mtl_q_isr;
        unsigned int q_count, i;

        q_count = max(pdata->hw_feat.tx_q_cnt, pdata->hw_feat.rx_q_cnt);
        for (i = 0; i < q_count; i++) {
                /* Clear all the interrupts which are set */
                mtl_q_isr = XGMAC_MTL_IOREAD(pdata, i, MTL_Q_ISR);
                XGMAC_MTL_IOWRITE(pdata, i, MTL_Q_ISR, mtl_q_isr);

                /* No MTL interrupts to be enabled */
                XGMAC_MTL_IOWRITE(pdata, i, MTL_Q_IER, 0);
        }
}

static void xgbe_enable_mac_interrupts(struct xgbe_prv_data *pdata)
{
        unsigned int mac_ier = 0;

        /* Enable Timestamp interrupt */
        XGMAC_SET_BITS(mac_ier, MAC_IER, TSIE, 1);

        XGMAC_IOWRITE(pdata, MAC_IER, mac_ier);

        /* Enable all counter interrupts */
        XGMAC_IOWRITE_BITS(pdata, MMC_RIER, ALL_INTERRUPTS, 0xffffffff);
        XGMAC_IOWRITE_BITS(pdata, MMC_TIER, ALL_INTERRUPTS, 0xffffffff);
}

static int xgbe_set_gmii_speed(struct xgbe_prv_data *pdata)
{
        XGMAC_IOWRITE_BITS(pdata, MAC_TCR, SS, 0x3);

        return 0;
}

static int xgbe_set_gmii_2500_speed(struct xgbe_prv_data *pdata)
{
        XGMAC_IOWRITE_BITS(pdata, MAC_TCR, SS, 0x2);

        return 0;
}

static int xgbe_set_xgmii_speed(struct xgbe_prv_data *pdata)
{
        XGMAC_IOWRITE_BITS(pdata, MAC_TCR, SS, 0);

        return 0;
}

static int xgbe_set_promiscuous_mode(struct xgbe_prv_data *pdata,
                                     unsigned int enable)
{
        unsigned int val = enable ? 1 : 0;

        if (XGMAC_IOREAD_BITS(pdata, MAC_PFR, PR) == val)
                return 0;

        DBGPR("  %s promiscuous mode\n", enable ? "entering" : "leaving");
        XGMAC_IOWRITE_BITS(pdata, MAC_PFR, PR, val);

        return 0;
}

static int xgbe_set_all_multicast_mode(struct xgbe_prv_data *pdata,
                                       unsigned int enable)
{
        unsigned int val = enable ? 1 : 0;

        if (XGMAC_IOREAD_BITS(pdata, MAC_PFR, PM) == val)
                return 0;

        DBGPR("  %s allmulti mode\n", enable ? "entering" : "leaving");
        XGMAC_IOWRITE_BITS(pdata, MAC_PFR, PM, val);

        return 0;
}

static void xgbe_set_mac_reg(struct xgbe_prv_data *pdata,
                             struct netdev_hw_addr *ha, unsigned int *mac_reg)
{
        unsigned int mac_addr_hi, mac_addr_lo;
        u8 *mac_addr;

        mac_addr_lo = 0;
        mac_addr_hi = 0;

        if (ha) {
                mac_addr = (u8 *)&mac_addr_lo;
                mac_addr[0] = ha->addr[0];
                mac_addr[1] = ha->addr[1];
                mac_addr[2] = ha->addr[2];
                mac_addr[3] = ha->addr[3];
                mac_addr = (u8 *)&mac_addr_hi;
                mac_addr[0] = ha->addr[4];
                mac_addr[1] = ha->addr[5];

                DBGPR("  adding mac address %pM at 0x%04x\n", ha->addr,
                      *mac_reg);

                XGMAC_SET_BITS(mac_addr_hi, MAC_MACA1HR, AE, 1);
        }

        XGMAC_IOWRITE(pdata, *mac_reg, mac_addr_hi);
        *mac_reg += MAC_MACA_INC;
        XGMAC_IOWRITE(pdata, *mac_reg, mac_addr_lo);
        *mac_reg += MAC_MACA_INC;
}

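/* Example of the register packing above (illustrative address, assuming a
 * little-endian host as this driver targets): for ha->addr =
 * 02:11:22:33:44:55 the byte stores build mac_addr_lo = 0x33221102 and
 * mac_addr_hi = 0x00005544 before the AE (address enable) bit is OR'd into
 * the high word.  The hardware compares these against the first four and
 * last two bytes of a received frame's destination address.
 */
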
static void xgbe_set_mac_addn_addrs(struct xgbe_prv_data *pdata)
{
        struct net_device *netdev = pdata->netdev;
        struct netdev_hw_addr *ha;
        unsigned int mac_reg;
        unsigned int addn_macs;

        mac_reg = MAC_MACA1HR;
        addn_macs = pdata->hw_feat.addn_mac;

        if (netdev_uc_count(netdev) > addn_macs) {
                xgbe_set_promiscuous_mode(pdata, 1);
        } else {
                netdev_for_each_uc_addr(ha, netdev) {
                        xgbe_set_mac_reg(pdata, ha, &mac_reg);
                        addn_macs--;
                }

                if (netdev_mc_count(netdev) > addn_macs) {
                        xgbe_set_all_multicast_mode(pdata, 1);
                } else {
                        netdev_for_each_mc_addr(ha, netdev) {
                                xgbe_set_mac_reg(pdata, ha, &mac_reg);
                                addn_macs--;
                        }
                }
        }

        /* Clear remaining additional MAC address entries */
        while (addn_macs--)
                xgbe_set_mac_reg(pdata, NULL, &mac_reg);
}

static void xgbe_set_mac_hash_table(struct xgbe_prv_data *pdata)
{
        struct net_device *netdev = pdata->netdev;
        struct netdev_hw_addr *ha;
        unsigned int hash_reg;
        unsigned int hash_table_shift, hash_table_count;
        u32 hash_table[XGBE_MAC_HASH_TABLE_SIZE];
        u32 crc;
        unsigned int i;

        hash_table_shift = 26 - (pdata->hw_feat.hash_table_size >> 7);
        hash_table_count = pdata->hw_feat.hash_table_size / 32;
        memset(hash_table, 0, sizeof(hash_table));

        /* Build the MAC Hash Table register values */
        netdev_for_each_uc_addr(ha, netdev) {
                crc = bitrev32(~crc32_le(~0, ha->addr, ETH_ALEN));
                crc >>= hash_table_shift;
                hash_table[crc >> 5] |= (1 << (crc & 0x1f));
        }

        netdev_for_each_mc_addr(ha, netdev) {
                crc = bitrev32(~crc32_le(~0, ha->addr, ETH_ALEN));
                crc >>= hash_table_shift;
                hash_table[crc >> 5] |= (1 << (crc & 0x1f));
        }

        /* Set the MAC Hash Table registers */
        hash_reg = MAC_HTR0;
        for (i = 0; i < hash_table_count; i++) {
                XGMAC_IOWRITE(pdata, hash_reg, hash_table[i]);
                hash_reg += MAC_HTR_INC;
        }
}

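/* How the indexing above plays out (using an assumed 256-entry hash table,
 * i.e. hw_feat.hash_table_size == 256): hash_table_shift becomes
 * 26 - (256 >> 7) = 24, so the top 8 bits of the bit-reversed CRC32 survive.
 * Bits [7:5] of that value pick one of eight 32-bit MAC_HTR registers and
 * bits [4:0] pick the bit within it -- one bit per hash bucket, so distinct
 * addresses can collide and the filtering is only "imperfect" (software
 * still sees the occasional unwanted frame).
 */
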
static int xgbe_add_mac_addresses(struct xgbe_prv_data *pdata)
{
        if (pdata->hw_feat.hash_table_size)
                xgbe_set_mac_hash_table(pdata);
        else
                xgbe_set_mac_addn_addrs(pdata);

        return 0;
}

static int xgbe_set_mac_address(struct xgbe_prv_data *pdata, u8 *addr)
{
        unsigned int mac_addr_hi, mac_addr_lo;

        mac_addr_hi = (addr[5] <<  8) | (addr[4] <<  0);
        mac_addr_lo = (addr[3] << 24) | (addr[2] << 16) |
                      (addr[1] <<  8) | (addr[0] <<  0);

        XGMAC_IOWRITE(pdata, MAC_MACA0HR, mac_addr_hi);
        XGMAC_IOWRITE(pdata, MAC_MACA0LR, mac_addr_lo);

        return 0;
}

static int xgbe_read_mmd_regs(struct xgbe_prv_data *pdata, int prtad,
                              int mmd_reg)
{
        unsigned int mmd_address;
        int mmd_data;

        if (mmd_reg & MII_ADDR_C45)
                mmd_address = mmd_reg & ~MII_ADDR_C45;
        else
                mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff);

        /* The PCS registers are accessed using mmio. The underlying APB3
         * management interface uses indirect addressing to access the MMD
         * register sets. This requires accessing of the PCS register in two
         * phases, an address phase and a data phase.
         *
         * The mmio interface is based on 32-bit offsets and values. All
         * register offsets must therefore be adjusted by left shifting the
         * offset 2 bits and reading 32 bits of data.
         */
        mutex_lock(&pdata->xpcs_mutex);
        XPCS_IOWRITE(pdata, PCS_MMD_SELECT << 2, mmd_address >> 8);
        mmd_data = XPCS_IOREAD(pdata, (mmd_address & 0xff) << 2);
        mutex_unlock(&pdata->xpcs_mutex);

        return mmd_data;
}

static void xgbe_write_mmd_regs(struct xgbe_prv_data *pdata, int prtad,
                                int mmd_reg, int mmd_data)
{
        unsigned int mmd_address;

        if (mmd_reg & MII_ADDR_C45)
                mmd_address = mmd_reg & ~MII_ADDR_C45;
        else
                mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff);

        /* The PCS registers are accessed using mmio. The underlying APB3
         * management interface uses indirect addressing to access the MMD
         * register sets. This requires accessing of the PCS register in two
         * phases, an address phase and a data phase.
         *
         * The mmio interface is based on 32-bit offsets and values. All
         * register offsets must therefore be adjusted by left shifting the
         * offset 2 bits and writing 32 bits of data.
         */
        mutex_lock(&pdata->xpcs_mutex);
        XPCS_IOWRITE(pdata, PCS_MMD_SELECT << 2, mmd_address >> 8);
        XPCS_IOWRITE(pdata, (mmd_address & 0xff) << 2, mmd_data);
        mutex_unlock(&pdata->xpcs_mutex);
}

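/* Worked example of the indirect access pattern above (hypothetical
 * clause-45 register): accessing MMD 1, register 0x0005 builds
 * mmd_address = (1 << 16) | 0x0005 = 0x10005.  The address phase writes
 * 0x10005 >> 8 = 0x100 to the PCS_MMD_SELECT window register, and the data
 * phase then touches mmio offset (0x05 & 0xff) << 2 = 0x14.  Only the low
 * byte of the register number varies within a selected window, which is why
 * everything above bit 7 lives in the select register.
 */
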
static int xgbe_tx_complete(struct xgbe_ring_desc *rdesc)
{
        return !XGMAC_GET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN);
}

static int xgbe_disable_rx_csum(struct xgbe_prv_data *pdata)
{
        XGMAC_IOWRITE_BITS(pdata, MAC_RCR, IPC, 0);

        return 0;
}

static int xgbe_enable_rx_csum(struct xgbe_prv_data *pdata)
{
        XGMAC_IOWRITE_BITS(pdata, MAC_RCR, IPC, 1);

        return 0;
}

static int xgbe_enable_rx_vlan_stripping(struct xgbe_prv_data *pdata)
{
        /* Put the VLAN tag in the Rx descriptor */
        XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, EVLRXS, 1);

        /* Don't check the VLAN type */
        XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, DOVLTC, 1);

        /* Check only C-TAG (0x8100) packets */
        XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, ERSVLM, 0);

        /* Don't consider an S-TAG (0x88A8) packet as a VLAN packet */
        XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, ESVL, 0);

        /* Enable VLAN tag stripping */
        XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, EVLS, 0x3);

        return 0;
}

static int xgbe_disable_rx_vlan_stripping(struct xgbe_prv_data *pdata)
{
        XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, EVLS, 0);

        return 0;
}

static int xgbe_enable_rx_vlan_filtering(struct xgbe_prv_data *pdata)
{
        /* Enable VLAN filtering */
        XGMAC_IOWRITE_BITS(pdata, MAC_PFR, VTFE, 1);

        /* Enable VLAN Hash Table filtering */
        XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, VTHM, 1);

        /* Disable VLAN tag inverse matching */
        XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, VTIM, 0);

        /* Only filter on the lower 12-bits of the VLAN tag */
        XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, ETV, 1);

        /* In order for the VLAN Hash Table filtering to be effective,
         * the VLAN tag identifier in the VLAN Tag Register must not
         * be zero.  Set the VLAN tag identifier to "1" to enable the
         * VLAN Hash Table filtering.  This implies that a VLAN tag of
         * 1 will always pass filtering.
         */
        XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, VL, 1);

        return 0;
}

static int xgbe_disable_rx_vlan_filtering(struct xgbe_prv_data *pdata)
{
        /* Disable VLAN filtering */
        XGMAC_IOWRITE_BITS(pdata, MAC_PFR, VTFE, 0);

        return 0;
}

#define CRCPOLY_LE 0xedb88320
static u32 xgbe_vid_crc32_le(__le16 vid_le)
{
        u32 poly = CRCPOLY_LE;
        u32 crc = ~0;
        u32 temp = 0;
        unsigned char *data = (unsigned char *)&vid_le;
        unsigned char data_byte = 0;
        int i, bits;

        bits = get_bitmask_order(VLAN_VID_MASK);
        for (i = 0; i < bits; i++) {
                if ((i % 8) == 0)
                        data_byte = data[i / 8];

                temp = ((crc & 1) ^ data_byte) & 1;
                crc >>= 1;
                data_byte >>= 1;

                if (temp)
                        crc ^= poly;
        }

        return crc;
}

static int xgbe_update_vlan_hash_table(struct xgbe_prv_data *pdata)
{
        u32 crc;
        u16 vid;
        __le16 vid_le;
        u16 vlan_hash_table = 0;

        /* Generate the VLAN Hash Table value */
        for_each_set_bit(vid, pdata->active_vlans, VLAN_N_VID) {
                /* Get the CRC32 value of the VLAN ID */
                vid_le = cpu_to_le16(vid);
                crc = bitrev32(~xgbe_vid_crc32_le(vid_le)) >> 28;

                vlan_hash_table |= (1 << crc);
        }

        /* Set the VLAN Hash Table filtering register */
        XGMAC_IOWRITE_BITS(pdata, MAC_VLANHTR, VLHT, vlan_hash_table);

        return 0;
}

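/* Note on the table built above: only the top four bits of the reversed CRC
 * are kept (the ">> 28"), so all 4094 usable VIDs fold onto a 16-bit hash
 * table.  As with the MAC hash filter, this is imperfect matching -- a set
 * bit admits every VID that hashes to it, and software (the VLAN core)
 * discards the leftovers.
 */
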
static void xgbe_tx_desc_reset(struct xgbe_ring_data *rdata)
{
        struct xgbe_ring_desc *rdesc = rdata->rdesc;

        /* Reset the Tx descriptor
         *   Set buffer 1 (lo) address to zero
         *   Set buffer 1 (hi) address to zero
         *   Reset all other control bits (IC, TTSE, B2L & B1L)
         *   Reset all other control bits (OWN, CTXT, FD, LD, CPC, CIC, etc)
         */
        rdesc->desc0 = 0;
        rdesc->desc1 = 0;
        rdesc->desc2 = 0;
        rdesc->desc3 = 0;

        /* Make sure ownership is written to the descriptor */
        dma_wmb();
}

static void xgbe_tx_desc_init(struct xgbe_channel *channel)
{
        struct xgbe_ring *ring = channel->tx_ring;
        struct xgbe_ring_data *rdata;
        int i;
        int start_index = ring->cur;

        DBGPR("-->tx_desc_init\n");

        /* Initialize all descriptors */
        for (i = 0; i < ring->rdesc_count; i++) {
                rdata = XGBE_GET_DESC_DATA(ring, i);

                /* Initialize Tx descriptor */
                xgbe_tx_desc_reset(rdata);
        }

        /* Update the total number of Tx descriptors */
        XGMAC_DMA_IOWRITE(channel, DMA_CH_TDRLR, ring->rdesc_count - 1);

        /* Update the starting address of descriptor ring */
        rdata = XGBE_GET_DESC_DATA(ring, start_index);
        XGMAC_DMA_IOWRITE(channel, DMA_CH_TDLR_HI,
                          upper_32_bits(rdata->rdesc_dma));
        XGMAC_DMA_IOWRITE(channel, DMA_CH_TDLR_LO,
                          lower_32_bits(rdata->rdesc_dma));

        DBGPR("<--tx_desc_init\n");
}

static void xgbe_rx_desc_reset(struct xgbe_ring_data *rdata)
{
        struct xgbe_ring_desc *rdesc = rdata->rdesc;

        /* Reset the Rx descriptor
         *   Set buffer 1 (lo) address to header dma address (lo)
         *   Set buffer 1 (hi) address to header dma address (hi)
         *   Set buffer 2 (lo) address to buffer dma address (lo)
         *   Set buffer 2 (hi) address to buffer dma address (hi) and
         *     set control bits OWN and INTE
         */
        rdesc->desc0 = cpu_to_le32(lower_32_bits(rdata->rx.hdr.dma));
        rdesc->desc1 = cpu_to_le32(upper_32_bits(rdata->rx.hdr.dma));
        rdesc->desc2 = cpu_to_le32(lower_32_bits(rdata->rx.buf.dma));
        rdesc->desc3 = cpu_to_le32(upper_32_bits(rdata->rx.buf.dma));

        XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, INTE,
                          rdata->interrupt ? 1 : 0);

        /* Since the Rx DMA engine is likely running, make sure everything
         * is written to the descriptor(s) before setting the OWN bit
         * for the descriptor
         */
        dma_wmb();

        XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, OWN, 1);

        /* Make sure ownership is written to the descriptor */
        dma_wmb();
}

static void xgbe_rx_desc_init(struct xgbe_channel *channel)
{
        struct xgbe_prv_data *pdata = channel->pdata;
        struct xgbe_ring *ring = channel->rx_ring;
        struct xgbe_ring_data *rdata;
        unsigned int start_index = ring->cur;
        unsigned int rx_coalesce, rx_frames;
        unsigned int i;

        DBGPR("-->rx_desc_init\n");

        rx_coalesce = (pdata->rx_riwt || pdata->rx_frames) ? 1 : 0;
        rx_frames = pdata->rx_frames;

        /* Initialize all descriptors */
        for (i = 0; i < ring->rdesc_count; i++) {
                rdata = XGBE_GET_DESC_DATA(ring, i);

                /* Set interrupt on completion bit as appropriate */
                if (rx_coalesce && (!rx_frames || ((i + 1) % rx_frames)))
                        rdata->interrupt = 0;
                else
                        rdata->interrupt = 1;

                /* Initialize Rx descriptor */
                xgbe_rx_desc_reset(rdata);
        }

        /* Update the total number of Rx descriptors */
        XGMAC_DMA_IOWRITE(channel, DMA_CH_RDRLR, ring->rdesc_count - 1);

        /* Update the starting address of descriptor ring */
        rdata = XGBE_GET_DESC_DATA(ring, start_index);
        XGMAC_DMA_IOWRITE(channel, DMA_CH_RDLR_HI,
                          upper_32_bits(rdata->rdesc_dma));
        XGMAC_DMA_IOWRITE(channel, DMA_CH_RDLR_LO,
                          lower_32_bits(rdata->rdesc_dma));

        /* Update the Rx Descriptor Tail Pointer */
        rdata = XGBE_GET_DESC_DATA(ring, start_index + ring->rdesc_count - 1);
        XGMAC_DMA_IOWRITE(channel, DMA_CH_RDTR_LO,
                          lower_32_bits(rdata->rdesc_dma));

        DBGPR("<--rx_desc_init\n");
}

static void xgbe_update_tstamp_addend(struct xgbe_prv_data *pdata,
                                      unsigned int addend)
{
        /* Set the addend register value and tell the device */
        XGMAC_IOWRITE(pdata, MAC_TSAR, addend);
        XGMAC_IOWRITE_BITS(pdata, MAC_TSCR, TSADDREG, 1);

        /* Wait for addend update to complete */
        while (XGMAC_IOREAD_BITS(pdata, MAC_TSCR, TSADDREG))
                udelay(5);
}

static void xgbe_set_tstamp_time(struct xgbe_prv_data *pdata, unsigned int sec,
                                 unsigned int nsec)
{
        /* Set the time values and tell the device */
        XGMAC_IOWRITE(pdata, MAC_STSUR, sec);
        XGMAC_IOWRITE(pdata, MAC_STNUR, nsec);
        XGMAC_IOWRITE_BITS(pdata, MAC_TSCR, TSINIT, 1);

        /* Wait for time update to complete */
        while (XGMAC_IOREAD_BITS(pdata, MAC_TSCR, TSINIT))
                udelay(5);
}

static u64 xgbe_get_tstamp_time(struct xgbe_prv_data *pdata)
{
        u64 nsec;

        nsec = XGMAC_IOREAD(pdata, MAC_STSR);
        nsec *= NSEC_PER_SEC;
        nsec += XGMAC_IOREAD(pdata, MAC_STNR);

        return nsec;
}

static u64 xgbe_get_tx_tstamp(struct xgbe_prv_data *pdata)
{
        unsigned int tx_snr;
        u64 nsec;

        tx_snr = XGMAC_IOREAD(pdata, MAC_TXSNR);
        if (XGMAC_GET_BITS(tx_snr, MAC_TXSNR, TXTSSTSMIS))
                return 0;

        nsec = XGMAC_IOREAD(pdata, MAC_TXSSR);
        nsec *= NSEC_PER_SEC;
        nsec += tx_snr;

        return nsec;
}

static void xgbe_get_rx_tstamp(struct xgbe_packet_data *packet,
                               struct xgbe_ring_desc *rdesc)
{
        u64 nsec;

        if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_CONTEXT_DESC3, TSA) &&
            !XGMAC_GET_BITS_LE(rdesc->desc3, RX_CONTEXT_DESC3, TSD)) {
                nsec = le32_to_cpu(rdesc->desc1);
                nsec <<= 32;
                nsec |= le32_to_cpu(rdesc->desc0);
                if (nsec != 0xffffffffffffffffULL) {
                        packet->rx_tstamp = nsec;
                        XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
                                       RX_TSTAMP, 1);
                }
        }
}

static int xgbe_config_tstamp(struct xgbe_prv_data *pdata,
                              unsigned int mac_tscr)
{
        /* Set one nano-second accuracy */
        XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSCTRLSSR, 1);

        /* Set fine timestamp update */
        XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSCFUPDT, 1);

        /* Overwrite earlier timestamps */
        XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TXTSSTSM, 1);

        XGMAC_IOWRITE(pdata, MAC_TSCR, mac_tscr);

        /* Exit if timestamping is not enabled */
        if (!XGMAC_GET_BITS(mac_tscr, MAC_TSCR, TSENA))
                return 0;

        /* Initialize time registers */
        XGMAC_IOWRITE_BITS(pdata, MAC_SSIR, SSINC, XGBE_TSTAMP_SSINC);
        XGMAC_IOWRITE_BITS(pdata, MAC_SSIR, SNSINC, XGBE_TSTAMP_SNSINC);
        xgbe_update_tstamp_addend(pdata, pdata->tstamp_addend);
        xgbe_set_tstamp_time(pdata, 0, 0);

        /* Initialize the timecounter */
        timecounter_init(&pdata->tstamp_tc, &pdata->tstamp_cc,
                         ktime_to_ns(ktime_get_real()));

        return 0;
}

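/* The fine-update math behind xgbe_update_tstamp_addend(): each PTP clock
 * cycle the device adds 'addend' to a 32-bit accumulator and bumps the
 * sub-second register by SSINC nanoseconds on every overflow, giving
 * addend = 2^32 * 10^9 / (ptp_clock_hz * SSINC_ns).  As an illustrative
 * example (not values taken from this hardware), a 125 MHz PTP clock with a
 * 20 ns SSINC gives addend = 2^32 * 10^9 / (125e6 * 20) ~= 0x66666666; the
 * PTP core later slews this value slightly to trim clock drift.
 */
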
static void xgbe_config_dcb_tc(struct xgbe_prv_data *pdata)
{
        struct ieee_ets *ets = pdata->ets;
        unsigned int total_weight, min_weight, weight;
        unsigned int i;

        if (!ets)
                return;

        /* Set Tx to deficit weighted round robin scheduling algorithm (when
         * traffic class is using ETS algorithm)
         */
        XGMAC_IOWRITE_BITS(pdata, MTL_OMR, ETSALG, MTL_ETSALG_DWRR);

        /* Set Traffic Class algorithms */
        total_weight = pdata->netdev->mtu * pdata->hw_feat.tc_cnt;
        min_weight = total_weight / 100;
        if (!min_weight)
                min_weight = 1;

        for (i = 0; i < pdata->hw_feat.tc_cnt; i++) {
                switch (ets->tc_tsa[i]) {
                case IEEE_8021QAZ_TSA_STRICT:
                        DBGPR("  TC%u using SP\n", i);
                        XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_ETSCR, TSA,
                                               MTL_TSA_SP);
                        break;
                case IEEE_8021QAZ_TSA_ETS:
                        weight = total_weight * ets->tc_tx_bw[i] / 100;
                        weight = clamp(weight, min_weight, total_weight);

                        DBGPR("  TC%u using DWRR (weight %u)\n", i, weight);
                        XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_ETSCR, TSA,
                                               MTL_TSA_ETS);
                        XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_QWR, QW,
                                               weight);
                        break;
                }
        }
}

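/* Worked example of the weight calculation above (illustrative numbers):
 * with an MTU of 1500 and two traffic classes, total_weight = 3000 and
 * min_weight = 30.  A class granted 60% bandwidth via ETS gets
 * weight = 3000 * 60 / 100 = 1800, clamped to the [30, 3000] range before
 * being written to MTL_TC_QWR.
 */
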
static void xgbe_config_dcb_pfc(struct xgbe_prv_data *pdata)
{
        struct ieee_pfc *pfc = pdata->pfc;
        struct ieee_ets *ets = pdata->ets;
        unsigned int mask, reg, reg_val;
        unsigned int tc, prio;

        if (!pfc || !ets)
                return;

        for (tc = 0; tc < pdata->hw_feat.tc_cnt; tc++) {
                mask = 0;
                for (prio = 0; prio < IEEE_8021QAZ_MAX_TCS; prio++) {
                        if ((pfc->pfc_en & (1 << prio)) &&
                            (ets->prio_tc[prio] == tc))
                                mask |= (1 << prio);
                }
                mask &= 0xff;

                DBGPR("  TC%u PFC mask=%#x\n", tc, mask);
                reg = MTL_TCPM0R + (MTL_TCPM_INC * (tc / MTL_TCPM_TC_PER_REG));
                reg_val = XGMAC_IOREAD(pdata, reg);

                reg_val &= ~(0xff << ((tc % MTL_TCPM_TC_PER_REG) << 3));
                reg_val |= (mask << ((tc % MTL_TCPM_TC_PER_REG) << 3));

                XGMAC_IOWRITE(pdata, reg, reg_val);
        }

        xgbe_config_flow_control(pdata);
}

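/* Example of the register packing above (assuming MTL_TCPM_TC_PER_REG is 4,
 * i.e. four 8-bit PFC masks per 32-bit register): TC5's mask lands in the
 * second mask register at bit offset (5 % 4) << 3 = 8, so the priorities
 * enabled for TC5 occupy bits 15:8 of that register.
 */
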
static void xgbe_tx_start_xmit(struct xgbe_channel *channel,
                               struct xgbe_ring *ring)
{
        struct xgbe_prv_data *pdata = channel->pdata;
        struct xgbe_ring_data *rdata;

        /* Issue a poll command to Tx DMA by writing address
         * of next immediate free descriptor
         */
        rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
        XGMAC_DMA_IOWRITE(channel, DMA_CH_TDTR_LO,
                          lower_32_bits(rdata->rdesc_dma));

        /* Start the Tx coalescing timer */
        if (pdata->tx_usecs && !channel->tx_timer_active) {
                channel->tx_timer_active = 1;
                hrtimer_start(&channel->tx_timer,
                              ktime_set(0, pdata->tx_usecs * NSEC_PER_USEC),
                              HRTIMER_MODE_REL);
        }

        ring->tx.xmit_more = 0;
}

static void xgbe_dev_xmit(struct xgbe_channel *channel)
{
        struct xgbe_prv_data *pdata = channel->pdata;
        struct xgbe_ring *ring = channel->tx_ring;
        struct xgbe_ring_data *rdata;
        struct xgbe_ring_desc *rdesc;
        struct xgbe_packet_data *packet = &ring->packet_data;
        unsigned int csum, tso, vlan;
        unsigned int tso_context, vlan_context;
        unsigned int tx_set_ic;
        int start_index = ring->cur;
        int i;

        DBGPR("-->xgbe_dev_xmit\n");

        csum = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
                              CSUM_ENABLE);
        tso = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
                             TSO_ENABLE);
        vlan = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
                              VLAN_CTAG);

        if (tso && (packet->mss != ring->tx.cur_mss))
                tso_context = 1;
        else
                tso_context = 0;

        if (vlan && (packet->vlan_ctag != ring->tx.cur_vlan_ctag))
                vlan_context = 1;
        else
                vlan_context = 0;

        /* Determine if an interrupt should be generated for this Tx:
         *   Interrupt:
         *     - Tx frame count exceeds the frame count setting
         *     - Addition of Tx frame count to the frame count since the
         *       last interrupt was set exceeds the frame count setting
         *   No interrupt:
         *     - No frame count setting specified (ethtool -C ethX tx-frames 0)
         *     - Addition of Tx frame count to the frame count since the
         *       last interrupt was set does not exceed the frame count setting
         */
        ring->coalesce_count += packet->tx_packets;
        if (!pdata->tx_frames)
                tx_set_ic = 0;
        else if (packet->tx_packets > pdata->tx_frames)
                tx_set_ic = 1;
        else if ((ring->coalesce_count % pdata->tx_frames) <
                 packet->tx_packets)
                tx_set_ic = 1;
        else
                tx_set_ic = 0;

        rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
        rdesc = rdata->rdesc;

        /* Create a context descriptor if this is a TSO packet */
        if (tso_context || vlan_context) {
                if (tso_context) {
                        DBGPR("  TSO context descriptor, mss=%u\n",
                              packet->mss);

                        /* Set the MSS size */
                        XGMAC_SET_BITS_LE(rdesc->desc2, TX_CONTEXT_DESC2,
                                          MSS, packet->mss);

                        /* Mark it as a CONTEXT descriptor */
                        XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3,
                                          CTXT, 1);

                        /* Indicate this descriptor contains the MSS */
                        XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3,
                                          TCMSSV, 1);

                        ring->tx.cur_mss = packet->mss;
                }

                if (vlan_context) {
                        DBGPR("  VLAN context descriptor, ctag=%u\n",
                              packet->vlan_ctag);

                        /* Mark it as a CONTEXT descriptor */
                        XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3,
                                          CTXT, 1);

                        /* Set the VLAN tag */
                        XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3,
                                          VT, packet->vlan_ctag);

                        /* Indicate this descriptor contains the VLAN tag */
                        XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3,
                                          VLTV, 1);

                        ring->tx.cur_vlan_ctag = packet->vlan_ctag;
                }

                ring->cur++;
                rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
                rdesc = rdata->rdesc;
        }

        /* Update buffer address (for TSO this is the header) */
        rdesc->desc0 = cpu_to_le32(lower_32_bits(rdata->skb_dma));
        rdesc->desc1 = cpu_to_le32(upper_32_bits(rdata->skb_dma));

        /* Update the buffer length */
        XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, HL_B1L,
                          rdata->skb_dma_len);

        /* VLAN tag insertion check */
        if (vlan)
                XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, VTIR,
                                  TX_NORMAL_DESC2_VLAN_INSERT);

        /* Timestamp enablement check */
        if (XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, PTP))
                XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, TTSE, 1);

        /* Mark it as First Descriptor */
        XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, FD, 1);

        /* Mark it as a NORMAL descriptor */
        XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CTXT, 0);

        /* Set OWN bit if not the first descriptor */
        if (ring->cur != start_index)
                XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN, 1);

        if (tso) {
                /* Enable TSO */
                XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, TSE, 1);
                XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, TCPPL,
                                  packet->tcp_payload_len);
                XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, TCPHDRLEN,
                                  packet->tcp_header_len / 4);
        } else {
                /* Enable CRC and Pad Insertion */
                XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CPC, 0);

                /* Enable HW CSUM */
                if (csum)
                        XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3,
                                          CIC, 0x3);

                /* Set the total length to be transmitted */
                XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, FL,
                                  packet->length);
        }

        for (i = ring->cur - start_index + 1; i < packet->rdesc_count; i++) {
                ring->cur++;
                rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
                rdesc = rdata->rdesc;

                /* Update buffer address */
                rdesc->desc0 = cpu_to_le32(lower_32_bits(rdata->skb_dma));
                rdesc->desc1 = cpu_to_le32(upper_32_bits(rdata->skb_dma));

                /* Update the buffer length */
                XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, HL_B1L,
                                  rdata->skb_dma_len);

                /* Set OWN bit */
                XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN, 1);

                /* Mark it as NORMAL descriptor */
                XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CTXT, 0);

                /* Enable HW CSUM */
                if (csum)
                        XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3,
                                          CIC, 0x3);
        }

        /* Set LAST bit for the last descriptor */
        XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, LD, 1);

        /* Set IC bit based on Tx coalescing settings */
        if (tx_set_ic)
                XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, IC, 1);

        /* Save the Tx info to report back during cleanup */
        rdata->tx.packets = packet->tx_packets;
        rdata->tx.bytes = packet->tx_bytes;

        /* In case the Tx DMA engine is running, make sure everything
         * is written to the descriptor(s) before setting the OWN bit
         * for the first descriptor
         */
        dma_wmb();

        /* Set OWN bit for the first descriptor */
        rdata = XGBE_GET_DESC_DATA(ring, start_index);
        rdesc = rdata->rdesc;
        XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN, 1);

#ifdef XGMAC_ENABLE_TX_DESC_DUMP
        xgbe_dump_tx_desc(ring, start_index, packet->rdesc_count, 1);
#endif

        /* Make sure ownership is written to the descriptor */
        wmb();

        ring->cur++;
        if (!packet->skb->xmit_more ||
            netif_xmit_stopped(netdev_get_tx_queue(pdata->netdev,
                                                   channel->queue_index)))
                xgbe_tx_start_xmit(channel, ring);
        else
                ring->tx.xmit_more = 1;

        DBGPR("  %s: descriptors %u to %u written\n",
              channel->name, start_index & (ring->rdesc_count - 1),
              (ring->cur - 1) & (ring->rdesc_count - 1));

        DBGPR("<--xgbe_dev_xmit\n");
}

static int xgbe_dev_read(struct xgbe_channel *channel)
{
        struct xgbe_ring *ring = channel->rx_ring;
        struct xgbe_ring_data *rdata;
        struct xgbe_ring_desc *rdesc;
        struct xgbe_packet_data *packet = &ring->packet_data;
        struct net_device *netdev = channel->pdata->netdev;
        unsigned int err, etlt, l34t;

        DBGPR("-->xgbe_dev_read: cur = %d\n", ring->cur);

        rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
        rdesc = rdata->rdesc;

        /* Check for data availability */
        if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, OWN))
                return 1;

        /* Make sure descriptor fields are read after reading the OWN bit */
        dma_rmb();

#ifdef XGMAC_ENABLE_RX_DESC_DUMP
        xgbe_dump_rx_desc(ring, rdesc, ring->cur);
#endif

        if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, CTXT)) {
                /* Timestamp Context Descriptor */
                xgbe_get_rx_tstamp(packet, rdesc);

                XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
                               CONTEXT, 1);
                XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
                               CONTEXT_NEXT, 0);
                return 0;
        }

        /* Normal Descriptor, be sure Context Descriptor bit is off */
        XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, CONTEXT, 0);

        /* Indicate if a Context Descriptor is next */
        if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, CDA))
                XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
                               CONTEXT_NEXT, 1);

        /* Get the header length */
        if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, FD))
                rdata->rx.hdr_len = XGMAC_GET_BITS_LE(rdesc->desc2,
                                                      RX_NORMAL_DESC2, HL);

        /* Get the RSS hash */
        if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, RSV)) {
                XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
                               RSS_HASH, 1);

                packet->rss_hash = le32_to_cpu(rdesc->desc1);

                l34t = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, L34T);
                switch (l34t) {
                case RX_DESC3_L34T_IPV4_TCP:
                case RX_DESC3_L34T_IPV4_UDP:
                case RX_DESC3_L34T_IPV6_TCP:
                case RX_DESC3_L34T_IPV6_UDP:
                        packet->rss_hash_type = PKT_HASH_TYPE_L4;
                        break;
                default:
                        packet->rss_hash_type = PKT_HASH_TYPE_L3;
                }
        }

        /* Get the packet length */
        rdata->rx.len = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, PL);

        if (!XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, LD)) {
                /* Not all the data has been transferred for this packet */
                XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
                               INCOMPLETE, 1);
                return 0;
        }

        /* This is the last of the data for this packet */
        XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
                       INCOMPLETE, 0);

        /* Set checksum done indicator as appropriate */
        if (channel->pdata->netdev->features & NETIF_F_RXCSUM)
                XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
                               CSUM_DONE, 1);

        /* Check for errors (only valid in last descriptor) */
        err = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, ES);
        etlt = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, ETLT);
        DBGPR("  err=%u, etlt=%#x\n", err, etlt);

        if (!err || !etlt) {
                /* No error if err is 0 or etlt is 0 */
                if ((etlt == 0x09) &&
                    (netdev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
                        XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
                                       VLAN_CTAG, 1);
                        packet->vlan_ctag = XGMAC_GET_BITS_LE(rdesc->desc0,
                                                              RX_NORMAL_DESC0,
                                                              OVT);
                        DBGPR("  vlan-ctag=0x%04x\n", packet->vlan_ctag);
                }
        } else {
                if ((etlt == 0x05) || (etlt == 0x06))
                        XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
                                       CSUM_DONE, 0);
                else
                        XGMAC_SET_BITS(packet->errors, RX_PACKET_ERRORS,
                                       FRAME, 1);
        }

        DBGPR("<--xgbe_dev_read: %s - descriptor=%u (cur=%d)\n", channel->name,
              ring->cur & (ring->rdesc_count - 1), ring->cur);

        return 0;
}

static int xgbe_is_context_desc(struct xgbe_ring_desc *rdesc)
{
        /* Rx and Tx share CTXT bit, so check TDES3.CTXT bit */
        return XGMAC_GET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CTXT);
}

static int xgbe_is_last_desc(struct xgbe_ring_desc *rdesc)
{
        /* Rx and Tx share LD bit, so check TDES3.LD bit */
        return XGMAC_GET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, LD);
}

static int xgbe_enable_int(struct xgbe_channel *channel,
                           enum xgbe_int int_id)
{
        unsigned int dma_ch_ier;

        dma_ch_ier = XGMAC_DMA_IOREAD(channel, DMA_CH_IER);

        switch (int_id) {
        case XGMAC_INT_DMA_CH_SR_TI:
                XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TIE, 1);
                break;
        case XGMAC_INT_DMA_CH_SR_TPS:
                XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TXSE, 1);
                break;
        case XGMAC_INT_DMA_CH_SR_TBU:
                XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TBUE, 1);
                break;
        case XGMAC_INT_DMA_CH_SR_RI:
                XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RIE, 1);
                break;
        case XGMAC_INT_DMA_CH_SR_RBU:
                XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RBUE, 1);
                break;
        case XGMAC_INT_DMA_CH_SR_RPS:
                XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RSE, 1);
                break;
        case XGMAC_INT_DMA_CH_SR_TI_RI:
                XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TIE, 1);
                XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RIE, 1);
                break;
        case XGMAC_INT_DMA_CH_SR_FBE:
                XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, FBEE, 1);
                break;
        case XGMAC_INT_DMA_ALL:
                dma_ch_ier |= channel->saved_ier;
                break;
        default:
                return -1;
        }

        XGMAC_DMA_IOWRITE(channel, DMA_CH_IER, dma_ch_ier);

        return 0;
}

static int xgbe_disable_int(struct xgbe_channel *channel,
                            enum xgbe_int int_id)
{
        unsigned int dma_ch_ier;

        dma_ch_ier = XGMAC_DMA_IOREAD(channel, DMA_CH_IER);

        switch (int_id) {
        case XGMAC_INT_DMA_CH_SR_TI:
                XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TIE, 0);
                break;
        case XGMAC_INT_DMA_CH_SR_TPS:
                XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TXSE, 0);
                break;
        case XGMAC_INT_DMA_CH_SR_TBU:
                XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TBUE, 0);
                break;
        case XGMAC_INT_DMA_CH_SR_RI:
                XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RIE, 0);
                break;
        case XGMAC_INT_DMA_CH_SR_RBU:
                XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RBUE, 0);
                break;
        case XGMAC_INT_DMA_CH_SR_RPS:
                XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RSE, 0);
                break;
        case XGMAC_INT_DMA_CH_SR_TI_RI:
                XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TIE, 0);
                XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RIE, 0);
                break;
        case XGMAC_INT_DMA_CH_SR_FBE:
                XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, FBEE, 0);
                break;
        case XGMAC_INT_DMA_ALL:
                channel->saved_ier = dma_ch_ier & XGBE_DMA_INTERRUPT_MASK;
                dma_ch_ier &= ~XGBE_DMA_INTERRUPT_MASK;
                break;
        default:
                return -1;
        }

        XGMAC_DMA_IOWRITE(channel, DMA_CH_IER, dma_ch_ier);

        return 0;
}

static int xgbe_exit(struct xgbe_prv_data *pdata)
{
        unsigned int count = 2000;

        DBGPR("-->xgbe_exit\n");

        /* Issue a software reset */
        XGMAC_IOWRITE_BITS(pdata, DMA_MR, SWR, 1);
        usleep_range(10, 15);

        /* Poll until the reset bit self-clears */
        while (count-- && XGMAC_IOREAD_BITS(pdata, DMA_MR, SWR))
                usleep_range(500, 600);

        if (!count)
                return -EBUSY;

        DBGPR("<--xgbe_exit\n");

        return 0;
}

static int xgbe_flush_tx_queues(struct xgbe_prv_data *pdata)
{
        unsigned int i, count;

        if (XGMAC_GET_BITS(pdata->hw_feat.version, MAC_VR, SNPSVER) < 0x21)
                return 0;

        for (i = 0; i < pdata->tx_q_count; i++)
                XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, FTQ, 1);

        /* Poll until each queue's flush bit self-clears */
        for (i = 0; i < pdata->tx_q_count; i++) {
                count = 2000;
                while (count-- && XGMAC_MTL_IOREAD_BITS(pdata, i,
                                                        MTL_Q_TQOMR, FTQ))
                        usleep_range(500, 600);

                if (!count)
                        return -EBUSY;
        }

        return 0;
}

static void xgbe_config_dma_bus(struct xgbe_prv_data *pdata)
{
        /* Set enhanced addressing mode */
        XGMAC_IOWRITE_BITS(pdata, DMA_SBMR, EAME, 1);

        /* Set the System Bus mode */
        XGMAC_IOWRITE_BITS(pdata, DMA_SBMR, UNDEF, 1);
        XGMAC_IOWRITE_BITS(pdata, DMA_SBMR, BLEN_256, 1);
}

static void xgbe_config_dma_cache(struct xgbe_prv_data *pdata)
{
        unsigned int arcache, awcache;

        arcache = 0;
        XGMAC_SET_BITS(arcache, DMA_AXIARCR, DRC, pdata->arcache);
        XGMAC_SET_BITS(arcache, DMA_AXIARCR, DRD, pdata->axdomain);
        XGMAC_SET_BITS(arcache, DMA_AXIARCR, TEC, pdata->arcache);
        XGMAC_SET_BITS(arcache, DMA_AXIARCR, TED, pdata->axdomain);
        XGMAC_SET_BITS(arcache, DMA_AXIARCR, THC, pdata->arcache);
        XGMAC_SET_BITS(arcache, DMA_AXIARCR, THD, pdata->axdomain);
        XGMAC_IOWRITE(pdata, DMA_AXIARCR, arcache);

        awcache = 0;
        XGMAC_SET_BITS(awcache, DMA_AXIAWCR, DWC, pdata->awcache);
        XGMAC_SET_BITS(awcache, DMA_AXIAWCR, DWD, pdata->axdomain);
        XGMAC_SET_BITS(awcache, DMA_AXIAWCR, RPC, pdata->awcache);
        XGMAC_SET_BITS(awcache, DMA_AXIAWCR, RPD, pdata->axdomain);
        XGMAC_SET_BITS(awcache, DMA_AXIAWCR, RHC, pdata->awcache);
        XGMAC_SET_BITS(awcache, DMA_AXIAWCR, RHD, pdata->axdomain);
        XGMAC_SET_BITS(awcache, DMA_AXIAWCR, TDC, pdata->awcache);
        XGMAC_SET_BITS(awcache, DMA_AXIAWCR, TDD, pdata->axdomain);
        XGMAC_IOWRITE(pdata, DMA_AXIAWCR, awcache);
}

static void xgbe_config_mtl_mode(struct xgbe_prv_data *pdata)
{
        unsigned int i;

        /* Set Tx to weighted round robin scheduling algorithm */
        XGMAC_IOWRITE_BITS(pdata, MTL_OMR, ETSALG, MTL_ETSALG_WRR);

        /* Set Tx traffic classes to use WRR algorithm with equal weights */
        for (i = 0; i < pdata->hw_feat.tc_cnt; i++) {
                XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_ETSCR, TSA,
                                       MTL_TSA_ETS);
                XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_QWR, QW, 1);
        }

        /* Set Rx to strict priority algorithm */
        XGMAC_IOWRITE_BITS(pdata, MTL_OMR, RAA, MTL_RAA_SP);
}

static unsigned int xgbe_calculate_per_queue_fifo(unsigned int fifo_size,
                                                  unsigned int queue_count)
{
        unsigned int q_fifo_size = 0;
        enum xgbe_mtl_fifo_size p_fifo = XGMAC_MTL_FIFO_SIZE_256;

        /* Calculate Tx/Rx fifo share per queue */
        switch (fifo_size) {
        case 0:
                q_fifo_size = XGBE_FIFO_SIZE_B(128);
                break;
        case 1:
                q_fifo_size = XGBE_FIFO_SIZE_B(256);
                break;
        case 2:
                q_fifo_size = XGBE_FIFO_SIZE_B(512);
                break;
        case 3:
                q_fifo_size = XGBE_FIFO_SIZE_KB(1);
                break;
        case 4:
                q_fifo_size = XGBE_FIFO_SIZE_KB(2);
                break;
        case 5:
                q_fifo_size = XGBE_FIFO_SIZE_KB(4);
                break;
        case 6:
                q_fifo_size = XGBE_FIFO_SIZE_KB(8);
                break;
        case 7:
                q_fifo_size = XGBE_FIFO_SIZE_KB(16);
                break;
        case 8:
                q_fifo_size = XGBE_FIFO_SIZE_KB(32);
                break;
        case 9:
                q_fifo_size = XGBE_FIFO_SIZE_KB(64);
                break;
        case 10:
                q_fifo_size = XGBE_FIFO_SIZE_KB(128);
                break;
        case 11:
                q_fifo_size = XGBE_FIFO_SIZE_KB(256);
                break;
        }

        /* The configured value is not the actual amount of fifo RAM */
        q_fifo_size = min_t(unsigned int, XGBE_FIFO_MAX, q_fifo_size);

        q_fifo_size = q_fifo_size / queue_count;

        /* Set the queue fifo size programmable value */
        if (q_fifo_size >= XGBE_FIFO_SIZE_KB(256))
                p_fifo = XGMAC_MTL_FIFO_SIZE_256K;
        else if (q_fifo_size >= XGBE_FIFO_SIZE_KB(128))
                p_fifo = XGMAC_MTL_FIFO_SIZE_128K;
        else if (q_fifo_size >= XGBE_FIFO_SIZE_KB(64))
                p_fifo = XGMAC_MTL_FIFO_SIZE_64K;
        else if (q_fifo_size >= XGBE_FIFO_SIZE_KB(32))
                p_fifo = XGMAC_MTL_FIFO_SIZE_32K;
        else if (q_fifo_size >= XGBE_FIFO_SIZE_KB(16))
                p_fifo = XGMAC_MTL_FIFO_SIZE_16K;
        else if (q_fifo_size >= XGBE_FIFO_SIZE_KB(8))
                p_fifo = XGMAC_MTL_FIFO_SIZE_8K;
        else if (q_fifo_size >= XGBE_FIFO_SIZE_KB(4))
                p_fifo = XGMAC_MTL_FIFO_SIZE_4K;
        else if (q_fifo_size >= XGBE_FIFO_SIZE_KB(2))
                p_fifo = XGMAC_MTL_FIFO_SIZE_2K;
        else if (q_fifo_size >= XGBE_FIFO_SIZE_KB(1))
                p_fifo = XGMAC_MTL_FIFO_SIZE_1K;
        else if (q_fifo_size >= XGBE_FIFO_SIZE_B(512))
                p_fifo = XGMAC_MTL_FIFO_SIZE_512;
        else if (q_fifo_size >= XGBE_FIFO_SIZE_B(256))
                p_fifo = XGMAC_MTL_FIFO_SIZE_256;

        return p_fifo;
}

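/* Worked example of the sizing above (illustrative hardware values): a
 * reported fifo_size code of 9 decodes to 64 KB.  Split across four queues
 * that is 16 KB each, which rounds down to the XGMAC_MTL_FIFO_SIZE_16K
 * programmable value; the netdev_notice() calls in the callers below print
 * it back as (fifo_size + 1) * 256 = 16384 bytes, since the TQS/RQS fields
 * encode the depth in 256-byte units minus one.
 */
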
static void xgbe_config_tx_fifo_size(struct xgbe_prv_data *pdata)
{
        enum xgbe_mtl_fifo_size fifo_size;
        unsigned int i;

        fifo_size = xgbe_calculate_per_queue_fifo(pdata->hw_feat.tx_fifo_size,
                                                  pdata->tx_q_count);

        for (i = 0; i < pdata->tx_q_count; i++)
                XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TQS, fifo_size);

        netdev_notice(pdata->netdev, "%d Tx queues, %d byte fifo per queue\n",
                      pdata->tx_q_count, ((fifo_size + 1) * 256));
}

static void xgbe_config_rx_fifo_size(struct xgbe_prv_data *pdata)
{
        enum xgbe_mtl_fifo_size fifo_size;
        unsigned int i;

        fifo_size = xgbe_calculate_per_queue_fifo(pdata->hw_feat.rx_fifo_size,
                                                  pdata->rx_q_count);

        for (i = 0; i < pdata->rx_q_count; i++)
                XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RQS, fifo_size);

        netdev_notice(pdata->netdev, "%d Rx queues, %d byte fifo per queue\n",
                      pdata->rx_q_count, ((fifo_size + 1) * 256));
}

static void xgbe_config_queue_mapping(struct xgbe_prv_data *pdata)
{
        unsigned int qptc, qptc_extra, queue;
        unsigned int prio_queues;
        unsigned int ppq, ppq_extra, prio;
        unsigned int mask;
        unsigned int i, j, reg, reg_val;

        /* Map the MTL Tx Queues to Traffic Classes
         *   Note: Tx Queues >= Traffic Classes
         */
        qptc = pdata->tx_q_count / pdata->hw_feat.tc_cnt;
        qptc_extra = pdata->tx_q_count % pdata->hw_feat.tc_cnt;

        for (i = 0, queue = 0; i < pdata->hw_feat.tc_cnt; i++) {
                for (j = 0; j < qptc; j++) {
                        DBGPR("  TXq%u mapped to TC%u\n", queue, i);
                        XGMAC_MTL_IOWRITE_BITS(pdata, queue, MTL_Q_TQOMR,
                                               Q2TCMAP, i);
                        pdata->q2tc_map[queue++] = i;
                }

                if (i < qptc_extra) {
                        DBGPR("  TXq%u mapped to TC%u\n", queue, i);
                        XGMAC_MTL_IOWRITE_BITS(pdata, queue, MTL_Q_TQOMR,
                                               Q2TCMAP, i);
                        pdata->q2tc_map[queue++] = i;
                }
        }

        /* Map the 8 VLAN priority values to available MTL Rx queues */
        prio_queues = min_t(unsigned int, IEEE_8021QAZ_MAX_TCS,
                            pdata->rx_q_count);
        ppq = IEEE_8021QAZ_MAX_TCS / prio_queues;
        ppq_extra = IEEE_8021QAZ_MAX_TCS % prio_queues;

        reg = MAC_RQC2R;
        reg_val = 0;
        for (i = 0, prio = 0; i < prio_queues;) {
                mask = 0;
                for (j = 0; j < ppq; j++) {
                        DBGPR("  PRIO%u mapped to RXq%u\n", prio, i);
                        mask |= (1 << prio);
                        pdata->prio2q_map[prio++] = i;
                }

                if (i < ppq_extra) {
                        DBGPR("  PRIO%u mapped to RXq%u\n", prio, i);
                        mask |= (1 << prio);
                        pdata->prio2q_map[prio++] = i;
                }

                reg_val |= (mask << ((i++ % MAC_RQC2_Q_PER_REG) << 3));

                if ((i % MAC_RQC2_Q_PER_REG) && (i != prio_queues))
                        continue;

                XGMAC_IOWRITE(pdata, reg, reg_val);
                reg += MAC_RQC2_INC;
                reg_val = 0;
        }

        /* Select dynamic mapping of MTL Rx queue to DMA Rx channel */
        reg = MTL_RQDCM0R;
        reg_val = 0;
        for (i = 0; i < pdata->rx_q_count;) {
                reg_val |= (0x80 << ((i++ % MTL_RQDCM_Q_PER_REG) << 3));

                if ((i % MTL_RQDCM_Q_PER_REG) && (i != pdata->rx_q_count))
                        continue;

                XGMAC_IOWRITE(pdata, reg, reg_val);

                reg += MTL_RQDCM_INC;
                reg_val = 0;
        }
}

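/* Worked example of the priority spreading above (illustrative counts):
 * with 8 VLAN priorities and 3 Rx queues, ppq = 2 and ppq_extra = 2, so
 * queues 0 and 1 each take three priorities (their ppq share plus one
 * extra) and queue 2 takes the remaining two.  The 0x80 written per queue
 * in the second loop sets each queue's dynamic-mapping bit so, per the
 * comment above, received frames drain to DMA Rx channels dynamically
 * rather than through a fixed queue-to-channel assignment.
 */
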
static void xgbe_config_flow_control_threshold(struct xgbe_prv_data *pdata)
{
        unsigned int i;

        for (i = 0; i < pdata->rx_q_count; i++) {
                /* Activate flow control when less than 4k left in fifo */
                XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQFCR, RFA, 2);

                /* De-activate flow control when more than 6k left in fifo */
                XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQFCR, RFD, 4);
        }
}

static void xgbe_config_mac_address(struct xgbe_prv_data *pdata)
{
        xgbe_set_mac_address(pdata, pdata->netdev->dev_addr);

        /* Filtering is done using perfect filtering and hash filtering */
        if (pdata->hw_feat.hash_table_size) {
                XGMAC_IOWRITE_BITS(pdata, MAC_PFR, HPF, 1);
                XGMAC_IOWRITE_BITS(pdata, MAC_PFR, HUC, 1);
                XGMAC_IOWRITE_BITS(pdata, MAC_PFR, HMC, 1);
        }
}

static void xgbe_config_jumbo_enable(struct xgbe_prv_data *pdata)
{
        unsigned int val;

        val = (pdata->netdev->mtu > XGMAC_STD_PACKET_MTU) ? 1 : 0;

        XGMAC_IOWRITE_BITS(pdata, MAC_RCR, JE, val);
}

static void xgbe_config_checksum_offload(struct xgbe_prv_data *pdata)
{
        if (pdata->netdev->features & NETIF_F_RXCSUM)
                xgbe_enable_rx_csum(pdata);
        else
                xgbe_disable_rx_csum(pdata);
}

static void xgbe_config_vlan_support(struct xgbe_prv_data *pdata)
{
        /* Indicate that VLAN Tx CTAGs come from context descriptors */
        XGMAC_IOWRITE_BITS(pdata, MAC_VLANIR, CSVL, 0);
        XGMAC_IOWRITE_BITS(pdata, MAC_VLANIR, VLTI, 1);

        /* Set the current VLAN Hash Table register value */
        xgbe_update_vlan_hash_table(pdata);

        if (pdata->netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER)
                xgbe_enable_rx_vlan_filtering(pdata);
        else
                xgbe_disable_rx_vlan_filtering(pdata);

        if (pdata->netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
                xgbe_enable_rx_vlan_stripping(pdata);
        else
                xgbe_disable_rx_vlan_stripping(pdata);
}

static u64 xgbe_mmc_read(struct xgbe_prv_data *pdata, unsigned int reg_lo)
{
        bool read_hi;
        u64 val;

        switch (reg_lo) {
        /* These registers are always 64 bit */
        case MMC_TXOCTETCOUNT_GB_LO:
        case MMC_TXOCTETCOUNT_G_LO:
        case MMC_RXOCTETCOUNT_GB_LO:
        case MMC_RXOCTETCOUNT_G_LO:
                read_hi = true;
                break;

        default:
                read_hi = false;
        }

        val = XGMAC_IOREAD(pdata, reg_lo);

        if (read_hi)
                val |= ((u64)XGMAC_IOREAD(pdata, reg_lo + 4) << 32);

        return val;
}

static void xgbe_tx_mmc_int(struct xgbe_prv_data *pdata)
{
        struct xgbe_mmc_stats *stats = &pdata->mmc_stats;
        unsigned int mmc_isr = XGMAC_IOREAD(pdata, MMC_TISR);

        if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXOCTETCOUNT_GB))
                stats->txoctetcount_gb +=
                        xgbe_mmc_read(pdata, MMC_TXOCTETCOUNT_GB_LO);

        if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXFRAMECOUNT_GB))
                stats->txframecount_gb +=
                        xgbe_mmc_read(pdata, MMC_TXFRAMECOUNT_GB_LO);

        if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXBROADCASTFRAMES_G))
                stats->txbroadcastframes_g +=
                        xgbe_mmc_read(pdata, MMC_TXBROADCASTFRAMES_G_LO);

        if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXMULTICASTFRAMES_G))
                stats->txmulticastframes_g +=
                        xgbe_mmc_read(pdata, MMC_TXMULTICASTFRAMES_G_LO);

        if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX64OCTETS_GB))
                stats->tx64octets_gb +=
                        xgbe_mmc_read(pdata, MMC_TX64OCTETS_GB_LO);

        if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX65TO127OCTETS_GB))
                stats->tx65to127octets_gb +=
                        xgbe_mmc_read(pdata, MMC_TX65TO127OCTETS_GB_LO);

        if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX128TO255OCTETS_GB))
                stats->tx128to255octets_gb +=
                        xgbe_mmc_read(pdata, MMC_TX128TO255OCTETS_GB_LO);

        if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX256TO511OCTETS_GB))
                stats->tx256to511octets_gb +=
                        xgbe_mmc_read(pdata, MMC_TX256TO511OCTETS_GB_LO);

        if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX512TO1023OCTETS_GB))
                stats->tx512to1023octets_gb +=
                        xgbe_mmc_read(pdata, MMC_TX512TO1023OCTETS_GB_LO);

        if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX1024TOMAXOCTETS_GB))
                stats->tx1024tomaxoctets_gb +=
                        xgbe_mmc_read(pdata, MMC_TX1024TOMAXOCTETS_GB_LO);

        if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXUNICASTFRAMES_GB))
                stats->txunicastframes_gb +=
                        xgbe_mmc_read(pdata, MMC_TXUNICASTFRAMES_GB_LO);

        if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXMULTICASTFRAMES_GB))
                stats->txmulticastframes_gb +=
                        xgbe_mmc_read(pdata, MMC_TXMULTICASTFRAMES_GB_LO);

        if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXBROADCASTFRAMES_GB))
                stats->txbroadcastframes_g +=
                        xgbe_mmc_read(pdata, MMC_TXBROADCASTFRAMES_GB_LO);

        if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXUNDERFLOWERROR))
                stats->txunderflowerror +=
                        xgbe_mmc_read(pdata, MMC_TXUNDERFLOWERROR_LO);

        if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXOCTETCOUNT_G))
                stats->txoctetcount_g +=
                        xgbe_mmc_read(pdata, MMC_TXOCTETCOUNT_G_LO);

        if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXFRAMECOUNT_G))
                stats->txframecount_g +=
                        xgbe_mmc_read(pdata, MMC_TXFRAMECOUNT_G_LO);

        if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXPAUSEFRAMES))
                stats->txpauseframes +=
                        xgbe_mmc_read(pdata, MMC_TXPAUSEFRAMES_LO);

        if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXVLANFRAMES_G))
                stats->txvlanframes_g +=
                        xgbe_mmc_read(pdata, MMC_TXVLANFRAMES_G_LO);
}

static void xgbe_rx_mmc_int(struct xgbe_prv_data *pdata)
{
        struct xgbe_mmc_stats *stats = &pdata->mmc_stats;
        unsigned int mmc_isr = XGMAC_IOREAD(pdata, MMC_RISR);

        /* Read (and clear) each counter whose status bit is set and
         * fold it into the running software totals
         */
        if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXFRAMECOUNT_GB))
                stats->rxframecount_gb +=
                        xgbe_mmc_read(pdata, MMC_RXFRAMECOUNT_GB_LO);

        if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOCTETCOUNT_GB))
                stats->rxoctetcount_gb +=
                        xgbe_mmc_read(pdata, MMC_RXOCTETCOUNT_GB_LO);

        if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOCTETCOUNT_G))
                stats->rxoctetcount_g +=
                        xgbe_mmc_read(pdata, MMC_RXOCTETCOUNT_G_LO);

        if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXBROADCASTFRAMES_G))
                stats->rxbroadcastframes_g +=
                        xgbe_mmc_read(pdata, MMC_RXBROADCASTFRAMES_G_LO);

        if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXMULTICASTFRAMES_G))
                stats->rxmulticastframes_g +=
                        xgbe_mmc_read(pdata, MMC_RXMULTICASTFRAMES_G_LO);

        if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXCRCERROR))
                stats->rxcrcerror +=
                        xgbe_mmc_read(pdata, MMC_RXCRCERROR_LO);

        if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXRUNTERROR))
                stats->rxrunterror +=
                        xgbe_mmc_read(pdata, MMC_RXRUNTERROR);

        if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXJABBERERROR))
                stats->rxjabbererror +=
                        xgbe_mmc_read(pdata, MMC_RXJABBERERROR);

        if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXUNDERSIZE_G))
                stats->rxundersize_g +=
                        xgbe_mmc_read(pdata, MMC_RXUNDERSIZE_G);

        if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOVERSIZE_G))
                stats->rxoversize_g +=
                        xgbe_mmc_read(pdata, MMC_RXOVERSIZE_G);

        if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX64OCTETS_GB))
                stats->rx64octets_gb +=
                        xgbe_mmc_read(pdata, MMC_RX64OCTETS_GB_LO);

        if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX65TO127OCTETS_GB))
                stats->rx65to127octets_gb +=
                        xgbe_mmc_read(pdata, MMC_RX65TO127OCTETS_GB_LO);

        if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX128TO255OCTETS_GB))
                stats->rx128to255octets_gb +=
                        xgbe_mmc_read(pdata, MMC_RX128TO255OCTETS_GB_LO);

        if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX256TO511OCTETS_GB))
                stats->rx256to511octets_gb +=
                        xgbe_mmc_read(pdata, MMC_RX256TO511OCTETS_GB_LO);

        if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX512TO1023OCTETS_GB))
                stats->rx512to1023octets_gb +=
                        xgbe_mmc_read(pdata, MMC_RX512TO1023OCTETS_GB_LO);

        if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX1024TOMAXOCTETS_GB))
                stats->rx1024tomaxoctets_gb +=
                        xgbe_mmc_read(pdata, MMC_RX1024TOMAXOCTETS_GB_LO);

        if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXUNICASTFRAMES_G))
                stats->rxunicastframes_g +=
                        xgbe_mmc_read(pdata, MMC_RXUNICASTFRAMES_G_LO);

        if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXLENGTHERROR))
                stats->rxlengtherror +=
                        xgbe_mmc_read(pdata, MMC_RXLENGTHERROR_LO);

        if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOUTOFRANGETYPE))
                stats->rxoutofrangetype +=
                        xgbe_mmc_read(pdata, MMC_RXOUTOFRANGETYPE_LO);

        if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXPAUSEFRAMES))
                stats->rxpauseframes +=
                        xgbe_mmc_read(pdata, MMC_RXPAUSEFRAMES_LO);

        if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXFIFOOVERFLOW))
                stats->rxfifooverflow +=
                        xgbe_mmc_read(pdata, MMC_RXFIFOOVERFLOW_LO);

        if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXVLANFRAMES_GB))
                stats->rxvlanframes_gb +=
                        xgbe_mmc_read(pdata, MMC_RXVLANFRAMES_GB_LO);

        if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXWATCHDOGERROR))
                stats->rxwatchdogerror +=
                        xgbe_mmc_read(pdata, MMC_RXWATCHDOGERROR);
}
static void xgbe_read_mmc_stats(struct xgbe_prv_data *pdata)
{
        struct xgbe_mmc_stats *stats = &pdata->mmc_stats;

        /* Freeze counters */
        XGMAC_IOWRITE_BITS(pdata, MMC_CR, MCF, 1);

        stats->txoctetcount_gb += xgbe_mmc_read(pdata, MMC_TXOCTETCOUNT_GB_LO);
        stats->txframecount_gb += xgbe_mmc_read(pdata, MMC_TXFRAMECOUNT_GB_LO);
        stats->txbroadcastframes_g +=
                xgbe_mmc_read(pdata, MMC_TXBROADCASTFRAMES_G_LO);
        stats->txmulticastframes_g +=
                xgbe_mmc_read(pdata, MMC_TXMULTICASTFRAMES_G_LO);
        stats->tx64octets_gb += xgbe_mmc_read(pdata, MMC_TX64OCTETS_GB_LO);
        stats->tx65to127octets_gb +=
                xgbe_mmc_read(pdata, MMC_TX65TO127OCTETS_GB_LO);
        stats->tx128to255octets_gb +=
                xgbe_mmc_read(pdata, MMC_TX128TO255OCTETS_GB_LO);
        stats->tx256to511octets_gb +=
                xgbe_mmc_read(pdata, MMC_TX256TO511OCTETS_GB_LO);
        stats->tx512to1023octets_gb +=
                xgbe_mmc_read(pdata, MMC_TX512TO1023OCTETS_GB_LO);
        stats->tx1024tomaxoctets_gb +=
                xgbe_mmc_read(pdata, MMC_TX1024TOMAXOCTETS_GB_LO);
        stats->txunicastframes_gb +=
                xgbe_mmc_read(pdata, MMC_TXUNICASTFRAMES_GB_LO);
        stats->txmulticastframes_gb +=
                xgbe_mmc_read(pdata, MMC_TXMULTICASTFRAMES_GB_LO);
        stats->txbroadcastframes_g +=
                xgbe_mmc_read(pdata, MMC_TXBROADCASTFRAMES_GB_LO);
        stats->txunderflowerror +=
                xgbe_mmc_read(pdata, MMC_TXUNDERFLOWERROR_LO);
        stats->txoctetcount_g += xgbe_mmc_read(pdata, MMC_TXOCTETCOUNT_G_LO);
        stats->txframecount_g += xgbe_mmc_read(pdata, MMC_TXFRAMECOUNT_G_LO);
        stats->txpauseframes += xgbe_mmc_read(pdata, MMC_TXPAUSEFRAMES_LO);
        stats->txvlanframes_g += xgbe_mmc_read(pdata, MMC_TXVLANFRAMES_G_LO);

        stats->rxframecount_gb += xgbe_mmc_read(pdata, MMC_RXFRAMECOUNT_GB_LO);
        stats->rxoctetcount_gb += xgbe_mmc_read(pdata, MMC_RXOCTETCOUNT_GB_LO);
        stats->rxoctetcount_g += xgbe_mmc_read(pdata, MMC_RXOCTETCOUNT_G_LO);
        stats->rxbroadcastframes_g +=
                xgbe_mmc_read(pdata, MMC_RXBROADCASTFRAMES_G_LO);
        stats->rxmulticastframes_g +=
                xgbe_mmc_read(pdata, MMC_RXMULTICASTFRAMES_G_LO);
        stats->rxcrcerror += xgbe_mmc_read(pdata, MMC_RXCRCERROR_LO);
        stats->rxrunterror += xgbe_mmc_read(pdata, MMC_RXRUNTERROR);
        stats->rxjabbererror += xgbe_mmc_read(pdata, MMC_RXJABBERERROR);
        stats->rxundersize_g += xgbe_mmc_read(pdata, MMC_RXUNDERSIZE_G);
        stats->rxoversize_g += xgbe_mmc_read(pdata, MMC_RXOVERSIZE_G);
        stats->rx64octets_gb += xgbe_mmc_read(pdata, MMC_RX64OCTETS_GB_LO);
        stats->rx65to127octets_gb +=
                xgbe_mmc_read(pdata, MMC_RX65TO127OCTETS_GB_LO);
        stats->rx128to255octets_gb +=
                xgbe_mmc_read(pdata, MMC_RX128TO255OCTETS_GB_LO);
        stats->rx256to511octets_gb +=
                xgbe_mmc_read(pdata, MMC_RX256TO511OCTETS_GB_LO);
        stats->rx512to1023octets_gb +=
                xgbe_mmc_read(pdata, MMC_RX512TO1023OCTETS_GB_LO);
        stats->rx1024tomaxoctets_gb +=
                xgbe_mmc_read(pdata, MMC_RX1024TOMAXOCTETS_GB_LO);
        stats->rxunicastframes_g +=
                xgbe_mmc_read(pdata, MMC_RXUNICASTFRAMES_G_LO);
        stats->rxlengtherror += xgbe_mmc_read(pdata, MMC_RXLENGTHERROR_LO);
        stats->rxoutofrangetype +=
                xgbe_mmc_read(pdata, MMC_RXOUTOFRANGETYPE_LO);
        stats->rxpauseframes += xgbe_mmc_read(pdata, MMC_RXPAUSEFRAMES_LO);
        stats->rxfifooverflow += xgbe_mmc_read(pdata, MMC_RXFIFOOVERFLOW_LO);
        stats->rxvlanframes_gb += xgbe_mmc_read(pdata, MMC_RXVLANFRAMES_GB_LO);
        stats->rxwatchdogerror += xgbe_mmc_read(pdata, MMC_RXWATCHDOGERROR);

        /* Un-freeze counters */
        XGMAC_IOWRITE_BITS(pdata, MMC_CR, MCF, 0);
}
static void xgbe_config_mmc(struct xgbe_prv_data *pdata)
{
        /* Set counters to reset on read */
        XGMAC_IOWRITE_BITS(pdata, MMC_CR, ROR, 1);

        /* Reset the counters */
        XGMAC_IOWRITE_BITS(pdata, MMC_CR, CR, 1);
}
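
/*
 * Note (added for clarity): with ROR set, every MMC counter register
 * clears itself on read. That is why all xgbe_mmc_read() callers
 * accumulate with "+=": the hardware hands back only the delta since
 * the previous read, and the 64-bit software counters in
 * pdata->mmc_stats hold the running totals. Setting MMC_CR.MCF in
 * xgbe_read_mmc_stats() freezes counting while a snapshot is taken, so
 * the totals stay internally consistent.
 */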
static void xgbe_prepare_tx_stop(struct xgbe_prv_data *pdata,
                                 struct xgbe_channel *channel)
{
        unsigned int tx_dsr, tx_pos, tx_qidx;
        unsigned int tx_status;
        unsigned long tx_timeout;

        /* Calculate the status register to read and the position within */
        if (channel->queue_index < DMA_DSRX_FIRST_QUEUE) {
                tx_dsr = DMA_DSR0;
                tx_pos = (channel->queue_index * DMA_DSR_Q_WIDTH) +
                         DMA_DSR0_TPS_START;
        } else {
                tx_qidx = channel->queue_index - DMA_DSRX_FIRST_QUEUE;

                tx_dsr = DMA_DSR1 + ((tx_qidx / DMA_DSRX_QPR) * DMA_DSRX_INC);
                tx_pos = ((tx_qidx % DMA_DSRX_QPR) * DMA_DSR_Q_WIDTH) +
                         DMA_DSRX_TPS_START;
        }

        /* The Tx engine cannot be stopped if it is actively processing
         * descriptors. Wait for the Tx engine to enter the stopped or
         * suspended state. Don't wait forever though...
         */
        tx_timeout = jiffies + (XGBE_DMA_STOP_TIMEOUT * HZ);
        while (time_before(jiffies, tx_timeout)) {
                tx_status = XGMAC_IOREAD(pdata, tx_dsr);
                tx_status = GET_BITS(tx_status, tx_pos, DMA_DSR_TPS_WIDTH);
                if ((tx_status == DMA_TPS_STOPPED) ||
                    (tx_status == DMA_TPS_SUSPENDED))
                        break;

                usleep_range(500, 1000);
        }

        if (!time_before(jiffies, tx_timeout))
                netdev_info(pdata->netdev,
                            "timed out waiting for Tx DMA channel %u to stop\n",
                            channel->queue_index);
}
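
/*
 * Worked example (illustrative, assuming the usual definitions of
 * DMA_DSRX_FIRST_QUEUE = 3 and DMA_DSRX_QPR = 4): queue 5 gives
 * tx_qidx = 5 - 3 = 2, which lands in DMA_DSR1 (2 / 4 == 0, so no
 * DMA_DSRX_INC offset is added) with its TPS field starting at bit
 * (2 % 4) * DMA_DSR_Q_WIDTH + DMA_DSRX_TPS_START. Queues 0 through 2
 * are read directly from DMA_DSR0.
 */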
static void xgbe_enable_tx(struct xgbe_prv_data *pdata)
{
        struct xgbe_channel *channel;
        unsigned int i;

        /* Enable each Tx DMA channel */
        channel = pdata->channel;
        for (i = 0; i < pdata->channel_count; i++, channel++) {
                if (!channel->tx_ring)
                        break;

                XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_TCR, ST, 1);
        }

        /* Enable each Tx queue */
        for (i = 0; i < pdata->tx_q_count; i++)
                XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TXQEN,
                                       MTL_Q_ENABLED);

        /* Enable MAC Tx */
        XGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 1);
}
static void xgbe_disable_tx(struct xgbe_prv_data *pdata)
{
        struct xgbe_channel *channel;
        unsigned int i;

        /* Prepare for Tx DMA channel stop */
        channel = pdata->channel;
        for (i = 0; i < pdata->channel_count; i++, channel++) {
                if (!channel->tx_ring)
                        break;

                xgbe_prepare_tx_stop(pdata, channel);
        }

        /* Disable MAC Tx */
        XGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 0);

        /* Disable each Tx queue */
        for (i = 0; i < pdata->tx_q_count; i++)
                XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TXQEN, 0);

        /* Disable each Tx DMA channel */
        channel = pdata->channel;
        for (i = 0; i < pdata->channel_count; i++, channel++) {
                if (!channel->tx_ring)
                        break;

                XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_TCR, ST, 0);
        }
}
static void xgbe_enable_rx(struct xgbe_prv_data *pdata)
{
        struct xgbe_channel *channel;
        unsigned int reg_val, i;

        /* Enable each Rx DMA channel */
        channel = pdata->channel;
        for (i = 0; i < pdata->channel_count; i++, channel++) {
                if (!channel->rx_ring)
                        break;

                XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_RCR, SR, 1);
        }

        /* Enable each Rx queue */
        reg_val = 0;
        for (i = 0; i < pdata->rx_q_count; i++)
                reg_val |= (0x02 << (i << 1));
        XGMAC_IOWRITE(pdata, MAC_RQC0R, reg_val);

        /* Enable MAC Rx */
        XGMAC_IOWRITE_BITS(pdata, MAC_RCR, DCRCC, 1);
        XGMAC_IOWRITE_BITS(pdata, MAC_RCR, CST, 1);
        XGMAC_IOWRITE_BITS(pdata, MAC_RCR, ACS, 1);
        XGMAC_IOWRITE_BITS(pdata, MAC_RCR, RE, 1);
}
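
/*
 * Worked example (illustrative): MAC_RQC0R carries a 2-bit enable code
 * per Rx queue, so queue i occupies bits [2i+1:2i]; 0x02 is the enable
 * code point used by this driver. With rx_q_count = 3, the loop above
 * builds:
 *
 *      reg_val = (0x02 << 0) | (0x02 << 2) | (0x02 << 4) = 0x2a
 */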
static void xgbe_disable_rx(struct xgbe_prv_data *pdata)
{
        struct xgbe_channel *channel;
        unsigned int i;

        /* Disable MAC Rx */
        XGMAC_IOWRITE_BITS(pdata, MAC_RCR, DCRCC, 0);
        XGMAC_IOWRITE_BITS(pdata, MAC_RCR, CST, 0);
        XGMAC_IOWRITE_BITS(pdata, MAC_RCR, ACS, 0);
        XGMAC_IOWRITE_BITS(pdata, MAC_RCR, RE, 0);

        /* Disable each Rx queue */
        XGMAC_IOWRITE(pdata, MAC_RQC0R, 0);

        /* Disable each Rx DMA channel */
        channel = pdata->channel;
        for (i = 0; i < pdata->channel_count; i++, channel++) {
                if (!channel->rx_ring)
                        break;

                XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_RCR, SR, 0);
        }
}
static void xgbe_powerup_tx(struct xgbe_prv_data *pdata)
{
        struct xgbe_channel *channel;
        unsigned int i;

        /* Enable each Tx DMA channel */
        channel = pdata->channel;
        for (i = 0; i < pdata->channel_count; i++, channel++) {
                if (!channel->tx_ring)
                        break;

                XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_TCR, ST, 1);
        }

        /* Enable MAC Tx */
        XGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 1);
}
static void xgbe_powerdown_tx(struct xgbe_prv_data *pdata)
{
        struct xgbe_channel *channel;
        unsigned int i;

        /* Prepare for Tx DMA channel stop */
        channel = pdata->channel;
        for (i = 0; i < pdata->channel_count; i++, channel++) {
                if (!channel->tx_ring)
                        break;

                xgbe_prepare_tx_stop(pdata, channel);
        }

        /* Disable MAC Tx */
        XGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 0);

        /* Disable each Tx DMA channel */
        channel = pdata->channel;
        for (i = 0; i < pdata->channel_count; i++, channel++) {
                if (!channel->tx_ring)
                        break;

                XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_TCR, ST, 0);
        }
}
static void xgbe_powerup_rx(struct xgbe_prv_data *pdata)
{
        struct xgbe_channel *channel;
        unsigned int i;

        /* Enable each Rx DMA channel */
        channel = pdata->channel;
        for (i = 0; i < pdata->channel_count; i++, channel++) {
                if (!channel->rx_ring)
                        break;

                XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_RCR, SR, 1);
        }
}
static void xgbe_powerdown_rx(struct xgbe_prv_data *pdata)
{
        struct xgbe_channel *channel;
        unsigned int i;

        /* Disable each Rx DMA channel */
        channel = pdata->channel;
        for (i = 0; i < pdata->channel_count; i++, channel++) {
                if (!channel->rx_ring)
                        break;

                XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_RCR, SR, 0);
        }
}
static int xgbe_init(struct xgbe_prv_data *pdata)
{
        struct xgbe_desc_if *desc_if = &pdata->desc_if;
        int ret;

        DBGPR("-->xgbe_init\n");

        /* Flush Tx queues */
        ret = xgbe_flush_tx_queues(pdata);
        if (ret)
                return ret;

        /*
         * Initialize DMA related features
         */
        xgbe_config_dma_bus(pdata);
        xgbe_config_dma_cache(pdata);
        xgbe_config_osp_mode(pdata);
        xgbe_config_pblx8(pdata);
        xgbe_config_tx_pbl_val(pdata);
        xgbe_config_rx_pbl_val(pdata);
        xgbe_config_rx_coalesce(pdata);
        xgbe_config_tx_coalesce(pdata);
        xgbe_config_rx_buffer_size(pdata);
        xgbe_config_tso_mode(pdata);
        xgbe_config_sph_mode(pdata);
        xgbe_config_rss(pdata);
        desc_if->wrapper_tx_desc_init(pdata);
        desc_if->wrapper_rx_desc_init(pdata);
        xgbe_enable_dma_interrupts(pdata);

        /*
         * Initialize MTL related features
         */
        xgbe_config_mtl_mode(pdata);
        xgbe_config_queue_mapping(pdata);
        xgbe_config_tsf_mode(pdata, pdata->tx_sf_mode);
        xgbe_config_rsf_mode(pdata, pdata->rx_sf_mode);
        xgbe_config_tx_threshold(pdata, pdata->tx_threshold);
        xgbe_config_rx_threshold(pdata, pdata->rx_threshold);
        xgbe_config_tx_fifo_size(pdata);
        xgbe_config_rx_fifo_size(pdata);
        xgbe_config_flow_control_threshold(pdata);
        /* TODO: Error packet and undersized good packet forwarding enable
         *       (FEP and FUP)
         */
        xgbe_config_dcb_tc(pdata);
        xgbe_config_dcb_pfc(pdata);
        xgbe_enable_mtl_interrupts(pdata);

        /*
         * Initialize MAC related features
         */
        xgbe_config_mac_address(pdata);
        xgbe_config_jumbo_enable(pdata);
        xgbe_config_flow_control(pdata);
        xgbe_config_checksum_offload(pdata);
        xgbe_config_vlan_support(pdata);
        xgbe_config_mmc(pdata);
        xgbe_enable_mac_interrupts(pdata);

        DBGPR("<--xgbe_init\n");

        return 0;
}
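
/*
 * Note (added for clarity): configuration above proceeds DMA, then MTL,
 * then MAC, so each inner block is fully set up before the block that
 * feeds traffic into it is touched, and the Tx queues are flushed first
 * so no stale descriptors are processed once the engines come up.
 */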
void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *hw_if)
{
        DBGPR("-->xgbe_init_function_ptrs\n");

        hw_if->tx_complete = xgbe_tx_complete;

        hw_if->set_promiscuous_mode = xgbe_set_promiscuous_mode;
        hw_if->set_all_multicast_mode = xgbe_set_all_multicast_mode;
        hw_if->add_mac_addresses = xgbe_add_mac_addresses;
        hw_if->set_mac_address = xgbe_set_mac_address;

        hw_if->enable_rx_csum = xgbe_enable_rx_csum;
        hw_if->disable_rx_csum = xgbe_disable_rx_csum;

        hw_if->enable_rx_vlan_stripping = xgbe_enable_rx_vlan_stripping;
        hw_if->disable_rx_vlan_stripping = xgbe_disable_rx_vlan_stripping;
        hw_if->enable_rx_vlan_filtering = xgbe_enable_rx_vlan_filtering;
        hw_if->disable_rx_vlan_filtering = xgbe_disable_rx_vlan_filtering;
        hw_if->update_vlan_hash_table = xgbe_update_vlan_hash_table;

        hw_if->read_mmd_regs = xgbe_read_mmd_regs;
        hw_if->write_mmd_regs = xgbe_write_mmd_regs;

        hw_if->set_gmii_speed = xgbe_set_gmii_speed;
        hw_if->set_gmii_2500_speed = xgbe_set_gmii_2500_speed;
        hw_if->set_xgmii_speed = xgbe_set_xgmii_speed;

        hw_if->enable_tx = xgbe_enable_tx;
        hw_if->disable_tx = xgbe_disable_tx;
        hw_if->enable_rx = xgbe_enable_rx;
        hw_if->disable_rx = xgbe_disable_rx;

        hw_if->powerup_tx = xgbe_powerup_tx;
        hw_if->powerdown_tx = xgbe_powerdown_tx;
        hw_if->powerup_rx = xgbe_powerup_rx;
        hw_if->powerdown_rx = xgbe_powerdown_rx;

        hw_if->dev_xmit = xgbe_dev_xmit;
        hw_if->dev_read = xgbe_dev_read;
        hw_if->enable_int = xgbe_enable_int;
        hw_if->disable_int = xgbe_disable_int;
        hw_if->init = xgbe_init;
        hw_if->exit = xgbe_exit;

        /* Descriptor related Sequences have to be initialized here */
        hw_if->tx_desc_init = xgbe_tx_desc_init;
        hw_if->rx_desc_init = xgbe_rx_desc_init;
        hw_if->tx_desc_reset = xgbe_tx_desc_reset;
        hw_if->rx_desc_reset = xgbe_rx_desc_reset;
        hw_if->is_last_desc = xgbe_is_last_desc;
        hw_if->is_context_desc = xgbe_is_context_desc;
        hw_if->tx_start_xmit = xgbe_tx_start_xmit;
        /* For FLOW ctrl */
        hw_if->config_tx_flow_control = xgbe_config_tx_flow_control;
        hw_if->config_rx_flow_control = xgbe_config_rx_flow_control;

        /* For RX coalescing */
        hw_if->config_rx_coalesce = xgbe_config_rx_coalesce;
        hw_if->config_tx_coalesce = xgbe_config_tx_coalesce;
        hw_if->usec_to_riwt = xgbe_usec_to_riwt;
        hw_if->riwt_to_usec = xgbe_riwt_to_usec;

        /* For RX and TX threshold config */
        hw_if->config_rx_threshold = xgbe_config_rx_threshold;
        hw_if->config_tx_threshold = xgbe_config_tx_threshold;

        /* For RX and TX Store and Forward Mode config */
        hw_if->config_rsf_mode = xgbe_config_rsf_mode;
        hw_if->config_tsf_mode = xgbe_config_tsf_mode;

        /* For TX DMA Operating on Second Frame config */
        hw_if->config_osp_mode = xgbe_config_osp_mode;

        /* For RX and TX PBL config */
        hw_if->config_rx_pbl_val = xgbe_config_rx_pbl_val;
        hw_if->get_rx_pbl_val = xgbe_get_rx_pbl_val;
        hw_if->config_tx_pbl_val = xgbe_config_tx_pbl_val;
        hw_if->get_tx_pbl_val = xgbe_get_tx_pbl_val;
        hw_if->config_pblx8 = xgbe_config_pblx8;

        /* For MMC statistics support */
        hw_if->tx_mmc_int = xgbe_tx_mmc_int;
        hw_if->rx_mmc_int = xgbe_rx_mmc_int;
        hw_if->read_mmc_stats = xgbe_read_mmc_stats;

        /* For PTP config */
        hw_if->config_tstamp = xgbe_config_tstamp;
        hw_if->update_tstamp_addend = xgbe_update_tstamp_addend;
        hw_if->set_tstamp_time = xgbe_set_tstamp_time;
        hw_if->get_tstamp_time = xgbe_get_tstamp_time;
        hw_if->get_tx_tstamp = xgbe_get_tx_tstamp;

        /* For Data Center Bridging config */
        hw_if->config_dcb_tc = xgbe_config_dcb_tc;
        hw_if->config_dcb_pfc = xgbe_config_dcb_pfc;

        /* For Receive Side Scaling */
        hw_if->enable_rss = xgbe_enable_rss;
        hw_if->disable_rss = xgbe_disable_rss;
        hw_if->set_rss_hash_key = xgbe_set_rss_hash_key;
        hw_if->set_rss_lookup_table = xgbe_set_rss_lookup_table;

        DBGPR("<--xgbe_init_function_ptrs\n");
}
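
/*
 * Usage sketch (illustrative, not part of the original file): the rest
 * of the driver talks to the hardware through this ops table rather
 * than calling the xgbe_* functions directly, roughly:
 *
 *      struct xgbe_hw_if *hw_if = &pdata->hw_if;
 *      int ret;
 *
 *      xgbe_init_function_ptrs_dev(hw_if);
 *      ret = hw_if->init(pdata);
 *      if (!ret)
 *              hw_if->enable_tx(pdata);
 *
 * Swapping in a different implementation (for a new hardware revision,
 * say) then only requires filling the table differently.
 */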