/*
 * Driver for Marvell NETA network card for Armada XP and Armada 370 SoCs.
 *
 * Copyright (C) 2012 Marvell
 *
 * Rami Rosen <rosenr@marvell.com>
 * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */
#include <linux/clk.h>
#include <linux/cpu.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/inetdevice.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/mbus.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/phy.h>
#include <linux/platform_device.h>
#include <linux/skbuff.h>
#include "mvneta_bm.h"

#define MVNETA_RXQ_CONFIG_REG(q) (0x1400 + ((q) << 2))
#define MVNETA_RXQ_HW_BUF_ALLOC BIT(0)
#define MVNETA_RXQ_SHORT_POOL_ID_SHIFT 4
#define MVNETA_RXQ_SHORT_POOL_ID_MASK 0x30
#define MVNETA_RXQ_LONG_POOL_ID_SHIFT 6
#define MVNETA_RXQ_LONG_POOL_ID_MASK 0xc0
#define MVNETA_RXQ_PKT_OFFSET_ALL_MASK (0xf << 8)
#define MVNETA_RXQ_PKT_OFFSET_MASK(offs) ((offs) << 8)
#define MVNETA_RXQ_THRESHOLD_REG(q) (0x14c0 + ((q) << 2))
#define MVNETA_RXQ_NON_OCCUPIED(v) ((v) << 16)
#define MVNETA_RXQ_BASE_ADDR_REG(q) (0x1480 + ((q) << 2))
#define MVNETA_RXQ_SIZE_REG(q) (0x14a0 + ((q) << 2))
#define MVNETA_RXQ_BUF_SIZE_SHIFT 19
#define MVNETA_RXQ_BUF_SIZE_MASK (0x1fff << 19)
#define MVNETA_RXQ_STATUS_REG(q) (0x14e0 + ((q) << 2))
#define MVNETA_RXQ_OCCUPIED_ALL_MASK 0x3fff
#define MVNETA_RXQ_STATUS_UPDATE_REG(q) (0x1500 + ((q) << 2))
#define MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT 16
#define MVNETA_RXQ_ADD_NON_OCCUPIED_MAX 255
#define MVNETA_PORT_POOL_BUFFER_SZ_REG(pool) (0x1700 + ((pool) << 2))
#define MVNETA_PORT_POOL_BUFFER_SZ_SHIFT 3
#define MVNETA_PORT_POOL_BUFFER_SZ_MASK 0xfff8
#define MVNETA_PORT_RX_RESET 0x1cc0
#define MVNETA_PORT_RX_DMA_RESET BIT(0)
#define MVNETA_PHY_ADDR 0x2000
#define MVNETA_PHY_ADDR_MASK 0x1f
#define MVNETA_MBUS_RETRY 0x2010
#define MVNETA_UNIT_INTR_CAUSE 0x2080
#define MVNETA_UNIT_CONTROL 0x20B0
#define MVNETA_PHY_POLLING_ENABLE BIT(1)
#define MVNETA_WIN_BASE(w) (0x2200 + ((w) << 3))
#define MVNETA_WIN_SIZE(w) (0x2204 + ((w) << 3))
#define MVNETA_WIN_REMAP(w) (0x2280 + ((w) << 2))
#define MVNETA_BASE_ADDR_ENABLE 0x2290
#define MVNETA_ACCESS_PROTECT_ENABLE 0x2294
#define MVNETA_PORT_CONFIG 0x2400
#define MVNETA_UNI_PROMISC_MODE BIT(0)
#define MVNETA_DEF_RXQ(q) ((q) << 1)
#define MVNETA_DEF_RXQ_ARP(q) ((q) << 4)
#define MVNETA_TX_UNSET_ERR_SUM BIT(12)
#define MVNETA_DEF_RXQ_TCP(q) ((q) << 16)
#define MVNETA_DEF_RXQ_UDP(q) ((q) << 19)
#define MVNETA_DEF_RXQ_BPDU(q) ((q) << 22)
#define MVNETA_RX_CSUM_WITH_PSEUDO_HDR BIT(25)
#define MVNETA_PORT_CONFIG_DEFL_VALUE(q) (MVNETA_DEF_RXQ(q) | \
					  MVNETA_DEF_RXQ_ARP(q) | \
					  MVNETA_DEF_RXQ_TCP(q) | \
					  MVNETA_DEF_RXQ_UDP(q) | \
					  MVNETA_DEF_RXQ_BPDU(q) | \
					  MVNETA_TX_UNSET_ERR_SUM | \
					  MVNETA_RX_CSUM_WITH_PSEUDO_HDR)
#define MVNETA_PORT_CONFIG_EXTEND 0x2404
#define MVNETA_MAC_ADDR_LOW 0x2414
#define MVNETA_MAC_ADDR_HIGH 0x2418
#define MVNETA_SDMA_CONFIG 0x241c
#define MVNETA_SDMA_BRST_SIZE_16 4
#define MVNETA_RX_BRST_SZ_MASK(burst) ((burst) << 1)
#define MVNETA_RX_NO_DATA_SWAP BIT(4)
#define MVNETA_TX_NO_DATA_SWAP BIT(5)
#define MVNETA_DESC_SWAP BIT(6)
#define MVNETA_TX_BRST_SZ_MASK(burst) ((burst) << 22)
#define MVNETA_PORT_STATUS 0x2444
#define MVNETA_TX_IN_PRGRS BIT(1)
#define MVNETA_TX_FIFO_EMPTY BIT(8)
#define MVNETA_RX_MIN_FRAME_SIZE 0x247c
#define MVNETA_SERDES_CFG 0x24A0
#define MVNETA_SGMII_SERDES_PROTO 0x0cc7
#define MVNETA_QSGMII_SERDES_PROTO 0x0667
#define MVNETA_TYPE_PRIO 0x24bc
#define MVNETA_FORCE_UNI BIT(21)
#define MVNETA_TXQ_CMD_1 0x24e4
#define MVNETA_TXQ_CMD 0x2448
#define MVNETA_TXQ_DISABLE_SHIFT 8
#define MVNETA_TXQ_ENABLE_MASK 0x000000ff
#define MVNETA_RX_DISCARD_FRAME_COUNT 0x2484
#define MVNETA_OVERRUN_FRAME_COUNT 0x2488
#define MVNETA_GMAC_CLOCK_DIVIDER 0x24f4
#define MVNETA_GMAC_1MS_CLOCK_ENABLE BIT(31)
#define MVNETA_ACC_MODE 0x2500
#define MVNETA_BM_ADDRESS 0x2504
#define MVNETA_CPU_MAP(cpu) (0x2540 + ((cpu) << 2))
#define MVNETA_CPU_RXQ_ACCESS_ALL_MASK 0x000000ff
#define MVNETA_CPU_TXQ_ACCESS_ALL_MASK 0x0000ff00
#define MVNETA_CPU_RXQ_ACCESS(rxq) BIT(rxq)
#define MVNETA_CPU_TXQ_ACCESS(txq) BIT(txq + 8)
#define MVNETA_RXQ_TIME_COAL_REG(q) (0x2580 + ((q) << 2))

/* Exception Interrupt Port/Queue Cause register
 *
 * The behavior of these registers depends on the mapping done using
 * the PCPX2Q registers. For a given CPU, if the bit associated with
 * a queue is not set, then a read of the register from this CPU will
 * always return 0 and a write will do nothing.
 */
#define MVNETA_INTR_NEW_CAUSE 0x25a0
#define MVNETA_INTR_NEW_MASK 0x25a4

/* bits 0..7 = TXQ SENT, one bit per queue.
 * bits 8..15 = RXQ OCCUP, one bit per queue.
 * bits 16..23 = RXQ FREE, one bit per queue.
 * bit 29 = OLD_REG_SUM, see old reg ?
 * bit 30 = TX_ERR_SUM, one bit for 4 ports
 * bit 31 = MISC_SUM, one bit for 4 ports
 */
#define MVNETA_TX_INTR_MASK(nr_txqs) (((1 << nr_txqs) - 1) << 0)
#define MVNETA_TX_INTR_MASK_ALL (0xff << 0)
#define MVNETA_RX_INTR_MASK(nr_rxqs) (((1 << nr_rxqs) - 1) << 8)
#define MVNETA_RX_INTR_MASK_ALL (0xff << 8)
#define MVNETA_MISCINTR_INTR_MASK BIT(31)
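
/* Worked example (illustration only): with the default of eight queues,
 * MVNETA_TX_INTR_MASK(8) = 0x000000ff (TXQ SENT, bits 0..7),
 * MVNETA_RX_INTR_MASK(8) = 0x0000ff00 (RXQ OCCUP, bits 8..15) and
 * MVNETA_MISCINTR_INTR_MASK = 0x80000000 (bit 31), so unmasking every
 * source amounts to writing 0x8000ffff to MVNETA_INTR_NEW_MASK, which is
 * exactly what mvneta_percpu_unmask_interrupt() below does.
 */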
#define MVNETA_INTR_OLD_CAUSE 0x25a8
#define MVNETA_INTR_OLD_MASK 0x25ac

/* Data Path Port/Queue Cause Register */
#define MVNETA_INTR_MISC_CAUSE 0x25b0
#define MVNETA_INTR_MISC_MASK 0x25b4

#define MVNETA_CAUSE_PHY_STATUS_CHANGE BIT(0)
#define MVNETA_CAUSE_LINK_CHANGE BIT(1)
#define MVNETA_CAUSE_PTP BIT(4)

#define MVNETA_CAUSE_INTERNAL_ADDR_ERR BIT(7)
#define MVNETA_CAUSE_RX_OVERRUN BIT(8)
#define MVNETA_CAUSE_RX_CRC_ERROR BIT(9)
#define MVNETA_CAUSE_RX_LARGE_PKT BIT(10)
#define MVNETA_CAUSE_TX_UNDERUN BIT(11)
#define MVNETA_CAUSE_PRBS_ERR BIT(12)
#define MVNETA_CAUSE_PSC_SYNC_CHANGE BIT(13)
#define MVNETA_CAUSE_SERDES_SYNC_ERR BIT(14)

#define MVNETA_CAUSE_BMU_ALLOC_ERR_SHIFT 16
#define MVNETA_CAUSE_BMU_ALLOC_ERR_ALL_MASK (0xF << MVNETA_CAUSE_BMU_ALLOC_ERR_SHIFT)
#define MVNETA_CAUSE_BMU_ALLOC_ERR_MASK(pool) (1 << (MVNETA_CAUSE_BMU_ALLOC_ERR_SHIFT + (pool)))

#define MVNETA_CAUSE_TXQ_ERROR_SHIFT 24
#define MVNETA_CAUSE_TXQ_ERROR_ALL_MASK (0xFF << MVNETA_CAUSE_TXQ_ERROR_SHIFT)
#define MVNETA_CAUSE_TXQ_ERROR_MASK(q) (1 << (MVNETA_CAUSE_TXQ_ERROR_SHIFT + (q)))

#define MVNETA_INTR_ENABLE 0x25b8
#define MVNETA_TXQ_INTR_ENABLE_ALL_MASK 0x0000ff00
#define MVNETA_RXQ_INTR_ENABLE_ALL_MASK 0x000000ff

#define MVNETA_RXQ_CMD 0x2680
#define MVNETA_RXQ_DISABLE_SHIFT 8
#define MVNETA_RXQ_ENABLE_MASK 0x000000ff
#define MVETH_TXQ_TOKEN_COUNT_REG(q) (0x2700 + ((q) << 4))
#define MVETH_TXQ_TOKEN_CFG_REG(q) (0x2704 + ((q) << 4))
#define MVNETA_GMAC_CTRL_0 0x2c00
#define MVNETA_GMAC_MAX_RX_SIZE_SHIFT 2
#define MVNETA_GMAC_MAX_RX_SIZE_MASK 0x7ffc
#define MVNETA_GMAC0_PORT_ENABLE BIT(0)
#define MVNETA_GMAC_CTRL_2 0x2c08
#define MVNETA_GMAC2_INBAND_AN_ENABLE BIT(0)
#define MVNETA_GMAC2_PCS_ENABLE BIT(3)
#define MVNETA_GMAC2_PORT_RGMII BIT(4)
#define MVNETA_GMAC2_PORT_RESET BIT(6)
#define MVNETA_GMAC_STATUS 0x2c10
#define MVNETA_GMAC_LINK_UP BIT(0)
#define MVNETA_GMAC_SPEED_1000 BIT(1)
#define MVNETA_GMAC_SPEED_100 BIT(2)
#define MVNETA_GMAC_FULL_DUPLEX BIT(3)
#define MVNETA_GMAC_RX_FLOW_CTRL_ENABLE BIT(4)
#define MVNETA_GMAC_TX_FLOW_CTRL_ENABLE BIT(5)
#define MVNETA_GMAC_RX_FLOW_CTRL_ACTIVE BIT(6)
#define MVNETA_GMAC_TX_FLOW_CTRL_ACTIVE BIT(7)
#define MVNETA_GMAC_AUTONEG_CONFIG 0x2c0c
#define MVNETA_GMAC_FORCE_LINK_DOWN BIT(0)
#define MVNETA_GMAC_FORCE_LINK_PASS BIT(1)
#define MVNETA_GMAC_INBAND_AN_ENABLE BIT(2)
#define MVNETA_GMAC_CONFIG_MII_SPEED BIT(5)
#define MVNETA_GMAC_CONFIG_GMII_SPEED BIT(6)
#define MVNETA_GMAC_AN_SPEED_EN BIT(7)
#define MVNETA_GMAC_AN_FLOW_CTRL_EN BIT(11)
#define MVNETA_GMAC_CONFIG_FULL_DUPLEX BIT(12)
#define MVNETA_GMAC_AN_DUPLEX_EN BIT(13)
#define MVNETA_MIB_COUNTERS_BASE 0x3000
#define MVNETA_MIB_LATE_COLLISION 0x7c
#define MVNETA_DA_FILT_SPEC_MCAST 0x3400
#define MVNETA_DA_FILT_OTH_MCAST 0x3500
#define MVNETA_DA_FILT_UCAST_BASE 0x3600
#define MVNETA_TXQ_BASE_ADDR_REG(q) (0x3c00 + ((q) << 2))
#define MVNETA_TXQ_SIZE_REG(q) (0x3c20 + ((q) << 2))
#define MVNETA_TXQ_SENT_THRESH_ALL_MASK 0x3fff0000
#define MVNETA_TXQ_SENT_THRESH_MASK(coal) ((coal) << 16)
#define MVNETA_TXQ_UPDATE_REG(q) (0x3c60 + ((q) << 2))
#define MVNETA_TXQ_DEC_SENT_SHIFT 16
#define MVNETA_TXQ_DEC_SENT_MASK 0xff
#define MVNETA_TXQ_STATUS_REG(q) (0x3c40 + ((q) << 2))
#define MVNETA_TXQ_SENT_DESC_SHIFT 16
#define MVNETA_TXQ_SENT_DESC_MASK 0x3fff0000
#define MVNETA_PORT_TX_RESET 0x3cf0
#define MVNETA_PORT_TX_DMA_RESET BIT(0)
#define MVNETA_TX_MTU 0x3e0c
#define MVNETA_TX_TOKEN_SIZE 0x3e14
#define MVNETA_TX_TOKEN_SIZE_MAX 0xffffffff
#define MVNETA_TXQ_TOKEN_SIZE_REG(q) (0x3e40 + ((q) << 2))
#define MVNETA_TXQ_TOKEN_SIZE_MAX 0x7fffffff

#define MVNETA_CAUSE_TXQ_SENT_DESC_ALL_MASK 0xff

/* Descriptor ring Macros */
#define MVNETA_QUEUE_NEXT_DESC(q, index) \
	(((index) < (q)->last_desc) ? ((index) + 1) : 0)
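
/* A minimal sketch (not part of the driver) of how the macro above walks a
 * descriptor ring: the index advances linearly and wraps back to 0 after
 * last_desc, e.g. with last_desc = 127 it goes 125 -> 126 -> 127 -> 0.
 * The helper name and loop are hypothetical, for illustration only.
 */
#if 0 /* illustration only */
static int example_ring_walk(struct mvneta_rx_queue *q, int start, int n)
{
	int index = start;

	while (n--)
		index = MVNETA_QUEUE_NEXT_DESC(q, index); /* wraps at last_desc */
	return index;
}
#endif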
/* Various constants */

#define MVNETA_TXDONE_COAL_PKTS 0 /* interrupt per packet */
#define MVNETA_RX_COAL_PKTS 32
#define MVNETA_RX_COAL_USEC 100

/* The two-byte Marvell header. Either contains a special value used
 * by Marvell switches when a specific hardware mode is enabled (not
 * supported by this driver) or is filled automatically with zeroes on
 * the RX side. Those two bytes being at the front of the Ethernet
 * header, they allow the IP header to be aligned on a 4-byte boundary
 * automatically: the hardware skips those two bytes on its own.
 */
#define MVNETA_MH_SIZE 2
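
/* Illustration (not in the original source): an Ethernet header is 14
 * bytes, so on its own it would leave the IP header only 2-byte aligned.
 * With the 2-byte Marvell header in front, the IP header starts at byte
 * 2 + 14 = 16, i.e. on a 4-byte boundary, the same effect NET_IP_ALIGN
 * achieves in software elsewhere in the networking stack.
 */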
#define MVNETA_VLAN_TAG_LEN 4

#define MVNETA_TX_CSUM_DEF_SIZE 1600
#define MVNETA_TX_CSUM_MAX_SIZE 9800
#define MVNETA_ACC_MODE_EXT1 1
#define MVNETA_ACC_MODE_EXT2 2

#define MVNETA_MAX_DECODE_WIN 6

/* Timeout constants */
#define MVNETA_TX_DISABLE_TIMEOUT_MSEC 1000
#define MVNETA_RX_DISABLE_TIMEOUT_MSEC 1000
#define MVNETA_TX_FIFO_EMPTY_TIMEOUT 10000

#define MVNETA_TX_MTU_MAX 0x3ffff

/* The RSS lookup table actually has 256 entries but we do not use
 * them all
 */
#define MVNETA_RSS_LU_TABLE_SIZE 1

/* TSO header size */
#define TSO_HEADER_SIZE 128

/* Max number of Rx descriptors */
#define MVNETA_MAX_RXD 128

/* Max number of Tx descriptors */
#define MVNETA_MAX_TXD 532

/* Max number of allowed TCP segments for software TSO */
#define MVNETA_MAX_TSO_SEGS 100

#define MVNETA_MAX_SKB_DESCS (MVNETA_MAX_TSO_SEGS * 2 + MAX_SKB_FRAGS)

/* descriptor aligned size */
#define MVNETA_DESC_ALIGNED_SIZE 32

/* Number of bytes to be taken into account by HW when putting incoming data
 * to the buffers. It is needed in case NET_SKB_PAD exceeds the maximum packet
 * offset supported in the MVNETA_RXQ_CONFIG_REG(q) registers.
 */
#define MVNETA_RX_PKT_OFFSET_CORRECTION 64

#define MVNETA_RX_PKT_SIZE(mtu) \
	ALIGN((mtu) + MVNETA_MH_SIZE + MVNETA_VLAN_TAG_LEN + \
	      ETH_HLEN + ETH_FCS_LEN, \
	      cache_line_size())

#define IS_TSO_HEADER(txq, addr) \
	((addr >= txq->tso_hdrs_phys) && \
	 (addr < txq->tso_hdrs_phys + txq->size * TSO_HEADER_SIZE))
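
/* Illustration with hypothetical values (not from the original source):
 * for a queue with size = 532 and tso_hdrs_phys = 0x10000000, the TSO
 * header block spans [0x10000000, 0x10000000 + 532 * 128). IS_TSO_HEADER()
 * lets the TX completion path tell these driver-owned headers apart from
 * mapped skb data, so dma_unmap_single() is only called for the latter.
 */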
#define MVNETA_RX_GET_BM_POOL_ID(rxd) \
	(((rxd)->status & MVNETA_RXD_BM_POOL_MASK) >> MVNETA_RXD_BM_POOL_SHIFT)

struct mvneta_statistic {
	unsigned short offset;
	unsigned short type;
	const char name[ETH_GSTRING_LEN];
};

#define T_REG_32 32
#define T_REG_64 64

static const struct mvneta_statistic mvneta_statistics[] = {
	{ 0x3000, T_REG_64, "good_octets_received", },
	{ 0x3010, T_REG_32, "good_frames_received", },
	{ 0x3008, T_REG_32, "bad_octets_received", },
	{ 0x3014, T_REG_32, "bad_frames_received", },
	{ 0x3018, T_REG_32, "broadcast_frames_received", },
	{ 0x301c, T_REG_32, "multicast_frames_received", },
	{ 0x3050, T_REG_32, "unrec_mac_control_received", },
	{ 0x3058, T_REG_32, "good_fc_received", },
	{ 0x305c, T_REG_32, "bad_fc_received", },
	{ 0x3060, T_REG_32, "undersize_received", },
	{ 0x3064, T_REG_32, "fragments_received", },
	{ 0x3068, T_REG_32, "oversize_received", },
	{ 0x306c, T_REG_32, "jabber_received", },
	{ 0x3070, T_REG_32, "mac_receive_error", },
	{ 0x3074, T_REG_32, "bad_crc_event", },
	{ 0x3078, T_REG_32, "collision", },
	{ 0x307c, T_REG_32, "late_collision", },
	{ 0x2484, T_REG_32, "rx_discard", },
	{ 0x2488, T_REG_32, "rx_overrun", },
	{ 0x3020, T_REG_32, "frames_64_octets", },
	{ 0x3024, T_REG_32, "frames_65_to_127_octets", },
	{ 0x3028, T_REG_32, "frames_128_to_255_octets", },
	{ 0x302c, T_REG_32, "frames_256_to_511_octets", },
	{ 0x3030, T_REG_32, "frames_512_to_1023_octets", },
	{ 0x3034, T_REG_32, "frames_1024_to_max_octets", },
	{ 0x3038, T_REG_64, "good_octets_sent", },
	{ 0x3040, T_REG_32, "good_frames_sent", },
	{ 0x3044, T_REG_32, "excessive_collision", },
	{ 0x3048, T_REG_32, "multicast_frames_sent", },
	{ 0x304c, T_REG_32, "broadcast_frames_sent", },
	{ 0x3054, T_REG_32, "fc_sent", },
	{ 0x300c, T_REG_32, "internal_mac_transmit_err", },
};

struct mvneta_pcpu_stats {
	struct u64_stats_sync syncp;
	u64 rx_packets;
	u64 rx_bytes;
	u64 tx_packets;
	u64 tx_bytes;
};

struct mvneta_pcpu_port {
	/* Pointer to the shared port */
	struct mvneta_port *pp;

	/* Pointer to the CPU-local NAPI struct */
	struct napi_struct napi;

	/* Cause of the previous interrupt */
	u32 cause_rx_tx;
};

struct mvneta_port {
	struct mvneta_pcpu_port __percpu *ports;
	struct mvneta_pcpu_stats __percpu *stats;

	unsigned int frag_size;

	struct mvneta_rx_queue *rxqs;
	struct mvneta_tx_queue *txqs;
	struct net_device *dev;
	struct hlist_node node_online;
	struct hlist_node node_dead;

	/* Protect the access to the percpu interrupt registers,
	 * ensuring that the configuration remains coherent.
	 */
	spinlock_t lock;

	struct napi_struct napi;

	struct mii_bus *mii_bus;
	phy_interface_t phy_interface;
	struct device_node *phy_node;

	unsigned int tx_csum_limit;
	unsigned int use_inband_status:1;

	struct mvneta_bm *bm_priv;
	struct mvneta_bm_pool *pool_long;
	struct mvneta_bm_pool *pool_short;

	u64 ethtool_stats[ARRAY_SIZE(mvneta_statistics)];

	u32 indir[MVNETA_RSS_LU_TABLE_SIZE];

	/* Flags for special SoC configurations */
	bool neta_armada3700;
	u16 rx_offset_correction;
};

/* The mvneta_tx_desc and mvneta_rx_desc structures describe the
 * layout of the transmit and reception DMA descriptors, and their
 * layout is therefore defined by the hardware design
 */

#define MVNETA_TX_L3_OFF_SHIFT 0
#define MVNETA_TX_IP_HLEN_SHIFT 8
#define MVNETA_TX_L4_UDP BIT(16)
#define MVNETA_TX_L3_IP6 BIT(17)
#define MVNETA_TXD_IP_CSUM BIT(18)
#define MVNETA_TXD_Z_PAD BIT(19)
#define MVNETA_TXD_L_DESC BIT(20)
#define MVNETA_TXD_F_DESC BIT(21)
#define MVNETA_TXD_FLZ_DESC (MVNETA_TXD_Z_PAD | \
			     MVNETA_TXD_L_DESC | \
			     MVNETA_TXD_F_DESC)
#define MVNETA_TX_L4_CSUM_FULL BIT(30)
#define MVNETA_TX_L4_CSUM_NOT BIT(31)

#define MVNETA_RXD_ERR_CRC 0x0
#define MVNETA_RXD_BM_POOL_SHIFT 13
#define MVNETA_RXD_BM_POOL_MASK (BIT(13) | BIT(14))
#define MVNETA_RXD_ERR_SUMMARY BIT(16)
#define MVNETA_RXD_ERR_OVERRUN BIT(17)
#define MVNETA_RXD_ERR_LEN BIT(18)
#define MVNETA_RXD_ERR_RESOURCE (BIT(17) | BIT(18))
#define MVNETA_RXD_ERR_CODE_MASK (BIT(17) | BIT(18))
#define MVNETA_RXD_L3_IP4 BIT(25)
#define MVNETA_RXD_FIRST_LAST_DESC (BIT(26) | BIT(27))
#define MVNETA_RXD_L4_CSUM_OK BIT(30)
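
/* A minimal sketch (not part of the driver) of how the RX status bits
 * above are typically decoded; the helper name is hypothetical.
 */
#if 0 /* illustration only */
static bool example_rx_desc_ok(u32 status)
{
	/* A usable frame is a single first+last descriptor with no error */
	return (status & MVNETA_RXD_FIRST_LAST_DESC) ==
		MVNETA_RXD_FIRST_LAST_DESC &&
	       !(status & MVNETA_RXD_ERR_SUMMARY);
}
#endif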
#if defined(__LITTLE_ENDIAN)
struct mvneta_tx_desc {
	u32 command;		/* Options used by HW for packet transmitting */
	u16 reserved1;		/* csum_l4 (for future use) */
	u16 data_size;		/* Data size of transmitted packet in bytes */
	u32 buf_phys_addr;	/* Physical addr of transmitted buffer */
	u32 reserved2;		/* hw_cmd - (for future use, PMT) */
	u32 reserved3[4];	/* Reserved - (for future use) */
};

struct mvneta_rx_desc {
	u32 status;		/* Info about received packet */
	u16 reserved1;		/* pnc_info - (for future use, PnC) */
	u16 data_size;		/* Size of received packet in bytes */

	u32 buf_phys_addr;	/* Physical address of the buffer */
	u32 reserved2;		/* pnc_flow_id (for future use, PnC) */

	u32 buf_cookie;		/* cookie for access to RX buffer in rx path */
	u16 reserved3;		/* prefetch_cmd, for future use */
	u16 reserved4;		/* csum_l4 - (for future use, PnC) */

	u32 reserved5;		/* pnc_extra PnC (for future use, PnC) */
	u32 reserved6;		/* hw_cmd (for future use, PnC and HWF) */
};
#else
struct mvneta_tx_desc {
	u16 data_size;		/* Data size of transmitted packet in bytes */
	u16 reserved1;		/* csum_l4 (for future use) */
	u32 command;		/* Options used by HW for packet transmitting */
	u32 reserved2;		/* hw_cmd - (for future use, PMT) */
	u32 buf_phys_addr;	/* Physical addr of transmitted buffer */
	u32 reserved3[4];	/* Reserved - (for future use) */
};

struct mvneta_rx_desc {
	u16 data_size;		/* Size of received packet in bytes */
	u16 reserved1;		/* pnc_info - (for future use, PnC) */
	u32 status;		/* Info about received packet */

	u32 reserved2;		/* pnc_flow_id (for future use, PnC) */
	u32 buf_phys_addr;	/* Physical address of the buffer */

	u16 reserved4;		/* csum_l4 - (for future use, PnC) */
	u16 reserved3;		/* prefetch_cmd, for future use */
	u32 buf_cookie;		/* cookie for access to RX buffer in rx path */

	u32 reserved5;		/* pnc_extra PnC (for future use, PnC) */
	u32 reserved6;		/* hw_cmd (for future use, PnC and HWF) */
};
#endif

struct mvneta_tx_queue {
	/* Number of this TX queue, in the range 0-7 */
	u8 id;

	/* Number of TX DMA descriptors in the descriptor ring */
	int size;

	/* Number of currently used TX DMA descriptors in the
	 * descriptor ring
	 */
	int count;
	int pending;
	int tx_stop_threshold;
	int tx_wake_threshold;

	/* Array of transmitted skb */
	struct sk_buff **tx_skb;

	/* Index of last TX DMA descriptor that was inserted */
	int txq_put_index;

	/* Index of the TX DMA descriptor to be cleaned up */
	int txq_get_index;

	/* Virtual address of the TX DMA descriptors array */
	struct mvneta_tx_desc *descs;

	/* DMA address of the TX DMA descriptors array */
	dma_addr_t descs_phys;

	/* Index of the last TX DMA descriptor */
	int last_desc;

	/* Index of the next TX DMA descriptor to process */
	int next_desc_to_proc;

	/* DMA buffers for TSO headers */
	char *tso_hdrs;

	/* DMA address of TSO headers */
	dma_addr_t tso_hdrs_phys;

	/* Affinity mask for CPUs */
	cpumask_t affinity_mask;
};

struct mvneta_rx_queue {
	/* rx queue number, in the range 0-7 */
	u8 id;

	/* num of rx descriptors in the rx descriptor ring */
	int size;

	/* counter of times when mvneta_refill() failed */
	int missed;

	u32 pkts_coal;
	u32 time_coal;

	/* Virtual address of the RX buffer */
	void **buf_virt_addr;

	/* Virtual address of the RX DMA descriptors array */
	struct mvneta_rx_desc *descs;

	/* DMA address of the RX DMA descriptors array */
	dma_addr_t descs_phys;

	/* Index of the last RX DMA descriptor */
	int last_desc;

	/* Index of the next RX DMA descriptor to process */
	int next_desc_to_proc;
};

static enum cpuhp_state online_hpstate;

/* The hardware supports eight (8) rx queues and eight (8) tx queues;
 * all of them are allocated here, and which ones actually carry traffic
 * is governed by rxq_def and the per-CPU queue mappings set up in
 * mvneta_defaults_set().
 */
static int rxq_number = 8;
static int txq_number = 8;

static int rx_copybreak __read_mostly = 256;

/* HW BM requires that each port be identified by a unique ID */
static int global_port_id;

#define MVNETA_DRIVER_NAME "mvneta"
#define MVNETA_DRIVER_VERSION "1.0"

/* Utility/helper methods */
/* Write helper method */
static void mvreg_write(struct mvneta_port *pp, u32 offset, u32 data)
	writel(data, pp->base + offset);

/* Read helper method */
static u32 mvreg_read(struct mvneta_port *pp, u32 offset)
	return readl(pp->base + offset);

/* Increment txq get counter */
static void mvneta_txq_inc_get(struct mvneta_tx_queue *txq)
	txq->txq_get_index++;
	if (txq->txq_get_index == txq->size)
		txq->txq_get_index = 0;

/* Increment txq put counter */
static void mvneta_txq_inc_put(struct mvneta_tx_queue *txq)
	txq->txq_put_index++;
	if (txq->txq_put_index == txq->size)
		txq->txq_put_index = 0;

/* Clear all MIB counters */
static void mvneta_mib_counters_clear(struct mvneta_port *pp)
	/* Perform dummy reads from MIB counters */
	for (i = 0; i < MVNETA_MIB_LATE_COLLISION; i += 4)
		dummy = mvreg_read(pp, (MVNETA_MIB_COUNTERS_BASE + i));
	dummy = mvreg_read(pp, MVNETA_RX_DISCARD_FRAME_COUNT);
	dummy = mvreg_read(pp, MVNETA_OVERRUN_FRAME_COUNT);

/* Get System Network Statistics */
static void
mvneta_get_stats64(struct net_device *dev,
		   struct rtnl_link_stats64 *stats)
	struct mvneta_port *pp = netdev_priv(dev);
	unsigned int start;
	int cpu;

	for_each_possible_cpu(cpu) {
		struct mvneta_pcpu_stats *cpu_stats;
		u64 rx_packets;
		u64 rx_bytes;
		u64 tx_packets;
		u64 tx_bytes;

		cpu_stats = per_cpu_ptr(pp->stats, cpu);
		do {
			start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
			rx_packets = cpu_stats->rx_packets;
			rx_bytes = cpu_stats->rx_bytes;
			tx_packets = cpu_stats->tx_packets;
			tx_bytes = cpu_stats->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));

		stats->rx_packets += rx_packets;
		stats->rx_bytes += rx_bytes;
		stats->tx_packets += tx_packets;
		stats->tx_bytes += tx_bytes;
	}

	stats->rx_errors = dev->stats.rx_errors;
	stats->rx_dropped = dev->stats.rx_dropped;

	stats->tx_dropped = dev->stats.tx_dropped;

/* Rx descriptors helper methods */

/* Checks whether the RX descriptor having this status is both the first
 * and the last descriptor for the RX packet. Each RX packet is currently
 * received through a single RX descriptor, so not having each RX
 * descriptor with its first and last bits set is an error
 */
static int mvneta_rxq_desc_is_first_last(u32 status)
	return (status & MVNETA_RXD_FIRST_LAST_DESC) ==
	       MVNETA_RXD_FIRST_LAST_DESC;

/* Add number of descriptors ready to receive new packets */
static void mvneta_rxq_non_occup_desc_add(struct mvneta_port *pp,
					  struct mvneta_rx_queue *rxq,
					  int ndescs)
	/* Only MVNETA_RXQ_ADD_NON_OCCUPIED_MAX (255) descriptors can
	 * be added at once
	 */
	while (ndescs > MVNETA_RXQ_ADD_NON_OCCUPIED_MAX) {
		mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id),
			    (MVNETA_RXQ_ADD_NON_OCCUPIED_MAX <<
			     MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT));
		ndescs -= MVNETA_RXQ_ADD_NON_OCCUPIED_MAX;
	}

	mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id),
		    (ndescs << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT));
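
/* Worked example (illustration only): refilling a freshly initialized
 * ring of MVNETA_MAX_RXD = 128 descriptors calls this helper with
 * ndescs = 128; since 128 <= 255 the while loop is skipped and a single
 * write of 128 << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT updates the status
 * register. Only a refill of more than 255 descriptors needs several
 * writes.
 */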
/* Get number of RX descriptors occupied by received packets */
static int mvneta_rxq_busy_desc_num_get(struct mvneta_port *pp,
					struct mvneta_rx_queue *rxq)
	val = mvreg_read(pp, MVNETA_RXQ_STATUS_REG(rxq->id));
	return val & MVNETA_RXQ_OCCUPIED_ALL_MASK;

/* Update num of rx desc called upon return from rx path or
 * from mvneta_rxq_drop_pkts().
 */
static void mvneta_rxq_desc_num_update(struct mvneta_port *pp,
				       struct mvneta_rx_queue *rxq,
				       int rx_done, int rx_filled)
	if ((rx_done <= 0xff) && (rx_filled <= 0xff)) {
		val = rx_done |
		      (rx_filled << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT);
		mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), val);
		return;
	}

	/* Only 255 descriptors can be added at once */
	while ((rx_done > 0) || (rx_filled > 0)) {
		if (rx_done <= 0xff) {
			val = rx_done;
			rx_done = 0;
		} else {
			val = 0xff;
			rx_done -= 0xff;
		}
		if (rx_filled <= 0xff) {
			val |= rx_filled << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT;
			rx_filled = 0;
		} else {
			val |= 0xff << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT;
			rx_filled -= 0xff;
		}
		mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), val);
	}

/* Get pointer to next RX descriptor to be processed by SW */
static struct mvneta_rx_desc *
mvneta_rxq_next_desc_get(struct mvneta_rx_queue *rxq)
	int rx_desc = rxq->next_desc_to_proc;

	rxq->next_desc_to_proc = MVNETA_QUEUE_NEXT_DESC(rxq, rx_desc);
	prefetch(rxq->descs + rxq->next_desc_to_proc);
	return rxq->descs + rx_desc;

/* Change maximum receive size of the port. */
static void mvneta_max_rx_size_set(struct mvneta_port *pp, int max_rx_size)
	val = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
	val &= ~MVNETA_GMAC_MAX_RX_SIZE_MASK;
	val |= ((max_rx_size - MVNETA_MH_SIZE) / 2) <<
		MVNETA_GMAC_MAX_RX_SIZE_SHIFT;
	mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);

/* Set rx queue offset */
static void mvneta_rxq_offset_set(struct mvneta_port *pp,
				  struct mvneta_rx_queue *rxq,
				  int offset)
	val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
	val &= ~MVNETA_RXQ_PKT_OFFSET_ALL_MASK;

	val |= MVNETA_RXQ_PKT_OFFSET_MASK(offset >> 3);
	mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);

/* Tx descriptors helper methods */

/* Update HW with number of TX descriptors to be sent */
static void mvneta_txq_pend_desc_add(struct mvneta_port *pp,
				     struct mvneta_tx_queue *txq,
				     int pend_desc)
	/* Only 255 descriptors can be added at once; assume the caller
	 * processes TX descriptors in quanta of less than 256
	 */
	val = pend_desc + txq->pending;
	mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
	txq->pending = 0;

/* Get pointer to next TX descriptor to be processed (send) by HW */
static struct mvneta_tx_desc *
mvneta_txq_next_desc_get(struct mvneta_tx_queue *txq)
	int tx_desc = txq->next_desc_to_proc;

	txq->next_desc_to_proc = MVNETA_QUEUE_NEXT_DESC(txq, tx_desc);
	return txq->descs + tx_desc;

/* Release the last allocated TX descriptor. Useful to handle DMA
 * mapping failures in the TX path.
 */
static void mvneta_txq_desc_put(struct mvneta_tx_queue *txq)
	if (txq->next_desc_to_proc == 0)
		txq->next_desc_to_proc = txq->last_desc - 1;
	else
		txq->next_desc_to_proc--;
/* Set rxq buf size */
static void mvneta_rxq_buf_size_set(struct mvneta_port *pp,
				    struct mvneta_rx_queue *rxq,
				    int buf_size)
	val = mvreg_read(pp, MVNETA_RXQ_SIZE_REG(rxq->id));

	val &= ~MVNETA_RXQ_BUF_SIZE_MASK;
	val |= ((buf_size >> 3) << MVNETA_RXQ_BUF_SIZE_SHIFT);

	mvreg_write(pp, MVNETA_RXQ_SIZE_REG(rxq->id), val);

/* Disable buffer management (BM) */
static void mvneta_rxq_bm_disable(struct mvneta_port *pp,
				  struct mvneta_rx_queue *rxq)
	val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
	val &= ~MVNETA_RXQ_HW_BUF_ALLOC;
	mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);

/* Enable buffer management (BM) */
static void mvneta_rxq_bm_enable(struct mvneta_port *pp,
				 struct mvneta_rx_queue *rxq)
	val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
	val |= MVNETA_RXQ_HW_BUF_ALLOC;
	mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);

/* Notify HW about port's assignment of pool for bigger packets */
static void mvneta_rxq_long_pool_set(struct mvneta_port *pp,
				     struct mvneta_rx_queue *rxq)
	val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
	val &= ~MVNETA_RXQ_LONG_POOL_ID_MASK;
	val |= (pp->pool_long->id << MVNETA_RXQ_LONG_POOL_ID_SHIFT);

	mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);

/* Notify HW about port's assignment of pool for smaller packets */
static void mvneta_rxq_short_pool_set(struct mvneta_port *pp,
				      struct mvneta_rx_queue *rxq)
	val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
	val &= ~MVNETA_RXQ_SHORT_POOL_ID_MASK;
	val |= (pp->pool_short->id << MVNETA_RXQ_SHORT_POOL_ID_SHIFT);

	mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);

/* Set port's receive buffer size for assigned BM pool */
static inline void mvneta_bm_pool_bufsize_set(struct mvneta_port *pp,
					      int buf_size,
					      u8 pool_id)
	if (!IS_ALIGNED(buf_size, 8)) {
		dev_warn(pp->dev->dev.parent,
			 "illegal buf_size value %d, round to %d\n",
			 buf_size, ALIGN(buf_size, 8));
		buf_size = ALIGN(buf_size, 8);
	}

	val = mvreg_read(pp, MVNETA_PORT_POOL_BUFFER_SZ_REG(pool_id));
	val |= buf_size & MVNETA_PORT_POOL_BUFFER_SZ_MASK;
	mvreg_write(pp, MVNETA_PORT_POOL_BUFFER_SZ_REG(pool_id), val);
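
/* Worked example (illustration only): a caller passing buf_size = 1900
 * would trigger the warning above and the value would be rounded up to
 * ALIGN(1900, 8) = 1904, since MVNETA_PORT_POOL_BUFFER_SZ_MASK (0xfff8)
 * cannot represent the low three bits.
 */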
/* Configure an MBUS window in order to enable access to the BM internal SRAM */
static int mvneta_mbus_io_win_set(struct mvneta_port *pp, u32 base, u32 wsize,
				  u8 target, u8 attr)
	u32 win_enable, win_protect;
	int i;

	win_enable = mvreg_read(pp, MVNETA_BASE_ADDR_ENABLE);

	if (pp->bm_win_id < 0) {
		/* Find first not occupied window */
		for (i = 0; i < MVNETA_MAX_DECODE_WIN; i++) {
			if (win_enable & (1 << i)) {
				pp->bm_win_id = i;
				break;
			}
		}
		if (i == MVNETA_MAX_DECODE_WIN)
			return -ENOMEM;
	} else {
		i = pp->bm_win_id;
	}

	mvreg_write(pp, MVNETA_WIN_BASE(i), 0);
	mvreg_write(pp, MVNETA_WIN_SIZE(i), 0);

	if (i < 4)
		mvreg_write(pp, MVNETA_WIN_REMAP(i), 0);

	mvreg_write(pp, MVNETA_WIN_BASE(i), (base & 0xffff0000) |
		    (attr << 8) | target);

	mvreg_write(pp, MVNETA_WIN_SIZE(i), (wsize - 1) & 0xffff0000);

	win_protect = mvreg_read(pp, MVNETA_ACCESS_PROTECT_ENABLE);
	win_protect |= 3 << (2 * i);
	mvreg_write(pp, MVNETA_ACCESS_PROTECT_ENABLE, win_protect);

	win_enable &= ~(1 << i);
	mvreg_write(pp, MVNETA_BASE_ADDR_ENABLE, win_enable);

	return 0;

static int mvneta_bm_port_mbus_init(struct mvneta_port *pp)
	u32 wsize;
	u8 target, attr;
	int err;

	/* Get BM window information */
	err = mvebu_mbus_get_io_win_info(pp->bm_priv->bppi_phys_addr, &wsize,
					 &target, &attr);
	if (err < 0)
		return err;

	pp->bm_win_id = -1;

	/* Open NETA -> BM window */
	err = mvneta_mbus_io_win_set(pp, pp->bm_priv->bppi_phys_addr, wsize,
				     target, attr);
	if (err < 0) {
		netdev_info(pp->dev, "fail to configure mbus window to BM\n");
		return err;
	}

	return 0;

/* Assign and initialize pools for port. In case of failure, the buffer
 * manager will remain disabled for the current port.
 */
static int mvneta_bm_port_init(struct platform_device *pdev,
			       struct mvneta_port *pp)
	struct device_node *dn = pdev->dev.of_node;
	u32 long_pool_id, short_pool_id;

	if (!pp->neta_armada3700) {
		int ret;

		ret = mvneta_bm_port_mbus_init(pp);
		if (ret)
			return ret;
	}

	if (of_property_read_u32(dn, "bm,pool-long", &long_pool_id)) {
		netdev_info(pp->dev, "missing long pool id\n");
		return -EINVAL;
	}

	/* Create port's long pool depending on mtu */
	pp->pool_long = mvneta_bm_pool_use(pp->bm_priv, long_pool_id,
					   MVNETA_BM_LONG, pp->id,
					   MVNETA_RX_PKT_SIZE(pp->dev->mtu));
	if (!pp->pool_long) {
		netdev_info(pp->dev, "fail to obtain long pool for port\n");
		return -ENOMEM;
	}

	pp->pool_long->port_map |= 1 << pp->id;

	mvneta_bm_pool_bufsize_set(pp, pp->pool_long->buf_size,
				   pp->pool_long->id);

	/* If short pool id is not defined, assume using single pool */
	if (of_property_read_u32(dn, "bm,pool-short", &short_pool_id))
		short_pool_id = long_pool_id;

	/* Create port's short pool */
	pp->pool_short = mvneta_bm_pool_use(pp->bm_priv, short_pool_id,
					    MVNETA_BM_SHORT, pp->id,
					    MVNETA_BM_SHORT_PKT_SIZE);
	if (!pp->pool_short) {
		netdev_info(pp->dev, "fail to obtain short pool for port\n");
		mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id);
		return -ENOMEM;
	}

	if (short_pool_id != long_pool_id) {
		pp->pool_short->port_map |= 1 << pp->id;
		mvneta_bm_pool_bufsize_set(pp, pp->pool_short->buf_size,
					   pp->pool_short->id);
	}

	return 0;

/* Update settings of a pool for bigger packets */
static void mvneta_bm_update_mtu(struct mvneta_port *pp, int mtu)
	struct mvneta_bm_pool *bm_pool = pp->pool_long;
	struct hwbm_pool *hwbm_pool = &bm_pool->hwbm_pool;
	int num;

	/* Release all buffers from long pool */
	mvneta_bm_bufs_free(pp->bm_priv, bm_pool, 1 << pp->id);
	if (hwbm_pool->buf_num) {
		WARN(1, "cannot free all buffers in pool %d\n",
		     bm_pool->id);
		goto bm_mtu_err;
	}

	bm_pool->pkt_size = MVNETA_RX_PKT_SIZE(mtu);
	bm_pool->buf_size = MVNETA_RX_BUF_SIZE(bm_pool->pkt_size);
	hwbm_pool->frag_size = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
		SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(bm_pool->pkt_size));

	/* Fill entire long pool */
	num = hwbm_pool_add(hwbm_pool, hwbm_pool->size, GFP_ATOMIC);
	if (num != hwbm_pool->size) {
		WARN(1, "pool %d: %d of %d allocated\n",
		     bm_pool->id, num, hwbm_pool->size);
		goto bm_mtu_err;
	}

	mvneta_bm_pool_bufsize_set(pp, bm_pool->buf_size, bm_pool->id);

	return;

bm_mtu_err:
	mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id);
	mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_short, 1 << pp->id);

	pp->bm_priv = NULL;
	mvreg_write(pp, MVNETA_ACC_MODE, MVNETA_ACC_MODE_EXT1);
	netdev_info(pp->dev, "fail to update MTU, fall back to software BM\n");
/* Start the Ethernet port RX and TX activity */
static void mvneta_port_up(struct mvneta_port *pp)
	int queue;
	u32 q_map;

	/* Enable all initialized TXs. */
	q_map = 0;
	for (queue = 0; queue < txq_number; queue++) {
		struct mvneta_tx_queue *txq = &pp->txqs[queue];

		if (txq->descs != NULL)
			q_map |= (1 << queue);
	}
	mvreg_write(pp, MVNETA_TXQ_CMD, q_map);

	/* Enable all initialized RXQs. */
	q_map = 0;
	for (queue = 0; queue < rxq_number; queue++) {
		struct mvneta_rx_queue *rxq = &pp->rxqs[queue];

		if (rxq->descs != NULL)
			q_map |= (1 << queue);
	}
	mvreg_write(pp, MVNETA_RXQ_CMD, q_map);

/* Stop the Ethernet port activity */
static void mvneta_port_down(struct mvneta_port *pp)
	u32 val;
	int count;

	/* Stop Rx port activity. Check port Rx activity. */
	val = mvreg_read(pp, MVNETA_RXQ_CMD) & MVNETA_RXQ_ENABLE_MASK;

	/* Issue stop command for active channels only */
	if (val != 0)
		mvreg_write(pp, MVNETA_RXQ_CMD,
			    val << MVNETA_RXQ_DISABLE_SHIFT);

	/* Wait for all Rx activity to terminate. */
	count = 0;
	do {
		if (count++ >= MVNETA_RX_DISABLE_TIMEOUT_MSEC) {
			netdev_warn(pp->dev,
				    "TIMEOUT for RX stopped ! rx_queue_cmd: 0x%08x\n",
				    val);
			break;
		}
		mdelay(1);

		val = mvreg_read(pp, MVNETA_RXQ_CMD);
	} while (val & MVNETA_RXQ_ENABLE_MASK);

	/* Stop Tx port activity. Check port Tx activity. Issue stop
	 * command for active channels only
	 */
	val = (mvreg_read(pp, MVNETA_TXQ_CMD)) & MVNETA_TXQ_ENABLE_MASK;

	if (val != 0)
		mvreg_write(pp, MVNETA_TXQ_CMD,
			    (val << MVNETA_TXQ_DISABLE_SHIFT));

	/* Wait for all Tx activity to terminate. */
	count = 0;
	do {
		if (count++ >= MVNETA_TX_DISABLE_TIMEOUT_MSEC) {
			netdev_warn(pp->dev,
				    "TIMEOUT for TX stopped status=0x%08x\n",
				    val);
			break;
		}
		mdelay(1);

		/* Check TX Command reg that all Txqs are stopped */
		val = mvreg_read(pp, MVNETA_TXQ_CMD);
	} while (val & MVNETA_TXQ_ENABLE_MASK);

	/* Double check to verify that TX FIFO is empty */
	count = 0;
	do {
		if (count++ >= MVNETA_TX_FIFO_EMPTY_TIMEOUT) {
			netdev_warn(pp->dev,
				    "TX FIFO empty timeout status=0x%08x\n",
				    val);
			break;
		}
		mdelay(1);

		val = mvreg_read(pp, MVNETA_PORT_STATUS);
	} while (!(val & MVNETA_TX_FIFO_EMPTY) &&
		 (val & MVNETA_TX_IN_PRGRS));

/* Enable the port by setting the port enable bit of the MAC control register */
static void mvneta_port_enable(struct mvneta_port *pp)
	u32 val;

	val = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
	val |= MVNETA_GMAC0_PORT_ENABLE;
	mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);

/* Disable the port and wait for about 200 usec before returning */
static void mvneta_port_disable(struct mvneta_port *pp)
	u32 val;

	/* Reset the Enable bit in the Serial Control Register */
	val = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
	val &= ~MVNETA_GMAC0_PORT_ENABLE;
	mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);

	udelay(200);

/* Multicast tables methods */

/* Set all entries in Unicast MAC Table; queue==-1 means reject all */
static void mvneta_set_ucast_table(struct mvneta_port *pp, int queue)
	int offset;
	u32 val;

	if (queue == -1) {
		val = 0;
	} else {
		val = 0x1 | (queue << 1);
		val |= (val << 24) | (val << 16) | (val << 8);
	}

	for (offset = 0; offset <= 0xc; offset += 4)
		mvreg_write(pp, MVNETA_DA_FILT_UCAST_BASE + offset, val);

/* Set all entries in Special Multicast MAC Table; queue==-1 means reject all */
static void mvneta_set_special_mcast_table(struct mvneta_port *pp, int queue)
	int offset;
	u32 val;

	if (queue == -1) {
		val = 0;
	} else {
		val = 0x1 | (queue << 1);
		val |= (val << 24) | (val << 16) | (val << 8);
	}

	for (offset = 0; offset <= 0xfc; offset += 4)
		mvreg_write(pp, MVNETA_DA_FILT_SPEC_MCAST + offset, val);

/* Set all entries in Other Multicast MAC Table. queue==-1 means reject all */
static void mvneta_set_other_mcast_table(struct mvneta_port *pp, int queue)
	int offset;
	u32 val;

	if (queue == -1) {
		memset(pp->mcast_count, 0, sizeof(pp->mcast_count));
		val = 0;
	} else {
		memset(pp->mcast_count, 1, sizeof(pp->mcast_count));
		val = 0x1 | (queue << 1);
		val |= (val << 24) | (val << 16) | (val << 8);
	}

	for (offset = 0; offset <= 0xfc; offset += 4)
		mvreg_write(pp, MVNETA_DA_FILT_OTH_MCAST + offset, val);
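
/* Worked example (illustration only): for queue = 3 each table entry byte
 * becomes 0x1 | (3 << 1) = 0x07 (accept bit + destination queue 3), and
 * replicating it into all four byte lanes yields val = 0x07070707, so a
 * single register write programs four filter entries at once.
 */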
static void mvneta_set_autoneg(struct mvneta_port *pp, int enable)
	u32 val;

	if (enable) {
		val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
		val &= ~(MVNETA_GMAC_FORCE_LINK_PASS |
			 MVNETA_GMAC_FORCE_LINK_DOWN |
			 MVNETA_GMAC_AN_FLOW_CTRL_EN);
		val |= MVNETA_GMAC_INBAND_AN_ENABLE |
		       MVNETA_GMAC_AN_SPEED_EN |
		       MVNETA_GMAC_AN_DUPLEX_EN;
		mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);

		val = mvreg_read(pp, MVNETA_GMAC_CLOCK_DIVIDER);
		val |= MVNETA_GMAC_1MS_CLOCK_ENABLE;
		mvreg_write(pp, MVNETA_GMAC_CLOCK_DIVIDER, val);

		val = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
		val |= MVNETA_GMAC2_INBAND_AN_ENABLE;
		mvreg_write(pp, MVNETA_GMAC_CTRL_2, val);
	} else {
		val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
		val &= ~(MVNETA_GMAC_INBAND_AN_ENABLE |
			 MVNETA_GMAC_AN_SPEED_EN |
			 MVNETA_GMAC_AN_DUPLEX_EN);
		mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);

		val = mvreg_read(pp, MVNETA_GMAC_CLOCK_DIVIDER);
		val &= ~MVNETA_GMAC_1MS_CLOCK_ENABLE;
		mvreg_write(pp, MVNETA_GMAC_CLOCK_DIVIDER, val);

		val = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
		val &= ~MVNETA_GMAC2_INBAND_AN_ENABLE;
		mvreg_write(pp, MVNETA_GMAC_CTRL_2, val);
	}

static void mvneta_percpu_unmask_interrupt(void *arg)
	struct mvneta_port *pp = arg;

	/* All the queues are unmasked, but actually only the ones
	 * mapped to this CPU will be unmasked
	 */
	mvreg_write(pp, MVNETA_INTR_NEW_MASK,
		    MVNETA_RX_INTR_MASK_ALL |
		    MVNETA_TX_INTR_MASK_ALL |
		    MVNETA_MISCINTR_INTR_MASK);

static void mvneta_percpu_mask_interrupt(void *arg)
	struct mvneta_port *pp = arg;

	/* All the queues are masked, but actually only the ones
	 * mapped to this CPU will be masked
	 */
	mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);
	mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0);
	mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0);

static void mvneta_percpu_clear_intr_cause(void *arg)
	struct mvneta_port *pp = arg;

	/* All the queues are cleared, but actually only the ones
	 * mapped to this CPU will be cleared
	 */
	mvreg_write(pp, MVNETA_INTR_NEW_CAUSE, 0);
	mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0);
	mvreg_write(pp, MVNETA_INTR_OLD_CAUSE, 0);

/* This method sets defaults to the NETA port:
 * Clears interrupt Cause and Mask registers.
 * Clears all MAC tables.
 * Sets defaults to all registers.
 * Resets RX and TX descriptor rings.
 *
 * This method can be called after mvneta_port_down() to return the port
 * settings to defaults.
 */
static void mvneta_defaults_set(struct mvneta_port *pp)
	int cpu;
	int queue;
	u32 val;
	int max_cpu = num_present_cpus();

	/* Clear all Cause registers */
	on_each_cpu(mvneta_percpu_clear_intr_cause, pp, true);

	/* Mask all interrupts */
	on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
	mvreg_write(pp, MVNETA_INTR_ENABLE, 0);

	/* Enable MBUS Retry bit16 */
	mvreg_write(pp, MVNETA_MBUS_RETRY, 0x20);

	/* Set CPU queue access map. CPUs are assigned to the RX and
	 * TX queues modulo their number. If there is only one TX
	 * queue then it is assigned to the CPU associated to the
	 * default RX queue.
	 */
	for_each_present_cpu(cpu) {
		int rxq_map = 0, txq_map = 0;
		int rxq, txq;

		if (!pp->neta_armada3700) {
			for (rxq = 0; rxq < rxq_number; rxq++)
				if ((rxq % max_cpu) == cpu)
					rxq_map |= MVNETA_CPU_RXQ_ACCESS(rxq);

			for (txq = 0; txq < txq_number; txq++)
				if ((txq % max_cpu) == cpu)
					txq_map |= MVNETA_CPU_TXQ_ACCESS(txq);

			/* With only one TX queue we configure a special
			 * case which allows receiving all the irqs on a
			 * single CPU.
			 */
			if (txq_number == 1)
				txq_map = (cpu == pp->rxq_def) ?
					MVNETA_CPU_TXQ_ACCESS(1) : 0;
		} else {
			txq_map = MVNETA_CPU_TXQ_ACCESS_ALL_MASK;
			rxq_map = MVNETA_CPU_RXQ_ACCESS_ALL_MASK;
		}

		mvreg_write(pp, MVNETA_CPU_MAP(cpu), rxq_map | txq_map);
	}
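
	/* Worked example (illustration only): with 2 present CPUs and 8
	 * RX/TX queues, the modulo rule above gives CPU0 the even queues
	 * (map 0x55: queues 0, 2, 4, 6) and CPU1 the odd ones (map 0xaa:
	 * queues 1, 3, 5, 7), each written to MVNETA_CPU_MAP(cpu) with the
	 * TX access bits shifted up by 8.
	 */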
	/* Reset RX and TX DMAs */
	mvreg_write(pp, MVNETA_PORT_RX_RESET, MVNETA_PORT_RX_DMA_RESET);
	mvreg_write(pp, MVNETA_PORT_TX_RESET, MVNETA_PORT_TX_DMA_RESET);

	/* Disable Legacy WRR, Disable EJP, Release from reset */
	mvreg_write(pp, MVNETA_TXQ_CMD_1, 0);
	for (queue = 0; queue < txq_number; queue++) {
		mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(queue), 0);
		mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(queue), 0);
	}

	mvreg_write(pp, MVNETA_PORT_TX_RESET, 0);
	mvreg_write(pp, MVNETA_PORT_RX_RESET, 0);

	/* Set Port Acceleration Mode */
	if (pp->bm_priv)
		/* HW buffer management + legacy parser */
		val = MVNETA_ACC_MODE_EXT2;
	else
		/* SW buffer management + legacy parser */
		val = MVNETA_ACC_MODE_EXT1;
	mvreg_write(pp, MVNETA_ACC_MODE, val);

	if (pp->bm_priv)
		mvreg_write(pp, MVNETA_BM_ADDRESS, pp->bm_priv->bppi_phys_addr);

	/* Update the portCfg register according to the RxQueue type assignments */
	val = MVNETA_PORT_CONFIG_DEFL_VALUE(pp->rxq_def);
	mvreg_write(pp, MVNETA_PORT_CONFIG, val);

	val = 0;
	mvreg_write(pp, MVNETA_PORT_CONFIG_EXTEND, val);
	mvreg_write(pp, MVNETA_RX_MIN_FRAME_SIZE, 64);

	/* Build PORT_SDMA_CONFIG_REG */
	val = 0;

	/* Default burst size */
	val |= MVNETA_TX_BRST_SZ_MASK(MVNETA_SDMA_BRST_SIZE_16);
	val |= MVNETA_RX_BRST_SZ_MASK(MVNETA_SDMA_BRST_SIZE_16);
	val |= MVNETA_RX_NO_DATA_SWAP | MVNETA_TX_NO_DATA_SWAP;

#if defined(__BIG_ENDIAN)
	val |= MVNETA_DESC_SWAP;
#endif

	/* Assign port SDMA configuration */
	mvreg_write(pp, MVNETA_SDMA_CONFIG, val);

	/* Disable PHY polling in hardware, since we're using the
	 * kernel phylib to do this.
	 */
	val = mvreg_read(pp, MVNETA_UNIT_CONTROL);
	val &= ~MVNETA_PHY_POLLING_ENABLE;
	mvreg_write(pp, MVNETA_UNIT_CONTROL, val);

	mvneta_set_autoneg(pp, pp->use_inband_status);
	mvneta_set_ucast_table(pp, -1);
	mvneta_set_special_mcast_table(pp, -1);
	mvneta_set_other_mcast_table(pp, -1);

	/* Set port interrupt enable register - default enable all */
	mvreg_write(pp, MVNETA_INTR_ENABLE,
		    (MVNETA_RXQ_INTR_ENABLE_ALL_MASK
		     | MVNETA_TXQ_INTR_ENABLE_ALL_MASK));

	mvneta_mib_counters_clear(pp);

/* Set max sizes for tx queues */
static void mvneta_txq_max_tx_size_set(struct mvneta_port *pp, int max_tx_size)
	u32 val, size, mtu;
	int queue;

	mtu = max_tx_size * 8;
	if (mtu > MVNETA_TX_MTU_MAX)
		mtu = MVNETA_TX_MTU_MAX;

	val = mvreg_read(pp, MVNETA_TX_MTU);
	val &= ~MVNETA_TX_MTU_MAX;
	val |= mtu;
	mvreg_write(pp, MVNETA_TX_MTU, val);

	/* TX token size and all TXQs token size must be larger than the MTU */
	val = mvreg_read(pp, MVNETA_TX_TOKEN_SIZE);
	size = val & MVNETA_TX_TOKEN_SIZE_MAX;
	if (size < mtu) {
		size = mtu;
		val &= ~MVNETA_TX_TOKEN_SIZE_MAX;
		val |= size;
		mvreg_write(pp, MVNETA_TX_TOKEN_SIZE, val);
	}

	for (queue = 0; queue < txq_number; queue++) {
		val = mvreg_read(pp, MVNETA_TXQ_TOKEN_SIZE_REG(queue));

		size = val & MVNETA_TXQ_TOKEN_SIZE_MAX;
		if (size < mtu) {
			size = mtu;
			val &= ~MVNETA_TXQ_TOKEN_SIZE_MAX;
			val |= size;
			mvreg_write(pp, MVNETA_TXQ_TOKEN_SIZE_REG(queue), val);
		}
	}

/* Set unicast address */
static void mvneta_set_ucast_addr(struct mvneta_port *pp, u8 last_nibble,
				  int queue)
	unsigned int unicast_reg;
	unsigned int tbl_offset;
	unsigned int reg_offset;

	/* Locate the Unicast table entry */
	last_nibble = (0xf & last_nibble);

	/* offset from unicast tbl base */
	tbl_offset = (last_nibble / 4) * 4;

	/* offset within the above reg */
	reg_offset = last_nibble % 4;

	unicast_reg = mvreg_read(pp, (MVNETA_DA_FILT_UCAST_BASE + tbl_offset));

	if (queue == -1) {
		/* Clear accepts frame bit at specified unicast DA tbl entry */
		unicast_reg &= ~(0xff << (8 * reg_offset));
	} else {
		unicast_reg &= ~(0xff << (8 * reg_offset));
		unicast_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset));
	}

	mvreg_write(pp, (MVNETA_DA_FILT_UCAST_BASE + tbl_offset), unicast_reg);
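
/* Worked example (illustration only): for a MAC address ending in 0x4b,
 * last_nibble = 0xb, so tbl_offset = (11 / 4) * 4 = 8 (the third 32-bit
 * register of the table) and reg_offset = 11 % 4 = 3 (its top byte lane):
 * entry 11 lives in bits 31:24 of MVNETA_DA_FILT_UCAST_BASE + 8.
 */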
/* Set mac address */
static void mvneta_mac_addr_set(struct mvneta_port *pp, unsigned char *addr,
				int queue)
	unsigned int mac_h;
	unsigned int mac_l;

	if (queue != -1) {
		mac_l = (addr[4] << 8) | (addr[5]);
		mac_h = (addr[0] << 24) | (addr[1] << 16) |
			(addr[2] << 8) | (addr[3] << 0);

		mvreg_write(pp, MVNETA_MAC_ADDR_LOW, mac_l);
		mvreg_write(pp, MVNETA_MAC_ADDR_HIGH, mac_h);
	}

	/* Accept frames of this address */
	mvneta_set_ucast_addr(pp, addr[5], queue);

/* Set the number of packets that will be received before RX interrupt
 * will be generated by HW.
 */
static void mvneta_rx_pkts_coal_set(struct mvneta_port *pp,
				    struct mvneta_rx_queue *rxq, u32 value)
	mvreg_write(pp, MVNETA_RXQ_THRESHOLD_REG(rxq->id),
		    value | MVNETA_RXQ_NON_OCCUPIED(0));
	rxq->pkts_coal = value;

/* Set the time delay in usec before an RX interrupt will be generated
 * by HW.
 */
static void mvneta_rx_time_coal_set(struct mvneta_port *pp,
				    struct mvneta_rx_queue *rxq, u32 value)
	u32 val;
	unsigned long clk_rate;

	clk_rate = clk_get_rate(pp->clk);
	val = (clk_rate / 1000000) * value;

	mvreg_write(pp, MVNETA_RXQ_TIME_COAL_REG(rxq->id), val);
	rxq->time_coal = value;
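
/* Worked example (illustration only, with a hypothetical 250 MHz core
 * clock): for value = 100 usec, val = (250000000 / 1000000) * 100 = 25000
 * clock cycles written to MVNETA_RXQ_TIME_COAL_REG(q).
 */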
/* Set threshold for TX_DONE pkts coalescing */
static void mvneta_tx_done_pkts_coal_set(struct mvneta_port *pp,
					 struct mvneta_tx_queue *txq, u32 value)
	u32 val;

	val = mvreg_read(pp, MVNETA_TXQ_SIZE_REG(txq->id));

	val &= ~MVNETA_TXQ_SENT_THRESH_ALL_MASK;
	val |= MVNETA_TXQ_SENT_THRESH_MASK(value);

	mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), val);

	txq->done_pkts_coal = value;

/* Handle rx descriptor fill by setting buf_cookie and buf_phys_addr */
static void mvneta_rx_desc_fill(struct mvneta_rx_desc *rx_desc,
				u32 phys_addr, void *virt_addr,
				struct mvneta_rx_queue *rxq)
	int i;

	rx_desc->buf_phys_addr = phys_addr;
	i = rx_desc - rxq->descs;
	rxq->buf_virt_addr[i] = virt_addr;

/* Decrement sent descriptors counter */
static void mvneta_txq_sent_desc_dec(struct mvneta_port *pp,
				     struct mvneta_tx_queue *txq,
				     int sent_desc)
	u32 val;

	/* Only 255 TX descriptors can be updated at once */
	while (sent_desc > 0xff) {
		val = 0xff << MVNETA_TXQ_DEC_SENT_SHIFT;
		mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
		sent_desc = sent_desc - 0xff;
	}

	val = sent_desc << MVNETA_TXQ_DEC_SENT_SHIFT;
	mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);

/* Get number of TX descriptors already sent by HW */
static int mvneta_txq_sent_desc_num_get(struct mvneta_port *pp,
					struct mvneta_tx_queue *txq)
	u32 val;
	int sent_desc;

	val = mvreg_read(pp, MVNETA_TXQ_STATUS_REG(txq->id));
	sent_desc = (val & MVNETA_TXQ_SENT_DESC_MASK) >>
		MVNETA_TXQ_SENT_DESC_SHIFT;

	return sent_desc;

/* Get number of sent descriptors and decrement counter.
 * The number of sent descriptors is returned.
 */
static int mvneta_txq_sent_desc_proc(struct mvneta_port *pp,
				     struct mvneta_tx_queue *txq)
	int sent_desc;

	/* Get number of sent descriptors */
	sent_desc = mvneta_txq_sent_desc_num_get(pp, txq);

	/* Decrement sent descriptors counter */
	if (sent_desc)
		mvneta_txq_sent_desc_dec(pp, txq, sent_desc);

	return sent_desc;

/* Set TXQ descriptors fields relevant for CSUM calculation */
static u32 mvneta_txq_desc_csum(int l3_offs, int l3_proto,
				int ip_hdr_len, int l4_proto)
	u32 command;

	/* Fields: L3_offset, IP_hdrlen, L3_type, G_IPv4_chk,
	 * G_L4_chk, L4_type; required only for checksum
	 * calculation
	 */
	command = l3_offs << MVNETA_TX_L3_OFF_SHIFT;
	command |= ip_hdr_len << MVNETA_TX_IP_HLEN_SHIFT;

	if (l3_proto == htons(ETH_P_IP))
		command |= MVNETA_TXD_IP_CSUM;
	else
		command |= MVNETA_TX_L3_IP6;

	if (l4_proto == IPPROTO_TCP)
		command |= MVNETA_TX_L4_CSUM_FULL;
	else if (l4_proto == IPPROTO_UDP)
		command |= MVNETA_TX_L4_UDP | MVNETA_TX_L4_CSUM_FULL;
	else
		command |= MVNETA_TX_L4_CSUM_NOT;

	return command;
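
/* Worked example (illustration only): for an IPv4/TCP frame with the IP
 * header at offset 14 and ihl = 5 (ip_hdr_len is counted in 32-bit words,
 * which is how mvneta_skb_tx_csum() passes it), this returns
 * 14 | (5 << 8) | MVNETA_TXD_IP_CSUM | MVNETA_TX_L4_CSUM_FULL, asking the
 * hardware to generate both the IPv4 and the full TCP checksum.
 */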
/* Display more error info */
static void mvneta_rx_error(struct mvneta_port *pp,
			    struct mvneta_rx_desc *rx_desc)
	u32 status = rx_desc->status;

	if (!mvneta_rxq_desc_is_first_last(status)) {
		netdev_err(pp->dev,
			   "bad rx status %08x (buffer oversize), size=%d\n",
			   status, rx_desc->data_size);
		return;
	}

	switch (status & MVNETA_RXD_ERR_CODE_MASK) {
	case MVNETA_RXD_ERR_CRC:
		netdev_err(pp->dev, "bad rx status %08x (crc error), size=%d\n",
			   status, rx_desc->data_size);
		break;
	case MVNETA_RXD_ERR_OVERRUN:
		netdev_err(pp->dev, "bad rx status %08x (overrun error), size=%d\n",
			   status, rx_desc->data_size);
		break;
	case MVNETA_RXD_ERR_LEN:
		netdev_err(pp->dev, "bad rx status %08x (max frame length error), size=%d\n",
			   status, rx_desc->data_size);
		break;
	case MVNETA_RXD_ERR_RESOURCE:
		netdev_err(pp->dev, "bad rx status %08x (resource error), size=%d\n",
			   status, rx_desc->data_size);
		break;
	}

/* Handle RX checksum offload based on the descriptor's status */
static void mvneta_rx_csum(struct mvneta_port *pp, u32 status,
			   struct sk_buff *skb)
	if ((status & MVNETA_RXD_L3_IP4) &&
	    (status & MVNETA_RXD_L4_CSUM_OK)) {
		skb->csum = 0;
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		return;
	}

	skb->ip_summed = CHECKSUM_NONE;

/* Return tx queue pointer (find last set bit) according to <cause> returned
 * from the tx_done reg. <cause> must not be null. The return value is always
 * a valid queue for matching the first one found in <cause>.
 */
static struct mvneta_tx_queue *mvneta_tx_done_policy(struct mvneta_port *pp,
						     u32 cause)
	int queue = fls(cause) - 1;

	return &pp->txqs[queue];

/* Free tx queue skbuffs */
static void mvneta_txq_bufs_free(struct mvneta_port *pp,
				 struct mvneta_tx_queue *txq, int num,
				 struct netdev_queue *nq)
	unsigned int bytes_compl = 0, pkts_compl = 0;
	int i;

	for (i = 0; i < num; i++) {
		struct mvneta_tx_desc *tx_desc = txq->descs +
						 txq->txq_get_index;
		struct sk_buff *skb = txq->tx_skb[txq->txq_get_index];

		if (skb) {
			bytes_compl += skb->len;
			pkts_compl++;
		}

		mvneta_txq_inc_get(txq);

		if (!IS_TSO_HEADER(txq, tx_desc->buf_phys_addr))
			dma_unmap_single(pp->dev->dev.parent,
					 tx_desc->buf_phys_addr,
					 tx_desc->data_size, DMA_TO_DEVICE);
		if (!skb)
			continue;
		dev_kfree_skb_any(skb);
	}

	netdev_tx_completed_queue(nq, pkts_compl, bytes_compl);

/* Handle end of transmission */
static void mvneta_txq_done(struct mvneta_port *pp,
			    struct mvneta_tx_queue *txq)
	struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id);
	int tx_done;

	tx_done = mvneta_txq_sent_desc_proc(pp, txq);
	if (!tx_done)
		return;

	mvneta_txq_bufs_free(pp, txq, tx_done, nq);

	txq->count -= tx_done;

	if (netif_tx_queue_stopped(nq)) {
		if (txq->count <= txq->tx_wake_threshold)
			netif_tx_wake_queue(nq);
	}

void *mvneta_frag_alloc(unsigned int frag_size)
	if (likely(frag_size <= PAGE_SIZE))
		return netdev_alloc_frag(frag_size);
	else
		return kmalloc(frag_size, GFP_ATOMIC);
EXPORT_SYMBOL_GPL(mvneta_frag_alloc);

void mvneta_frag_free(unsigned int frag_size, void *data)
	if (likely(frag_size <= PAGE_SIZE))
		skb_free_frag(data);
	else
		kfree(data);
EXPORT_SYMBOL_GPL(mvneta_frag_free);

/* Refill processing for SW buffer management */
static int mvneta_rx_refill(struct mvneta_port *pp,
			    struct mvneta_rx_desc *rx_desc,
			    struct mvneta_rx_queue *rxq)
	dma_addr_t phys_addr;
	void *data;

	data = mvneta_frag_alloc(pp->frag_size);
	if (!data)
		return -ENOMEM;

	phys_addr = dma_map_single(pp->dev->dev.parent, data,
				   MVNETA_RX_BUF_SIZE(pp->pkt_size),
				   DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(pp->dev->dev.parent, phys_addr))) {
		mvneta_frag_free(pp->frag_size, data);
		return -ENOMEM;
	}

	phys_addr += pp->rx_offset_correction;
	mvneta_rx_desc_fill(rx_desc, phys_addr, data, rxq);
	return 0;

/* Handle tx checksum */
static u32 mvneta_skb_tx_csum(struct mvneta_port *pp, struct sk_buff *skb)
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		int ip_hdr_len = 0;
		__be16 l3_proto = vlan_get_protocol(skb);
		u8 l4_proto;

		if (l3_proto == htons(ETH_P_IP)) {
			struct iphdr *ip4h = ip_hdr(skb);

			/* Calculate IPv4 checksum and L4 checksum */
			ip_hdr_len = ip4h->ihl;
			l4_proto = ip4h->protocol;
		} else if (l3_proto == htons(ETH_P_IPV6)) {
			struct ipv6hdr *ip6h = ipv6_hdr(skb);

			/* Read l4_protocol from one of IPv6 extra headers */
			if (skb_network_header_len(skb) > 0)
				ip_hdr_len = (skb_network_header_len(skb) >> 2);
			l4_proto = ip6h->nexthdr;
		} else
			return MVNETA_TX_L4_CSUM_NOT;

		return mvneta_txq_desc_csum(skb_network_offset(skb),
					    l3_proto, ip_hdr_len, l4_proto);
	}

	return MVNETA_TX_L4_CSUM_NOT;
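
/* Note (illustration only): for IPv4, ip_hdr_len above is ip4h->ihl, the
 * header length in 32-bit words (5 for a 20-byte header); the IPv6 branch
 * matches that unit by shifting the byte length right by 2 before handing
 * it to mvneta_txq_desc_csum().
 */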
/* Drop packets received by the RXQ and free buffers */
static void mvneta_rxq_drop_pkts(struct mvneta_port *pp,
				 struct mvneta_rx_queue *rxq)
	int rx_done, i;

	rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq);
	if (rx_done)
		mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_done);

	if (pp->bm_priv) {
		for (i = 0; i < rx_done; i++) {
			struct mvneta_rx_desc *rx_desc =
				mvneta_rxq_next_desc_get(rxq);
			u8 pool_id = MVNETA_RX_GET_BM_POOL_ID(rx_desc);
			struct mvneta_bm_pool *bm_pool;

			bm_pool = &pp->bm_priv->bm_pools[pool_id];
			/* Return dropped buffer to the pool */
			mvneta_bm_pool_put_bp(pp->bm_priv, bm_pool,
					      rx_desc->buf_phys_addr);
		}
		return;
	}

	for (i = 0; i < rxq->size; i++) {
		struct mvneta_rx_desc *rx_desc = rxq->descs + i;
		void *data = rxq->buf_virt_addr[i];

		dma_unmap_single(pp->dev->dev.parent, rx_desc->buf_phys_addr,
				 MVNETA_RX_BUF_SIZE(pp->pkt_size), DMA_FROM_DEVICE);
		mvneta_frag_free(pp->frag_size, data);
	}

/* Main rx processing when using software buffer management */
static int mvneta_rx_swbm(struct mvneta_port *pp, int rx_todo,
			  struct mvneta_rx_queue *rxq)
	struct mvneta_pcpu_port *port = this_cpu_ptr(pp->ports);
	struct net_device *dev = pp->dev;
	int rx_done;
	u32 rcvd_pkts = 0;
	u32 rcvd_bytes = 0;

	/* Get number of received packets */
	rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq);

	if (rx_todo > rx_done)
		rx_todo = rx_done;

	rx_done = 0;

	/* Fairness NAPI loop */
	while (rx_done < rx_todo) {
		struct mvneta_rx_desc *rx_desc = mvneta_rxq_next_desc_get(rxq);
		struct sk_buff *skb;
		unsigned char *data;
		dma_addr_t phys_addr;
		u32 rx_status, frag_size;
		int rx_bytes, err, index;

		rx_done++;
		rx_status = rx_desc->status;
		rx_bytes = rx_desc->data_size - (ETH_FCS_LEN + MVNETA_MH_SIZE);
		index = rx_desc - rxq->descs;
		data = rxq->buf_virt_addr[index];
		phys_addr = rx_desc->buf_phys_addr;

		if (!mvneta_rxq_desc_is_first_last(rx_status) ||
		    (rx_status & MVNETA_RXD_ERR_SUMMARY)) {
err_drop_frame:
			dev->stats.rx_errors++;
			mvneta_rx_error(pp, rx_desc);
			/* leave the descriptor untouched */
			continue;
		}

		if (rx_bytes <= rx_copybreak) {
			/* better copy a small frame and not unmap the DMA region */
			skb = netdev_alloc_skb_ip_align(dev, rx_bytes);
			if (!skb)
				goto err_drop_frame;

			dma_sync_single_range_for_cpu(dev->dev.parent,
						      phys_addr,
						      MVNETA_MH_SIZE + NET_SKB_PAD,
						      rx_bytes,
						      DMA_FROM_DEVICE);
			memcpy(skb_put(skb, rx_bytes),
			       data + MVNETA_MH_SIZE + NET_SKB_PAD,
			       rx_bytes);

			skb->protocol = eth_type_trans(skb, dev);
			mvneta_rx_csum(pp, rx_status, skb);
			napi_gro_receive(&port->napi, skb);

			rcvd_pkts++;
			rcvd_bytes += rx_bytes;

			/* leave the descriptor and buffer untouched */
			continue;
		}

		/* Refill processing */
		err = mvneta_rx_refill(pp, rx_desc, rxq);
		if (err) {
			netdev_err(dev, "Linux processing - Can't refill\n");
			rxq->missed++;
			goto err_drop_frame;
		}

		frag_size = pp->frag_size;

		skb = build_skb(data, frag_size > PAGE_SIZE ? 0 : frag_size);

		/* After the refill, the old buffer has to be unmapped
		 * regardless of whether the skb was successfully built.
		 */
		dma_unmap_single(dev->dev.parent, phys_addr,
				 MVNETA_RX_BUF_SIZE(pp->pkt_size),
				 DMA_FROM_DEVICE);
		if (!skb)
			goto err_drop_frame;

		rcvd_pkts++;
		rcvd_bytes += rx_bytes;

		/* Linux processing */
		skb_reserve(skb, MVNETA_MH_SIZE + NET_SKB_PAD);
		skb_put(skb, rx_bytes);

		skb->protocol = eth_type_trans(skb, dev);

		mvneta_rx_csum(pp, rx_status, skb);

		napi_gro_receive(&port->napi, skb);
	}

	if (rcvd_pkts) {
		struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);

		u64_stats_update_begin(&stats->syncp);
		stats->rx_packets += rcvd_pkts;
		stats->rx_bytes += rcvd_bytes;
		u64_stats_update_end(&stats->syncp);
	}

	/* Update rxq management counters */
	mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_done);

	return rx_done;
2043 /* Main rx processing when using hardware buffer management */
2044 static int mvneta_rx_hwbm(struct mvneta_port *pp, int rx_todo,
2045 struct mvneta_rx_queue *rxq)
2047 struct mvneta_pcpu_port *port = this_cpu_ptr(pp->ports);
2048 struct net_device *dev = pp->dev;
2053 /* Get number of received packets */
2054 rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq);
2056 if (rx_todo > rx_done)
2061 /* Fairness NAPI loop */
2062 while (rx_done < rx_todo) {
2063 struct mvneta_rx_desc *rx_desc = mvneta_rxq_next_desc_get(rxq);
2064 struct mvneta_bm_pool *bm_pool = NULL;
2065 struct sk_buff *skb;
2066 unsigned char *data;
2067 dma_addr_t phys_addr;
2068 u32 rx_status, frag_size;
2073 rx_status = rx_desc->status;
2074 rx_bytes = rx_desc->data_size - (ETH_FCS_LEN + MVNETA_MH_SIZE);
2075 data = (u8 *)(uintptr_t)rx_desc->buf_cookie;
2076 phys_addr = rx_desc->buf_phys_addr;
2077 pool_id = MVNETA_RX_GET_BM_POOL_ID(rx_desc);
2078 bm_pool = &pp->bm_priv->bm_pools[pool_id];
2080 if (!mvneta_rxq_desc_is_first_last(rx_status) ||
2081 (rx_status & MVNETA_RXD_ERR_SUMMARY)) {
2082 err_drop_frame_ret_pool:
2083 /* Return the buffer to the pool */
2084 mvneta_bm_pool_put_bp(pp->bm_priv, bm_pool,
2085 rx_desc->buf_phys_addr);
2087 dev->stats.rx_errors++;
2088 mvneta_rx_error(pp, rx_desc);
2089 /* leave the descriptor untouched */
2093 if (rx_bytes <= rx_copybreak) {
2094 /* better copy a small frame and not unmap the DMA region */
2095 skb = netdev_alloc_skb_ip_align(dev, rx_bytes);
2097 goto err_drop_frame_ret_pool;
2099 dma_sync_single_range_for_cpu(dev->dev.parent,
2100 rx_desc->buf_phys_addr,
2101 MVNETA_MH_SIZE + NET_SKB_PAD,
2104 memcpy(skb_put(skb, rx_bytes),
2105 data + MVNETA_MH_SIZE + NET_SKB_PAD,
2108 skb->protocol = eth_type_trans(skb, dev);
2109 mvneta_rx_csum(pp, rx_status, skb);
2110 napi_gro_receive(&port->napi, skb);
2113 rcvd_bytes += rx_bytes;
2115 /* Return the buffer to the pool */
2116 mvneta_bm_pool_put_bp(pp->bm_priv, bm_pool,
2117 rx_desc->buf_phys_addr);
2119 /* leave the descriptor and buffer untouched */
2123 /* Refill processing */
2124 err = hwbm_pool_refill(&bm_pool->hwbm_pool, GFP_ATOMIC);
2126 netdev_err(dev, "Linux processing - Can't refill\n");
2128 goto err_drop_frame_ret_pool;
2131 frag_size = bm_pool->hwbm_pool.frag_size;
2133 skb = build_skb(data, frag_size > PAGE_SIZE ? 0 : frag_size);
2135 /* After refill the old buffer has to be unmapped, regardless of
2136 * whether the skb was successfully built or not.
2138 dma_unmap_single(&pp->bm_priv->pdev->dev, phys_addr,
2139 bm_pool->buf_size, DMA_FROM_DEVICE);
2141 goto err_drop_frame;
2144 rcvd_bytes += rx_bytes;
2146 /* Linux processing */
2147 skb_reserve(skb, MVNETA_MH_SIZE + NET_SKB_PAD);
2148 skb_put(skb, rx_bytes);
2150 skb->protocol = eth_type_trans(skb, dev);
2152 mvneta_rx_csum(pp, rx_status, skb);
2154 napi_gro_receive(&port->napi, skb);
2158 struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
2160 u64_stats_update_begin(&stats->syncp);
2161 stats->rx_packets += rcvd_pkts;
2162 stats->rx_bytes += rcvd_bytes;
2163 u64_stats_update_end(&stats->syncp);
2166 /* Update rxq management counters */
2167 mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_done);
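/* Illustrative sketch (not part of the driver): the copybreak decision
 * shared by both RX paths above. Small frames are copied into a fresh
 * skb so the DMA buffer stays mapped and in place; larger frames hand
 * the buffer to the stack via build_skb(). The helper name is made up
 * for illustration.
 */
static inline bool mvneta_rx_should_copy(int rx_bytes)
{
	return rx_bytes <= rx_copybreak;
}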
2173 mvneta_tso_put_hdr(struct sk_buff *skb,
2174 struct mvneta_port *pp, struct mvneta_tx_queue *txq)
2176 struct mvneta_tx_desc *tx_desc;
2177 int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
2179 txq->tx_skb[txq->txq_put_index] = NULL;
2180 tx_desc = mvneta_txq_next_desc_get(txq);
2181 tx_desc->data_size = hdr_len;
2182 tx_desc->command = mvneta_skb_tx_csum(pp, skb);
2183 tx_desc->command |= MVNETA_TXD_F_DESC;
2184 tx_desc->buf_phys_addr = txq->tso_hdrs_phys +
2185 txq->txq_put_index * TSO_HEADER_SIZE;
2186 mvneta_txq_inc_put(txq);
2190 mvneta_tso_put_data(struct net_device *dev, struct mvneta_tx_queue *txq,
2191 struct sk_buff *skb, char *data, int size,
2192 bool last_tcp, bool is_last)
2194 struct mvneta_tx_desc *tx_desc;
2196 tx_desc = mvneta_txq_next_desc_get(txq);
2197 tx_desc->data_size = size;
2198 tx_desc->buf_phys_addr = dma_map_single(dev->dev.parent, data,
2199 size, DMA_TO_DEVICE);
2200 if (unlikely(dma_mapping_error(dev->dev.parent,
2201 tx_desc->buf_phys_addr))) {
2202 mvneta_txq_desc_put(txq);
2206 tx_desc->command = 0;
2207 txq->tx_skb[txq->txq_put_index] = NULL;
2210 /* last descriptor in the TCP packet */
2211 tx_desc->command = MVNETA_TXD_L_DESC;
2213 /* last descriptor in SKB */
2215 txq->tx_skb[txq->txq_put_index] = skb;
2217 mvneta_txq_inc_put(txq);
2221 static int mvneta_tx_tso(struct sk_buff *skb, struct net_device *dev,
2222 struct mvneta_tx_queue *txq)
2224 int total_len, data_left;
2226 struct mvneta_port *pp = netdev_priv(dev);
2228 int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
2231 /* Count needed descriptors */
2232 if ((txq->count + tso_count_descs(skb)) >= txq->size)
2235 if (skb_headlen(skb) < (skb_transport_offset(skb) + tcp_hdrlen(skb))) {
2236 pr_info("*** Is this even possible???!?!?\n");
2240 /* Initialize the TSO handler, and prepare the first payload */
2241 tso_start(skb, &tso);
2243 total_len = skb->len - hdr_len;
2244 while (total_len > 0) {
2247 data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len);
2248 total_len -= data_left;
2251 /* prepare packet headers: MAC + IP + TCP */
2252 hdr = txq->tso_hdrs + txq->txq_put_index * TSO_HEADER_SIZE;
2253 tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0);
2255 mvneta_tso_put_hdr(skb, pp, txq);
2257 while (data_left > 0) {
2261 size = min_t(int, tso.size, data_left);
2263 if (mvneta_tso_put_data(dev, txq, skb,
2270 tso_build_data(skb, &tso, size);
2277 /* Release all used data descriptors; header descriptors must not be unmapped. */
2280 for (i = desc_count - 1; i >= 0; i--) {
2281 struct mvneta_tx_desc *tx_desc = txq->descs + i;
2282 if (!IS_TSO_HEADER(txq, tx_desc->buf_phys_addr))
2283 dma_unmap_single(pp->dev->dev.parent,
2284 tx_desc->buf_phys_addr,
2287 mvneta_txq_desc_put(txq);
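/* Illustrative sketch (not part of the driver): a rough upper bound on
 * the descriptors a TSO skb consumes, assuming one header descriptor
 * plus one data descriptor per segment. The real check above relies on
 * tso_count_descs(), which also accounts for payload split across page
 * fragments; this helper is a simplified estimate for illustration.
 */
static inline int mvneta_tso_desc_estimate(int payload_len, int gso_size)
{
	int segs = DIV_ROUND_UP(payload_len, gso_size);

	return segs * 2;	/* header desc + at least one data desc */
}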
2292 /* Handle tx fragmentation processing */
2293 static int mvneta_tx_frag_process(struct mvneta_port *pp, struct sk_buff *skb,
2294 struct mvneta_tx_queue *txq)
2296 struct mvneta_tx_desc *tx_desc;
2297 int i, nr_frags = skb_shinfo(skb)->nr_frags;
2299 for (i = 0; i < nr_frags; i++) {
2300 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2301 void *addr = page_address(frag->page.p) + frag->page_offset;
2303 tx_desc = mvneta_txq_next_desc_get(txq);
2304 tx_desc->data_size = frag->size;
2306 tx_desc->buf_phys_addr =
2307 dma_map_single(pp->dev->dev.parent, addr,
2308 tx_desc->data_size, DMA_TO_DEVICE);
2310 if (dma_mapping_error(pp->dev->dev.parent,
2311 tx_desc->buf_phys_addr)) {
2312 mvneta_txq_desc_put(txq);
2316 if (i == nr_frags - 1) {
2317 /* Last descriptor */
2318 tx_desc->command = MVNETA_TXD_L_DESC | MVNETA_TXD_Z_PAD;
2319 txq->tx_skb[txq->txq_put_index] = skb;
2321 /* Descriptor in the middle: Not First, Not Last */
2322 tx_desc->command = 0;
2323 txq->tx_skb[txq->txq_put_index] = NULL;
2325 mvneta_txq_inc_put(txq);
2331 /* Release all descriptors that were used to map fragments of
2332 * this packet, as well as the corresponding DMA mappings
2334 for (i = i - 1; i >= 0; i--) {
2335 tx_desc = txq->descs + i;
2336 dma_unmap_single(pp->dev->dev.parent,
2337 tx_desc->buf_phys_addr,
2340 mvneta_txq_desc_put(txq);
2346 /* Main tx processing */
2347 static int mvneta_tx(struct sk_buff *skb, struct net_device *dev)
2349 struct mvneta_port *pp = netdev_priv(dev);
2350 u16 txq_id = skb_get_queue_mapping(skb);
2351 struct mvneta_tx_queue *txq = &pp->txqs[txq_id];
2352 struct mvneta_tx_desc *tx_desc;
2357 if (!netif_running(dev))
2360 if (skb_is_gso(skb)) {
2361 frags = mvneta_tx_tso(skb, dev, txq);
2365 frags = skb_shinfo(skb)->nr_frags + 1;
2367 /* Get a descriptor for the first part of the packet */
2368 tx_desc = mvneta_txq_next_desc_get(txq);
2370 tx_cmd = mvneta_skb_tx_csum(pp, skb);
2372 tx_desc->data_size = skb_headlen(skb);
2374 tx_desc->buf_phys_addr = dma_map_single(dev->dev.parent, skb->data,
2377 if (unlikely(dma_mapping_error(dev->dev.parent,
2378 tx_desc->buf_phys_addr))) {
2379 mvneta_txq_desc_put(txq);
2385 /* First and Last descriptor */
2386 tx_cmd |= MVNETA_TXD_FLZ_DESC;
2387 tx_desc->command = tx_cmd;
2388 txq->tx_skb[txq->txq_put_index] = skb;
2389 mvneta_txq_inc_put(txq);
2391 /* First but not Last */
2392 tx_cmd |= MVNETA_TXD_F_DESC;
2393 txq->tx_skb[txq->txq_put_index] = NULL;
2394 mvneta_txq_inc_put(txq);
2395 tx_desc->command = tx_cmd;
2396 /* Continue with other skb fragments */
2397 if (mvneta_tx_frag_process(pp, skb, txq)) {
2398 dma_unmap_single(dev->dev.parent,
2399 tx_desc->buf_phys_addr,
2402 mvneta_txq_desc_put(txq);
2410 struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
2411 struct netdev_queue *nq = netdev_get_tx_queue(dev, txq_id);
2413 netdev_tx_sent_queue(nq, len);
2415 txq->count += frags;
2416 if (txq->count >= txq->tx_stop_threshold)
2417 netif_tx_stop_queue(nq);
2419 if (!skb->xmit_more || netif_xmit_stopped(nq) ||
2420 txq->pending + frags > MVNETA_TXQ_DEC_SENT_MASK)
2421 mvneta_txq_pend_desc_add(pp, txq, frags);
2423 txq->pending += frags;
2425 u64_stats_update_begin(&stats->syncp);
2426 stats->tx_packets++;
2427 stats->tx_bytes += len;
2428 u64_stats_update_end(&stats->syncp);
2430 dev->stats.tx_dropped++;
2431 dev_kfree_skb_any(skb);
2434 return NETDEV_TX_OK;
2438 /* Free tx resources, when resetting a port */
2439 static void mvneta_txq_done_force(struct mvneta_port *pp,
2440 struct mvneta_tx_queue *txq)
2443 struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id);
2444 int tx_done = txq->count;
2446 mvneta_txq_bufs_free(pp, txq, tx_done, nq);
2450 txq->txq_put_index = 0;
2451 txq->txq_get_index = 0;
2454 /* Handle tx done - called in softirq context. The <cause_tx_done> argument
2455 * must be a valid cause according to MVNETA_TXQ_INTR_MASK_ALL.
2457 static void mvneta_tx_done_gbe(struct mvneta_port *pp, u32 cause_tx_done)
2459 struct mvneta_tx_queue *txq;
2460 struct netdev_queue *nq;
2462 while (cause_tx_done) {
2463 txq = mvneta_tx_done_policy(pp, cause_tx_done);
2465 nq = netdev_get_tx_queue(pp->dev, txq->id);
2466 __netif_tx_lock(nq, smp_processor_id());
2469 mvneta_txq_done(pp, txq);
2471 __netif_tx_unlock(nq);
2472 cause_tx_done &= ~((1 << txq->id));
2476 /* Compute the CRC8 of the specified address, using an algorithm unique
2477 * to this hardware (per the HW spec), different from the generic CRC8.
2479 static int mvneta_addr_crc(unsigned char *addr)
2484 for (i = 0; i < ETH_ALEN; i++) {
2487 crc = (crc ^ addr[i]) << 8;
2488 for (j = 7; j >= 0; j--) {
2489 if (crc & (0x100 << j))
2497 /* This method controls the net device special MAC multicast support.
2498 * The Special Multicast Table for MAC addresses supports MAC of the form
2499 * 0x01-00-5E-00-00-XX (where XX is between 0x00 and 0xFF).
2500 * The MAC DA[7:0] bits are used as a pointer to the Special Multicast
2501 * Table entries in the DA-Filter table. This method sets the
2502 * appropriate Special Multicast Table entry.
2504 static void mvneta_set_special_mcast_addr(struct mvneta_port *pp,
2505 unsigned char last_byte,
2508 unsigned int smc_table_reg;
2509 unsigned int tbl_offset;
2510 unsigned int reg_offset;
2512 /* Register offset from SMC table base */
2513 tbl_offset = (last_byte / 4);
2514 /* Entry offset within the above reg */
2515 reg_offset = last_byte % 4;
2517 smc_table_reg = mvreg_read(pp, (MVNETA_DA_FILT_SPEC_MCAST
2521 smc_table_reg &= ~(0xff << (8 * reg_offset));
2523 smc_table_reg &= ~(0xff << (8 * reg_offset));
2524 smc_table_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset));
2527 mvreg_write(pp, MVNETA_DA_FILT_SPEC_MCAST + tbl_offset * 4,
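/* Worked example (illustration only): for the special multicast MAC
 * 01:00:5e:00:00:2a, last_byte is 0x2a, so the entry lives in SMC
 * register 0x2a / 4 = 10 (byte offset 40 from the table base), at byte
 * lane 0x2a % 4 = 2 within that register.
 */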
2531 /* This method controls the network device Other MAC multicast support.
2532 * The Other Multicast Table is used for multicast of another type.
2533 * A CRC-8 is used as an index to the Other Multicast Table entries
2534 * in the DA-Filter table.
2535 * The method gets the CRC-8 value from the calling routine and
2536 * sets the appropriate Other Multicast Table entry for the specified queue.
2539 static void mvneta_set_other_mcast_addr(struct mvneta_port *pp,
2543 unsigned int omc_table_reg;
2544 unsigned int tbl_offset;
2545 unsigned int reg_offset;
2547 tbl_offset = (crc8 / 4) * 4; /* Register offset from OMC table base */
2548 reg_offset = crc8 % 4; /* Entry offset within the above reg */
2550 omc_table_reg = mvreg_read(pp, MVNETA_DA_FILT_OTH_MCAST + tbl_offset);
2553 /* Clear the accept-frame bit at the specified Other DA table entry */
2554 omc_table_reg &= ~(0xff << (8 * reg_offset));
2556 omc_table_reg &= ~(0xff << (8 * reg_offset));
2557 omc_table_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset));
2560 mvreg_write(pp, MVNETA_DA_FILT_OTH_MCAST + tbl_offset, omc_table_reg);
2563 /* The network device supports multicast using two tables:
2564 * 1) Special Multicast Table for MAC addresses of the form
2565 * 0x01-00-5E-00-00-XX (where XX is between 0x00 and 0xFF).
2566 * The MAC DA[7:0] bits are used as a pointer to the Special Multicast
2567 * Table entries in the DA-Filter table.
2568 * 2) Other Multicast Table for multicast of another type. A CRC-8 value
2569 * is used as an index to the Other Multicast Table entries in the DA-Filter table.
2572 static int mvneta_mcast_addr_set(struct mvneta_port *pp, unsigned char *p_addr,
2575 unsigned char crc_result = 0;
2577 if (memcmp(p_addr, "\x01\x00\x5e\x00\x00", 5) == 0) {
2578 mvneta_set_special_mcast_addr(pp, p_addr[5], queue);
2582 crc_result = mvneta_addr_crc(p_addr);
2584 if (pp->mcast_count[crc_result] == 0) {
2585 netdev_info(pp->dev, "No valid Mcast for crc8=0x%02x\n",
2590 pp->mcast_count[crc_result]--;
2591 if (pp->mcast_count[crc_result] != 0) {
2592 netdev_info(pp->dev,
2593 "After delete there are %d valid Mcast for crc8=0x%02x\n",
2594 pp->mcast_count[crc_result], crc_result);
2598 pp->mcast_count[crc_result]++;
2600 mvneta_set_other_mcast_addr(pp, crc_result, queue);
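/* Illustrative usage (assumption, not part of the driver): based on the
 * refcounting above, passing a valid queue adds a filter entry for the
 * address, while queue == -1 drops one reference and clears the entry
 * once its mcast_count reaches zero.
 */
static inline int mvneta_mcast_example(struct mvneta_port *pp,
				       unsigned char *addr)
{
	int err = mvneta_mcast_addr_set(pp, addr, pp->rxq_def); /* add */

	if (err)
		return err;
	return mvneta_mcast_addr_set(pp, addr, -1);		/* remove */
}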
2605 /* Configure the filtering mode of the Ethernet port */
2606 static void mvneta_rx_unicast_promisc_set(struct mvneta_port *pp,
2609 u32 port_cfg_reg, val;
2611 port_cfg_reg = mvreg_read(pp, MVNETA_PORT_CONFIG);
2613 val = mvreg_read(pp, MVNETA_TYPE_PRIO);
2615 /* Set / Clear UPM bit in port configuration register */
2617 /* Accept all Unicast addresses */
2618 port_cfg_reg |= MVNETA_UNI_PROMISC_MODE;
2619 val |= MVNETA_FORCE_UNI;
2620 mvreg_write(pp, MVNETA_MAC_ADDR_LOW, 0xffff);
2621 mvreg_write(pp, MVNETA_MAC_ADDR_HIGH, 0xffffffff);
2623 /* Reject all Unicast addresses */
2624 port_cfg_reg &= ~MVNETA_UNI_PROMISC_MODE;
2625 val &= ~MVNETA_FORCE_UNI;
2628 mvreg_write(pp, MVNETA_PORT_CONFIG, port_cfg_reg);
2629 mvreg_write(pp, MVNETA_TYPE_PRIO, val);
2632 /* register unicast and multicast addresses */
2633 static void mvneta_set_rx_mode(struct net_device *dev)
2635 struct mvneta_port *pp = netdev_priv(dev);
2636 struct netdev_hw_addr *ha;
2638 if (dev->flags & IFF_PROMISC) {
2639 /* Accept all: Multicast + Unicast */
2640 mvneta_rx_unicast_promisc_set(pp, 1);
2641 mvneta_set_ucast_table(pp, pp->rxq_def);
2642 mvneta_set_special_mcast_table(pp, pp->rxq_def);
2643 mvneta_set_other_mcast_table(pp, pp->rxq_def);
2645 /* Accept single Unicast */
2646 mvneta_rx_unicast_promisc_set(pp, 0);
2647 mvneta_set_ucast_table(pp, -1);
2648 mvneta_mac_addr_set(pp, dev->dev_addr, pp->rxq_def);
2650 if (dev->flags & IFF_ALLMULTI) {
2651 /* Accept all multicast */
2652 mvneta_set_special_mcast_table(pp, pp->rxq_def);
2653 mvneta_set_other_mcast_table(pp, pp->rxq_def);
2655 /* Accept only initialized multicast */
2656 mvneta_set_special_mcast_table(pp, -1);
2657 mvneta_set_other_mcast_table(pp, -1);
2659 if (!netdev_mc_empty(dev)) {
2660 netdev_for_each_mc_addr(ha, dev) {
2661 mvneta_mcast_addr_set(pp, ha->addr,
2669 /* Interrupt handling - the callback for request_irq() */
2670 static irqreturn_t mvneta_isr(int irq, void *dev_id)
2672 struct mvneta_port *pp = (struct mvneta_port *)dev_id;
2674 mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);
2675 napi_schedule(&pp->napi);
2680 /* Interrupt handling - the callback for request_percpu_irq() */
2681 static irqreturn_t mvneta_percpu_isr(int irq, void *dev_id)
2683 struct mvneta_pcpu_port *port = (struct mvneta_pcpu_port *)dev_id;
2685 disable_percpu_irq(port->pp->dev->irq);
2686 napi_schedule(&port->napi);
2691 static int mvneta_fixed_link_update(struct mvneta_port *pp,
2692 struct phy_device *phy)
2694 struct fixed_phy_status status;
2695 struct fixed_phy_status changed = {};
2696 u32 gmac_stat = mvreg_read(pp, MVNETA_GMAC_STATUS);
2698 status.link = !!(gmac_stat & MVNETA_GMAC_LINK_UP);
2699 if (gmac_stat & MVNETA_GMAC_SPEED_1000)
2700 status.speed = SPEED_1000;
2701 else if (gmac_stat & MVNETA_GMAC_SPEED_100)
2702 status.speed = SPEED_100;
2704 status.speed = SPEED_10;
2705 status.duplex = !!(gmac_stat & MVNETA_GMAC_FULL_DUPLEX);
2709 fixed_phy_update_state(phy, &status, &changed);
2714 * Bits 0-7 of the causeRxTx register indicate that packets were
2715 * transmitted on the corresponding TXQ (Bit 0 is for TX queue 1).
2716 * Bits 8-15 of the causeRxTx register indicate that packets were
2717 * received on the corresponding RXQ (Bit 8 is for RX queue 0).
2718 * Each CPU has its own causeRxTx register.
2720 static int mvneta_poll(struct napi_struct *napi, int budget)
2725 struct mvneta_port *pp = netdev_priv(napi->dev);
2726 struct net_device *ndev = pp->dev;
2727 struct mvneta_pcpu_port *port = this_cpu_ptr(pp->ports);
2729 if (!netif_running(pp->dev)) {
2730 napi_complete(napi);
2734 /* Read cause register */
2735 cause_rx_tx = mvreg_read(pp, MVNETA_INTR_NEW_CAUSE);
2736 if (cause_rx_tx & MVNETA_MISCINTR_INTR_MASK) {
2737 u32 cause_misc = mvreg_read(pp, MVNETA_INTR_MISC_CAUSE);
2739 mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0);
2740 if (pp->use_inband_status && (cause_misc &
2741 (MVNETA_CAUSE_PHY_STATUS_CHANGE |
2742 MVNETA_CAUSE_LINK_CHANGE |
2743 MVNETA_CAUSE_PSC_SYNC_CHANGE))) {
2744 mvneta_fixed_link_update(pp, ndev->phydev);
2748 /* Release Tx descriptors */
2749 if (cause_rx_tx & MVNETA_TX_INTR_MASK_ALL) {
2750 mvneta_tx_done_gbe(pp, (cause_rx_tx & MVNETA_TX_INTR_MASK_ALL));
2751 cause_rx_tx &= ~MVNETA_TX_INTR_MASK_ALL;
2754 /* For the case where the last mvneta_poll did not process all
2757 rx_queue = fls(((cause_rx_tx >> 8) & 0xff));
2759 cause_rx_tx |= pp->neta_armada3700 ? pp->cause_rx_tx :
2763 rx_queue = rx_queue - 1;
2765 rx_done = mvneta_rx_hwbm(pp, budget, &pp->rxqs[rx_queue]);
2767 rx_done = mvneta_rx_swbm(pp, budget, &pp->rxqs[rx_queue]);
2770 if (rx_done < budget) {
2772 napi_complete_done(napi, rx_done);
2774 if (pp->neta_armada3700) {
2775 unsigned long flags;
2777 local_irq_save(flags);
2778 mvreg_write(pp, MVNETA_INTR_NEW_MASK,
2779 MVNETA_RX_INTR_MASK(rxq_number) |
2780 MVNETA_TX_INTR_MASK(txq_number) |
2781 MVNETA_MISCINTR_INTR_MASK);
2782 local_irq_restore(flags);
2784 enable_percpu_irq(pp->dev->irq, 0);
2788 if (pp->neta_armada3700)
2789 pp->cause_rx_tx = cause_rx_tx;
2791 port->cause_rx_tx = cause_rx_tx;
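/* Illustrative sketch (not part of the driver): decoding the causeRxTx
 * layout described above - TX completion causes live in bits 0-7 and RX
 * causes in bits 8-15, matching the fls()-based queue selection in
 * mvneta_poll(). The helper names are made up for illustration.
 */
static inline u8 mvneta_cause_tx_bits(u32 cause_rx_tx)
{
	return cause_rx_tx & 0xff;
}

static inline u8 mvneta_cause_rx_bits(u32 cause_rx_tx)
{
	return (cause_rx_tx >> 8) & 0xff;
}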
2796 /* Handle rxq fill: allocates rxq skbs; called when initializing a port */
2797 static int mvneta_rxq_fill(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
2802 for (i = 0; i < num; i++) {
2803 memset(rxq->descs + i, 0, sizeof(struct mvneta_rx_desc));
2804 if (mvneta_rx_refill(pp, rxq->descs + i, rxq) != 0) {
2805 netdev_err(pp->dev, "%s:rxq %d, %d of %d buffs filled\n",
2806 __func__, rxq->id, i, num);
2811 /* Add this number of RX descriptors as non-occupied (ready to get packets). */
2814 mvneta_rxq_non_occup_desc_add(pp, rxq, i);
2819 /* Free all packets pending transmit from all TXQs and reset TX port */
2820 static void mvneta_tx_reset(struct mvneta_port *pp)
2824 /* free the skb's in the tx ring */
2825 for (queue = 0; queue < txq_number; queue++)
2826 mvneta_txq_done_force(pp, &pp->txqs[queue]);
2828 mvreg_write(pp, MVNETA_PORT_TX_RESET, MVNETA_PORT_TX_DMA_RESET);
2829 mvreg_write(pp, MVNETA_PORT_TX_RESET, 0);
2832 static void mvneta_rx_reset(struct mvneta_port *pp)
2834 mvreg_write(pp, MVNETA_PORT_RX_RESET, MVNETA_PORT_RX_DMA_RESET);
2835 mvreg_write(pp, MVNETA_PORT_RX_RESET, 0);
2838 /* Rx/Tx queue initialization/cleanup methods */
2840 /* Create a specified RX queue */
2841 static int mvneta_rxq_init(struct mvneta_port *pp,
2842 struct mvneta_rx_queue *rxq)
2845 rxq->size = pp->rx_ring_size;
2847 /* Allocate memory for RX descriptors */
2848 rxq->descs = dma_alloc_coherent(pp->dev->dev.parent,
2849 rxq->size * MVNETA_DESC_ALIGNED_SIZE,
2850 &rxq->descs_phys, GFP_KERNEL);
2851 if (rxq->descs == NULL)
2854 rxq->last_desc = rxq->size - 1;
2856 /* Set Rx descriptors queue starting address */
2857 mvreg_write(pp, MVNETA_RXQ_BASE_ADDR_REG(rxq->id), rxq->descs_phys);
2858 mvreg_write(pp, MVNETA_RXQ_SIZE_REG(rxq->id), rxq->size);
2861 mvneta_rxq_offset_set(pp, rxq, NET_SKB_PAD - pp->rx_offset_correction);
2863 /* Set coalescing pkts and time */
2864 mvneta_rx_pkts_coal_set(pp, rxq, rxq->pkts_coal);
2865 mvneta_rx_time_coal_set(pp, rxq, rxq->time_coal);
2868 /* Fill RXQ with buffers from RX pool */
2869 mvneta_rxq_buf_size_set(pp, rxq,
2870 MVNETA_RX_BUF_SIZE(pp->pkt_size));
2871 mvneta_rxq_bm_disable(pp, rxq);
2872 mvneta_rxq_fill(pp, rxq, rxq->size);
2874 mvneta_rxq_bm_enable(pp, rxq);
2875 mvneta_rxq_long_pool_set(pp, rxq);
2876 mvneta_rxq_short_pool_set(pp, rxq);
2877 mvneta_rxq_non_occup_desc_add(pp, rxq, rxq->size);
2883 /* Cleanup Rx queue */
2884 static void mvneta_rxq_deinit(struct mvneta_port *pp,
2885 struct mvneta_rx_queue *rxq)
2887 mvneta_rxq_drop_pkts(pp, rxq);
2890 dma_free_coherent(pp->dev->dev.parent,
2891 rxq->size * MVNETA_DESC_ALIGNED_SIZE,
2897 rxq->next_desc_to_proc = 0;
2898 rxq->descs_phys = 0;
2901 /* Create and initialize a tx queue */
2902 static int mvneta_txq_init(struct mvneta_port *pp,
2903 struct mvneta_tx_queue *txq)
2907 txq->size = pp->tx_ring_size;
2909 /* A queue must always have room for at least one skb.
2910 * Therefore, stop the queue when the number of free entries reaches
2911 * the maximum number of descriptors per skb.
2913 txq->tx_stop_threshold = txq->size - MVNETA_MAX_SKB_DESCS;
2914 txq->tx_wake_threshold = txq->tx_stop_threshold / 2;
2917 /* Allocate memory for TX descriptors */
2918 txq->descs = dma_alloc_coherent(pp->dev->dev.parent,
2919 txq->size * MVNETA_DESC_ALIGNED_SIZE,
2920 &txq->descs_phys, GFP_KERNEL);
2921 if (txq->descs == NULL)
2924 txq->last_desc = txq->size - 1;
2926 /* Set maximum bandwidth for enabled TXQs */
2927 mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(txq->id), 0x03ffffff);
2928 mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(txq->id), 0x3fffffff);
2930 /* Set Tx descriptors queue starting address */
2931 mvreg_write(pp, MVNETA_TXQ_BASE_ADDR_REG(txq->id), txq->descs_phys);
2932 mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), txq->size);
2934 txq->tx_skb = kmalloc(txq->size * sizeof(*txq->tx_skb), GFP_KERNEL);
2935 if (txq->tx_skb == NULL) {
2936 dma_free_coherent(pp->dev->dev.parent,
2937 txq->size * MVNETA_DESC_ALIGNED_SIZE,
2938 txq->descs, txq->descs_phys);
2942 /* Allocate DMA buffers for TSO MAC/IP/TCP headers */
2943 txq->tso_hdrs = dma_alloc_coherent(pp->dev->dev.parent,
2944 txq->size * TSO_HEADER_SIZE,
2945 &txq->tso_hdrs_phys, GFP_KERNEL);
2946 if (txq->tso_hdrs == NULL) {
2948 dma_free_coherent(pp->dev->dev.parent,
2949 txq->size * MVNETA_DESC_ALIGNED_SIZE,
2950 txq->descs, txq->descs_phys);
2953 mvneta_tx_done_pkts_coal_set(pp, txq, txq->done_pkts_coal);
2955 /* Setup XPS mapping */
2957 cpu = txq->id % num_present_cpus();
2959 cpu = pp->rxq_def % num_present_cpus();
2960 cpumask_set_cpu(cpu, &txq->affinity_mask);
2961 netif_set_xps_queue(pp->dev, &txq->affinity_mask, txq->id);
2966 /* Free the resources allocated for a TX queue */
2967 static void mvneta_txq_deinit(struct mvneta_port *pp,
2968 struct mvneta_tx_queue *txq)
2970 struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id);
2975 dma_free_coherent(pp->dev->dev.parent,
2976 txq->size * TSO_HEADER_SIZE,
2977 txq->tso_hdrs, txq->tso_hdrs_phys);
2979 dma_free_coherent(pp->dev->dev.parent,
2980 txq->size * MVNETA_DESC_ALIGNED_SIZE,
2981 txq->descs, txq->descs_phys);
2983 netdev_tx_reset_queue(nq);
2987 txq->next_desc_to_proc = 0;
2988 txq->descs_phys = 0;
2990 /* Set minimum bandwidth for disabled TXQs */
2991 mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(txq->id), 0);
2992 mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(txq->id), 0);
2994 /* Set Tx descriptors queue starting address and size */
2995 mvreg_write(pp, MVNETA_TXQ_BASE_ADDR_REG(txq->id), 0);
2996 mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), 0);
2999 /* Cleanup all Tx queues */
3000 static void mvneta_cleanup_txqs(struct mvneta_port *pp)
3004 for (queue = 0; queue < txq_number; queue++)
3005 mvneta_txq_deinit(pp, &pp->txqs[queue]);
3008 /* Cleanup all Rx queues */
3009 static void mvneta_cleanup_rxqs(struct mvneta_port *pp)
3013 for (queue = 0; queue < txq_number; queue++)
3014 mvneta_rxq_deinit(pp, &pp->rxqs[queue]);
3018 /* Init all Rx queues */
3019 static int mvneta_setup_rxqs(struct mvneta_port *pp)
3023 for (queue = 0; queue < rxq_number; queue++) {
3024 int err = mvneta_rxq_init(pp, &pp->rxqs[queue]);
3027 netdev_err(pp->dev, "%s: can't create rxq=%d\n",
3029 mvneta_cleanup_rxqs(pp);
3037 /* Init all tx queues */
3038 static int mvneta_setup_txqs(struct mvneta_port *pp)
3042 for (queue = 0; queue < txq_number; queue++) {
3043 int err = mvneta_txq_init(pp, &pp->txqs[queue]);
3045 netdev_err(pp->dev, "%s: can't create txq=%d\n",
3047 mvneta_cleanup_txqs(pp);
3055 static void mvneta_start_dev(struct mvneta_port *pp)
3058 struct net_device *ndev = pp->dev;
3060 mvneta_max_rx_size_set(pp, pp->pkt_size);
3061 mvneta_txq_max_tx_size_set(pp, pp->pkt_size);
3063 /* start the Rx/Tx activity */
3064 mvneta_port_enable(pp);
3066 if (!pp->neta_armada3700) {
3067 /* Enable polling on the port */
3068 for_each_online_cpu(cpu) {
3069 struct mvneta_pcpu_port *port =
3070 per_cpu_ptr(pp->ports, cpu);
3072 napi_enable(&port->napi);
3075 napi_enable(&pp->napi);
3078 /* Unmask interrupts. It has to be done from each CPU */
3079 on_each_cpu(mvneta_percpu_unmask_interrupt, pp, true);
3081 mvreg_write(pp, MVNETA_INTR_MISC_MASK,
3082 MVNETA_CAUSE_PHY_STATUS_CHANGE |
3083 MVNETA_CAUSE_LINK_CHANGE |
3084 MVNETA_CAUSE_PSC_SYNC_CHANGE);
3086 phy_start(ndev->phydev);
3087 netif_tx_start_all_queues(pp->dev);
3090 static void mvneta_stop_dev(struct mvneta_port *pp)
3093 struct net_device *ndev = pp->dev;
3095 phy_stop(ndev->phydev);
3097 if (!pp->neta_armada3700) {
3098 for_each_online_cpu(cpu) {
3099 struct mvneta_pcpu_port *port =
3100 per_cpu_ptr(pp->ports, cpu);
3102 napi_disable(&port->napi);
3105 napi_disable(&pp->napi);
3108 netif_carrier_off(pp->dev);
3110 mvneta_port_down(pp);
3111 netif_tx_stop_all_queues(pp->dev);
3113 /* Stop the port activity */
3114 mvneta_port_disable(pp);
3116 /* Clear all ethernet port interrupts */
3117 on_each_cpu(mvneta_percpu_clear_intr_cause, pp, true);
3119 /* Mask all ethernet port interrupts */
3120 on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
3122 mvneta_tx_reset(pp);
3123 mvneta_rx_reset(pp);
3126 static void mvneta_percpu_enable(void *arg)
3128 struct mvneta_port *pp = arg;
3130 enable_percpu_irq(pp->dev->irq, IRQ_TYPE_NONE);
3133 static void mvneta_percpu_disable(void *arg)
3135 struct mvneta_port *pp = arg;
3137 disable_percpu_irq(pp->dev->irq);
3140 /* Change the device mtu */
3141 static int mvneta_change_mtu(struct net_device *dev, int mtu)
3143 struct mvneta_port *pp = netdev_priv(dev);
3146 if (!IS_ALIGNED(MVNETA_RX_PKT_SIZE(mtu), 8)) {
3147 netdev_info(dev, "Illegal MTU value %d, rounding to %d\n",
3148 mtu, ALIGN(MVNETA_RX_PKT_SIZE(mtu), 8));
3149 mtu = ALIGN(MVNETA_RX_PKT_SIZE(mtu), 8);
3154 if (!netif_running(dev)) {
3156 mvneta_bm_update_mtu(pp, mtu);
3158 netdev_update_features(dev);
3162 /* The interface is running, so we have to force a
3163 * reallocation of the queues
3165 mvneta_stop_dev(pp);
3166 on_each_cpu(mvneta_percpu_disable, pp, true);
3168 mvneta_cleanup_txqs(pp);
3169 mvneta_cleanup_rxqs(pp);
3172 mvneta_bm_update_mtu(pp, mtu);
3174 pp->pkt_size = MVNETA_RX_PKT_SIZE(dev->mtu);
3175 pp->frag_size = SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(pp->pkt_size)) +
3176 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
3178 ret = mvneta_setup_rxqs(pp);
3180 netdev_err(dev, "unable to setup rxqs after MTU change\n");
3184 ret = mvneta_setup_txqs(pp);
3186 netdev_err(dev, "unable to setup txqs after MTU change\n");
3190 on_each_cpu(mvneta_percpu_enable, pp, true);
3191 mvneta_start_dev(pp);
3194 netdev_update_features(dev);
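/* Worked example (illustration only): the alignment rule enforced in
 * mvneta_change_mtu() above - an MTU is only accepted as-is when the
 * resulting RX packet size is a multiple of 8; otherwise it is rounded
 * up before the queues are reallocated. The helper name is made up.
 */
static inline bool mvneta_mtu_is_aligned(int mtu)
{
	return IS_ALIGNED(MVNETA_RX_PKT_SIZE(mtu), 8);
}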
3199 static netdev_features_t mvneta_fix_features(struct net_device *dev,
3200 netdev_features_t features)
3202 struct mvneta_port *pp = netdev_priv(dev);
3204 if (pp->tx_csum_limit && dev->mtu > pp->tx_csum_limit) {
3205 features &= ~(NETIF_F_IP_CSUM | NETIF_F_TSO);
3207 "Disable IP checksum for MTU greater than %dB\n",
3214 /* Get mac address */
3215 static void mvneta_get_mac_addr(struct mvneta_port *pp, unsigned char *addr)
3217 u32 mac_addr_l, mac_addr_h;
3219 mac_addr_l = mvreg_read(pp, MVNETA_MAC_ADDR_LOW);
3220 mac_addr_h = mvreg_read(pp, MVNETA_MAC_ADDR_HIGH);
3221 addr[0] = (mac_addr_h >> 24) & 0xFF;
3222 addr[1] = (mac_addr_h >> 16) & 0xFF;
3223 addr[2] = (mac_addr_h >> 8) & 0xFF;
3224 addr[3] = mac_addr_h & 0xFF;
3225 addr[4] = (mac_addr_l >> 8) & 0xFF;
3226 addr[5] = mac_addr_l & 0xFF;
3229 /* Handle setting mac address */
3230 static int mvneta_set_mac_addr(struct net_device *dev, void *addr)
3232 struct mvneta_port *pp = netdev_priv(dev);
3233 struct sockaddr *sockaddr = addr;
3236 ret = eth_prepare_mac_addr_change(dev, addr);
3239 /* Remove previous address table entry */
3240 mvneta_mac_addr_set(pp, dev->dev_addr, -1);
3242 /* Set new addr in hw */
3243 mvneta_mac_addr_set(pp, sockaddr->sa_data, pp->rxq_def);
3245 eth_commit_mac_addr_change(dev, addr);
3249 static void mvneta_adjust_link(struct net_device *ndev)
3251 struct mvneta_port *pp = netdev_priv(ndev);
3252 struct phy_device *phydev = ndev->phydev;
3253 int status_change = 0;
3256 if ((pp->speed != phydev->speed) ||
3257 (pp->duplex != phydev->duplex)) {
3260 val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
3261 val &= ~(MVNETA_GMAC_CONFIG_MII_SPEED |
3262 MVNETA_GMAC_CONFIG_GMII_SPEED |
3263 MVNETA_GMAC_CONFIG_FULL_DUPLEX);
3266 val |= MVNETA_GMAC_CONFIG_FULL_DUPLEX;
3268 if (phydev->speed == SPEED_1000)
3269 val |= MVNETA_GMAC_CONFIG_GMII_SPEED;
3270 else if (phydev->speed == SPEED_100)
3271 val |= MVNETA_GMAC_CONFIG_MII_SPEED;
3273 mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
3275 pp->duplex = phydev->duplex;
3276 pp->speed = phydev->speed;
3280 if (phydev->link != pp->link) {
3281 if (!phydev->link) {
3286 pp->link = phydev->link;
3290 if (status_change) {
3292 if (!pp->use_inband_status) {
3293 u32 val = mvreg_read(pp,
3294 MVNETA_GMAC_AUTONEG_CONFIG);
3295 val &= ~MVNETA_GMAC_FORCE_LINK_DOWN;
3296 val |= MVNETA_GMAC_FORCE_LINK_PASS;
3297 mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG,
3302 if (!pp->use_inband_status) {
3303 u32 val = mvreg_read(pp,
3304 MVNETA_GMAC_AUTONEG_CONFIG);
3305 val &= ~MVNETA_GMAC_FORCE_LINK_PASS;
3306 val |= MVNETA_GMAC_FORCE_LINK_DOWN;
3307 mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG,
3310 mvneta_port_down(pp);
3312 phy_print_status(phydev);
3316 static int mvneta_mdio_probe(struct mvneta_port *pp)
3318 struct phy_device *phy_dev;
3320 phy_dev = of_phy_connect(pp->dev, pp->phy_node, mvneta_adjust_link, 0,
3323 netdev_err(pp->dev, "could not find the PHY\n");
3327 phy_dev->supported &= PHY_GBIT_FEATURES;
3328 phy_dev->advertising = phy_dev->supported;
3337 static void mvneta_mdio_remove(struct mvneta_port *pp)
3339 struct net_device *ndev = pp->dev;
3341 phy_disconnect(ndev->phydev);
3344 /* Electing a CPU must be done in an atomic way: it should be done
3345 * after or before the removal/insertion of a CPU and is not reentrant.
3348 static void mvneta_percpu_elect(struct mvneta_port *pp)
3350 int elected_cpu = 0, max_cpu, cpu, i = 0;
3352 /* Use the CPU associated with the rxq when it is online; in all
3353 * other cases, use CPU 0, which can't be offline.
3355 if (cpu_online(pp->rxq_def))
3356 elected_cpu = pp->rxq_def;
3358 max_cpu = num_present_cpus();
3360 for_each_online_cpu(cpu) {
3361 int rxq_map = 0, txq_map = 0;
3364 for (rxq = 0; rxq < rxq_number; rxq++)
3365 if ((rxq % max_cpu) == cpu)
3366 rxq_map |= MVNETA_CPU_RXQ_ACCESS(rxq);
3368 if (cpu == elected_cpu)
3369 /* Map the default receive queue to the elected CPU */
3372 rxq_map |= MVNETA_CPU_RXQ_ACCESS(pp->rxq_def);
3374 /* We update the TX queue map only if we have one
3375 * queue. In this case we associate the TX queue to
3376 * the CPU bound to the default RX queue
3378 if (txq_number == 1)
3379 txq_map = (cpu == elected_cpu) ?
3380 MVNETA_CPU_TXQ_ACCESS(1) : 0;
3382 txq_map = mvreg_read(pp, MVNETA_CPU_MAP(cpu)) &
3383 MVNETA_CPU_TXQ_ACCESS_ALL_MASK;
3385 mvreg_write(pp, MVNETA_CPU_MAP(cpu), rxq_map | txq_map);
3387 /* Update the interrupt mask on each CPU according to the new mapping */
3390 smp_call_function_single(cpu, mvneta_percpu_unmask_interrupt,
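/* Illustrative sketch (not part of the driver): the static rxq-to-CPU
 * spreading used by the election loop above - each online CPU owns the
 * receive queues whose index is congruent to it modulo the number of
 * present CPUs. The helper name is made up for illustration.
 */
static inline bool mvneta_rxq_owned_by_cpu(int rxq, int cpu, int max_cpu)
{
	return (rxq % max_cpu) == cpu;
}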
3397 static int mvneta_cpu_online(unsigned int cpu, struct hlist_node *node)
3400 struct mvneta_port *pp = hlist_entry_safe(node, struct mvneta_port,
3402 struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu);
3405 spin_lock(&pp->lock);
3407 * Configuring the driver for a new CPU while the driver is
3408 * stopping is racy, so just avoid it.
3410 if (pp->is_stopped) {
3411 spin_unlock(&pp->lock);
3414 netif_tx_stop_all_queues(pp->dev);
3417 * We have to synchronise on the napi of each CPU except the one
3418 * just being woken up
3420 for_each_online_cpu(other_cpu) {
3421 if (other_cpu != cpu) {
3422 struct mvneta_pcpu_port *other_port =
3423 per_cpu_ptr(pp->ports, other_cpu);
3425 napi_synchronize(&other_port->napi);
3429 /* Mask all ethernet port interrupts */
3430 on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
3431 napi_enable(&port->napi);
3434 * Enable per-CPU interrupts on the CPU that is being brought up.
3437 mvneta_percpu_enable(pp);
3440 * Enable per-CPU interrupt on the one CPU we care about.
3443 mvneta_percpu_elect(pp);
3445 /* Unmask all ethernet port interrupts */
3446 on_each_cpu(mvneta_percpu_unmask_interrupt, pp, true);
3447 mvreg_write(pp, MVNETA_INTR_MISC_MASK,
3448 MVNETA_CAUSE_PHY_STATUS_CHANGE |
3449 MVNETA_CAUSE_LINK_CHANGE |
3450 MVNETA_CAUSE_PSC_SYNC_CHANGE);
3451 netif_tx_start_all_queues(pp->dev);
3452 spin_unlock(&pp->lock);
3456 static int mvneta_cpu_down_prepare(unsigned int cpu, struct hlist_node *node)
3458 struct mvneta_port *pp = hlist_entry_safe(node, struct mvneta_port,
3460 struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu);
3463 * Thanks to this lock we are sure that any pending CPU election is done.
3466 spin_lock(&pp->lock);
3467 /* Mask all ethernet port interrupts */
3468 on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
3469 spin_unlock(&pp->lock);
3471 napi_synchronize(&port->napi);
3472 napi_disable(&port->napi);
3473 /* Disable per-CPU interrupts on the CPU that is brought down. */
3474 mvneta_percpu_disable(pp);
3478 static int mvneta_cpu_dead(unsigned int cpu, struct hlist_node *node)
3480 struct mvneta_port *pp = hlist_entry_safe(node, struct mvneta_port,
3483 /* Check if a new CPU must be elected now that this one is down */
3484 spin_lock(&pp->lock);
3485 mvneta_percpu_elect(pp);
3486 spin_unlock(&pp->lock);
3487 /* Unmask all ethernet port interrupts */
3488 on_each_cpu(mvneta_percpu_unmask_interrupt, pp, true);
3489 mvreg_write(pp, MVNETA_INTR_MISC_MASK,
3490 MVNETA_CAUSE_PHY_STATUS_CHANGE |
3491 MVNETA_CAUSE_LINK_CHANGE |
3492 MVNETA_CAUSE_PSC_SYNC_CHANGE);
3493 netif_tx_start_all_queues(pp->dev);
3497 static int mvneta_open(struct net_device *dev)
3499 struct mvneta_port *pp = netdev_priv(dev);
3502 pp->pkt_size = MVNETA_RX_PKT_SIZE(pp->dev->mtu);
3503 pp->frag_size = SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(pp->pkt_size)) +
3504 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
3506 ret = mvneta_setup_rxqs(pp);
3510 ret = mvneta_setup_txqs(pp);
3512 goto err_cleanup_rxqs;
3514 /* Connect to port interrupt line */
3515 if (pp->neta_armada3700)
3516 ret = request_irq(pp->dev->irq, mvneta_isr, 0,
3519 ret = request_percpu_irq(pp->dev->irq, mvneta_percpu_isr,
3520 dev->name, pp->ports);
3522 netdev_err(pp->dev, "cannot request irq %d\n", pp->dev->irq);
3523 goto err_cleanup_txqs;
3526 if (!pp->neta_armada3700) {
3527 /* Enable per-CPU interrupt on all CPUs to handle our RX queue interrupts */
3530 on_each_cpu(mvneta_percpu_enable, pp, true);
3532 pp->is_stopped = false;
3533 /* Register a CPU notifier to handle the case where our CPU
3534 * might be taken offline.
3536 ret = cpuhp_state_add_instance_nocalls(online_hpstate,
3541 ret = cpuhp_state_add_instance_nocalls(CPUHP_NET_MVNETA_DEAD,
3544 goto err_free_online_hp;
3547 /* By default, the link is down */
3548 netif_carrier_off(pp->dev);
3550 ret = mvneta_mdio_probe(pp);
3552 netdev_err(dev, "cannot probe MDIO bus\n");
3553 goto err_free_dead_hp;
3556 mvneta_start_dev(pp);
3561 if (!pp->neta_armada3700)
3562 cpuhp_state_remove_instance_nocalls(CPUHP_NET_MVNETA_DEAD,
3565 if (!pp->neta_armada3700)
3566 cpuhp_state_remove_instance_nocalls(online_hpstate,
3569 if (pp->neta_armada3700) {
3570 free_irq(pp->dev->irq, pp);
3572 on_each_cpu(mvneta_percpu_disable, pp, true);
3573 free_percpu_irq(pp->dev->irq, pp->ports);
3576 mvneta_cleanup_txqs(pp);
3578 mvneta_cleanup_rxqs(pp);
3582 /* Stop the port, free port interrupt line */
3583 static int mvneta_stop(struct net_device *dev)
3585 struct mvneta_port *pp = netdev_priv(dev);
3587 if (!pp->neta_armada3700) {
3588 /* Inform that we are stopping, so we don't want to set up the
3589 * driver for new CPUs in the notifiers. The code of the
3590 * notifier for CPU online is protected by the same spinlock,
3591 * so when we get the lock, the notifier work is done.
3593 spin_lock(&pp->lock);
3594 pp->is_stopped = true;
3595 spin_unlock(&pp->lock);
3597 mvneta_stop_dev(pp);
3598 mvneta_mdio_remove(pp);
3600 cpuhp_state_remove_instance_nocalls(online_hpstate,
3602 cpuhp_state_remove_instance_nocalls(CPUHP_NET_MVNETA_DEAD,
3604 on_each_cpu(mvneta_percpu_disable, pp, true);
3605 free_percpu_irq(dev->irq, pp->ports);
3607 mvneta_stop_dev(pp);
3608 mvneta_mdio_remove(pp);
3609 free_irq(dev->irq, pp);
3612 mvneta_cleanup_rxqs(pp);
3613 mvneta_cleanup_txqs(pp);
3618 static int mvneta_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
3623 return phy_mii_ioctl(dev->phydev, ifr, cmd);
3626 /* Ethtool methods */
3628 /* Set link ksettings (phy address, speed) for ethtools */
3630 mvneta_ethtool_set_link_ksettings(struct net_device *ndev,
3631 const struct ethtool_link_ksettings *cmd)
3633 struct mvneta_port *pp = netdev_priv(ndev);
3634 struct phy_device *phydev = ndev->phydev;
3639 if ((cmd->base.autoneg == AUTONEG_ENABLE) != pp->use_inband_status) {
3642 mvneta_set_autoneg(pp, cmd->base.autoneg == AUTONEG_ENABLE);
3644 if (cmd->base.autoneg == AUTONEG_DISABLE) {
3645 val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
3646 val &= ~(MVNETA_GMAC_CONFIG_MII_SPEED |
3647 MVNETA_GMAC_CONFIG_GMII_SPEED |
3648 MVNETA_GMAC_CONFIG_FULL_DUPLEX);
3651 val |= MVNETA_GMAC_CONFIG_FULL_DUPLEX;
3653 if (phydev->speed == SPEED_1000)
3654 val |= MVNETA_GMAC_CONFIG_GMII_SPEED;
3655 else if (phydev->speed == SPEED_100)
3656 val |= MVNETA_GMAC_CONFIG_MII_SPEED;
3658 mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
3661 pp->use_inband_status = (cmd->base.autoneg == AUTONEG_ENABLE);
3662 netdev_info(pp->dev, "autoneg status set to %i\n",
3663 pp->use_inband_status);
3665 if (netif_running(ndev)) {
3666 mvneta_port_down(pp);
3671 return phy_ethtool_ksettings_set(ndev->phydev, cmd);
3674 /* Set interrupt coalescing for ethtools */
3675 static int mvneta_ethtool_set_coalesce(struct net_device *dev,
3676 struct ethtool_coalesce *c)
3678 struct mvneta_port *pp = netdev_priv(dev);
3681 for (queue = 0; queue < rxq_number; queue++) {
3682 struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
3683 rxq->time_coal = c->rx_coalesce_usecs;
3684 rxq->pkts_coal = c->rx_max_coalesced_frames;
3685 mvneta_rx_pkts_coal_set(pp, rxq, rxq->pkts_coal);
3686 mvneta_rx_time_coal_set(pp, rxq, rxq->time_coal);
3689 for (queue = 0; queue < txq_number; queue++) {
3690 struct mvneta_tx_queue *txq = &pp->txqs[queue];
3691 txq->done_pkts_coal = c->tx_max_coalesced_frames;
3692 mvneta_tx_done_pkts_coal_set(pp, txq, txq->done_pkts_coal);
3698 /* get coalescing for ethtools */
3699 static int mvneta_ethtool_get_coalesce(struct net_device *dev,
3700 struct ethtool_coalesce *c)
3702 struct mvneta_port *pp = netdev_priv(dev);
3704 c->rx_coalesce_usecs = pp->rxqs[0].time_coal;
3705 c->rx_max_coalesced_frames = pp->rxqs[0].pkts_coal;
3707 c->tx_max_coalesced_frames = pp->txqs[0].done_pkts_coal;
3712 static void mvneta_ethtool_get_drvinfo(struct net_device *dev,
3713 struct ethtool_drvinfo *drvinfo)
3715 strlcpy(drvinfo->driver, MVNETA_DRIVER_NAME,
3716 sizeof(drvinfo->driver));
3717 strlcpy(drvinfo->version, MVNETA_DRIVER_VERSION,
3718 sizeof(drvinfo->version));
3719 strlcpy(drvinfo->bus_info, dev_name(&dev->dev),
3720 sizeof(drvinfo->bus_info));
3724 static void mvneta_ethtool_get_ringparam(struct net_device *netdev,
3725 struct ethtool_ringparam *ring)
3727 struct mvneta_port *pp = netdev_priv(netdev);
3729 ring->rx_max_pending = MVNETA_MAX_RXD;
3730 ring->tx_max_pending = MVNETA_MAX_TXD;
3731 ring->rx_pending = pp->rx_ring_size;
3732 ring->tx_pending = pp->tx_ring_size;
3735 static int mvneta_ethtool_set_ringparam(struct net_device *dev,
3736 struct ethtool_ringparam *ring)
3738 struct mvneta_port *pp = netdev_priv(dev);
3740 if ((ring->rx_pending == 0) || (ring->tx_pending == 0))
3742 pp->rx_ring_size = ring->rx_pending < MVNETA_MAX_RXD ?
3743 ring->rx_pending : MVNETA_MAX_RXD;
3745 pp->tx_ring_size = clamp_t(u16, ring->tx_pending,
3746 MVNETA_MAX_SKB_DESCS * 2, MVNETA_MAX_TXD);
3747 if (pp->tx_ring_size != ring->tx_pending)
3748 netdev_warn(dev, "TX queue size set to %u (requested %u)\n",
3749 pp->tx_ring_size, ring->tx_pending);
3751 if (netif_running(dev)) {
3753 if (mvneta_open(dev)) {
3755 "error on opening device after ring param change\n");
3763 static void mvneta_ethtool_get_strings(struct net_device *netdev, u32 sset,
3766 if (sset == ETH_SS_STATS) {
3769 for (i = 0; i < ARRAY_SIZE(mvneta_statistics); i++)
3770 memcpy(data + i * ETH_GSTRING_LEN,
3771 mvneta_statistics[i].name, ETH_GSTRING_LEN);
3775 static void mvneta_ethtool_update_stats(struct mvneta_port *pp)
3777 const struct mvneta_statistic *s;
3778 void __iomem *base = pp->base;
3783 for (i = 0, s = mvneta_statistics;
3784 s < mvneta_statistics + ARRAY_SIZE(mvneta_statistics);
3788 val = readl_relaxed(base + s->offset);
3789 pp->ethtool_stats[i] += val;
3792 /* Docs say to read low 32-bit then high */
3793 low = readl_relaxed(base + s->offset);
3794 high = readl_relaxed(base + s->offset + 4);
3795 val64 = (u64)high << 32 | low;
3796 pp->ethtool_stats[i] += val64;
3802 static void mvneta_ethtool_get_stats(struct net_device *dev,
3803 struct ethtool_stats *stats, u64 *data)
3805 struct mvneta_port *pp = netdev_priv(dev);
3808 mvneta_ethtool_update_stats(pp);
3810 for (i = 0; i < ARRAY_SIZE(mvneta_statistics); i++)
3811 *data++ = pp->ethtool_stats[i];
3814 static int mvneta_ethtool_get_sset_count(struct net_device *dev, int sset)
3816 if (sset == ETH_SS_STATS)
3817 return ARRAY_SIZE(mvneta_statistics);
3821 static u32 mvneta_ethtool_get_rxfh_indir_size(struct net_device *dev)
3823 return MVNETA_RSS_LU_TABLE_SIZE;
3826 static int mvneta_ethtool_get_rxnfc(struct net_device *dev,
3827 struct ethtool_rxnfc *info,
3828 u32 *rules __always_unused)
3830 switch (info->cmd) {
3831 case ETHTOOL_GRXRINGS:
3832 info->data = rxq_number;
3841 static int mvneta_config_rss(struct mvneta_port *pp)
3846 netif_tx_stop_all_queues(pp->dev);
3848 on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
3850 /* We have to synchronise on the napi of each CPU */
3851 for_each_online_cpu(cpu) {
3852 struct mvneta_pcpu_port *pcpu_port =
3853 per_cpu_ptr(pp->ports, cpu);
3855 napi_synchronize(&pcpu_port->napi);
3856 napi_disable(&pcpu_port->napi);
3859 pp->rxq_def = pp->indir[0];
3861 /* Update unicast mapping */
3862 mvneta_set_rx_mode(pp->dev);
3864 /* Update val of portCfg register accordingly with all RxQueue types */
3865 val = MVNETA_PORT_CONFIG_DEFL_VALUE(pp->rxq_def);
3866 mvreg_write(pp, MVNETA_PORT_CONFIG, val);
3868 /* Update the elected CPU matching the new rxq_def */
3869 spin_lock(&pp->lock);
3870 mvneta_percpu_elect(pp);
3871 spin_unlock(&pp->lock);
3873 /* We have to synchronise on the napi of each CPU */
3874 for_each_online_cpu(cpu) {
3875 struct mvneta_pcpu_port *pcpu_port =
3876 per_cpu_ptr(pp->ports, cpu);
3878 napi_enable(&pcpu_port->napi);
3881 netif_tx_start_all_queues(pp->dev);
3886 static int mvneta_ethtool_set_rxfh(struct net_device *dev, const u32 *indir,
3887 const u8 *key, const u8 hfunc)
3889 struct mvneta_port *pp = netdev_priv(dev);
3891 /* Current code for Armada 3700 doesn't support RSS features yet */
3892 if (pp->neta_armada3700)
3895 /* We require at least one supported parameter to be changed
3896 * and no change in any of the unsupported parameters
3899 (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP))
3905 memcpy(pp->indir, indir, MVNETA_RSS_LU_TABLE_SIZE);
3907 return mvneta_config_rss(pp);
3910 static int mvneta_ethtool_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
3913 struct mvneta_port *pp = netdev_priv(dev);
3915 /* Current code for Armada 3700 doesn't support RSS features yet */
3916 if (pp->neta_armada3700)
3920 *hfunc = ETH_RSS_HASH_TOP;
3925 memcpy(indir, pp->indir, MVNETA_RSS_LU_TABLE_SIZE);
3930 static const struct net_device_ops mvneta_netdev_ops = {
3931 .ndo_open = mvneta_open,
3932 .ndo_stop = mvneta_stop,
3933 .ndo_start_xmit = mvneta_tx,
3934 .ndo_set_rx_mode = mvneta_set_rx_mode,
3935 .ndo_set_mac_address = mvneta_set_mac_addr,
3936 .ndo_change_mtu = mvneta_change_mtu,
3937 .ndo_fix_features = mvneta_fix_features,
3938 .ndo_get_stats64 = mvneta_get_stats64,
3939 .ndo_do_ioctl = mvneta_ioctl,
3942 const struct ethtool_ops mvneta_eth_tool_ops = {
3943 .nway_reset = phy_ethtool_nway_reset,
3944 .get_link = ethtool_op_get_link,
3945 .set_coalesce = mvneta_ethtool_set_coalesce,
3946 .get_coalesce = mvneta_ethtool_get_coalesce,
3947 .get_drvinfo = mvneta_ethtool_get_drvinfo,
3948 .get_ringparam = mvneta_ethtool_get_ringparam,
3949 .set_ringparam = mvneta_ethtool_set_ringparam,
3950 .get_strings = mvneta_ethtool_get_strings,
3951 .get_ethtool_stats = mvneta_ethtool_get_stats,
3952 .get_sset_count = mvneta_ethtool_get_sset_count,
3953 .get_rxfh_indir_size = mvneta_ethtool_get_rxfh_indir_size,
3954 .get_rxnfc = mvneta_ethtool_get_rxnfc,
3955 .get_rxfh = mvneta_ethtool_get_rxfh,
3956 .set_rxfh = mvneta_ethtool_set_rxfh,
3957 .get_link_ksettings = phy_ethtool_get_link_ksettings,
3958 .set_link_ksettings = mvneta_ethtool_set_link_ksettings,
3962 static int mvneta_init(struct device *dev, struct mvneta_port *pp)
3967 mvneta_port_disable(pp);
3969 /* Set port default values */
3970 mvneta_defaults_set(pp);
3972 pp->txqs = devm_kcalloc(dev, txq_number, sizeof(struct mvneta_tx_queue),
3977 /* Initialize TX descriptor rings */
3978 for (queue = 0; queue < txq_number; queue++) {
3979 struct mvneta_tx_queue *txq = &pp->txqs[queue];
3981 txq->size = pp->tx_ring_size;
3982 txq->done_pkts_coal = MVNETA_TXDONE_COAL_PKTS;
3985 pp->rxqs = devm_kcalloc(dev, rxq_number, sizeof(struct mvneta_rx_queue),
3990 /* Create Rx descriptor rings */
3991 for (queue = 0; queue < rxq_number; queue++) {
3992 struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
3994 rxq->size = pp->rx_ring_size;
3995 rxq->pkts_coal = MVNETA_RX_COAL_PKTS;
3996 rxq->time_coal = MVNETA_RX_COAL_USEC;
3997 rxq->buf_virt_addr = devm_kmalloc(pp->dev->dev.parent,
3998 rxq->size * sizeof(void *),
4000 if (!rxq->buf_virt_addr)
4007 /* platform glue : initialize decoding windows */
4008 static void mvneta_conf_mbus_windows(struct mvneta_port *pp,
4009 const struct mbus_dram_target_info *dram)
4015 for (i = 0; i < 6; i++) {
4016 mvreg_write(pp, MVNETA_WIN_BASE(i), 0);
4017 mvreg_write(pp, MVNETA_WIN_SIZE(i), 0);
4020 mvreg_write(pp, MVNETA_WIN_REMAP(i), 0);
4027 for (i = 0; i < dram->num_cs; i++) {
4028 const struct mbus_dram_window *cs = dram->cs + i;
4030 mvreg_write(pp, MVNETA_WIN_BASE(i),
4031 (cs->base & 0xffff0000) |
4032 (cs->mbus_attr << 8) |
4033 dram->mbus_dram_target_id);
4035 mvreg_write(pp, MVNETA_WIN_SIZE(i),
4036 (cs->size - 1) & 0xffff0000);
4038 win_enable &= ~(1 << i);
4039 win_protect |= 3 << (2 * i);
4042 /* For Armada3700, open a default 4GB Mbus window, leaving
4043 * arbitration of target/attribute to a different layer.
4046 mvreg_write(pp, MVNETA_WIN_SIZE(0), 0xffff0000);
4047 win_enable &= ~BIT(0);
4051 mvreg_write(pp, MVNETA_BASE_ADDR_ENABLE, win_enable);
4052 mvreg_write(pp, MVNETA_ACCESS_PROTECT_ENABLE, win_protect);
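/* Illustrative sketch (not part of the driver): the Mbus window base
 * encoding programmed above - bits 31:16 hold the window base address,
 * bits 15:8 the mbus attribute, and bits 7:0 the DRAM target id.
 */
static inline u32 mvneta_win_base_encode(u32 base, u8 attr, u8 target)
{
	return (base & 0xffff0000) | (attr << 8) | target;
}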
4055 /* Power up the port */
4056 static int mvneta_port_power_up(struct mvneta_port *pp, int phy_mode)
4060 /* MAC Cause register should be cleared */
4061 mvreg_write(pp, MVNETA_UNIT_INTR_CAUSE, 0);
4063 ctrl = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
4065 /* Even though it might look weird, when we're configured in
4066 * SGMII or QSGMII mode, the RGMII bit needs to be set.
4069 case PHY_INTERFACE_MODE_QSGMII:
4070 mvreg_write(pp, MVNETA_SERDES_CFG, MVNETA_QSGMII_SERDES_PROTO);
4071 ctrl |= MVNETA_GMAC2_PCS_ENABLE | MVNETA_GMAC2_PORT_RGMII;
4073 case PHY_INTERFACE_MODE_SGMII:
4074 mvreg_write(pp, MVNETA_SERDES_CFG, MVNETA_SGMII_SERDES_PROTO);
4075 ctrl |= MVNETA_GMAC2_PCS_ENABLE | MVNETA_GMAC2_PORT_RGMII;
4077 case PHY_INTERFACE_MODE_RGMII:
4078 case PHY_INTERFACE_MODE_RGMII_ID:
4079 ctrl |= MVNETA_GMAC2_PORT_RGMII;
4085 /* Cancel Port Reset */
4086 ctrl &= ~MVNETA_GMAC2_PORT_RESET;
4087 mvreg_write(pp, MVNETA_GMAC_CTRL_2, ctrl);
4089 while ((mvreg_read(pp, MVNETA_GMAC_CTRL_2) &
4090 MVNETA_GMAC2_PORT_RESET) != 0)
4096 /* Device initialization routine */
4097 static int mvneta_probe(struct platform_device *pdev)
4099 const struct mbus_dram_target_info *dram_target_info;
4100 struct resource *res;
4101 struct device_node *dn = pdev->dev.of_node;
4102 struct device_node *phy_node;
4103 struct device_node *bm_node;
4104 struct mvneta_port *pp;
4105 struct net_device *dev;
4106 const char *dt_mac_addr;
4107 char hw_mac_addr[ETH_ALEN];
4108 const char *mac_from;
4109 const char *managed;
4115 dev = alloc_etherdev_mqs(sizeof(struct mvneta_port), txq_number, rxq_number);
4119 dev->irq = irq_of_parse_and_map(dn, 0);
4120 if (dev->irq == 0) {
4122 goto err_free_netdev;
4125 phy_node = of_parse_phandle(dn, "phy", 0);
4127 if (!of_phy_is_fixed_link(dn)) {
4128 dev_err(&pdev->dev, "no PHY specified\n");
4133 err = of_phy_register_fixed_link(dn);
4135 dev_err(&pdev->dev, "cannot register fixed PHY\n");
4139 /* In the case of a fixed PHY, the DT node associated
4140 * to the PHY is the Ethernet MAC DT node.
4142 phy_node = of_node_get(dn);
4145 phy_mode = of_get_phy_mode(dn);
4147 dev_err(&pdev->dev, "incorrect phy-mode\n");
4149 goto err_put_phy_node;
4152 dev->tx_queue_len = MVNETA_MAX_TXD;
4153 dev->watchdog_timeo = 5 * HZ;
4154 dev->netdev_ops = &mvneta_netdev_ops;
4156 dev->ethtool_ops = &mvneta_eth_tool_ops;
4158 pp = netdev_priv(dev);
4159 spin_lock_init(&pp->lock);
4160 pp->phy_node = phy_node;
4161 pp->phy_interface = phy_mode;
4163 err = of_property_read_string(dn, "managed", &managed);
4164 pp->use_inband_status = (err == 0 &&
4165 strcmp(managed, "in-band-status") == 0);
4167 pp->rxq_def = rxq_def;
4169 /* Set the RX packet offset correction for platforms whose
4170 * NET_SKB_PAD exceeds 64B. It should be 64B for 64-bit
4171 * platforms and 0B for 32-bit ones.
4173 pp->rx_offset_correction =
4174 max(0, NET_SKB_PAD - MVNETA_RX_PKT_OFFSET_CORRECTION);
4176 pp->indir[0] = rxq_def;
4178 /* Get special SoC configurations */
4179 if (of_device_is_compatible(dn, "marvell,armada-3700-neta"))
4180 pp->neta_armada3700 = true;
4182 pp->clk = devm_clk_get(&pdev->dev, "core");
4183 if (IS_ERR(pp->clk))
4184 pp->clk = devm_clk_get(&pdev->dev, NULL);
4185 if (IS_ERR(pp->clk)) {
4186 err = PTR_ERR(pp->clk);
4187 goto err_put_phy_node;
4190 clk_prepare_enable(pp->clk);
4192 pp->clk_bus = devm_clk_get(&pdev->dev, "bus");
4193 if (!IS_ERR(pp->clk_bus))
4194 clk_prepare_enable(pp->clk_bus);
4196 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
4197 pp->base = devm_ioremap_resource(&pdev->dev, res);
4198 if (IS_ERR(pp->base)) {
4199 err = PTR_ERR(pp->base);
4203 /* Alloc per-cpu port structure */
4204 pp->ports = alloc_percpu(struct mvneta_pcpu_port);
4210 /* Alloc per-cpu stats */
4211 pp->stats = netdev_alloc_pcpu_stats(struct mvneta_pcpu_stats);
4214 goto err_free_ports;
4217 dt_mac_addr = of_get_mac_address(dn);
4219 mac_from = "device tree";
4220 memcpy(dev->dev_addr, dt_mac_addr, ETH_ALEN);
4222 mvneta_get_mac_addr(pp, hw_mac_addr);
4223 if (is_valid_ether_addr(hw_mac_addr)) {
4224 mac_from = "hardware";
4225 memcpy(dev->dev_addr, hw_mac_addr, ETH_ALEN);
4227 mac_from = "random";
4228 eth_hw_addr_random(dev);
4232 if (!of_property_read_u32(dn, "tx-csum-limit", &tx_csum_limit)) {
4233 if (tx_csum_limit < 0 ||
4234 tx_csum_limit > MVNETA_TX_CSUM_MAX_SIZE) {
4235 tx_csum_limit = MVNETA_TX_CSUM_DEF_SIZE;
4236 dev_info(&pdev->dev,
4237 "Wrong TX csum limit in DT, set to %dB\n",
4238 MVNETA_TX_CSUM_DEF_SIZE);
4240 } else if (of_device_is_compatible(dn, "marvell,armada-370-neta")) {
4241 tx_csum_limit = MVNETA_TX_CSUM_DEF_SIZE;
4243 tx_csum_limit = MVNETA_TX_CSUM_MAX_SIZE;
4246 pp->tx_csum_limit = tx_csum_limit;
4248 dram_target_info = mv_mbus_dram_info();
4249 /* Armada3700 requires setting a default Mbus window configuration,
4250 * even without a filled mbus_dram_target_info structure.
4253 if (dram_target_info || pp->neta_armada3700)
4254 mvneta_conf_mbus_windows(pp, dram_target_info);
4256 pp->tx_ring_size = MVNETA_MAX_TXD;
4257 pp->rx_ring_size = MVNETA_MAX_RXD;
4260 SET_NETDEV_DEV(dev, &pdev->dev);
4262 pp->id = global_port_id++;
4264 /* Obtain access to BM resources if enabled and already initialized */
4265 bm_node = of_parse_phandle(dn, "buffer-manager", 0);
4266 if (bm_node && bm_node->data) {
4267 pp->bm_priv = bm_node->data;
4268 err = mvneta_bm_port_init(pdev, pp);
4270 dev_info(&pdev->dev, "use SW buffer management\n");
4274 of_node_put(bm_node);
4276 err = mvneta_init(&pdev->dev, pp);
4280 err = mvneta_port_power_up(pp, phy_mode);
4282 dev_err(&pdev->dev, "can't power up port\n");
4286 /* The Armada3700 network controller does not support per-CPU
4287 * operation, so only a single NAPI should be initialized.
4289 if (pp->neta_armada3700) {
4290 netif_napi_add(dev, &pp->napi, mvneta_poll, NAPI_POLL_WEIGHT);
4292 for_each_present_cpu(cpu) {
4293 struct mvneta_pcpu_port *port =
4294 per_cpu_ptr(pp->ports, cpu);
4296 netif_napi_add(dev, &port->napi, mvneta_poll,
4302 dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
4303 dev->hw_features |= dev->features;
4304 dev->vlan_features |= dev->features;
4305 dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
4306 dev->gso_max_segs = MVNETA_MAX_TSO_SEGS;
4308 /* MTU range: 68 - 9676 */
4309 dev->min_mtu = ETH_MIN_MTU;
4310 /* 9676 == 9700 - 20 and rounding to 8 */
4311 dev->max_mtu = 9676;
4313 err = register_netdev(dev);
4315 dev_err(&pdev->dev, "failed to register\n");
4316 goto err_free_stats;
4319 netdev_info(dev, "Using %s mac address %pM\n", mac_from,
4322 platform_set_drvdata(pdev, pp->dev);
4324 if (pp->use_inband_status) {
4325 struct phy_device *phy = of_phy_find_device(dn);
4327 mvneta_fixed_link_update(pp, phy);
4329 put_device(&phy->mdio.dev);
4335 unregister_netdev(dev);
4337 mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id);
4338 mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_short,
4342 free_percpu(pp->stats);
4344 free_percpu(pp->ports);
4346 clk_disable_unprepare(pp->clk_bus);
4347 clk_disable_unprepare(pp->clk);
4349 of_node_put(phy_node);
4350 if (of_phy_is_fixed_link(dn))
4351 of_phy_deregister_fixed_link(dn);
4353 irq_dispose_mapping(dev->irq);
4359 /* Device removal routine */
4360 static int mvneta_remove(struct platform_device *pdev)
4362 struct net_device *dev = platform_get_drvdata(pdev);
4363 struct device_node *dn = pdev->dev.of_node;
4364 struct mvneta_port *pp = netdev_priv(dev);
4366 unregister_netdev(dev);
4367 clk_disable_unprepare(pp->clk_bus);
4368 clk_disable_unprepare(pp->clk);
4369 free_percpu(pp->ports);
4370 free_percpu(pp->stats);
4371 if (of_phy_is_fixed_link(dn))
4372 of_phy_deregister_fixed_link(dn);
4373 irq_dispose_mapping(dev->irq);
4374 of_node_put(pp->phy_node);
4378 mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id);
4379 mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_short,
4386 static const struct of_device_id mvneta_match[] = {
4387 { .compatible = "marvell,armada-370-neta" },
4388 { .compatible = "marvell,armada-xp-neta" },
4389 { .compatible = "marvell,armada-3700-neta" },
4392 MODULE_DEVICE_TABLE(of, mvneta_match);
4394 static struct platform_driver mvneta_driver = {
4395 .probe = mvneta_probe,
4396 .remove = mvneta_remove,
4398 .name = MVNETA_DRIVER_NAME,
4399 .of_match_table = mvneta_match,
4403 static int __init mvneta_driver_init(void)
4407 ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "net/mvmeta:online",
4409 mvneta_cpu_down_prepare);
4412 online_hpstate = ret;
4413 ret = cpuhp_setup_state_multi(CPUHP_NET_MVNETA_DEAD, "net/mvneta:dead",
4414 NULL, mvneta_cpu_dead);
4418 ret = platform_driver_register(&mvneta_driver);
4424 cpuhp_remove_multi_state(CPUHP_NET_MVNETA_DEAD);
4426 cpuhp_remove_multi_state(online_hpstate);
4430 module_init(mvneta_driver_init);
4432 static void __exit mvneta_driver_exit(void)
4434 platform_driver_unregister(&mvneta_driver);
4435 cpuhp_remove_multi_state(CPUHP_NET_MVNETA_DEAD);
4436 cpuhp_remove_multi_state(online_hpstate);
4438 module_exit(mvneta_driver_exit);
4440 MODULE_DESCRIPTION("Marvell NETA Ethernet Driver - www.marvell.com");
4441 MODULE_AUTHOR("Rami Rosen <rosenr@marvell.com>, Thomas Petazzoni <thomas.petazzoni@free-electrons.com>");
4442 MODULE_LICENSE("GPL");
4444 module_param(rxq_number, int, S_IRUGO);
4445 module_param(txq_number, int, S_IRUGO);
4447 module_param(rxq_def, int, S_IRUGO);
4448 module_param(rx_copybreak, int, S_IRUGO | S_IWUSR);