/*
 * Driver for Marvell NETA network card for Armada XP and Armada 370 SoCs.
 *
 * Copyright (C) 2012 Marvell
 *
 * Rami Rosen <rosenr@marvell.com>
 * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */

#include <linux/clk.h>
#include <linux/cpu.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/inetdevice.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/mbus.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/phy.h>
#include <linux/platform_device.h>
#include <linux/skbuff.h>
#include <net/hwbm.h>
#include "mvneta_bm.h"
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/tso.h>

/* Registers */
#define MVNETA_RXQ_CONFIG_REG(q)	(0x1400 + ((q) << 2))
#define MVNETA_RXQ_HW_BUF_ALLOC		BIT(0)
#define MVNETA_RXQ_SHORT_POOL_ID_SHIFT	4
#define MVNETA_RXQ_SHORT_POOL_ID_MASK	0x30
#define MVNETA_RXQ_LONG_POOL_ID_SHIFT	6
#define MVNETA_RXQ_LONG_POOL_ID_MASK	0xc0
#define MVNETA_RXQ_PKT_OFFSET_ALL_MASK	(0xf << 8)
#define MVNETA_RXQ_PKT_OFFSET_MASK(offs)	((offs) << 8)
#define MVNETA_RXQ_THRESHOLD_REG(q)	(0x14c0 + ((q) << 2))
#define MVNETA_RXQ_NON_OCCUPIED(v)	((v) << 16)
#define MVNETA_RXQ_BASE_ADDR_REG(q)	(0x1480 + ((q) << 2))
#define MVNETA_RXQ_SIZE_REG(q)		(0x14a0 + ((q) << 2))
#define MVNETA_RXQ_BUF_SIZE_SHIFT	19
#define MVNETA_RXQ_BUF_SIZE_MASK	(0x1fff << 19)
#define MVNETA_RXQ_STATUS_REG(q)	(0x14e0 + ((q) << 2))
#define MVNETA_RXQ_OCCUPIED_ALL_MASK	0x3fff
#define MVNETA_RXQ_STATUS_UPDATE_REG(q)	(0x1500 + ((q) << 2))
#define MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT	16
#define MVNETA_RXQ_ADD_NON_OCCUPIED_MAX		255
#define MVNETA_PORT_POOL_BUFFER_SZ_REG(pool)	(0x1700 + ((pool) << 2))
#define MVNETA_PORT_POOL_BUFFER_SZ_SHIFT	3
#define MVNETA_PORT_POOL_BUFFER_SZ_MASK		0xfff8
#define MVNETA_PORT_RX_RESET		0x1cc0
#define MVNETA_PORT_RX_DMA_RESET	BIT(0)
#define MVNETA_PHY_ADDR			0x2000
#define MVNETA_PHY_ADDR_MASK		0x1f
#define MVNETA_MBUS_RETRY		0x2010
#define MVNETA_UNIT_INTR_CAUSE		0x2080
#define MVNETA_UNIT_CONTROL		0x20B0
#define MVNETA_PHY_POLLING_ENABLE	BIT(1)
#define MVNETA_WIN_BASE(w)		(0x2200 + ((w) << 3))
#define MVNETA_WIN_SIZE(w)		(0x2204 + ((w) << 3))
#define MVNETA_WIN_REMAP(w)		(0x2280 + ((w) << 2))
#define MVNETA_BASE_ADDR_ENABLE		0x2290
#define MVNETA_ACCESS_PROTECT_ENABLE	0x2294
#define MVNETA_PORT_CONFIG		0x2400
#define MVNETA_UNI_PROMISC_MODE		BIT(0)
#define MVNETA_DEF_RXQ(q)		((q) << 1)
#define MVNETA_DEF_RXQ_ARP(q)		((q) << 4)
#define MVNETA_TX_UNSET_ERR_SUM		BIT(12)
#define MVNETA_DEF_RXQ_TCP(q)		((q) << 16)
#define MVNETA_DEF_RXQ_UDP(q)		((q) << 19)
#define MVNETA_DEF_RXQ_BPDU(q)		((q) << 22)
#define MVNETA_RX_CSUM_WITH_PSEUDO_HDR	BIT(25)
#define MVNETA_PORT_CONFIG_DEFL_VALUE(q)	(MVNETA_DEF_RXQ(q)       | \
						 MVNETA_DEF_RXQ_ARP(q)	 | \
						 MVNETA_DEF_RXQ_TCP(q)	 | \
						 MVNETA_DEF_RXQ_UDP(q)	 | \
						 MVNETA_DEF_RXQ_BPDU(q)	 | \
						 MVNETA_TX_UNSET_ERR_SUM | \
						 MVNETA_RX_CSUM_WITH_PSEUDO_HDR)
#define MVNETA_PORT_CONFIG_EXTEND	0x2404
#define MVNETA_MAC_ADDR_LOW		0x2414
#define MVNETA_MAC_ADDR_HIGH		0x2418
#define MVNETA_SDMA_CONFIG		0x241c
#define MVNETA_SDMA_BRST_SIZE_16	4
#define MVNETA_RX_BRST_SZ_MASK(burst)	((burst) << 1)
#define MVNETA_RX_NO_DATA_SWAP		BIT(4)
#define MVNETA_TX_NO_DATA_SWAP		BIT(5)
#define MVNETA_DESC_SWAP		BIT(6)
#define MVNETA_TX_BRST_SZ_MASK(burst)	((burst) << 22)
#define MVNETA_PORT_STATUS		0x2444
#define MVNETA_TX_IN_PRGRS		BIT(1)
#define MVNETA_TX_FIFO_EMPTY		BIT(8)
#define MVNETA_RX_MIN_FRAME_SIZE	0x247c
#define MVNETA_SERDES_CFG		0x24A0
#define MVNETA_SGMII_SERDES_PROTO	0x0cc7
#define MVNETA_QSGMII_SERDES_PROTO	0x0667
#define MVNETA_TYPE_PRIO		0x24bc
#define MVNETA_FORCE_UNI		BIT(21)
#define MVNETA_TXQ_CMD_1		0x24e4
#define MVNETA_TXQ_CMD			0x2448
#define MVNETA_TXQ_DISABLE_SHIFT	8
#define MVNETA_TXQ_ENABLE_MASK		0x000000ff
#define MVNETA_RX_DISCARD_FRAME_COUNT	0x2484
#define MVNETA_OVERRUN_FRAME_COUNT	0x2488
#define MVNETA_GMAC_CLOCK_DIVIDER	0x24f4
#define MVNETA_GMAC_1MS_CLOCK_ENABLE	BIT(31)
#define MVNETA_ACC_MODE			0x2500
#define MVNETA_BM_ADDRESS		0x2504
#define MVNETA_CPU_MAP(cpu)		(0x2540 + ((cpu) << 2))
#define MVNETA_CPU_RXQ_ACCESS_ALL_MASK	0x000000ff
#define MVNETA_CPU_TXQ_ACCESS_ALL_MASK	0x0000ff00
#define MVNETA_CPU_RXQ_ACCESS(rxq)	BIT(rxq)
#define MVNETA_CPU_TXQ_ACCESS(txq)	BIT((txq) + 8)
#define MVNETA_RXQ_TIME_COAL_REG(q)	(0x2580 + ((q) << 2))

/* Exception Interrupt Port/Queue Cause register
 *
 * Their behavior depends on the mapping done using the PCPX2Q
 * registers. For a given CPU, if the bit associated with a queue is
 * not set, a read of this register from that CPU always returns 0,
 * and a write does nothing.
 */

#define MVNETA_INTR_NEW_CAUSE		0x25a0
#define MVNETA_INTR_NEW_MASK		0x25a4

/* bits 0..7  = TXQ SENT, one bit per queue.
 * bits 8..15 = RXQ OCCUP, one bit per queue.
 * bits 16..23 = RXQ FREE, one bit per queue.
 * bit 29 = OLD_REG_SUM, see old reg ?
 * bit 30 = TX_ERR_SUM, one bit for 4 ports
 * bit 31 = MISC_SUM, one bit for 4 ports
 */
#define MVNETA_TX_INTR_MASK(nr_txqs)	(((1 << nr_txqs) - 1) << 0)
#define MVNETA_TX_INTR_MASK_ALL		(0xff << 0)
#define MVNETA_RX_INTR_MASK(nr_rxqs)	(((1 << nr_rxqs) - 1) << 8)
#define MVNETA_RX_INTR_MASK_ALL		(0xff << 8)
#define MVNETA_MISCINTR_INTR_MASK	BIT(31)
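
/* Worked example (illustrative): with the driver default of eight
 * queues, MVNETA_RX_INTR_MASK(8) expands to ((1 << 8) - 1) << 8 =
 * 0xff00, i.e. one RXQ OCCUP bit per queue, which is exactly
 * MVNETA_RX_INTR_MASK_ALL above.
 */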

#define MVNETA_INTR_OLD_CAUSE		0x25a8
#define MVNETA_INTR_OLD_MASK		0x25ac

/* Data Path Port/Queue Cause Register */
#define MVNETA_INTR_MISC_CAUSE		0x25b0
#define MVNETA_INTR_MISC_MASK		0x25b4

#define MVNETA_CAUSE_PHY_STATUS_CHANGE	BIT(0)
#define MVNETA_CAUSE_LINK_CHANGE	BIT(1)
#define MVNETA_CAUSE_PTP		BIT(4)

#define MVNETA_CAUSE_INTERNAL_ADDR_ERR	BIT(7)
#define MVNETA_CAUSE_RX_OVERRUN		BIT(8)
#define MVNETA_CAUSE_RX_CRC_ERROR	BIT(9)
#define MVNETA_CAUSE_RX_LARGE_PKT	BIT(10)
#define MVNETA_CAUSE_TX_UNDERUN		BIT(11)
#define MVNETA_CAUSE_PRBS_ERR		BIT(12)
#define MVNETA_CAUSE_PSC_SYNC_CHANGE	BIT(13)
#define MVNETA_CAUSE_SERDES_SYNC_ERR	BIT(14)

#define MVNETA_CAUSE_BMU_ALLOC_ERR_SHIFT	16
#define MVNETA_CAUSE_BMU_ALLOC_ERR_ALL_MASK	(0xF << MVNETA_CAUSE_BMU_ALLOC_ERR_SHIFT)
#define MVNETA_CAUSE_BMU_ALLOC_ERR_MASK(pool)	(1 << (MVNETA_CAUSE_BMU_ALLOC_ERR_SHIFT + (pool)))

#define MVNETA_CAUSE_TXQ_ERROR_SHIFT	24
#define MVNETA_CAUSE_TXQ_ERROR_ALL_MASK	(0xFF << MVNETA_CAUSE_TXQ_ERROR_SHIFT)
#define MVNETA_CAUSE_TXQ_ERROR_MASK(q)	(1 << (MVNETA_CAUSE_TXQ_ERROR_SHIFT + (q)))

#define MVNETA_INTR_ENABLE		0x25b8
#define MVNETA_TXQ_INTR_ENABLE_ALL_MASK	0x0000ff00
#define MVNETA_RXQ_INTR_ENABLE_ALL_MASK	0x000000ff

#define MVNETA_RXQ_CMD			0x2680
#define MVNETA_RXQ_DISABLE_SHIFT	8
#define MVNETA_RXQ_ENABLE_MASK		0x000000ff
#define MVETH_TXQ_TOKEN_COUNT_REG(q)	(0x2700 + ((q) << 4))
#define MVETH_TXQ_TOKEN_CFG_REG(q)	(0x2704 + ((q) << 4))
#define MVNETA_GMAC_CTRL_0		0x2c00
#define MVNETA_GMAC_MAX_RX_SIZE_SHIFT	2
#define MVNETA_GMAC_MAX_RX_SIZE_MASK	0x7ffc
#define MVNETA_GMAC0_PORT_ENABLE	BIT(0)
#define MVNETA_GMAC_CTRL_2		0x2c08
#define MVNETA_GMAC2_INBAND_AN_ENABLE	BIT(0)
#define MVNETA_GMAC2_PCS_ENABLE		BIT(3)
#define MVNETA_GMAC2_PORT_RGMII		BIT(4)
#define MVNETA_GMAC2_PORT_RESET		BIT(6)
#define MVNETA_GMAC_STATUS		0x2c10
#define MVNETA_GMAC_LINK_UP		BIT(0)
#define MVNETA_GMAC_SPEED_1000		BIT(1)
#define MVNETA_GMAC_SPEED_100		BIT(2)
#define MVNETA_GMAC_FULL_DUPLEX		BIT(3)
#define MVNETA_GMAC_RX_FLOW_CTRL_ENABLE	BIT(4)
#define MVNETA_GMAC_TX_FLOW_CTRL_ENABLE	BIT(5)
#define MVNETA_GMAC_RX_FLOW_CTRL_ACTIVE	BIT(6)
#define MVNETA_GMAC_TX_FLOW_CTRL_ACTIVE	BIT(7)
#define MVNETA_GMAC_AUTONEG_CONFIG	0x2c0c
#define MVNETA_GMAC_FORCE_LINK_DOWN	BIT(0)
#define MVNETA_GMAC_FORCE_LINK_PASS	BIT(1)
#define MVNETA_GMAC_INBAND_AN_ENABLE	BIT(2)
#define MVNETA_GMAC_CONFIG_MII_SPEED	BIT(5)
#define MVNETA_GMAC_CONFIG_GMII_SPEED	BIT(6)
#define MVNETA_GMAC_AN_SPEED_EN		BIT(7)
#define MVNETA_GMAC_AN_FLOW_CTRL_EN	BIT(11)
#define MVNETA_GMAC_CONFIG_FULL_DUPLEX	BIT(12)
#define MVNETA_GMAC_AN_DUPLEX_EN	BIT(13)
#define MVNETA_MIB_COUNTERS_BASE	0x3000
#define MVNETA_MIB_LATE_COLLISION	0x7c
#define MVNETA_DA_FILT_SPEC_MCAST	0x3400
#define MVNETA_DA_FILT_OTH_MCAST	0x3500
#define MVNETA_DA_FILT_UCAST_BASE	0x3600
#define MVNETA_TXQ_BASE_ADDR_REG(q)	(0x3c00 + ((q) << 2))
#define MVNETA_TXQ_SIZE_REG(q)		(0x3c20 + ((q) << 2))
#define MVNETA_TXQ_SENT_THRESH_ALL_MASK	0x3fff0000
#define MVNETA_TXQ_SENT_THRESH_MASK(coal)	((coal) << 16)
#define MVNETA_TXQ_UPDATE_REG(q)	(0x3c60 + ((q) << 2))
#define MVNETA_TXQ_DEC_SENT_SHIFT	16
#define MVNETA_TXQ_STATUS_REG(q)	(0x3c40 + ((q) << 2))
#define MVNETA_TXQ_SENT_DESC_SHIFT	16
#define MVNETA_TXQ_SENT_DESC_MASK	0x3fff0000
#define MVNETA_PORT_TX_RESET		0x3cf0
#define MVNETA_PORT_TX_DMA_RESET	BIT(0)
#define MVNETA_TX_MTU			0x3e0c
#define MVNETA_TX_TOKEN_SIZE		0x3e14
#define MVNETA_TX_TOKEN_SIZE_MAX	0xffffffff
#define MVNETA_TXQ_TOKEN_SIZE_REG(q)	(0x3e40 + ((q) << 2))
#define MVNETA_TXQ_TOKEN_SIZE_MAX	0x7fffffff

#define MVNETA_CAUSE_TXQ_SENT_DESC_ALL_MASK	0xff

/* Descriptor ring Macros */
#define MVNETA_QUEUE_NEXT_DESC(q, index)	\
	(((index) < (q)->last_desc) ? ((index) + 1) : 0)
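
/* Illustrative: with a ring of MVNETA_MAX_RXD (128) descriptors,
 * last_desc is 127, so MVNETA_QUEUE_NEXT_DESC(q, 126) yields 127 and
 * MVNETA_QUEUE_NEXT_DESC(q, 127) wraps around to 0.
 */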

/* Various constants */

/* Coalescing */
#define MVNETA_TXDONE_COAL_PKTS		1
#define MVNETA_RX_COAL_PKTS		32
#define MVNETA_RX_COAL_USEC		100

/* The two-byte Marvell header. It either contains a special value
 * used by Marvell switches when a specific hardware mode is enabled
 * (not supported by this driver), or is automatically filled with
 * zeroes on the RX side. Since those two bytes sit at the front of
 * the Ethernet header, they get the IP header aligned on a 4-byte
 * boundary automatically: the hardware skips those two bytes on its
 * own.
 */
#define MVNETA_MH_SIZE			2

#define MVNETA_VLAN_TAG_LEN		4

#define MVNETA_CPU_D_CACHE_LINE_SIZE	32
#define MVNETA_TX_CSUM_DEF_SIZE		1600
#define MVNETA_TX_CSUM_MAX_SIZE		9800
#define MVNETA_ACC_MODE_EXT1		1
#define MVNETA_ACC_MODE_EXT2		2

#define MVNETA_MAX_DECODE_WIN		6

/* Timeout constants */
#define MVNETA_TX_DISABLE_TIMEOUT_MSEC	1000
#define MVNETA_RX_DISABLE_TIMEOUT_MSEC	1000
#define MVNETA_TX_FIFO_EMPTY_TIMEOUT	10000

#define MVNETA_TX_MTU_MAX		0x3ffff

/* The RSS lookup table actually has 256 entries, but we do not use
 * them yet
 */
#define MVNETA_RSS_LU_TABLE_SIZE	1

/* TSO header size */
#define TSO_HEADER_SIZE			128

/* Max number of Rx descriptors */
#define MVNETA_MAX_RXD			128

/* Max number of Tx descriptors */
#define MVNETA_MAX_TXD			532

/* Max number of allowed TCP segments for software TSO */
#define MVNETA_MAX_TSO_SEGS		100

#define MVNETA_MAX_SKB_DESCS		(MVNETA_MAX_TSO_SEGS * 2 + MAX_SKB_FRAGS)

/* descriptor aligned size */
#define MVNETA_DESC_ALIGNED_SIZE	32

#define MVNETA_RX_PKT_SIZE(mtu) \
	ALIGN((mtu) + MVNETA_MH_SIZE + MVNETA_VLAN_TAG_LEN + \
	      ETH_HLEN + ETH_FCS_LEN,			      \
	      MVNETA_CPU_D_CACHE_LINE_SIZE)
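
/* Worked example (illustrative): for the standard 1500-byte MTU this
 * gives 1500 + 2 (MH) + 4 (VLAN tag) + 14 (ETH_HLEN) + 4 (ETH_FCS_LEN)
 * = 1524, which ALIGN() rounds up to 1536, a multiple of the 32-byte
 * cache line size.
 */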

#define IS_TSO_HEADER(txq, addr) \
	((addr >= txq->tso_hdrs_phys) && \
	 (addr < txq->tso_hdrs_phys + txq->size * TSO_HEADER_SIZE))

#define MVNETA_RX_GET_BM_POOL_ID(rxd) \
	(((rxd)->status & MVNETA_RXD_BM_POOL_MASK) >> MVNETA_RXD_BM_POOL_SHIFT)

struct mvneta_statistic {
	unsigned short offset;
	unsigned short type;
	const char name[ETH_GSTRING_LEN];
};

#define T_REG_32	32
#define T_REG_64	64

static const struct mvneta_statistic mvneta_statistics[] = {
	{ 0x3000, T_REG_64, "good_octets_received", },
	{ 0x3010, T_REG_32, "good_frames_received", },
	{ 0x3008, T_REG_32, "bad_octets_received", },
	{ 0x3014, T_REG_32, "bad_frames_received", },
	{ 0x3018, T_REG_32, "broadcast_frames_received", },
	{ 0x301c, T_REG_32, "multicast_frames_received", },
	{ 0x3050, T_REG_32, "unrec_mac_control_received", },
	{ 0x3058, T_REG_32, "good_fc_received", },
	{ 0x305c, T_REG_32, "bad_fc_received", },
	{ 0x3060, T_REG_32, "undersize_received", },
	{ 0x3064, T_REG_32, "fragments_received", },
	{ 0x3068, T_REG_32, "oversize_received", },
	{ 0x306c, T_REG_32, "jabber_received", },
	{ 0x3070, T_REG_32, "mac_receive_error", },
	{ 0x3074, T_REG_32, "bad_crc_event", },
	{ 0x3078, T_REG_32, "collision", },
	{ 0x307c, T_REG_32, "late_collision", },
	{ 0x2484, T_REG_32, "rx_discard", },
	{ 0x2488, T_REG_32, "rx_overrun", },
	{ 0x3020, T_REG_32, "frames_64_octets", },
	{ 0x3024, T_REG_32, "frames_65_to_127_octets", },
	{ 0x3028, T_REG_32, "frames_128_to_255_octets", },
	{ 0x302c, T_REG_32, "frames_256_to_511_octets", },
	{ 0x3030, T_REG_32, "frames_512_to_1023_octets", },
	{ 0x3034, T_REG_32, "frames_1024_to_max_octets", },
	{ 0x3038, T_REG_64, "good_octets_sent", },
	{ 0x3040, T_REG_32, "good_frames_sent", },
	{ 0x3044, T_REG_32, "excessive_collision", },
	{ 0x3048, T_REG_32, "multicast_frames_sent", },
	{ 0x304c, T_REG_32, "broadcast_frames_sent", },
	{ 0x3054, T_REG_32, "fc_sent", },
	{ 0x300c, T_REG_32, "internal_mac_transmit_err", },
};
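
/* Note that the 0x2484 and 0x2488 entries above are not part of the
 * 0x3000 MIB counter block: they are the discard/overrun registers
 * also defined earlier as MVNETA_RX_DISCARD_FRAME_COUNT and
 * MVNETA_OVERRUN_FRAME_COUNT.
 */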

struct mvneta_pcpu_stats {
	struct u64_stats_sync syncp;
	u64 rx_packets;
	u64 rx_bytes;
	u64 tx_packets;
	u64 tx_bytes;
};

struct mvneta_pcpu_port {
	/* Pointer to the shared port */
	struct mvneta_port *pp;

	/* Pointer to the CPU-local NAPI struct */
	struct napi_struct napi;

	/* Cause of the previous interrupt */
	u32 cause_rx_tx;
};

struct mvneta_port {
	u8 id;
	struct mvneta_pcpu_port __percpu *ports;
	struct mvneta_pcpu_stats __percpu *stats;

	int pkt_size;
	unsigned int frag_size;
	void __iomem *base;
	struct mvneta_rx_queue *rxqs;
	struct mvneta_tx_queue *txqs;
	struct net_device *dev;
	struct notifier_block cpu_notifier;
	int rxq_def;
	/* Protect the access to the percpu interrupt registers,
	 * ensuring that the configuration remains coherent.
	 */
	spinlock_t lock;
	bool is_stopped;

	/* Core clock */
	struct clk *clk;
	/* AXI clock */
	struct clk *clk_bus;
	u8 mcast_count[256];
	u16 tx_ring_size;
	u16 rx_ring_size;

	struct mii_bus *mii_bus;
	struct phy_device *phy_dev;
	phy_interface_t phy_interface;
	struct device_node *phy_node;
	unsigned int link;
	unsigned int duplex;
	unsigned int speed;
	unsigned int tx_csum_limit;
	unsigned int use_inband_status:1;

	struct mvneta_bm *bm_priv;
	struct mvneta_bm_pool *pool_long;
	struct mvneta_bm_pool *pool_short;
	int bm_win_id;

	u64 ethtool_stats[ARRAY_SIZE(mvneta_statistics)];

	u32 indir[MVNETA_RSS_LU_TABLE_SIZE];
};

/* The mvneta_tx_desc and mvneta_rx_desc structures describe the
 * layout of the transmit and reception DMA descriptors; that layout
 * is therefore dictated by the hardware design.
 */

#define MVNETA_TX_L3_OFF_SHIFT	0
#define MVNETA_TX_IP_HLEN_SHIFT	8
#define MVNETA_TX_L4_UDP	BIT(16)
#define MVNETA_TX_L3_IP6	BIT(17)
#define MVNETA_TXD_IP_CSUM	BIT(18)
#define MVNETA_TXD_Z_PAD	BIT(19)
#define MVNETA_TXD_L_DESC	BIT(20)
#define MVNETA_TXD_F_DESC	BIT(21)
#define MVNETA_TXD_FLZ_DESC	(MVNETA_TXD_Z_PAD  | \
				 MVNETA_TXD_L_DESC | \
				 MVNETA_TXD_F_DESC)
#define MVNETA_TX_L4_CSUM_FULL	BIT(30)
#define MVNETA_TX_L4_CSUM_NOT	BIT(31)

#define MVNETA_RXD_ERR_CRC		0x0
#define MVNETA_RXD_BM_POOL_SHIFT	13
#define MVNETA_RXD_BM_POOL_MASK		(BIT(13) | BIT(14))
#define MVNETA_RXD_ERR_SUMMARY		BIT(16)
#define MVNETA_RXD_ERR_OVERRUN		BIT(17)
#define MVNETA_RXD_ERR_LEN		BIT(18)
#define MVNETA_RXD_ERR_RESOURCE		(BIT(17) | BIT(18))
#define MVNETA_RXD_ERR_CODE_MASK	(BIT(17) | BIT(18))
#define MVNETA_RXD_L3_IP4		BIT(25)
#define MVNETA_RXD_FIRST_LAST_DESC	(BIT(26) | BIT(27))
#define MVNETA_RXD_L4_CSUM_OK		BIT(30)

#if defined(__LITTLE_ENDIAN)
struct mvneta_tx_desc {
	u32 command;		/* Options used by HW for packet transmitting. */
	u16 reserved1;		/* csum_l4 (for future use) */
	u16 data_size;		/* Data size of transmitted packet in bytes */
	u32 buf_phys_addr;	/* Physical addr of transmitted buffer */
	u32 reserved2;		/* hw_cmd - (for future use, PMT) */
	u32 reserved3[4];	/* Reserved - (for future use) */
};

struct mvneta_rx_desc {
	u32 status;		/* Info about received packet */
	u16 reserved1;		/* pnc_info - (for future use, PnC) */
	u16 data_size;		/* Size of received packet in bytes */

	u32 buf_phys_addr;	/* Physical address of the buffer */
	u32 reserved2;		/* pnc_flow_id (for future use, PnC) */

	u32 buf_cookie;		/* cookie for access to RX buffer in rx path */
	u16 reserved3;		/* prefetch_cmd, for future use */
	u16 reserved4;		/* csum_l4 - (for future use, PnC) */

	u32 reserved5;		/* pnc_extra PnC (for future use, PnC) */
	u32 reserved6;		/* hw_cmd (for future use, PnC and HWF) */
};
#else
struct mvneta_tx_desc {
	u16 data_size;		/* Data size of transmitted packet in bytes */
	u16 reserved1;		/* csum_l4 (for future use) */
	u32 command;		/* Options used by HW for packet transmitting. */
	u32 reserved2;		/* hw_cmd - (for future use, PMT) */
	u32 buf_phys_addr;	/* Physical addr of transmitted buffer */
	u32 reserved3[4];	/* Reserved - (for future use) */
};

struct mvneta_rx_desc {
	u16 data_size;		/* Size of received packet in bytes */
	u16 reserved1;		/* pnc_info - (for future use, PnC) */
	u32 status;		/* Info about received packet */

	u32 reserved2;		/* pnc_flow_id (for future use, PnC) */
	u32 buf_phys_addr;	/* Physical address of the buffer */

	u16 reserved4;		/* csum_l4 - (for future use, PnC) */
	u16 reserved3;		/* prefetch_cmd, for future use */
	u32 buf_cookie;		/* cookie for access to RX buffer in rx path */

	u32 reserved5;		/* pnc_extra PnC (for future use, PnC) */
	u32 reserved6;		/* hw_cmd (for future use, PnC and HWF) */
};
#endif

struct mvneta_tx_queue {
	/* Number of this TX queue, in the range 0-7 */
	u8 id;

	/* Number of TX DMA descriptors in the descriptor ring */
	int size;

	/* Number of currently used TX DMA descriptors in the
	 * descriptor ring
	 */
	int count;
	int tx_stop_threshold;
	int tx_wake_threshold;

	/* Array of transmitted skb */
	struct sk_buff **tx_skb;

	/* Index of last TX DMA descriptor that was inserted */
	int txq_put_index;

	/* Index of the TX DMA descriptor to be cleaned up */
	int txq_get_index;

	u32 done_pkts_coal;

	/* Virtual address of the TX DMA descriptors array */
	struct mvneta_tx_desc *descs;

	/* DMA address of the TX DMA descriptors array */
	dma_addr_t descs_phys;

	/* Index of the last TX DMA descriptor */
	int last_desc;

	/* Index of the next TX DMA descriptor to process */
	int next_desc_to_proc;

	/* DMA buffers for TSO headers */
	char *tso_hdrs;

	/* DMA address of TSO headers */
	dma_addr_t tso_hdrs_phys;

	/* Affinity mask for CPUs */
	cpumask_t affinity_mask;
};

struct mvneta_rx_queue {
	/* rx queue number, in the range 0-7 */
	u8 id;

	/* num of rx descriptors in the rx descriptor ring */
	int size;

	/* counter of times when mvneta_refill() failed */
	int missed;

	u32 pkts_coal;
	u32 time_coal;

	/* Virtual address of the RX DMA descriptors array */
	struct mvneta_rx_desc *descs;

	/* DMA address of the RX DMA descriptors array */
	dma_addr_t descs_phys;

	/* Index of the last RX DMA descriptor */
	int last_desc;

	/* Index of the next RX DMA descriptor to process */
	int next_desc_to_proc;
};

/* The hardware supports eight (8) RX queues; the driver now allocates
 * and enables all of them, with rxq_def selecting the default queue.
 */
static int rxq_number = 8;
static int txq_number = 8;

static int rxq_def;

static int rx_copybreak __read_mostly = 256;

/* HW BM requires that each port be identified by a unique ID */
static int global_port_id;

#define MVNETA_DRIVER_NAME "mvneta"
#define MVNETA_DRIVER_VERSION "1.0"

/* Utility/helper methods */

/* Write helper method */
static void mvreg_write(struct mvneta_port *pp, u32 offset, u32 data)
{
	writel(data, pp->base + offset);
}

/* Read helper method */
static u32 mvreg_read(struct mvneta_port *pp, u32 offset)
{
	return readl(pp->base + offset);
}
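
/* Nearly every hardware access below builds on these two helpers in a
 * read-modify-write pattern, e.g. (illustrative, mirroring
 * mvneta_port_enable() further down):
 *
 *	u32 val = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
 *	val |= MVNETA_GMAC0_PORT_ENABLE;
 *	mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);
 */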

/* Increment txq get counter */
static void mvneta_txq_inc_get(struct mvneta_tx_queue *txq)
{
	txq->txq_get_index++;
	if (txq->txq_get_index == txq->size)
		txq->txq_get_index = 0;
}

/* Increment txq put counter */
static void mvneta_txq_inc_put(struct mvneta_tx_queue *txq)
{
	txq->txq_put_index++;
	if (txq->txq_put_index == txq->size)
		txq->txq_put_index = 0;
}


/* Clear all MIB counters */
static void mvneta_mib_counters_clear(struct mvneta_port *pp)
{
	int i;
	u32 dummy;

	/* Perform dummy reads from MIB counters */
	for (i = 0; i < MVNETA_MIB_LATE_COLLISION; i += 4)
		dummy = mvreg_read(pp, (MVNETA_MIB_COUNTERS_BASE + i));
	dummy = mvreg_read(pp, MVNETA_RX_DISCARD_FRAME_COUNT);
	dummy = mvreg_read(pp, MVNETA_OVERRUN_FRAME_COUNT);
}

/* Get System Network Statistics */
struct rtnl_link_stats64 *mvneta_get_stats64(struct net_device *dev,
					     struct rtnl_link_stats64 *stats)
{
	struct mvneta_port *pp = netdev_priv(dev);
	unsigned int start;
	int cpu;

	for_each_possible_cpu(cpu) {
		struct mvneta_pcpu_stats *cpu_stats;
		u64 rx_packets;
		u64 rx_bytes;
		u64 tx_packets;
		u64 tx_bytes;

		cpu_stats = per_cpu_ptr(pp->stats, cpu);
		do {
			start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
			rx_packets = cpu_stats->rx_packets;
			rx_bytes = cpu_stats->rx_bytes;
			tx_packets = cpu_stats->tx_packets;
			tx_bytes = cpu_stats->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));

		stats->rx_packets += rx_packets;
		stats->rx_bytes += rx_bytes;
		stats->tx_packets += tx_packets;
		stats->tx_bytes += tx_bytes;
	}

	stats->rx_errors = dev->stats.rx_errors;
	stats->rx_dropped = dev->stats.rx_dropped;

	stats->tx_dropped = dev->stats.tx_dropped;

	return stats;
}
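
/* The u64_stats_fetch_begin_irq()/retry() loop above re-reads a CPU's
 * counters if a writer updated them concurrently, so each per-CPU
 * snapshot is internally consistent without locking the fast path.
 */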

/* Rx descriptors helper methods */

/* Checks whether the RX descriptor having this status is both the first
 * and the last descriptor for the RX packet. Each RX packet is currently
 * received through a single RX descriptor, so not having each RX
 * descriptor with its first and last bits set is an error
 */
static int mvneta_rxq_desc_is_first_last(u32 status)
{
	return (status & MVNETA_RXD_FIRST_LAST_DESC) ==
		MVNETA_RXD_FIRST_LAST_DESC;
}

/* Add number of descriptors ready to receive new packets */
static void mvneta_rxq_non_occup_desc_add(struct mvneta_port *pp,
					  struct mvneta_rx_queue *rxq,
					  int ndescs)
{
	/* Only MVNETA_RXQ_ADD_NON_OCCUPIED_MAX (255) descriptors can
	 * be added at once
	 */
	while (ndescs > MVNETA_RXQ_ADD_NON_OCCUPIED_MAX) {
		mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id),
			    (MVNETA_RXQ_ADD_NON_OCCUPIED_MAX <<
			     MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT));
		ndescs -= MVNETA_RXQ_ADD_NON_OCCUPIED_MAX;
	}

	mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id),
		    (ndescs << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT));
}

/* Get number of RX descriptors occupied by received packets */
static int mvneta_rxq_busy_desc_num_get(struct mvneta_port *pp,
					struct mvneta_rx_queue *rxq)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_RXQ_STATUS_REG(rxq->id));
	return val & MVNETA_RXQ_OCCUPIED_ALL_MASK;
}

/* Update the number of processed/refilled rx descriptors; called upon
 * return from the rx path or from mvneta_rxq_drop_pkts().
 */
static void mvneta_rxq_desc_num_update(struct mvneta_port *pp,
				       struct mvneta_rx_queue *rxq,
				       int rx_done, int rx_filled)
{
	u32 val;

	if ((rx_done <= 0xff) && (rx_filled <= 0xff)) {
		val = rx_done |
		  (rx_filled << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT);
		mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), val);
		return;
	}

	/* Only 255 descriptors can be added at once */
	while ((rx_done > 0) || (rx_filled > 0)) {
		if (rx_done <= 0xff) {
			val = rx_done;
			rx_done = 0;
		} else {
			val = 0xff;
			rx_done -= 0xff;
		}
		if (rx_filled <= 0xff) {
			val |= rx_filled << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT;
			rx_filled = 0;
		} else {
			val |= 0xff << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT;
			rx_filled -= 0xff;
		}
		mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), val);
	}
}
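
/* Illustrative: a call with rx_done = 300 and rx_filled = 0 issues two
 * register writes, first reporting 255 (0xff) processed descriptors
 * and then the remaining 45, since each field of the status-update
 * register is only eight bits wide.
 */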

/* Get pointer to next RX descriptor to be processed by SW */
static struct mvneta_rx_desc *
mvneta_rxq_next_desc_get(struct mvneta_rx_queue *rxq)
{
	int rx_desc = rxq->next_desc_to_proc;

	rxq->next_desc_to_proc = MVNETA_QUEUE_NEXT_DESC(rxq, rx_desc);
	prefetch(rxq->descs + rxq->next_desc_to_proc);
	return rxq->descs + rx_desc;
}

/* Change maximum receive size of the port. */
static void mvneta_max_rx_size_set(struct mvneta_port *pp, int max_rx_size)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
	val &= ~MVNETA_GMAC_MAX_RX_SIZE_MASK;
	val |= ((max_rx_size - MVNETA_MH_SIZE) / 2) <<
		MVNETA_GMAC_MAX_RX_SIZE_SHIFT;
	mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);
}
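
/* The GMAC register field appears to hold the limit in 2-byte units,
 * excluding the Marvell header (hence the divide by two); illustrative:
 * max_rx_size = 1536 is programmed as (1536 - 2) / 2 = 767.
 */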


/* Set rx queue offset */
static void mvneta_rxq_offset_set(struct mvneta_port *pp,
				  struct mvneta_rx_queue *rxq,
				  int offset)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
	val &= ~MVNETA_RXQ_PKT_OFFSET_ALL_MASK;

	/* Offset is in units of 8 bytes */
	val |= MVNETA_RXQ_PKT_OFFSET_MASK(offset >> 3);
	mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
}


/* Tx descriptors helper methods */

/* Update HW with number of TX descriptors to be sent */
static void mvneta_txq_pend_desc_add(struct mvneta_port *pp,
				     struct mvneta_tx_queue *txq,
				     int pend_desc)
{
	u32 val;

	/* Only 255 descriptors can be added at once; assume the caller
	 * processes TX descriptors in quanta of less than 256
	 */
	val = pend_desc;
	mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
}

/* Get pointer to next TX descriptor to be processed (send) by HW */
static struct mvneta_tx_desc *
mvneta_txq_next_desc_get(struct mvneta_tx_queue *txq)
{
	int tx_desc = txq->next_desc_to_proc;

	txq->next_desc_to_proc = MVNETA_QUEUE_NEXT_DESC(txq, tx_desc);
	return txq->descs + tx_desc;
}

/* Release the last allocated TX descriptor. Useful to handle DMA
 * mapping failures in the TX path.
 */
static void mvneta_txq_desc_put(struct mvneta_tx_queue *txq)
{
	if (txq->next_desc_to_proc == 0)
		txq->next_desc_to_proc = txq->last_desc - 1;
	else
		txq->next_desc_to_proc--;
}

/* Set rxq buf size */
static void mvneta_rxq_buf_size_set(struct mvneta_port *pp,
				    struct mvneta_rx_queue *rxq,
				    int buf_size)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_RXQ_SIZE_REG(rxq->id));

	val &= ~MVNETA_RXQ_BUF_SIZE_MASK;
	val |= ((buf_size >> 3) << MVNETA_RXQ_BUF_SIZE_SHIFT);

	mvreg_write(pp, MVNETA_RXQ_SIZE_REG(rxq->id), val);
}

/* Disable buffer management (BM) */
static void mvneta_rxq_bm_disable(struct mvneta_port *pp,
				  struct mvneta_rx_queue *rxq)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
	val &= ~MVNETA_RXQ_HW_BUF_ALLOC;
	mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
}

/* Enable buffer management (BM) */
static void mvneta_rxq_bm_enable(struct mvneta_port *pp,
				 struct mvneta_rx_queue *rxq)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
	val |= MVNETA_RXQ_HW_BUF_ALLOC;
	mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
}

/* Notify HW about port's assignment of pool for bigger packets */
static void mvneta_rxq_long_pool_set(struct mvneta_port *pp,
				     struct mvneta_rx_queue *rxq)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
	val &= ~MVNETA_RXQ_LONG_POOL_ID_MASK;
	val |= (pp->pool_long->id << MVNETA_RXQ_LONG_POOL_ID_SHIFT);

	mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
}

/* Notify HW about port's assignment of pool for smaller packets */
static void mvneta_rxq_short_pool_set(struct mvneta_port *pp,
				      struct mvneta_rx_queue *rxq)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
	val &= ~MVNETA_RXQ_SHORT_POOL_ID_MASK;
	val |= (pp->pool_short->id << MVNETA_RXQ_SHORT_POOL_ID_SHIFT);

	mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
}

/* Set port's receive buffer size for assigned BM pool */
static inline void mvneta_bm_pool_bufsize_set(struct mvneta_port *pp,
					      int buf_size,
					      u8 pool_id)
{
	u32 val;

	if (!IS_ALIGNED(buf_size, 8)) {
		dev_warn(pp->dev->dev.parent,
			 "illegal buf_size value %d, round to %d\n",
			 buf_size, ALIGN(buf_size, 8));
		buf_size = ALIGN(buf_size, 8);
	}

	val = mvreg_read(pp, MVNETA_PORT_POOL_BUFFER_SZ_REG(pool_id));
	val |= buf_size & MVNETA_PORT_POOL_BUFFER_SZ_MASK;
	mvreg_write(pp, MVNETA_PORT_POOL_BUFFER_SZ_REG(pool_id), val);
}

/* Configure an MBUS window to enable access to the BM internal SRAM */
static int mvneta_mbus_io_win_set(struct mvneta_port *pp, u32 base, u32 wsize,
				  u8 target, u8 attr)
{
	u32 win_enable, win_protect;
	int i;

	win_enable = mvreg_read(pp, MVNETA_BASE_ADDR_ENABLE);

	if (pp->bm_win_id < 0) {
		/* Find first not occupied window */
		for (i = 0; i < MVNETA_MAX_DECODE_WIN; i++) {
			if (win_enable & (1 << i)) {
				pp->bm_win_id = i;
				break;
			}
		}
		if (i == MVNETA_MAX_DECODE_WIN)
			return -ENOMEM;
	} else {
		i = pp->bm_win_id;
	}

	mvreg_write(pp, MVNETA_WIN_BASE(i), 0);
	mvreg_write(pp, MVNETA_WIN_SIZE(i), 0);

	if (i < 4)
		mvreg_write(pp, MVNETA_WIN_REMAP(i), 0);

	mvreg_write(pp, MVNETA_WIN_BASE(i), (base & 0xffff0000) |
		    (attr << 8) | target);

	mvreg_write(pp, MVNETA_WIN_SIZE(i), (wsize - 1) & 0xffff0000);

	win_protect = mvreg_read(pp, MVNETA_ACCESS_PROTECT_ENABLE);
	win_protect |= 3 << (2 * i);
	mvreg_write(pp, MVNETA_ACCESS_PROTECT_ENABLE, win_protect);

	win_enable &= ~(1 << i);
	mvreg_write(pp, MVNETA_BASE_ADDR_ENABLE, win_enable);

	return 0;
}

/* Assign and initialize pools for the port. In case of failure, the
 * buffer manager will remain disabled for the current port.
 */
static int mvneta_bm_port_init(struct platform_device *pdev,
			       struct mvneta_port *pp)
{
	struct device_node *dn = pdev->dev.of_node;
	u32 long_pool_id, short_pool_id, wsize;
	u8 target, attr;
	int err;

	/* Get BM window information */
	err = mvebu_mbus_get_io_win_info(pp->bm_priv->bppi_phys_addr, &wsize,
					 &target, &attr);
	if (err < 0)
		return err;

	pp->bm_win_id = -1;

	/* Open NETA -> BM window */
	err = mvneta_mbus_io_win_set(pp, pp->bm_priv->bppi_phys_addr, wsize,
				     target, attr);
	if (err < 0) {
		netdev_info(pp->dev, "fail to configure mbus window to BM\n");
		return err;
	}

	if (of_property_read_u32(dn, "bm,pool-long", &long_pool_id)) {
		netdev_info(pp->dev, "missing long pool id\n");
		return -EINVAL;
	}

	/* Create port's long pool depending on mtu */
	pp->pool_long = mvneta_bm_pool_use(pp->bm_priv, long_pool_id,
					   MVNETA_BM_LONG, pp->id,
					   MVNETA_RX_PKT_SIZE(pp->dev->mtu));
	if (!pp->pool_long) {
		netdev_info(pp->dev, "fail to obtain long pool for port\n");
		return -ENOMEM;
	}

	pp->pool_long->port_map |= 1 << pp->id;

	mvneta_bm_pool_bufsize_set(pp, pp->pool_long->buf_size,
				   pp->pool_long->id);

	/* If short pool id is not defined, assume using single pool */
	if (of_property_read_u32(dn, "bm,pool-short", &short_pool_id))
		short_pool_id = long_pool_id;

	/* Create port's short pool */
	pp->pool_short = mvneta_bm_pool_use(pp->bm_priv, short_pool_id,
					    MVNETA_BM_SHORT, pp->id,
					    MVNETA_BM_SHORT_PKT_SIZE);
	if (!pp->pool_short) {
		netdev_info(pp->dev, "fail to obtain short pool for port\n");
		mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id);
		return -ENOMEM;
	}

	if (short_pool_id != long_pool_id) {
		pp->pool_short->port_map |= 1 << pp->id;
		mvneta_bm_pool_bufsize_set(pp, pp->pool_short->buf_size,
					   pp->pool_short->id);
	}

	return 0;
}

/* Update settings of a pool for bigger packets */
static void mvneta_bm_update_mtu(struct mvneta_port *pp, int mtu)
{
	struct mvneta_bm_pool *bm_pool = pp->pool_long;
	struct hwbm_pool *hwbm_pool = &bm_pool->hwbm_pool;
	int num;

	/* Release all buffers from long pool */
	mvneta_bm_bufs_free(pp->bm_priv, bm_pool, 1 << pp->id);
	if (hwbm_pool->buf_num) {
		WARN(1, "cannot free all buffers in pool %d\n",
		     bm_pool->id);
		goto bm_mtu_err;
	}

	bm_pool->pkt_size = MVNETA_RX_PKT_SIZE(mtu);
	bm_pool->buf_size = MVNETA_RX_BUF_SIZE(bm_pool->pkt_size);
	hwbm_pool->frag_size = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
			SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(bm_pool->pkt_size));

	/* Fill entire long pool */
	num = hwbm_pool_add(hwbm_pool, hwbm_pool->size, GFP_ATOMIC);
	if (num != hwbm_pool->size) {
		WARN(1, "pool %d: %d of %d allocated\n",
		     bm_pool->id, num, hwbm_pool->size);
		goto bm_mtu_err;
	}
	mvneta_bm_pool_bufsize_set(pp, bm_pool->buf_size, bm_pool->id);

	return;

bm_mtu_err:
	mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id);
	mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_short, 1 << pp->id);

	pp->bm_priv = NULL;
	mvreg_write(pp, MVNETA_ACC_MODE, MVNETA_ACC_MODE_EXT1);
	netdev_info(pp->dev, "fail to update MTU, fall back to software BM\n");
}

/* Start the Ethernet port RX and TX activity */
static void mvneta_port_up(struct mvneta_port *pp)
{
	int queue;
	u32 q_map;

	/* Enable all initialized TXs. */
	q_map = 0;
	for (queue = 0; queue < txq_number; queue++) {
		struct mvneta_tx_queue *txq = &pp->txqs[queue];
		if (txq->descs != NULL)
			q_map |= (1 << queue);
	}
	mvreg_write(pp, MVNETA_TXQ_CMD, q_map);

	/* Enable all initialized RXQs. */
	q_map = 0;
	for (queue = 0; queue < rxq_number; queue++) {
		struct mvneta_rx_queue *rxq = &pp->rxqs[queue];

		if (rxq->descs != NULL)
			q_map |= (1 << queue);
	}
	mvreg_write(pp, MVNETA_RXQ_CMD, q_map);
}

/* Stop the Ethernet port activity */
static void mvneta_port_down(struct mvneta_port *pp)
{
	u32 val;
	int count;

	/* Stop Rx port activity. Check port Rx activity. */
	val = mvreg_read(pp, MVNETA_RXQ_CMD) & MVNETA_RXQ_ENABLE_MASK;

	/* Issue stop command for active channels only */
	if (val != 0)
		mvreg_write(pp, MVNETA_RXQ_CMD,
			    val << MVNETA_RXQ_DISABLE_SHIFT);

	/* Wait for all Rx activity to terminate. */
	count = 0;
	do {
		if (count++ >= MVNETA_RX_DISABLE_TIMEOUT_MSEC) {
			netdev_warn(pp->dev,
				    "TIMEOUT for RX stopped ! rx_queue_cmd: 0x%08x\n",
				    val);
			break;
		}
		mdelay(1);

		val = mvreg_read(pp, MVNETA_RXQ_CMD);
	} while (val & MVNETA_RXQ_ENABLE_MASK);

	/* Stop Tx port activity. Check port Tx activity. Issue stop
	 * command for active channels only
	 */
	val = (mvreg_read(pp, MVNETA_TXQ_CMD)) & MVNETA_TXQ_ENABLE_MASK;

	if (val != 0)
		mvreg_write(pp, MVNETA_TXQ_CMD,
			    (val << MVNETA_TXQ_DISABLE_SHIFT));

	/* Wait for all Tx activity to terminate. */
	count = 0;
	do {
		if (count++ >= MVNETA_TX_DISABLE_TIMEOUT_MSEC) {
			netdev_warn(pp->dev,
				    "TIMEOUT for TX stopped status=0x%08x\n",
				    val);
			break;
		}
		mdelay(1);

		/* Check TX Command reg that all Txqs are stopped */
		val = mvreg_read(pp, MVNETA_TXQ_CMD);

	} while (val & MVNETA_TXQ_ENABLE_MASK);

	/* Double check to verify that TX FIFO is empty */
	count = 0;
	do {
		if (count++ >= MVNETA_TX_FIFO_EMPTY_TIMEOUT) {
			netdev_warn(pp->dev,
				    "TX FIFO empty timeout status=0x%08x\n",
				    val);
			break;
		}
		mdelay(1);

		val = mvreg_read(pp, MVNETA_PORT_STATUS);
	} while (!(val & MVNETA_TX_FIFO_EMPTY) &&
		 (val & MVNETA_TX_IN_PRGRS));

	udelay(200);
}

/* Enable the port by setting the port enable bit of the MAC control register */
static void mvneta_port_enable(struct mvneta_port *pp)
{
	u32 val;

	/* Enable port */
	val = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
	val |= MVNETA_GMAC0_PORT_ENABLE;
	mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);
}

/* Disable the port and wait for about 200 usec before returning */
static void mvneta_port_disable(struct mvneta_port *pp)
{
	u32 val;

	/* Reset the Enable bit in the Serial Control Register */
	val = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
	val &= ~MVNETA_GMAC0_PORT_ENABLE;
	mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);

	udelay(200);
}

/* Multicast tables methods */

/* Set all entries in Unicast MAC Table; queue==-1 means reject all */
static void mvneta_set_ucast_table(struct mvneta_port *pp, int queue)
{
	int offset;
	u32 val;

	if (queue == -1) {
		val = 0;
	} else {
		val = 0x1 | (queue << 1);
		val |= (val << 24) | (val << 16) | (val << 8);
	}

	for (offset = 0; offset <= 0xc; offset += 4)
		mvreg_write(pp, MVNETA_DA_FILT_UCAST_BASE + offset, val);
}

/* Set all entries in Special Multicast MAC Table; queue==-1 means reject all */
static void mvneta_set_special_mcast_table(struct mvneta_port *pp, int queue)
{
	int offset;
	u32 val;

	if (queue == -1) {
		val = 0;
	} else {
		val = 0x1 | (queue << 1);
		val |= (val << 24) | (val << 16) | (val << 8);
	}

	for (offset = 0; offset <= 0xfc; offset += 4)
		mvreg_write(pp, MVNETA_DA_FILT_SPEC_MCAST + offset, val);
}

/* Set all entries in Other Multicast MAC Table. queue==-1 means reject all */
static void mvneta_set_other_mcast_table(struct mvneta_port *pp, int queue)
{
	int offset;
	u32 val;

	if (queue == -1) {
		memset(pp->mcast_count, 0, sizeof(pp->mcast_count));
		val = 0;
	} else {
		memset(pp->mcast_count, 1, sizeof(pp->mcast_count));
		val = 0x1 | (queue << 1);
		val |= (val << 24) | (val << 16) | (val << 8);
	}

	for (offset = 0; offset <= 0xfc; offset += 4)
		mvreg_write(pp, MVNETA_DA_FILT_OTH_MCAST + offset, val);
}

static void mvneta_set_autoneg(struct mvneta_port *pp, int enable)
{
	u32 val;

	if (enable) {
		val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
		val &= ~(MVNETA_GMAC_FORCE_LINK_PASS |
			 MVNETA_GMAC_FORCE_LINK_DOWN |
			 MVNETA_GMAC_AN_FLOW_CTRL_EN);
		val |= MVNETA_GMAC_INBAND_AN_ENABLE |
		       MVNETA_GMAC_AN_SPEED_EN |
		       MVNETA_GMAC_AN_DUPLEX_EN;
		mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);

		val = mvreg_read(pp, MVNETA_GMAC_CLOCK_DIVIDER);
		val |= MVNETA_GMAC_1MS_CLOCK_ENABLE;
		mvreg_write(pp, MVNETA_GMAC_CLOCK_DIVIDER, val);

		val = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
		val |= MVNETA_GMAC2_INBAND_AN_ENABLE;
		mvreg_write(pp, MVNETA_GMAC_CTRL_2, val);
	} else {
		val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
		val &= ~(MVNETA_GMAC_INBAND_AN_ENABLE |
			 MVNETA_GMAC_AN_SPEED_EN |
			 MVNETA_GMAC_AN_DUPLEX_EN);
		mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);

		val = mvreg_read(pp, MVNETA_GMAC_CLOCK_DIVIDER);
		val &= ~MVNETA_GMAC_1MS_CLOCK_ENABLE;
		mvreg_write(pp, MVNETA_GMAC_CLOCK_DIVIDER, val);

		val = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
		val &= ~MVNETA_GMAC2_INBAND_AN_ENABLE;
		mvreg_write(pp, MVNETA_GMAC_CTRL_2, val);
	}
}

static void mvneta_percpu_unmask_interrupt(void *arg)
{
	struct mvneta_port *pp = arg;

	/* All the queues are unmasked, but actually only the ones
	 * mapped to this CPU will be unmasked
	 */
	mvreg_write(pp, MVNETA_INTR_NEW_MASK,
		    MVNETA_RX_INTR_MASK_ALL |
		    MVNETA_TX_INTR_MASK_ALL |
		    MVNETA_MISCINTR_INTR_MASK);
}

static void mvneta_percpu_mask_interrupt(void *arg)
{
	struct mvneta_port *pp = arg;

	/* All the queues are masked, but actually only the ones
	 * mapped to this CPU will be masked
	 */
	mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);
	mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0);
	mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0);
}

static void mvneta_percpu_clear_intr_cause(void *arg)
{
	struct mvneta_port *pp = arg;

	/* All the queues are cleared, but actually only the ones
	 * mapped to this CPU will be cleared
	 */
	mvreg_write(pp, MVNETA_INTR_NEW_CAUSE, 0);
	mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0);
	mvreg_write(pp, MVNETA_INTR_OLD_CAUSE, 0);
}
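
/* These three helpers are intended to run on every CPU in turn (e.g.
 * via on_each_cpu(), as mvneta_defaults_set() does below): because of
 * the per-CPU queue mapping, each CPU only sees its own interrupt bits.
 */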

/* This method sets defaults to the NETA port:
 *	Clears interrupt Cause and Mask registers.
 *	Clears all MAC tables.
 *	Sets defaults to all registers.
 *	Resets RX and TX descriptor rings.
 *	Resets PHY.
 * This method can be called after mvneta_port_down() to return the port
 * settings to defaults.
 */
static void mvneta_defaults_set(struct mvneta_port *pp)
{
	int cpu;
	int queue;
	u32 val;
	int max_cpu = num_present_cpus();

	/* Clear all Cause registers */
	on_each_cpu(mvneta_percpu_clear_intr_cause, pp, true);

	/* Mask all interrupts */
	on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
	mvreg_write(pp, MVNETA_INTR_ENABLE, 0);

	/* Enable MBUS Retry bit16 */
	mvreg_write(pp, MVNETA_MBUS_RETRY, 0x20);

	/* Set CPU queue access map. CPUs are assigned to the RX and
	 * TX queues modulo their number. If there is only one TX
	 * queue then it is assigned to the CPU associated to the
	 * default RX queue.
	 */
	for_each_present_cpu(cpu) {
		int rxq_map = 0, txq_map = 0;
		int rxq, txq;

		for (rxq = 0; rxq < rxq_number; rxq++)
			if ((rxq % max_cpu) == cpu)
				rxq_map |= MVNETA_CPU_RXQ_ACCESS(rxq);

		for (txq = 0; txq < txq_number; txq++)
			if ((txq % max_cpu) == cpu)
				txq_map |= MVNETA_CPU_TXQ_ACCESS(txq);

		/* With only one TX queue we configure a special case
		 * which allows getting all the irqs on a single CPU
		 */
		if (txq_number == 1)
			txq_map = (cpu == pp->rxq_def) ?
				MVNETA_CPU_TXQ_ACCESS(1) : 0;

		mvreg_write(pp, MVNETA_CPU_MAP(cpu), rxq_map | txq_map);
	}

	/* Reset RX and TX DMAs */
	mvreg_write(pp, MVNETA_PORT_RX_RESET, MVNETA_PORT_RX_DMA_RESET);
	mvreg_write(pp, MVNETA_PORT_TX_RESET, MVNETA_PORT_TX_DMA_RESET);

	/* Disable Legacy WRR, Disable EJP, Release from reset */
	mvreg_write(pp, MVNETA_TXQ_CMD_1, 0);
	for (queue = 0; queue < txq_number; queue++) {
		mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(queue), 0);
		mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(queue), 0);
	}

	mvreg_write(pp, MVNETA_PORT_TX_RESET, 0);
	mvreg_write(pp, MVNETA_PORT_RX_RESET, 0);

	/* Set Port Acceleration Mode */
	if (pp->bm_priv)
		/* HW buffer management + legacy parser */
		val = MVNETA_ACC_MODE_EXT2;
	else
		/* SW buffer management + legacy parser */
		val = MVNETA_ACC_MODE_EXT1;
	mvreg_write(pp, MVNETA_ACC_MODE, val);

	if (pp->bm_priv)
		mvreg_write(pp, MVNETA_BM_ADDRESS, pp->bm_priv->bppi_phys_addr);

	/* Update the value of the portCfg register according to all RX queue types */
	val = MVNETA_PORT_CONFIG_DEFL_VALUE(pp->rxq_def);
	mvreg_write(pp, MVNETA_PORT_CONFIG, val);

	val = 0;
	mvreg_write(pp, MVNETA_PORT_CONFIG_EXTEND, val);
	mvreg_write(pp, MVNETA_RX_MIN_FRAME_SIZE, 64);

	/* Build PORT_SDMA_CONFIG_REG */
	val = 0;

	/* Default burst size */
	val |= MVNETA_TX_BRST_SZ_MASK(MVNETA_SDMA_BRST_SIZE_16);
	val |= MVNETA_RX_BRST_SZ_MASK(MVNETA_SDMA_BRST_SIZE_16);
	val |= MVNETA_RX_NO_DATA_SWAP | MVNETA_TX_NO_DATA_SWAP;

#if defined(__BIG_ENDIAN)
	val |= MVNETA_DESC_SWAP;
#endif

	/* Assign port SDMA configuration */
	mvreg_write(pp, MVNETA_SDMA_CONFIG, val);

	/* Disable PHY polling in hardware, since we're using the
	 * kernel phylib to do this.
	 */
	val = mvreg_read(pp, MVNETA_UNIT_CONTROL);
	val &= ~MVNETA_PHY_POLLING_ENABLE;
	mvreg_write(pp, MVNETA_UNIT_CONTROL, val);

	mvneta_set_autoneg(pp, pp->use_inband_status);
	mvneta_set_ucast_table(pp, -1);
	mvneta_set_special_mcast_table(pp, -1);
	mvneta_set_other_mcast_table(pp, -1);

	/* Set port interrupt enable register - default enable all */
	mvreg_write(pp, MVNETA_INTR_ENABLE,
		    (MVNETA_RXQ_INTR_ENABLE_ALL_MASK
		     | MVNETA_TXQ_INTR_ENABLE_ALL_MASK));

	mvneta_mib_counters_clear(pp);
}
1439 | ||
1440 | /* Set max sizes for tx queues */ | |
1441 | static void mvneta_txq_max_tx_size_set(struct mvneta_port *pp, int max_tx_size) | |
1442 | ||
1443 | { | |
1444 | u32 val, size, mtu; | |
1445 | int queue; | |
1446 | ||
1447 | mtu = max_tx_size * 8; | |
1448 | if (mtu > MVNETA_TX_MTU_MAX) | |
1449 | mtu = MVNETA_TX_MTU_MAX; | |
1450 | ||
1451 | /* Set MTU */ | |
1452 | val = mvreg_read(pp, MVNETA_TX_MTU); | |
1453 | val &= ~MVNETA_TX_MTU_MAX; | |
1454 | val |= mtu; | |
1455 | mvreg_write(pp, MVNETA_TX_MTU, val); | |
1456 | ||
1457 | /* TX token size and all TXQs token size must be larger than MTU */
1458 | val = mvreg_read(pp, MVNETA_TX_TOKEN_SIZE); | |
1459 | ||
1460 | size = val & MVNETA_TX_TOKEN_SIZE_MAX; | |
1461 | if (size < mtu) { | |
1462 | size = mtu; | |
1463 | val &= ~MVNETA_TX_TOKEN_SIZE_MAX; | |
1464 | val |= size; | |
1465 | mvreg_write(pp, MVNETA_TX_TOKEN_SIZE, val); | |
1466 | } | |
1467 | for (queue = 0; queue < txq_number; queue++) { | |
1468 | val = mvreg_read(pp, MVNETA_TXQ_TOKEN_SIZE_REG(queue)); | |
1469 | ||
1470 | size = val & MVNETA_TXQ_TOKEN_SIZE_MAX; | |
1471 | if (size < mtu) { | |
1472 | size = mtu; | |
1473 | val &= ~MVNETA_TXQ_TOKEN_SIZE_MAX; | |
1474 | val |= size; | |
1475 | mvreg_write(pp, MVNETA_TXQ_TOKEN_SIZE_REG(queue), val); | |
1476 | } | |
1477 | } | |
1478 | } | |
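/* Worked example (editor's sketch, assuming MVNETA_TX_MTU_MAX is 0x3ffff
 * as defined earlier in this file): for max_tx_size = 9000 the register
 * value is 9000 * 8 = 72000, which is below the 0x3ffff (262143) cap and
 * is written unchanged; every token size smaller than 72000 is then
 * raised to it, so the token-bucket scheduler cannot starve a queue
 * whose frames fit the MTU.
 */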
1479 | ||
1480 | /* Set unicast address */ | |
1481 | static void mvneta_set_ucast_addr(struct mvneta_port *pp, u8 last_nibble, | |
1482 | int queue) | |
1483 | { | |
1484 | unsigned int unicast_reg; | |
1485 | unsigned int tbl_offset; | |
1486 | unsigned int reg_offset; | |
1487 | ||
1488 | /* Locate the Unicast table entry */ | |
1489 | last_nibble = (0xf & last_nibble); | |
1490 | ||
1491 | /* offset from unicast tbl base */ | |
1492 | tbl_offset = (last_nibble / 4) * 4; | |
1493 | ||
1494 | /* offset within the above reg */ | |
1495 | reg_offset = last_nibble % 4; | |
1496 | ||
1497 | unicast_reg = mvreg_read(pp, (MVNETA_DA_FILT_UCAST_BASE + tbl_offset)); | |
1498 | ||
1499 | if (queue == -1) { | |
1500 | /* Clear accepts frame bit at specified unicast DA tbl entry */ | |
1501 | unicast_reg &= ~(0xff << (8 * reg_offset)); | |
1502 | } else { | |
1503 | unicast_reg &= ~(0xff << (8 * reg_offset)); | |
1504 | unicast_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset)); | |
1505 | } | |
1506 | ||
1507 | mvreg_write(pp, (MVNETA_DA_FILT_UCAST_BASE + tbl_offset), unicast_reg); | |
1508 | } | |
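/* Example (editor's note): the 16 unicast filter entries are packed four
 * per 32-bit register. For last_nibble = 0xb, tbl_offset = (11 / 4) * 4 = 8
 * (the third register) and reg_offset = 11 % 4 = 3, so the top byte of
 * that register holds the accept-to-queue descriptor for this address.
 */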
1509 | ||
1510 | /* Set mac address */ | |
1511 | static void mvneta_mac_addr_set(struct mvneta_port *pp, unsigned char *addr, | |
1512 | int queue) | |
1513 | { | |
1514 | unsigned int mac_h; | |
1515 | unsigned int mac_l; | |
1516 | ||
1517 | if (queue != -1) { | |
1518 | mac_l = (addr[4] << 8) | (addr[5]); | |
1519 | mac_h = (addr[0] << 24) | (addr[1] << 16) | | |
1520 | (addr[2] << 8) | (addr[3] << 0); | |
1521 | ||
1522 | mvreg_write(pp, MVNETA_MAC_ADDR_LOW, mac_l); | |
1523 | mvreg_write(pp, MVNETA_MAC_ADDR_HIGH, mac_h); | |
1524 | } | |
1525 | ||
1526 | /* Accept frames of this address */ | |
1527 | mvneta_set_ucast_addr(pp, addr[5], queue); | |
1528 | } | |
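/* Example (editor's note): for the address aa:bb:cc:dd:ee:ff the packing
 * above yields mac_h = 0xaabbccdd (addr[0..3]) and mac_l = 0x0000eeff
 * (addr[4..5]) before the MVNETA_MAC_ADDR_{HIGH,LOW} writes.
 */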
1529 | ||
6a20c175 TP |
1530 | /* Set the number of packets that will be received before an RX interrupt
1531 | * is generated by HW.
c5aff182 TP |
1532 | */ |
1533 | static void mvneta_rx_pkts_coal_set(struct mvneta_port *pp, | |
1534 | struct mvneta_rx_queue *rxq, u32 value) | |
1535 | { | |
1536 | mvreg_write(pp, MVNETA_RXQ_THRESHOLD_REG(rxq->id), | |
1537 | value | MVNETA_RXQ_NON_OCCUPIED(0)); | |
1538 | rxq->pkts_coal = value; | |
1539 | } | |
1540 | ||
6a20c175 TP |
1541 | /* Set the time delay in usec before an RX interrupt is generated by
1542 | * HW.
c5aff182 TP |
1543 | */ |
1544 | static void mvneta_rx_time_coal_set(struct mvneta_port *pp, | |
1545 | struct mvneta_rx_queue *rxq, u32 value) | |
1546 | { | |
189dd626 TP |
1547 | u32 val; |
1548 | unsigned long clk_rate; | |
1549 | ||
1550 | clk_rate = clk_get_rate(pp->clk); | |
1551 | val = (clk_rate / 1000000) * value; | |
c5aff182 TP |
1552 | |
1553 | mvreg_write(pp, MVNETA_RXQ_TIME_COAL_REG(rxq->id), val); | |
1554 | rxq->time_coal = value; | |
1555 | } | |
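/* Worked example (editor's note, hypothetical clock): with a 250 MHz core
 * clock, clk_get_rate() returns 250000000, so value = 100 usec programs
 * (250000000 / 1000000) * 100 = 25000 clock cycles into
 * MVNETA_RXQ_TIME_COAL_REG(rxq->id).
 */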
1556 | ||
1557 | /* Set threshold for TX_DONE pkts coalescing */ | |
1558 | static void mvneta_tx_done_pkts_coal_set(struct mvneta_port *pp, | |
1559 | struct mvneta_tx_queue *txq, u32 value) | |
1560 | { | |
1561 | u32 val; | |
1562 | ||
1563 | val = mvreg_read(pp, MVNETA_TXQ_SIZE_REG(txq->id)); | |
1564 | ||
1565 | val &= ~MVNETA_TXQ_SENT_THRESH_ALL_MASK; | |
1566 | val |= MVNETA_TXQ_SENT_THRESH_MASK(value); | |
1567 | ||
1568 | mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), val); | |
1569 | ||
1570 | txq->done_pkts_coal = value; | |
1571 | } | |
1572 | ||
c5aff182 TP |
1573 | /* Handle rx descriptor fill by setting buf_cookie and buf_phys_addr */ |
1574 | static void mvneta_rx_desc_fill(struct mvneta_rx_desc *rx_desc, | |
1575 | u32 phys_addr, u32 cookie) | |
1576 | { | |
1577 | rx_desc->buf_cookie = cookie; | |
1578 | rx_desc->buf_phys_addr = phys_addr; | |
1579 | } | |
1580 | ||
1581 | /* Decrement sent descriptors counter */ | |
1582 | static void mvneta_txq_sent_desc_dec(struct mvneta_port *pp, | |
1583 | struct mvneta_tx_queue *txq, | |
1584 | int sent_desc) | |
1585 | { | |
1586 | u32 val; | |
1587 | ||
1588 | /* Only 255 TX descriptors can be updated at once */ | |
1589 | while (sent_desc > 0xff) { | |
1590 | val = 0xff << MVNETA_TXQ_DEC_SENT_SHIFT; | |
1591 | mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val); | |
1592 | sent_desc = sent_desc - 0xff; | |
1593 | } | |
1594 | ||
1595 | val = sent_desc << MVNETA_TXQ_DEC_SENT_SHIFT; | |
1596 | mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val); | |
1597 | } | |
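/* Example (editor's note): the DEC_SENT field is only 8 bits wide, so
 * acknowledging 600 descriptors takes three register writes: 255, 255
 * and finally 90.
 */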
1598 | ||
1599 | /* Get number of TX descriptors already sent by HW */ | |
1600 | static int mvneta_txq_sent_desc_num_get(struct mvneta_port *pp, | |
1601 | struct mvneta_tx_queue *txq) | |
1602 | { | |
1603 | u32 val; | |
1604 | int sent_desc; | |
1605 | ||
1606 | val = mvreg_read(pp, MVNETA_TXQ_STATUS_REG(txq->id)); | |
1607 | sent_desc = (val & MVNETA_TXQ_SENT_DESC_MASK) >> | |
1608 | MVNETA_TXQ_SENT_DESC_SHIFT; | |
1609 | ||
1610 | return sent_desc; | |
1611 | } | |
1612 | ||
6a20c175 | 1613 | /* Get number of sent descriptors and decrement counter. |
c5aff182 TP |
1614 | * The number of sent descriptors is returned. |
1615 | */ | |
1616 | static int mvneta_txq_sent_desc_proc(struct mvneta_port *pp, | |
1617 | struct mvneta_tx_queue *txq) | |
1618 | { | |
1619 | int sent_desc; | |
1620 | ||
1621 | /* Get number of sent descriptors */ | |
1622 | sent_desc = mvneta_txq_sent_desc_num_get(pp, txq); | |
1623 | ||
1624 | /* Decrement sent descriptors counter */ | |
1625 | if (sent_desc) | |
1626 | mvneta_txq_sent_desc_dec(pp, txq, sent_desc); | |
1627 | ||
1628 | return sent_desc; | |
1629 | } | |
1630 | ||
1631 | /* Set TXQ descriptors fields relevant for CSUM calculation */ | |
1632 | static u32 mvneta_txq_desc_csum(int l3_offs, int l3_proto, | |
1633 | int ip_hdr_len, int l4_proto) | |
1634 | { | |
1635 | u32 command; | |
1636 | ||
1637 | /* Fields: L3_offset, IP_hdrlen, L3_type, G_IPv4_chk, | |
6a20c175 TP |
1638 | * G_L4_chk, L4_type; required only for checksum |
1639 | * calculation | |
1640 | */ | |
c5aff182 TP |
1641 | command = l3_offs << MVNETA_TX_L3_OFF_SHIFT; |
1642 | command |= ip_hdr_len << MVNETA_TX_IP_HLEN_SHIFT; | |
1643 | ||
0a198587 | 1644 | if (l3_proto == htons(ETH_P_IP)) |
c5aff182 TP |
1645 | command |= MVNETA_TXD_IP_CSUM; |
1646 | else | |
1647 | command |= MVNETA_TX_L3_IP6; | |
1648 | ||
1649 | if (l4_proto == IPPROTO_TCP) | |
1650 | command |= MVNETA_TX_L4_CSUM_FULL; | |
1651 | else if (l4_proto == IPPROTO_UDP) | |
1652 | command |= MVNETA_TX_L4_UDP | MVNETA_TX_L4_CSUM_FULL; | |
1653 | else | |
1654 | command |= MVNETA_TX_L4_CSUM_NOT; | |
1655 | ||
1656 | return command; | |
1657 | } | |
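/* Usage sketch (editor's note, hypothetical values): for an IPv4 TCP frame
 * behind a plain 14-byte Ethernet header, with the IP header length given
 * in 32-bit words as elsewhere in this file:
 *
 *	cmd = mvneta_txq_desc_csum(14, htons(ETH_P_IP), 5, IPPROTO_TCP);
 *
 * This sets MVNETA_TXD_IP_CSUM | MVNETA_TX_L4_CSUM_FULL, asking the NIC
 * to fill in both the IPv4 and TCP checksums.
 */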
1658 | ||
1659 | ||
1660 | /* Display more error info */ | |
1661 | static void mvneta_rx_error(struct mvneta_port *pp, | |
1662 | struct mvneta_rx_desc *rx_desc) | |
1663 | { | |
1664 | u32 status = rx_desc->status; | |
1665 | ||
5428213c | 1666 | if (!mvneta_rxq_desc_is_first_last(status)) { |
c5aff182 TP |
1667 | netdev_err(pp->dev, |
1668 | "bad rx status %08x (buffer oversize), size=%d\n", | |
5428213c | 1669 | status, rx_desc->data_size); |
c5aff182 TP |
1670 | return; |
1671 | } | |
1672 | ||
1673 | switch (status & MVNETA_RXD_ERR_CODE_MASK) { | |
1674 | case MVNETA_RXD_ERR_CRC: | |
1675 | netdev_err(pp->dev, "bad rx status %08x (crc error), size=%d\n", | |
1676 | status, rx_desc->data_size); | |
1677 | break; | |
1678 | case MVNETA_RXD_ERR_OVERRUN: | |
1679 | netdev_err(pp->dev, "bad rx status %08x (overrun error), size=%d\n", | |
1680 | status, rx_desc->data_size); | |
1681 | break; | |
1682 | case MVNETA_RXD_ERR_LEN: | |
1683 | netdev_err(pp->dev, "bad rx status %08x (max frame length error), size=%d\n", | |
1684 | status, rx_desc->data_size); | |
1685 | break; | |
1686 | case MVNETA_RXD_ERR_RESOURCE: | |
1687 | netdev_err(pp->dev, "bad rx status %08x (resource error), size=%d\n", | |
1688 | status, rx_desc->data_size); | |
1689 | break; | |
1690 | } | |
1691 | } | |
1692 | ||
5428213c | 1693 | /* Handle RX checksum offload based on the descriptor's status */ |
1694 | static void mvneta_rx_csum(struct mvneta_port *pp, u32 status, | |
c5aff182 TP |
1695 | struct sk_buff *skb) |
1696 | { | |
5428213c | 1697 | if ((status & MVNETA_RXD_L3_IP4) && |
1698 | (status & MVNETA_RXD_L4_CSUM_OK)) { | |
c5aff182 TP |
1699 | skb->csum = 0; |
1700 | skb->ip_summed = CHECKSUM_UNNECESSARY; | |
1701 | return; | |
1702 | } | |
1703 | ||
1704 | skb->ip_summed = CHECKSUM_NONE; | |
1705 | } | |
1706 | ||
6c498974 | 1707 | /* Return tx queue pointer (find last set bit) according to <cause> returned
1708 | * from the tx_done reg. <cause> must not be null. The returned queue is
1709 | * always valid and matches the highest set bit in <cause>.
1710 | */
c5aff182 TP |
1711 | static struct mvneta_tx_queue *mvneta_tx_done_policy(struct mvneta_port *pp, |
1712 | u32 cause) | |
1713 | { | |
1714 | int queue = fls(cause) - 1; | |
1715 | ||
6c498974 | 1716 | return &pp->txqs[queue]; |
c5aff182 TP |
1717 | } |
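/* Example (editor's note): for cause = 0x5 (TXQs 0 and 2 pending),
 * fls(0x5) - 1 = 2, so txq 2 is returned first; the caller clears its
 * bit and loops until <cause> is empty.
 */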
1718 | ||
1719 | /* Free tx queue skbuffs */ | |
1720 | static void mvneta_txq_bufs_free(struct mvneta_port *pp, | |
1721 | struct mvneta_tx_queue *txq, int num) | |
1722 | { | |
1723 | int i; | |
1724 | ||
1725 | for (i = 0; i < num; i++) { | |
1726 | struct mvneta_tx_desc *tx_desc = txq->descs + | |
1727 | txq->txq_get_index; | |
1728 | struct sk_buff *skb = txq->tx_skb[txq->txq_get_index]; | |
1729 | ||
1730 | mvneta_txq_inc_get(txq); | |
1731 | ||
2e3173a3 EG |
1732 | if (!IS_TSO_HEADER(txq, tx_desc->buf_phys_addr)) |
1733 | dma_unmap_single(pp->dev->dev.parent, | |
1734 | tx_desc->buf_phys_addr, | |
1735 | tx_desc->data_size, DMA_TO_DEVICE); | |
ba7e46ef EG |
1736 | if (!skb) |
1737 | continue; | |
c5aff182 TP |
1738 | dev_kfree_skb_any(skb); |
1739 | } | |
1740 | } | |
1741 | ||
1742 | /* Handle end of transmission */ | |
cd713199 | 1743 | static void mvneta_txq_done(struct mvneta_port *pp, |
c5aff182 TP |
1744 | struct mvneta_tx_queue *txq) |
1745 | { | |
1746 | struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id); | |
1747 | int tx_done; | |
1748 | ||
1749 | tx_done = mvneta_txq_sent_desc_proc(pp, txq); | |
cd713199 AE |
1750 | if (!tx_done) |
1751 | return; | |
1752 | ||
c5aff182 TP |
1753 | mvneta_txq_bufs_free(pp, txq, tx_done); |
1754 | ||
1755 | txq->count -= tx_done; | |
1756 | ||
1757 | if (netif_tx_queue_stopped(nq)) { | |
8eef5f97 | 1758 | if (txq->count <= txq->tx_wake_threshold) |
c5aff182 TP |
1759 | netif_tx_wake_queue(nq); |
1760 | } | |
c5aff182 TP |
1761 | } |
1762 | ||
dc35a10f | 1763 | void *mvneta_frag_alloc(unsigned int frag_size) |
8ec2cd48 | 1764 | { |
dc35a10f MW |
1765 | if (likely(frag_size <= PAGE_SIZE)) |
1766 | return netdev_alloc_frag(frag_size); | |
8ec2cd48 | 1767 | else |
dc35a10f | 1768 | return kmalloc(frag_size, GFP_ATOMIC); |
8ec2cd48 | 1769 | } |
dc35a10f | 1770 | EXPORT_SYMBOL_GPL(mvneta_frag_alloc); |
8ec2cd48 | 1771 | |
dc35a10f | 1772 | void mvneta_frag_free(unsigned int frag_size, void *data) |
8ec2cd48 | 1773 | { |
dc35a10f | 1774 | if (likely(frag_size <= PAGE_SIZE)) |
13dc0d2b | 1775 | skb_free_frag(data); |
8ec2cd48 | 1776 | else |
1777 | kfree(data); | |
1778 | } | |
dc35a10f | 1779 | EXPORT_SYMBOL_GPL(mvneta_frag_free); |
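/* Usage sketch (editor's note): the two helpers must always be paired
 * with the same frag_size, so a buffer that fell back to kmalloc()
 * (frag_size > PAGE_SIZE, e.g. jumbo frames) is released with kfree()
 * rather than skb_free_frag():
 *
 *	void *buf = mvneta_frag_alloc(pp->frag_size);
 *	if (buf)
 *		mvneta_frag_free(pp->frag_size, buf);
 */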
8ec2cd48 | 1780 | |
dc35a10f | 1781 | /* Refill processing for SW buffer management */ |
c5aff182 TP |
1782 | static int mvneta_rx_refill(struct mvneta_port *pp, |
1783 | struct mvneta_rx_desc *rx_desc) | |
1784 | ||
1785 | { | |
1786 | dma_addr_t phys_addr; | |
8ec2cd48 | 1787 | void *data; |
c5aff182 | 1788 | |
dc35a10f | 1789 | data = mvneta_frag_alloc(pp->frag_size); |
8ec2cd48 | 1790 | if (!data) |
c5aff182 TP |
1791 | return -ENOMEM; |
1792 | ||
8ec2cd48 | 1793 | phys_addr = dma_map_single(pp->dev->dev.parent, data, |
c5aff182 TP |
1794 | MVNETA_RX_BUF_SIZE(pp->pkt_size), |
1795 | DMA_FROM_DEVICE); | |
1796 | if (unlikely(dma_mapping_error(pp->dev->dev.parent, phys_addr))) { | |
dc35a10f | 1797 | mvneta_frag_free(pp->frag_size, data); |
c5aff182 TP |
1798 | return -ENOMEM; |
1799 | } | |
1800 | ||
8ec2cd48 | 1801 | mvneta_rx_desc_fill(rx_desc, phys_addr, (u32)data); |
c5aff182 TP |
1802 | return 0; |
1803 | } | |
1804 | ||
1805 | /* Handle tx checksum */ | |
1806 | static u32 mvneta_skb_tx_csum(struct mvneta_port *pp, struct sk_buff *skb) | |
1807 | { | |
1808 | if (skb->ip_summed == CHECKSUM_PARTIAL) { | |
1809 | int ip_hdr_len = 0; | |
817dbfa5 | 1810 | __be16 l3_proto = vlan_get_protocol(skb); |
c5aff182 TP |
1811 | u8 l4_proto; |
1812 | ||
817dbfa5 | 1813 | if (l3_proto == htons(ETH_P_IP)) { |
c5aff182 TP |
1814 | struct iphdr *ip4h = ip_hdr(skb); |
1815 | ||
1816 | /* Calculate IPv4 checksum and L4 checksum */ | |
1817 | ip_hdr_len = ip4h->ihl; | |
1818 | l4_proto = ip4h->protocol; | |
817dbfa5 | 1819 | } else if (l3_proto == htons(ETH_P_IPV6)) { |
c5aff182 TP |
1820 | struct ipv6hdr *ip6h = ipv6_hdr(skb); |
1821 | ||
1822 | /* Read l4_protocol from one of IPv6 extra headers */ | |
1823 | if (skb_network_header_len(skb) > 0) | |
1824 | ip_hdr_len = (skb_network_header_len(skb) >> 2); | |
1825 | l4_proto = ip6h->nexthdr; | |
1826 | } else | |
1827 | return MVNETA_TX_L4_CSUM_NOT; | |
1828 | ||
1829 | return mvneta_txq_desc_csum(skb_network_offset(skb), | |
817dbfa5 | 1830 | l3_proto, ip_hdr_len, l4_proto); |
c5aff182 TP |
1831 | } |
1832 | ||
1833 | return MVNETA_TX_L4_CSUM_NOT; | |
1834 | } | |
1835 | ||
c5aff182 TP |
1836 | /* Drop packets received by the RXQ and free buffers */ |
1837 | static void mvneta_rxq_drop_pkts(struct mvneta_port *pp, | |
1838 | struct mvneta_rx_queue *rxq) | |
1839 | { | |
1840 | int rx_done, i; | |
1841 | ||
1842 | rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq); | |
dc35a10f MW |
1843 | if (rx_done) |
1844 | mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_done); | |
1845 | ||
1846 | if (pp->bm_priv) { | |
1847 | for (i = 0; i < rx_done; i++) { | |
1848 | struct mvneta_rx_desc *rx_desc = | |
1849 | mvneta_rxq_next_desc_get(rxq); | |
1850 | u8 pool_id = MVNETA_RX_GET_BM_POOL_ID(rx_desc); | |
1851 | struct mvneta_bm_pool *bm_pool; | |
1852 | ||
1853 | bm_pool = &pp->bm_priv->bm_pools[pool_id]; | |
1854 | /* Return dropped buffer to the pool */ | |
1855 | mvneta_bm_pool_put_bp(pp->bm_priv, bm_pool, | |
1856 | rx_desc->buf_phys_addr); | |
1857 | } | |
1858 | return; | |
1859 | } | |
1860 | ||
c5aff182 TP |
1861 | for (i = 0; i < rxq->size; i++) { |
1862 | struct mvneta_rx_desc *rx_desc = rxq->descs + i; | |
8ec2cd48 | 1863 | void *data = (void *)rx_desc->buf_cookie; |
c5aff182 | 1864 | |
c5aff182 | 1865 | dma_unmap_single(pp->dev->dev.parent, rx_desc->buf_phys_addr, |
a328f3a0 | 1866 | MVNETA_RX_BUF_SIZE(pp->pkt_size), DMA_FROM_DEVICE); |
dc35a10f | 1867 | mvneta_frag_free(pp->frag_size, data); |
c5aff182 | 1868 | } |
dc35a10f | 1869 | } |
c5aff182 | 1870 | |
dc35a10f MW |
1871 | /* Main rx processing when using software buffer management */ |
1872 | static int mvneta_rx_swbm(struct mvneta_port *pp, int rx_todo, | |
1873 | struct mvneta_rx_queue *rxq) | |
1874 | { | |
1875 | struct mvneta_pcpu_port *port = this_cpu_ptr(pp->ports); | |
1876 | struct net_device *dev = pp->dev; | |
1877 | int rx_done; | |
1878 | u32 rcvd_pkts = 0; | |
1879 | u32 rcvd_bytes = 0; | |
1880 | ||
1881 | /* Get number of received packets */ | |
1882 | rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq); | |
1883 | ||
1884 | if (rx_todo > rx_done) | |
1885 | rx_todo = rx_done; | |
1886 | ||
1887 | rx_done = 0; | |
1888 | ||
1889 | /* Fairness NAPI loop */ | |
1890 | while (rx_done < rx_todo) { | |
1891 | struct mvneta_rx_desc *rx_desc = mvneta_rxq_next_desc_get(rxq); | |
1892 | struct sk_buff *skb; | |
1893 | unsigned char *data; | |
1894 | dma_addr_t phys_addr; | |
1895 | u32 rx_status, frag_size; | |
1896 | int rx_bytes, err; | |
1897 | ||
1898 | rx_done++; | |
1899 | rx_status = rx_desc->status; | |
1900 | rx_bytes = rx_desc->data_size - (ETH_FCS_LEN + MVNETA_MH_SIZE); | |
1901 | data = (unsigned char *)rx_desc->buf_cookie; | |
1902 | phys_addr = rx_desc->buf_phys_addr; | |
1903 | ||
1904 | if (!mvneta_rxq_desc_is_first_last(rx_status) || | |
1905 | (rx_status & MVNETA_RXD_ERR_SUMMARY)) { | |
1906 | err_drop_frame: | |
1907 | dev->stats.rx_errors++; | |
1908 | mvneta_rx_error(pp, rx_desc); | |
1909 | /* leave the descriptor untouched */ | |
1910 | continue; | |
1911 | } | |
1912 | ||
1913 | if (rx_bytes <= rx_copybreak) { | |
1914 | /* better copy a small frame and not unmap the DMA region */ | |
1915 | skb = netdev_alloc_skb_ip_align(dev, rx_bytes); | |
1916 | if (unlikely(!skb)) | |
1917 | goto err_drop_frame; | |
1918 | ||
1919 | dma_sync_single_range_for_cpu(dev->dev.parent, | |
1920 | rx_desc->buf_phys_addr, | |
1921 | MVNETA_MH_SIZE + NET_SKB_PAD, | |
1922 | rx_bytes, | |
1923 | DMA_FROM_DEVICE); | |
1924 | memcpy(skb_put(skb, rx_bytes), | |
1925 | data + MVNETA_MH_SIZE + NET_SKB_PAD, | |
1926 | rx_bytes); | |
1927 | ||
1928 | skb->protocol = eth_type_trans(skb, dev); | |
1929 | mvneta_rx_csum(pp, rx_status, skb); | |
1930 | napi_gro_receive(&port->napi, skb); | |
1931 | ||
1932 | rcvd_pkts++; | |
1933 | rcvd_bytes += rx_bytes; | |
1934 | ||
1935 | /* leave the descriptor and buffer untouched */ | |
1936 | continue; | |
1937 | } | |
1938 | ||
1939 | /* Refill processing */ | |
1940 | err = mvneta_rx_refill(pp, rx_desc); | |
1941 | if (err) { | |
1942 | netdev_err(dev, "Linux processing - Can't refill\n"); | |
1943 | rxq->missed++; | |
1944 | goto err_drop_frame; | |
1945 | } | |
1946 | ||
1947 | frag_size = pp->frag_size; | |
1948 | ||
1949 | skb = build_skb(data, frag_size > PAGE_SIZE ? 0 : frag_size); | |
1950 | ||
1951 | /* After refill the old buffer has to be unmapped regardless
1952 | * of whether the skb was successfully built or not.
1953 | */
1954 | dma_unmap_single(dev->dev.parent, phys_addr, | |
1955 | MVNETA_RX_BUF_SIZE(pp->pkt_size), | |
1956 | DMA_FROM_DEVICE); | |
1957 | ||
1958 | if (!skb) | |
1959 | goto err_drop_frame; | |
1960 | ||
1961 | rcvd_pkts++; | |
1962 | rcvd_bytes += rx_bytes; | |
1963 | ||
1964 | /* Linux processing */ | |
1965 | skb_reserve(skb, MVNETA_MH_SIZE + NET_SKB_PAD); | |
1966 | skb_put(skb, rx_bytes); | |
1967 | ||
1968 | skb->protocol = eth_type_trans(skb, dev); | |
1969 | ||
1970 | mvneta_rx_csum(pp, rx_status, skb); | |
1971 | ||
1972 | napi_gro_receive(&port->napi, skb); | |
1973 | } | |
1974 | ||
1975 | if (rcvd_pkts) { | |
1976 | struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats); | |
1977 | ||
1978 | u64_stats_update_begin(&stats->syncp); | |
1979 | stats->rx_packets += rcvd_pkts; | |
1980 | stats->rx_bytes += rcvd_bytes; | |
1981 | u64_stats_update_end(&stats->syncp); | |
1982 | } | |
1983 | ||
1984 | /* Update rxq management counters */ | |
1985 | mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_done); | |
1986 | ||
1987 | return rx_done; | |
c5aff182 TP |
1988 | } |
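/* Editor's note on the swbm path above: frames up to rx_copybreak bytes
 * are copied into a fresh skb so the DMA buffer stays mapped and is
 * immediately reusable; larger frames refill the descriptor first and
 * then hand the original buffer to the stack via build_skb(), trading
 * one extra allocation for zero copies.
 */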
1989 | ||
dc35a10f MW |
1990 | /* Main rx processing when using hardware buffer management */ |
1991 | static int mvneta_rx_hwbm(struct mvneta_port *pp, int rx_todo, | |
1992 | struct mvneta_rx_queue *rxq) | |
c5aff182 | 1993 | { |
12bb03b4 | 1994 | struct mvneta_pcpu_port *port = this_cpu_ptr(pp->ports); |
c5aff182 | 1995 | struct net_device *dev = pp->dev; |
a84e3289 | 1996 | int rx_done; |
dc4277dd | 1997 | u32 rcvd_pkts = 0; |
1998 | u32 rcvd_bytes = 0; | |
c5aff182 TP |
1999 | |
2000 | /* Get number of received packets */ | |
2001 | rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq); | |
2002 | ||
2003 | if (rx_todo > rx_done) | |
2004 | rx_todo = rx_done; | |
2005 | ||
2006 | rx_done = 0; | |
c5aff182 TP |
2007 | |
2008 | /* Fairness NAPI loop */ | |
2009 | while (rx_done < rx_todo) { | |
2010 | struct mvneta_rx_desc *rx_desc = mvneta_rxq_next_desc_get(rxq); | |
dc35a10f | 2011 | struct mvneta_bm_pool *bm_pool = NULL; |
c5aff182 | 2012 | struct sk_buff *skb; |
8ec2cd48 | 2013 | unsigned char *data; |
daf158d0 | 2014 | dma_addr_t phys_addr; |
dc35a10f | 2015 | u32 rx_status, frag_size; |
c5aff182 | 2016 | int rx_bytes, err; |
dc35a10f | 2017 | u8 pool_id; |
c5aff182 | 2018 | |
c5aff182 | 2019 | rx_done++; |
c5aff182 | 2020 | rx_status = rx_desc->status; |
f19fadfc | 2021 | rx_bytes = rx_desc->data_size - (ETH_FCS_LEN + MVNETA_MH_SIZE); |
8ec2cd48 | 2022 | data = (unsigned char *)rx_desc->buf_cookie; |
daf158d0 | 2023 | phys_addr = rx_desc->buf_phys_addr; |
dc35a10f MW |
2024 | pool_id = MVNETA_RX_GET_BM_POOL_ID(rx_desc); |
2025 | bm_pool = &pp->bm_priv->bm_pools[pool_id]; | |
c5aff182 | 2026 | |
5428213c | 2027 | if (!mvneta_rxq_desc_is_first_last(rx_status) || |
f19fadfc | 2028 | (rx_status & MVNETA_RXD_ERR_SUMMARY)) { |
dc35a10f MW |
2029 | err_drop_frame_ret_pool: |
2030 | /* Return the buffer to the pool */ | |
2031 | mvneta_bm_pool_put_bp(pp->bm_priv, bm_pool, | |
2032 | rx_desc->buf_phys_addr); | |
2033 | err_drop_frame: | |
c5aff182 TP |
2034 | dev->stats.rx_errors++; |
2035 | mvneta_rx_error(pp, rx_desc); | |
8ec2cd48 | 2036 | /* leave the descriptor untouched */ |
c5aff182 TP |
2037 | continue; |
2038 | } | |
2039 | ||
f19fadfc | 2040 | if (rx_bytes <= rx_copybreak) { |
2041 | /* better copy a small frame and not unmap the DMA region */ | |
2042 | skb = netdev_alloc_skb_ip_align(dev, rx_bytes); | |
2043 | if (unlikely(!skb)) | |
dc35a10f | 2044 | goto err_drop_frame_ret_pool; |
f19fadfc | 2045 | |
2046 | dma_sync_single_range_for_cpu(dev->dev.parent, | |
2047 | rx_desc->buf_phys_addr, | |
2048 | MVNETA_MH_SIZE + NET_SKB_PAD, | |
2049 | rx_bytes, | |
2050 | DMA_FROM_DEVICE); | |
2051 | memcpy(skb_put(skb, rx_bytes), | |
2052 | data + MVNETA_MH_SIZE + NET_SKB_PAD, | |
2053 | rx_bytes); | |
2054 | ||
2055 | skb->protocol = eth_type_trans(skb, dev); | |
2056 | mvneta_rx_csum(pp, rx_status, skb); | |
12bb03b4 | 2057 | napi_gro_receive(&port->napi, skb); |
f19fadfc | 2058 | |
2059 | rcvd_pkts++; | |
2060 | rcvd_bytes += rx_bytes; | |
2061 | ||
dc35a10f MW |
2062 | /* Return the buffer to the pool */ |
2063 | mvneta_bm_pool_put_bp(pp->bm_priv, bm_pool, | |
2064 | rx_desc->buf_phys_addr); | |
2065 | ||
f19fadfc | 2066 | /* leave the descriptor and buffer untouched */ |
2067 | continue; | |
2068 | } | |
2069 | ||
a84e3289 | 2070 | /* Refill processing */ |
baa11ebc | 2071 | err = hwbm_pool_refill(&bm_pool->hwbm_pool, GFP_ATOMIC); |
a84e3289 SG |
2072 | if (err) { |
2073 | netdev_err(dev, "Linux processing - Can't refill\n"); | |
2074 | rxq->missed++; | |
dc35a10f | 2075 | goto err_drop_frame_ret_pool; |
a84e3289 SG |
2076 | } |
2077 | ||
baa11ebc | 2078 | frag_size = bm_pool->hwbm_pool.frag_size; |
dc35a10f MW |
2079 | |
2080 | skb = build_skb(data, frag_size > PAGE_SIZE ? 0 : frag_size); | |
f19fadfc | 2081 | |
26c17a17 MW |
2082 | /* After refill the old buffer has to be unmapped regardless
2083 | * of whether the skb was successfully built or not.
2084 | */
dc35a10f MW |
2085 | dma_unmap_single(&pp->bm_priv->pdev->dev, phys_addr, |
2086 | bm_pool->buf_size, DMA_FROM_DEVICE); | |
26c17a17 MW |
2087 | if (!skb) |
2088 | goto err_drop_frame; | |
2089 | ||
dc4277dd | 2090 | rcvd_pkts++; |
2091 | rcvd_bytes += rx_bytes; | |
c5aff182 TP |
2092 | |
2093 | /* Linux processing */ | |
8ec2cd48 | 2094 | skb_reserve(skb, MVNETA_MH_SIZE + NET_SKB_PAD); |
c5aff182 TP |
2095 | skb_put(skb, rx_bytes); |
2096 | ||
2097 | skb->protocol = eth_type_trans(skb, dev); | |
2098 | ||
5428213c | 2099 | mvneta_rx_csum(pp, rx_status, skb); |
c5aff182 | 2100 | |
12bb03b4 | 2101 | napi_gro_receive(&port->napi, skb); |
c5aff182 TP |
2102 | } |
2103 | ||
dc4277dd | 2104 | if (rcvd_pkts) { |
74c41b04 | 2105 | struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats); |
2106 | ||
2107 | u64_stats_update_begin(&stats->syncp); | |
2108 | stats->rx_packets += rcvd_pkts; | |
2109 | stats->rx_bytes += rcvd_bytes; | |
2110 | u64_stats_update_end(&stats->syncp); | |
dc4277dd | 2111 | } |
2112 | ||
c5aff182 | 2113 | /* Update rxq management counters */ |
a84e3289 | 2114 | mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_done); |
c5aff182 TP |
2115 | |
2116 | return rx_done; | |
2117 | } | |
2118 | ||
2adb719d EG |
2119 | static inline void |
2120 | mvneta_tso_put_hdr(struct sk_buff *skb, | |
2121 | struct mvneta_port *pp, struct mvneta_tx_queue *txq) | |
2122 | { | |
2123 | struct mvneta_tx_desc *tx_desc; | |
2124 | int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); | |
2125 | ||
2126 | txq->tx_skb[txq->txq_put_index] = NULL; | |
2127 | tx_desc = mvneta_txq_next_desc_get(txq); | |
2128 | tx_desc->data_size = hdr_len; | |
2129 | tx_desc->command = mvneta_skb_tx_csum(pp, skb); | |
2130 | tx_desc->command |= MVNETA_TXD_F_DESC; | |
2131 | tx_desc->buf_phys_addr = txq->tso_hdrs_phys + | |
2132 | txq->txq_put_index * TSO_HEADER_SIZE; | |
2133 | mvneta_txq_inc_put(txq); | |
2134 | } | |
2135 | ||
2136 | static inline int | |
2137 | mvneta_tso_put_data(struct net_device *dev, struct mvneta_tx_queue *txq, | |
2138 | struct sk_buff *skb, char *data, int size, | |
2139 | bool last_tcp, bool is_last) | |
2140 | { | |
2141 | struct mvneta_tx_desc *tx_desc; | |
2142 | ||
2143 | tx_desc = mvneta_txq_next_desc_get(txq); | |
2144 | tx_desc->data_size = size; | |
2145 | tx_desc->buf_phys_addr = dma_map_single(dev->dev.parent, data, | |
2146 | size, DMA_TO_DEVICE); | |
2147 | if (unlikely(dma_mapping_error(dev->dev.parent, | |
2148 | tx_desc->buf_phys_addr))) { | |
2149 | mvneta_txq_desc_put(txq); | |
2150 | return -ENOMEM; | |
2151 | } | |
2152 | ||
2153 | tx_desc->command = 0; | |
2154 | txq->tx_skb[txq->txq_put_index] = NULL; | |
2155 | ||
2156 | if (last_tcp) { | |
2157 | /* last descriptor in the TCP packet */ | |
2158 | tx_desc->command = MVNETA_TXD_L_DESC; | |
2159 | ||
2160 | /* last descriptor in SKB */ | |
2161 | if (is_last) | |
2162 | txq->tx_skb[txq->txq_put_index] = skb; | |
2163 | } | |
2164 | mvneta_txq_inc_put(txq); | |
2165 | return 0; | |
2166 | } | |
2167 | ||
2168 | static int mvneta_tx_tso(struct sk_buff *skb, struct net_device *dev, | |
2169 | struct mvneta_tx_queue *txq) | |
2170 | { | |
2171 | int total_len, data_left; | |
2172 | int desc_count = 0; | |
2173 | struct mvneta_port *pp = netdev_priv(dev); | |
2174 | struct tso_t tso; | |
2175 | int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); | |
2176 | int i; | |
2177 | ||
2178 | /* Count needed descriptors */ | |
2179 | if ((txq->count + tso_count_descs(skb)) >= txq->size) | |
2180 | return 0; | |
2181 | ||
2182 | if (skb_headlen(skb) < (skb_transport_offset(skb) + tcp_hdrlen(skb))) { | |
2183 | pr_info("*** Is this even possible???!?!?\n"); | |
2184 | return 0; | |
2185 | } | |
2186 | ||
2187 | /* Initialize the TSO handler, and prepare the first payload */ | |
2188 | tso_start(skb, &tso); | |
2189 | ||
2190 | total_len = skb->len - hdr_len; | |
2191 | while (total_len > 0) { | |
2192 | char *hdr; | |
2193 | ||
2194 | data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len); | |
2195 | total_len -= data_left; | |
2196 | desc_count++; | |
2197 | ||
2198 | /* prepare packet headers: MAC + IP + TCP */ | |
2199 | hdr = txq->tso_hdrs + txq->txq_put_index * TSO_HEADER_SIZE; | |
2200 | tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0); | |
2201 | ||
2202 | mvneta_tso_put_hdr(skb, pp, txq); | |
2203 | ||
2204 | while (data_left > 0) { | |
2205 | int size; | |
2206 | desc_count++; | |
2207 | ||
2208 | size = min_t(int, tso.size, data_left); | |
2209 | ||
2210 | if (mvneta_tso_put_data(dev, txq, skb, | |
2211 | tso.data, size, | |
2212 | size == data_left, | |
2213 | total_len == 0)) | |
2214 | goto err_release; | |
2215 | data_left -= size; | |
2216 | ||
2217 | tso_build_data(skb, &tso, size); | |
2218 | } | |
2219 | } | |
2220 | ||
2221 | return desc_count; | |
2222 | ||
2223 | err_release: | |
2224 | /* Release all used data descriptors; header descriptors must not | |
2225 | * be DMA-unmapped. | |
2226 | */ | |
2227 | for (i = desc_count - 1; i >= 0; i--) { | |
2228 | struct mvneta_tx_desc *tx_desc = txq->descs + i; | |
2e3173a3 | 2229 | if (!IS_TSO_HEADER(txq, tx_desc->buf_phys_addr)) |
2adb719d EG |
2230 | dma_unmap_single(pp->dev->dev.parent, |
2231 | tx_desc->buf_phys_addr, | |
2232 | tx_desc->data_size, | |
2233 | DMA_TO_DEVICE); | |
2234 | mvneta_txq_desc_put(txq); | |
2235 | } | |
2236 | return 0; | |
2237 | } | |
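/* Worked example (editor's note): a 64 KiB GSO skb with gso_size = 1448
 * is emitted as ceil(payload / 1448) segments, each consuming one header
 * descriptor (carved from the coherent tso_hdrs area) plus at least one
 * data descriptor; hence the early bail-out above unless
 * tso_count_descs(skb) free slots remain in the queue.
 */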
2238 | ||
c5aff182 TP |
2239 | /* Handle tx fragmentation processing */ |
2240 | static int mvneta_tx_frag_process(struct mvneta_port *pp, struct sk_buff *skb, | |
2241 | struct mvneta_tx_queue *txq) | |
2242 | { | |
2243 | struct mvneta_tx_desc *tx_desc; | |
3d4ea02f | 2244 | int i, nr_frags = skb_shinfo(skb)->nr_frags; |
c5aff182 | 2245 | |
3d4ea02f | 2246 | for (i = 0; i < nr_frags; i++) { |
c5aff182 TP |
2247 | skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; |
2248 | void *addr = page_address(frag->page.p) + frag->page_offset; | |
2249 | ||
2250 | tx_desc = mvneta_txq_next_desc_get(txq); | |
2251 | tx_desc->data_size = frag->size; | |
2252 | ||
2253 | tx_desc->buf_phys_addr = | |
2254 | dma_map_single(pp->dev->dev.parent, addr, | |
2255 | tx_desc->data_size, DMA_TO_DEVICE); | |
2256 | ||
2257 | if (dma_mapping_error(pp->dev->dev.parent, | |
2258 | tx_desc->buf_phys_addr)) { | |
2259 | mvneta_txq_desc_put(txq); | |
2260 | goto error; | |
2261 | } | |
2262 | ||
3d4ea02f | 2263 | if (i == nr_frags - 1) { |
c5aff182 TP |
2264 | /* Last descriptor */ |
2265 | tx_desc->command = MVNETA_TXD_L_DESC | MVNETA_TXD_Z_PAD; | |
c5aff182 | 2266 | txq->tx_skb[txq->txq_put_index] = skb; |
c5aff182 TP |
2267 | } else { |
2268 | /* Descriptor in the middle: Not First, Not Last */ | |
2269 | tx_desc->command = 0; | |
c5aff182 | 2270 | txq->tx_skb[txq->txq_put_index] = NULL; |
c5aff182 | 2271 | } |
3d4ea02f | 2272 | mvneta_txq_inc_put(txq); |
c5aff182 TP |
2273 | } |
2274 | ||
2275 | return 0; | |
2276 | ||
2277 | error: | |
2278 | /* Release all descriptors that were used to map fragments of | |
6a20c175 TP |
2279 | * this packet, as well as the corresponding DMA mappings |
2280 | */ | |
c5aff182 TP |
2281 | for (i = i - 1; i >= 0; i--) { |
2282 | tx_desc = txq->descs + i; | |
2283 | dma_unmap_single(pp->dev->dev.parent, | |
2284 | tx_desc->buf_phys_addr, | |
2285 | tx_desc->data_size, | |
2286 | DMA_TO_DEVICE); | |
2287 | mvneta_txq_desc_put(txq); | |
2288 | } | |
2289 | ||
2290 | return -ENOMEM; | |
2291 | } | |
2292 | ||
2293 | /* Main tx processing */ | |
2294 | static int mvneta_tx(struct sk_buff *skb, struct net_device *dev) | |
2295 | { | |
2296 | struct mvneta_port *pp = netdev_priv(dev); | |
ee40a116 WT |
2297 | u16 txq_id = skb_get_queue_mapping(skb); |
2298 | struct mvneta_tx_queue *txq = &pp->txqs[txq_id]; | |
c5aff182 | 2299 | struct mvneta_tx_desc *tx_desc; |
5f478b41 | 2300 | int len = skb->len; |
c5aff182 TP |
2301 | int frags = 0; |
2302 | u32 tx_cmd; | |
2303 | ||
2304 | if (!netif_running(dev)) | |
2305 | goto out; | |
2306 | ||
2adb719d EG |
2307 | if (skb_is_gso(skb)) { |
2308 | frags = mvneta_tx_tso(skb, dev, txq); | |
2309 | goto out; | |
2310 | } | |
2311 | ||
c5aff182 | 2312 | frags = skb_shinfo(skb)->nr_frags + 1; |
c5aff182 TP |
2313 | |
2314 | /* Get a descriptor for the first part of the packet */ | |
2315 | tx_desc = mvneta_txq_next_desc_get(txq); | |
2316 | ||
2317 | tx_cmd = mvneta_skb_tx_csum(pp, skb); | |
2318 | ||
2319 | tx_desc->data_size = skb_headlen(skb); | |
2320 | ||
2321 | tx_desc->buf_phys_addr = dma_map_single(dev->dev.parent, skb->data, | |
2322 | tx_desc->data_size, | |
2323 | DMA_TO_DEVICE); | |
2324 | if (unlikely(dma_mapping_error(dev->dev.parent, | |
2325 | tx_desc->buf_phys_addr))) { | |
2326 | mvneta_txq_desc_put(txq); | |
2327 | frags = 0; | |
2328 | goto out; | |
2329 | } | |
2330 | ||
2331 | if (frags == 1) { | |
2332 | /* First and Last descriptor */ | |
2333 | tx_cmd |= MVNETA_TXD_FLZ_DESC; | |
2334 | tx_desc->command = tx_cmd; | |
2335 | txq->tx_skb[txq->txq_put_index] = skb; | |
2336 | mvneta_txq_inc_put(txq); | |
2337 | } else { | |
2338 | /* First but not Last */ | |
2339 | tx_cmd |= MVNETA_TXD_F_DESC; | |
2340 | txq->tx_skb[txq->txq_put_index] = NULL; | |
2341 | mvneta_txq_inc_put(txq); | |
2342 | tx_desc->command = tx_cmd; | |
2343 | /* Continue with other skb fragments */ | |
2344 | if (mvneta_tx_frag_process(pp, skb, txq)) { | |
2345 | dma_unmap_single(dev->dev.parent, | |
2346 | tx_desc->buf_phys_addr, | |
2347 | tx_desc->data_size, | |
2348 | DMA_TO_DEVICE); | |
2349 | mvneta_txq_desc_put(txq); | |
2350 | frags = 0; | |
2351 | goto out; | |
2352 | } | |
2353 | } | |
2354 | ||
c5aff182 TP |
2355 | out: |
2356 | if (frags > 0) { | |
74c41b04 | 2357 | struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats); |
e19d2dda EG |
2358 | struct netdev_queue *nq = netdev_get_tx_queue(dev, txq_id); |
2359 | ||
2360 | txq->count += frags; | |
2361 | mvneta_txq_pend_desc_add(pp, txq, frags); | |
2362 | ||
8eef5f97 | 2363 | if (txq->count >= txq->tx_stop_threshold) |
e19d2dda | 2364 | netif_tx_stop_queue(nq); |
c5aff182 | 2365 | |
74c41b04 | 2366 | u64_stats_update_begin(&stats->syncp); |
2367 | stats->tx_packets++; | |
5f478b41 | 2368 | stats->tx_bytes += len; |
74c41b04 | 2369 | u64_stats_update_end(&stats->syncp); |
c5aff182 TP |
2370 | } else { |
2371 | dev->stats.tx_dropped++; | |
2372 | dev_kfree_skb_any(skb); | |
2373 | } | |
2374 | ||
c5aff182 TP |
2375 | return NETDEV_TX_OK; |
2376 | } | |
2377 | ||
2378 | ||
2379 | /* Free tx resources, when resetting a port */ | |
2380 | static void mvneta_txq_done_force(struct mvneta_port *pp, | |
2381 | struct mvneta_tx_queue *txq) | |
2382 | ||
2383 | { | |
2384 | int tx_done = txq->count; | |
2385 | ||
2386 | mvneta_txq_bufs_free(pp, txq, tx_done); | |
2387 | ||
2388 | /* reset txq */ | |
2389 | txq->count = 0; | |
2390 | txq->txq_put_index = 0; | |
2391 | txq->txq_get_index = 0; | |
2392 | } | |
2393 | ||
6c498974 | 2394 | /* Handle tx done - called in softirq context. The <cause_tx_done> argument |
2395 | * must be a valid cause according to MVNETA_TXQ_INTR_MASK_ALL. | |
2396 | */ | |
0713a86a | 2397 | static void mvneta_tx_done_gbe(struct mvneta_port *pp, u32 cause_tx_done) |
c5aff182 TP |
2398 | { |
2399 | struct mvneta_tx_queue *txq; | |
c5aff182 TP |
2400 | struct netdev_queue *nq; |
2401 | ||
6c498974 | 2402 | while (cause_tx_done) { |
c5aff182 | 2403 | txq = mvneta_tx_done_policy(pp, cause_tx_done); |
c5aff182 TP |
2404 | |
2405 | nq = netdev_get_tx_queue(pp->dev, txq->id); | |
2406 | __netif_tx_lock(nq, smp_processor_id()); | |
2407 | ||
0713a86a AE |
2408 | if (txq->count) |
2409 | mvneta_txq_done(pp, txq); | |
c5aff182 TP |
2410 | |
2411 | __netif_tx_unlock(nq); | |
2412 | cause_tx_done &= ~((1 << txq->id)); | |
2413 | } | |
c5aff182 TP |
2414 | } |
2415 | ||
6a20c175 | 2416 | /* Compute crc8 of the specified address, using a unique algorithm,
c5aff182 TP |
2417 | * per the hw spec, which differs from the generic crc8 algorithm
2418 | */ | |
2419 | static int mvneta_addr_crc(unsigned char *addr) | |
2420 | { | |
2421 | int crc = 0; | |
2422 | int i; | |
2423 | ||
2424 | for (i = 0; i < ETH_ALEN; i++) { | |
2425 | int j; | |
2426 | ||
2427 | crc = (crc ^ addr[i]) << 8; | |
2428 | for (j = 7; j >= 0; j--) { | |
2429 | if (crc & (0x100 << j)) | |
2430 | crc ^= 0x107 << j; | |
2431 | } | |
2432 | } | |
2433 | ||
2434 | return crc; | |
2435 | } | |
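/* Example (editor's sketch): an address outside the special
 * 01:00:5e:00:00:XX range, e.g. the IPv6 all-nodes MAC, is hashed into
 * the 256-entry Other Multicast table:
 *
 *	unsigned char mc[ETH_ALEN] = { 0x33, 0x33, 0x00, 0x00, 0x00, 0x01 };
 *	u8 idx = mvneta_addr_crc(mc);
 *
 * where the int result is truncated to 8 bits on assignment, matching
 * the unsigned char crc_result used by the caller below.
 */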
2436 | ||
2437 | /* This method controls the net device special MAC multicast support. | |
2438 | * The Special Multicast Table for MAC addresses supports MAC of the form | |
2439 | * 0x01-00-5E-00-00-XX (where XX is between 0x00 and 0xFF). | |
2440 | * The MAC DA[7:0] bits are used as a pointer to the Special Multicast | |
2441 | * Table entries in the DA-Filter table. This method sets the appropriate
2442 | * Special Multicast Table entry.
2443 | */ | |
2444 | static void mvneta_set_special_mcast_addr(struct mvneta_port *pp, | |
2445 | unsigned char last_byte, | |
2446 | int queue) | |
2447 | { | |
2448 | unsigned int smc_table_reg; | |
2449 | unsigned int tbl_offset; | |
2450 | unsigned int reg_offset; | |
2451 | ||
2452 | /* Register offset from SMC table base */ | |
2453 | tbl_offset = (last_byte / 4); | |
2454 | /* Entry offset within the above reg */ | |
2455 | reg_offset = last_byte % 4; | |
2456 | ||
2457 | smc_table_reg = mvreg_read(pp, (MVNETA_DA_FILT_SPEC_MCAST | |
2458 | + tbl_offset * 4)); | |
2459 | ||
2460 | if (queue == -1) | |
2461 | smc_table_reg &= ~(0xff << (8 * reg_offset)); | |
2462 | else { | |
2463 | smc_table_reg &= ~(0xff << (8 * reg_offset)); | |
2464 | smc_table_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset)); | |
2465 | } | |
2466 | ||
2467 | mvreg_write(pp, MVNETA_DA_FILT_SPEC_MCAST + tbl_offset * 4, | |
2468 | smc_table_reg); | |
2469 | } | |
2470 | ||
2471 | /* This method controls the network device Other MAC multicast support. | |
2472 | * The Other Multicast Table is used for multicast of another type. | |
2473 | * A CRC-8 is used as an index to the Other Multicast Table entries | |
2474 | * in the DA-Filter table. | |
2475 | * The method gets the CRC-8 value from the calling routine and | |
2476 | * sets the appropriate Other Multicast Table entry according to the
2477 | * specified CRC-8.
2478 | */ | |
2479 | static void mvneta_set_other_mcast_addr(struct mvneta_port *pp, | |
2480 | unsigned char crc8, | |
2481 | int queue) | |
2482 | { | |
2483 | unsigned int omc_table_reg; | |
2484 | unsigned int tbl_offset; | |
2485 | unsigned int reg_offset; | |
2486 | ||
2487 | tbl_offset = (crc8 / 4) * 4; /* Register offset from OMC table base */ | |
2488 | reg_offset = crc8 % 4; /* Entry offset within the above reg */ | |
2489 | ||
2490 | omc_table_reg = mvreg_read(pp, MVNETA_DA_FILT_OTH_MCAST + tbl_offset); | |
2491 | ||
2492 | if (queue == -1) { | |
2493 | /* Clear accepts frame bit at specified Other DA table entry */ | |
2494 | omc_table_reg &= ~(0xff << (8 * reg_offset)); | |
2495 | } else { | |
2496 | omc_table_reg &= ~(0xff << (8 * reg_offset)); | |
2497 | omc_table_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset)); | |
2498 | } | |
2499 | ||
2500 | mvreg_write(pp, MVNETA_DA_FILT_OTH_MCAST + tbl_offset, omc_table_reg); | |
2501 | } | |
2502 | ||
2503 | /* The network device supports multicast using two tables: | |
2504 | * 1) Special Multicast Table for MAC addresses of the form | |
2505 | * 0x01-00-5E-00-00-XX (where XX is between 0x00 and 0xFF). | |
2506 | * The MAC DA[7:0] bits are used as a pointer to the Special Multicast | |
2507 | * Table entries in the DA-Filter table. | |
2508 | * 2) Other Multicast Table for multicast of another type. A CRC-8 value | |
2509 | * is used as an index to the Other Multicast Table entries in the | |
2510 | * DA-Filter table. | |
2511 | */ | |
2512 | static int mvneta_mcast_addr_set(struct mvneta_port *pp, unsigned char *p_addr, | |
2513 | int queue) | |
2514 | { | |
2515 | unsigned char crc_result = 0; | |
2516 | ||
2517 | if (memcmp(p_addr, "\x01\x00\x5e\x00\x00", 5) == 0) { | |
2518 | mvneta_set_special_mcast_addr(pp, p_addr[5], queue); | |
2519 | return 0; | |
2520 | } | |
2521 | ||
2522 | crc_result = mvneta_addr_crc(p_addr); | |
2523 | if (queue == -1) { | |
2524 | if (pp->mcast_count[crc_result] == 0) { | |
2525 | netdev_info(pp->dev, "No valid Mcast for crc8=0x%02x\n", | |
2526 | crc_result); | |
2527 | return -EINVAL; | |
2528 | } | |
2529 | ||
2530 | pp->mcast_count[crc_result]--; | |
2531 | if (pp->mcast_count[crc_result] != 0) { | |
2532 | netdev_info(pp->dev, | |
2533 | "After delete there are %d valid Mcast for crc8=0x%02x\n", | |
2534 | pp->mcast_count[crc_result], crc_result); | |
2535 | return -EINVAL; | |
2536 | } | |
2537 | } else | |
2538 | pp->mcast_count[crc_result]++; | |
2539 | ||
2540 | mvneta_set_other_mcast_addr(pp, crc_result, queue); | |
2541 | ||
2542 | return 0; | |
2543 | } | |
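/* Editor's note: queue == -1 means "delete". The per-CRC reference count
 * above keeps a table entry alive while several multicast addresses
 * collide on the same CRC-8, and only clears it once the last reference
 * is removed.
 */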
2544 | ||
2545 | /* Configure Filtering mode of Ethernet port */
2546 | static void mvneta_rx_unicast_promisc_set(struct mvneta_port *pp, | |
2547 | int is_promisc) | |
2548 | { | |
2549 | u32 port_cfg_reg, val; | |
2550 | ||
2551 | port_cfg_reg = mvreg_read(pp, MVNETA_PORT_CONFIG); | |
2552 | ||
2553 | val = mvreg_read(pp, MVNETA_TYPE_PRIO); | |
2554 | ||
2555 | /* Set / Clear UPM bit in port configuration register */ | |
2556 | if (is_promisc) { | |
2557 | /* Accept all Unicast addresses */ | |
2558 | port_cfg_reg |= MVNETA_UNI_PROMISC_MODE; | |
2559 | val |= MVNETA_FORCE_UNI; | |
2560 | mvreg_write(pp, MVNETA_MAC_ADDR_LOW, 0xffff); | |
2561 | mvreg_write(pp, MVNETA_MAC_ADDR_HIGH, 0xffffffff); | |
2562 | } else { | |
2563 | /* Reject all Unicast addresses */ | |
2564 | port_cfg_reg &= ~MVNETA_UNI_PROMISC_MODE; | |
2565 | val &= ~MVNETA_FORCE_UNI; | |
2566 | } | |
2567 | ||
2568 | mvreg_write(pp, MVNETA_PORT_CONFIG, port_cfg_reg); | |
2569 | mvreg_write(pp, MVNETA_TYPE_PRIO, val); | |
2570 | } | |
2571 | ||
2572 | /* register unicast and multicast addresses */ | |
2573 | static void mvneta_set_rx_mode(struct net_device *dev) | |
2574 | { | |
2575 | struct mvneta_port *pp = netdev_priv(dev); | |
2576 | struct netdev_hw_addr *ha; | |
2577 | ||
2578 | if (dev->flags & IFF_PROMISC) { | |
2579 | /* Accept all: Multicast + Unicast */ | |
2580 | mvneta_rx_unicast_promisc_set(pp, 1); | |
90b74c01 GC |
2581 | mvneta_set_ucast_table(pp, pp->rxq_def); |
2582 | mvneta_set_special_mcast_table(pp, pp->rxq_def); | |
2583 | mvneta_set_other_mcast_table(pp, pp->rxq_def); | |
c5aff182 TP |
2584 | } else { |
2585 | /* Accept single Unicast */ | |
2586 | mvneta_rx_unicast_promisc_set(pp, 0); | |
2587 | mvneta_set_ucast_table(pp, -1); | |
90b74c01 | 2588 | mvneta_mac_addr_set(pp, dev->dev_addr, pp->rxq_def); |
c5aff182 TP |
2589 | |
2590 | if (dev->flags & IFF_ALLMULTI) { | |
2591 | /* Accept all multicast */ | |
90b74c01 GC |
2592 | mvneta_set_special_mcast_table(pp, pp->rxq_def); |
2593 | mvneta_set_other_mcast_table(pp, pp->rxq_def); | |
c5aff182 TP |
2594 | } else { |
2595 | /* Accept only initialized multicast */ | |
2596 | mvneta_set_special_mcast_table(pp, -1); | |
2597 | mvneta_set_other_mcast_table(pp, -1); | |
2598 | ||
2599 | if (!netdev_mc_empty(dev)) { | |
2600 | netdev_for_each_mc_addr(ha, dev) { | |
2601 | mvneta_mcast_addr_set(pp, ha->addr, | |
90b74c01 | 2602 | pp->rxq_def); |
c5aff182 TP |
2603 | } |
2604 | } | |
2605 | } | |
2606 | } | |
2607 | } | |
2608 | ||
2609 | /* Interrupt handling - the callback for request_irq() */ | |
2610 | static irqreturn_t mvneta_isr(int irq, void *dev_id) | |
2611 | { | |
12bb03b4 | 2612 | struct mvneta_pcpu_port *port = (struct mvneta_pcpu_port *)dev_id; |
c5aff182 | 2613 | |
12bb03b4 | 2614 | disable_percpu_irq(port->pp->dev->irq); |
12bb03b4 | 2615 | napi_schedule(&port->napi); |
c5aff182 TP |
2616 | |
2617 | return IRQ_HANDLED; | |
2618 | } | |
2619 | ||
898b2970 SS |
2620 | static int mvneta_fixed_link_update(struct mvneta_port *pp, |
2621 | struct phy_device *phy) | |
2622 | { | |
2623 | struct fixed_phy_status status; | |
2624 | struct fixed_phy_status changed = {}; | |
2625 | u32 gmac_stat = mvreg_read(pp, MVNETA_GMAC_STATUS); | |
2626 | ||
2627 | status.link = !!(gmac_stat & MVNETA_GMAC_LINK_UP); | |
2628 | if (gmac_stat & MVNETA_GMAC_SPEED_1000) | |
2629 | status.speed = SPEED_1000; | |
2630 | else if (gmac_stat & MVNETA_GMAC_SPEED_100) | |
2631 | status.speed = SPEED_100; | |
2632 | else | |
2633 | status.speed = SPEED_10; | |
2634 | status.duplex = !!(gmac_stat & MVNETA_GMAC_FULL_DUPLEX); | |
2635 | changed.link = 1; | |
2636 | changed.speed = 1; | |
2637 | changed.duplex = 1; | |
2638 | fixed_phy_update_state(phy, &status, &changed); | |
2639 | return 0; | |
2640 | } | |
2641 | ||
c5aff182 TP |
2642 | /* NAPI handler |
2643 | * Bits 0 - 7 of the causeRxTx register indicate that packets were transmitted
2644 | * on the corresponding TXQ (Bit 0 is for TX queue 1).
2645 | * Bits 8 - 15 of the causeRxTx register indicate that packets were received
2646 | * on the corresponding RXQ (Bit 8 is for RX queue 0).
2647 | * Each CPU has its own causeRxTx register | |
2648 | */ | |
2649 | static int mvneta_poll(struct napi_struct *napi, int budget) | |
2650 | { | |
2651 | int rx_done = 0; | |
2652 | u32 cause_rx_tx; | |
2dcf75e2 | 2653 | int rx_queue; |
c5aff182 | 2654 | struct mvneta_port *pp = netdev_priv(napi->dev); |
12bb03b4 | 2655 | struct mvneta_pcpu_port *port = this_cpu_ptr(pp->ports); |
c5aff182 TP |
2656 | |
2657 | if (!netif_running(pp->dev)) { | |
12bb03b4 | 2658 | napi_complete(&port->napi); |
c5aff182 TP |
2659 | return rx_done; |
2660 | } | |
2661 | ||
2662 | /* Read cause register */ | |
898b2970 SS |
2663 | cause_rx_tx = mvreg_read(pp, MVNETA_INTR_NEW_CAUSE); |
2664 | if (cause_rx_tx & MVNETA_MISCINTR_INTR_MASK) { | |
2665 | u32 cause_misc = mvreg_read(pp, MVNETA_INTR_MISC_CAUSE); | |
2666 | ||
2667 | mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0); | |
2668 | if (pp->use_inband_status && (cause_misc & | |
2669 | (MVNETA_CAUSE_PHY_STATUS_CHANGE | | |
2670 | MVNETA_CAUSE_LINK_CHANGE | | |
2671 | MVNETA_CAUSE_PSC_SYNC_CHANGE))) { | |
2672 | mvneta_fixed_link_update(pp, pp->phy_dev); | |
2673 | } | |
2674 | } | |
71f6d1b3 | 2675 | |
2676 | /* Release Tx descriptors */ | |
2677 | if (cause_rx_tx & MVNETA_TX_INTR_MASK_ALL) { | |
0713a86a | 2678 | mvneta_tx_done_gbe(pp, (cause_rx_tx & MVNETA_TX_INTR_MASK_ALL)); |
71f6d1b3 | 2679 | cause_rx_tx &= ~MVNETA_TX_INTR_MASK_ALL; |
2680 | } | |
c5aff182 | 2681 | |
6a20c175 | 2682 | /* For the case where the last mvneta_poll did not process all |
c5aff182 TP |
2683 | * RX packets |
2684 | */ | |
2dcf75e2 GC |
2685 | rx_queue = fls(((cause_rx_tx >> 8) & 0xff)); |
2686 | ||
12bb03b4 | 2687 | cause_rx_tx |= port->cause_rx_tx; |
2dcf75e2 GC |
2688 | |
2689 | if (rx_queue) { | |
2690 | rx_queue = rx_queue - 1; | |
dc35a10f MW |
2691 | if (pp->bm_priv) |
2692 | rx_done = mvneta_rx_hwbm(pp, budget, &pp->rxqs[rx_queue]); | |
2693 | else | |
2694 | rx_done = mvneta_rx_swbm(pp, budget, &pp->rxqs[rx_queue]); | |
2dcf75e2 GC |
2695 | } |
2696 | ||
d8936657 | 2697 | budget -= rx_done; |
c5aff182 TP |
2698 | |
2699 | if (budget > 0) { | |
2700 | cause_rx_tx = 0; | |
12bb03b4 MR |
2701 | napi_complete(&port->napi); |
2702 | enable_percpu_irq(pp->dev->irq, 0); | |
c5aff182 TP |
2703 | } |
2704 | ||
12bb03b4 | 2705 | port->cause_rx_tx = cause_rx_tx; |
c5aff182 TP |
2706 | return rx_done; |
2707 | } | |
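/* Editor's note: this follows the standard NAPI contract. When fewer than
 * <budget> packets were processed the poll completes and re-enables the
 * per-CPU interrupt; otherwise the unhandled cause bits are parked in
 * port->cause_rx_tx and the softirq will invoke mvneta_poll() again.
 */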
2708 | ||
c5aff182 TP |
2709 | /* Handle rxq fill: allocates rxq skbs; called when initializing a port */ |
2710 | static int mvneta_rxq_fill(struct mvneta_port *pp, struct mvneta_rx_queue *rxq, | |
2711 | int num) | |
2712 | { | |
c5aff182 TP |
2713 | int i; |
2714 | ||
2715 | for (i = 0; i < num; i++) { | |
a1a65ab1 | 2716 | memset(rxq->descs + i, 0, sizeof(struct mvneta_rx_desc)); |
2717 | if (mvneta_rx_refill(pp, rxq->descs + i) != 0) { | |
2718 | netdev_err(pp->dev, "%s:rxq %d, %d of %d buffs filled\n", | |
c5aff182 TP |
2719 | __func__, rxq->id, i, num); |
2720 | break; | |
2721 | } | |
c5aff182 TP |
2722 | } |
2723 | ||
2724 | /* Add this number of RX descriptors as non-occupied (ready to
6a20c175 TP |
2725 | * get packets) |
2726 | */ | |
c5aff182 TP |
2727 | mvneta_rxq_non_occup_desc_add(pp, rxq, i); |
2728 | ||
2729 | return i; | |
2730 | } | |
2731 | ||
2732 | /* Free all packets pending transmit from all TXQs and reset TX port */ | |
2733 | static void mvneta_tx_reset(struct mvneta_port *pp) | |
2734 | { | |
2735 | int queue; | |
2736 | ||
9672850b | 2737 | /* free the skb's in the tx ring */ |
c5aff182 TP |
2738 | for (queue = 0; queue < txq_number; queue++) |
2739 | mvneta_txq_done_force(pp, &pp->txqs[queue]); | |
2740 | ||
2741 | mvreg_write(pp, MVNETA_PORT_TX_RESET, MVNETA_PORT_TX_DMA_RESET); | |
2742 | mvreg_write(pp, MVNETA_PORT_TX_RESET, 0); | |
2743 | } | |
2744 | ||
2745 | static void mvneta_rx_reset(struct mvneta_port *pp) | |
2746 | { | |
2747 | mvreg_write(pp, MVNETA_PORT_RX_RESET, MVNETA_PORT_RX_DMA_RESET); | |
2748 | mvreg_write(pp, MVNETA_PORT_RX_RESET, 0); | |
2749 | } | |
2750 | ||
2751 | /* Rx/Tx queue initialization/cleanup methods */ | |
2752 | ||
2753 | /* Create a specified RX queue */ | |
2754 | static int mvneta_rxq_init(struct mvneta_port *pp, | |
2755 | struct mvneta_rx_queue *rxq) | |
2756 | ||
2757 | { | |
2758 | rxq->size = pp->rx_ring_size; | |
2759 | ||
2760 | /* Allocate memory for RX descriptors */ | |
2761 | rxq->descs = dma_alloc_coherent(pp->dev->dev.parent, | |
2762 | rxq->size * MVNETA_DESC_ALIGNED_SIZE, | |
2763 | &rxq->descs_phys, GFP_KERNEL); | |
d0320f75 | 2764 | if (rxq->descs == NULL) |
c5aff182 | 2765 | return -ENOMEM; |
c5aff182 TP |
2766 | |
2767 | BUG_ON(rxq->descs != | |
2768 | PTR_ALIGN(rxq->descs, MVNETA_CPU_D_CACHE_LINE_SIZE)); | |
2769 | ||
2770 | rxq->last_desc = rxq->size - 1; | |
2771 | ||
2772 | /* Set Rx descriptors queue starting address */ | |
2773 | mvreg_write(pp, MVNETA_RXQ_BASE_ADDR_REG(rxq->id), rxq->descs_phys); | |
2774 | mvreg_write(pp, MVNETA_RXQ_SIZE_REG(rxq->id), rxq->size); | |
2775 | ||
2776 | /* Set Offset */ | |
2777 | mvneta_rxq_offset_set(pp, rxq, NET_SKB_PAD); | |
2778 | ||
2779 | /* Set coalescing pkts and time */ | |
2780 | mvneta_rx_pkts_coal_set(pp, rxq, rxq->pkts_coal); | |
2781 | mvneta_rx_time_coal_set(pp, rxq, rxq->time_coal); | |
2782 | ||
dc35a10f MW |
2783 | if (!pp->bm_priv) { |
2784 | /* Fill RXQ with buffers from RX pool */ | |
2785 | mvneta_rxq_buf_size_set(pp, rxq, | |
2786 | MVNETA_RX_BUF_SIZE(pp->pkt_size)); | |
2787 | mvneta_rxq_bm_disable(pp, rxq); | |
2788 | } else { | |
2789 | mvneta_rxq_bm_enable(pp, rxq); | |
2790 | mvneta_rxq_long_pool_set(pp, rxq); | |
2791 | mvneta_rxq_short_pool_set(pp, rxq); | |
2792 | } | |
2793 | ||
c5aff182 TP |
2794 | mvneta_rxq_fill(pp, rxq, rxq->size); |
2795 | ||
2796 | return 0; | |
2797 | } | |
2798 | ||
2799 | /* Cleanup Rx queue */ | |
2800 | static void mvneta_rxq_deinit(struct mvneta_port *pp, | |
2801 | struct mvneta_rx_queue *rxq) | |
2802 | { | |
2803 | mvneta_rxq_drop_pkts(pp, rxq); | |
2804 | ||
2805 | if (rxq->descs) | |
2806 | dma_free_coherent(pp->dev->dev.parent, | |
2807 | rxq->size * MVNETA_DESC_ALIGNED_SIZE, | |
2808 | rxq->descs, | |
2809 | rxq->descs_phys); | |
2810 | ||
2811 | rxq->descs = NULL; | |
2812 | rxq->last_desc = 0; | |
2813 | rxq->next_desc_to_proc = 0; | |
2814 | rxq->descs_phys = 0; | |
2815 | } | |
2816 | ||
2817 | /* Create and initialize a tx queue */ | |
2818 | static int mvneta_txq_init(struct mvneta_port *pp, | |
2819 | struct mvneta_tx_queue *txq) | |
2820 | { | |
50bf8cb6 GC |
2821 | int cpu; |
2822 | ||
c5aff182 TP |
2823 | txq->size = pp->tx_ring_size; |
2824 | ||
8eef5f97 EG |
2825 | /* A queue must always have room for at least one skb. |
2826 | * Therefore, stop the queue when the number of free entries reaches
2827 | * the maximum number of descriptors per skb. | |
2828 | */ | |
2829 | txq->tx_stop_threshold = txq->size - MVNETA_MAX_SKB_DESCS; | |
2830 | txq->tx_wake_threshold = txq->tx_stop_threshold / 2; | |
2831 | ||
2832 | ||
c5aff182 TP |
2833 | /* Allocate memory for TX descriptors */ |
2834 | txq->descs = dma_alloc_coherent(pp->dev->dev.parent, | |
2835 | txq->size * MVNETA_DESC_ALIGNED_SIZE, | |
2836 | &txq->descs_phys, GFP_KERNEL); | |
d0320f75 | 2837 | if (txq->descs == NULL) |
c5aff182 | 2838 | return -ENOMEM; |
c5aff182 TP |
2839 | |
2840 | /* Make sure descriptor address is cache line size aligned */ | |
2841 | BUG_ON(txq->descs != | |
2842 | PTR_ALIGN(txq->descs, MVNETA_CPU_D_CACHE_LINE_SIZE)); | |
2843 | ||
2844 | txq->last_desc = txq->size - 1; | |
2845 | ||
2846 | /* Set maximum bandwidth for enabled TXQs */ | |
2847 | mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(txq->id), 0x03ffffff); | |
2848 | mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(txq->id), 0x3fffffff); | |
2849 | ||
2850 | /* Set Tx descriptors queue starting address */ | |
2851 | mvreg_write(pp, MVNETA_TXQ_BASE_ADDR_REG(txq->id), txq->descs_phys); | |
2852 | mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), txq->size); | |
2853 | ||
2854 | txq->tx_skb = kmalloc(txq->size * sizeof(*txq->tx_skb), GFP_KERNEL); | |
2855 | if (txq->tx_skb == NULL) { | |
2856 | dma_free_coherent(pp->dev->dev.parent, | |
2857 | txq->size * MVNETA_DESC_ALIGNED_SIZE, | |
2858 | txq->descs, txq->descs_phys); | |
2859 | return -ENOMEM; | |
2860 | } | |
2adb719d EG |
2861 | |
2862 | /* Allocate DMA buffers for TSO MAC/IP/TCP headers */ | |
2863 | txq->tso_hdrs = dma_alloc_coherent(pp->dev->dev.parent, | |
2864 | txq->size * TSO_HEADER_SIZE, | |
2865 | &txq->tso_hdrs_phys, GFP_KERNEL); | |
2866 | if (txq->tso_hdrs == NULL) { | |
2867 | kfree(txq->tx_skb); | |
2868 | dma_free_coherent(pp->dev->dev.parent, | |
2869 | txq->size * MVNETA_DESC_ALIGNED_SIZE, | |
2870 | txq->descs, txq->descs_phys); | |
2871 | return -ENOMEM; | |
2872 | } | |
c5aff182 TP |
2873 | mvneta_tx_done_pkts_coal_set(pp, txq, txq->done_pkts_coal); |
2874 | ||
50bf8cb6 GC |
2875 | /* Setup XPS mapping */ |
2876 | if (txq_number > 1) | |
2877 | cpu = txq->id % num_present_cpus(); | |
2878 | else | |
2879 | cpu = pp->rxq_def % num_present_cpus(); | |
2880 | cpumask_set_cpu(cpu, &txq->affinity_mask); | |
2881 | netif_set_xps_queue(pp->dev, &txq->affinity_mask, txq->id); | |
2882 | ||
c5aff182 TP |
2883 | return 0; |
2884 | } | |
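/* Worked example (editor's note, assuming the default 532-entry TX ring):
 * tx_stop_threshold = 532 - MVNETA_MAX_SKB_DESCS and tx_wake_threshold is
 * half of that, so the queue stops while a worst-case skb can still be
 * enqueued and wakes only after draining well below the stop mark,
 * avoiding stop/wake ping-pong.
 */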
2885 | ||
2886 | /* Free the resources allocated for a tx queue (also used when mvneta_txq_init() fails) */
2887 | static void mvneta_txq_deinit(struct mvneta_port *pp, | |
2888 | struct mvneta_tx_queue *txq) | |
2889 | { | |
2890 | kfree(txq->tx_skb); | |
2891 | ||
2adb719d EG |
2892 | if (txq->tso_hdrs) |
2893 | dma_free_coherent(pp->dev->dev.parent, | |
2894 | txq->size * TSO_HEADER_SIZE, | |
2895 | txq->tso_hdrs, txq->tso_hdrs_phys); | |
c5aff182 TP |
2896 | if (txq->descs) |
2897 | dma_free_coherent(pp->dev->dev.parent, | |
2898 | txq->size * MVNETA_DESC_ALIGNED_SIZE, | |
2899 | txq->descs, txq->descs_phys); | |
2900 | ||
2901 | txq->descs = NULL; | |
2902 | txq->last_desc = 0; | |
2903 | txq->next_desc_to_proc = 0; | |
2904 | txq->descs_phys = 0; | |
2905 | ||
2906 | /* Set minimum bandwidth for disabled TXQs */ | |
2907 | mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(txq->id), 0); | |
2908 | mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(txq->id), 0); | |
2909 | ||
2910 | /* Set Tx descriptors queue starting address and size */ | |
2911 | mvreg_write(pp, MVNETA_TXQ_BASE_ADDR_REG(txq->id), 0); | |
2912 | mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), 0); | |
2913 | } | |
2914 | ||
2915 | /* Cleanup all Tx queues */ | |
2916 | static void mvneta_cleanup_txqs(struct mvneta_port *pp) | |
2917 | { | |
2918 | int queue; | |
2919 | ||
2920 | for (queue = 0; queue < txq_number; queue++) | |
2921 | mvneta_txq_deinit(pp, &pp->txqs[queue]); | |
2922 | } | |
2923 | ||
2924 | /* Cleanup all Rx queues */ | |
2925 | static void mvneta_cleanup_rxqs(struct mvneta_port *pp) | |
2926 | { | |
2dcf75e2 GC |
2927 | int queue; |
2928 | ||
2929 | for (queue = 0; queue < rxq_number; queue++)
2930 | mvneta_rxq_deinit(pp, &pp->rxqs[queue]); | |
c5aff182 TP |
2931 | } |
2932 | ||
2933 | ||
2934 | /* Init all Rx queues */ | |
2935 | static int mvneta_setup_rxqs(struct mvneta_port *pp) | |
2936 | { | |
2dcf75e2 GC |
2937 | int queue; |
2938 | ||
2939 | for (queue = 0; queue < rxq_number; queue++) { | |
2940 | int err = mvneta_rxq_init(pp, &pp->rxqs[queue]); | |
2941 | ||
2942 | if (err) { | |
2943 | netdev_err(pp->dev, "%s: can't create rxq=%d\n", | |
2944 | __func__, queue); | |
2945 | mvneta_cleanup_rxqs(pp); | |
2946 | return err; | |
2947 | } | |
c5aff182 TP |
2948 | } |
2949 | ||
2950 | return 0; | |
2951 | } | |
2952 | ||
2953 | /* Init all Tx queues */ | |
2954 | static int mvneta_setup_txqs(struct mvneta_port *pp) | |
2955 | { | |
2956 | int queue; | |
2957 | ||
2958 | for (queue = 0; queue < txq_number; queue++) { | |
2959 | int err = mvneta_txq_init(pp, &pp->txqs[queue]); | |
2960 | if (err) { | |
2961 | netdev_err(pp->dev, "%s: can't create txq=%d\n", | |
2962 | __func__, queue); | |
2963 | mvneta_cleanup_txqs(pp); | |
2964 | return err; | |
2965 | } | |
2966 | } | |
2967 | ||
2968 | return 0; | |
2969 | } | |
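/* Aside: an illustrative, standalone sketch (not driver code) of the
 * all-or-nothing pattern used by mvneta_setup_rxqs()/mvneta_setup_txqs():
 * initialize each queue in order and, on the first failure, tear down
 * whatever was already brought up before propagating the error. The names
 * below (toy_queue, toy_queue_init, ...) are hypothetical. Note that the
 * driver's cleanup helpers tolerate never-initialized queues because the
 * queue arrays come from devm_kcalloc() and are therefore zero-filled.
 */
#include <stdio.h>
#include <stdlib.h>

struct toy_queue { void *buf; };

static int toy_queue_init(struct toy_queue *q)
{
	q->buf = malloc(64);
	return q->buf ? 0 : -1;
}

static void toy_queue_deinit(struct toy_queue *q)
{
	free(q->buf);	/* free(NULL) is a no-op, so this is rollback-safe */
	q->buf = NULL;
}

static int toy_setup_queues(struct toy_queue *qs, int n)
{
	for (int i = 0; i < n; i++) {
		if (toy_queue_init(&qs[i])) {
			while (--i >= 0)	/* unwind queues 0..i-1 */
				toy_queue_deinit(&qs[i]);
			return -1;
		}
	}
	return 0;
}

int main(void)
{
	struct toy_queue qs[4] = { { 0 } };

	printf("setup: %d\n", toy_setup_queues(qs, 4));
	return 0;
}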
2970 | ||
2971 | static void mvneta_start_dev(struct mvneta_port *pp) | |
2972 | { | |
6b125d63 | 2973 | int cpu; |
12bb03b4 | 2974 | |
c5aff182 TP |
2975 | mvneta_max_rx_size_set(pp, pp->pkt_size); |
2976 | mvneta_txq_max_tx_size_set(pp, pp->pkt_size); | |
2977 | ||
2978 | /* start the Rx/Tx activity */ | |
2979 | mvneta_port_enable(pp); | |
2980 | ||
2981 | /* Enable polling on the port */ | |
129219e4 | 2982 | for_each_online_cpu(cpu) { |
12bb03b4 MR |
2983 | struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu); |
2984 | ||
2985 | napi_enable(&port->napi); | |
2986 | } | |
c5aff182 | 2987 | |
2dcf75e2 | 2988 | /* Unmask interrupts. It has to be done from each CPU */ |
6b125d63 GC |
2989 | on_each_cpu(mvneta_percpu_unmask_interrupt, pp, true); |
2990 | ||
898b2970 SS |
2991 | mvreg_write(pp, MVNETA_INTR_MISC_MASK, |
2992 | MVNETA_CAUSE_PHY_STATUS_CHANGE | | |
2993 | MVNETA_CAUSE_LINK_CHANGE | | |
2994 | MVNETA_CAUSE_PSC_SYNC_CHANGE); | |
c5aff182 TP |
2995 | |
2996 | phy_start(pp->phy_dev); | |
2997 | netif_tx_start_all_queues(pp->dev); | |
2998 | } | |
2999 | ||
3000 | static void mvneta_stop_dev(struct mvneta_port *pp) | |
3001 | { | |
12bb03b4 MR |
3002 | unsigned int cpu; |
3003 | ||
c5aff182 TP |
3004 | phy_stop(pp->phy_dev); |
3005 | ||
129219e4 | 3006 | for_each_online_cpu(cpu) { |
12bb03b4 MR |
3007 | struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu); |
3008 | ||
3009 | napi_disable(&port->napi); | |
3010 | } | |
c5aff182 TP |
3011 | |
3012 | netif_carrier_off(pp->dev); | |
3013 | ||
3014 | mvneta_port_down(pp); | |
3015 | netif_tx_stop_all_queues(pp->dev); | |
3016 | ||
3017 | /* Stop the port activity */ | |
3018 | mvneta_port_disable(pp); | |
3019 | ||
3020 | /* Clear all ethernet port interrupts */ | |
db488c10 | 3021 | on_each_cpu(mvneta_percpu_clear_intr_cause, pp, true); |
c5aff182 TP |
3022 | |
3023 | /* Mask all ethernet port interrupts */ | |
db488c10 | 3024 | on_each_cpu(mvneta_percpu_mask_interrupt, pp, true); |
c5aff182 TP |
3025 | |
3026 | mvneta_tx_reset(pp); | |
3027 | mvneta_rx_reset(pp); | |
3028 | } | |
3029 | ||
c5aff182 TP |
3030 | /* Return the (possibly adjusted) MTU if valid, or -EINVAL */ | |
3031 | static int mvneta_check_mtu_valid(struct net_device *dev, int mtu) | |
3032 | { | |
3033 | if (mtu < 68) { | |
3034 | netdev_err(dev, "cannot change mtu to less than 68\n"); | |
3035 | return -EINVAL; | |
3036 | } | |
3037 | ||
6a20c175 | 3038 | /* 9676 == 9700 - 20; the rounding to a multiple of 8 is handled below */ |
c5aff182 TP |
3039 | if (mtu > 9676) { |
3040 | netdev_info(dev, "Illegal MTU value %d, rounding to 9676\n", mtu); | |
3041 | mtu = 9676; | |
3042 | } | |
3043 | ||
3044 | if (!IS_ALIGNED(MVNETA_RX_PKT_SIZE(mtu), 8)) { | |
3045 | netdev_info(dev, "Illegal MTU value %d, rounding to %d\n", | |
3046 | mtu, ALIGN(MVNETA_RX_PKT_SIZE(mtu), 8)); | |
3047 | mtu = ALIGN(MVNETA_RX_PKT_SIZE(mtu), 8); | |
3048 | } | |
3049 | ||
3050 | return mtu; | |
3051 | } | |
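/* Aside: a standalone sketch of the arithmetic in mvneta_check_mtu_valid().
 * ALIGN()/IS_ALIGNED() mirror the kernel helpers for power-of-two
 * alignment; RX_PKT_SIZE() is a hypothetical stand-in for
 * MVNETA_RX_PKT_SIZE(), whose real definition lives elsewhere in the
 * driver, so the exact numbers below are for illustration only.
 */
#include <stdio.h>

#define ALIGN(x, a)		(((x) + (a) - 1) & ~((a) - 1))
#define IS_ALIGNED(x, a)	(((x) & ((a) - 1)) == 0)
#define RX_PKT_SIZE(mtu)	((mtu) + 24)	/* assumed fixed L2 overhead */

static int check_mtu(int mtu)
{
	if (mtu < 68)
		return -1;	/* reject: below the historic IPv4 minimum */
	if (mtu > 9676)
		mtu = 9676;	/* clamp to the hardware maximum */
	/* As in the driver, the aligned packet size is written back to mtu */
	if (!IS_ALIGNED(RX_PKT_SIZE(mtu), 8))
		mtu = ALIGN(RX_PKT_SIZE(mtu), 8);
	return mtu;
}

int main(void)
{
	printf("%d %d %d\n", check_mtu(40), check_mtu(1500), check_mtu(10000));
	return 0;
}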
3052 | ||
3053 | /* Change the device mtu */ | |
3054 | static int mvneta_change_mtu(struct net_device *dev, int mtu) | |
3055 | { | |
3056 | struct mvneta_port *pp = netdev_priv(dev); | |
3057 | int ret; | |
3058 | ||
3059 | mtu = mvneta_check_mtu_valid(dev, mtu); | |
3060 | if (mtu < 0) | |
3061 | return -EINVAL; | |
3062 | ||
3063 | dev->mtu = mtu; | |
3064 | ||
b65657fc | 3065 | if (!netif_running(dev)) { |
dc35a10f MW |
3066 | if (pp->bm_priv) |
3067 | mvneta_bm_update_mtu(pp, mtu); | |
3068 | ||
b65657fc | 3069 | netdev_update_features(dev); |
c5aff182 | 3070 | return 0; |
b65657fc | 3071 | } |
c5aff182 | 3072 | |
6a20c175 | 3073 | /* The interface is running, so we have to force a |
a92dbd96 | 3074 | * reallocation of the queues |
c5aff182 TP |
3075 | */ |
3076 | mvneta_stop_dev(pp); | |
3077 | ||
3078 | mvneta_cleanup_txqs(pp); | |
3079 | mvneta_cleanup_rxqs(pp); | |
3080 | ||
dc35a10f MW |
3081 | if (pp->bm_priv) |
3082 | mvneta_bm_update_mtu(pp, mtu); | |
3083 | ||
a92dbd96 | 3084 | pp->pkt_size = MVNETA_RX_PKT_SIZE(dev->mtu); |
8ec2cd48 | 3085 | pp->frag_size = SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(pp->pkt_size)) + |
3086 | SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); | |
c5aff182 TP |
3087 | |
3088 | ret = mvneta_setup_rxqs(pp); | |
3089 | if (ret) { | |
a92dbd96 | 3090 | netdev_err(dev, "unable to setup rxqs after MTU change\n"); |
c5aff182 TP |
3091 | return ret; |
3092 | } | |
3093 | ||
a92dbd96 EG |
3094 | ret = mvneta_setup_txqs(pp); |
3095 | if (ret) { | |
3096 | netdev_err(dev, "unable to setup txqs after MTU change\n"); | |
3097 | return ret; | |
3098 | } | |
c5aff182 TP |
3099 | |
3100 | mvneta_start_dev(pp); | |
3101 | mvneta_port_up(pp); | |
3102 | ||
b65657fc SG |
3103 | netdev_update_features(dev); |
3104 | ||
c5aff182 TP |
3105 | return 0; |
3106 | } | |
3107 | ||
b65657fc SG |
3108 | static netdev_features_t mvneta_fix_features(struct net_device *dev, |
3109 | netdev_features_t features) | |
3110 | { | |
3111 | struct mvneta_port *pp = netdev_priv(dev); | |
3112 | ||
3113 | if (pp->tx_csum_limit && dev->mtu > pp->tx_csum_limit) { | |
3114 | features &= ~(NETIF_F_IP_CSUM | NETIF_F_TSO); | |
3115 | netdev_info(dev, | |
3116 | "Disable IP checksum for MTU greater than %dB\n", | |
3117 | pp->tx_csum_limit); | |
3118 | } | |
3119 | ||
3120 | return features; | |
3121 | } | |
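/* Aside: a standalone sketch of the masking in mvneta_fix_features().
 * The flag values below are hypothetical stand-ins for NETIF_F_IP_CSUM
 * and NETIF_F_TSO; only the bitwise logic is taken from the driver.
 */
#include <stdint.h>
#include <stdio.h>

#define F_IP_CSUM	(1u << 0)	/* assumed bit positions */
#define F_TSO		(1u << 1)

static uint32_t fix_features(uint32_t features, int mtu, int tx_csum_limit)
{
	if (tx_csum_limit && mtu > tx_csum_limit)
		features &= ~(F_IP_CSUM | F_TSO);	/* fall back to software */
	return features;
}

int main(void)
{
	printf("0x%x\n", fix_features(F_IP_CSUM | F_TSO, 9000, 1600));
	return 0;
}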
3122 | ||
8cc3e439 TP |
3123 | /* Get mac address */ |
3124 | static void mvneta_get_mac_addr(struct mvneta_port *pp, unsigned char *addr) | |
3125 | { | |
3126 | u32 mac_addr_l, mac_addr_h; | |
3127 | ||
3128 | mac_addr_l = mvreg_read(pp, MVNETA_MAC_ADDR_LOW); | |
3129 | mac_addr_h = mvreg_read(pp, MVNETA_MAC_ADDR_HIGH); | |
3130 | addr[0] = (mac_addr_h >> 24) & 0xFF; | |
3131 | addr[1] = (mac_addr_h >> 16) & 0xFF; | |
3132 | addr[2] = (mac_addr_h >> 8) & 0xFF; | |
3133 | addr[3] = mac_addr_h & 0xFF; | |
3134 | addr[4] = (mac_addr_l >> 8) & 0xFF; | |
3135 | addr[5] = mac_addr_l & 0xFF; | |
3136 | } | |
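/* Aside: a standalone sketch of the register layout decoded by
 * mvneta_get_mac_addr(): bytes 0..3 of the address sit in the high
 * register (most significant byte first) and bytes 4..5 in the low one.
 * pack_mac() is the inverse, inferred from that layout rather than
 * copied from the driver.
 */
#include <stdint.h>
#include <stdio.h>

static void unpack_mac(uint32_t hi, uint32_t lo, uint8_t a[6])
{
	a[0] = hi >> 24; a[1] = hi >> 16; a[2] = hi >> 8; a[3] = hi;
	a[4] = lo >> 8;  a[5] = lo;
}

static void pack_mac(const uint8_t a[6], uint32_t *hi, uint32_t *lo)
{
	*hi = (uint32_t)a[0] << 24 | a[1] << 16 | a[2] << 8 | a[3];
	*lo = (uint32_t)a[4] << 8 | a[5];
}

int main(void)
{
	uint8_t mac[6];
	uint32_t hi, lo;

	unpack_mac(0x00504322, 0x000011aa, mac);
	pack_mac(mac, &hi, &lo);	/* round-trips to the same register values */
	printf("%02x:%02x:%02x:%02x:%02x:%02x (hi=%08x lo=%08x)\n",
	       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5], hi, lo);
	return 0;
}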
3137 | ||
c5aff182 TP |
3138 | /* Handle setting mac address */ |
3139 | static int mvneta_set_mac_addr(struct net_device *dev, void *addr) | |
3140 | { | |
3141 | struct mvneta_port *pp = netdev_priv(dev); | |
e68de360 EG |
3142 | struct sockaddr *sockaddr = addr; |
3143 | int ret; | |
c5aff182 | 3144 | |
e68de360 EG |
3145 | ret = eth_prepare_mac_addr_change(dev, addr); |
3146 | if (ret < 0) | |
3147 | return ret; | |
c5aff182 TP |
3148 | /* Remove previous address table entry */ |
3149 | mvneta_mac_addr_set(pp, dev->dev_addr, -1); | |
3150 | ||
3151 | /* Set new addr in hw */ | |
90b74c01 | 3152 | mvneta_mac_addr_set(pp, sockaddr->sa_data, pp->rxq_def); |
c5aff182 | 3153 | |
e68de360 | 3154 | eth_commit_mac_addr_change(dev, addr); |
c5aff182 TP |
3155 | return 0; |
3156 | } | |
3157 | ||
3158 | static void mvneta_adjust_link(struct net_device *ndev) | |
3159 | { | |
3160 | struct mvneta_port *pp = netdev_priv(ndev); | |
3161 | struct phy_device *phydev = pp->phy_dev; | |
3162 | int status_change = 0; | |
3163 | ||
3164 | if (phydev->link) { | |
3165 | if ((pp->speed != phydev->speed) || | |
3166 | (pp->duplex != phydev->duplex)) { | |
3167 | u32 val; | |
3168 | ||
3169 | val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG); | |
3170 | val &= ~(MVNETA_GMAC_CONFIG_MII_SPEED | | |
3171 | MVNETA_GMAC_CONFIG_GMII_SPEED | | |
898b2970 | 3172 | MVNETA_GMAC_CONFIG_FULL_DUPLEX); |
c5aff182 TP |
3173 | |
3174 | if (phydev->duplex) | |
3175 | val |= MVNETA_GMAC_CONFIG_FULL_DUPLEX; | |
3176 | ||
3177 | if (phydev->speed == SPEED_1000) | |
3178 | val |= MVNETA_GMAC_CONFIG_GMII_SPEED; | |
4d12bc63 | 3179 | else if (phydev->speed == SPEED_100) |
c5aff182 TP |
3180 | val |= MVNETA_GMAC_CONFIG_MII_SPEED; |
3181 | ||
3182 | mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val); | |
3183 | ||
3184 | pp->duplex = phydev->duplex; | |
3185 | pp->speed = phydev->speed; | |
3186 | } | |
3187 | } | |
3188 | ||
3189 | if (phydev->link != pp->link) { | |
3190 | if (!phydev->link) { | |
3191 | pp->duplex = -1; | |
3192 | pp->speed = 0; | |
3193 | } | |
3194 | ||
3195 | pp->link = phydev->link; | |
3196 | status_change = 1; | |
3197 | } | |
3198 | ||
3199 | if (status_change) { | |
3200 | if (phydev->link) { | |
898b2970 SS |
3201 | if (!pp->use_inband_status) { |
3202 | u32 val = mvreg_read(pp, | |
3203 | MVNETA_GMAC_AUTONEG_CONFIG); | |
3204 | val &= ~MVNETA_GMAC_FORCE_LINK_DOWN; | |
3205 | val |= MVNETA_GMAC_FORCE_LINK_PASS; | |
3206 | mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, | |
3207 | val); | |
3208 | } | |
c5aff182 | 3209 | mvneta_port_up(pp); |
c5aff182 | 3210 | } else { |
898b2970 SS |
3211 | if (!pp->use_inband_status) { |
3212 | u32 val = mvreg_read(pp, | |
3213 | MVNETA_GMAC_AUTONEG_CONFIG); | |
3214 | val &= ~MVNETA_GMAC_FORCE_LINK_PASS; | |
3215 | val |= MVNETA_GMAC_FORCE_LINK_DOWN; | |
3216 | mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, | |
3217 | val); | |
3218 | } | |
c5aff182 | 3219 | mvneta_port_down(pp); |
c5aff182 | 3220 | } |
0089b745 | 3221 | phy_print_status(phydev); |
c5aff182 TP |
3222 | } |
3223 | } | |
3224 | ||
3225 | static int mvneta_mdio_probe(struct mvneta_port *pp) | |
3226 | { | |
3227 | struct phy_device *phy_dev; | |
3228 | ||
3229 | phy_dev = of_phy_connect(pp->dev, pp->phy_node, mvneta_adjust_link, 0, | |
3230 | pp->phy_interface); | |
3231 | if (!phy_dev) { | |
3232 | netdev_err(pp->dev, "could not find the PHY\n"); | |
3233 | return -ENODEV; | |
3234 | } | |
3235 | ||
3236 | phy_dev->supported &= PHY_GBIT_FEATURES; | |
3237 | phy_dev->advertising = phy_dev->supported; | |
3238 | ||
3239 | pp->phy_dev = phy_dev; | |
3240 | pp->link = 0; | |
3241 | pp->duplex = 0; | |
3242 | pp->speed = 0; | |
3243 | ||
3244 | return 0; | |
3245 | } | |
3246 | ||
3247 | static void mvneta_mdio_remove(struct mvneta_port *pp) | |
3248 | { | |
3249 | phy_disconnect(pp->phy_dev); | |
3250 | pp->phy_dev = NULL; | |
3251 | } | |
3252 | ||
f8642885 MR |
3253 | static void mvneta_percpu_enable(void *arg) |
3254 | { | |
3255 | struct mvneta_port *pp = arg; | |
3256 | ||
3257 | enable_percpu_irq(pp->dev->irq, IRQ_TYPE_NONE); | |
3258 | } | |
3259 | ||
3260 | static void mvneta_percpu_disable(void *arg) | |
3261 | { | |
3262 | struct mvneta_port *pp = arg; | |
3263 | ||
3264 | disable_percpu_irq(pp->dev->irq); | |
3265 | } | |
3266 | ||
120cfa50 GC |
3267 | /* Electing a CPU must be done atomically: it should happen either | |
3268 | * before or after the removal/insertion of a CPU, and this function is | |
3269 | * not reentrant. | |
3270 | */ | |
f8642885 MR |
3271 | static void mvneta_percpu_elect(struct mvneta_port *pp) |
3272 | { | |
cad5d847 GC |
3273 | int elected_cpu = 0, max_cpu, cpu, i = 0; |
3274 | ||
3275 | /* Use the cpu associated to the rxq when it is online, in all | |
3276 | * the other cases, use the cpu 0 which can't be offline. | |
3277 | */ | |
3278 | if (cpu_online(pp->rxq_def)) | |
3279 | elected_cpu = pp->rxq_def; | |
f8642885 | 3280 | |
2dcf75e2 | 3281 | max_cpu = num_present_cpus(); |
f8642885 MR |
3282 | |
3283 | for_each_online_cpu(cpu) { | |
2dcf75e2 GC |
3284 | int rxq_map = 0, txq_map = 0; |
3285 | int rxq; | |
3286 | ||
3287 | for (rxq = 0; rxq < rxq_number; rxq++) | |
3288 | if ((rxq % max_cpu) == cpu) | |
3289 | rxq_map |= MVNETA_CPU_RXQ_ACCESS(rxq); | |
3290 | ||
cad5d847 | 3291 | if (cpu == elected_cpu) |
50bf8cb6 GC |
3292 | /* Map the default receive queue to the | |
3293 | * elected CPU | |
f8642885 | 3294 | */ |
2dcf75e2 | 3295 | rxq_map |= MVNETA_CPU_RXQ_ACCESS(pp->rxq_def); |
50bf8cb6 GC |
3296 | |
3297 | /* We update the TX queue map only if we have one | |
3298 | * queue. In this case we associate the TX queue to | |
3299 | * the CPU bound to the default RX queue | |
3300 | */ | |
3301 | if (txq_number == 1) | |
cad5d847 | 3302 | txq_map = (cpu == elected_cpu) ? |
50bf8cb6 GC |
3303 | MVNETA_CPU_TXQ_ACCESS(1) : 0; |
3304 | else | |
3305 | txq_map = mvreg_read(pp, MVNETA_CPU_MAP(cpu)) & | |
3306 | MVNETA_CPU_TXQ_ACCESS_ALL_MASK; | |
3307 | ||
2dcf75e2 GC |
3308 | mvreg_write(pp, MVNETA_CPU_MAP(cpu), rxq_map | txq_map); |
3309 | ||
3310 | /* Update the interrupt mask on each CPU according to the | |
3311 | * new mapping | |
3312 | */ | |
3313 | smp_call_function_single(cpu, mvneta_percpu_unmask_interrupt, | |
3314 | pp, true); | |
f8642885 | 3315 | i++; |
2dcf75e2 | 3316 | |
f8642885 MR |
3317 | } |
3318 | } | |
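/* Aside: a standalone sketch of the queue spreading computed in
 * mvneta_percpu_elect(): RX queue q belongs to CPU (q % nr_cpus), and
 * the elected CPU additionally takes the default RX queue. The bit
 * encoding is a stand-in for MVNETA_CPU_RXQ_ACCESS(); the CPU/queue
 * counts are arbitrary example values.
 */
#include <stdio.h>

int main(void)
{
	const int nr_cpus = 4, nr_rxqs = 8;
	const int rxq_def = 1, elected_cpu = 1;	/* assume CPU1 is online */

	for (int cpu = 0; cpu < nr_cpus; cpu++) {
		unsigned int rxq_map = 0;

		for (int rxq = 0; rxq < nr_rxqs; rxq++)
			if (rxq % nr_cpus == cpu)
				rxq_map |= 1u << rxq;
		if (cpu == elected_cpu)
			rxq_map |= 1u << rxq_def; /* default queue follows the elected CPU */
		printf("cpu%d: rxq_map=0x%02x\n", cpu, rxq_map);
	}
	return 0;
}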
3319 | ||
3320 | static int mvneta_percpu_notifier(struct notifier_block *nfb, | |
3321 | unsigned long action, void *hcpu) | |
3322 | { | |
3323 | struct mvneta_port *pp = container_of(nfb, struct mvneta_port, | |
3324 | cpu_notifier); | |
3325 | int cpu = (unsigned long)hcpu, other_cpu; | |
3326 | struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu); | |
3327 | ||
3328 | switch (action) { | |
3329 | case CPU_ONLINE: | |
3330 | case CPU_ONLINE_FROZEN: | |
0df83e7a AMG |
3331 | case CPU_DOWN_FAILED: |
3332 | case CPU_DOWN_FAILED_FROZEN: | |
120cfa50 GC |
3333 | spin_lock(&pp->lock); |
3334 | /* Configuring the driver for a new CPU while the | |
3335 | * driver is stopping is racy, so just avoid it. | |
3336 | */ | |
3337 | if (pp->is_stopped) { | |
3338 | spin_unlock(&pp->lock); | |
3339 | break; | |
3340 | } | |
f8642885 MR |
3341 | netif_tx_stop_all_queues(pp->dev); |
3342 | ||
3343 | /* We have to synchronise on the napi of each CPU | |
3344 | * except the one just being woken up | |
3345 | */ | |
3346 | for_each_online_cpu(other_cpu) { | |
3347 | if (other_cpu != cpu) { | |
3348 | struct mvneta_pcpu_port *other_port = | |
3349 | per_cpu_ptr(pp->ports, other_cpu); | |
3350 | ||
3351 | napi_synchronize(&other_port->napi); | |
3352 | } | |
3353 | } | |
3354 | ||
3355 | /* Mask all ethernet port interrupts */ | |
db488c10 | 3356 | on_each_cpu(mvneta_percpu_mask_interrupt, pp, true); |
f8642885 MR |
3357 | napi_enable(&port->napi); |
3358 | ||
2dcf75e2 GC |
3359 | |
3360 | /* Enable per-CPU interrupts on the CPU that is | |
3361 | * brought up. | |
3362 | */ | |
3363 | smp_call_function_single(cpu, mvneta_percpu_enable, | |
3364 | pp, true); | |
3365 | ||
f8642885 MR |
3366 | /* Elect the CPU handling the default queues and update the | |
3367 | * queue-to-CPU mappings accordingly. | |
3368 | */ | |
3369 | mvneta_percpu_elect(pp); | |
3370 | ||
db488c10 GC |
3371 | /* Unmask all ethernet port interrupts */ |
3372 | on_each_cpu(mvneta_percpu_unmask_interrupt, pp, true); | |
f8642885 MR |
3373 | mvreg_write(pp, MVNETA_INTR_MISC_MASK, |
3374 | MVNETA_CAUSE_PHY_STATUS_CHANGE | | |
3375 | MVNETA_CAUSE_LINK_CHANGE | | |
3376 | MVNETA_CAUSE_PSC_SYNC_CHANGE); | |
3377 | netif_tx_start_all_queues(pp->dev); | |
120cfa50 | 3378 | spin_unlock(&pp->lock); |
f8642885 MR |
3379 | break; |
3380 | case CPU_DOWN_PREPARE: | |
3381 | case CPU_DOWN_PREPARE_FROZEN: | |
3382 | netif_tx_stop_all_queues(pp->dev); | |
5888511e GC |
3383 | /* Thanks to this lock we are sure that any pending |
3384 | * cpu election is done | |
3385 | */ | |
3386 | spin_lock(&pp->lock); | |
f8642885 | 3387 | /* Mask all ethernet port interrupts */ |
db488c10 | 3388 | on_each_cpu(mvneta_percpu_mask_interrupt, pp, true); |
5888511e | 3389 | spin_unlock(&pp->lock); |
f8642885 MR |
3390 | |
3391 | napi_synchronize(&port->napi); | |
3392 | napi_disable(&port->napi); | |
3393 | /* Disable per-CPU interrupts on the CPU that is | |
3394 | * brought down. | |
3395 | */ | |
3396 | smp_call_function_single(cpu, mvneta_percpu_disable, | |
3397 | pp, true); | |
3398 | ||
3399 | break; | |
3400 | case CPU_DEAD: | |
3401 | case CPU_DEAD_FROZEN: | |
3402 | /* Check if a new CPU must be elected now that this one is down */ | |
120cfa50 | 3403 | spin_lock(&pp->lock); |
f8642885 | 3404 | mvneta_percpu_elect(pp); |
120cfa50 | 3405 | spin_unlock(&pp->lock); |
f8642885 | 3406 | /* Unmask all ethernet port interrupts */ |
db488c10 | 3407 | on_each_cpu(mvneta_percpu_unmask_interrupt, pp, true); |
f8642885 MR |
3408 | mvreg_write(pp, MVNETA_INTR_MISC_MASK, |
3409 | MVNETA_CAUSE_PHY_STATUS_CHANGE | | |
3410 | MVNETA_CAUSE_LINK_CHANGE | | |
3411 | MVNETA_CAUSE_PSC_SYNC_CHANGE); | |
3412 | netif_tx_start_all_queues(pp->dev); | |
3413 | break; | |
3414 | } | |
3415 | ||
3416 | return NOTIFY_OK; | |
3417 | } | |
3418 | ||
c5aff182 TP |
3419 | static int mvneta_open(struct net_device *dev) |
3420 | { | |
3421 | struct mvneta_port *pp = netdev_priv(dev); | |
6b125d63 | 3422 | int ret; |
c5aff182 | 3423 | |
c5aff182 | 3424 | pp->pkt_size = MVNETA_RX_PKT_SIZE(pp->dev->mtu); |
8ec2cd48 | 3425 | pp->frag_size = SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(pp->pkt_size)) + |
3426 | SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); | |
c5aff182 TP |
3427 | |
3428 | ret = mvneta_setup_rxqs(pp); | |
3429 | if (ret) | |
3430 | return ret; | |
3431 | ||
3432 | ret = mvneta_setup_txqs(pp); | |
3433 | if (ret) | |
3434 | goto err_cleanup_rxqs; | |
3435 | ||
3436 | /* Connect to port interrupt line */ | |
12bb03b4 MR |
3437 | ret = request_percpu_irq(pp->dev->irq, mvneta_isr, |
3438 | MVNETA_DRIVER_NAME, pp->ports); | |
c5aff182 TP |
3439 | if (ret) { |
3440 | netdev_err(pp->dev, "cannot request irq %d\n", pp->dev->irq); | |
3441 | goto err_cleanup_txqs; | |
3442 | } | |
3443 | ||
2dcf75e2 GC |
3444 | /* Enable per-CPU interrupt on all the CPU to handle our RX |
3445 | * queue interrupts | |
3446 | */ | |
6b125d63 | 3447 | on_each_cpu(mvneta_percpu_enable, pp, true); |
2dcf75e2 | 3448 | |
120cfa50 | 3449 | pp->is_stopped = false; |
f8642885 MR |
3450 | /* Register a CPU notifier to handle the case where our CPU |
3451 | * might be taken offline. | |
3452 | */ | |
3453 | register_cpu_notifier(&pp->cpu_notifier); | |
3454 | ||
c5aff182 TP |
3455 | /* By default the link is down */ | |
3456 | netif_carrier_off(pp->dev); | |
3457 | ||
3458 | ret = mvneta_mdio_probe(pp); | |
3459 | if (ret < 0) { | |
3460 | netdev_err(dev, "cannot probe MDIO bus\n"); | |
3461 | goto err_free_irq; | |
3462 | } | |
3463 | ||
3464 | mvneta_start_dev(pp); | |
3465 | ||
3466 | return 0; | |
3467 | ||
3468 | err_free_irq: | |
12bb03b4 | 3469 | free_percpu_irq(pp->dev->irq, pp->ports); |
c5aff182 TP |
3470 | err_cleanup_txqs: |
3471 | mvneta_cleanup_txqs(pp); | |
3472 | err_cleanup_rxqs: | |
3473 | mvneta_cleanup_rxqs(pp); | |
3474 | return ret; | |
3475 | } | |
3476 | ||
3477 | /* Stop the port, free port interrupt line */ | |
3478 | static int mvneta_stop(struct net_device *dev) | |
3479 | { | |
3480 | struct mvneta_port *pp = netdev_priv(dev); | |
3481 | ||
120cfa50 | 3482 | /* Indicate that we are stopping, so that we don't set up the |
1c2722a9 GC |
3483 | * driver for new CPUs in the notifiers. The code of the |
3484 | * notifier for CPU online is protected by the same spinlock, | |
3485 | * so when we get the lock, the notifier work is done. |
120cfa50 GC |
3486 | */ |
3487 | spin_lock(&pp->lock); | |
3488 | pp->is_stopped = true; | |
1c2722a9 GC |
3489 | spin_unlock(&pp->lock); |
3490 | ||
c5aff182 TP |
3491 | mvneta_stop_dev(pp); |
3492 | mvneta_mdio_remove(pp); | |
f8642885 | 3493 | unregister_cpu_notifier(&pp->cpu_notifier); |
129219e4 | 3494 | on_each_cpu(mvneta_percpu_disable, pp, true); |
12bb03b4 | 3495 | free_percpu_irq(dev->irq, pp->ports); |
c5aff182 TP |
3496 | mvneta_cleanup_rxqs(pp); |
3497 | mvneta_cleanup_txqs(pp); | |
c5aff182 TP |
3498 | |
3499 | return 0; | |
3500 | } | |
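/* Aside: a standalone, userspace sketch (a pthread mutex standing in for
 * pp->lock) of the handshake between mvneta_stop() and the CPU-online
 * notifier above. Whichever side takes the lock second sees a consistent
 * state: either the notifier finished before the stop, or it observes
 * is_stopped and backs off.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static bool is_stopped;

static void notifier_cpu_online(void)
{
	pthread_mutex_lock(&lock);
	if (is_stopped) {			/* device going down: skip setup */
		pthread_mutex_unlock(&lock);
		return;
	}
	printf("configuring new CPU\n");	/* ...queue/interrupt setup... */
	pthread_mutex_unlock(&lock);
}

static void stop(void)
{
	pthread_mutex_lock(&lock);
	is_stopped = true;			/* later notifiers become no-ops */
	pthread_mutex_unlock(&lock);
	printf("tearing down\n");		/* ...teardown is now race-free... */
}

int main(void)
{
	stop();
	notifier_cpu_online();	/* returns without touching torn-down state */
	return 0;
}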
3501 | ||
15f59456 TP |
3502 | static int mvneta_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) |
3503 | { | |
3504 | struct mvneta_port *pp = netdev_priv(dev); | |
15f59456 TP |
3505 | |
3506 | if (!pp->phy_dev) | |
3507 | return -ENOTSUPP; | |
3508 | ||
ecf7b361 | 3509 | return phy_mii_ioctl(pp->phy_dev, ifr, cmd); |
15f59456 TP |
3510 | } |
3511 | ||
c5aff182 TP |
3512 | /* Ethtool methods */ |
3513 | ||
3514 | /* Get settings (phy address, speed) for ethtool */ | |
3515 | int mvneta_ethtool_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) | |
3516 | { | |
3517 | struct mvneta_port *pp = netdev_priv(dev); | |
3518 | ||
3519 | if (!pp->phy_dev) | |
3520 | return -ENODEV; | |
3521 | ||
3522 | return phy_ethtool_gset(pp->phy_dev, cmd); | |
3523 | } | |
3524 | ||
3525 | /* Set settings (phy address, speed) for ethtool */ | |
3526 | int mvneta_ethtool_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) | |
3527 | { | |
3528 | struct mvneta_port *pp = netdev_priv(dev); | |
0c0744fc | 3529 | struct phy_device *phydev = pp->phy_dev; |
c5aff182 | 3530 | |
0c0744fc | 3531 | if (!phydev) |
c5aff182 TP |
3532 | return -ENODEV; |
3533 | ||
0c0744fc SS |
3534 | if ((cmd->autoneg == AUTONEG_ENABLE) != pp->use_inband_status) { |
3535 | u32 val; | |
3536 | ||
3537 | mvneta_set_autoneg(pp, cmd->autoneg == AUTONEG_ENABLE); | |
3538 | ||
3539 | if (cmd->autoneg == AUTONEG_DISABLE) { | |
3540 | val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG); | |
3541 | val &= ~(MVNETA_GMAC_CONFIG_MII_SPEED | | |
3542 | MVNETA_GMAC_CONFIG_GMII_SPEED | | |
3543 | MVNETA_GMAC_CONFIG_FULL_DUPLEX); | |
3544 | ||
3545 | if (phydev->duplex) | |
3546 | val |= MVNETA_GMAC_CONFIG_FULL_DUPLEX; | |
3547 | ||
3548 | if (phydev->speed == SPEED_1000) | |
3549 | val |= MVNETA_GMAC_CONFIG_GMII_SPEED; | |
3550 | else if (phydev->speed == SPEED_100) | |
3551 | val |= MVNETA_GMAC_CONFIG_MII_SPEED; | |
3552 | ||
3553 | mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val); | |
3554 | } | |
3555 | ||
3556 | pp->use_inband_status = (cmd->autoneg == AUTONEG_ENABLE); | |
3557 | netdev_info(pp->dev, "autoneg status set to %i\n", | |
3558 | pp->use_inband_status); | |
3559 | ||
3560 | if (netif_running(dev)) { | |
3561 | mvneta_port_down(pp); | |
3562 | mvneta_port_up(pp); | |
3563 | } | |
3564 | } | |
3565 | ||
c5aff182 TP |
3566 | return phy_ethtool_sset(pp->phy_dev, cmd); |
3567 | } | |
3568 | ||
3569 | /* Set interrupt coalescing for ethtool */ | |
3570 | static int mvneta_ethtool_set_coalesce(struct net_device *dev, | |
3571 | struct ethtool_coalesce *c) | |
3572 | { | |
3573 | struct mvneta_port *pp = netdev_priv(dev); | |
3574 | int queue; | |
3575 | ||
3576 | for (queue = 0; queue < rxq_number; queue++) { | |
3577 | struct mvneta_rx_queue *rxq = &pp->rxqs[queue]; | |
3578 | rxq->time_coal = c->rx_coalesce_usecs; | |
3579 | rxq->pkts_coal = c->rx_max_coalesced_frames; | |
3580 | mvneta_rx_pkts_coal_set(pp, rxq, rxq->pkts_coal); | |
3581 | mvneta_rx_time_coal_set(pp, rxq, rxq->time_coal); | |
3582 | } | |
3583 | ||
3584 | for (queue = 0; queue < txq_number; queue++) { | |
3585 | struct mvneta_tx_queue *txq = &pp->txqs[queue]; | |
3586 | txq->done_pkts_coal = c->tx_max_coalesced_frames; | |
3587 | mvneta_tx_done_pkts_coal_set(pp, txq, txq->done_pkts_coal); | |
3588 | } | |
3589 | ||
3590 | return 0; | |
3591 | } | |
3592 | ||
3593 | /* Get interrupt coalescing for ethtool */ | |
3594 | static int mvneta_ethtool_get_coalesce(struct net_device *dev, | |
3595 | struct ethtool_coalesce *c) | |
3596 | { | |
3597 | struct mvneta_port *pp = netdev_priv(dev); | |
3598 | ||
3599 | c->rx_coalesce_usecs = pp->rxqs[0].time_coal; | |
3600 | c->rx_max_coalesced_frames = pp->rxqs[0].pkts_coal; | |
3601 | ||
3602 | c->tx_max_coalesced_frames = pp->txqs[0].done_pkts_coal; | |
3603 | return 0; | |
3604 | } | |
3605 | ||
3606 | ||
3607 | static void mvneta_ethtool_get_drvinfo(struct net_device *dev, | |
3608 | struct ethtool_drvinfo *drvinfo) | |
3609 | { | |
3610 | strlcpy(drvinfo->driver, MVNETA_DRIVER_NAME, | |
3611 | sizeof(drvinfo->driver)); | |
3612 | strlcpy(drvinfo->version, MVNETA_DRIVER_VERSION, | |
3613 | sizeof(drvinfo->version)); | |
3614 | strlcpy(drvinfo->bus_info, dev_name(&dev->dev), | |
3615 | sizeof(drvinfo->bus_info)); | |
3616 | } | |
3617 | ||
3618 | ||
3619 | static void mvneta_ethtool_get_ringparam(struct net_device *netdev, | |
3620 | struct ethtool_ringparam *ring) | |
3621 | { | |
3622 | struct mvneta_port *pp = netdev_priv(netdev); | |
3623 | ||
3624 | ring->rx_max_pending = MVNETA_MAX_RXD; | |
3625 | ring->tx_max_pending = MVNETA_MAX_TXD; | |
3626 | ring->rx_pending = pp->rx_ring_size; | |
3627 | ring->tx_pending = pp->tx_ring_size; | |
3628 | } | |
3629 | ||
3630 | static int mvneta_ethtool_set_ringparam(struct net_device *dev, | |
3631 | struct ethtool_ringparam *ring) | |
3632 | { | |
3633 | struct mvneta_port *pp = netdev_priv(dev); | |
3634 | ||
3635 | if ((ring->rx_pending == 0) || (ring->tx_pending == 0)) | |
3636 | return -EINVAL; | |
3637 | pp->rx_ring_size = ring->rx_pending < MVNETA_MAX_RXD ? | |
3638 | ring->rx_pending : MVNETA_MAX_RXD; | |
8eef5f97 EG |
3639 | |
3640 | pp->tx_ring_size = clamp_t(u16, ring->tx_pending, | |
3641 | MVNETA_MAX_SKB_DESCS * 2, MVNETA_MAX_TXD); | |
3642 | if (pp->tx_ring_size != ring->tx_pending) | |
3643 | netdev_warn(dev, "TX queue size set to %u (requested %u)\n", | |
3644 | pp->tx_ring_size, ring->tx_pending); | |
c5aff182 TP |
3645 | |
3646 | if (netif_running(dev)) { | |
3647 | mvneta_stop(dev); | |
3648 | if (mvneta_open(dev)) { | |
3649 | netdev_err(dev, | |
3650 | "error on opening device after ring param change\n"); | |
3651 | return -ENOMEM; | |
3652 | } | |
3653 | } | |
3654 | ||
3655 | return 0; | |
3656 | } | |
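/* Aside: a standalone sketch of the TX ring clamping in
 * mvneta_ethtool_set_ringparam(). clamp() mirrors the kernel's
 * clamp_t(), and the two limits are hypothetical example values for
 * MVNETA_MAX_SKB_DESCS * 2 and MVNETA_MAX_TXD.
 */
#include <stdio.h>

static unsigned int clamp(unsigned int v, unsigned int lo, unsigned int hi)
{
	return v < lo ? lo : (v > hi ? hi : v);
}

int main(void)
{
	const unsigned int min_txd = 84, max_txd = 532;	/* assumed limits */
	unsigned int requested = 8;
	unsigned int granted = clamp(requested, min_txd, max_txd);

	if (granted != requested)
		printf("TX queue size set to %u (requested %u)\n",
		       granted, requested);
	return 0;
}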
3657 | ||
9b0cdefa RK |
3658 | static void mvneta_ethtool_get_strings(struct net_device *netdev, u32 sset, |
3659 | u8 *data) | |
3660 | { | |
3661 | if (sset == ETH_SS_STATS) { | |
3662 | int i; | |
3663 | ||
3664 | for (i = 0; i < ARRAY_SIZE(mvneta_statistics); i++) | |
3665 | memcpy(data + i * ETH_GSTRING_LEN, | |
3666 | mvneta_statistics[i].name, ETH_GSTRING_LEN); | |
3667 | } | |
3668 | } | |
3669 | ||
3670 | static void mvneta_ethtool_update_stats(struct mvneta_port *pp) | |
3671 | { | |
3672 | const struct mvneta_statistic *s; | |
3673 | void __iomem *base = pp->base; | |
3674 | u32 high, low, val; | |
2c832293 | 3675 | u64 val64; |
9b0cdefa RK |
3676 | int i; |
3677 | ||
3678 | for (i = 0, s = mvneta_statistics; | |
3679 | s < mvneta_statistics + ARRAY_SIZE(mvneta_statistics); | |
3680 | s++, i++) { | |
9b0cdefa RK |
3681 | switch (s->type) { |
3682 | case T_REG_32: | |
3683 | val = readl_relaxed(base + s->offset); | |
2c832293 | 3684 | pp->ethtool_stats[i] += val; |
9b0cdefa RK |
3685 | break; |
3686 | case T_REG_64: | |
3687 | /* Docs say to read low 32-bit then high */ | |
3688 | low = readl_relaxed(base + s->offset); | |
3689 | high = readl_relaxed(base + s->offset + 4); | |
2c832293 JZ |
3690 | val64 = (u64)high << 32 | low; |
3691 | pp->ethtool_stats[i] += val64; | |
9b0cdefa RK |
3692 | break; |
3693 | } | |
9b0cdefa RK |
3694 | } |
3695 | } | |
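/* Aside: a standalone sketch of the 64-bit counter handling in
 * mvneta_ethtool_update_stats(): the low word is read first (per the
 * comment above), folded with the high word, and added to a running
 * total. The accumulation suggests the hardware MIB counters clear on
 * read, though that is an inference, not something this listing states.
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t fold64(uint32_t low, uint32_t high)
{
	return (uint64_t)high << 32 | low;
}

int main(void)
{
	uint64_t total = 0;

	total += fold64(0xffffffffu, 0x0);	/* first poll */
	total += fold64(0x00000002u, 0x0);	/* second poll, after clear-on-read */
	printf("total=%llu\n", (unsigned long long)total);
	return 0;
}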
3696 | ||
3697 | static void mvneta_ethtool_get_stats(struct net_device *dev, | |
3698 | struct ethtool_stats *stats, u64 *data) | |
3699 | { | |
3700 | struct mvneta_port *pp = netdev_priv(dev); | |
3701 | int i; | |
3702 | ||
3703 | mvneta_ethtool_update_stats(pp); | |
3704 | ||
3705 | for (i = 0; i < ARRAY_SIZE(mvneta_statistics); i++) | |
3706 | *data++ = pp->ethtool_stats[i]; | |
3707 | } | |
3708 | ||
3709 | static int mvneta_ethtool_get_sset_count(struct net_device *dev, int sset) | |
3710 | { | |
3711 | if (sset == ETH_SS_STATS) | |
3712 | return ARRAY_SIZE(mvneta_statistics); | |
3713 | return -EOPNOTSUPP; | |
3714 | } | |
3715 | ||
9a401dea GC |
3716 | static u32 mvneta_ethtool_get_rxfh_indir_size(struct net_device *dev) |
3717 | { | |
3718 | return MVNETA_RSS_LU_TABLE_SIZE; | |
3719 | } | |
3720 | ||
3721 | static int mvneta_ethtool_get_rxnfc(struct net_device *dev, | |
3722 | struct ethtool_rxnfc *info, | |
3723 | u32 *rules __always_unused) | |
3724 | { | |
3725 | switch (info->cmd) { | |
3726 | case ETHTOOL_GRXRINGS: | |
3727 | info->data = rxq_number; | |
3728 | return 0; | |
3729 | case ETHTOOL_GRXFH: | |
3730 | return -EOPNOTSUPP; | |
3731 | default: | |
3732 | return -EOPNOTSUPP; | |
3733 | } | |
3734 | } | |
3735 | ||
3736 | static int mvneta_config_rss(struct mvneta_port *pp) | |
3737 | { | |
3738 | int cpu; | |
3739 | u32 val; | |
3740 | ||
3741 | netif_tx_stop_all_queues(pp->dev); | |
3742 | ||
6b125d63 | 3743 | on_each_cpu(mvneta_percpu_mask_interrupt, pp, true); |
9a401dea GC |
3744 | |
3745 | /* We have to synchronise on the napi of each CPU */ | |
3746 | for_each_online_cpu(cpu) { | |
3747 | struct mvneta_pcpu_port *pcpu_port = | |
3748 | per_cpu_ptr(pp->ports, cpu); | |
3749 | ||
3750 | napi_synchronize(&pcpu_port->napi); | |
3751 | napi_disable(&pcpu_port->napi); | |
3752 | } | |
3753 | ||
3754 | pp->rxq_def = pp->indir[0]; | |
3755 | ||
3756 | /* Update unicast mapping */ | |
3757 | mvneta_set_rx_mode(pp->dev); | |
3758 | ||
3759 | /* Update val of portCfg register accordingly with all RxQueue types */ | |
3760 | val = MVNETA_PORT_CONFIG_DEFL_VALUE(pp->rxq_def); | |
3761 | mvreg_write(pp, MVNETA_PORT_CONFIG, val); | |
3762 | ||
3763 | /* Update the elected CPU matching the new rxq_def */ | |
120cfa50 | 3764 | spin_lock(&pp->lock); |
9a401dea | 3765 | mvneta_percpu_elect(pp); |
120cfa50 | 3766 | spin_unlock(&pp->lock); |
9a401dea GC |
3767 | |
3768 | /* We have to synchronise on the napi of each CPU */ | |
3769 | for_each_online_cpu(cpu) { | |
3770 | struct mvneta_pcpu_port *pcpu_port = | |
3771 | per_cpu_ptr(pp->ports, cpu); | |
3772 | ||
3773 | napi_enable(&pcpu_port->napi); | |
3774 | } | |
3775 | ||
3776 | netif_tx_start_all_queues(pp->dev); | |
3777 | ||
3778 | return 0; | |
3779 | } | |
3780 | ||
3781 | static int mvneta_ethtool_set_rxfh(struct net_device *dev, const u32 *indir, | |
3782 | const u8 *key, const u8 hfunc) | |
3783 | { | |
3784 | struct mvneta_port *pp = netdev_priv(dev); | |
3785 | /* We require at least one supported parameter to be changed | |
3786 | * and no change in any of the unsupported parameters | |
3787 | */ | |
3788 | if (key || | |
3789 | (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)) | |
3790 | return -EOPNOTSUPP; | |
3791 | ||
3792 | if (!indir) | |
3793 | return 0; | |
3794 | ||
3795 | memcpy(pp->indir, indir, MVNETA_RSS_LU_TABLE_SIZE); | |
3796 | ||
3797 | return mvneta_config_rss(pp); | |
3798 | } | |
3799 | ||
3800 | static int mvneta_ethtool_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, | |
3801 | u8 *hfunc) | |
3802 | { | |
3803 | struct mvneta_port *pp = netdev_priv(dev); | |
3804 | ||
3805 | if (hfunc) | |
3806 | *hfunc = ETH_RSS_HASH_TOP; | |
3807 | ||
3808 | if (!indir) | |
3809 | return 0; | |
3810 | ||
3811 | memcpy(indir, pp->indir, MVNETA_RSS_LU_TABLE_SIZE); | |
3812 | ||
3813 | return 0; | |
3814 | } | |
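/* Aside: a standalone sketch of the ethtool RSS indirection model that
 * pp->indir implements: a flow hash indexes the table and the entry is
 * the RX queue. Note that in this driver mvneta_config_rss() only
 * consumes indir[0] (as rxq_def); the hash-indexed lookup is shown for
 * context, with an arbitrary table size.
 */
#include <stdint.h>
#include <stdio.h>

static unsigned int rss_pick_queue(const uint8_t *indir, unsigned int size,
				   uint32_t flow_hash)
{
	return indir[flow_hash % size];
}

int main(void)
{
	const uint8_t indir[4] = { 0, 1, 2, 3 };	/* example table */

	printf("hash 0x1234 -> rxq %u\n", rss_pick_queue(indir, 4, 0x1234));
	return 0;
}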
3815 | ||
c5aff182 TP |
3816 | static const struct net_device_ops mvneta_netdev_ops = { |
3817 | .ndo_open = mvneta_open, | |
3818 | .ndo_stop = mvneta_stop, | |
3819 | .ndo_start_xmit = mvneta_tx, | |
3820 | .ndo_set_rx_mode = mvneta_set_rx_mode, | |
3821 | .ndo_set_mac_address = mvneta_set_mac_addr, | |
3822 | .ndo_change_mtu = mvneta_change_mtu, | |
b65657fc | 3823 | .ndo_fix_features = mvneta_fix_features, |
c5aff182 | 3824 | .ndo_get_stats64 = mvneta_get_stats64, |
15f59456 | 3825 | .ndo_do_ioctl = mvneta_ioctl, |
c5aff182 TP |
3826 | }; |
3827 | ||
3828 | const struct ethtool_ops mvneta_eth_tool_ops = { | |
3829 | .get_link = ethtool_op_get_link, | |
3830 | .get_settings = mvneta_ethtool_get_settings, | |
3831 | .set_settings = mvneta_ethtool_set_settings, | |
3832 | .set_coalesce = mvneta_ethtool_set_coalesce, | |
3833 | .get_coalesce = mvneta_ethtool_get_coalesce, | |
3834 | .get_drvinfo = mvneta_ethtool_get_drvinfo, | |
3835 | .get_ringparam = mvneta_ethtool_get_ringparam, | |
3836 | .set_ringparam = mvneta_ethtool_set_ringparam, | |
9b0cdefa RK |
3837 | .get_strings = mvneta_ethtool_get_strings, |
3838 | .get_ethtool_stats = mvneta_ethtool_get_stats, | |
3839 | .get_sset_count = mvneta_ethtool_get_sset_count, | |
9a401dea GC |
3840 | .get_rxfh_indir_size = mvneta_ethtool_get_rxfh_indir_size, |
3841 | .get_rxnfc = mvneta_ethtool_get_rxnfc, | |
3842 | .get_rxfh = mvneta_ethtool_get_rxfh, | |
3843 | .set_rxfh = mvneta_ethtool_set_rxfh, | |
c5aff182 TP |
3844 | }; |
3845 | ||
3846 | /* Initialize hw */ | |
9672850b | 3847 | static int mvneta_init(struct device *dev, struct mvneta_port *pp) |
c5aff182 TP |
3848 | { |
3849 | int queue; | |
3850 | ||
3851 | /* Disable port */ | |
3852 | mvneta_port_disable(pp); | |
3853 | ||
3854 | /* Set port default values */ | |
3855 | mvneta_defaults_set(pp); | |
3856 | ||
9672850b EG |
3857 | pp->txqs = devm_kcalloc(dev, txq_number, sizeof(struct mvneta_tx_queue), |
3858 | GFP_KERNEL); | |
c5aff182 TP |
3859 | if (!pp->txqs) |
3860 | return -ENOMEM; | |
3861 | ||
3862 | /* Initialize TX descriptor rings */ | |
3863 | for (queue = 0; queue < txq_number; queue++) { | |
3864 | struct mvneta_tx_queue *txq = &pp->txqs[queue]; | |
3865 | txq->id = queue; | |
3866 | txq->size = pp->tx_ring_size; | |
3867 | txq->done_pkts_coal = MVNETA_TXDONE_COAL_PKTS; | |
3868 | } | |
3869 | ||
9672850b EG |
3870 | pp->rxqs = devm_kcalloc(dev, rxq_number, sizeof(struct mvneta_rx_queue), |
3871 | GFP_KERNEL); | |
3872 | if (!pp->rxqs) | |
c5aff182 | 3873 | return -ENOMEM; |
c5aff182 TP |
3874 | |
3875 | /* Create Rx descriptor rings */ | |
3876 | for (queue = 0; queue < rxq_number; queue++) { | |
3877 | struct mvneta_rx_queue *rxq = &pp->rxqs[queue]; | |
3878 | rxq->id = queue; | |
3879 | rxq->size = pp->rx_ring_size; | |
3880 | rxq->pkts_coal = MVNETA_RX_COAL_PKTS; | |
3881 | rxq->time_coal = MVNETA_RX_COAL_USEC; | |
3882 | } | |
3883 | ||
3884 | return 0; | |
3885 | } | |
3886 | ||
c5aff182 | 3887 | /* platform glue : initialize decoding windows */ |
03ce758e GK |
3888 | static void mvneta_conf_mbus_windows(struct mvneta_port *pp, |
3889 | const struct mbus_dram_target_info *dram) | |
c5aff182 TP |
3890 | { |
3891 | u32 win_enable; | |
3892 | u32 win_protect; | |
3893 | int i; | |
3894 | ||
3895 | for (i = 0; i < 6; i++) { | |
3896 | mvreg_write(pp, MVNETA_WIN_BASE(i), 0); | |
3897 | mvreg_write(pp, MVNETA_WIN_SIZE(i), 0); | |
3898 | ||
3899 | if (i < 4) | |
3900 | mvreg_write(pp, MVNETA_WIN_REMAP(i), 0); | |
3901 | } | |
3902 | ||
3903 | win_enable = 0x3f; | |
3904 | win_protect = 0; | |
3905 | ||
3906 | for (i = 0; i < dram->num_cs; i++) { | |
3907 | const struct mbus_dram_window *cs = dram->cs + i; | |
3908 | mvreg_write(pp, MVNETA_WIN_BASE(i), (cs->base & 0xffff0000) | | |
3909 | (cs->mbus_attr << 8) | dram->mbus_dram_target_id); | |
3910 | ||
3911 | mvreg_write(pp, MVNETA_WIN_SIZE(i), | |
3912 | (cs->size - 1) & 0xffff0000); | |
3913 | ||
3914 | win_enable &= ~(1 << i); | |
3915 | win_protect |= 3 << (2 * i); | |
3916 | } | |
3917 | ||
3918 | mvreg_write(pp, MVNETA_BASE_ADDR_ENABLE, win_enable); | |
db6ba9a5 | 3919 | mvreg_write(pp, MVNETA_ACCESS_PROTECT_ENABLE, win_protect); |
c5aff182 TP |
3920 | } |
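/* Aside: a standalone sketch of the mask construction in
 * mvneta_conf_mbus_windows(): win_enable starts at 0x3f (one bit per
 * window, all set) and a window's bit is cleared when a DRAM
 * chip-select is mapped to it, matching how the driver leaves unused
 * windows' bits set; win_protect takes two bits per window, value 3
 * for full access.
 */
#include <stdio.h>

int main(void)
{
	unsigned int win_enable = 0x3f, win_protect = 0;
	const int num_cs = 2;	/* e.g. two DRAM chip-selects */

	for (int i = 0; i < num_cs; i++) {
		win_enable &= ~(1u << i);
		win_protect |= 3u << (2 * i);
	}
	printf("enable=0x%02x protect=0x%02x\n", win_enable, win_protect);
	return 0;
}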
3921 | ||
3922 | /* Power up the port */ | |
3f1dd4bc | 3923 | static int mvneta_port_power_up(struct mvneta_port *pp, int phy_mode) |
c5aff182 | 3924 | { |
3f1dd4bc | 3925 | u32 ctrl; |
c5aff182 TP |
3926 | |
3927 | /* MAC Cause register should be cleared */ | |
3928 | mvreg_write(pp, MVNETA_UNIT_INTR_CAUSE, 0); | |
3929 | ||
3f1dd4bc | 3930 | ctrl = mvreg_read(pp, MVNETA_GMAC_CTRL_2); |
c5aff182 | 3931 | |
3f1dd4bc TP |
3932 | /* Even though it might look weird, when we're configured in |
3933 | * SGMII or QSGMII mode, the RGMII bit needs to be set. | |
3934 | */ | |
3935 | switch(phy_mode) { | |
3936 | case PHY_INTERFACE_MODE_QSGMII: | |
3937 | mvreg_write(pp, MVNETA_SERDES_CFG, MVNETA_QSGMII_SERDES_PROTO); | |
3938 | ctrl |= MVNETA_GMAC2_PCS_ENABLE | MVNETA_GMAC2_PORT_RGMII; | |
3939 | break; | |
3940 | case PHY_INTERFACE_MODE_SGMII: | |
3941 | mvreg_write(pp, MVNETA_SERDES_CFG, MVNETA_SGMII_SERDES_PROTO); | |
3942 | ctrl |= MVNETA_GMAC2_PCS_ENABLE | MVNETA_GMAC2_PORT_RGMII; | |
3943 | break; | |
3944 | case PHY_INTERFACE_MODE_RGMII: | |
3945 | case PHY_INTERFACE_MODE_RGMII_ID: | |
3946 | ctrl |= MVNETA_GMAC2_PORT_RGMII; | |
3947 | break; | |
3948 | default: | |
3949 | return -EINVAL; | |
3950 | } | |
c5aff182 TP |
3951 | |
3952 | /* Cancel Port Reset */ | |
3f1dd4bc TP |
3953 | ctrl &= ~MVNETA_GMAC2_PORT_RESET; |
3954 | mvreg_write(pp, MVNETA_GMAC_CTRL_2, ctrl); | |
c5aff182 TP |
3955 | |
3956 | while ((mvreg_read(pp, MVNETA_GMAC_CTRL_2) & | |
3957 | MVNETA_GMAC2_PORT_RESET) != 0) | |
3958 | continue; | |
3f1dd4bc TP |
3959 | |
3960 | return 0; | |
c5aff182 TP |
3961 | } |
3962 | ||
3963 | /* Device initialization routine */ | |
03ce758e | 3964 | static int mvneta_probe(struct platform_device *pdev) |
c5aff182 TP |
3965 | { |
3966 | const struct mbus_dram_target_info *dram_target_info; | |
c3f0dd38 | 3967 | struct resource *res; |
c5aff182 TP |
3968 | struct device_node *dn = pdev->dev.of_node; |
3969 | struct device_node *phy_node; | |
dc35a10f | 3970 | struct device_node *bm_node; |
c5aff182 TP |
3971 | struct mvneta_port *pp; |
3972 | struct net_device *dev; | |
8cc3e439 TP |
3973 | const char *dt_mac_addr; |
3974 | char hw_mac_addr[ETH_ALEN]; | |
3975 | const char *mac_from; | |
f8af8e6e | 3976 | const char *managed; |
9110ee07 | 3977 | int tx_csum_limit; |
c5aff182 TP |
3978 | int phy_mode; |
3979 | int err; | |
12bb03b4 | 3980 | int cpu; |
c5aff182 | 3981 | |
ee40a116 | 3982 | dev = alloc_etherdev_mqs(sizeof(struct mvneta_port), txq_number, rxq_number); |
c5aff182 TP |
3983 | if (!dev) |
3984 | return -ENOMEM; | |
3985 | ||
3986 | dev->irq = irq_of_parse_and_map(dn, 0); | |
3987 | if (dev->irq == 0) { | |
3988 | err = -EINVAL; | |
3989 | goto err_free_netdev; | |
3990 | } | |
3991 | ||
3992 | phy_node = of_parse_phandle(dn, "phy", 0); | |
3993 | if (!phy_node) { | |
83895bed TP |
3994 | if (!of_phy_is_fixed_link(dn)) { |
3995 | dev_err(&pdev->dev, "no PHY specified\n"); | |
3996 | err = -ENODEV; | |
3997 | goto err_free_irq; | |
3998 | } | |
3999 | ||
4000 | err = of_phy_register_fixed_link(dn); | |
4001 | if (err < 0) { | |
4002 | dev_err(&pdev->dev, "cannot register fixed PHY\n"); | |
4003 | goto err_free_irq; | |
4004 | } | |
4005 | ||
4006 | /* In the case of a fixed PHY, the DT node associated | |
4007 | * to the PHY is the Ethernet MAC DT node. | |
4008 | */ | |
c891c24c | 4009 | phy_node = of_node_get(dn); |
c5aff182 TP |
4010 | } |
4011 | ||
4012 | phy_mode = of_get_phy_mode(dn); | |
4013 | if (phy_mode < 0) { | |
4014 | dev_err(&pdev->dev, "incorrect phy-mode\n"); | |
4015 | err = -EINVAL; | |
c891c24c | 4016 | goto err_put_phy_node; |
c5aff182 TP |
4017 | } |
4018 | ||
c5aff182 TP |
4019 | dev->tx_queue_len = MVNETA_MAX_TXD; |
4020 | dev->watchdog_timeo = 5 * HZ; | |
4021 | dev->netdev_ops = &mvneta_netdev_ops; | |
4022 | ||
7ad24ea4 | 4023 | dev->ethtool_ops = &mvneta_eth_tool_ops; |
c5aff182 TP |
4024 | |
4025 | pp = netdev_priv(dev); | |
1c2722a9 | 4026 | spin_lock_init(&pp->lock); |
c5aff182 TP |
4027 | pp->phy_node = phy_node; |
4028 | pp->phy_interface = phy_mode; | |
f8af8e6e SS |
4029 | |
4030 | err = of_property_read_string(dn, "managed", &managed); | |
4031 | pp->use_inband_status = (err == 0 && | |
4032 | strcmp(managed, "in-band-status") == 0); | |
f8642885 | 4033 | pp->cpu_notifier.notifier_call = mvneta_percpu_notifier; |
c5aff182 | 4034 | |
90b74c01 GC |
4035 | pp->rxq_def = rxq_def; |
4036 | ||
9a401dea GC |
4037 | pp->indir[0] = rxq_def; |
4038 | ||
2804ba4e JZ |
4039 | pp->clk = devm_clk_get(&pdev->dev, "core"); |
4040 | if (IS_ERR(pp->clk)) | |
4041 | pp->clk = devm_clk_get(&pdev->dev, NULL); | |
189dd626 TP |
4042 | if (IS_ERR(pp->clk)) { |
4043 | err = PTR_ERR(pp->clk); | |
c891c24c | 4044 | goto err_put_phy_node; |
189dd626 TP |
4045 | } |
4046 | ||
4047 | clk_prepare_enable(pp->clk); | |
4048 | ||
15cc4a4a JZ |
4049 | pp->clk_bus = devm_clk_get(&pdev->dev, "bus"); |
4050 | if (!IS_ERR(pp->clk_bus)) | |
4051 | clk_prepare_enable(pp->clk_bus); | |
4052 | ||
c3f0dd38 TP |
4053 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
4054 | pp->base = devm_ioremap_resource(&pdev->dev, res); | |
4055 | if (IS_ERR(pp->base)) { | |
4056 | err = PTR_ERR(pp->base); | |
5445eaf3 APR |
4057 | goto err_clk; |
4058 | } | |
4059 | ||
12bb03b4 MR |
4060 | /* Alloc per-cpu port structure */ |
4061 | pp->ports = alloc_percpu(struct mvneta_pcpu_port); | |
4062 | if (!pp->ports) { | |
4063 | err = -ENOMEM; | |
4064 | goto err_clk; | |
4065 | } | |
4066 | ||
74c41b04 | 4067 | /* Alloc per-cpu stats */ |
1c213bd2 | 4068 | pp->stats = netdev_alloc_pcpu_stats(struct mvneta_pcpu_stats); |
74c41b04 | 4069 | if (!pp->stats) { |
4070 | err = -ENOMEM; | |
12bb03b4 | 4071 | goto err_free_ports; |
74c41b04 | 4072 | } |
4073 | ||
8cc3e439 | 4074 | dt_mac_addr = of_get_mac_address(dn); |
6c7a9a3c | 4075 | if (dt_mac_addr) { |
8cc3e439 TP |
4076 | mac_from = "device tree"; |
4077 | memcpy(dev->dev_addr, dt_mac_addr, ETH_ALEN); | |
4078 | } else { | |
4079 | mvneta_get_mac_addr(pp, hw_mac_addr); | |
4080 | if (is_valid_ether_addr(hw_mac_addr)) { | |
4081 | mac_from = "hardware"; | |
4082 | memcpy(dev->dev_addr, hw_mac_addr, ETH_ALEN); | |
4083 | } else { | |
4084 | mac_from = "random"; | |
4085 | eth_hw_addr_random(dev); | |
4086 | } | |
4087 | } | |
4088 | ||
9110ee07 MW |
4089 | if (!of_property_read_u32(dn, "tx-csum-limit", &tx_csum_limit)) { |
4090 | if (tx_csum_limit < 0 || | |
4091 | tx_csum_limit > MVNETA_TX_CSUM_MAX_SIZE) { | |
4092 | tx_csum_limit = MVNETA_TX_CSUM_DEF_SIZE; | |
4093 | dev_info(&pdev->dev, | |
4094 | "Wrong TX csum limit in DT, set to %dB\n", | |
4095 | MVNETA_TX_CSUM_DEF_SIZE); | |
4096 | } | |
4097 | } else if (of_device_is_compatible(dn, "marvell,armada-370-neta")) { | |
4098 | tx_csum_limit = MVNETA_TX_CSUM_DEF_SIZE; | |
4099 | } else { | |
4100 | tx_csum_limit = MVNETA_TX_CSUM_MAX_SIZE; | |
4101 | } | |
4102 | ||
4103 | pp->tx_csum_limit = tx_csum_limit; | |
b65657fc | 4104 | |
dc35a10f MW |
4105 | dram_target_info = mv_mbus_dram_info(); |
4106 | if (dram_target_info) | |
4107 | mvneta_conf_mbus_windows(pp, dram_target_info); | |
4108 | ||
c5aff182 TP |
4109 | pp->tx_ring_size = MVNETA_MAX_TXD; |
4110 | pp->rx_ring_size = MVNETA_MAX_RXD; | |
4111 | ||
4112 | pp->dev = dev; | |
4113 | SET_NETDEV_DEV(dev, &pdev->dev); | |
4114 | ||
dc35a10f MW |
4115 | pp->id = global_port_id++; |
4116 | ||
4117 | /* Obtain access to BM resources if enabled and already initialized */ | |
4118 | bm_node = of_parse_phandle(dn, "buffer-manager", 0); | |
4119 | if (bm_node && bm_node->data) { | |
4120 | pp->bm_priv = bm_node->data; | |
4121 | err = mvneta_bm_port_init(pdev, pp); | |
4122 | if (err < 0) { | |
4123 | dev_info(&pdev->dev, "use SW buffer management\n"); | |
4124 | pp->bm_priv = NULL; | |
4125 | } | |
4126 | } | |
4127 | ||
9672850b EG |
4128 | err = mvneta_init(&pdev->dev, pp); |
4129 | if (err < 0) | |
dc35a10f | 4130 | goto err_netdev; |
3f1dd4bc TP |
4131 | |
4132 | err = mvneta_port_power_up(pp, phy_mode); | |
4133 | if (err < 0) { | |
4134 | dev_err(&pdev->dev, "can't power up port\n"); | |
dc35a10f | 4135 | goto err_netdev; |
3f1dd4bc | 4136 | } |
c5aff182 | 4137 | |
12bb03b4 MR |
4138 | for_each_present_cpu(cpu) { |
4139 | struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu); | |
4140 | ||
4141 | netif_napi_add(dev, &port->napi, mvneta_poll, NAPI_POLL_WEIGHT); | |
4142 | port->pp = pp; | |
4143 | } | |
c5aff182 | 4144 | |
2adb719d | 4145 | dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO; |
01ef26ca EG |
4146 | dev->hw_features |= dev->features; |
4147 | dev->vlan_features |= dev->features; | |
928b6519 | 4148 | dev->priv_flags |= IFF_UNICAST_FLT | IFF_LIVE_ADDR_CHANGE; |
8eef5f97 | 4149 | dev->gso_max_segs = MVNETA_MAX_TSO_SEGS; |
b50b72de | 4150 | |
c5aff182 TP |
4151 | err = register_netdev(dev); |
4152 | if (err < 0) { | |
4153 | dev_err(&pdev->dev, "failed to register\n"); | |
9672850b | 4154 | goto err_free_stats; |
c5aff182 TP |
4155 | } |
4156 | ||
8cc3e439 TP |
4157 | netdev_info(dev, "Using %s mac address %pM\n", mac_from, |
4158 | dev->dev_addr); | |
c5aff182 TP |
4159 | |
4160 | platform_set_drvdata(pdev, pp->dev); | |
4161 | ||
898b2970 SS |
4162 | if (pp->use_inband_status) { |
4163 | struct phy_device *phy = of_phy_find_device(dn); | |
4164 | ||
4165 | mvneta_fixed_link_update(pp, phy); | |
04d53b20 | 4166 | |
e5a03bfd | 4167 | put_device(&phy->mdio.dev); |
898b2970 SS |
4168 | } |
4169 | ||
c5aff182 TP |
4170 | return 0; |
4171 | ||
dc35a10f MW |
4172 | err_netdev: |
4173 | /* the netdev is not registered yet at this point, so there is nothing to unregister */ | |
4174 | if (pp->bm_priv) { | |
4175 | mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id); | |
4176 | mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_short, | |
4177 | 1 << pp->id); | |
4178 | } | |
74c41b04 | 4179 | err_free_stats: |
4180 | free_percpu(pp->stats); | |
12bb03b4 MR |
4181 | err_free_ports: |
4182 | free_percpu(pp->ports); | |
5445eaf3 | 4183 | err_clk: |
15cc4a4a | 4184 | clk_disable_unprepare(pp->clk_bus); |
5445eaf3 | 4185 | clk_disable_unprepare(pp->clk); |
c891c24c UKK |
4186 | err_put_phy_node: |
4187 | of_node_put(phy_node); | |
c5aff182 TP |
4188 | err_free_irq: |
4189 | irq_dispose_mapping(dev->irq); | |
4190 | err_free_netdev: | |
4191 | free_netdev(dev); | |
4192 | return err; | |
4193 | } | |
4194 | ||
4195 | /* Device removal routine */ | |
03ce758e | 4196 | static int mvneta_remove(struct platform_device *pdev) |
c5aff182 TP |
4197 | { |
4198 | struct net_device *dev = platform_get_drvdata(pdev); | |
4199 | struct mvneta_port *pp = netdev_priv(dev); | |
4200 | ||
4201 | unregister_netdev(dev); | |
15cc4a4a | 4202 | clk_disable_unprepare(pp->clk_bus); |
189dd626 | 4203 | clk_disable_unprepare(pp->clk); |
12bb03b4 | 4204 | free_percpu(pp->ports); |
74c41b04 | 4205 | free_percpu(pp->stats); |
c5aff182 | 4206 | irq_dispose_mapping(dev->irq); |
c891c24c | 4207 | of_node_put(pp->phy_node); |
c5aff182 TP |
4208 | /* Destroy the BM pools before freeing the netdev: pp lives in its private area */ | |
4209 | if (pp->bm_priv) { | |
4210 | mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id); | |
4211 | mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_short, | |
4212 | 1 << pp->id); | |
4213 | } | |
4214 | free_netdev(dev); | |
4215 | ||
c5aff182 TP |
4216 | return 0; |
4217 | } | |
4218 | ||
4219 | static const struct of_device_id mvneta_match[] = { | |
4220 | { .compatible = "marvell,armada-370-neta" }, | |
f522a975 | 4221 | { .compatible = "marvell,armada-xp-neta" }, |
c5aff182 TP |
4222 | { } |
4223 | }; | |
4224 | MODULE_DEVICE_TABLE(of, mvneta_match); | |
4225 | ||
4226 | static struct platform_driver mvneta_driver = { | |
4227 | .probe = mvneta_probe, | |
03ce758e | 4228 | .remove = mvneta_remove, |
c5aff182 TP |
4229 | .driver = { |
4230 | .name = MVNETA_DRIVER_NAME, | |
4231 | .of_match_table = mvneta_match, | |
4232 | }, | |
4233 | }; | |
4234 | ||
4235 | module_platform_driver(mvneta_driver); | |
4236 | ||
4237 | MODULE_DESCRIPTION("Marvell NETA Ethernet Driver - www.marvell.com"); | |
4238 | MODULE_AUTHOR("Rami Rosen <rosenr@marvell.com>, Thomas Petazzoni <thomas.petazzoni@free-electrons.com>"); | |
4239 | MODULE_LICENSE("GPL"); | |
4240 | ||
4241 | module_param(rxq_number, int, S_IRUGO); | |
4242 | module_param(txq_number, int, S_IRUGO); | |
4243 | ||
4244 | module_param(rxq_def, int, S_IRUGO); | |
f19fadfc | 4245 | module_param(rx_copybreak, int, S_IRUGO | S_IWUSR); |