// SPDX-License-Identifier: GPL-2.0-only
/*
 * Cadence MACB/GEM Ethernet Controller driver
 *
 * Copyright (C) 2004-2006 Atmel Corporation
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/crc32.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/circ_buf.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/gpio.h>
#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/phylink.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/ip.h>
#include <linux/udp.h>
#include <linux/tcp.h>
#include <linux/iopoll.h>
#include <linux/phy/phy.h>
#include <linux/pm_runtime.h>
#include <linux/ptp_classify.h>
#include <linux/reset.h>
#include "macb.h"

/* This structure is only used for MACB on SiFive FU540 devices */
struct sifive_fu540_macb_mgmt {
	void __iomem *reg;
	unsigned long rate;
	struct clk_hw hw;
};

#define MACB_RX_BUFFER_SIZE	128
#define RX_BUFFER_MULTIPLE	64 /* bytes */

#define DEFAULT_RX_RING_SIZE	512 /* must be power of 2 */
#define MIN_RX_RING_SIZE	64
#define MAX_RX_RING_SIZE	8192
#define RX_RING_BYTES(bp)	(macb_dma_desc_get_size(bp)	\
				 * (bp)->rx_ring_size)

#define DEFAULT_TX_RING_SIZE	512 /* must be power of 2 */
#define MIN_TX_RING_SIZE	64
#define MAX_TX_RING_SIZE	4096
#define TX_RING_BYTES(bp)	(macb_dma_desc_get_size(bp)	\
				 * (bp)->tx_ring_size)

/* level of occupied TX descriptors under which we wake up TX process */
#define MACB_TX_WAKEUP_THRESH(bp)	(3 * (bp)->tx_ring_size / 4)

#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(ISR_ROVR))
#define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)			\
					| MACB_BIT(ISR_RLE)		\
					| MACB_BIT(TXERR))
#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP)	\
					| MACB_BIT(TXUBR))

/* Max length of transmit frame must be a multiple of 8 bytes */
#define MACB_TX_LEN_ALIGN	8
#define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1) & ~((unsigned int)(MACB_TX_LEN_ALIGN - 1)))
/* Limit maximum TX length as per Cadence TSO errata. This is to avoid a
 * false amba_error in TX path from the DMA assuming there is not enough
 * space in the SRAM (16KB) even when there is.
 */
#define GEM_MAX_TX_LEN		(unsigned int)(0x3FC0)

#define GEM_MTU_MIN_SIZE	ETH_MIN_MTU
#define MACB_NETIF_LSO		NETIF_F_TSO

#define MACB_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
#define MACB_WOL_ENABLED		(0x1 << 1)

#define HS_SPEED_10000M			4
#define MACB_SERDES_RATE_10G		1

/* Graceful stop timeouts in us. We should allow up to
 * 1 frame time (10 Mbits/s, full-duplex, ignoring collisions)
 */
#define MACB_HALT_TIMEOUT	1230

#define MACB_PM_TIMEOUT		100 /* ms */

#define MACB_MDIO_TIMEOUT	1000000 /* in usecs */

/* The DMA buffer descriptor size depends on the hardware configuration:
 *
 * 1. dma address width 32 bits:
 *    word 1: 32 bit address of Data Buffer
 *    word 2: control
 *
 * 2. dma address width 64 bits:
 *    word 1: 32 bit address of Data Buffer
 *    word 2: control
 *    word 3: upper 32 bit address of Data Buffer
 *    word 4: unused
 *
 * 3. dma address width 32 bits with hardware timestamping:
 *    word 1: 32 bit address of Data Buffer
 *    word 2: control
 *    word 3: timestamp word 1
 *    word 4: timestamp word 2
 *
 * 4. dma address width 64 bits with hardware timestamping:
 *    word 1: 32 bit address of Data Buffer
 *    word 2: control
 *    word 3: upper 32 bit address of Data Buffer
 *    word 4: unused
 *    word 5: timestamp word 1
 *    word 6: timestamp word 2
 */
static unsigned int macb_dma_desc_get_size(struct macb *bp)
{
#ifdef MACB_EXT_DESC
	unsigned int desc_size;

	switch (bp->hw_dma_cap) {
	case HW_DMA_CAP_64B:
		desc_size = sizeof(struct macb_dma_desc)
			+ sizeof(struct macb_dma_desc_64);
		break;
	case HW_DMA_CAP_PTP:
		desc_size = sizeof(struct macb_dma_desc)
			+ sizeof(struct macb_dma_desc_ptp);
		break;
	case HW_DMA_CAP_64B_PTP:
		desc_size = sizeof(struct macb_dma_desc)
			+ sizeof(struct macb_dma_desc_64)
			+ sizeof(struct macb_dma_desc_ptp);
		break;
	default:
		desc_size = sizeof(struct macb_dma_desc);
	}
	return desc_size;
#endif
	return sizeof(struct macb_dma_desc);
}

static unsigned int macb_adj_dma_desc_idx(struct macb *bp, unsigned int desc_idx)
{
#ifdef MACB_EXT_DESC
	switch (bp->hw_dma_cap) {
	case HW_DMA_CAP_64B:
	case HW_DMA_CAP_PTP:
		desc_idx <<= 1;
		break;
	case HW_DMA_CAP_64B_PTP:
		desc_idx *= 3;
		break;
	default:
		break;
	}
#endif
	return desc_idx;
}
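
/* Example: with HW_DMA_CAP_64B_PTP each logical descriptor occupies the
 * space of three base descriptors (six 32-bit words per the layout above),
 * so ring index n maps to base-descriptor slot 3 * n; with only one of the
 * 64-bit or PTP extensions the factor is two, hence the shift by one.
 */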

#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
static struct macb_dma_desc_64 *macb_64b_desc(struct macb *bp, struct macb_dma_desc *desc)
{
	return (struct macb_dma_desc_64 *)((void *)desc
		+ sizeof(struct macb_dma_desc));
}
#endif

/* Ring buffer accessors */
static unsigned int macb_tx_ring_wrap(struct macb *bp, unsigned int index)
{
	return index & (bp->tx_ring_size - 1);
}

static struct macb_dma_desc *macb_tx_desc(struct macb_queue *queue,
					  unsigned int index)
{
	index = macb_tx_ring_wrap(queue->bp, index);
	index = macb_adj_dma_desc_idx(queue->bp, index);
	return &queue->tx_ring[index];
}

static struct macb_tx_skb *macb_tx_skb(struct macb_queue *queue,
				       unsigned int index)
{
	return &queue->tx_skb[macb_tx_ring_wrap(queue->bp, index)];
}

static dma_addr_t macb_tx_dma(struct macb_queue *queue, unsigned int index)
{
	dma_addr_t offset;

	offset = macb_tx_ring_wrap(queue->bp, index) *
		 macb_dma_desc_get_size(queue->bp);

	return queue->tx_ring_dma + offset;
}

static unsigned int macb_rx_ring_wrap(struct macb *bp, unsigned int index)
{
	return index & (bp->rx_ring_size - 1);
}

static struct macb_dma_desc *macb_rx_desc(struct macb_queue *queue, unsigned int index)
{
	index = macb_rx_ring_wrap(queue->bp, index);
	index = macb_adj_dma_desc_idx(queue->bp, index);
	return &queue->rx_ring[index];
}

static void *macb_rx_buffer(struct macb_queue *queue, unsigned int index)
{
	return queue->rx_buffers + queue->bp->rx_buffer_size *
	       macb_rx_ring_wrap(queue->bp, index);
}
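
/* Note: the ring sizes are powers of two (see DEFAULT_{RX,TX}_RING_SIZE),
 * so the wrap helpers above reduce the free-running head/tail counters to
 * a ring index with a simple mask, e.g. 513 & (512 - 1) == 1.
 */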

/* I/O accessors */
static u32 hw_readl_native(struct macb *bp, int offset)
{
	return __raw_readl(bp->regs + offset);
}

static void hw_writel_native(struct macb *bp, int offset, u32 value)
{
	__raw_writel(value, bp->regs + offset);
}

static u32 hw_readl(struct macb *bp, int offset)
{
	return readl_relaxed(bp->regs + offset);
}

static void hw_writel(struct macb *bp, int offset, u32 value)
{
	writel_relaxed(value, bp->regs + offset);
}

/* Find the CPU endianness by using the loopback bit of the NCR register.
 * When the CPU is in big endian we need to program swapped mode for
 * management descriptor access.
 */
static bool hw_is_native_io(void __iomem *addr)
{
	u32 value = MACB_BIT(LLB);

	__raw_writel(value, addr + MACB_NCR);
	value = __raw_readl(addr + MACB_NCR);

	/* Write 0 back to disable everything */
	__raw_writel(0, addr + MACB_NCR);

	return value == MACB_BIT(LLB);
}
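
/* GEM and MACB variants share a register layout; the module ID field of
 * the MID register tells them apart (IDNUM >= 0x2 identifies a GEM).
 */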
static bool hw_is_gem(void __iomem *addr, bool native_io)
{
	u32 id;

	if (native_io)
		id = __raw_readl(addr + MACB_MID);
	else
		id = readl_relaxed(addr + MACB_MID);

	return MACB_BFEXT(IDNUM, id) >= 0x2;
}

static void macb_set_hwaddr(struct macb *bp)
{
	u32 bottom;
	u16 top;

	bottom = cpu_to_le32(*((u32 *)bp->dev->dev_addr));
	macb_or_gem_writel(bp, SA1B, bottom);
	top = cpu_to_le16(*((u16 *)(bp->dev->dev_addr + 4)));
	macb_or_gem_writel(bp, SA1T, top);

	/* Clear unused address register sets */
	macb_or_gem_writel(bp, SA2B, 0);
	macb_or_gem_writel(bp, SA2T, 0);
	macb_or_gem_writel(bp, SA3B, 0);
	macb_or_gem_writel(bp, SA3T, 0);
	macb_or_gem_writel(bp, SA4B, 0);
	macb_or_gem_writel(bp, SA4T, 0);
}

static void macb_get_hwaddr(struct macb *bp)
{
	u32 bottom;
	u16 top;
	u8 addr[6];
	int i;

	/* Check all 4 address registers for a valid address */
	for (i = 0; i < 4; i++) {
		bottom = macb_or_gem_readl(bp, SA1B + i * 8);
		top = macb_or_gem_readl(bp, SA1T + i * 8);

		addr[0] = bottom & 0xff;
		addr[1] = (bottom >> 8) & 0xff;
		addr[2] = (bottom >> 16) & 0xff;
		addr[3] = (bottom >> 24) & 0xff;
		addr[4] = top & 0xff;
		addr[5] = (top >> 8) & 0xff;

		if (is_valid_ether_addr(addr)) {
			eth_hw_addr_set(bp->dev, addr);
			return;
		}
	}

	dev_info(&bp->pdev->dev, "invalid hw address, using random\n");
	eth_hw_addr_random(bp->dev);
}

static int macb_mdio_wait_for_idle(struct macb *bp)
{
	u32 val;

	return readx_poll_timeout(MACB_READ_NSR, bp, val, val & MACB_BIT(IDLE),
				  1, MACB_MDIO_TIMEOUT);
}
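
/* The MDIO read/write paths below take a runtime PM reference and wait for
 * the PHY management logic to go idle around each transfer. Clause 45
 * accesses need two MAN register writes: an address cycle carrying the
 * register number, then the actual read or write data cycle.
 */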
static int macb_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
{
	struct macb *bp = bus->priv;
	int status;

	status = pm_runtime_resume_and_get(&bp->pdev->dev);
	if (status < 0)
		goto mdio_pm_exit;

	status = macb_mdio_wait_for_idle(bp);
	if (status < 0)
		goto mdio_read_exit;

	if (regnum & MII_ADDR_C45) {
		macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C45_SOF)
			    | MACB_BF(RW, MACB_MAN_C45_ADDR)
			    | MACB_BF(PHYA, mii_id)
			    | MACB_BF(REGA, (regnum >> 16) & 0x1F)
			    | MACB_BF(DATA, regnum & 0xFFFF)
			    | MACB_BF(CODE, MACB_MAN_C45_CODE)));

		status = macb_mdio_wait_for_idle(bp);
		if (status < 0)
			goto mdio_read_exit;

		macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C45_SOF)
			    | MACB_BF(RW, MACB_MAN_C45_READ)
			    | MACB_BF(PHYA, mii_id)
			    | MACB_BF(REGA, (regnum >> 16) & 0x1F)
			    | MACB_BF(CODE, MACB_MAN_C45_CODE)));
	} else {
		macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C22_SOF)
			    | MACB_BF(RW, MACB_MAN_C22_READ)
			    | MACB_BF(PHYA, mii_id)
			    | MACB_BF(REGA, regnum)
			    | MACB_BF(CODE, MACB_MAN_C22_CODE)));
	}

	status = macb_mdio_wait_for_idle(bp);
	if (status < 0)
		goto mdio_read_exit;

	status = MACB_BFEXT(DATA, macb_readl(bp, MAN));

mdio_read_exit:
	pm_runtime_mark_last_busy(&bp->pdev->dev);
	pm_runtime_put_autosuspend(&bp->pdev->dev);
mdio_pm_exit:
	return status;
}

static int macb_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
			   u16 value)
{
	struct macb *bp = bus->priv;
	int status;

	status = pm_runtime_resume_and_get(&bp->pdev->dev);
	if (status < 0)
		goto mdio_pm_exit;

	status = macb_mdio_wait_for_idle(bp);
	if (status < 0)
		goto mdio_write_exit;

	if (regnum & MII_ADDR_C45) {
		macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C45_SOF)
			    | MACB_BF(RW, MACB_MAN_C45_ADDR)
			    | MACB_BF(PHYA, mii_id)
			    | MACB_BF(REGA, (regnum >> 16) & 0x1F)
			    | MACB_BF(DATA, regnum & 0xFFFF)
			    | MACB_BF(CODE, MACB_MAN_C45_CODE)));

		status = macb_mdio_wait_for_idle(bp);
		if (status < 0)
			goto mdio_write_exit;

		macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C45_SOF)
			    | MACB_BF(RW, MACB_MAN_C45_WRITE)
			    | MACB_BF(PHYA, mii_id)
			    | MACB_BF(REGA, (regnum >> 16) & 0x1F)
			    | MACB_BF(CODE, MACB_MAN_C45_CODE)
			    | MACB_BF(DATA, value)));
	} else {
		macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C22_SOF)
			    | MACB_BF(RW, MACB_MAN_C22_WRITE)
			    | MACB_BF(PHYA, mii_id)
			    | MACB_BF(REGA, regnum)
			    | MACB_BF(CODE, MACB_MAN_C22_CODE)
			    | MACB_BF(DATA, value)));
	}

	status = macb_mdio_wait_for_idle(bp);
	if (status < 0)
		goto mdio_write_exit;

mdio_write_exit:
	pm_runtime_mark_last_busy(&bp->pdev->dev);
	pm_runtime_put_autosuspend(&bp->pdev->dev);
mdio_pm_exit:
	return status;
}

static void macb_init_buffers(struct macb *bp)
{
	struct macb_queue *queue;
	unsigned int q;

	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
		queue_writel(queue, RBQP, lower_32_bits(queue->rx_ring_dma));
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
		if (bp->hw_dma_cap & HW_DMA_CAP_64B)
			queue_writel(queue, RBQPH,
				     upper_32_bits(queue->rx_ring_dma));
#endif
		queue_writel(queue, TBQP, lower_32_bits(queue->tx_ring_dma));
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
		if (bp->hw_dma_cap & HW_DMA_CAP_64B)
			queue_writel(queue, TBQPH,
				     upper_32_bits(queue->tx_ring_dma));
#endif
	}
}

/**
 * macb_set_tx_clk() - Set a clock to a new frequency
 * @bp: pointer to struct macb
 * @speed: New frequency in Hz
 */
static void macb_set_tx_clk(struct macb *bp, int speed)
{
	long ferr, rate, rate_rounded;

	if (!bp->tx_clk || (bp->caps & MACB_CAPS_CLK_HW_CHG))
		return;

	/* In case of MII the PHY is the clock master */
	if (bp->phy_interface == PHY_INTERFACE_MODE_MII)
		return;

	switch (speed) {
	case SPEED_10:
		rate = 2500000;
		break;
	case SPEED_100:
		rate = 25000000;
		break;
	case SPEED_1000:
		rate = 125000000;
		break;
	default:
		return;
	}

	rate_rounded = clk_round_rate(bp->tx_clk, rate);
	if (rate_rounded < 0)
		return;

	/* RGMII allows 50 ppm frequency error. Test and warn if this limit
	 * is not satisfied.
	 */
	ferr = abs(rate_rounded - rate);
	ferr = DIV_ROUND_UP(ferr, rate / 100000);
	if (ferr > 5)
		netdev_warn(bp->dev,
			    "unable to generate target frequency: %ld Hz\n",
			    rate);

	if (clk_set_rate(bp->tx_clk, rate_rounded))
		netdev_err(bp->dev, "adjusting tx_clk failed.\n");
}
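
/* Note on macb_set_tx_clk(): ferr is computed in steps of rate / 100000,
 * i.e. units of 10 ppm, so the warning above fires when the achievable
 * rate deviates by more than 50 ppm, matching the RGMII tolerance.
 */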

static void macb_usx_pcs_link_up(struct phylink_pcs *pcs, unsigned int mode,
				 phy_interface_t interface, int speed,
				 int duplex)
{
	struct macb *bp = container_of(pcs, struct macb, phylink_usx_pcs);
	u32 config;

	config = gem_readl(bp, USX_CONTROL);
	config = GEM_BFINS(SERDES_RATE, MACB_SERDES_RATE_10G, config);
	config = GEM_BFINS(USX_CTRL_SPEED, HS_SPEED_10000M, config);
	config &= ~(GEM_BIT(TX_SCR_BYPASS) | GEM_BIT(RX_SCR_BYPASS));
	config |= GEM_BIT(TX_EN);
	gem_writel(bp, USX_CONTROL, config);
}

static void macb_usx_pcs_get_state(struct phylink_pcs *pcs,
				   struct phylink_link_state *state)
{
	struct macb *bp = container_of(pcs, struct macb, phylink_usx_pcs);
	u32 val;

	state->speed = SPEED_10000;
	state->duplex = 1;
	state->an_complete = 1;

	val = gem_readl(bp, USX_STATUS);
	state->link = !!(val & GEM_BIT(USX_BLOCK_LOCK));
	val = gem_readl(bp, NCFGR);
	if (val & GEM_BIT(PAE))
		state->pause = MLO_PAUSE_RX;
}

static int macb_usx_pcs_config(struct phylink_pcs *pcs,
			       unsigned int mode,
			       phy_interface_t interface,
			       const unsigned long *advertising,
			       bool permit_pause_to_mac)
{
	struct macb *bp = container_of(pcs, struct macb, phylink_usx_pcs);

	gem_writel(bp, USX_CONTROL, gem_readl(bp, USX_CONTROL) |
		   GEM_BIT(SIGNAL_OK));

	return 0;
}

static void macb_pcs_get_state(struct phylink_pcs *pcs,
			       struct phylink_link_state *state)
{
	state->link = 0;
}

static void macb_pcs_an_restart(struct phylink_pcs *pcs)
{
	/* Not supported */
}

static int macb_pcs_config(struct phylink_pcs *pcs,
			   unsigned int mode,
			   phy_interface_t interface,
			   const unsigned long *advertising,
			   bool permit_pause_to_mac)
{
	return 0;
}

static const struct phylink_pcs_ops macb_phylink_usx_pcs_ops = {
	.pcs_get_state = macb_usx_pcs_get_state,
	.pcs_config = macb_usx_pcs_config,
	.pcs_link_up = macb_usx_pcs_link_up,
};

static const struct phylink_pcs_ops macb_phylink_pcs_ops = {
	.pcs_get_state = macb_pcs_get_state,
	.pcs_an_restart = macb_pcs_an_restart,
	.pcs_config = macb_pcs_config,
};
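
/* macb_mac_select_pcs() below hands out the USX ops for 10GBASE-R links
 * and the plain ops for SGMII; all other interface modes run without a PCS.
 */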

static void macb_mac_config(struct phylink_config *config, unsigned int mode,
			    const struct phylink_link_state *state)
{
	struct net_device *ndev = to_net_dev(config->dev);
	struct macb *bp = netdev_priv(ndev);
	unsigned long flags;
	u32 old_ctrl, ctrl;
	u32 old_ncr, ncr;

	spin_lock_irqsave(&bp->lock, flags);

	old_ctrl = ctrl = macb_or_gem_readl(bp, NCFGR);
	old_ncr = ncr = macb_or_gem_readl(bp, NCR);

	if (bp->caps & MACB_CAPS_MACB_IS_EMAC) {
		if (state->interface == PHY_INTERFACE_MODE_RMII)
			ctrl |= MACB_BIT(RM9200_RMII);
	} else if (macb_is_gem(bp)) {
		ctrl &= ~(GEM_BIT(SGMIIEN) | GEM_BIT(PCSSEL));
		ncr &= ~GEM_BIT(ENABLE_HS_MAC);

		if (state->interface == PHY_INTERFACE_MODE_SGMII) {
			ctrl |= GEM_BIT(SGMIIEN) | GEM_BIT(PCSSEL);
		} else if (state->interface == PHY_INTERFACE_MODE_10GBASER) {
			ctrl |= GEM_BIT(PCSSEL);
			ncr |= GEM_BIT(ENABLE_HS_MAC);
		} else if (bp->caps & MACB_CAPS_MIIONRGMII &&
			   bp->phy_interface == PHY_INTERFACE_MODE_MII) {
			ncr |= MACB_BIT(MIIONRGMII);
		}
	}

	/* Apply the new configuration, if any */
	if (old_ctrl ^ ctrl)
		macb_or_gem_writel(bp, NCFGR, ctrl);

	if (old_ncr ^ ncr)
		macb_or_gem_writel(bp, NCR, ncr);

	/* Disable AN for SGMII fixed link configuration, enable otherwise.
	 * Must be written after PCSSEL is set in NCFGR,
	 * otherwise writes will not take effect.
	 */
	if (macb_is_gem(bp) && state->interface == PHY_INTERFACE_MODE_SGMII) {
		u32 pcsctrl, old_pcsctrl;

		old_pcsctrl = gem_readl(bp, PCSCNTRL);
		if (mode == MLO_AN_FIXED)
			pcsctrl = old_pcsctrl & ~GEM_BIT(PCSAUTONEG);
		else
			pcsctrl = old_pcsctrl | GEM_BIT(PCSAUTONEG);
		if (old_pcsctrl != pcsctrl)
			gem_writel(bp, PCSCNTRL, pcsctrl);
	}

	spin_unlock_irqrestore(&bp->lock, flags);
}

static void macb_mac_link_down(struct phylink_config *config, unsigned int mode,
			       phy_interface_t interface)
{
	struct net_device *ndev = to_net_dev(config->dev);
	struct macb *bp = netdev_priv(ndev);
	struct macb_queue *queue;
	unsigned int q;
	u32 ctrl;

	if (!(bp->caps & MACB_CAPS_MACB_IS_EMAC))
		for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
			queue_writel(queue, IDR,
				     bp->rx_intr_mask | MACB_TX_INT_FLAGS | MACB_BIT(HRESP));

	/* Disable Rx and Tx */
	ctrl = macb_readl(bp, NCR) & ~(MACB_BIT(RE) | MACB_BIT(TE));
	macb_writel(bp, NCR, ctrl);

	netif_tx_stop_all_queues(ndev);
}

static void macb_mac_link_up(struct phylink_config *config,
			     struct phy_device *phy,
			     unsigned int mode, phy_interface_t interface,
			     int speed, int duplex,
			     bool tx_pause, bool rx_pause)
{
	struct net_device *ndev = to_net_dev(config->dev);
	struct macb *bp = netdev_priv(ndev);
	struct macb_queue *queue;
	unsigned long flags;
	unsigned int q;
	u32 ctrl;

	spin_lock_irqsave(&bp->lock, flags);

	ctrl = macb_or_gem_readl(bp, NCFGR);

	ctrl &= ~(MACB_BIT(SPD) | MACB_BIT(FD));

	if (speed == SPEED_100)
		ctrl |= MACB_BIT(SPD);

	if (duplex)
		ctrl |= MACB_BIT(FD);

	if (!(bp->caps & MACB_CAPS_MACB_IS_EMAC)) {
		ctrl &= ~MACB_BIT(PAE);
		if (macb_is_gem(bp)) {
			ctrl &= ~GEM_BIT(GBE);

			if (speed == SPEED_1000)
				ctrl |= GEM_BIT(GBE);
		}

		if (rx_pause)
			ctrl |= MACB_BIT(PAE);

		macb_set_tx_clk(bp, speed);

		/* Initialize rings & buffers as clearing MACB_BIT(TE) in link down
		 * cleared the pipeline and control registers.
		 */
		bp->macbgem_ops.mog_init_rings(bp);
		macb_init_buffers(bp);

		for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
			queue_writel(queue, IER,
				     bp->rx_intr_mask | MACB_TX_INT_FLAGS | MACB_BIT(HRESP));
	}

	macb_or_gem_writel(bp, NCFGR, ctrl);

	if (bp->phy_interface == PHY_INTERFACE_MODE_10GBASER)
		gem_writel(bp, HS_MAC_CONFIG, GEM_BFINS(HS_MAC_SPEED, HS_SPEED_10000M,
							gem_readl(bp, HS_MAC_CONFIG)));

	spin_unlock_irqrestore(&bp->lock, flags);

	/* Enable Rx and Tx */
	macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(RE) | MACB_BIT(TE));

	netif_tx_wake_all_queues(ndev);
}

static struct phylink_pcs *macb_mac_select_pcs(struct phylink_config *config,
					       phy_interface_t interface)
{
	struct net_device *ndev = to_net_dev(config->dev);
	struct macb *bp = netdev_priv(ndev);

	if (interface == PHY_INTERFACE_MODE_10GBASER)
		return &bp->phylink_usx_pcs;
	else if (interface == PHY_INTERFACE_MODE_SGMII)
		return &bp->phylink_sgmii_pcs;
	else
		return NULL;
}

static const struct phylink_mac_ops macb_phylink_ops = {
	.validate = phylink_generic_validate,
	.mac_select_pcs = macb_mac_select_pcs,
	.mac_config = macb_mac_config,
	.mac_link_down = macb_mac_link_down,
	.mac_link_up = macb_mac_link_up,
};

static bool macb_phy_handle_exists(struct device_node *dn)
{
	dn = of_parse_phandle(dn, "phy-handle", 0);
	of_node_put(dn);
	return dn != NULL;
}
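
/* Attach the MAC to its PHY: prefer the DT phy-handle if one is given;
 * otherwise (or if connecting fails without an explicit phy-handle) fall
 * back to the first PHY found on the MDIO bus.
 */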
static int macb_phylink_connect(struct macb *bp)
{
	struct device_node *dn = bp->pdev->dev.of_node;
	struct net_device *dev = bp->dev;
	struct phy_device *phydev;
	int ret;

	if (dn)
		ret = phylink_of_phy_connect(bp->phylink, dn, 0);

	if (!dn || (ret && !macb_phy_handle_exists(dn))) {
		phydev = phy_find_first(bp->mii_bus);
		if (!phydev) {
			netdev_err(dev, "no PHY found\n");
			return -ENXIO;
		}

		/* attach the mac to the phy */
		ret = phylink_connect_phy(bp->phylink, phydev);
	}

	if (ret) {
		netdev_err(dev, "Could not attach PHY (%d)\n", ret);
		return ret;
	}

	phylink_start(bp->phylink);

	return 0;
}

static void macb_get_pcs_fixed_state(struct phylink_config *config,
				     struct phylink_link_state *state)
{
	struct net_device *ndev = to_net_dev(config->dev);
	struct macb *bp = netdev_priv(ndev);

	state->link = (macb_readl(bp, NSR) & MACB_BIT(NSR_LINK)) != 0;
}

/* based on au1000_eth.c */
static int macb_mii_probe(struct net_device *dev)
{
	struct macb *bp = netdev_priv(dev);

	bp->phylink_sgmii_pcs.ops = &macb_phylink_pcs_ops;
	bp->phylink_usx_pcs.ops = &macb_phylink_usx_pcs_ops;

	bp->phylink_config.dev = &dev->dev;
	bp->phylink_config.type = PHYLINK_NETDEV;

	if (bp->phy_interface == PHY_INTERFACE_MODE_SGMII) {
		bp->phylink_config.poll_fixed_state = true;
		bp->phylink_config.get_fixed_state = macb_get_pcs_fixed_state;
	}

	bp->phylink_config.mac_capabilities = MAC_ASYM_PAUSE |
		MAC_10 | MAC_100;

	__set_bit(PHY_INTERFACE_MODE_MII,
		  bp->phylink_config.supported_interfaces);
	__set_bit(PHY_INTERFACE_MODE_RMII,
		  bp->phylink_config.supported_interfaces);

	/* Determine what modes are supported */
	if (macb_is_gem(bp) && (bp->caps & MACB_CAPS_GIGABIT_MODE_AVAILABLE)) {
		bp->phylink_config.mac_capabilities |= MAC_1000FD;
		if (!(bp->caps & MACB_CAPS_NO_GIGABIT_HALF))
			bp->phylink_config.mac_capabilities |= MAC_1000HD;

		__set_bit(PHY_INTERFACE_MODE_GMII,
			  bp->phylink_config.supported_interfaces);
		phy_interface_set_rgmii(bp->phylink_config.supported_interfaces);

		if (bp->caps & MACB_CAPS_PCS)
			__set_bit(PHY_INTERFACE_MODE_SGMII,
				  bp->phylink_config.supported_interfaces);

		if (bp->caps & MACB_CAPS_HIGH_SPEED) {
			__set_bit(PHY_INTERFACE_MODE_10GBASER,
				  bp->phylink_config.supported_interfaces);
			bp->phylink_config.mac_capabilities |= MAC_10000FD;
		}
	}

	bp->phylink = phylink_create(&bp->phylink_config, bp->pdev->dev.fwnode,
				     bp->phy_interface, &macb_phylink_ops);
	if (IS_ERR(bp->phylink)) {
		netdev_err(dev, "Could not create a phylink instance (%ld)\n",
			   PTR_ERR(bp->phylink));
		return PTR_ERR(bp->phylink);
	}

	return 0;
}

static int macb_mdiobus_register(struct macb *bp)
{
	struct device_node *child, *np = bp->pdev->dev.of_node;

	/* If we have a child named mdio, probe it instead of looking for PHYs
	 * directly under the MAC node
	 */
	child = of_get_child_by_name(np, "mdio");
	if (child) {
		int ret = of_mdiobus_register(bp->mii_bus, child);

		of_node_put(child);
		return ret;
	}

	if (of_phy_is_fixed_link(np))
		return mdiobus_register(bp->mii_bus);

	/* Only create the PHY from the device tree if at least one PHY is
	 * described. Otherwise scan the entire MDIO bus. We do this to support
	 * old device trees that did not follow the best practices and did not
	 * describe their network PHYs.
	 */
	for_each_available_child_of_node(np, child)
		if (of_mdiobus_child_is_phy(child)) {
			/* The loop increments the child refcount,
			 * decrement it before returning.
			 */
			of_node_put(child);

			return of_mdiobus_register(bp->mii_bus, np);
		}

	return mdiobus_register(bp->mii_bus);
}

static int macb_mii_init(struct macb *bp)
{
	int err = -ENXIO;

	/* Enable management port */
	macb_writel(bp, NCR, MACB_BIT(MPE));

	bp->mii_bus = mdiobus_alloc();
	if (!bp->mii_bus) {
		err = -ENOMEM;
		goto err_out;
	}

	bp->mii_bus->name = "MACB_mii_bus";
	bp->mii_bus->read = &macb_mdio_read;
	bp->mii_bus->write = &macb_mdio_write;
	snprintf(bp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
		 bp->pdev->name, bp->pdev->id);
	bp->mii_bus->priv = bp;
	bp->mii_bus->parent = &bp->pdev->dev;

	dev_set_drvdata(&bp->dev->dev, bp->mii_bus);

	err = macb_mdiobus_register(bp);
	if (err)
		goto err_out_free_mdiobus;

	err = macb_mii_probe(bp->dev);
	if (err)
		goto err_out_unregister_bus;

	return 0;

err_out_unregister_bus:
	mdiobus_unregister(bp->mii_bus);
err_out_free_mdiobus:
	mdiobus_free(bp->mii_bus);
err_out:
	return err;
}

static void macb_update_stats(struct macb *bp)
{
	u32 *p = &bp->hw_stats.macb.rx_pause_frames;
	u32 *end = &bp->hw_stats.macb.tx_pause_frames + 1;
	int offset = MACB_PFR;

	WARN_ON((unsigned long)(end - p - 1) != (MACB_TPF - MACB_PFR) / 4);

	for (; p < end; p++, offset += 4)
		*p += bp->macb_reg_readl(bp, offset);
}
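
/* Ask the hardware to stop transmission and poll TSR until the transmit-go
 * (TGO) bit clears; returns -ETIMEDOUT if the MAC has not halted within
 * MACB_HALT_TIMEOUT microseconds.
 */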
static int macb_halt_tx(struct macb *bp)
{
	unsigned long halt_time, timeout;
	u32 status;

	macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(THALT));

	timeout = jiffies + usecs_to_jiffies(MACB_HALT_TIMEOUT);
	do {
		halt_time = jiffies;
		status = macb_readl(bp, TSR);
		if (!(status & MACB_BIT(TGO)))
			return 0;

		udelay(250);
	} while (time_before(halt_time, timeout));

	return -ETIMEDOUT;
}

static void macb_tx_unmap(struct macb *bp, struct macb_tx_skb *tx_skb, int budget)
{
	if (tx_skb->mapping) {
		if (tx_skb->mapped_as_page)
			dma_unmap_page(&bp->pdev->dev, tx_skb->mapping,
				       tx_skb->size, DMA_TO_DEVICE);
		else
			dma_unmap_single(&bp->pdev->dev, tx_skb->mapping,
					 tx_skb->size, DMA_TO_DEVICE);
		tx_skb->mapping = 0;
	}

	if (tx_skb->skb) {
		napi_consume_skb(tx_skb->skb, budget);
		tx_skb->skb = NULL;
	}
}

static void macb_set_addr(struct macb *bp, struct macb_dma_desc *desc, dma_addr_t addr)
{
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
	struct macb_dma_desc_64 *desc_64;

	if (bp->hw_dma_cap & HW_DMA_CAP_64B) {
		desc_64 = macb_64b_desc(bp, desc);
		desc_64->addrh = upper_32_bits(addr);
		/* The low bits of RX address contain the RX_USED bit, clearing
		 * of which allows packet RX. Make sure the high bits are also
		 * visible to HW at that point.
		 */
		dma_wmb();
	}
#endif
	desc->addr = lower_32_bits(addr);
}

static dma_addr_t macb_get_addr(struct macb *bp, struct macb_dma_desc *desc)
{
	dma_addr_t addr = 0;
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
	struct macb_dma_desc_64 *desc_64;

	if (bp->hw_dma_cap & HW_DMA_CAP_64B) {
		desc_64 = macb_64b_desc(bp, desc);
		addr = ((u64)(desc_64->addrh) << 32);
	}
#endif
	addr |= MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, desc->addr));
	return addr;
}
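
/* Work item that recovers a queue from a TX error: it halts transmission,
 * reaps and frees every buffer still on the TX ring, reinitializes the
 * ring registers and then restarts the transmitter.
 */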
static void macb_tx_error_task(struct work_struct *work)
{
	struct macb_queue *queue = container_of(work, struct macb_queue,
						tx_error_task);
	struct macb *bp = queue->bp;
	struct macb_tx_skb *tx_skb;
	struct macb_dma_desc *desc;
	struct sk_buff *skb;
	unsigned int tail;
	unsigned long flags;

	netdev_vdbg(bp->dev, "macb_tx_error_task: q = %u, t = %u, h = %u\n",
		    (unsigned int)(queue - bp->queues),
		    queue->tx_tail, queue->tx_head);

	/* Prevent the queue NAPI TX poll from running, as it calls
	 * macb_tx_complete(), which in turn may call netif_wake_subqueue().
	 * As explained below, we have to halt the transmission before updating
	 * TBQP registers so we call netif_tx_stop_all_queues() to notify the
	 * network engine about the macb/gem being halted.
	 */
	napi_disable(&queue->napi_tx);
	spin_lock_irqsave(&bp->lock, flags);

	/* Make sure nobody is trying to queue up new packets */
	netif_tx_stop_all_queues(bp->dev);

	/* Stop transmission now
	 * (in case we have just queued new packets)
	 * macb/gem must be halted to write TBQP register
	 */
	if (macb_halt_tx(bp))
		/* Just complain for now, reinitializing TX path can be good */
		netdev_err(bp->dev, "BUG: halt tx timed out\n");

	/* Treat frames in TX queue including the ones that caused the error.
	 * Free transmit buffers in upper layer.
	 */
	for (tail = queue->tx_tail; tail != queue->tx_head; tail++) {
		u32 ctrl;

		desc = macb_tx_desc(queue, tail);
		ctrl = desc->ctrl;
		tx_skb = macb_tx_skb(queue, tail);
		skb = tx_skb->skb;

		if (ctrl & MACB_BIT(TX_USED)) {
			/* skb is set for the last buffer of the frame */
			while (!skb) {
				macb_tx_unmap(bp, tx_skb, 0);
				tail++;
				tx_skb = macb_tx_skb(queue, tail);
				skb = tx_skb->skb;
			}

			/* ctrl still refers to the first buffer descriptor
			 * since it's the only one written back by the hardware
			 */
			if (!(ctrl & MACB_BIT(TX_BUF_EXHAUSTED))) {
				netdev_vdbg(bp->dev, "txerr skb %u (data %p) TX complete\n",
					    macb_tx_ring_wrap(bp, tail),
					    skb->data);
				bp->dev->stats.tx_packets++;
				queue->stats.tx_packets++;
				bp->dev->stats.tx_bytes += skb->len;
				queue->stats.tx_bytes += skb->len;
			}
		} else {
			/* "Buffers exhausted mid-frame" errors may only happen
			 * if the driver is buggy, so complain loudly about
			 * those. Statistics are updated by hardware.
			 */
			if (ctrl & MACB_BIT(TX_BUF_EXHAUSTED))
				netdev_err(bp->dev,
					   "BUG: TX buffers exhausted mid-frame\n");

			desc->ctrl = ctrl | MACB_BIT(TX_USED);
		}

		macb_tx_unmap(bp, tx_skb, 0);
	}

	/* Set end of TX queue */
	desc = macb_tx_desc(queue, 0);
	macb_set_addr(bp, desc, 0);
	desc->ctrl = MACB_BIT(TX_USED);

	/* Make descriptor updates visible to hardware */
	wmb();

	/* Reinitialize the TX desc queue */
	queue_writel(queue, TBQP, lower_32_bits(queue->tx_ring_dma));
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
	if (bp->hw_dma_cap & HW_DMA_CAP_64B)
		queue_writel(queue, TBQPH, upper_32_bits(queue->tx_ring_dma));
#endif
	/* Make TX ring reflect state of hardware */
	queue->tx_head = 0;
	queue->tx_tail = 0;

	/* Housework before enabling TX IRQ */
	macb_writel(bp, TSR, macb_readl(bp, TSR));
	queue_writel(queue, IER, MACB_TX_INT_FLAGS);

	/* Now we are ready to start transmission again */
	netif_tx_start_all_queues(bp->dev);
	macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));

	spin_unlock_irqrestore(&bp->lock, flags);
	napi_enable(&queue->napi_tx);
}

static bool ptp_one_step_sync(struct sk_buff *skb)
{
	struct ptp_header *hdr;
	unsigned int ptp_class;
	u8 msgtype;

	/* No need to parse packet if PTP TS is not involved */
	if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)))
		goto not_oss;

	/* Identify and return whether PTP one step sync is being processed */
	ptp_class = ptp_classify_raw(skb);
	if (ptp_class == PTP_CLASS_NONE)
		goto not_oss;

	hdr = ptp_parse_header(skb, ptp_class);
	if (!hdr)
		goto not_oss;

	if (hdr->flag_field[0] & PTP_FLAG_TWOSTEP)
		goto not_oss;

	msgtype = ptp_get_msgtype(hdr, ptp_class);
	if (msgtype == PTP_MSGTYPE_SYNC)
		return true;

not_oss:
	return false;
}
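
/* Reap up to @budget completed TX frames from the tail of the ring, update
 * statistics, release their DMA mappings and skbs, and wake the subqueue
 * once ring occupancy drops to MACB_TX_WAKEUP_THRESH or below. Runs under
 * queue->tx_ptr_lock to keep tx_head/tx_tail consistent.
 */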
static int macb_tx_complete(struct macb_queue *queue, int budget)
{
	struct macb *bp = queue->bp;
	u16 queue_index = queue - bp->queues;
	unsigned int tail;
	unsigned int head;
	int packets = 0;

	spin_lock(&queue->tx_ptr_lock);
	head = queue->tx_head;
	for (tail = queue->tx_tail; tail != head && packets < budget; tail++) {
		struct macb_tx_skb *tx_skb;
		struct sk_buff *skb;
		struct macb_dma_desc *desc;
		u32 ctrl;

		desc = macb_tx_desc(queue, tail);

		/* Make hw descriptor updates visible to CPU */
		rmb();

		ctrl = desc->ctrl;

		/* TX_USED bit is only set by hardware on the very first buffer
		 * descriptor of the transmitted frame.
		 */
		if (!(ctrl & MACB_BIT(TX_USED)))
			break;

		/* Process all buffers of the current transmitted frame */
		for (;; tail++) {
			tx_skb = macb_tx_skb(queue, tail);
			skb = tx_skb->skb;

			/* First, update TX stats if needed */
			if (skb) {
				if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
				    !ptp_one_step_sync(skb) &&
				    gem_ptp_do_txstamp(queue, skb, desc) == 0) {
					/* skb now belongs to timestamp buffer
					 * and will be removed later
					 */
					tx_skb->skb = NULL;
				}
				netdev_vdbg(bp->dev, "skb %u (data %p) TX complete\n",
					    macb_tx_ring_wrap(bp, tail),
					    skb->data);
				bp->dev->stats.tx_packets++;
				queue->stats.tx_packets++;
				bp->dev->stats.tx_bytes += skb->len;
				queue->stats.tx_bytes += skb->len;
				packets++;
			}

			/* Now we can safely release resources */
			macb_tx_unmap(bp, tx_skb, budget);

			/* skb is set only for the last buffer of the frame.
			 * WARNING: at this point skb has been freed by
			 * macb_tx_unmap().
			 */
			if (skb)
				break;
		}
	}

	queue->tx_tail = tail;
	if (__netif_subqueue_stopped(bp->dev, queue_index) &&
	    CIRC_CNT(queue->tx_head, queue->tx_tail,
		     bp->tx_ring_size) <= MACB_TX_WAKEUP_THRESH(bp))
		netif_wake_subqueue(bp->dev, queue_index);
	spin_unlock(&queue->tx_ptr_lock);

	return packets;
}
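
/* Allocate and DMA-map fresh receive buffers for every free slot between
 * rx_prepared_head and rx_tail, handing each descriptor back to the
 * hardware by clearing its RX_USED bit last.
 */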
static void gem_rx_refill(struct macb_queue *queue)
{
	unsigned int entry;
	struct sk_buff *skb;
	dma_addr_t paddr;
	struct macb *bp = queue->bp;
	struct macb_dma_desc *desc;

	while (CIRC_SPACE(queue->rx_prepared_head, queue->rx_tail,
			  bp->rx_ring_size) > 0) {
		entry = macb_rx_ring_wrap(bp, queue->rx_prepared_head);

		/* Make hw descriptor updates visible to CPU */
		rmb();

		desc = macb_rx_desc(queue, entry);

		if (!queue->rx_skbuff[entry]) {
			/* allocate sk_buff for this free entry in ring */
			skb = netdev_alloc_skb(bp->dev, bp->rx_buffer_size);
			if (unlikely(!skb)) {
				netdev_err(bp->dev,
					   "Unable to allocate sk_buff\n");
				break;
			}

			/* now fill corresponding descriptor entry */
			paddr = dma_map_single(&bp->pdev->dev, skb->data,
					       bp->rx_buffer_size,
					       DMA_FROM_DEVICE);
			if (dma_mapping_error(&bp->pdev->dev, paddr)) {
				dev_kfree_skb(skb);
				break;
			}

			queue->rx_skbuff[entry] = skb;

			if (entry == bp->rx_ring_size - 1)
				paddr |= MACB_BIT(RX_WRAP);
			desc->ctrl = 0;
			/* Setting addr clears RX_USED and allows reception,
			 * make sure ctrl is cleared first to avoid a race.
			 */
			dma_wmb();
			macb_set_addr(bp, desc, paddr);

			/* properly align Ethernet header */
			skb_reserve(skb, NET_IP_ALIGN);
		} else {
			desc->ctrl = 0;
			dma_wmb();
			desc->addr &= ~MACB_BIT(RX_USED);
		}
		queue->rx_prepared_head++;
	}

	/* Make descriptor updates visible to hardware */
	wmb();

	netdev_vdbg(bp->dev, "rx ring: queue: %p, prepared head %d, tail %d\n",
		    queue, queue->rx_prepared_head, queue->rx_tail);
}

/* Mark DMA descriptors from begin up to and not including end as unused */
static void discard_partial_frame(struct macb_queue *queue, unsigned int begin,
				  unsigned int end)
{
	unsigned int frag;

	for (frag = begin; frag != end; frag++) {
		struct macb_dma_desc *desc = macb_rx_desc(queue, frag);

		desc->addr &= ~MACB_BIT(RX_USED);
	}

	/* Make descriptor updates visible to hardware */
	wmb();

	/* When this happens, the hardware stats registers for
	 * whatever caused this are updated, so we don't have to record
	 * anything.
	 */
}
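
/* GEM receive: walk up to @budget used descriptors from queue->rx_tail,
 * hand each completed skb to napi_gro_receive(), then refill the ring with
 * fresh buffers via gem_rx_refill().
 */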
static int gem_rx(struct macb_queue *queue, struct napi_struct *napi,
		  int budget)
{
	struct macb *bp = queue->bp;
	unsigned int len;
	unsigned int entry;
	struct sk_buff *skb;
	struct macb_dma_desc *desc;
	int count = 0;

	while (count < budget) {
		u32 ctrl;
		dma_addr_t addr;
		bool rxused;

		entry = macb_rx_ring_wrap(bp, queue->rx_tail);
		desc = macb_rx_desc(queue, entry);

		/* Make hw descriptor updates visible to CPU */
		rmb();

		rxused = (desc->addr & MACB_BIT(RX_USED)) ? true : false;
		addr = macb_get_addr(bp, desc);

		if (!rxused)
			break;

		/* Ensure ctrl is at least as up-to-date as rxused */
		dma_rmb();

		ctrl = desc->ctrl;

		queue->rx_tail++;
		count++;

		if (!(ctrl & MACB_BIT(RX_SOF) && ctrl & MACB_BIT(RX_EOF))) {
			netdev_err(bp->dev,
				   "not whole frame pointed by descriptor\n");
			bp->dev->stats.rx_dropped++;
			queue->stats.rx_dropped++;
			break;
		}
		skb = queue->rx_skbuff[entry];
		if (unlikely(!skb)) {
			netdev_err(bp->dev,
				   "inconsistent Rx descriptor chain\n");
			bp->dev->stats.rx_dropped++;
			queue->stats.rx_dropped++;
			break;
		}
		/* now everything is ready for receiving packet */
		queue->rx_skbuff[entry] = NULL;
		len = ctrl & bp->rx_frm_len_mask;

		netdev_vdbg(bp->dev, "gem_rx %u (len %u)\n", entry, len);

		skb_put(skb, len);
		dma_unmap_single(&bp->pdev->dev, addr,
				 bp->rx_buffer_size, DMA_FROM_DEVICE);

		skb->protocol = eth_type_trans(skb, bp->dev);
		skb_checksum_none_assert(skb);
		if (bp->dev->features & NETIF_F_RXCSUM &&
		    !(bp->dev->flags & IFF_PROMISC) &&
		    GEM_BFEXT(RX_CSUM, ctrl) & GEM_RX_CSUM_CHECKED_MASK)
			skb->ip_summed = CHECKSUM_UNNECESSARY;

		bp->dev->stats.rx_packets++;
		queue->stats.rx_packets++;
		bp->dev->stats.rx_bytes += skb->len;
		queue->stats.rx_bytes += skb->len;

		gem_ptp_do_rxstamp(bp, skb, desc);

#if defined(DEBUG) && defined(VERBOSE_DEBUG)
		netdev_vdbg(bp->dev, "received skb of length %u, csum: %08x\n",
			    skb->len, skb->csum);
		print_hex_dump(KERN_DEBUG, " mac: ", DUMP_PREFIX_ADDRESS, 16, 1,
			       skb_mac_header(skb), 16, true);
		print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_ADDRESS, 16, 1,
			       skb->data, 32, true);
#endif

		napi_gro_receive(napi, skb);
	}

	gem_rx_refill(queue);

	return count;
}
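
/* MACB receive is not zero-copy: a frame spans one or more small RX buffers
 * (first_frag..last_frag), which are copied into a freshly allocated skb
 * before the descriptors are returned to the hardware.
 */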
static int macb_rx_frame(struct macb_queue *queue, struct napi_struct *napi,
			 unsigned int first_frag, unsigned int last_frag)
{
	unsigned int len;
	unsigned int frag;
	unsigned int offset;
	struct sk_buff *skb;
	struct macb_dma_desc *desc;
	struct macb *bp = queue->bp;

	desc = macb_rx_desc(queue, last_frag);
	len = desc->ctrl & bp->rx_frm_len_mask;

	netdev_vdbg(bp->dev, "macb_rx_frame frags %u - %u (len %u)\n",
		    macb_rx_ring_wrap(bp, first_frag),
		    macb_rx_ring_wrap(bp, last_frag), len);

	/* The ethernet header starts NET_IP_ALIGN bytes into the
	 * first buffer. Since the header is 14 bytes, this makes the
	 * payload word-aligned.
	 *
	 * Instead of calling skb_reserve(NET_IP_ALIGN), we just copy
	 * the two padding bytes into the skb so that we avoid hitting
	 * the slowpath in memcpy(), and pull them off afterwards.
	 */
	skb = netdev_alloc_skb(bp->dev, len + NET_IP_ALIGN);
	if (!skb) {
		bp->dev->stats.rx_dropped++;
		for (frag = first_frag; ; frag++) {
			desc = macb_rx_desc(queue, frag);
			desc->addr &= ~MACB_BIT(RX_USED);
			if (frag == last_frag)
				break;
		}

		/* Make descriptor updates visible to hardware */
		wmb();

		return 1;
	}

	offset = 0;
	len += NET_IP_ALIGN;
	skb_checksum_none_assert(skb);
	skb_put(skb, len);

	for (frag = first_frag; ; frag++) {
		unsigned int frag_len = bp->rx_buffer_size;

		if (offset + frag_len > len) {
			if (unlikely(frag != last_frag)) {
				dev_kfree_skb_any(skb);
				return -1;
			}
			frag_len = len - offset;
		}
		skb_copy_to_linear_data_offset(skb, offset,
					       macb_rx_buffer(queue, frag),
					       frag_len);
		offset += bp->rx_buffer_size;
		desc = macb_rx_desc(queue, frag);
		desc->addr &= ~MACB_BIT(RX_USED);

		if (frag == last_frag)
			break;
	}

	/* Make descriptor updates visible to hardware */
	wmb();

	__skb_pull(skb, NET_IP_ALIGN);
	skb->protocol = eth_type_trans(skb, bp->dev);

	bp->dev->stats.rx_packets++;
	bp->dev->stats.rx_bytes += skb->len;
	netdev_vdbg(bp->dev, "received skb of length %u, csum: %08x\n",
		    skb->len, skb->csum);
	napi_gro_receive(napi, skb);

	return 0;
}

static inline void macb_init_rx_ring(struct macb_queue *queue)
{
	struct macb *bp = queue->bp;
	dma_addr_t addr;
	struct macb_dma_desc *desc = NULL;
	int i;

	addr = queue->rx_buffers_dma;
	for (i = 0; i < bp->rx_ring_size; i++) {
		desc = macb_rx_desc(queue, i);
		macb_set_addr(bp, desc, addr);
		desc->ctrl = 0;
		addr += bp->rx_buffer_size;
	}
	desc->addr |= MACB_BIT(RX_WRAP);
	queue->rx_tail = 0;
}
97236cda AT |
1507 | static int macb_rx(struct macb_queue *queue, struct napi_struct *napi, |
1508 | int budget) | |
89e5785f | 1509 | { |
ae1f2a56 | 1510 | struct macb *bp = queue->bp; |
9ba723b0 | 1511 | bool reset_rx_queue = false; |
89e5785f | 1512 | int received = 0; |
55054a16 | 1513 | unsigned int tail; |
89e5785f HS |
1514 | int first_frag = -1; |
1515 | ||
ae1f2a56 RO |
1516 | for (tail = queue->rx_tail; budget > 0; tail++) { |
1517 | struct macb_dma_desc *desc = macb_rx_desc(queue, tail); | |
dc97a89e | 1518 | u32 ctrl; |
89e5785f | 1519 | |
03dbe05f | 1520 | /* Make hw descriptor updates visible to CPU */ |
89e5785f | 1521 | rmb(); |
03dbe05f | 1522 | |
dc97a89e | 1523 | if (!(desc->addr & MACB_BIT(RX_USED))) |
89e5785f HS |
1524 | break; |
1525 | ||
6e0af298 AH |
1526 | /* Ensure ctrl is at least as up-to-date as addr */ |
1527 | dma_rmb(); | |
1528 | ||
1529 | ctrl = desc->ctrl; | |
1530 | ||
89e5785f HS |
1531 | if (ctrl & MACB_BIT(RX_SOF)) { |
1532 | if (first_frag != -1) | |
ae1f2a56 | 1533 | discard_partial_frame(queue, first_frag, tail); |
89e5785f HS |
1534 | first_frag = tail; |
1535 | } | |
1536 | ||
1537 | if (ctrl & MACB_BIT(RX_EOF)) { | |
1538 | int dropped; | |
9ba723b0 CP |
1539 | |
1540 | if (unlikely(first_frag == -1)) { | |
1541 | reset_rx_queue = true; | |
1542 | continue; | |
1543 | } | |
89e5785f | 1544 | |
97236cda | 1545 | dropped = macb_rx_frame(queue, napi, first_frag, tail); |
89e5785f | 1546 | first_frag = -1; |
9ba723b0 CP |
1547 | if (unlikely(dropped < 0)) { |
1548 | reset_rx_queue = true; | |
1549 | continue; | |
1550 | } | |
89e5785f HS |
1551 | if (!dropped) { |
1552 | received++; | |
1553 | budget--; | |
1554 | } | |
1555 | } | |
1556 | } | |
1557 | ||
9ba723b0 CP |
1558 | if (unlikely(reset_rx_queue)) { |
1559 | unsigned long flags; | |
1560 | u32 ctrl; | |
1561 | ||
1562 | netdev_err(bp->dev, "RX queue corruption: resetting it\n");
1563 | ||
1564 | spin_lock_irqsave(&bp->lock, flags); | |
1565 | ||
1566 | ctrl = macb_readl(bp, NCR); | |
1567 | macb_writel(bp, NCR, ctrl & ~MACB_BIT(RE)); | |
1568 | ||
ae1f2a56 RO |
1569 | macb_init_rx_ring(queue); |
1570 | queue_writel(queue, RBQP, queue->rx_ring_dma); | |
9ba723b0 CP |
1571 | |
1572 | macb_writel(bp, NCR, ctrl | MACB_BIT(RE)); | |
1573 | ||
1574 | spin_unlock_irqrestore(&bp->lock, flags); | |
1575 | return received; | |
1576 | } | |
1577 | ||
89e5785f | 1578 | if (first_frag != -1) |
ae1f2a56 | 1579 | queue->rx_tail = first_frag; |
89e5785f | 1580 | else |
ae1f2a56 | 1581 | queue->rx_tail = tail; |
89e5785f HS |
1582 | |
1583 | return received; | |
1584 | } | |
1585 | ||
1900e30d RH |
1586 | static bool macb_rx_pending(struct macb_queue *queue) |
1587 | { | |
1588 | struct macb *bp = queue->bp; | |
1589 | unsigned int entry; | |
1590 | struct macb_dma_desc *desc; | |
1591 | ||
1592 | entry = macb_rx_ring_wrap(bp, queue->rx_tail); | |
1593 | desc = macb_rx_desc(queue, entry); | |
1594 | ||
1595 | /* Make hw descriptor updates visible to CPU */ | |
1596 | rmb(); | |
1597 | ||
1598 | return (desc->addr & MACB_BIT(RX_USED)) != 0; | |
1599 | } | |
1600 | ||
138badbc | 1601 | static int macb_rx_poll(struct napi_struct *napi, int budget) |
89e5785f | 1602 | { |
138badbc | 1603 | struct macb_queue *queue = container_of(napi, struct macb_queue, napi_rx); |
ae1f2a56 | 1604 | struct macb *bp = queue->bp; |
bea3348e | 1605 | int work_done; |
89e5785f | 1606 | |
1900e30d | 1607 | work_done = bp->macbgem_ops.mog_rx(queue, napi, budget); |
89e5785f | 1608 | |
138badbc | 1609 | netdev_vdbg(bp->dev, "RX poll: queue = %u, work_done = %d, budget = %d\n", |
1900e30d | 1610 | (unsigned int)(queue - bp->queues), work_done, budget); |
89e5785f | 1611 | |
1900e30d RH |
1612 | if (work_done < budget && napi_complete_done(napi, work_done)) { |
1613 | queue_writel(queue, IER, bp->rx_intr_mask); | |
89e5785f | 1614 | |
1900e30d RH |
1615 | /* Packet completions only seem to propagate to raise |
1616 | * interrupts when interrupts are enabled at the time, so if | |
1617 | * packets were received while interrupts were disabled, | |
0bf476fc RH |
1618 | * they will not cause another interrupt to be generated when |
1619 | * interrupts are re-enabled. | |
1900e30d RH |
1620 | * Check for this case here to avoid losing a wakeup. This can |
1621 | * potentially race with the interrupt handler doing the same | |
1622 | * actions if an interrupt is raised just after enabling them, | |
1623 | * but this should be harmless. | |
0bf476fc | 1624 | */ |
1900e30d RH |
1625 | if (macb_rx_pending(queue)) { |
1626 | queue_writel(queue, IDR, bp->rx_intr_mask); | |
02f7a34f | 1627 | if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) |
ae1f2a56 | 1628 | queue_writel(queue, ISR, MACB_BIT(RCOMP)); |
1900e30d RH |
1629 | netdev_vdbg(bp->dev, "poll: packets pending, reschedule\n"); |
1630 | napi_schedule(napi); | |
02f7a34f | 1631 | } |
b336369c | 1632 | } |
89e5785f HS |
1633 | |
1634 | /* TODO: Handle errors */ | |
1635 | ||
bea3348e | 1636 | return work_done; |
89e5785f HS |
1637 | } |
1638 | ||
138badbc RH |
1639 | static void macb_tx_restart(struct macb_queue *queue) |
1640 | { | |
1641 | struct macb *bp = queue->bp; | |
1642 | unsigned int head_idx, tbqp; | |
1643 | ||
1644 | spin_lock(&queue->tx_ptr_lock); | |
1645 | ||
1646 | if (queue->tx_head == queue->tx_tail) | |
1647 | goto out_tx_ptr_unlock; | |
1648 | ||
1649 | tbqp = queue_readl(queue, TBQP) / macb_dma_desc_get_size(bp); | |
1650 | tbqp = macb_adj_dma_desc_idx(bp, macb_tx_ring_wrap(bp, tbqp)); | |
1651 | head_idx = macb_adj_dma_desc_idx(bp, macb_tx_ring_wrap(bp, queue->tx_head)); | |
1652 | ||
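/* TBQP holds the address of the descriptor the controller will fetch
 * next; if it already matches tx_head there is no newly queued
 * descriptor to hand over, so the TSTART write can be skipped.
 */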
1653 | if (tbqp == head_idx) | |
1654 | goto out_tx_ptr_unlock; | |
1655 | ||
1656 | spin_lock_irq(&bp->lock); | |
1657 | macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART)); | |
1658 | spin_unlock_irq(&bp->lock); | |
1659 | ||
1660 | out_tx_ptr_unlock: | |
1661 | spin_unlock(&queue->tx_ptr_lock); | |
1662 | } | |
1663 | ||
1664 | static bool macb_tx_complete_pending(struct macb_queue *queue) | |
1665 | { | |
1666 | bool retval = false; | |
1667 | ||
1668 | spin_lock(&queue->tx_ptr_lock); | |
1669 | if (queue->tx_head != queue->tx_tail) { | |
1670 | /* Make hw descriptor updates visible to CPU */ | |
1671 | rmb(); | |
1672 | ||
1673 | if (macb_tx_desc(queue, queue->tx_tail)->ctrl & MACB_BIT(TX_USED)) | |
1674 | retval = true; | |
1675 | } | |
1676 | spin_unlock(&queue->tx_ptr_lock); | |
1677 | return retval; | |
1678 | } | |
1679 | ||
1680 | static int macb_tx_poll(struct napi_struct *napi, int budget) | |
1681 | { | |
1682 | struct macb_queue *queue = container_of(napi, struct macb_queue, napi_tx); | |
1683 | struct macb *bp = queue->bp; | |
1684 | int work_done; | |
1685 | ||
1686 | work_done = macb_tx_complete(queue, budget); | |
1687 | ||
1688 | rmb(); // ensure txubr_pending is up to date | |
1689 | if (queue->txubr_pending) { | |
1690 | queue->txubr_pending = false; | |
1691 | netdev_vdbg(bp->dev, "poll: tx restart\n"); | |
1692 | macb_tx_restart(queue); | |
1693 | } | |
1694 | ||
1695 | netdev_vdbg(bp->dev, "TX poll: queue = %u, work_done = %d, budget = %d\n", | |
1696 | (unsigned int)(queue - bp->queues), work_done, budget); | |
1697 | ||
1698 | if (work_done < budget && napi_complete_done(napi, work_done)) { | |
1699 | queue_writel(queue, IER, MACB_BIT(TCOMP)); | |
1700 | ||
1701 | /* Packet completions only seem to propagate to raise | |
1702 | * interrupts when interrupts are enabled at the time, so if | |
1703 | * packets were sent while interrupts were disabled, | |
1704 | * they will not cause another interrupt to be generated when | |
1705 | * interrupts are re-enabled. | |
1706 | * Check for this case here to avoid losing a wakeup. This can | |
1707 | * potentially race with the interrupt handler doing the same | |
1708 | * actions if an interrupt is raised just after enabling them, | |
1709 | * but this should be harmless. | |
1710 | */ | |
1711 | if (macb_tx_complete_pending(queue)) { | |
1712 | queue_writel(queue, IDR, MACB_BIT(TCOMP)); | |
1713 | if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) | |
1714 | queue_writel(queue, ISR, MACB_BIT(TCOMP)); | |
1715 | netdev_vdbg(bp->dev, "TX poll: packets pending, reschedule\n"); | |
1716 | napi_schedule(napi); | |
1717 | } | |
1718 | } | |
1719 | ||
1720 | return work_done; | |
1721 | } | |
1722 | ||
e7412b83 | 1723 | static void macb_hresp_error_task(struct tasklet_struct *t) |
032dc41b | 1724 | { |
e7412b83 | 1725 | struct macb *bp = from_tasklet(bp, t, hresp_err_tasklet); |
032dc41b | 1726 | struct net_device *dev = bp->dev; |
580d395c | 1727 | struct macb_queue *queue; |
032dc41b HK |
1728 | unsigned int q; |
1729 | u32 ctrl; | |
1730 | ||
1731 | for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { | |
e501070e | 1732 | queue_writel(queue, IDR, bp->rx_intr_mask | |
032dc41b HK |
1733 | MACB_TX_INT_FLAGS | |
1734 | MACB_BIT(HRESP)); | |
1735 | } | |
1736 | ctrl = macb_readl(bp, NCR); | |
1737 | ctrl &= ~(MACB_BIT(RE) | MACB_BIT(TE)); | |
1738 | macb_writel(bp, NCR, ctrl); | |
1739 | ||
1740 | netif_tx_stop_all_queues(dev); | |
1741 | netif_carrier_off(dev); | |
1742 | ||
1743 | bp->macbgem_ops.mog_init_rings(bp); | |
1744 | ||
1745 | /* Initialize TX and RX buffers */ | |
6e952d95 | 1746 | macb_init_buffers(bp); |
032dc41b | 1747 | |
6e952d95 AT |
1748 | /* Enable interrupts */ |
1749 | for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) | |
032dc41b | 1750 | queue_writel(queue, IER, |
e501070e | 1751 | bp->rx_intr_mask | |
032dc41b HK |
1752 | MACB_TX_INT_FLAGS | |
1753 | MACB_BIT(HRESP)); | |
032dc41b HK |
1754 | |
1755 | ctrl |= MACB_BIT(RE) | MACB_BIT(TE); | |
1756 | macb_writel(bp, NCR, ctrl); | |
1757 | ||
1758 | netif_carrier_on(dev); | |
1759 | netif_tx_start_all_queues(dev); | |
1760 | } | |
1761 | ||
9d45c8e8 NF |
1762 | static irqreturn_t macb_wol_interrupt(int irq, void *dev_id) |
1763 | { | |
1764 | struct macb_queue *queue = dev_id; | |
1765 | struct macb *bp = queue->bp; | |
1766 | u32 status; | |
1767 | ||
1768 | status = queue_readl(queue, ISR); | |
1769 | ||
1770 | if (unlikely(!status)) | |
1771 | return IRQ_NONE; | |
1772 | ||
1773 | spin_lock(&bp->lock); | |
1774 | ||
1775 | if (status & MACB_BIT(WOL)) { | |
1776 | queue_writel(queue, IDR, MACB_BIT(WOL)); | |
1777 | macb_writel(bp, WOL, 0); | |
1778 | netdev_vdbg(bp->dev, "MACB WoL: queue = %u, isr = 0x%08lx\n", | |
1779 | (unsigned int)(queue - bp->queues), | |
1780 | (unsigned long)status); | |
1781 | if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) | |
1782 | queue_writel(queue, ISR, MACB_BIT(WOL)); | |
1783 | pm_wakeup_event(&bp->pdev->dev, 0); | |
1784 | } | |
1785 | ||
1786 | spin_unlock(&bp->lock); | |
1787 | ||
1788 | return IRQ_HANDLED; | |
1789 | } | |
1790 | ||
558e35cc NF |
1791 | static irqreturn_t gem_wol_interrupt(int irq, void *dev_id) |
1792 | { | |
1793 | struct macb_queue *queue = dev_id; | |
1794 | struct macb *bp = queue->bp; | |
1795 | u32 status; | |
1796 | ||
1797 | status = queue_readl(queue, ISR); | |
1798 | ||
1799 | if (unlikely(!status)) | |
1800 | return IRQ_NONE; | |
1801 | ||
1802 | spin_lock(&bp->lock); | |
1803 | ||
1804 | if (status & GEM_BIT(WOL)) { | |
1805 | queue_writel(queue, IDR, GEM_BIT(WOL)); | |
1806 | gem_writel(bp, WOL, 0); | |
1807 | netdev_vdbg(bp->dev, "GEM WoL: queue = %u, isr = 0x%08lx\n", | |
1808 | (unsigned int)(queue - bp->queues), | |
1809 | (unsigned long)status); | |
1810 | if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) | |
1811 | queue_writel(queue, ISR, GEM_BIT(WOL)); | |
1812 | pm_wakeup_event(&bp->pdev->dev, 0); | |
1813 | } | |
1814 | ||
1815 | spin_unlock(&bp->lock); | |
1816 | ||
1817 | return IRQ_HANDLED; | |
1818 | } | |
1819 | ||
89e5785f HS |
1820 | static irqreturn_t macb_interrupt(int irq, void *dev_id) |
1821 | { | |
02c958dd CP |
1822 | struct macb_queue *queue = dev_id; |
1823 | struct macb *bp = queue->bp; | |
1824 | struct net_device *dev = bp->dev; | |
bfbb92c4 | 1825 | u32 status, ctrl; |
89e5785f | 1826 | |
02c958dd | 1827 | status = queue_readl(queue, ISR); |
89e5785f HS |
1828 | |
1829 | if (unlikely(!status)) | |
1830 | return IRQ_NONE; | |
1831 | ||
1832 | spin_lock(&bp->lock); | |
1833 | ||
1834 | while (status) { | |
89e5785f HS |
1835 | /* close possible race with dev_close */ |
1836 | if (unlikely(!netif_running(dev))) { | |
02c958dd | 1837 | queue_writel(queue, IDR, -1); |
24468374 NS |
1838 | if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) |
1839 | queue_writel(queue, ISR, -1); | |
89e5785f HS |
1840 | break; |
1841 | } | |
1842 | ||
02c958dd CP |
1843 | netdev_vdbg(bp->dev, "queue = %u, isr = 0x%08lx\n", |
1844 | (unsigned int)(queue - bp->queues), | |
1845 | (unsigned long)status); | |
a268adb1 | 1846 | |
e501070e | 1847 | if (status & bp->rx_intr_mask) { |
64ec42fe | 1848 | /* There's no point taking any more interrupts |
b336369c JH |
1849 | * until we have processed the buffers. The |
1850 | * scheduling call may fail if the poll routine | |
1851 | * is already scheduled, so disable interrupts | |
1852 | * now. | |
1853 | */ | |
e501070e | 1854 | queue_writel(queue, IDR, bp->rx_intr_mask); |
581df9e1 | 1855 | if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) |
02c958dd | 1856 | queue_writel(queue, ISR, MACB_BIT(RCOMP)); |
b336369c | 1857 | |
138badbc | 1858 | if (napi_schedule_prep(&queue->napi_rx)) { |
a268adb1 | 1859 | netdev_vdbg(bp->dev, "scheduling RX softirq\n"); |
138badbc RH |
1860 | __napi_schedule(&queue->napi_rx); |
1861 | } | |
1862 | } | |
1863 | ||
1864 | if (status & (MACB_BIT(TCOMP) | | |
1865 | MACB_BIT(TXUBR))) { | |
1866 | queue_writel(queue, IDR, MACB_BIT(TCOMP)); | |
1867 | if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) | |
1868 | queue_writel(queue, ISR, MACB_BIT(TCOMP) | | |
1869 | MACB_BIT(TXUBR)); | |
1870 | ||
1871 | if (status & MACB_BIT(TXUBR)) { | |
1872 | queue->txubr_pending = true; | |
1873 | wmb(); // ensure softirq can see update | |
1874 | } | |
1875 | ||
1876 | if (napi_schedule_prep(&queue->napi_tx)) { | |
1877 | netdev_vdbg(bp->dev, "scheduling TX softirq\n"); | |
1878 | __napi_schedule(&queue->napi_tx); | |
89e5785f HS |
1879 | } |
1880 | } | |
1881 | ||
e86cd53a | 1882 | if (unlikely(status & (MACB_TX_ERR_FLAGS))) { |
02c958dd CP |
1883 | queue_writel(queue, IDR, MACB_TX_INT_FLAGS); |
1884 | schedule_work(&queue->tx_error_task); | |
6a027b70 SB |
1885 | |
1886 | if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) | |
02c958dd | 1887 | queue_writel(queue, ISR, MACB_TX_ERR_FLAGS); |
6a027b70 | 1888 | |
e86cd53a NF |
1889 | break; |
1890 | } | |
1891 | ||
64ec42fe | 1892 | /* Link change detection isn't possible with RMII, so we'll |
89e5785f HS |
1893 | * add that if/when we get our hands on a full-blown MII PHY. |
1894 | */ | |
1895 | ||
86b5e7de NS |
1896 | /* There is a hardware issue under heavy load where DMA can |
1897 | * stop; this causes endless "used buffer descriptor read"
1898 | * interrupts, but it can be cleared by re-enabling RX. See
e501070e HK |
1899 | * the at91rm9200 manual, section 41.3.1 or the Zynq manual |
1900 | * section 16.7.4 for details. RXUBR is only enabled for | |
1901 | * these two versions. | |
86b5e7de | 1902 | */ |
bfbb92c4 NS |
1903 | if (status & MACB_BIT(RXUBR)) { |
1904 | ctrl = macb_readl(bp, NCR); | |
1905 | macb_writel(bp, NCR, ctrl & ~MACB_BIT(RE)); | |
ffac0e96 | 1906 | wmb(); |
bfbb92c4 NS |
1907 | macb_writel(bp, NCR, ctrl | MACB_BIT(RE)); |
1908 | ||
1909 | if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) | |
ba504994 | 1910 | queue_writel(queue, ISR, MACB_BIT(RXUBR)); |
bfbb92c4 NS |
1911 | } |
1912 | ||
b19f7f71 AS |
1913 | if (status & MACB_BIT(ISR_ROVR)) { |
1914 | /* We missed at least one packet */ | |
f75ba50b JI |
1915 | if (macb_is_gem(bp)) |
1916 | bp->hw_stats.gem.rx_overruns++; | |
1917 | else | |
1918 | bp->hw_stats.macb.rx_overruns++; | |
6a027b70 SB |
1919 | |
1920 | if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) | |
02c958dd | 1921 | queue_writel(queue, ISR, MACB_BIT(ISR_ROVR)); |
b19f7f71 AS |
1922 | } |
1923 | ||
89e5785f | 1924 | if (status & MACB_BIT(HRESP)) { |
032dc41b | 1925 | tasklet_schedule(&bp->hresp_err_tasklet); |
c220f8cd | 1926 | netdev_err(dev, "DMA bus error: HRESP not OK\n"); |
6a027b70 SB |
1927 | |
1928 | if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) | |
02c958dd | 1929 | queue_writel(queue, ISR, MACB_BIT(HRESP)); |
89e5785f | 1930 | } |
02c958dd | 1931 | status = queue_readl(queue, ISR); |
89e5785f HS |
1932 | } |
1933 | ||
1934 | spin_unlock(&bp->lock); | |
1935 | ||
1936 | return IRQ_HANDLED; | |
1937 | } | |
1938 | ||
6e8cf5c0 | 1939 | #ifdef CONFIG_NET_POLL_CONTROLLER |
64ec42fe | 1940 | /* Polling receive - used by netconsole and other diagnostic tools |
6e8cf5c0 TP |
1941 | * to allow network i/o with interrupts disabled. |
1942 | */ | |
1943 | static void macb_poll_controller(struct net_device *dev) | |
1944 | { | |
02c958dd CP |
1945 | struct macb *bp = netdev_priv(dev); |
1946 | struct macb_queue *queue; | |
6e8cf5c0 | 1947 | unsigned long flags; |
02c958dd | 1948 | unsigned int q; |
6e8cf5c0 TP |
1949 | |
1950 | local_irq_save(flags); | |
02c958dd CP |
1951 | for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) |
1952 | macb_interrupt(dev->irq, queue); | |
6e8cf5c0 TP |
1953 | local_irq_restore(flags); |
1954 | } | |
1955 | #endif | |
1956 | ||
a4c35ed3 | 1957 | static unsigned int macb_tx_map(struct macb *bp, |
02c958dd | 1958 | struct macb_queue *queue, |
1629dd4f RO |
1959 | struct sk_buff *skb, |
1960 | unsigned int hdrlen) | |
89e5785f | 1961 | { |
89e5785f | 1962 | dma_addr_t mapping; |
02c958dd | 1963 | unsigned int len, entry, i, tx_head = queue->tx_head; |
a4c35ed3 | 1964 | struct macb_tx_skb *tx_skb = NULL; |
55054a16 | 1965 | struct macb_dma_desc *desc; |
a4c35ed3 CP |
1966 | unsigned int offset, size, count = 0; |
1967 | unsigned int f, nr_frags = skb_shinfo(skb)->nr_frags; | |
1629dd4f RO |
1968 | unsigned int eof = 1, mss_mfs = 0; |
1969 | u32 ctrl, lso_ctrl = 0, seq_ctrl = 0; | |
1970 | ||
1971 | /* LSO */ | |
1972 | if (skb_shinfo(skb)->gso_size != 0) { | |
1973 | if (ip_hdr(skb)->protocol == IPPROTO_UDP) | |
1974 | /* UDP - UFO */ | |
1975 | lso_ctrl = MACB_LSO_UFO_ENABLE; | |
1976 | else | |
1977 | /* TCP - TSO */ | |
1978 | lso_ctrl = MACB_LSO_TSO_ENABLE; | |
1979 | } | |
a4c35ed3 CP |
1980 | |
1981 | /* First, map non-paged data */ | |
1982 | len = skb_headlen(skb); | |
1629dd4f RO |
1983 | |
1984 | /* first buffer length */ | |
1985 | size = hdrlen; | |
1986 | ||
a4c35ed3 CP |
1987 | offset = 0; |
1988 | while (len) { | |
b410d13e | 1989 | entry = macb_tx_ring_wrap(bp, tx_head); |
02c958dd | 1990 | tx_skb = &queue->tx_skb[entry]; |
a4c35ed3 CP |
1991 | |
1992 | mapping = dma_map_single(&bp->pdev->dev, | |
1993 | skb->data + offset, | |
1994 | size, DMA_TO_DEVICE); | |
1995 | if (dma_mapping_error(&bp->pdev->dev, mapping)) | |
1996 | goto dma_error; | |
1997 | ||
1998 | /* Save info to properly release resources */ | |
1999 | tx_skb->skb = NULL; | |
2000 | tx_skb->mapping = mapping; | |
2001 | tx_skb->size = size; | |
2002 | tx_skb->mapped_as_page = false; | |
2003 | ||
2004 | len -= size; | |
2005 | offset += size; | |
2006 | count++; | |
2007 | tx_head++; | |
1629dd4f RO |
2008 | |
2009 | size = min(len, bp->max_tx_length); | |
a4c35ed3 CP |
2010 | } |
2011 | ||
2012 | /* Then, map paged data from fragments */ | |
2013 | for (f = 0; f < nr_frags; f++) { | |
2014 | const skb_frag_t *frag = &skb_shinfo(skb)->frags[f]; | |
2015 | ||
2016 | len = skb_frag_size(frag); | |
2017 | offset = 0; | |
2018 | while (len) { | |
2019 | size = min(len, bp->max_tx_length); | |
b410d13e | 2020 | entry = macb_tx_ring_wrap(bp, tx_head); |
02c958dd | 2021 | tx_skb = &queue->tx_skb[entry]; |
a4c35ed3 CP |
2022 | |
2023 | mapping = skb_frag_dma_map(&bp->pdev->dev, frag, | |
2024 | offset, size, DMA_TO_DEVICE); | |
2025 | if (dma_mapping_error(&bp->pdev->dev, mapping)) | |
2026 | goto dma_error; | |
2027 | ||
2028 | /* Save info to properly release resources */ | |
2029 | tx_skb->skb = NULL; | |
2030 | tx_skb->mapping = mapping; | |
2031 | tx_skb->size = size; | |
2032 | tx_skb->mapped_as_page = true; | |
2033 | ||
2034 | len -= size; | |
2035 | offset += size; | |
2036 | count++; | |
2037 | tx_head++; | |
2038 | } | |
2039 | } | |
2040 | ||
2041 | /* Should never happen */ | |
aa50b552 | 2042 | if (unlikely(!tx_skb)) { |
a4c35ed3 CP |
2043 | netdev_err(bp->dev, "BUG! empty skb!\n"); |
2044 | return 0; | |
2045 | } | |
2046 | ||
2047 | /* This is the last buffer of the frame: save socket buffer */ | |
2048 | tx_skb->skb = skb; | |
2049 | ||
2050 | /* Update TX ring: update buffer descriptors in reverse order so the
2051 | * controller never observes a partially initialized descriptor chain
2052 | */
2053 | ||
2054 | /* Set 'TX_USED' bit in buffer descriptor at tx_head position | |
2055 | * to set the end of TX queue | |
2056 | */ | |
2057 | i = tx_head; | |
b410d13e | 2058 | entry = macb_tx_ring_wrap(bp, i); |
a4c35ed3 | 2059 | ctrl = MACB_BIT(TX_USED); |
dc97a89e | 2060 | desc = macb_tx_desc(queue, entry); |
a4c35ed3 CP |
2061 | desc->ctrl = ctrl; |
2062 | ||
1629dd4f RO |
2063 | if (lso_ctrl) { |
2064 | if (lso_ctrl == MACB_LSO_UFO_ENABLE) | |
2065 | /* include header and FCS in value given to h/w */ | |
2066 | mss_mfs = skb_shinfo(skb)->gso_size + | |
2067 | skb_transport_offset(skb) + | |
2068 | ETH_FCS_LEN; | |
2069 | else /* TSO */ { | |
2070 | mss_mfs = skb_shinfo(skb)->gso_size; | |
2071 | /* TCP Sequence Number Source Select | |
2072 | * can be set only for TSO | |
2073 | */ | |
2074 | seq_ctrl = 0; | |
2075 | } | |
2076 | } | |
2077 | ||
a4c35ed3 CP |
2078 | do { |
2079 | i--; | |
b410d13e | 2080 | entry = macb_tx_ring_wrap(bp, i); |
02c958dd | 2081 | tx_skb = &queue->tx_skb[entry]; |
dc97a89e | 2082 | desc = macb_tx_desc(queue, entry); |
a4c35ed3 CP |
2083 | |
2084 | ctrl = (u32)tx_skb->size; | |
2085 | if (eof) { | |
2086 | ctrl |= MACB_BIT(TX_LAST); | |
2087 | eof = 0; | |
2088 | } | |
b410d13e | 2089 | if (unlikely(entry == (bp->tx_ring_size - 1))) |
a4c35ed3 CP |
2090 | ctrl |= MACB_BIT(TX_WRAP); |
2091 | ||
1629dd4f RO |
2092 | /* First descriptor is header descriptor */ |
2093 | if (i == queue->tx_head) { | |
2094 | ctrl |= MACB_BF(TX_LSO, lso_ctrl); | |
2095 | ctrl |= MACB_BF(TX_TCP_SEQ_SRC, seq_ctrl); | |
653e92a9 | 2096 | if ((bp->dev->features & NETIF_F_HW_CSUM) && |
5cebb40b HK |
2097 | skb->ip_summed != CHECKSUM_PARTIAL && !lso_ctrl && |
2098 | !ptp_one_step_sync(skb)) | |
653e92a9 | 2099 | ctrl |= MACB_BIT(TX_NOCRC); |
1629dd4f RO |
2100 | } else |
2101 | /* Only set MSS/MFS on payload descriptors | |
2102 | * (second or later descriptor) | |
2103 | */ | |
2104 | ctrl |= MACB_BF(MSS_MFS, mss_mfs); | |
2105 | ||
a4c35ed3 | 2106 | /* Set TX buffer descriptor */ |
dc97a89e | 2107 | macb_set_addr(bp, desc, tx_skb->mapping); |
a4c35ed3 CP |
2108 | /* desc->addr must be visible to hardware before clearing |
2109 | * 'TX_USED' bit in desc->ctrl. | |
2110 | */ | |
2111 | wmb(); | |
2112 | desc->ctrl = ctrl; | |
02c958dd | 2113 | } while (i != queue->tx_head); |
a4c35ed3 | 2114 | |
02c958dd | 2115 | queue->tx_head = tx_head; |
a4c35ed3 CP |
2116 | |
2117 | return count; | |
2118 | ||
2119 | dma_error: | |
2120 | netdev_err(bp->dev, "TX DMA map failed\n"); | |
2121 | ||
02c958dd CP |
2122 | for (i = queue->tx_head; i != tx_head; i++) { |
2123 | tx_skb = macb_tx_skb(queue, i); | |
a4c35ed3 | 2124 | |
138badbc | 2125 | macb_tx_unmap(bp, tx_skb, 0); |
a4c35ed3 CP |
2126 | } |
2127 | ||
2128 | return 0; | |
2129 | } | |
2130 | ||
1629dd4f RO |
2131 | static netdev_features_t macb_features_check(struct sk_buff *skb, |
2132 | struct net_device *dev, | |
2133 | netdev_features_t features) | |
2134 | { | |
2135 | unsigned int nr_frags, f; | |
2136 | unsigned int hdrlen; | |
2137 | ||
2138 | /* Validate LSO compatibility */ | |
2139 | ||
41c1ef97 HK |
2140 | /* there is only one buffer or protocol is not UDP */ |
2141 | if (!skb_is_nonlinear(skb) || (ip_hdr(skb)->protocol != IPPROTO_UDP)) | |
1629dd4f RO |
2142 | return features; |
2143 | ||
2144 | /* length of header */ | |
2145 | hdrlen = skb_transport_offset(skb); | |
1629dd4f | 2146 | |
41c1ef97 | 2147 | /* For UFO only: |
1629dd4f RO |
2148 | * When software supplies two or more payload buffers all payload buffers |
2149 | * apart from the last must be a multiple of 8 bytes in size. | |
2150 | */ | |
2151 | if (!IS_ALIGNED(skb_headlen(skb) - hdrlen, MACB_TX_LEN_ALIGN)) | |
2152 | return features & ~MACB_NETIF_LSO; | |
2153 | ||
2154 | nr_frags = skb_shinfo(skb)->nr_frags; | |
2155 | /* No need to check last fragment */ | |
2156 | nr_frags--; | |
2157 | for (f = 0; f < nr_frags; f++) { | |
2158 | const skb_frag_t *frag = &skb_shinfo(skb)->frags[f]; | |
2159 | ||
2160 | if (!IS_ALIGNED(skb_frag_size(frag), MACB_TX_LEN_ALIGN)) | |
2161 | return features & ~MACB_NETIF_LSO; | |
2162 | } | |
2163 | return features; | |
2164 | } | |
2165 | ||
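/* A worked example of the alignment rule above (illustrative numbers):
 * with a 34-byte header (14 Ethernet + 20 IPv4) and skb_headlen() of
 * 100, the first payload chunk is 100 - 34 = 66 bytes. 66 is not a
 * multiple of MACB_TX_LEN_ALIGN (8), so LSO is masked out and the skb
 * takes the non-LSO transmit path instead.
 */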
007e4ba3 HB |
2166 | static inline int macb_clear_csum(struct sk_buff *skb) |
2167 | { | |
2168 | /* no change for packets without checksum offloading */ | |
2169 | if (skb->ip_summed != CHECKSUM_PARTIAL) | |
2170 | return 0; | |
2171 | ||
2172 | /* make sure we can modify the header */ | |
2173 | if (unlikely(skb_cow_head(skb, 0))) | |
2174 | return -1; | |
2175 | ||
2176 | /* initialize checksum field | |
2177 | * This is required - at least for Zynq, which otherwise calculates | |
2178 | * wrong UDP header checksums for UDP packets with UDP data len <= 2
2179 | */ | |
2180 | *(__sum16 *)(skb_checksum_start(skb) + skb->csum_offset) = 0; | |
2181 | return 0; | |
2182 | } | |
2183 | ||
653e92a9 CB |
2184 | static int macb_pad_and_fcs(struct sk_buff **skb, struct net_device *ndev) |
2185 | { | |
403dc167 MD |
2186 | bool cloned = skb_cloned(*skb) || skb_header_cloned(*skb) || |
2187 | skb_is_nonlinear(*skb); | |
653e92a9 CB |
2188 | int padlen = ETH_ZLEN - (*skb)->len; |
2189 | int headroom = skb_headroom(*skb); | |
2190 | int tailroom = skb_tailroom(*skb); | |
2191 | struct sk_buff *nskb; | |
2192 | u32 fcs; | |
2193 | ||
2194 | if (!(ndev->features & NETIF_F_HW_CSUM) || | |
2195 | (*skb)->ip_summed == CHECKSUM_PARTIAL ||
5cebb40b | 2196 | skb_shinfo(*skb)->gso_size || ptp_one_step_sync(*skb)) |
653e92a9 CB |
2197 | return 0; |
2198 | ||
2199 | if (padlen <= 0) { | |
2200 | /* FCS could be appended to tailroom. */
2201 | if (tailroom >= ETH_FCS_LEN) | |
2202 | goto add_fcs; | |
2203 | /* FCS could be appended by moving data to headroom. */
2204 | else if (!cloned && headroom + tailroom >= ETH_FCS_LEN) | |
2205 | padlen = 0; | |
2206 | /* No room for FCS, need to reallocate skb. */ | |
2207 | else | |
899ecaed | 2208 | padlen = ETH_FCS_LEN; |
653e92a9 CB |
2209 | } else { |
2210 | /* Add room for FCS. */ | |
2211 | padlen += ETH_FCS_LEN; | |
2212 | } | |
2213 | ||
2214 | if (!cloned && headroom + tailroom >= padlen) { | |
2215 | (*skb)->data = memmove((*skb)->head, (*skb)->data, (*skb)->len); | |
2216 | skb_set_tail_pointer(*skb, (*skb)->len); | |
2217 | } else { | |
2218 | nskb = skb_copy_expand(*skb, 0, padlen, GFP_ATOMIC); | |
2219 | if (!nskb) | |
2220 | return -ENOMEM; | |
2221 | ||
f3e5c070 | 2222 | dev_consume_skb_any(*skb); |
653e92a9 CB |
2223 | *skb = nskb; |
2224 | } | |
2225 | ||
ba3e1847 CB |
2226 | if (padlen > ETH_FCS_LEN) |
2227 | skb_put_zero(*skb, padlen - ETH_FCS_LEN); | |
653e92a9 CB |
2228 | |
2229 | add_fcs: | |
2230 | /* set FCS to packet */ | |
2231 | fcs = crc32_le(~0, (*skb)->data, (*skb)->len); | |
2232 | fcs = ~fcs; | |
2233 | ||
2234 | skb_put_u8(*skb, fcs & 0xff); | |
2235 | skb_put_u8(*skb, (fcs >> 8) & 0xff); | |
2236 | skb_put_u8(*skb, (fcs >> 16) & 0xff); | |
2237 | skb_put_u8(*skb, (fcs >> 24) & 0xff); | |
2238 | ||
2239 | return 0; | |
2240 | } | |
2241 | ||
d1c38957 | 2242 | static netdev_tx_t macb_start_xmit(struct sk_buff *skb, struct net_device *dev) |
a4c35ed3 | 2243 | { |
02c958dd | 2244 | u16 queue_index = skb_get_queue_mapping(skb); |
a4c35ed3 | 2245 | struct macb *bp = netdev_priv(dev); |
02c958dd | 2246 | struct macb_queue *queue = &bp->queues[queue_index]; |
1629dd4f RO |
2247 | unsigned int desc_cnt, nr_frags, frag_size, f; |
2248 | unsigned int hdrlen; | |
8932b5a5 | 2249 | bool is_lso; |
d1c38957 | 2250 | netdev_tx_t ret = NETDEV_TX_OK; |
1629dd4f | 2251 | |
33729f25 CB |
2252 | if (macb_clear_csum(skb)) { |
2253 | dev_kfree_skb_any(skb); | |
2254 | return ret; | |
2255 | } | |
2256 | ||
653e92a9 CB |
2257 | if (macb_pad_and_fcs(&skb, dev)) { |
2258 | dev_kfree_skb_any(skb); | |
2259 | return ret; | |
2260 | } | |
2261 | ||
1629dd4f RO |
2262 | is_lso = (skb_shinfo(skb)->gso_size != 0); |
2263 | ||
2264 | if (is_lso) { | |
1629dd4f | 2265 | /* length of headers */ |
8932b5a5 | 2266 | if (ip_hdr(skb)->protocol == IPPROTO_UDP) |
1629dd4f RO |
2267 | /* only queue eth + ip headers separately for UDP */ |
2268 | hdrlen = skb_transport_offset(skb); | |
2269 | else | |
504148fe | 2270 | hdrlen = skb_tcp_all_headers(skb); |
1629dd4f RO |
2271 | if (skb_headlen(skb) < hdrlen) { |
2272 | netdev_err(bp->dev, "Error - LSO headers fragmented\n");
2273 | /* if this is required, would need to copy to single buffer */ | |
2274 | return NETDEV_TX_BUSY; | |
2275 | } | |
2276 | } else | |
2277 | hdrlen = min(skb_headlen(skb), bp->max_tx_length); | |
89e5785f | 2278 | |
a268adb1 HS |
2279 | #if defined(DEBUG) && defined(VERBOSE_DEBUG) |
2280 | netdev_vdbg(bp->dev, | |
aa50b552 MF |
2281 | "start_xmit: queue %hu len %u head %p data %p tail %p end %p\n", |
2282 | queue_index, skb->len, skb->head, skb->data, | |
2283 | skb_tail_pointer(skb), skb_end_pointer(skb)); | |
c220f8cd JI |
2284 | print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_OFFSET, 16, 1, |
2285 | skb->data, 16, true); | |
89e5785f HS |
2286 | #endif |
2287 | ||
a4c35ed3 CP |
2288 | /* Count how many TX buffer descriptors are needed to send this |
2289 | * socket buffer: skb fragments of jumbo frames may need to be | |
aa50b552 | 2290 | * split into many buffer descriptors. |
a4c35ed3 | 2291 | */ |
1629dd4f RO |
2292 | if (is_lso && (skb_headlen(skb) > hdrlen)) |
2293 | /* extra header descriptor if also payload in first buffer */ | |
2294 | desc_cnt = DIV_ROUND_UP((skb_headlen(skb) - hdrlen), bp->max_tx_length) + 1; | |
2295 | else | |
2296 | desc_cnt = DIV_ROUND_UP(skb_headlen(skb), bp->max_tx_length); | |
a4c35ed3 CP |
2297 | nr_frags = skb_shinfo(skb)->nr_frags; |
2298 | for (f = 0; f < nr_frags; f++) { | |
2299 | frag_size = skb_frag_size(&skb_shinfo(skb)->frags[f]); | |
1629dd4f | 2300 | desc_cnt += DIV_ROUND_UP(frag_size, bp->max_tx_length); |
a4c35ed3 CP |
2301 | } |
2302 | ||
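/* For example (illustrative numbers): a non-LSO skb with skb_headlen()
 * of 4000 and a max_tx_length of 1514 needs DIV_ROUND_UP(4000, 1514)
 * = 3 descriptors for the linear area, plus DIV_ROUND_UP() of each
 * fragment's size for the paged data.
 */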
138badbc | 2303 | spin_lock_bh(&queue->tx_ptr_lock); |
89e5785f HS |
2304 | |
2305 | /* This is a hard error, log it. */ | |
b410d13e | 2306 | if (CIRC_SPACE(queue->tx_head, queue->tx_tail, |
1629dd4f | 2307 | bp->tx_ring_size) < desc_cnt) { |
02c958dd | 2308 | netif_stop_subqueue(dev, queue_index); |
c220f8cd | 2309 | netdev_dbg(bp->dev, "tx_head = %u, tx_tail = %u\n", |
02c958dd | 2310 | queue->tx_head, queue->tx_tail); |
138badbc RH |
2311 | ret = NETDEV_TX_BUSY; |
2312 | goto unlock; | |
89e5785f HS |
2313 | } |
2314 | ||
a4c35ed3 | 2315 | /* Map socket buffer for DMA transfer */ |
1629dd4f | 2316 | if (!macb_tx_map(bp, queue, skb, hdrlen)) { |
c88b5b6a | 2317 | dev_kfree_skb_any(skb); |
92030908 SB |
2318 | goto unlock; |
2319 | } | |
55054a16 | 2320 | |
03dbe05f | 2321 | /* Make newly initialized descriptor visible to hardware */ |
89e5785f | 2322 | wmb(); |
e072092f RC |
2323 | skb_tx_timestamp(skb); |
2324 | ||
138badbc | 2325 | spin_lock_irq(&bp->lock); |
89e5785f | 2326 | macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART)); |
138badbc | 2327 | spin_unlock_irq(&bp->lock); |
89e5785f | 2328 | |
b410d13e | 2329 | if (CIRC_SPACE(queue->tx_head, queue->tx_tail, bp->tx_ring_size) < 1) |
02c958dd | 2330 | netif_stop_subqueue(dev, queue_index); |
89e5785f | 2331 | |
92030908 | 2332 | unlock: |
138badbc | 2333 | spin_unlock_bh(&queue->tx_ptr_lock); |
89e5785f | 2334 | |
d1c38957 | 2335 | return ret; |
89e5785f HS |
2336 | } |
2337 | ||
4df95131 | 2338 | static void macb_init_rx_buffer_size(struct macb *bp, size_t size) |
1b44791a NF |
2339 | { |
2340 | if (!macb_is_gem(bp)) { | |
2341 | bp->rx_buffer_size = MACB_RX_BUFFER_SIZE; | |
2342 | } else { | |
4df95131 | 2343 | bp->rx_buffer_size = size; |
1b44791a | 2344 | |
1b44791a | 2345 | if (bp->rx_buffer_size % RX_BUFFER_MULTIPLE) { |
4df95131 | 2346 | netdev_dbg(bp->dev, |
aa50b552 MF |
2347 | "RX buffer must be multiple of %d bytes, expanding\n", |
2348 | RX_BUFFER_MULTIPLE); | |
1b44791a | 2349 | bp->rx_buffer_size = |
4df95131 | 2350 | roundup(bp->rx_buffer_size, RX_BUFFER_MULTIPLE); |
1b44791a | 2351 | } |
1b44791a | 2352 | } |
4df95131 | 2353 | |
5b5e0928 | 2354 | netdev_dbg(bp->dev, "mtu [%u] rx_buffer_size [%zu]\n", |
4df95131 | 2355 | bp->dev->mtu, bp->rx_buffer_size); |
1b44791a NF |
2356 | } |
2357 | ||
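/* For example (illustrative numbers, assuming NET_IP_ALIGN == 2): with
 * an MTU of 1500, macb_open() requests 1500 + ETH_HLEN (14) +
 * ETH_FCS_LEN (4) + NET_IP_ALIGN (2) = 1520 bytes, which GEM rounds up
 * to the next multiple of RX_BUFFER_MULTIPLE (64), i.e. a 1536-byte
 * RX buffer.
 */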
4df95131 NF |
2358 | static void gem_free_rx_buffers(struct macb *bp) |
2359 | { | |
2360 | struct sk_buff *skb; | |
2361 | struct macb_dma_desc *desc; | |
ae1f2a56 | 2362 | struct macb_queue *queue; |
4df95131 | 2363 | dma_addr_t addr; |
ae1f2a56 | 2364 | unsigned int q; |
4df95131 NF |
2365 | int i; |
2366 | ||
ae1f2a56 RO |
2367 | for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { |
2368 | if (!queue->rx_skbuff) | |
2369 | continue; | |
4df95131 | 2370 | |
ae1f2a56 RO |
2371 | for (i = 0; i < bp->rx_ring_size; i++) { |
2372 | skb = queue->rx_skbuff[i]; | |
4df95131 | 2373 | |
ae1f2a56 RO |
2374 | if (!skb) |
2375 | continue; | |
4df95131 | 2376 | |
ae1f2a56 RO |
2377 | desc = macb_rx_desc(queue, i); |
2378 | addr = macb_get_addr(bp, desc); | |
dc97a89e | 2379 | |
ae1f2a56 RO |
2380 | dma_unmap_single(&bp->pdev->dev, addr, bp->rx_buffer_size, |
2381 | DMA_FROM_DEVICE); | |
2382 | dev_kfree_skb_any(skb); | |
2383 | skb = NULL; | |
2384 | } | |
4df95131 | 2385 | |
ae1f2a56 RO |
2386 | kfree(queue->rx_skbuff); |
2387 | queue->rx_skbuff = NULL; | |
2388 | } | |
4df95131 NF |
2389 | } |
2390 | ||
2391 | static void macb_free_rx_buffers(struct macb *bp) | |
2392 | { | |
ae1f2a56 RO |
2393 | struct macb_queue *queue = &bp->queues[0]; |
2394 | ||
2395 | if (queue->rx_buffers) { | |
4df95131 | 2396 | dma_free_coherent(&bp->pdev->dev, |
b410d13e | 2397 | bp->rx_ring_size * bp->rx_buffer_size, |
ae1f2a56 RO |
2398 | queue->rx_buffers, queue->rx_buffers_dma); |
2399 | queue->rx_buffers = NULL; | |
4df95131 NF |
2400 | } |
2401 | } | |
1b44791a | 2402 | |
89e5785f HS |
2403 | static void macb_free_consistent(struct macb *bp) |
2404 | { | |
02c958dd CP |
2405 | struct macb_queue *queue; |
2406 | unsigned int q; | |
404cd086 | 2407 | int size; |
02c958dd | 2408 | |
4df95131 | 2409 | bp->macbgem_ops.mog_free_rx_buffers(bp); |
02c958dd CP |
2410 | |
2411 | for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { | |
2412 | kfree(queue->tx_skb); | |
2413 | queue->tx_skb = NULL; | |
2414 | if (queue->tx_ring) { | |
404cd086 HK |
2415 | size = TX_RING_BYTES(bp) + bp->tx_bd_rd_prefetch; |
2416 | dma_free_coherent(&bp->pdev->dev, size, | |
02c958dd CP |
2417 | queue->tx_ring, queue->tx_ring_dma); |
2418 | queue->tx_ring = NULL; | |
2419 | } | |
e50b770e | 2420 | if (queue->rx_ring) { |
404cd086 HK |
2421 | size = RX_RING_BYTES(bp) + bp->rx_bd_rd_prefetch; |
2422 | dma_free_coherent(&bp->pdev->dev, size, | |
e50b770e HK |
2423 | queue->rx_ring, queue->rx_ring_dma); |
2424 | queue->rx_ring = NULL; | |
2425 | } | |
89e5785f | 2426 | } |
4df95131 NF |
2427 | } |
2428 | ||
2429 | static int gem_alloc_rx_buffers(struct macb *bp) | |
2430 | { | |
ae1f2a56 RO |
2431 | struct macb_queue *queue; |
2432 | unsigned int q; | |
4df95131 NF |
2433 | int size; |
2434 | ||
ae1f2a56 RO |
2435 | for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { |
2436 | size = bp->rx_ring_size * sizeof(struct sk_buff *); | |
2437 | queue->rx_skbuff = kzalloc(size, GFP_KERNEL); | |
2438 | if (!queue->rx_skbuff) | |
2439 | return -ENOMEM; | |
2440 | else | |
2441 | netdev_dbg(bp->dev, | |
2442 | "Allocated %d RX struct sk_buff entries at %p\n", | |
2443 | bp->rx_ring_size, queue->rx_skbuff); | |
2444 | } | |
4df95131 NF |
2445 | return 0; |
2446 | } | |
2447 | ||
2448 | static int macb_alloc_rx_buffers(struct macb *bp) | |
2449 | { | |
ae1f2a56 | 2450 | struct macb_queue *queue = &bp->queues[0]; |
4df95131 NF |
2451 | int size; |
2452 | ||
b410d13e | 2453 | size = bp->rx_ring_size * bp->rx_buffer_size; |
ae1f2a56 RO |
2454 | queue->rx_buffers = dma_alloc_coherent(&bp->pdev->dev, size, |
2455 | &queue->rx_buffers_dma, GFP_KERNEL); | |
2456 | if (!queue->rx_buffers) | |
4df95131 | 2457 | return -ENOMEM; |
64ec42fe MF |
2458 | |
2459 | netdev_dbg(bp->dev, | |
2460 | "Allocated RX buffers of %d bytes at %08lx (mapped %p)\n", | |
ae1f2a56 | 2461 | size, (unsigned long)queue->rx_buffers_dma, queue->rx_buffers); |
4df95131 | 2462 | return 0; |
89e5785f HS |
2463 | } |
2464 | ||
2465 | static int macb_alloc_consistent(struct macb *bp) | |
2466 | { | |
02c958dd CP |
2467 | struct macb_queue *queue; |
2468 | unsigned int q; | |
89e5785f HS |
2469 | int size; |
2470 | ||
02c958dd | 2471 | for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { |
404cd086 | 2472 | size = TX_RING_BYTES(bp) + bp->tx_bd_rd_prefetch; |
02c958dd CP |
2473 | queue->tx_ring = dma_alloc_coherent(&bp->pdev->dev, size, |
2474 | &queue->tx_ring_dma, | |
2475 | GFP_KERNEL); | |
2476 | if (!queue->tx_ring) | |
2477 | goto out_err; | |
2478 | netdev_dbg(bp->dev, | |
2479 | "Allocated TX ring for queue %u of %d bytes at %08lx (mapped %p)\n", | |
2480 | q, size, (unsigned long)queue->tx_ring_dma, | |
2481 | queue->tx_ring); | |
2482 | ||
b410d13e | 2483 | size = bp->tx_ring_size * sizeof(struct macb_tx_skb); |
02c958dd CP |
2484 | queue->tx_skb = kmalloc(size, GFP_KERNEL); |
2485 | if (!queue->tx_skb) | |
2486 | goto out_err; | |
89e5785f | 2487 | |
404cd086 | 2488 | size = RX_RING_BYTES(bp) + bp->rx_bd_rd_prefetch; |
ae1f2a56 RO |
2489 | queue->rx_ring = dma_alloc_coherent(&bp->pdev->dev, size, |
2490 | &queue->rx_ring_dma, GFP_KERNEL); | |
2491 | if (!queue->rx_ring) | |
2492 | goto out_err; | |
2493 | netdev_dbg(bp->dev, | |
2494 | "Allocated RX ring of %d bytes at %08lx (mapped %p)\n", | |
2495 | size, (unsigned long)queue->rx_ring_dma, queue->rx_ring); | |
2496 | } | |
4df95131 | 2497 | if (bp->macbgem_ops.mog_alloc_rx_buffers(bp)) |
89e5785f | 2498 | goto out_err; |
89e5785f HS |
2499 | |
2500 | return 0; | |
2501 | ||
2502 | out_err: | |
2503 | macb_free_consistent(bp); | |
2504 | return -ENOMEM; | |
2505 | } | |
2506 | ||
4df95131 NF |
2507 | static void gem_init_rings(struct macb *bp) |
2508 | { | |
02c958dd | 2509 | struct macb_queue *queue; |
dc97a89e | 2510 | struct macb_dma_desc *desc = NULL; |
02c958dd | 2511 | unsigned int q; |
4df95131 NF |
2512 | int i; |
2513 | ||
02c958dd | 2514 | for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { |
b410d13e | 2515 | for (i = 0; i < bp->tx_ring_size; i++) { |
dc97a89e RO |
2516 | desc = macb_tx_desc(queue, i); |
2517 | macb_set_addr(bp, desc, 0); | |
2518 | desc->ctrl = MACB_BIT(TX_USED); | |
02c958dd | 2519 | } |
dc97a89e | 2520 | desc->ctrl |= MACB_BIT(TX_WRAP); |
02c958dd CP |
2521 | queue->tx_head = 0; |
2522 | queue->tx_tail = 0; | |
4df95131 | 2523 | |
ae1f2a56 RO |
2524 | queue->rx_tail = 0; |
2525 | queue->rx_prepared_head = 0; | |
2526 | ||
2527 | gem_rx_refill(queue); | |
2528 | } | |
4df95131 | 2529 | |
4df95131 NF |
2530 | } |
2531 | ||
89e5785f HS |
2532 | static void macb_init_rings(struct macb *bp) |
2533 | { | |
2534 | int i; | |
dc97a89e | 2535 | struct macb_dma_desc *desc = NULL; |
89e5785f | 2536 | |
ae1f2a56 | 2537 | macb_init_rx_ring(&bp->queues[0]); |
89e5785f | 2538 | |
b410d13e | 2539 | for (i = 0; i < bp->tx_ring_size; i++) { |
dc97a89e RO |
2540 | desc = macb_tx_desc(&bp->queues[0], i); |
2541 | macb_set_addr(bp, desc, 0); | |
2542 | desc->ctrl = MACB_BIT(TX_USED); | |
89e5785f | 2543 | } |
21d3515c BS |
2544 | bp->queues[0].tx_head = 0; |
2545 | bp->queues[0].tx_tail = 0; | |
dc97a89e | 2546 | desc->ctrl |= MACB_BIT(TX_WRAP); |
89e5785f HS |
2547 | } |
2548 | ||
2549 | static void macb_reset_hw(struct macb *bp) | |
2550 | { | |
02c958dd CP |
2551 | struct macb_queue *queue; |
2552 | unsigned int q; | |
0da70f80 | 2553 | u32 ctrl = macb_readl(bp, NCR); |
02c958dd | 2554 | |
64ec42fe | 2555 | /* Disable RX and TX (XXX: Should we halt the transmission |
89e5785f HS |
2556 | * more gracefully?) |
2557 | */ | |
0da70f80 | 2558 | ctrl &= ~(MACB_BIT(RE) | MACB_BIT(TE)); |
89e5785f HS |
2559 | |
2560 | /* Clear the stats registers (XXX: Update stats first?) */ | |
0da70f80 AH |
2561 | ctrl |= MACB_BIT(CLRSTAT); |
2562 | ||
2563 | macb_writel(bp, NCR, ctrl); | |
89e5785f HS |
2564 | |
2565 | /* Clear all status flags */ | |
95ebcea6 JE |
2566 | macb_writel(bp, TSR, -1); |
2567 | macb_writel(bp, RSR, -1); | |
89e5785f HS |
2568 | |
2569 | /* Disable all interrupts */ | |
02c958dd CP |
2570 | for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { |
2571 | queue_writel(queue, IDR, -1); | |
2572 | queue_readl(queue, ISR); | |
24468374 NS |
2573 | if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) |
2574 | queue_writel(queue, ISR, -1); | |
02c958dd | 2575 | } |
89e5785f HS |
2576 | } |
2577 | ||
70c9f3d4 JI |
2578 | static u32 gem_mdc_clk_div(struct macb *bp) |
2579 | { | |
2580 | u32 config; | |
2581 | unsigned long pclk_hz = clk_get_rate(bp->pclk); | |
2582 | ||
2583 | if (pclk_hz <= 20000000) | |
2584 | config = GEM_BF(CLK, GEM_CLK_DIV8); | |
2585 | else if (pclk_hz <= 40000000) | |
2586 | config = GEM_BF(CLK, GEM_CLK_DIV16); | |
2587 | else if (pclk_hz <= 80000000) | |
2588 | config = GEM_BF(CLK, GEM_CLK_DIV32); | |
2589 | else if (pclk_hz <= 120000000) | |
2590 | config = GEM_BF(CLK, GEM_CLK_DIV48); | |
2591 | else if (pclk_hz <= 160000000) | |
2592 | config = GEM_BF(CLK, GEM_CLK_DIV64); | |
2593 | else | |
2594 | config = GEM_BF(CLK, GEM_CLK_DIV96); | |
2595 | ||
2596 | return config; | |
2597 | } | |
2598 | ||
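/* For example (illustrative numbers): with pclk at 100 MHz the
 * GEM_CLK_DIV48 divisor is selected, giving an MDC frequency of about
 * 100 / 48 ~= 2.08 MHz, below the 2.5 MHz maximum that IEEE 802.3
 * specifies for MDC.
 */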
2599 | static u32 macb_mdc_clk_div(struct macb *bp) | |
2600 | { | |
2601 | u32 config; | |
2602 | unsigned long pclk_hz; | |
2603 | ||
2604 | if (macb_is_gem(bp)) | |
2605 | return gem_mdc_clk_div(bp); | |
2606 | ||
2607 | pclk_hz = clk_get_rate(bp->pclk); | |
2608 | if (pclk_hz <= 20000000) | |
2609 | config = MACB_BF(CLK, MACB_CLK_DIV8); | |
2610 | else if (pclk_hz <= 40000000) | |
2611 | config = MACB_BF(CLK, MACB_CLK_DIV16); | |
2612 | else if (pclk_hz <= 80000000) | |
2613 | config = MACB_BF(CLK, MACB_CLK_DIV32); | |
2614 | else | |
2615 | config = MACB_BF(CLK, MACB_CLK_DIV64); | |
2616 | ||
2617 | return config; | |
2618 | } | |
2619 | ||
64ec42fe | 2620 | /* Get the DMA bus width field of the network configuration register that we |
757a03c6 JI |
2621 | * should program. We determine the width by decoding the design
2622 | * configuration register, which reports the maximum supported data bus width.
2623 | */ | |
2624 | static u32 macb_dbw(struct macb *bp) | |
2625 | { | |
2626 | if (!macb_is_gem(bp)) | |
2627 | return 0; | |
2628 | ||
2629 | switch (GEM_BFEXT(DBWDEF, gem_readl(bp, DCFG1))) { | |
2630 | case 4: | |
2631 | return GEM_BF(DBW, GEM_DBW128); | |
2632 | case 2: | |
2633 | return GEM_BF(DBW, GEM_DBW64); | |
2634 | case 1: | |
2635 | default: | |
2636 | return GEM_BF(DBW, GEM_DBW32); | |
2637 | } | |
2638 | } | |
2639 | ||
64ec42fe | 2640 | /* Configure the receive DMA engine |
b3e3bd71 | 2641 | * - use the correct receive buffer size |
e175587f | 2642 | * - set best burst length for DMA operations |
b3e3bd71 NF |
2643 | * (if not supported by FIFO, it will fall back to the default)
2644 | * - set both rx/tx packet buffers to full memory size | |
2645 | * These are configurable parameters for GEM. | |
0116da4f JI |
2646 | */ |
2647 | static void macb_configure_dma(struct macb *bp) | |
2648 | { | |
ae1f2a56 RO |
2649 | struct macb_queue *queue; |
2650 | u32 buffer_size; | |
2651 | unsigned int q; | |
0116da4f JI |
2652 | u32 dmacfg; |
2653 | ||
ae1f2a56 | 2654 | buffer_size = bp->rx_buffer_size / RX_BUFFER_MULTIPLE; |
0116da4f JI |
2655 | if (macb_is_gem(bp)) { |
2656 | dmacfg = gem_readl(bp, DMACFG) & ~GEM_BF(RXBS, -1L); | |
ae1f2a56 RO |
2657 | for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { |
2658 | if (q) | |
2659 | queue_writel(queue, RBQS, buffer_size); | |
2660 | else | |
2661 | dmacfg |= GEM_BF(RXBS, buffer_size); | |
2662 | } | |
e175587f NF |
2663 | if (bp->dma_burst_length) |
2664 | dmacfg = GEM_BFINS(FBLDO, bp->dma_burst_length, dmacfg); | |
b3e3bd71 | 2665 | dmacfg |= GEM_BIT(TXPBMS) | GEM_BF(RXBMS, -1L); |
a50dad35 | 2666 | dmacfg &= ~GEM_BIT(ENDIA_PKT); |
62f6924c | 2667 | |
f2ce8a9e | 2668 | if (bp->native_io) |
62f6924c AC |
2669 | dmacfg &= ~GEM_BIT(ENDIA_DESC); |
2670 | else | |
2671 | dmacfg |= GEM_BIT(ENDIA_DESC); /* CPU in big endian */ | |
2672 | ||
85ff3d87 CP |
2673 | if (bp->dev->features & NETIF_F_HW_CSUM) |
2674 | dmacfg |= GEM_BIT(TXCOEN); | |
2675 | else | |
2676 | dmacfg &= ~GEM_BIT(TXCOEN); | |
fff8019a | 2677 | |
bd620720 | 2678 | dmacfg &= ~GEM_BIT(ADDR64); |
fff8019a | 2679 | #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT |
7b429614 | 2680 | if (bp->hw_dma_cap & HW_DMA_CAP_64B) |
dc97a89e | 2681 | dmacfg |= GEM_BIT(ADDR64); |
7b429614 RO |
2682 | #endif |
2683 | #ifdef CONFIG_MACB_USE_HWSTAMP | |
2684 | if (bp->hw_dma_cap & HW_DMA_CAP_PTP) | |
2685 | dmacfg |= GEM_BIT(RXEXT) | GEM_BIT(TXEXT); | |
fff8019a | 2686 | #endif |
e175587f NF |
2687 | netdev_dbg(bp->dev, "Cadence configure DMA with 0x%08x\n", |
2688 | dmacfg); | |
0116da4f JI |
2689 | gem_writel(bp, DMACFG, dmacfg); |
2690 | } | |
2691 | } | |
2692 | ||
89e5785f HS |
2693 | static void macb_init_hw(struct macb *bp) |
2694 | { | |
2695 | u32 config; | |
2696 | ||
2697 | macb_reset_hw(bp); | |
314bccc4 | 2698 | macb_set_hwaddr(bp); |
89e5785f | 2699 | |
70c9f3d4 | 2700 | config = macb_mdc_clk_div(bp); |
29bc2e1e | 2701 | config |= MACB_BF(RBOF, NET_IP_ALIGN); /* Make eth data aligned */ |
89e5785f | 2702 | config |= MACB_BIT(DRFCS); /* Discard Rx FCS */ |
a104a6b3 | 2703 | if (bp->caps & MACB_CAPS_JUMBO) |
98b5a0f4 HK |
2704 | config |= MACB_BIT(JFRAME); /* Enable jumbo frames */ |
2705 | else | |
2706 | config |= MACB_BIT(BIG); /* Receive oversized frames */ | |
89e5785f HS |
2707 | if (bp->dev->flags & IFF_PROMISC) |
2708 | config |= MACB_BIT(CAF); /* Copy All Frames */ | |
924ec53c CP |
2709 | else if (macb_is_gem(bp) && bp->dev->features & NETIF_F_RXCSUM) |
2710 | config |= GEM_BIT(RXCOEN); | |
89e5785f HS |
2711 | if (!(bp->dev->flags & IFF_BROADCAST)) |
2712 | config |= MACB_BIT(NBC); /* No BroadCast */ | |
757a03c6 | 2713 | config |= macb_dbw(bp); |
89e5785f | 2714 | macb_writel(bp, NCFGR, config); |
a104a6b3 | 2715 | if ((bp->caps & MACB_CAPS_JUMBO) && bp->jumbo_max_len) |
98b5a0f4 | 2716 | gem_writel(bp, JML, bp->jumbo_max_len); |
98b5a0f4 | 2717 | bp->rx_frm_len_mask = MACB_RX_FRMLEN_MASK; |
a104a6b3 | 2718 | if (bp->caps & MACB_CAPS_JUMBO) |
98b5a0f4 | 2719 | bp->rx_frm_len_mask = MACB_RX_JFRMLEN_MASK; |
89e5785f | 2720 | |
0116da4f | 2721 | macb_configure_dma(bp); |
89e5785f HS |
2722 | } |
2723 | ||
64ec42fe | 2724 | /* The hash address register is 64 bits long and takes up two |
446ebd01 PV |
2725 | * locations in the memory map. The least significant bits are stored |
2726 | * in EMAC_HSL and the most significant bits in EMAC_HSH. | |
2727 | * | |
2728 | * The unicast hash enable and the multicast hash enable bits in the | |
2729 | * network configuration register enable the reception of hash matched | |
2730 | * frames. The destination address is reduced to a 6 bit index into | |
2731 | * the 64 bit hash register using the following hash function. The | |
2732 | * hash function is an exclusive or of every sixth bit of the | |
2733 | * destination address. | |
2734 | * | |
2735 | * hi[5] = da[5] ^ da[11] ^ da[17] ^ da[23] ^ da[29] ^ da[35] ^ da[41] ^ da[47] | |
2736 | * hi[4] = da[4] ^ da[10] ^ da[16] ^ da[22] ^ da[28] ^ da[34] ^ da[40] ^ da[46] | |
2737 | * hi[3] = da[3] ^ da[09] ^ da[15] ^ da[21] ^ da[27] ^ da[33] ^ da[39] ^ da[45] | |
2738 | * hi[2] = da[2] ^ da[08] ^ da[14] ^ da[20] ^ da[26] ^ da[32] ^ da[38] ^ da[44] | |
2739 | * hi[1] = da[1] ^ da[07] ^ da[13] ^ da[19] ^ da[25] ^ da[31] ^ da[37] ^ da[43] | |
2740 | * hi[0] = da[0] ^ da[06] ^ da[12] ^ da[18] ^ da[24] ^ da[30] ^ da[36] ^ da[42] | |
2741 | * | |
2742 | * da[0] represents the least significant bit of the first byte | |
2743 | * received, that is, the multicast/unicast indicator, and da[47] | |
2744 | * represents the most significant bit of the last byte received. If | |
2745 | * the hash index, hi[n], points to a bit that is set in the hash | |
2746 | * register then the frame will be matched according to whether the | |
2747 | * frame is multicast or unicast. A multicast match will be signalled | |
2748 | * if the multicast hash enable bit is set, da[0] is 1 and the hash | |
2749 | * index points to a bit set in the hash register. A unicast match | |
2750 | * will be signalled if the unicast hash enable bit is set, da[0] is 0 | |
2751 | * and the hash index points to a bit set in the hash register. To | |
2752 | * receive all multicast frames, the hash register should be set with | |
2753 | * all ones and the multicast hash enable bit should be set in the | |
2754 | * network configuration register. | |
2755 | */ | |
2756 | ||
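/* A worked example of the hash function above: for the multicast
 * address 01:00:5e:00:00:01 the set bits are da[0], da[17], da[18],
 * da[19], da[20], da[22] and da[40]. XOR-ing every sixth bit yields
 * hi[5..0] = 100110b, i.e. hash index 38, so the frame is matched
 * when bit 38 of the hash register (bit 6 of HRT) is set.
 */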
2757 | static inline int hash_bit_value(int bitnr, __u8 *addr) | |
2758 | { | |
2759 | if (addr[bitnr / 8] & (1 << (bitnr % 8))) | |
2760 | return 1; | |
2761 | return 0; | |
2762 | } | |
2763 | ||
64ec42fe | 2764 | /* Return the hash index value for the specified address. */ |
446ebd01 PV |
2765 | static int hash_get_index(__u8 *addr) |
2766 | { | |
2767 | int i, j, bitval; | |
2768 | int hash_index = 0; | |
2769 | ||
2770 | for (j = 0; j < 6; j++) { | |
2771 | for (i = 0, bitval = 0; i < 8; i++) | |
2fa45e22 | 2772 | bitval ^= hash_bit_value(i * 6 + j, addr); |
446ebd01 PV |
2773 | |
2774 | hash_index |= (bitval << j); | |
2775 | } | |
2776 | ||
2777 | return hash_index; | |
2778 | } | |
2779 | ||
64ec42fe | 2780 | /* Add multicast addresses to the internal multicast-hash table. */ |
446ebd01 PV |
2781 | static void macb_sethashtable(struct net_device *dev) |
2782 | { | |
22bedad3 | 2783 | struct netdev_hw_addr *ha; |
446ebd01 | 2784 | unsigned long mc_filter[2]; |
f9dcbcc9 | 2785 | unsigned int bitnr; |
446ebd01 PV |
2786 | struct macb *bp = netdev_priv(dev); |
2787 | ||
aa50b552 MF |
2788 | mc_filter[0] = 0; |
2789 | mc_filter[1] = 0; | |
446ebd01 | 2790 | |
22bedad3 JP |
2791 | netdev_for_each_mc_addr(ha, dev) { |
2792 | bitnr = hash_get_index(ha->addr); | |
446ebd01 PV |
2793 | mc_filter[bitnr >> 5] |= 1 << (bitnr & 31); |
2794 | } | |
2795 | ||
f75ba50b JI |
2796 | macb_or_gem_writel(bp, HRB, mc_filter[0]); |
2797 | macb_or_gem_writel(bp, HRT, mc_filter[1]); | |
446ebd01 PV |
2798 | } |
2799 | ||
64ec42fe | 2800 | /* Enable/Disable promiscuous and multicast modes. */ |
421d9df0 | 2801 | static void macb_set_rx_mode(struct net_device *dev) |
446ebd01 PV |
2802 | { |
2803 | unsigned long cfg; | |
2804 | struct macb *bp = netdev_priv(dev); | |
2805 | ||
2806 | cfg = macb_readl(bp, NCFGR); | |
2807 | ||
924ec53c | 2808 | if (dev->flags & IFF_PROMISC) { |
446ebd01 PV |
2809 | /* Enable promiscuous mode */ |
2810 | cfg |= MACB_BIT(CAF); | |
924ec53c CP |
2811 | |
2812 | /* Disable RX checksum offload */ | |
2813 | if (macb_is_gem(bp)) | |
2814 | cfg &= ~GEM_BIT(RXCOEN); | |
2815 | } else { | |
2816 | /* Disable promiscuous mode */ | |
446ebd01 PV |
2817 | cfg &= ~MACB_BIT(CAF); |
2818 | ||
924ec53c CP |
2819 | /* Enable RX checksum offload only if requested */ |
2820 | if (macb_is_gem(bp) && dev->features & NETIF_F_RXCSUM) | |
2821 | cfg |= GEM_BIT(RXCOEN); | |
2822 | } | |
2823 | ||
446ebd01 PV |
2824 | if (dev->flags & IFF_ALLMULTI) { |
2825 | /* Enable all multicast mode */ | |
f75ba50b JI |
2826 | macb_or_gem_writel(bp, HRB, -1); |
2827 | macb_or_gem_writel(bp, HRT, -1); | |
446ebd01 | 2828 | cfg |= MACB_BIT(NCFGR_MTI); |
4cd24eaf | 2829 | } else if (!netdev_mc_empty(dev)) { |
446ebd01 PV |
2830 | /* Enable specific multicasts */ |
2831 | macb_sethashtable(dev); | |
2832 | cfg |= MACB_BIT(NCFGR_MTI); | |
2833 | } else if (dev->flags & (~IFF_ALLMULTI)) { | |
2834 | /* Disable all multicast mode */ | |
f75ba50b JI |
2835 | macb_or_gem_writel(bp, HRB, 0); |
2836 | macb_or_gem_writel(bp, HRT, 0); | |
446ebd01 PV |
2837 | cfg &= ~MACB_BIT(NCFGR_MTI); |
2838 | } | |
2839 | ||
2840 | macb_writel(bp, NCFGR, cfg); | |
2841 | } | |
2842 | ||
89e5785f HS |
2843 | static int macb_open(struct net_device *dev) |
2844 | { | |
4df95131 | 2845 | size_t bufsz = dev->mtu + ETH_HLEN + ETH_FCS_LEN + NET_IP_ALIGN; |
7897b071 | 2846 | struct macb *bp = netdev_priv(dev); |
ae1f2a56 RO |
2847 | struct macb_queue *queue; |
2848 | unsigned int q; | |
89e5785f HS |
2849 | int err; |
2850 | ||
c220f8cd | 2851 | netdev_dbg(bp->dev, "open\n"); |
89e5785f | 2852 | |
b66bfc13 | 2853 | err = pm_runtime_resume_and_get(&bp->pdev->dev); |
d54f89af | 2854 | if (err < 0) |
b66bfc13 | 2855 | return err; |
d54f89af | 2856 | |
1b44791a | 2857 | /* RX buffer initialization */
4df95131 | 2858 | macb_init_rx_buffer_size(bp, bufsz); |
6c36a707 | 2859 | |
89e5785f HS |
2860 | err = macb_alloc_consistent(bp); |
2861 | if (err) { | |
c220f8cd JI |
2862 | netdev_err(dev, "Unable to allocate DMA memory (error %d)\n", |
2863 | err); | |
d54f89af | 2864 | goto pm_exit; |
89e5785f HS |
2865 | } |
2866 | ||
138badbc RH |
2867 | for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { |
2868 | napi_enable(&queue->napi_rx); | |
2869 | napi_enable(&queue->napi_tx); | |
2870 | } | |
ae1f2a56 | 2871 | |
05044531 | 2872 | macb_init_hw(bp); |
ae1f2a56 | 2873 | |
8b73fa3a | 2874 | err = phy_power_on(bp->sgmii_phy); |
7897b071 | 2875 | if (err) |
faa62087 | 2876 | goto reset_hw; |
89e5785f | 2877 | |
8b73fa3a RH |
2878 | err = macb_phylink_connect(bp); |
2879 | if (err) | |
2880 | goto phy_off; | |
2881 | ||
02c958dd | 2882 | netif_tx_start_all_queues(dev); |
89e5785f | 2883 | |
c2594d80 AP |
2884 | if (bp->ptp_info) |
2885 | bp->ptp_info->ptp_init(dev); | |
2886 | ||
939a5bf7 CK |
2887 | return 0; |
2888 | ||
8b73fa3a RH |
2889 | phy_off: |
2890 | phy_power_off(bp->sgmii_phy); | |
2891 | ||
faa62087 CB |
2892 | reset_hw: |
2893 | macb_reset_hw(bp); | |
138badbc RH |
2894 | for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { |
2895 | napi_disable(&queue->napi_rx); | |
2896 | napi_disable(&queue->napi_tx); | |
2897 | } | |
faa62087 | 2898 | macb_free_consistent(bp); |
d54f89af | 2899 | pm_exit: |
939a5bf7 CK |
2900 | pm_runtime_put_sync(&bp->pdev->dev); |
2901 | return err; | |
89e5785f HS |
2902 | } |
2903 | ||
2904 | static int macb_close(struct net_device *dev) | |
2905 | { | |
2906 | struct macb *bp = netdev_priv(dev); | |
ae1f2a56 | 2907 | struct macb_queue *queue; |
89e5785f | 2908 | unsigned long flags; |
ae1f2a56 | 2909 | unsigned int q; |
89e5785f | 2910 | |
02c958dd | 2911 | netif_tx_stop_all_queues(dev); |
ae1f2a56 | 2912 | |
138badbc RH |
2913 | for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { |
2914 | napi_disable(&queue->napi_rx); | |
2915 | napi_disable(&queue->napi_tx); | |
2916 | } | |
89e5785f | 2917 | |
7897b071 AT |
2918 | phylink_stop(bp->phylink); |
2919 | phylink_disconnect_phy(bp->phylink); | |
6c36a707 | 2920 | |
8b73fa3a RH |
2921 | phy_power_off(bp->sgmii_phy); |
2922 | ||
89e5785f HS |
2923 | spin_lock_irqsave(&bp->lock, flags); |
2924 | macb_reset_hw(bp); | |
2925 | netif_carrier_off(dev); | |
2926 | spin_unlock_irqrestore(&bp->lock, flags); | |
2927 | ||
2928 | macb_free_consistent(bp); | |
2929 | ||
c2594d80 AP |
2930 | if (bp->ptp_info) |
2931 | bp->ptp_info->ptp_remove(dev); | |
2932 | ||
d54f89af HK |
2933 | pm_runtime_put(&bp->pdev->dev); |
2934 | ||
89e5785f HS |
2935 | return 0; |
2936 | } | |
2937 | ||
a5898ea0 HK |
2938 | static int macb_change_mtu(struct net_device *dev, int new_mtu) |
2939 | { | |
a5898ea0 HK |
2940 | if (netif_running(dev)) |
2941 | return -EBUSY; | |
2942 | ||
a5898ea0 HK |
2943 | dev->mtu = new_mtu; |
2944 | ||
2945 | return 0; | |
2946 | } | |
2947 | ||
a494ed8e JI |
2948 | static void gem_update_stats(struct macb *bp) |
2949 | { | |
512286bb RO |
2950 | struct macb_queue *queue; |
2951 | unsigned int i, q, idx; | |
2952 | unsigned long *stat; | |
2953 | ||
a494ed8e | 2954 | u32 *p = &bp->hw_stats.gem.tx_octets_31_0; |
a494ed8e | 2955 | |
3ff13f1c XH |
2956 | for (i = 0; i < GEM_STATS_LEN; ++i, ++p) { |
2957 | u32 offset = gem_statistics[i].offset; | |
7a6e0706 | 2958 | u64 val = bp->macb_reg_readl(bp, offset); |
3ff13f1c XH |
2959 | |
2960 | bp->ethtool_stats[i] += val; | |
2961 | *p += val; | |
2962 | ||
2963 | if (offset == GEM_OCTTXL || offset == GEM_OCTRXL) { | |
2964 | /* Add GEM_OCTTXH, GEM_OCTRXH */ | |
7a6e0706 | 2965 | val = bp->macb_reg_readl(bp, offset + 4); |
2fa45e22 | 2966 | bp->ethtool_stats[i] += ((u64)val) << 32; |
3ff13f1c XH |
2967 | *(++p) += val; |
2968 | } | |
2969 | } | |
512286bb RO |
2970 | |
2971 | idx = GEM_STATS_LEN; | |
2972 | for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) | |
2973 | for (i = 0, stat = &queue->stats.first; i < QUEUE_STATS_LEN; ++i, ++stat) | |
2974 | bp->ethtool_stats[idx++] = *stat; | |
a494ed8e JI |
2975 | } |
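The low/high split in the loop above is easy to miss: GEM exposes the octet counters as a 32-bit low word (GEM_OCTTXL/GEM_OCTRXL) with the upper 32 bits at offset + 4, and the driver shifts the high word into place. A small user-space sketch of the widening, with made-up register values:

#include <stdint.h>
#include <stdio.h>

/* Combine a 32-bit low/high register pair into one 64-bit counter. */
static uint64_t widen_counter(uint32_t lo, uint32_t hi)
{
	return (uint64_t)lo | ((uint64_t)hi << 32);
}

int main(void)
{
	/* e.g. low word 0x10, high word 0x2 -> 0x200000010 octets */
	printf("%llu\n", (unsigned long long)widen_counter(0x10, 0x2));
	return 0;
}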
2976 | ||
2977 | static struct net_device_stats *gem_get_stats(struct macb *bp) | |
2978 | { | |
2979 | struct gem_stats *hwstat = &bp->hw_stats.gem; | |
5f1d3a5c | 2980 | struct net_device_stats *nstat = &bp->dev->stats; |
a494ed8e | 2981 | |
5eff1461 ZL |
2982 | if (!netif_running(bp->dev)) |
2983 | return nstat; | |
2984 | ||
a494ed8e JI |
2985 | gem_update_stats(bp); |
2986 | ||
2987 | nstat->rx_errors = (hwstat->rx_frame_check_sequence_errors + | |
2988 | hwstat->rx_alignment_errors + | |
2989 | hwstat->rx_resource_errors + | |
2990 | hwstat->rx_overruns + | |
2991 | hwstat->rx_oversize_frames + | |
2992 | hwstat->rx_jabbers + | |
2993 | hwstat->rx_undersized_frames + | |
2994 | hwstat->rx_length_field_frame_errors); | |
2995 | nstat->tx_errors = (hwstat->tx_late_collisions + | |
2996 | hwstat->tx_excessive_collisions + | |
2997 | hwstat->tx_underrun + | |
2998 | hwstat->tx_carrier_sense_errors); | |
2999 | nstat->multicast = hwstat->rx_multicast_frames; | |
3000 | nstat->collisions = (hwstat->tx_single_collision_frames + | |
3001 | hwstat->tx_multiple_collision_frames + | |
3002 | hwstat->tx_excessive_collisions); | |
3003 | nstat->rx_length_errors = (hwstat->rx_oversize_frames + | |
3004 | hwstat->rx_jabbers + | |
3005 | hwstat->rx_undersized_frames + | |
3006 | hwstat->rx_length_field_frame_errors); | |
3007 | nstat->rx_over_errors = hwstat->rx_resource_errors; | |
3008 | nstat->rx_crc_errors = hwstat->rx_frame_check_sequence_errors; | |
3009 | nstat->rx_frame_errors = hwstat->rx_alignment_errors; | |
3010 | nstat->rx_fifo_errors = hwstat->rx_overruns; | |
3011 | nstat->tx_aborted_errors = hwstat->tx_excessive_collisions; | |
3012 | nstat->tx_carrier_errors = hwstat->tx_carrier_sense_errors; | |
3013 | nstat->tx_fifo_errors = hwstat->tx_underrun; | |
3014 | ||
3015 | return nstat; | |
3016 | } | |
3017 | ||
3ff13f1c XH |
3018 | static void gem_get_ethtool_stats(struct net_device *dev, |
3019 | struct ethtool_stats *stats, u64 *data) | |
3020 | { | |
3021 | struct macb *bp; | |
3022 | ||
3023 | bp = netdev_priv(dev); | |
3024 | gem_update_stats(bp); | |
512286bb RO |
3025 | memcpy(data, &bp->ethtool_stats, sizeof(u64) |
3026 | * (GEM_STATS_LEN + QUEUE_STATS_LEN * MACB_MAX_QUEUES)); | |
3ff13f1c XH |
3027 | } |
3028 | ||
3029 | static int gem_get_sset_count(struct net_device *dev, int sset) | |
3030 | { | |
512286bb RO |
3031 | struct macb *bp = netdev_priv(dev); |
3032 | ||
3ff13f1c XH |
3033 | switch (sset) { |
3034 | case ETH_SS_STATS: | |
512286bb | 3035 | return GEM_STATS_LEN + bp->num_queues * QUEUE_STATS_LEN; |
3ff13f1c XH |
3036 | default: |
3037 | return -EOPNOTSUPP; | |
3038 | } | |
3039 | } | |
3040 | ||
3041 | static void gem_get_ethtool_strings(struct net_device *dev, u32 sset, u8 *p) | |
3042 | { | |
512286bb RO |
3043 | char stat_string[ETH_GSTRING_LEN]; |
3044 | struct macb *bp = netdev_priv(dev); | |
3045 | struct macb_queue *queue; | |
8bcbf82f | 3046 | unsigned int i; |
512286bb | 3047 | unsigned int q; |
3ff13f1c XH |
3048 | |
3049 | switch (sset) { | |
3050 | case ETH_SS_STATS: | |
3051 | for (i = 0; i < GEM_STATS_LEN; i++, p += ETH_GSTRING_LEN) | |
3052 | memcpy(p, gem_statistics[i].stat_string, | |
3053 | ETH_GSTRING_LEN); | |
512286bb RO |
3054 | |
3055 | for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { | |
3056 | for (i = 0; i < QUEUE_STATS_LEN; i++, p += ETH_GSTRING_LEN) { | |
3057 | snprintf(stat_string, ETH_GSTRING_LEN, "q%d_%s", | |
3058 | q, queue_statistics[i].stat_string); | |
3059 | memcpy(p, stat_string, ETH_GSTRING_LEN); | |
3060 | } | |
3061 | } | |
3ff13f1c XH |
3062 | break; |
3063 | } | |
3064 | } | |
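The per-queue strings are just the global stat names prefixed with the queue number, as the snprintf() above shows. A standalone sketch; "rx_packets" stands in for a real queue_statistics[] name, which is defined elsewhere:

#include <stdio.h>

#define ETH_GSTRING_LEN 32

int main(void)
{
	char s[ETH_GSTRING_LEN];
	int q;

	for (q = 0; q < 2; q++) {
		snprintf(s, sizeof(s), "q%d_%s", q, "rx_packets");
		puts(s);	/* prints q0_rx_packets, then q1_rx_packets */
	}
	return 0;
}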
3065 | ||
421d9df0 | 3066 | static struct net_device_stats *macb_get_stats(struct net_device *dev) |
89e5785f HS |
3067 | { |
3068 | struct macb *bp = netdev_priv(dev); | |
5f1d3a5c | 3069 | struct net_device_stats *nstat = &bp->dev->stats; |
a494ed8e JI |
3070 | struct macb_stats *hwstat = &bp->hw_stats.macb; |
3071 | ||
3072 | if (macb_is_gem(bp)) | |
3073 | return gem_get_stats(bp); | |
89e5785f | 3074 | |
6c36a707 R |
3075 | /* read stats from hardware */ |
3076 | macb_update_stats(bp); | |
3077 | ||
89e5785f HS |
3078 | /* Convert HW stats into netdevice stats */ |
3079 | nstat->rx_errors = (hwstat->rx_fcs_errors + | |
3080 | hwstat->rx_align_errors + | |
3081 | hwstat->rx_resource_errors + | |
3082 | hwstat->rx_overruns + | |
3083 | hwstat->rx_oversize_pkts + | |
3084 | hwstat->rx_jabbers + | |
3085 | hwstat->rx_undersize_pkts + | |
89e5785f HS |
3086 | hwstat->rx_length_mismatch); |
3087 | nstat->tx_errors = (hwstat->tx_late_cols + | |
3088 | hwstat->tx_excessive_cols + | |
3089 | hwstat->tx_underruns + | |
716723c2 WS |
3090 | hwstat->tx_carrier_errors + |
3091 | hwstat->sqe_test_errors); | |
89e5785f HS |
3092 | nstat->collisions = (hwstat->tx_single_cols + |
3093 | hwstat->tx_multiple_cols + | |
3094 | hwstat->tx_excessive_cols); | |
3095 | nstat->rx_length_errors = (hwstat->rx_oversize_pkts + | |
3096 | hwstat->rx_jabbers + | |
3097 | hwstat->rx_undersize_pkts + | |
3098 | hwstat->rx_length_mismatch); | |
b19f7f71 AS |
3099 | nstat->rx_over_errors = hwstat->rx_resource_errors + |
3100 | hwstat->rx_overruns; | |
89e5785f HS |
3101 | nstat->rx_crc_errors = hwstat->rx_fcs_errors; |
3102 | nstat->rx_frame_errors = hwstat->rx_align_errors; | |
3103 | nstat->rx_fifo_errors = hwstat->rx_overruns; | |
3104 | /* XXX: What does "missed" mean? */ | |
3105 | nstat->tx_aborted_errors = hwstat->tx_excessive_cols; | |
3106 | nstat->tx_carrier_errors = hwstat->tx_carrier_errors; | |
3107 | nstat->tx_fifo_errors = hwstat->tx_underruns; | |
3108 | /* Don't know about heartbeat or window errors... */ | |
3109 | ||
3110 | return nstat; | |
3111 | } | |
3112 | ||
d1d1b53d NF |
3113 | static int macb_get_regs_len(struct net_device *netdev) |
3114 | { | |
3115 | return MACB_GREGS_NBR * sizeof(u32); | |
3116 | } | |
3117 | ||
3118 | static void macb_get_regs(struct net_device *dev, struct ethtool_regs *regs, | |
3119 | void *p) | |
3120 | { | |
3121 | struct macb *bp = netdev_priv(dev); | |
3122 | unsigned int tail, head; | |
3123 | u32 *regs_buff = p; | |
3124 | ||
3125 | regs->version = (macb_readl(bp, MID) & ((1 << MACB_REV_SIZE) - 1)) | |
3126 | | MACB_GREGS_VERSION; | |
3127 | ||
b410d13e ZB |
3128 | tail = macb_tx_ring_wrap(bp, bp->queues[0].tx_tail); |
3129 | head = macb_tx_ring_wrap(bp, bp->queues[0].tx_head); | |
d1d1b53d NF |
3130 | |
3131 | regs_buff[0] = macb_readl(bp, NCR); | |
3132 | regs_buff[1] = macb_or_gem_readl(bp, NCFGR); | |
3133 | regs_buff[2] = macb_readl(bp, NSR); | |
3134 | regs_buff[3] = macb_readl(bp, TSR); | |
3135 | regs_buff[4] = macb_readl(bp, RBQP); | |
3136 | regs_buff[5] = macb_readl(bp, TBQP); | |
3137 | regs_buff[6] = macb_readl(bp, RSR); | |
3138 | regs_buff[7] = macb_readl(bp, IMR); | |
3139 | ||
3140 | regs_buff[8] = tail; | |
3141 | regs_buff[9] = head; | |
02c958dd CP |
3142 | regs_buff[10] = macb_tx_dma(&bp->queues[0], tail); |
3143 | regs_buff[11] = macb_tx_dma(&bp->queues[0], head); | |
d1d1b53d | 3144 | |
ce721a70 NA |
3145 | if (!(bp->caps & MACB_CAPS_USRIO_DISABLED)) |
3146 | regs_buff[12] = macb_or_gem_readl(bp, USRIO); | |
64ec42fe | 3147 | if (macb_is_gem(bp)) |
d1d1b53d | 3148 | regs_buff[13] = gem_readl(bp, DMACFG); |
d1d1b53d NF |
3149 | } |
3150 | ||
3e2a5e15 SP |
3151 | static void macb_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) |
3152 | { | |
3153 | struct macb *bp = netdev_priv(netdev); | |
3154 | ||
253fe094 | 3155 | if (bp->wol & MACB_WOL_HAS_MAGIC_PACKET) { |
7897b071 | 3156 | phylink_ethtool_get_wol(bp->phylink, wol); |
253fe094 NF |
3157 | wol->supported |= WAKE_MAGIC; |
3158 | ||
3159 | if (bp->wol & MACB_WOL_ENABLED) | |
3160 | wol->wolopts |= WAKE_MAGIC; | |
3161 | } | |
3e2a5e15 SP |
3162 | } |
3163 | ||
3164 | static int macb_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) | |
3165 | { | |
3166 | struct macb *bp = netdev_priv(netdev); | |
7897b071 AT |
3167 | int ret; |
3168 | ||
253fe094 | 3169 | /* Pass the request to the phylink layer first */
7897b071 | 3170 | ret = phylink_ethtool_set_wol(bp->phylink, wol); |
253fe094 NF |
3171 | /* Don't manage WoL on MAC if handled by the PHY |
3172 | * or if there's a failure in talking to the PHY | |
3173 | */ | |
3174 | if (!ret || ret != -EOPNOTSUPP) | |
3175 | return ret; | |
3e2a5e15 SP |
3176 | |
3177 | if (!(bp->wol & MACB_WOL_HAS_MAGIC_PACKET) || | |
3178 | (wol->wolopts & ~WAKE_MAGIC)) | |
3179 | return -EOPNOTSUPP; | |
3180 | ||
3181 | if (wol->wolopts & WAKE_MAGIC) | |
3182 | bp->wol |= MACB_WOL_ENABLED; | |
3183 | else | |
3184 | bp->wol &= ~MACB_WOL_ENABLED; | |
3185 | ||
3186 | device_set_wakeup_enable(&bp->pdev->dev, bp->wol & MACB_WOL_ENABLED); | |
3187 | ||
3188 | return 0; | |
3189 | } | |
3190 | ||
7897b071 AT |
3191 | static int macb_get_link_ksettings(struct net_device *netdev, |
3192 | struct ethtool_link_ksettings *kset) | |
3193 | { | |
3194 | struct macb *bp = netdev_priv(netdev); | |
3195 | ||
3196 | return phylink_ethtool_ksettings_get(bp->phylink, kset); | |
3197 | } | |
3198 | ||
3199 | static int macb_set_link_ksettings(struct net_device *netdev, | |
3200 | const struct ethtool_link_ksettings *kset) | |
3201 | { | |
3202 | struct macb *bp = netdev_priv(netdev); | |
3203 | ||
3204 | return phylink_ethtool_ksettings_set(bp->phylink, kset); | |
3205 | } | |
3206 | ||
8441bb33 | 3207 | static void macb_get_ringparam(struct net_device *netdev, |
74624944 HC |
3208 | struct ethtool_ringparam *ring, |
3209 | struct kernel_ethtool_ringparam *kernel_ring, | |
3210 | struct netlink_ext_ack *extack) | |
8441bb33 ZB |
3211 | { |
3212 | struct macb *bp = netdev_priv(netdev); | |
3213 | ||
3214 | ring->rx_max_pending = MAX_RX_RING_SIZE; | |
3215 | ring->tx_max_pending = MAX_TX_RING_SIZE; | |
3216 | ||
3217 | ring->rx_pending = bp->rx_ring_size; | |
3218 | ring->tx_pending = bp->tx_ring_size; | |
3219 | } | |
3220 | ||
3221 | static int macb_set_ringparam(struct net_device *netdev, | |
74624944 HC |
3222 | struct ethtool_ringparam *ring, |
3223 | struct kernel_ethtool_ringparam *kernel_ring, | |
3224 | struct netlink_ext_ack *extack) | |
8441bb33 ZB |
3225 | { |
3226 | struct macb *bp = netdev_priv(netdev); | |
3227 | u32 new_rx_size, new_tx_size; | |
3228 | unsigned int reset = 0; | |
3229 | ||
3230 | if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) | |
3231 | return -EINVAL; | |
3232 | ||
3233 | new_rx_size = clamp_t(u32, ring->rx_pending, | |
3234 | MIN_RX_RING_SIZE, MAX_RX_RING_SIZE); | |
3235 | new_rx_size = roundup_pow_of_two(new_rx_size); | |
3236 | ||
3237 | new_tx_size = clamp_t(u32, ring->tx_pending, | |
3238 | MIN_TX_RING_SIZE, MAX_TX_RING_SIZE); | |
3239 | new_tx_size = roundup_pow_of_two(new_tx_size); | |
3240 | ||
3241 | if ((new_tx_size == bp->tx_ring_size) && | |
3242 | (new_rx_size == bp->rx_ring_size)) { | |
3243 | /* nothing to do */ | |
3244 | return 0; | |
3245 | } | |
3246 | ||
3247 | if (netif_running(bp->dev)) { | |
3248 | reset = 1; | |
3249 | macb_close(bp->dev); | |
3250 | } | |
3251 | ||
3252 | bp->rx_ring_size = new_rx_size; | |
3253 | bp->tx_ring_size = new_tx_size; | |
3254 | ||
3255 | if (reset) | |
3256 | macb_open(bp->dev); | |
3257 | ||
3258 | return 0; | |
3259 | } | |
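The sizing policy deserves a worked example: a request is clamped into the [MIN, MAX] window and then rounded up to the next power of two, since ring positions are wrapped with a power-of-two mask (see macb_tx_ring_wrap() used in macb_get_regs()). A user-space sketch:

#include <stdint.h>
#include <stdio.h>

#define MIN_RX_RING_SIZE 64
#define MAX_RX_RING_SIZE 8192

static uint32_t clamp_u32(uint32_t v, uint32_t lo, uint32_t hi)
{
	return v < lo ? lo : (v > hi ? hi : v);
}

/* Smallest power of two >= v (for v >= 1), like the kernel helper. */
static uint32_t round_up_pow2(uint32_t v)
{
	uint32_t p = 1;

	while (p < v)
		p <<= 1;
	return p;
}

int main(void)
{
	const uint32_t req[] = { 10, 1000, 100000 };

	for (int i = 0; i < 3; i++) {
		uint32_t sz = round_up_pow2(
			clamp_u32(req[i], MIN_RX_RING_SIZE, MAX_RX_RING_SIZE));
		printf("request %u -> ring size %u\n",
		       (unsigned int)req[i], (unsigned int)sz);
	}
	return 0;	/* prints 64, 1024 and 8192 */
}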
3260 | ||
ab91f0a9 RO |
3261 | #ifdef CONFIG_MACB_USE_HWSTAMP |
3262 | static unsigned int gem_get_tsu_rate(struct macb *bp) | |
3263 | { | |
3264 | struct clk *tsu_clk; | |
3265 | unsigned int tsu_rate; | |
3266 | ||
3267 | tsu_clk = devm_clk_get(&bp->pdev->dev, "tsu_clk"); | |
3268 | if (!IS_ERR(tsu_clk)) | |
3269 | tsu_rate = clk_get_rate(tsu_clk);
3270 | else if (!IS_ERR(bp->pclk)) {
3271 | /* try pclk instead */
3272 | tsu_clk = bp->pclk; | |
3273 | tsu_rate = clk_get_rate(tsu_clk); | |
3274 | } else | |
3275 | return -ENOTSUPP; | |
3276 | return tsu_rate; | |
3277 | } | |
3278 | ||
3279 | static s32 gem_get_ptp_max_adj(void) | |
3280 | { | |
3281 | return 64000000; | |
3282 | } | |
3283 | ||
3284 | static int gem_get_ts_info(struct net_device *dev, | |
3285 | struct ethtool_ts_info *info) | |
3286 | { | |
3287 | struct macb *bp = netdev_priv(dev); | |
3288 | ||
3289 | if ((bp->hw_dma_cap & HW_DMA_CAP_PTP) == 0) { | |
3290 | ethtool_op_get_ts_info(dev, info); | |
3291 | return 0; | |
3292 | } | |
3293 | ||
3294 | info->so_timestamping = | |
3295 | SOF_TIMESTAMPING_TX_SOFTWARE | | |
3296 | SOF_TIMESTAMPING_RX_SOFTWARE | | |
3297 | SOF_TIMESTAMPING_SOFTWARE | | |
3298 | SOF_TIMESTAMPING_TX_HARDWARE | | |
3299 | SOF_TIMESTAMPING_RX_HARDWARE | | |
3300 | SOF_TIMESTAMPING_RAW_HARDWARE; | |
3301 | info->tx_types = | |
3302 | (1 << HWTSTAMP_TX_ONESTEP_SYNC) | | |
3303 | (1 << HWTSTAMP_TX_OFF) | | |
3304 | (1 << HWTSTAMP_TX_ON); | |
3305 | info->rx_filters = | |
3306 | (1 << HWTSTAMP_FILTER_NONE) | | |
3307 | (1 << HWTSTAMP_FILTER_ALL); | |
3308 | ||
3309 | info->phc_index = bp->ptp_clock ? ptp_clock_index(bp->ptp_clock) : -1; | |
3310 | ||
3311 | return 0; | |
3312 | } | |
3313 | ||
3314 | static struct macb_ptp_info gem_ptp_info = { | |
3315 | .ptp_init = gem_ptp_init, | |
3316 | .ptp_remove = gem_ptp_remove, | |
3317 | .get_ptp_max_adj = gem_get_ptp_max_adj, | |
3318 | .get_tsu_rate = gem_get_tsu_rate, | |
3319 | .get_ts_info = gem_get_ts_info, | |
3320 | .get_hwtst = gem_get_hwtst, | |
3321 | .set_hwtst = gem_set_hwtst, | |
3322 | }; | |
3323 | #endif | |
3324 | ||
c2594d80 AP |
3325 | static int macb_get_ts_info(struct net_device *netdev, |
3326 | struct ethtool_ts_info *info) | |
3327 | { | |
3328 | struct macb *bp = netdev_priv(netdev); | |
3329 | ||
3330 | if (bp->ptp_info) | |
3331 | return bp->ptp_info->get_ts_info(netdev, info); | |
3332 | ||
3333 | return ethtool_op_get_ts_info(netdev, info); | |
3334 | } | |
3335 | ||
ae8223de RO |
3336 | static void gem_enable_flow_filters(struct macb *bp, bool enable) |
3337 | { | |
c1e85c6c | 3338 | struct net_device *netdev = bp->dev; |
ae8223de RO |
3339 | struct ethtool_rx_fs_item *item; |
3340 | u32 t2_scr; | |
3341 | int num_t2_scr; | |
3342 | ||
c1e85c6c CB |
3343 | if (!(netdev->features & NETIF_F_NTUPLE)) |
3344 | return; | |
3345 | ||
ae8223de RO |
3346 | num_t2_scr = GEM_BFEXT(T2SCR, gem_readl(bp, DCFG8)); |
3347 | ||
3348 | list_for_each_entry(item, &bp->rx_fs_list.list, list) { | |
3349 | struct ethtool_rx_flow_spec *fs = &item->fs; | |
3350 | struct ethtool_tcpip4_spec *tp4sp_m; | |
3351 | ||
3352 | if (fs->location >= num_t2_scr) | |
3353 | continue; | |
3354 | ||
3355 | t2_scr = gem_readl_n(bp, SCRT2, fs->location); | |
3356 | ||
3357 | /* enable/disable screener regs for the flow entry */ | |
3358 | t2_scr = GEM_BFINS(ETHTEN, enable, t2_scr); | |
3359 | ||
3360 | /* only enable fields with no masking */ | |
3361 | tp4sp_m = &(fs->m_u.tcp_ip4_spec); | |
3362 | ||
3363 | if (enable && (tp4sp_m->ip4src == 0xFFFFFFFF)) | |
3364 | t2_scr = GEM_BFINS(CMPAEN, 1, t2_scr); | |
3365 | else | |
3366 | t2_scr = GEM_BFINS(CMPAEN, 0, t2_scr); | |
3367 | ||
3368 | if (enable && (tp4sp_m->ip4dst == 0xFFFFFFFF)) | |
3369 | t2_scr = GEM_BFINS(CMPBEN, 1, t2_scr); | |
3370 | else | |
3371 | t2_scr = GEM_BFINS(CMPBEN, 0, t2_scr); | |
3372 | ||
3373 | if (enable && ((tp4sp_m->psrc == 0xFFFF) || (tp4sp_m->pdst == 0xFFFF))) | |
3374 | t2_scr = GEM_BFINS(CMPCEN, 1, t2_scr); | |
3375 | else | |
3376 | t2_scr = GEM_BFINS(CMPCEN, 0, t2_scr); | |
3377 | ||
3378 | gem_writel_n(bp, SCRT2, fs->location, t2_scr); | |
3379 | } | |
3380 | } | |
3381 | ||
3382 | static void gem_prog_cmp_regs(struct macb *bp, struct ethtool_rx_flow_spec *fs) | |
3383 | { | |
3384 | struct ethtool_tcpip4_spec *tp4sp_v, *tp4sp_m; | |
3385 | uint16_t index = fs->location; | |
3386 | u32 w0, w1, t2_scr; | |
3387 | bool cmp_a = false; | |
3388 | bool cmp_b = false; | |
3389 | bool cmp_c = false; | |
3390 | ||
a14d273b CB |
3391 | if (!macb_is_gem(bp)) |
3392 | return; | |
3393 | ||
ae8223de RO |
3394 | tp4sp_v = &(fs->h_u.tcp_ip4_spec); |
3395 | tp4sp_m = &(fs->m_u.tcp_ip4_spec); | |
3396 | ||
3397 | /* compare this field only when the mask is all-ones (exact match) */
3398 | if (tp4sp_m->ip4src == 0xFFFFFFFF) { | |
3399 | /* 1st compare reg - IP source address */ | |
3400 | w0 = 0; | |
3401 | w1 = 0; | |
3402 | w0 = tp4sp_v->ip4src; | |
3403 | w1 = GEM_BFINS(T2DISMSK, 1, w1); /* 32-bit compare */ | |
3404 | w1 = GEM_BFINS(T2CMPOFST, GEM_T2COMPOFST_ETYPE, w1); | |
3405 | w1 = GEM_BFINS(T2OFST, ETYPE_SRCIP_OFFSET, w1); | |
3406 | gem_writel_n(bp, T2CMPW0, T2CMP_OFST(GEM_IP4SRC_CMP(index)), w0); | |
3407 | gem_writel_n(bp, T2CMPW1, T2CMP_OFST(GEM_IP4SRC_CMP(index)), w1); | |
3408 | cmp_a = true; | |
3409 | } | |
3410 | ||
3411 | /* compare this field only when the mask is all-ones (exact match) */
3412 | if (tp4sp_m->ip4dst == 0xFFFFFFFF) { | |
3413 | /* 2nd compare reg - IP destination address */ | |
3414 | w0 = 0; | |
3415 | w1 = 0; | |
3416 | w0 = tp4sp_v->ip4dst; | |
3417 | w1 = GEM_BFINS(T2DISMSK, 1, w1); /* 32-bit compare */ | |
3418 | w1 = GEM_BFINS(T2CMPOFST, GEM_T2COMPOFST_ETYPE, w1); | |
3419 | w1 = GEM_BFINS(T2OFST, ETYPE_DSTIP_OFFSET, w1); | |
3420 | gem_writel_n(bp, T2CMPW0, T2CMP_OFST(GEM_IP4DST_CMP(index)), w0); | |
3421 | gem_writel_n(bp, T2CMPW1, T2CMP_OFST(GEM_IP4DST_CMP(index)), w1); | |
3422 | cmp_b = true; | |
3423 | } | |
3424 | ||
3425 | /* compare the ports when either port mask is all-ones */
3426 | if ((tp4sp_m->psrc == 0xFFFF) || (tp4sp_m->pdst == 0xFFFF)) { | |
3427 | /* 3rd compare reg - source port, destination port */ | |
3428 | w0 = 0; | |
3429 | w1 = 0; | |
3430 | w1 = GEM_BFINS(T2CMPOFST, GEM_T2COMPOFST_IPHDR, w1); | |
3431 | if (tp4sp_m->psrc == tp4sp_m->pdst) { | |
3432 | w0 = GEM_BFINS(T2MASK, tp4sp_v->psrc, w0); | |
3433 | w0 = GEM_BFINS(T2CMP, tp4sp_v->pdst, w0); | |
3434 | w1 = GEM_BFINS(T2DISMSK, 1, w1); /* 32-bit compare */ | |
3435 | w1 = GEM_BFINS(T2OFST, IPHDR_SRCPORT_OFFSET, w1); | |
3436 | } else { | |
3437 | /* only one port definition */ | |
3438 | w1 = GEM_BFINS(T2DISMSK, 0, w1); /* 16-bit compare */ | |
3439 | w0 = GEM_BFINS(T2MASK, 0xFFFF, w0); | |
3440 | if (tp4sp_m->psrc == 0xFFFF) { /* src port */ | |
3441 | w0 = GEM_BFINS(T2CMP, tp4sp_v->psrc, w0); | |
3442 | w1 = GEM_BFINS(T2OFST, IPHDR_SRCPORT_OFFSET, w1); | |
3443 | } else { /* dst port */ | |
3444 | w0 = GEM_BFINS(T2CMP, tp4sp_v->pdst, w0); | |
3445 | w1 = GEM_BFINS(T2OFST, IPHDR_DSTPORT_OFFSET, w1); | |
3446 | } | |
3447 | } | |
3448 | gem_writel_n(bp, T2CMPW0, T2CMP_OFST(GEM_PORT_CMP(index)), w0); | |
3449 | gem_writel_n(bp, T2CMPW1, T2CMP_OFST(GEM_PORT_CMP(index)), w1); | |
3450 | cmp_c = true; | |
3451 | } | |
3452 | ||
3453 | t2_scr = 0; | |
3454 | t2_scr = GEM_BFINS(QUEUE, (fs->ring_cookie) & 0xFF, t2_scr); | |
3455 | t2_scr = GEM_BFINS(ETHT2IDX, SCRT2_ETHT, t2_scr); | |
3456 | if (cmp_a) | |
3457 | t2_scr = GEM_BFINS(CMPA, GEM_IP4SRC_CMP(index), t2_scr); | |
3458 | if (cmp_b) | |
3459 | t2_scr = GEM_BFINS(CMPB, GEM_IP4DST_CMP(index), t2_scr); | |
3460 | if (cmp_c) | |
3461 | t2_scr = GEM_BFINS(CMPC, GEM_PORT_CMP(index), t2_scr); | |
3462 | gem_writel_n(bp, SCRT2, index, t2_scr); | |
3463 | } | |
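To see how a rule maps onto the three compare slots (A = source IP, B = destination IP, C = ports), here is an illustrative standalone sketch; a slot is consumed only when the corresponding mask requests a full match, and the example rule is hypothetical:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical rule: match on source IP and destination port. */
	uint32_t ip4src_m = 0xffffffff, ip4dst_m = 0;
	uint16_t psrc_m = 0, pdst_m = 0xffff;

	bool cmp_a = (ip4src_m == 0xffffffff);
	bool cmp_b = (ip4dst_m == 0xffffffff);
	bool cmp_c = (psrc_m == 0xffff) || (pdst_m == 0xffff);

	/* Slots A and C are used here; B stays free for another rule. */
	printf("CMPA=%d CMPB=%d CMPC=%d\n", cmp_a, cmp_b, cmp_c);
	return 0;
}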
3464 | ||
3465 | static int gem_add_flow_filter(struct net_device *netdev, | |
3466 | struct ethtool_rxnfc *cmd) | |
3467 | { | |
3468 | struct macb *bp = netdev_priv(netdev); | |
3469 | struct ethtool_rx_flow_spec *fs = &cmd->fs; | |
3470 | struct ethtool_rx_fs_item *item, *newfs; | |
7038cdb7 | 3471 | unsigned long flags; |
ae8223de RO |
3472 | int ret = -EINVAL; |
3473 | bool added = false; | |
3474 | ||
cc1674ee | 3475 | newfs = kmalloc(sizeof(*newfs), GFP_KERNEL); |
ae8223de RO |
3476 | if (newfs == NULL) |
3477 | return -ENOMEM; | |
3478 | memcpy(&newfs->fs, fs, sizeof(newfs->fs)); | |
3479 | ||
3480 | netdev_dbg(netdev, | |
3481 | "Adding flow filter entry,type=%u,queue=%u,loc=%u,src=%08X,dst=%08X,ps=%u,pd=%u\n", | |
3482 | fs->flow_type, (int)fs->ring_cookie, fs->location, | |
3483 | htonl(fs->h_u.tcp_ip4_spec.ip4src), | |
3484 | htonl(fs->h_u.tcp_ip4_spec.ip4dst), | |
3485 | htons(fs->h_u.tcp_ip4_spec.psrc), htons(fs->h_u.tcp_ip4_spec.pdst)); | |
3486 | ||
7038cdb7 JC |
3487 | spin_lock_irqsave(&bp->rx_fs_lock, flags); |
3488 | ||
ae8223de | 3489 | /* find correct place to add in list */ |
a3da8adc JC |
3490 | list_for_each_entry(item, &bp->rx_fs_list.list, list) { |
3491 | if (item->fs.location > newfs->fs.location) { | |
3492 | list_add_tail(&newfs->list, &item->list); | |
3493 | added = true; | |
3494 | break; | |
3495 | } else if (item->fs.location == fs->location) { | |
3496 | netdev_err(netdev, "Rule not added: location %d not free!\n", | |
3497 | fs->location); | |
3498 | ret = -EBUSY; | |
3499 | goto err; | |
ae8223de | 3500 | } |
ae8223de | 3501 | } |
a3da8adc JC |
3502 | if (!added) |
3503 | list_add_tail(&newfs->list, &bp->rx_fs_list.list); | |
ae8223de RO |
3504 | |
3505 | gem_prog_cmp_regs(bp, fs); | |
3506 | bp->rx_fs_list.count++; | |
3507 | /* enable filtering if NTUPLE on */ | |
c1e85c6c | 3508 | gem_enable_flow_filters(bp, 1); |
ae8223de | 3509 | |
7038cdb7 | 3510 | spin_unlock_irqrestore(&bp->rx_fs_lock, flags); |
ae8223de RO |
3511 | return 0; |
3512 | ||
3513 | err: | |
7038cdb7 | 3514 | spin_unlock_irqrestore(&bp->rx_fs_lock, flags); |
ae8223de RO |
3515 | kfree(newfs); |
3516 | return ret; | |
3517 | } | |
3518 | ||
3519 | static int gem_del_flow_filter(struct net_device *netdev, | |
3520 | struct ethtool_rxnfc *cmd) | |
3521 | { | |
3522 | struct macb *bp = netdev_priv(netdev); | |
3523 | struct ethtool_rx_fs_item *item; | |
3524 | struct ethtool_rx_flow_spec *fs; | |
7038cdb7 JC |
3525 | unsigned long flags; |
3526 | ||
3527 | spin_lock_irqsave(&bp->rx_fs_lock, flags); | |
ae8223de | 3528 | |
ae8223de RO |
3529 | list_for_each_entry(item, &bp->rx_fs_list.list, list) { |
3530 | if (item->fs.location == cmd->fs.location) { | |
3531 | /* disable screener regs for the flow entry */ | |
3532 | fs = &(item->fs); | |
3533 | netdev_dbg(netdev, | |
3534 | "Deleting flow filter entry,type=%u,queue=%u,loc=%u,src=%08X,dst=%08X,ps=%u,pd=%u\n", | |
3535 | fs->flow_type, (int)fs->ring_cookie, fs->location, | |
3536 | htonl(fs->h_u.tcp_ip4_spec.ip4src), | |
3537 | htonl(fs->h_u.tcp_ip4_spec.ip4dst), | |
3538 | htons(fs->h_u.tcp_ip4_spec.psrc), | |
3539 | htons(fs->h_u.tcp_ip4_spec.pdst)); | |
3540 | ||
3541 | gem_writel_n(bp, SCRT2, fs->location, 0); | |
3542 | ||
3543 | list_del(&item->list); | |
ae8223de | 3544 | bp->rx_fs_list.count--; |
7038cdb7 JC |
3545 | spin_unlock_irqrestore(&bp->rx_fs_lock, flags); |
3546 | kfree(item); | |
ae8223de RO |
3547 | return 0; |
3548 | } | |
3549 | } | |
3550 | ||
7038cdb7 | 3551 | spin_unlock_irqrestore(&bp->rx_fs_lock, flags); |
ae8223de RO |
3552 | return -EINVAL; |
3553 | } | |
3554 | ||
3555 | static int gem_get_flow_entry(struct net_device *netdev, | |
3556 | struct ethtool_rxnfc *cmd) | |
3557 | { | |
3558 | struct macb *bp = netdev_priv(netdev); | |
3559 | struct ethtool_rx_fs_item *item; | |
3560 | ||
3561 | list_for_each_entry(item, &bp->rx_fs_list.list, list) { | |
3562 | if (item->fs.location == cmd->fs.location) { | |
3563 | memcpy(&cmd->fs, &item->fs, sizeof(cmd->fs)); | |
3564 | return 0; | |
3565 | } | |
3566 | } | |
3567 | return -EINVAL; | |
3568 | } | |
3569 | ||
3570 | static int gem_get_all_flow_entries(struct net_device *netdev, | |
3571 | struct ethtool_rxnfc *cmd, u32 *rule_locs) | |
3572 | { | |
3573 | struct macb *bp = netdev_priv(netdev); | |
3574 | struct ethtool_rx_fs_item *item; | |
3575 | uint32_t cnt = 0; | |
3576 | ||
3577 | list_for_each_entry(item, &bp->rx_fs_list.list, list) { | |
3578 | if (cnt == cmd->rule_cnt) | |
3579 | return -EMSGSIZE; | |
3580 | rule_locs[cnt] = item->fs.location; | |
3581 | cnt++; | |
3582 | } | |
3583 | cmd->data = bp->max_tuples; | |
3584 | cmd->rule_cnt = cnt; | |
3585 | ||
3586 | return 0; | |
3587 | } | |
3588 | ||
3589 | static int gem_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd, | |
3590 | u32 *rule_locs) | |
3591 | { | |
3592 | struct macb *bp = netdev_priv(netdev); | |
3593 | int ret = 0; | |
3594 | ||
3595 | switch (cmd->cmd) { | |
3596 | case ETHTOOL_GRXRINGS: | |
3597 | cmd->data = bp->num_queues; | |
3598 | break; | |
3599 | case ETHTOOL_GRXCLSRLCNT: | |
3600 | cmd->rule_cnt = bp->rx_fs_list.count; | |
3601 | break; | |
3602 | case ETHTOOL_GRXCLSRULE: | |
3603 | ret = gem_get_flow_entry(netdev, cmd); | |
3604 | break; | |
3605 | case ETHTOOL_GRXCLSRLALL: | |
3606 | ret = gem_get_all_flow_entries(netdev, cmd, rule_locs); | |
3607 | break; | |
3608 | default: | |
3609 | netdev_err(netdev, | |
3610 | "Command parameter %d is not supported\n", cmd->cmd); | |
3611 | ret = -EOPNOTSUPP; | |
3612 | } | |
3613 | ||
3614 | return ret; | |
3615 | } | |
3616 | ||
3617 | static int gem_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd) | |
3618 | { | |
3619 | struct macb *bp = netdev_priv(netdev); | |
ae8223de RO |
3620 | int ret; |
3621 | ||
ae8223de RO |
3622 | switch (cmd->cmd) { |
3623 | case ETHTOOL_SRXCLSRLINS: | |
3624 | if ((cmd->fs.location >= bp->max_tuples) | |
3625 | || (cmd->fs.ring_cookie >= bp->num_queues)) { | |
3626 | ret = -EINVAL; | |
3627 | break; | |
3628 | } | |
3629 | ret = gem_add_flow_filter(netdev, cmd); | |
3630 | break; | |
3631 | case ETHTOOL_SRXCLSRLDEL: | |
3632 | ret = gem_del_flow_filter(netdev, cmd); | |
3633 | break; | |
3634 | default: | |
3635 | netdev_err(netdev, | |
3636 | "Command parameter %d is not supported\n", cmd->cmd); | |
3637 | ret = -EOPNOTSUPP; | |
3638 | } | |
3639 | ||
ae8223de RO |
3640 | return ret; |
3641 | } | |
3642 | ||
421d9df0 | 3643 | static const struct ethtool_ops macb_ethtool_ops = { |
d1d1b53d NF |
3644 | .get_regs_len = macb_get_regs_len, |
3645 | .get_regs = macb_get_regs, | |
89e5785f | 3646 | .get_link = ethtool_op_get_link, |
17f393e8 | 3647 | .get_ts_info = ethtool_op_get_ts_info, |
3e2a5e15 SP |
3648 | .get_wol = macb_get_wol, |
3649 | .set_wol = macb_set_wol, | |
7897b071 AT |
3650 | .get_link_ksettings = macb_get_link_ksettings, |
3651 | .set_link_ksettings = macb_set_link_ksettings, | |
8441bb33 ZB |
3652 | .get_ringparam = macb_get_ringparam, |
3653 | .set_ringparam = macb_set_ringparam, | |
8cd5a56c | 3654 | }; |
8cd5a56c | 3655 | |
8093b1c3 | 3656 | static const struct ethtool_ops gem_ethtool_ops = { |
8cd5a56c XH |
3657 | .get_regs_len = macb_get_regs_len, |
3658 | .get_regs = macb_get_regs, | |
558e35cc NF |
3659 | .get_wol = macb_get_wol, |
3660 | .set_wol = macb_set_wol, | |
8cd5a56c | 3661 | .get_link = ethtool_op_get_link, |
c2594d80 | 3662 | .get_ts_info = macb_get_ts_info, |
3ff13f1c XH |
3663 | .get_ethtool_stats = gem_get_ethtool_stats, |
3664 | .get_strings = gem_get_ethtool_strings, | |
3665 | .get_sset_count = gem_get_sset_count, | |
7897b071 AT |
3666 | .get_link_ksettings = macb_get_link_ksettings, |
3667 | .set_link_ksettings = macb_set_link_ksettings, | |
8441bb33 ZB |
3668 | .get_ringparam = macb_get_ringparam, |
3669 | .set_ringparam = macb_set_ringparam, | |
ae8223de RO |
3670 | .get_rxnfc = gem_get_rxnfc, |
3671 | .set_rxnfc = gem_set_rxnfc, | |
89e5785f HS |
3672 | }; |
3673 | ||
421d9df0 | 3674 | static int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) |
89e5785f | 3675 | { |
c2594d80 | 3676 | struct macb *bp = netdev_priv(dev); |
89e5785f HS |
3677 | |
3678 | if (!netif_running(dev)) | |
3679 | return -EINVAL; | |
3680 | ||
7897b071 AT |
3681 | if (bp->ptp_info) { |
3682 | switch (cmd) { | |
3683 | case SIOCSHWTSTAMP: | |
3684 | return bp->ptp_info->set_hwtst(dev, rq, cmd); | |
3685 | case SIOCGHWTSTAMP: | |
3686 | return bp->ptp_info->get_hwtst(dev, rq); | |
3687 | } | |
c2594d80 | 3688 | } |
7897b071 AT |
3689 | |
3690 | return phylink_mii_ioctl(bp->phylink, rq, cmd); | |
89e5785f HS |
3691 | } |
3692 | ||
c1e85c6c CB |
3693 | static inline void macb_set_txcsum_feature(struct macb *bp, |
3694 | netdev_features_t features) | |
3695 | { | |
3696 | u32 val; | |
3697 | ||
3698 | if (!macb_is_gem(bp)) | |
3699 | return; | |
3700 | ||
3701 | val = gem_readl(bp, DMACFG); | |
3702 | if (features & NETIF_F_HW_CSUM) | |
3703 | val |= GEM_BIT(TXCOEN); | |
3704 | else | |
3705 | val &= ~GEM_BIT(TXCOEN); | |
3706 | ||
3707 | gem_writel(bp, DMACFG, val); | |
3708 | } | |
3709 | ||
3710 | static inline void macb_set_rxcsum_feature(struct macb *bp, | |
3711 | netdev_features_t features) | |
3712 | { | |
3713 | struct net_device *netdev = bp->dev; | |
3714 | u32 val; | |
3715 | ||
3716 | if (!macb_is_gem(bp)) | |
3717 | return; | |
3718 | ||
3719 | val = gem_readl(bp, NCFGR); | |
3720 | if ((features & NETIF_F_RXCSUM) && !(netdev->flags & IFF_PROMISC)) | |
3721 | val |= GEM_BIT(RXCOEN); | |
3722 | else | |
3723 | val &= ~GEM_BIT(RXCOEN); | |
3724 | ||
3725 | gem_writel(bp, NCFGR, val); | |
3726 | } | |
3727 | ||
3728 | static inline void macb_set_rxflow_feature(struct macb *bp, | |
3729 | netdev_features_t features) | |
3730 | { | |
3731 | if (!macb_is_gem(bp)) | |
3732 | return; | |
3733 | ||
3734 | gem_enable_flow_filters(bp, !!(features & NETIF_F_NTUPLE)); | |
3735 | } | |
3736 | ||
85ff3d87 CP |
3737 | static int macb_set_features(struct net_device *netdev, |
3738 | netdev_features_t features) | |
3739 | { | |
3740 | struct macb *bp = netdev_priv(netdev); | |
3741 | netdev_features_t changed = features ^ netdev->features; | |
3742 | ||
3743 | /* TX checksum offload */ | |
c1e85c6c CB |
3744 | if (changed & NETIF_F_HW_CSUM) |
3745 | macb_set_txcsum_feature(bp, features); | |
85ff3d87 | 3746 | |
924ec53c | 3747 | /* RX checksum offload */ |
c1e85c6c CB |
3748 | if (changed & NETIF_F_RXCSUM) |
3749 | macb_set_rxcsum_feature(bp, features); | |
924ec53c | 3750 | |
ae8223de | 3751 | /* RX Flow Filters */ |
c1e85c6c CB |
3752 | if (changed & NETIF_F_NTUPLE) |
3753 | macb_set_rxflow_feature(bp, features); | |
ae8223de | 3754 | |
85ff3d87 CP |
3755 | return 0; |
3756 | } | |
3757 | ||
c1e85c6c CB |
3758 | static void macb_restore_features(struct macb *bp) |
3759 | { | |
3760 | struct net_device *netdev = bp->dev; | |
3761 | netdev_features_t features = netdev->features; | |
a14d273b | 3762 | struct ethtool_rx_fs_item *item; |
c1e85c6c CB |
3763 | |
3764 | /* TX checksum offload */ | |
3765 | macb_set_txcsum_feature(bp, features); | |
3766 | ||
3767 | /* RX checksum offload */ | |
3768 | macb_set_rxcsum_feature(bp, features); | |
3769 | ||
3770 | /* RX Flow Filters */ | |
a14d273b CB |
3771 | list_for_each_entry(item, &bp->rx_fs_list.list, list) |
3772 | gem_prog_cmp_regs(bp, &item->fs); | |
3773 | ||
c1e85c6c CB |
3774 | macb_set_rxflow_feature(bp, features); |
3775 | } | |
3776 | ||
5f1fa992 AB |
3777 | static const struct net_device_ops macb_netdev_ops = { |
3778 | .ndo_open = macb_open, | |
3779 | .ndo_stop = macb_close, | |
3780 | .ndo_start_xmit = macb_start_xmit, | |
afc4b13d | 3781 | .ndo_set_rx_mode = macb_set_rx_mode, |
5f1fa992 | 3782 | .ndo_get_stats = macb_get_stats, |
a7605370 | 3783 | .ndo_eth_ioctl = macb_ioctl, |
5f1fa992 | 3784 | .ndo_validate_addr = eth_validate_addr, |
a5898ea0 | 3785 | .ndo_change_mtu = macb_change_mtu, |
5f1fa992 | 3786 | .ndo_set_mac_address = eth_mac_addr, |
6e8cf5c0 TP |
3787 | #ifdef CONFIG_NET_POLL_CONTROLLER |
3788 | .ndo_poll_controller = macb_poll_controller, | |
3789 | #endif | |
85ff3d87 | 3790 | .ndo_set_features = macb_set_features, |
1629dd4f | 3791 | .ndo_features_check = macb_features_check, |
5f1fa992 AB |
3792 | }; |
3793 | ||
64ec42fe | 3794 | /* Configure peripheral capabilities according to device tree |
e175587f NF |
3795 | * and integration options used |
3796 | */ | |
64ec42fe MF |
3797 | static void macb_configure_caps(struct macb *bp, |
3798 | const struct macb_config *dt_conf) | |
e175587f NF |
3799 | { |
3800 | u32 dcfg; | |
e175587f | 3801 | |
f6970505 NF |
3802 | if (dt_conf) |
3803 | bp->caps = dt_conf->caps; | |
3804 | ||
f2ce8a9e | 3805 | if (hw_is_gem(bp->regs, bp->native_io)) { |
e175587f NF |
3806 | bp->caps |= MACB_CAPS_MACB_IS_GEM; |
3807 | ||
e175587f NF |
3808 | dcfg = gem_readl(bp, DCFG1); |
3809 | if (GEM_BFEXT(IRQCOR, dcfg) == 0) | |
3810 | bp->caps |= MACB_CAPS_ISR_CLEAR_ON_WRITE; | |
e4e143e2 PT |
3811 | if (GEM_BFEXT(NO_PCS, dcfg) == 0) |
3812 | bp->caps |= MACB_CAPS_PCS; | |
3813 | dcfg = gem_readl(bp, DCFG12); | |
3814 | if (GEM_BFEXT(HIGH_SPEED, dcfg) == 1) | |
3815 | bp->caps |= MACB_CAPS_HIGH_SPEED; | |
e175587f NF |
3816 | dcfg = gem_readl(bp, DCFG2); |
3817 | if ((dcfg & (GEM_BIT(RX_PKT_BUFF) | GEM_BIT(TX_PKT_BUFF))) == 0) | |
3818 | bp->caps |= MACB_CAPS_FIFO_MODE; | |
ab91f0a9 RO |
3819 | #ifdef CONFIG_MACB_USE_HWSTAMP |
3820 | if (gem_has_ptp(bp)) { | |
7b429614 | 3821 | if (!GEM_BFEXT(TSU, gem_readl(bp, DCFG5))) |
7897b071 AT |
3822 | dev_err(&bp->pdev->dev, |
3823 | "GEM doesn't support hardware ptp.\n"); | |
ab91f0a9 | 3824 | else { |
7b429614 | 3825 | bp->hw_dma_cap |= HW_DMA_CAP_PTP; |
ab91f0a9 RO |
3826 | bp->ptp_info = &gem_ptp_info; |
3827 | } | |
7b429614 | 3828 | } |
ab91f0a9 | 3829 | #endif |
e175587f NF |
3830 | } |
3831 | ||
a35919e1 | 3832 | dev_dbg(&bp->pdev->dev, "Cadence caps 0x%08x\n", bp->caps); |
e175587f NF |
3833 | } |
3834 | ||
02c958dd | 3835 | static void macb_probe_queues(void __iomem *mem, |
f2ce8a9e | 3836 | bool native_io, |
02c958dd CP |
3837 | unsigned int *queue_mask, |
3838 | unsigned int *num_queues) | |
3839 | { | |
02c958dd CP |
3840 | *queue_mask = 0x1; |
3841 | *num_queues = 1; | |
3842 | ||
da120112 NF |
3843 | /* Is this MACB or GEM?
3844 | *
3845 | * We must read the hardware directly here because we are early in
3846 | * the probe process and the MACB_CAPS_MACB_IS_GEM flag has not
3847 | * been set yet
3848 | */ | |
f2ce8a9e | 3849 | if (!hw_is_gem(mem, native_io)) |
02c958dd CP |
3850 | return; |
3851 | ||
3852 | /* bit 0 is never set but queue 0 always exists */ | |
fec371f6 | 3853 | *queue_mask |= readl_relaxed(mem + GEM_DCFG6) & 0xff; |
b7ab39b3 | 3854 | *num_queues = hweight32(*queue_mask); |
02c958dd CP |
3855 | } |
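A user-space sketch of the mask arithmetic above, with an illustrative DCFG6 value; hweight32() is the kernel's population count, modeled here with __builtin_popcount():

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t dcfg6 = 0x06;	/* example: hardware queues 1 and 2 */
	uint32_t queue_mask = 0x1 | (dcfg6 & 0xff);	/* queue 0 forced */
	unsigned int num_queues = (unsigned int)__builtin_popcount(queue_mask);

	/* prints mask=0x7 queues=3 */
	printf("mask=0x%x queues=%u\n", (unsigned int)queue_mask, num_queues);
	return 0;
}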
3856 | ||
38493da4 CB |
3857 | static void macb_clks_disable(struct clk *pclk, struct clk *hclk, struct clk *tx_clk, |
3858 | struct clk *rx_clk, struct clk *tsu_clk) | |
3859 | { | |
3860 | struct clk_bulk_data clks[] = { | |
3861 | { .clk = tsu_clk, }, | |
3862 | { .clk = rx_clk, }, | |
3863 | { .clk = pclk, }, | |
3864 | { .clk = hclk, }, | |
3865 | { .clk = tx_clk }, | |
3866 | }; | |
3867 | ||
3868 | clk_bulk_disable_unprepare(ARRAY_SIZE(clks), clks); | |
3869 | } | |
3870 | ||
c69618b3 | 3871 | static int macb_clk_init(struct platform_device *pdev, struct clk **pclk, |
aead88bd | 3872 | struct clk **hclk, struct clk **tx_clk, |
f5473d1d | 3873 | struct clk **rx_clk, struct clk **tsu_clk) |
89e5785f | 3874 | { |
83a77e9e | 3875 | struct macb_platform_data *pdata; |
421d9df0 | 3876 | int err; |
89e5785f | 3877 | |
83a77e9e BF |
3878 | pdata = dev_get_platdata(&pdev->dev); |
3879 | if (pdata) { | |
3880 | *pclk = pdata->pclk; | |
3881 | *hclk = pdata->hclk; | |
3882 | } else { | |
3883 | *pclk = devm_clk_get(&pdev->dev, "pclk"); | |
3884 | *hclk = devm_clk_get(&pdev->dev, "hclk"); | |
3885 | } | |
3886 | ||
a04be4b6 MT |
3887 | if (IS_ERR_OR_NULL(*pclk)) |
3888 | return dev_err_probe(&pdev->dev, | |
3889 | IS_ERR(*pclk) ? PTR_ERR(*pclk) : -ENODEV, | |
3890 | "failed to get pclk\n"); | |
3891 | ||
3892 | if (IS_ERR_OR_NULL(*hclk)) | |
3893 | return dev_err_probe(&pdev->dev, | |
3894 | IS_ERR(*hclk) ? PTR_ERR(*hclk) : -ENODEV, | |
3895 | "failed to get hclk\n"); | |
b48e0bab | 3896 | |
bd310aca | 3897 | *tx_clk = devm_clk_get_optional(&pdev->dev, "tx_clk"); |
c69618b3 | 3898 | if (IS_ERR(*tx_clk)) |
bd310aca | 3899 | return PTR_ERR(*tx_clk); |
e1824dfe | 3900 | |
bd310aca | 3901 | *rx_clk = devm_clk_get_optional(&pdev->dev, "rx_clk"); |
aead88bd | 3902 | if (IS_ERR(*rx_clk)) |
bd310aca | 3903 | return PTR_ERR(*rx_clk); |
aead88bd | 3904 | |
bd310aca | 3905 | *tsu_clk = devm_clk_get_optional(&pdev->dev, "tsu_clk"); |
f5473d1d | 3906 | if (IS_ERR(*tsu_clk)) |
bd310aca | 3907 | return PTR_ERR(*tsu_clk); |
f5473d1d | 3908 | |
c69618b3 | 3909 | err = clk_prepare_enable(*pclk); |
b48e0bab | 3910 | if (err) { |
f413cbb3 | 3911 | dev_err(&pdev->dev, "failed to enable pclk (%d)\n", err); |
421d9df0 | 3912 | return err; |
b48e0bab SB |
3913 | } |
3914 | ||
c69618b3 | 3915 | err = clk_prepare_enable(*hclk); |
b48e0bab | 3916 | if (err) { |
f413cbb3 | 3917 | dev_err(&pdev->dev, "failed to enable hclk (%d)\n", err); |
421d9df0 | 3918 | goto err_disable_pclk; |
89e5785f | 3919 | } |
89e5785f | 3920 | |
c69618b3 | 3921 | err = clk_prepare_enable(*tx_clk); |
93b31f48 | 3922 | if (err) { |
f413cbb3 | 3923 | dev_err(&pdev->dev, "failed to enable tx_clk (%d)\n", err); |
421d9df0 | 3924 | goto err_disable_hclk; |
e1824dfe SB |
3925 | } |
3926 | ||
aead88bd | 3927 | err = clk_prepare_enable(*rx_clk); |
3928 | if (err) { | |
f413cbb3 | 3929 | dev_err(&pdev->dev, "failed to enable rx_clk (%d)\n", err); |
aead88bd | 3930 | goto err_disable_txclk; |
3931 | } | |
3932 | ||
f5473d1d HK |
3933 | err = clk_prepare_enable(*tsu_clk); |
3934 | if (err) { | |
f413cbb3 | 3935 | dev_err(&pdev->dev, "failed to enable tsu_clk (%d)\n", err); |
f5473d1d HK |
3936 | goto err_disable_rxclk; |
3937 | } | |
3938 | ||
c69618b3 NF |
3939 | return 0; |
3940 | ||
f5473d1d HK |
3941 | err_disable_rxclk: |
3942 | clk_disable_unprepare(*rx_clk); | |
3943 | ||
aead88bd | 3944 | err_disable_txclk: |
3945 | clk_disable_unprepare(*tx_clk); | |
3946 | ||
c69618b3 NF |
3947 | err_disable_hclk: |
3948 | clk_disable_unprepare(*hclk); | |
3949 | ||
3950 | err_disable_pclk: | |
3951 | clk_disable_unprepare(*pclk); | |
3952 | ||
3953 | return err; | |
3954 | } | |
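The enable/unwind structure above is the standard goto-ladder idiom: every clock that was successfully enabled gets a matching error label, and on failure the labels run in reverse order so nothing is left enabled. A generic, self-contained sketch of the pattern (all names are illustrative):

#include <stdio.h>

static int enable(const char *name, int fail)
{
	if (fail) {
		printf("enable %s: failed\n", name);
		return -1;
	}
	printf("enable %s\n", name);
	return 0;
}

static void disable(const char *name)
{
	printf("disable %s\n", name);
}

static int demo_init(void)
{
	int err;

	err = enable("pclk", 0);
	if (err)
		return err;
	err = enable("hclk", 0);
	if (err)
		goto err_disable_pclk;
	err = enable("tx_clk", 1);	/* simulate a failure here */
	if (err)
		goto err_disable_hclk;
	return 0;

err_disable_hclk:
	disable("hclk");
err_disable_pclk:
	disable("pclk");
	return err;
}

int main(void)
{
	return demo_init() ? 1 : 0;
}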
3955 | ||
3956 | static int macb_init(struct platform_device *pdev) | |
3957 | { | |
3958 | struct net_device *dev = platform_get_drvdata(pdev); | |
3959 | unsigned int hw_q, q; | |
3960 | struct macb *bp = netdev_priv(dev); | |
3961 | struct macb_queue *queue; | |
3962 | int err; | |
ae8223de | 3963 | u32 val, reg; |
c69618b3 | 3964 | |
b410d13e ZB |
3965 | bp->tx_ring_size = DEFAULT_TX_RING_SIZE; |
3966 | bp->rx_ring_size = DEFAULT_RX_RING_SIZE; | |
3967 | ||
02c958dd CP |
3968 | /* Set the queue register mapping once and for all: queue 0 has a
3969 | * special register mapping, but we don't want to test the queue index
3970 | * and then compute the corresponding register offset at run time.
3971 | */ | |
cf250de0 | 3972 | for (hw_q = 0, q = 0; hw_q < MACB_MAX_QUEUES; ++hw_q) { |
bfa0914a | 3973 | if (!(bp->queue_mask & (1 << hw_q))) |
02c958dd CP |
3974 | continue; |
3975 | ||
cf250de0 | 3976 | queue = &bp->queues[q]; |
02c958dd | 3977 | queue->bp = bp; |
138badbc RH |
3978 | spin_lock_init(&queue->tx_ptr_lock); |
3979 | netif_napi_add(dev, &queue->napi_rx, macb_rx_poll, NAPI_POLL_WEIGHT); | |
3980 | netif_napi_add(dev, &queue->napi_tx, macb_tx_poll, NAPI_POLL_WEIGHT); | |
02c958dd CP |
3981 | if (hw_q) { |
3982 | queue->ISR = GEM_ISR(hw_q - 1); | |
3983 | queue->IER = GEM_IER(hw_q - 1); | |
3984 | queue->IDR = GEM_IDR(hw_q - 1); | |
3985 | queue->IMR = GEM_IMR(hw_q - 1); | |
3986 | queue->TBQP = GEM_TBQP(hw_q - 1); | |
ae1f2a56 RO |
3987 | queue->RBQP = GEM_RBQP(hw_q - 1); |
3988 | queue->RBQS = GEM_RBQS(hw_q - 1); | |
fff8019a | 3989 | #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT |
ae1f2a56 | 3990 | if (bp->hw_dma_cap & HW_DMA_CAP_64B) { |
dc97a89e | 3991 | queue->TBQPH = GEM_TBQPH(hw_q - 1); |
ae1f2a56 RO |
3992 | queue->RBQPH = GEM_RBQPH(hw_q - 1); |
3993 | } | |
fff8019a | 3994 | #endif |
02c958dd CP |
3995 | } else { |
3996 | /* queue0 uses legacy registers */ | |
3997 | queue->ISR = MACB_ISR; | |
3998 | queue->IER = MACB_IER; | |
3999 | queue->IDR = MACB_IDR; | |
4000 | queue->IMR = MACB_IMR; | |
4001 | queue->TBQP = MACB_TBQP; | |
ae1f2a56 | 4002 | queue->RBQP = MACB_RBQP; |
fff8019a | 4003 | #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT |
ae1f2a56 | 4004 | if (bp->hw_dma_cap & HW_DMA_CAP_64B) { |
dc97a89e | 4005 | queue->TBQPH = MACB_TBQPH; |
ae1f2a56 RO |
4006 | queue->RBQPH = MACB_RBQPH; |
4007 | } | |
fff8019a | 4008 | #endif |
02c958dd CP |
4009 | } |
4010 | ||
4011 | /* Get the IRQ: here we use the Linux queue index, not the hardware
4012 | * queue index. The queue IRQ definitions in the device tree must
4013 | * omit the optional gaps that could exist in the hardware queue
4014 | * mask.
4015 | */ | |
cf250de0 | 4016 | queue->irq = platform_get_irq(pdev, q); |
02c958dd | 4017 | err = devm_request_irq(&pdev->dev, queue->irq, macb_interrupt, |
20488239 | 4018 | IRQF_SHARED, dev->name, queue); |
02c958dd CP |
4019 | if (err) { |
4020 | dev_err(&pdev->dev, | |
4021 | "Unable to request IRQ %d (error %d)\n", | |
4022 | queue->irq, err); | |
c69618b3 | 4023 | return err; |
02c958dd CP |
4024 | } |
4025 | ||
4026 | INIT_WORK(&queue->tx_error_task, macb_tx_error_task); | |
cf250de0 | 4027 | q++; |
89e5785f HS |
4028 | } |
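The loop above packs possibly sparse hardware queue numbers into dense Linux queue indices; the device tree then lists IRQs in the dense order. A standalone sketch with a gap in the mask:

#include <stdio.h>

#define MACB_MAX_QUEUES 8

int main(void)
{
	unsigned int queue_mask = 0xd;	/* hardware queues 0, 2 and 3 */
	unsigned int hw_q, q = 0;

	for (hw_q = 0; hw_q < MACB_MAX_QUEUES; ++hw_q) {
		if (!(queue_mask & (1u << hw_q)))
			continue;
		printf("hw queue %u -> linux queue %u\n", hw_q, q);
		q++;
	}
	return 0;
}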
4029 | ||
5f1fa992 | 4030 | dev->netdev_ops = &macb_netdev_ops; |
89e5785f | 4031 | |
4df95131 NF |
4032 | /* set up the appropriate routines according to the adapter type */
4033 | if (macb_is_gem(bp)) { | |
a4c35ed3 | 4034 | bp->max_tx_length = GEM_MAX_TX_LEN; |
4df95131 NF |
4035 | bp->macbgem_ops.mog_alloc_rx_buffers = gem_alloc_rx_buffers; |
4036 | bp->macbgem_ops.mog_free_rx_buffers = gem_free_rx_buffers; | |
4037 | bp->macbgem_ops.mog_init_rings = gem_init_rings; | |
4038 | bp->macbgem_ops.mog_rx = gem_rx; | |
8cd5a56c | 4039 | dev->ethtool_ops = &gem_ethtool_ops; |
4df95131 | 4040 | } else { |
a4c35ed3 | 4041 | bp->max_tx_length = MACB_MAX_TX_LEN; |
4df95131 NF |
4042 | bp->macbgem_ops.mog_alloc_rx_buffers = macb_alloc_rx_buffers; |
4043 | bp->macbgem_ops.mog_free_rx_buffers = macb_free_rx_buffers; | |
4044 | bp->macbgem_ops.mog_init_rings = macb_init_rings; | |
4045 | bp->macbgem_ops.mog_rx = macb_rx; | |
8cd5a56c | 4046 | dev->ethtool_ops = &macb_ethtool_ops; |
4df95131 NF |
4047 | } |
4048 | ||
a4c35ed3 CP |
4049 | /* Set features */ |
4050 | dev->hw_features = NETIF_F_SG; | |
1629dd4f RO |
4051 | |
4052 | /* Check LSO capability */ | |
4053 | if (GEM_BFEXT(PBUF_LSO, gem_readl(bp, DCFG6))) | |
4054 | dev->hw_features |= MACB_NETIF_LSO; | |
4055 | ||
85ff3d87 CP |
4056 | /* Checksum offload is only available on gem with packet buffer */ |
4057 | if (macb_is_gem(bp) && !(bp->caps & MACB_CAPS_FIFO_MODE)) | |
924ec53c | 4058 | dev->hw_features |= NETIF_F_HW_CSUM | NETIF_F_RXCSUM; |
a4c35ed3 CP |
4059 | if (bp->caps & MACB_CAPS_SG_DISABLED) |
4060 | dev->hw_features &= ~NETIF_F_SG; | |
4061 | dev->features = dev->hw_features; | |
4062 | ||
ae8223de RO |
4063 | /* Check RX Flow Filters support. |
4064 | * Max Rx flows set by availability of screeners & compare regs: | |
4065 | * each 4-tuple definition requires 1 T2 screener reg + 3 compare regs
4066 | */ | |
4067 | reg = gem_readl(bp, DCFG8); | |
4068 | bp->max_tuples = min((GEM_BFEXT(SCR2CMP, reg) / 3), | |
4069 | GEM_BFEXT(T2SCR, reg)); | |
a714e27e | 4070 | INIT_LIST_HEAD(&bp->rx_fs_list.list); |
ae8223de RO |
4071 | if (bp->max_tuples > 0) { |
4072 | /* also needs one ethtype match to check IPv4 */ | |
4073 | if (GEM_BFEXT(SCR2ETH, reg) > 0) { | |
4074 | /* program this reg now */ | |
4075 | reg = 0; | |
4076 | reg = GEM_BFINS(ETHTCMP, (uint16_t)ETH_P_IP, reg); | |
4077 | gem_writel_n(bp, ETHT, SCRT2_ETHT, reg); | |
4078 | /* Filtering is supported in hardware but is not enabled by default */
4079 | dev->hw_features |= NETIF_F_NTUPLE; | |
4080 | /* init Rx flow definitions */ | |
ae8223de RO |
4081 | bp->rx_fs_list.count = 0; |
4082 | spin_lock_init(&bp->rx_fs_lock); | |
4083 | } else | |
4084 | bp->max_tuples = 0; | |
4085 | } | |
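A worked example of the max_tuples bound (the DCFG8 field values are made up): each 4-tuple needs one screener plus three compare registers, so with 30 compare registers and 16 screeners, max_tuples = min(30 / 3, 16) = 10.

#include <stdio.h>

int main(void)
{
	unsigned int scr2cmp = 30, t2scr = 16;	/* illustrative DCFG8 fields */
	unsigned int by_cmp = scr2cmp / 3;
	unsigned int max_tuples = by_cmp < t2scr ? by_cmp : t2scr;

	printf("max_tuples = %u\n", max_tuples);	/* prints 10 */
	return 0;
}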
4086 | ||
ce721a70 NA |
4087 | if (!(bp->caps & MACB_CAPS_USRIO_DISABLED)) { |
4088 | val = 0; | |
2ccb0161 | 4089 | if (phy_interface_mode_is_rgmii(bp->phy_interface)) |
edac6386 | 4090 | val = bp->usrio->rgmii; |
ce721a70 | 4091 | else if (bp->phy_interface == PHY_INTERFACE_MODE_RMII && |
6bdaa5e9 | 4092 | (bp->caps & MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII)) |
edac6386 | 4093 | val = bp->usrio->rmii; |
6bdaa5e9 | 4094 | else if (!(bp->caps & MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII)) |
edac6386 | 4095 | val = bp->usrio->mii; |
421d9df0 | 4096 | |
ce721a70 | 4097 | if (bp->caps & MACB_CAPS_USRIO_HAS_CLKEN) |
edac6386 | 4098 | val |= bp->usrio->refclk; |
421d9df0 | 4099 | |
ce721a70 NA |
4100 | macb_or_gem_writel(bp, USRIO, val); |
4101 | } | |
421d9df0 | 4102 | |
89e5785f | 4103 | /* Set MII management clock divider */ |
421d9df0 CP |
4104 | val = macb_mdc_clk_div(bp); |
4105 | val |= macb_dbw(bp); | |
022be25c PCK |
4106 | if (bp->phy_interface == PHY_INTERFACE_MODE_SGMII) |
4107 | val |= GEM_BIT(SGMIIEN) | GEM_BIT(PCSSEL); | |
421d9df0 CP |
4108 | macb_writel(bp, NCFGR, val); |
4109 | ||
4110 | return 0; | |
421d9df0 CP |
4111 | } |
4112 | ||
b1242236 AP |
4113 | static const struct macb_usrio_config macb_default_usrio = { |
4114 | .mii = MACB_BIT(MII), | |
4115 | .rmii = MACB_BIT(RMII), | |
4116 | .rgmii = GEM_BIT(RGMII), | |
4117 | .refclk = MACB_BIT(CLKEN), | |
4118 | }; | |
4119 | ||
421d9df0 CP |
4120 | #if defined(CONFIG_OF) |
4121 | /* 1518 rounded up */ | |
4122 | #define AT91ETHER_MAX_RBUFF_SZ 0x600 | |
4123 | /* max number of receive buffers */ | |
4124 | #define AT91ETHER_MAX_RX_DESCR 9 | |
4125 | ||
49db9228 AB |
4126 | static struct sifive_fu540_macb_mgmt *mgmt; |
4127 | ||
33fdef24 | 4128 | static int at91ether_alloc_coherent(struct macb *lp) |
421d9df0 | 4129 | { |
ae1f2a56 | 4130 | struct macb_queue *q = &lp->queues[0]; |
421d9df0 | 4131 | |
ae1f2a56 | 4132 | q->rx_ring = dma_alloc_coherent(&lp->pdev->dev, |
421d9df0 | 4133 | (AT91ETHER_MAX_RX_DESCR * |
dc97a89e | 4134 | macb_dma_desc_get_size(lp)), |
ae1f2a56 RO |
4135 | &q->rx_ring_dma, GFP_KERNEL); |
4136 | if (!q->rx_ring) | |
421d9df0 CP |
4137 | return -ENOMEM; |
4138 | ||
ae1f2a56 | 4139 | q->rx_buffers = dma_alloc_coherent(&lp->pdev->dev, |
421d9df0 CP |
4140 | AT91ETHER_MAX_RX_DESCR * |
4141 | AT91ETHER_MAX_RBUFF_SZ, | |
ae1f2a56 RO |
4142 | &q->rx_buffers_dma, GFP_KERNEL); |
4143 | if (!q->rx_buffers) { | |
421d9df0 CP |
4144 | dma_free_coherent(&lp->pdev->dev, |
4145 | AT91ETHER_MAX_RX_DESCR * | |
dc97a89e | 4146 | macb_dma_desc_get_size(lp), |
ae1f2a56 RO |
4147 | q->rx_ring, q->rx_ring_dma); |
4148 | q->rx_ring = NULL; | |
421d9df0 CP |
4149 | return -ENOMEM; |
4150 | } | |
4151 | ||
33fdef24 CB |
4152 | return 0; |
4153 | } | |
4154 | ||
4155 | static void at91ether_free_coherent(struct macb *lp) | |
4156 | { | |
4157 | struct macb_queue *q = &lp->queues[0]; | |
4158 | ||
4159 | if (q->rx_ring) { | |
4160 | dma_free_coherent(&lp->pdev->dev, | |
4161 | AT91ETHER_MAX_RX_DESCR * | |
4162 | macb_dma_desc_get_size(lp), | |
4163 | q->rx_ring, q->rx_ring_dma); | |
4164 | q->rx_ring = NULL; | |
4165 | } | |
4166 | ||
4167 | if (q->rx_buffers) { | |
4168 | dma_free_coherent(&lp->pdev->dev, | |
4169 | AT91ETHER_MAX_RX_DESCR * | |
4170 | AT91ETHER_MAX_RBUFF_SZ, | |
4171 | q->rx_buffers, q->rx_buffers_dma); | |
4172 | q->rx_buffers = NULL; | |
4173 | } | |
4174 | } | |
4175 | ||
4176 | /* Initialize and start the Receiver and Transmit subsystems */ | |
4177 | static int at91ether_start(struct macb *lp) | |
4178 | { | |
4179 | struct macb_queue *q = &lp->queues[0]; | |
4180 | struct macb_dma_desc *desc; | |
4181 | dma_addr_t addr; | |
4182 | u32 ctl; | |
4183 | int i, ret; | |
4184 | ||
4185 | ret = at91ether_alloc_coherent(lp); | |
4186 | if (ret) | |
4187 | return ret; | |
4188 | ||
ae1f2a56 | 4189 | addr = q->rx_buffers_dma; |
421d9df0 | 4190 | for (i = 0; i < AT91ETHER_MAX_RX_DESCR; i++) { |
ae1f2a56 | 4191 | desc = macb_rx_desc(q, i); |
dc97a89e RO |
4192 | macb_set_addr(lp, desc, addr); |
4193 | desc->ctrl = 0; | |
421d9df0 CP |
4194 | addr += AT91ETHER_MAX_RBUFF_SZ; |
4195 | } | |
4196 | ||
4197 | /* Set the Wrap bit on the last descriptor */ | |
dc97a89e | 4198 | desc->addr |= MACB_BIT(RX_WRAP); |
421d9df0 CP |
4199 | |
4200 | /* Reset buffer index */ | |
ae1f2a56 | 4201 | q->rx_tail = 0; |
421d9df0 CP |
4202 | |
4203 | /* Program address of descriptor list in Rx Buffer Queue register */ | |
ae1f2a56 | 4204 | macb_writel(lp, RBQP, q->rx_ring_dma); |
421d9df0 CP |
4205 | |
4206 | /* Enable Receive and Transmit */ | |
4207 | ctl = macb_readl(lp, NCR); | |
4208 | macb_writel(lp, NCR, ctl | MACB_BIT(RE) | MACB_BIT(TE)); | |
4209 | ||
33fdef24 CB |
4210 | /* Enable MAC interrupts */ |
4211 | macb_writel(lp, IER, MACB_BIT(RCOMP) | | |
4212 | MACB_BIT(RXUBR) | | |
4213 | MACB_BIT(ISR_TUND) | | |
4214 | MACB_BIT(ISR_RLE) | | |
4215 | MACB_BIT(TCOMP) | | |
4216 | MACB_BIT(ISR_ROVR) | | |
4217 | MACB_BIT(HRESP)); | |
4218 | ||
421d9df0 CP |
4219 | return 0; |
4220 | } | |
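A user-space sketch of the ring layout built above: descriptor i points AT91ETHER_MAX_RBUFF_SZ * i into one coherent buffer allocation, and the wrap bit on the last descriptor closes the ring for the DMA engine (the bit position and base address below are illustrative):

#include <stdint.h>
#include <stdio.h>

#define MAX_RX_DESCR	9
#define RBUFF_SZ	0x600		/* 1518 rounded up */
#define RX_WRAP_BIT	(1u << 1)	/* illustrative bit position */

struct rx_desc {
	uint32_t addr;
	uint32_t ctrl;
};

int main(void)
{
	struct rx_desc ring[MAX_RX_DESCR];
	uint32_t addr = 0x20000000;	/* pretend DMA address of rx_buffers */
	int i;

	for (i = 0; i < MAX_RX_DESCR; i++) {
		ring[i].addr = addr;
		ring[i].ctrl = 0;
		addr += RBUFF_SZ;
	}
	ring[MAX_RX_DESCR - 1].addr |= RX_WRAP_BIT;	/* wrap on the last one */

	printf("last descriptor addr=0x%x\n",
	       (unsigned int)ring[MAX_RX_DESCR - 1].addr);
	return 0;
}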
4221 | ||
33fdef24 CB |
4222 | static void at91ether_stop(struct macb *lp) |
4223 | { | |
4224 | u32 ctl; | |
4225 | ||
4226 | /* Disable MAC interrupts */ | |
4227 | macb_writel(lp, IDR, MACB_BIT(RCOMP) | | |
4228 | MACB_BIT(RXUBR) | | |
4229 | MACB_BIT(ISR_TUND) | | |
4230 | MACB_BIT(ISR_RLE) | | |
4231 | MACB_BIT(TCOMP) | | |
4232 | MACB_BIT(ISR_ROVR) | | |
4233 | MACB_BIT(HRESP)); | |
4234 | ||
4235 | /* Disable Receiver and Transmitter */ | |
4236 | ctl = macb_readl(lp, NCR); | |
4237 | macb_writel(lp, NCR, ctl & ~(MACB_BIT(TE) | MACB_BIT(RE))); | |
4238 | ||
4239 | /* Free resources. */ | |
4240 | at91ether_free_coherent(lp); | |
4241 | } | |
4242 | ||
421d9df0 CP |
4243 | /* Open the ethernet interface */ |
4244 | static int at91ether_open(struct net_device *dev) | |
4245 | { | |
4246 | struct macb *lp = netdev_priv(dev); | |
4247 | u32 ctl; | |
4248 | int ret; | |
4249 | ||
b66bfc13 MC |
4250 | ret = pm_runtime_resume_and_get(&lp->pdev->dev); |
4251 | if (ret < 0) | |
e6a41c23 AB |
4252 | return ret; |
4253 | ||
421d9df0 CP |
4254 | /* Clear internal statistics */ |
4255 | ctl = macb_readl(lp, NCR); | |
4256 | macb_writel(lp, NCR, ctl | MACB_BIT(CLRSTAT)); | |
4257 | ||
4258 | macb_set_hwaddr(lp); | |
4259 | ||
33fdef24 | 4260 | ret = at91ether_start(lp); |
421d9df0 | 4261 | if (ret) |
0eaf228d | 4262 | goto pm_exit; |
421d9df0 | 4263 | |
7897b071 AT |
4264 | ret = macb_phylink_connect(lp); |
4265 | if (ret) | |
33fdef24 | 4266 | goto stop; |
421d9df0 CP |
4267 | |
4268 | netif_start_queue(dev); | |
4269 | ||
4270 | return 0; | |
0eaf228d | 4271 | |
33fdef24 CB |
4272 | stop: |
4273 | at91ether_stop(lp); | |
0eaf228d CB |
4274 | pm_exit: |
4275 | pm_runtime_put_sync(&lp->pdev->dev); | |
4276 | return ret; | |
421d9df0 CP |
4277 | } |
4278 | ||
4279 | /* Close the interface */ | |
4280 | static int at91ether_close(struct net_device *dev) | |
4281 | { | |
4282 | struct macb *lp = netdev_priv(dev); | |
421d9df0 CP |
4283 | |
4284 | netif_stop_queue(dev); | |
4285 | ||
7897b071 AT |
4286 | phylink_stop(lp->phylink); |
4287 | phylink_disconnect_phy(lp->phylink); | |
4288 | ||
33fdef24 | 4289 | at91ether_stop(lp); |
421d9df0 | 4290 | |
e6a41c23 | 4291 | return pm_runtime_put(&lp->pdev->dev); |
421d9df0 CP |
4292 | } |
4293 | ||
4294 | /* Transmit packet */ | |
d1c38957 CB |
4295 | static netdev_tx_t at91ether_start_xmit(struct sk_buff *skb, |
4296 | struct net_device *dev) | |
421d9df0 CP |
4297 | { |
4298 | struct macb *lp = netdev_priv(dev); | |
4299 | ||
1d608d2e WT |
4300 | if (macb_readl(lp, TSR) & MACB_BIT(RM9200_BNQ)) { |
4301 | int desc = 0; | |
4302 | ||
4303 | netif_stop_queue(dev); | |
421d9df0 CP |
4304 | |
4305 | /* Store packet information (to free when Tx completed) */ | |
73d74228 WT |
4306 | lp->rm9200_txq[desc].skb = skb; |
4307 | lp->rm9200_txq[desc].size = skb->len; | |
4308 | lp->rm9200_txq[desc].mapping = dma_map_single(&lp->pdev->dev, skb->data, | |
4309 | skb->len, DMA_TO_DEVICE); | |
4310 | if (dma_mapping_error(&lp->pdev->dev, lp->rm9200_txq[desc].mapping)) { | |
178c7ae9 AK |
4311 | dev_kfree_skb_any(skb); |
4312 | dev->stats.tx_dropped++; | |
4313 | netdev_err(dev, "%s: DMA mapping error\n", __func__); | |
4314 | return NETDEV_TX_OK; | |
4315 | } | |
421d9df0 CP |
4316 | |
4317 | /* Set address of the data in the Transmit Address register */ | |
73d74228 | 4318 | macb_writel(lp, TAR, lp->rm9200_txq[desc].mapping); |
421d9df0 CP |
4319 | /* Set length of the packet in the Transmit Control register */ |
4320 | macb_writel(lp, TCR, skb->len); | |
89e5785f | 4321 | |
421d9df0 CP |
4322 | } else { |
4323 | netdev_err(dev, "%s called, but device is busy!\n", __func__); | |
4324 | return NETDEV_TX_BUSY; | |
4325 | } | |
4326 | ||
4327 | return NETDEV_TX_OK; | |
4328 | } | |
4329 | ||
4330 | /* Extract received frames from the buffer descriptors and send them to the upper layers.
4331 | * (Called from interrupt context) | |
4332 | */ | |
4333 | static void at91ether_rx(struct net_device *dev) | |
4334 | { | |
4335 | struct macb *lp = netdev_priv(dev); | |
ae1f2a56 | 4336 | struct macb_queue *q = &lp->queues[0]; |
dc97a89e | 4337 | struct macb_dma_desc *desc; |
421d9df0 CP |
4338 | unsigned char *p_recv; |
4339 | struct sk_buff *skb; | |
4340 | unsigned int pktlen; | |
4341 | ||
ae1f2a56 | 4342 | desc = macb_rx_desc(q, q->rx_tail); |
dc97a89e | 4343 | while (desc->addr & MACB_BIT(RX_USED)) { |
ae1f2a56 | 4344 | p_recv = q->rx_buffers + q->rx_tail * AT91ETHER_MAX_RBUFF_SZ; |
dc97a89e | 4345 | pktlen = MACB_BF(RX_FRMLEN, desc->ctrl); |
421d9df0 CP |
4346 | skb = netdev_alloc_skb(dev, pktlen + 2); |
4347 | if (skb) { | |
4348 | skb_reserve(skb, 2); | |
59ae1d12 | 4349 | skb_put_data(skb, p_recv, pktlen); |
421d9df0 CP |
4350 | |
4351 | skb->protocol = eth_type_trans(skb, dev); | |
5f1d3a5c TK |
4352 | dev->stats.rx_packets++; |
4353 | dev->stats.rx_bytes += pktlen; | |
421d9df0 CP |
4354 | netif_rx(skb); |
4355 | } else { | |
5f1d3a5c | 4356 | dev->stats.rx_dropped++; |
421d9df0 CP |
4357 | } |
4358 | ||
dc97a89e | 4359 | if (desc->ctrl & MACB_BIT(RX_MHASH_MATCH)) |
5f1d3a5c | 4360 | dev->stats.multicast++; |
421d9df0 CP |
4361 | |
4362 | /* reset ownership bit */ | |
dc97a89e | 4363 | desc->addr &= ~MACB_BIT(RX_USED); |
421d9df0 CP |
4364 | |
4365 | /* wrap after last buffer */ | |
ae1f2a56 RO |
4366 | if (q->rx_tail == AT91ETHER_MAX_RX_DESCR - 1) |
4367 | q->rx_tail = 0; | |
421d9df0 | 4368 | else |
ae1f2a56 | 4369 | q->rx_tail++; |
dc97a89e | 4370 | |
ae1f2a56 | 4371 | desc = macb_rx_desc(q, q->rx_tail); |
421d9df0 CP |
4372 | } |
4373 | } | |
4374 | ||
4375 | /* MAC interrupt handler */ | |
4376 | static irqreturn_t at91ether_interrupt(int irq, void *dev_id) | |
4377 | { | |
4378 | struct net_device *dev = dev_id; | |
4379 | struct macb *lp = netdev_priv(dev); | |
4380 | u32 intstatus, ctl; | |
73d74228 | 4381 | unsigned int desc; |
421d9df0 CP |
4382 | |
4383 | /* MAC Interrupt Status register indicates what interrupts are pending. | |
4384 | * It is automatically cleared once read. | |
4385 | */ | |
4386 | intstatus = macb_readl(lp, ISR); | |
4387 | ||
4388 | /* Receive complete */ | |
4389 | if (intstatus & MACB_BIT(RCOMP)) | |
4390 | at91ether_rx(dev); | |
4391 | ||
4392 | /* Transmit complete */ | |
1d608d2e | 4393 | if (intstatus & MACB_BIT(TCOMP)) { |
421d9df0 CP |
4394 | /* The TCOM bit is set even if the transmission failed */ |
4395 | if (intstatus & (MACB_BIT(ISR_TUND) | MACB_BIT(ISR_RLE))) | |
5f1d3a5c | 4396 | dev->stats.tx_errors++; |
421d9df0 | 4397 | |
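| /* Only one frame is ever in flight on this EMAC, so slot 0 of |
| * rm9200_txq[] is the only descriptor used. |
| */ |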
1d608d2e WT |
4398 | desc = 0; |
4399 | if (lp->rm9200_txq[desc].skb) { | |
73d74228 WT |
4400 | dev_consume_skb_irq(lp->rm9200_txq[desc].skb); |
4401 | lp->rm9200_txq[desc].skb = NULL; | |
4402 | dma_unmap_single(&lp->pdev->dev, lp->rm9200_txq[desc].mapping, | |
4403 | lp->rm9200_txq[desc].size, DMA_TO_DEVICE); | |
5f1d3a5c | 4404 | dev->stats.tx_packets++; |
73d74228 | 4405 | dev->stats.tx_bytes += lp->rm9200_txq[desc].size; |
421d9df0 | 4406 | } |
1d608d2e | 4407 | netif_wake_queue(dev); |
421d9df0 CP |
4408 | } |
4409 | ||
4410 | /* Work-around for EMAC Errata section 41.3.1 */ | |
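| /* The receiver is disabled and re-enabled so that it restarts in a |
| * clean state after the used-bit-read condition. |
| */ |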
4411 | if (intstatus & MACB_BIT(RXUBR)) { | |
4412 | ctl = macb_readl(lp, NCR); | |
4413 | macb_writel(lp, NCR, ctl & ~MACB_BIT(RE)); | |
ffac0e96 | 4414 | wmb(); |
421d9df0 CP |
4415 | macb_writel(lp, NCR, ctl | MACB_BIT(RE)); |
4416 | } | |
4417 | ||
4418 | if (intstatus & MACB_BIT(ISR_ROVR)) | |
4419 | netdev_err(dev, "ROVR error\n"); | |
4420 | ||
4421 | return IRQ_HANDLED; | |
4422 | } | |
4423 | ||
4424 | #ifdef CONFIG_NET_POLL_CONTROLLER | |
4425 | static void at91ether_poll_controller(struct net_device *dev) | |
4426 | { | |
4427 | unsigned long flags; | |
4428 | ||
4429 | local_irq_save(flags); | |
4430 | at91ether_interrupt(dev->irq, dev); | |
4431 | local_irq_restore(flags); | |
4432 | } | |
4433 | #endif | |
4434 | ||
4435 | static const struct net_device_ops at91ether_netdev_ops = { | |
4436 | .ndo_open = at91ether_open, | |
4437 | .ndo_stop = at91ether_close, | |
4438 | .ndo_start_xmit = at91ether_start_xmit, | |
4439 | .ndo_get_stats = macb_get_stats, | |
4440 | .ndo_set_rx_mode = macb_set_rx_mode, | |
4441 | .ndo_set_mac_address = eth_mac_addr, | |
a7605370 | 4442 | .ndo_eth_ioctl = macb_ioctl, |
421d9df0 | 4443 | .ndo_validate_addr = eth_validate_addr, |
421d9df0 CP |
4444 | #ifdef CONFIG_NET_POLL_CONTROLLER |
4445 | .ndo_poll_controller = at91ether_poll_controller, | |
4446 | #endif | |
4447 | }; | |
4448 | ||
c69618b3 | 4449 | static int at91ether_clk_init(struct platform_device *pdev, struct clk **pclk, |
aead88bd | 4450 | struct clk **hclk, struct clk **tx_clk, |
f5473d1d | 4451 | struct clk **rx_clk, struct clk **tsu_clk) |
421d9df0 | 4452 | { |
421d9df0 | 4453 | int err; |
421d9df0 | 4454 | |
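| /* The RM9200 EMAC runs from a single peripheral clock ("ether_clk"); |
| * the other MACB/GEM clocks do not exist on this IP and are reported |
| * back as NULL. |
| */ |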
c69618b3 NF |
4455 | *hclk = NULL; |
4456 | *tx_clk = NULL; | |
aead88bd | 4457 | *rx_clk = NULL; |
f5473d1d | 4458 | *tsu_clk = NULL; |
c69618b3 NF |
4459 | |
4460 | *pclk = devm_clk_get(&pdev->dev, "ether_clk"); | |
4461 | if (IS_ERR(*pclk)) | |
4462 | return PTR_ERR(*pclk); | |
421d9df0 | 4463 | |
c69618b3 | 4464 | err = clk_prepare_enable(*pclk); |
421d9df0 | 4465 | if (err) { |
f413cbb3 | 4466 | dev_err(&pdev->dev, "failed to enable pclk (%d)\n", err); |
421d9df0 CP |
4467 | return err; |
4468 | } | |
4469 | ||
c69618b3 NF |
4470 | return 0; |
4471 | } | |
4472 | ||
4473 | static int at91ether_init(struct platform_device *pdev) | |
4474 | { | |
4475 | struct net_device *dev = platform_get_drvdata(pdev); | |
4476 | struct macb *bp = netdev_priv(dev); | |
4477 | int err; | |
c69618b3 | 4478 | |
fec9d3b1 AB |
4479 | bp->queues[0].bp = bp; |
4480 | ||
421d9df0 CP |
4481 | dev->netdev_ops = &at91ether_netdev_ops; |
4482 | dev->ethtool_ops = &macb_ethtool_ops; | |
4483 | ||
4484 | err = devm_request_irq(&pdev->dev, dev->irq, at91ether_interrupt, | |
4485 | 0, dev->name, dev); | |
4486 | if (err) | |
c69618b3 | 4487 | return err; |
421d9df0 CP |
4488 | |
4489 | macb_writel(bp, NCR, 0); | |
4490 | ||
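| /* MDC = MCK/32; the BIG bit allows reception of 1536-byte frames */ |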
ac2fcfa9 | 4491 | macb_writel(bp, NCFGR, MACB_BF(CLK, MACB_CLK_DIV32) | MACB_BIT(BIG)); |
421d9df0 CP |
4492 | |
4493 | return 0; | |
421d9df0 CP |
4494 | } |
4495 | ||
c218ad55 YS |
4496 | static unsigned long fu540_macb_tx_recalc_rate(struct clk_hw *hw, |
4497 | unsigned long parent_rate) | |
4498 | { | |
4499 | return mgmt->rate; | |
4500 | } | |
4501 | ||
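| /* The GEMGXL TX clock apparently supports only the three rates needed |
| * for 10/100/1000 Mbps operation: 2.5, 25 and 125 MHz. Requests are |
| * rounded to the nearest supported rate, with a WARN for anything that |
| * is not an exact match. |
| */ |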
4502 | static long fu540_macb_tx_round_rate(struct clk_hw *hw, unsigned long rate, | |
4503 | unsigned long *parent_rate) | |
4504 | { | |
4505 | if (WARN_ON(rate < 2500000)) | |
4506 | return 2500000; | |
4507 | else if (rate == 2500000) | |
4508 | return 2500000; | |
4509 | else if (WARN_ON(rate < 13750000)) | |
4510 | return 2500000; | |
4511 | else if (WARN_ON(rate < 25000000)) | |
4512 | return 25000000; | |
4513 | else if (rate == 25000000) | |
4514 | return 25000000; | |
4515 | else if (WARN_ON(rate < 75000000)) | |
4516 | return 25000000; | |
4517 | else if (WARN_ON(rate < 125000000)) | |
4518 | return 125000000; | |
4519 | else if (rate == 125000000) | |
4520 | return 125000000; | |
4521 | ||
4522 | WARN_ON(rate > 125000000); | |
4523 | ||
4524 | return 125000000; | |
4525 | } | |
4526 | ||
4527 | static int fu540_macb_tx_set_rate(struct clk_hw *hw, unsigned long rate, | |
4528 | unsigned long parent_rate) | |
4529 | { | |
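| /* The management register appears to act as a clock mux: writing 1 |
| * selects the low-speed (10/100) TX clock, writing 0 the 125 MHz |
| * gigabit clock. |
| */ |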
4530 | rate = fu540_macb_tx_round_rate(hw, rate, &parent_rate); | |
4531 | if (rate != 125000000) | |
4532 | iowrite32(1, mgmt->reg); | |
4533 | else | |
4534 | iowrite32(0, mgmt->reg); | |
4535 | mgmt->rate = rate; | |
4536 | ||
4537 | return 0; | |
4538 | } | |
4539 | ||
4540 | static const struct clk_ops fu540_c000_ops = { | |
4541 | .recalc_rate = fu540_macb_tx_recalc_rate, | |
4542 | .round_rate = fu540_macb_tx_round_rate, | |
4543 | .set_rate = fu540_macb_tx_set_rate, | |
4544 | }; | |
4545 | ||
4546 | static int fu540_c000_clk_init(struct platform_device *pdev, struct clk **pclk, | |
4547 | struct clk **hclk, struct clk **tx_clk, | |
4548 | struct clk **rx_clk, struct clk **tsu_clk) | |
4549 | { | |
4550 | struct clk_init_data init; | |
4551 | int err = 0; | |
4552 | ||
4553 | err = macb_clk_init(pdev, pclk, hclk, tx_clk, rx_clk, tsu_clk); | |
4554 | if (err) | |
4555 | return err; | |
4556 | ||
4557 | mgmt = devm_kzalloc(&pdev->dev, sizeof(*mgmt), GFP_KERNEL); | |
f4de93f0 CB |
4558 | if (!mgmt) { |
4559 | err = -ENOMEM; | |
4560 | goto err_disable_clks; | |
4561 | } | |
c218ad55 YS |
4562 | |
4563 | init.name = "sifive-gemgxl-mgmt"; | |
4564 | init.ops = &fu540_c000_ops; | |
4565 | init.flags = 0; | |
4566 | init.num_parents = 0; | |
4567 | ||
4568 | mgmt->rate = 0; | |
4569 | mgmt->hw.init = &init; | |
4570 | ||
d89091a4 | 4571 | *tx_clk = devm_clk_register(&pdev->dev, &mgmt->hw); |
f4de93f0 CB |
4572 | if (IS_ERR(*tx_clk)) { |
4573 | err = PTR_ERR(*tx_clk); | |
4574 | goto err_disable_clks; | |
4575 | } | |
c218ad55 YS |
4576 | |
4577 | err = clk_prepare_enable(*tx_clk); | |
f4de93f0 | 4578 | if (err) { |
c218ad55 | 4579 | dev_err(&pdev->dev, "failed to enable tx_clk (%d)\n", err); |
f4de93f0 CB |
4580 | *tx_clk = NULL; |
4581 | goto err_disable_clks; | |
4582 | } else { | |
c218ad55 | 4583 | dev_info(&pdev->dev, "Registered clk switch '%s'\n", init.name); |
f4de93f0 | 4584 | } |
c218ad55 YS |
4585 | |
4586 | return 0; | |
f4de93f0 CB |
4587 | |
4588 | err_disable_clks: | |
4589 | macb_clks_disable(*pclk, *hclk, *tx_clk, *rx_clk, *tsu_clk); | |
4590 | ||
4591 | return err; | |
c218ad55 YS |
4592 | } |
4593 | ||
4594 | static int fu540_c000_init(struct platform_device *pdev) | |
4595 | { | |
b959c77d DZ |
4596 | mgmt->reg = devm_platform_ioremap_resource(pdev, 1); |
4597 | if (IS_ERR(mgmt->reg)) | |
4598 | return PTR_ERR(mgmt->reg); | |
c218ad55 YS |
4599 | |
4600 | return macb_init(pdev); | |
4601 | } | |
4602 | ||
ec771de6 CB |
4603 | static const struct macb_usrio_config sama7g5_usrio = { |
4604 | .mii = 0, | |
4605 | .rmii = 1, | |
4606 | .rgmii = 2, | |
4607 | .refclk = BIT(2), | |
4608 | .hdfctlen = BIT(6), | |
4609 | }; | |
4610 | ||
c218ad55 YS |
4611 | static const struct macb_config fu540_c000_config = { |
4612 | .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_JUMBO | | |
4613 | MACB_CAPS_GEM_HAS_PTP, | |
4614 | .dma_burst_length = 16, | |
4615 | .clk_init = fu540_c000_clk_init, | |
4616 | .init = fu540_c000_init, | |
4617 | .jumbo_max_len = 10240, | |
edac6386 | 4618 | .usrio = &macb_default_usrio, |
c218ad55 YS |
4619 | }; |
4620 | ||
3cef5c5b | 4621 | static const struct macb_config at91sam9260_config = { |
6bdaa5e9 | 4622 | .caps = MACB_CAPS_USRIO_HAS_CLKEN | MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII, |
c69618b3 | 4623 | .clk_init = macb_clk_init, |
421d9df0 | 4624 | .init = macb_init, |
edac6386 | 4625 | .usrio = &macb_default_usrio, |
421d9df0 CP |
4626 | }; |
4627 | ||
eb4ed8e2 NF |
4628 | static const struct macb_config sama5d3macb_config = { |
4629 | .caps = MACB_CAPS_SG_DISABLED | |
4630 | | MACB_CAPS_USRIO_HAS_CLKEN | MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII, | |
4631 | .clk_init = macb_clk_init, | |
4632 | .init = macb_init, | |
edac6386 | 4633 | .usrio = &macb_default_usrio, |
eb4ed8e2 NF |
4634 | }; |
4635 | ||
3cef5c5b | 4636 | static const struct macb_config pc302gem_config = { |
421d9df0 CP |
4637 | .caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE, |
4638 | .dma_burst_length = 16, | |
c69618b3 | 4639 | .clk_init = macb_clk_init, |
421d9df0 | 4640 | .init = macb_init, |
edac6386 | 4641 | .usrio = &macb_default_usrio, |
421d9df0 CP |
4642 | }; |
4643 | ||
5c8fe711 | 4644 | static const struct macb_config sama5d2_config = { |
6bdaa5e9 | 4645 | .caps = MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII, |
5c8fe711 CP |
4646 | .dma_burst_length = 16, |
4647 | .clk_init = macb_clk_init, | |
4648 | .init = macb_init, | |
edac6386 | 4649 | .usrio = &macb_default_usrio, |
5c8fe711 CP |
4650 | }; |
4651 | ||
7d13ad50 HP |
4652 | static const struct macb_config sama5d29_config = { |
4653 | .caps = MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII | MACB_CAPS_GEM_HAS_PTP, | |
4654 | .dma_burst_length = 16, | |
4655 | .clk_init = macb_clk_init, | |
4656 | .init = macb_init, | |
4657 | .usrio = &macb_default_usrio, | |
4658 | }; | |
4659 | ||
3cef5c5b | 4660 | static const struct macb_config sama5d3_config = { |
6bdaa5e9 | 4661 | .caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE |
233a1587 | 4662 | | MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII | MACB_CAPS_JUMBO, |
421d9df0 | 4663 | .dma_burst_length = 16, |
c69618b3 | 4664 | .clk_init = macb_clk_init, |
421d9df0 | 4665 | .init = macb_init, |
233a1587 | 4666 | .jumbo_max_len = 10240, |
edac6386 | 4667 | .usrio = &macb_default_usrio, |
421d9df0 CP |
4668 | }; |
4669 | ||
3cef5c5b | 4670 | static const struct macb_config sama5d4_config = { |
6bdaa5e9 | 4671 | .caps = MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII, |
421d9df0 | 4672 | .dma_burst_length = 4, |
c69618b3 | 4673 | .clk_init = macb_clk_init, |
421d9df0 | 4674 | .init = macb_init, |
edac6386 | 4675 | .usrio = &macb_default_usrio, |
421d9df0 CP |
4676 | }; |
4677 | ||
3cef5c5b | 4678 | static const struct macb_config emac_config = { |
ac2fcfa9 | 4679 | .caps = MACB_CAPS_NEEDS_RSTONUBR | MACB_CAPS_MACB_IS_EMAC, |
c69618b3 | 4680 | .clk_init = at91ether_clk_init, |
421d9df0 | 4681 | .init = at91ether_init, |
edac6386 | 4682 | .usrio = &macb_default_usrio, |
421d9df0 CP |
4683 | }; |
4684 | ||
e611b5b8 NA |
4685 | static const struct macb_config np4_config = { |
4686 | .caps = MACB_CAPS_USRIO_DISABLED, | |
4687 | .clk_init = macb_clk_init, | |
4688 | .init = macb_init, | |
edac6386 | 4689 | .usrio = &macb_default_usrio, |
e611b5b8 | 4690 | }; |
36583eb5 | 4691 | |
8b73fa3a RH |
4692 | static int zynqmp_init(struct platform_device *pdev) |
4693 | { | |
4694 | struct net_device *dev = platform_get_drvdata(pdev); | |
4695 | struct macb *bp = netdev_priv(dev); | |
4696 | int ret; | |
4697 | ||
4698 | if (bp->phy_interface == PHY_INTERFACE_MODE_SGMII) { | |
4699 | /* Ensure PS-GTR PHY device used in SGMII mode is ready */ | |
29e96fe9 | 4700 | bp->sgmii_phy = devm_phy_optional_get(&pdev->dev, NULL); |
8b73fa3a RH |
4701 | |
4702 | if (IS_ERR(bp->sgmii_phy)) { | |
4703 | ret = PTR_ERR(bp->sgmii_phy); | |
4704 | dev_err_probe(&pdev->dev, ret, | |
4705 | "failed to get PS-GTR PHY\n"); | |
4706 | return ret; | |
4707 | } | |
4708 | ||
4709 | ret = phy_init(bp->sgmii_phy); | |
4710 | if (ret) { | |
4711 | dev_err(&pdev->dev, "failed to init PS-GTR PHY: %d\n", | |
4712 | ret); | |
4713 | return ret; | |
4714 | } | |
4715 | } | |
4716 | ||
4717 | /* Fully reset GEM controller at hardware level using zynqmp-reset driver, | |
4718 | * if mapped in device tree. | |
4719 | */ | |
4720 | ret = device_reset_optional(&pdev->dev); | |
4721 | if (ret) { | |
4722 | dev_err_probe(&pdev->dev, ret, "failed to reset controller\n"); |
4723 | phy_exit(bp->sgmii_phy); | |
4724 | return ret; | |
4725 | } | |
4726 | ||
4727 | ret = macb_init(pdev); | |
4728 | if (ret) | |
4729 | phy_exit(bp->sgmii_phy); | |
4730 | ||
4731 | return ret; | |
4732 | } | |
4733 | ||
7b61f9c1 | 4734 | static const struct macb_config zynqmp_config = { |
ab91f0a9 RO |
4735 | .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | |
4736 | MACB_CAPS_JUMBO | | |
404cd086 | 4737 | MACB_CAPS_GEM_HAS_PTP | MACB_CAPS_BD_RD_PREFETCH, |
7b61f9c1 HK |
4738 | .dma_burst_length = 16, |
4739 | .clk_init = macb_clk_init, | |
8b73fa3a | 4740 | .init = zynqmp_init, |
98b5a0f4 | 4741 | .jumbo_max_len = 10240, |
edac6386 | 4742 | .usrio = &macb_default_usrio, |
7b61f9c1 HK |
4743 | }; |
4744 | ||
222ca8e0 | 4745 | static const struct macb_config zynq_config = { |
e501070e HK |
4746 | .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_NO_GIGABIT_HALF | |
4747 | MACB_CAPS_NEEDS_RSTONUBR, | |
222ca8e0 NS |
4748 | .dma_burst_length = 16, |
4749 | .clk_init = macb_clk_init, | |
4750 | .init = macb_init, | |
edac6386 | 4751 | .usrio = &macb_default_usrio, |
222ca8e0 NS |
4752 | }; |
4753 | ||
ec771de6 | 4754 | static const struct macb_config sama7g5_gem_config = { |
0f4f6d73 CB |
4755 | .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_CLK_HW_CHG | |
4756 | MACB_CAPS_MIIONRGMII, | |
ec771de6 CB |
4757 | .dma_burst_length = 16, |
4758 | .clk_init = macb_clk_init, | |
4759 | .init = macb_init, | |
4760 | .usrio = &sama7g5_usrio, | |
4761 | }; | |
4762 | ||
700d566e | 4763 | static const struct macb_config sama7g5_emac_config = { |
0f4f6d73 CB |
4764 | .caps = MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII | |
4765 | MACB_CAPS_USRIO_HAS_CLKEN | MACB_CAPS_MIIONRGMII, | |
700d566e CB |
4766 | .dma_burst_length = 16, |
4767 | .clk_init = macb_clk_init, | |
4768 | .init = macb_init, | |
4769 | .usrio = &sama7g5_usrio, | |
4770 | }; | |
4771 | ||
421d9df0 CP |
4772 | static const struct of_device_id macb_dt_ids[] = { |
4773 | { .compatible = "cdns,at32ap7000-macb" }, | |
4774 | { .compatible = "cdns,at91sam9260-macb", .data = &at91sam9260_config }, | |
4775 | { .compatible = "cdns,macb" }, | |
e611b5b8 | 4776 | { .compatible = "cdns,np4-macb", .data = &np4_config }, |
421d9df0 CP |
4777 | { .compatible = "cdns,pc302-gem", .data = &pc302gem_config }, |
4778 | { .compatible = "cdns,gem", .data = &pc302gem_config }, | |
3e3e0cdf | 4779 | { .compatible = "cdns,sam9x60-macb", .data = &at91sam9260_config }, |
5c8fe711 | 4780 | { .compatible = "atmel,sama5d2-gem", .data = &sama5d2_config }, |
7d13ad50 | 4781 | { .compatible = "atmel,sama5d29-gem", .data = &sama5d29_config }, |
421d9df0 | 4782 | { .compatible = "atmel,sama5d3-gem", .data = &sama5d3_config }, |
eb4ed8e2 | 4783 | { .compatible = "atmel,sama5d3-macb", .data = &sama5d3macb_config }, |
421d9df0 CP |
4784 | { .compatible = "atmel,sama5d4-gem", .data = &sama5d4_config }, |
4785 | { .compatible = "cdns,at91rm9200-emac", .data = &emac_config }, | |
4786 | { .compatible = "cdns,emac", .data = &emac_config }, | |
7b61f9c1 | 4787 | { .compatible = "cdns,zynqmp-gem", .data = &zynqmp_config}, |
222ca8e0 | 4788 | { .compatible = "cdns,zynq-gem", .data = &zynq_config }, |
6342ea88 | 4789 | { .compatible = "sifive,fu540-c000-gem", .data = &fu540_c000_config }, |
ec771de6 | 4790 | { .compatible = "microchip,sama7g5-gem", .data = &sama7g5_gem_config }, |
700d566e | 4791 | { .compatible = "microchip,sama7g5-emac", .data = &sama7g5_emac_config }, |
421d9df0 CP |
4792 | { /* sentinel */ } |
4793 | }; | |
4794 | MODULE_DEVICE_TABLE(of, macb_dt_ids); | |
4795 | #endif /* CONFIG_OF */ | |
4796 | ||
83a77e9e | 4797 | static const struct macb_config default_gem_config = { |
ab91f0a9 RO |
4798 | .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | |
4799 | MACB_CAPS_JUMBO | | |
4800 | MACB_CAPS_GEM_HAS_PTP, | |
83a77e9e BF |
4801 | .dma_burst_length = 16, |
4802 | .clk_init = macb_clk_init, | |
4803 | .init = macb_init, | |
b1242236 | 4804 | .usrio = &macb_default_usrio, |
83a77e9e BF |
4805 | .jumbo_max_len = 10240, |
4806 | }; | |
4807 | ||
421d9df0 CP |
4808 | static int macb_probe(struct platform_device *pdev) |
4809 | { | |
83a77e9e | 4810 | const struct macb_config *macb_config = &default_gem_config; |
c69618b3 | 4811 | int (*clk_init)(struct platform_device *, struct clk **, |
f5473d1d HK |
4812 | struct clk **, struct clk **, struct clk **, |
4813 | struct clk **) = macb_config->clk_init; | |
83a77e9e | 4814 | int (*init)(struct platform_device *) = macb_config->init; |
421d9df0 | 4815 | struct device_node *np = pdev->dev.of_node; |
aead88bd | 4816 | struct clk *pclk, *hclk = NULL, *tx_clk = NULL, *rx_clk = NULL; |
f5473d1d | 4817 | struct clk *tsu_clk = NULL; |
421d9df0 | 4818 | unsigned int queue_mask, num_queues; |
f2ce8a9e | 4819 | bool native_io; |
0c65b2b9 | 4820 | phy_interface_t interface; |
421d9df0 CP |
4821 | struct net_device *dev; |
4822 | struct resource *regs; | |
4823 | void __iomem *mem; | |
421d9df0 | 4824 | struct macb *bp; |
404cd086 | 4825 | int err, val; |
421d9df0 | 4826 | |
809660cb | 4827 | mem = devm_platform_get_and_ioremap_resource(pdev, 0, ®s); |
f2ce8a9e AS |
4828 | if (IS_ERR(mem)) |
4829 | return PTR_ERR(mem); | |
4830 | ||
c69618b3 NF |
4831 | if (np) { |
4832 | const struct of_device_id *match; | |
4833 | ||
4834 | match = of_match_node(macb_dt_ids, np); | |
4835 | if (match && match->data) { | |
4836 | macb_config = match->data; | |
4837 | clk_init = macb_config->clk_init; | |
4838 | init = macb_config->init; | |
4839 | } | |
4840 | } | |
4841 | ||
f5473d1d | 4842 | err = clk_init(pdev, &pclk, &hclk, &tx_clk, &rx_clk, &tsu_clk); |
c69618b3 NF |
4843 | if (err) |
4844 | return err; | |
4845 | ||
d54f89af HK |
4846 | pm_runtime_set_autosuspend_delay(&pdev->dev, MACB_PM_TIMEOUT); |
4847 | pm_runtime_use_autosuspend(&pdev->dev); | |
4848 | pm_runtime_get_noresume(&pdev->dev); | |
4849 | pm_runtime_set_active(&pdev->dev); | |
4850 | pm_runtime_enable(&pdev->dev); | |
f2ce8a9e | 4851 | native_io = hw_is_native_io(mem); |
421d9df0 | 4852 | |
f2ce8a9e | 4853 | macb_probe_queues(mem, native_io, &queue_mask, &num_queues); |
421d9df0 | 4854 | dev = alloc_etherdev_mq(sizeof(*bp), num_queues); |
c69618b3 NF |
4855 | if (!dev) { |
4856 | err = -ENOMEM; | |
4857 | goto err_disable_clocks; | |
4858 | } | |
421d9df0 CP |
4859 | |
4860 | dev->base_addr = regs->start; | |
4861 | ||
4862 | SET_NETDEV_DEV(dev, &pdev->dev); | |
4863 | ||
4864 | bp = netdev_priv(dev); | |
4865 | bp->pdev = pdev; | |
4866 | bp->dev = dev; | |
4867 | bp->regs = mem; | |
f2ce8a9e AS |
4868 | bp->native_io = native_io; |
4869 | if (native_io) { | |
7a6e0706 DM |
4870 | bp->macb_reg_readl = hw_readl_native; |
4871 | bp->macb_reg_writel = hw_writel_native; | |
f2ce8a9e | 4872 | } else { |
7a6e0706 DM |
4873 | bp->macb_reg_readl = hw_readl; |
4874 | bp->macb_reg_writel = hw_writel; | |
f2ce8a9e | 4875 | } |
421d9df0 | 4876 | bp->num_queues = num_queues; |
bfa0914a | 4877 | bp->queue_mask = queue_mask; |
c69618b3 NF |
4878 | if (macb_config) |
4879 | bp->dma_burst_length = macb_config->dma_burst_length; | |
4880 | bp->pclk = pclk; | |
4881 | bp->hclk = hclk; | |
4882 | bp->tx_clk = tx_clk; | |
aead88bd | 4883 | bp->rx_clk = rx_clk; |
f5473d1d | 4884 | bp->tsu_clk = tsu_clk; |
f36dbe6a | 4885 | if (macb_config) |
98b5a0f4 | 4886 | bp->jumbo_max_len = macb_config->jumbo_max_len; |
98b5a0f4 | 4887 | |
3e2a5e15 | 4888 | bp->wol = 0; |
7c4a1d0c | 4889 | if (of_get_property(np, "magic-packet", NULL)) |
3e2a5e15 | 4890 | bp->wol |= MACB_WOL_HAS_MAGIC_PACKET; |
ced4799d | 4891 | device_set_wakeup_capable(&pdev->dev, bp->wol & MACB_WOL_HAS_MAGIC_PACKET); |
3e2a5e15 | 4892 | |
edac6386 CB |
4893 | bp->usrio = macb_config->usrio; |
4894 | ||
421d9df0 CP |
4895 | spin_lock_init(&bp->lock); |
4896 | ||
ad78347f | 4897 | /* setup capabilities */ |
f6970505 NF |
4898 | macb_configure_caps(bp, macb_config); |
4899 | ||
7b429614 RO |
4900 | #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT |
4901 | if (GEM_BFEXT(DAW64, gem_readl(bp, DCFG6))) { | |
37f78606 | 4902 | dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(44)); |
7b429614 RO |
4903 | bp->hw_dma_cap |= HW_DMA_CAP_64B; |
4904 | } | |
4905 | #endif | |
421d9df0 CP |
4906 | platform_set_drvdata(pdev, dev); |
4907 | ||
4908 | dev->irq = platform_get_irq(pdev, 0); | |
c69618b3 NF |
4909 | if (dev->irq < 0) { |
4910 | err = dev->irq; | |
b22ae0b4 | 4911 | goto err_out_free_netdev; |
c69618b3 | 4912 | } |
421d9df0 | 4913 | |
44770e11 JW |
4914 | /* MTU range: 68 - 1500 or 10222 */ |
4915 | dev->min_mtu = GEM_MTU_MIN_SIZE; | |
46e31db5 OR |
4916 | if ((bp->caps & MACB_CAPS_JUMBO) && bp->jumbo_max_len) |
4917 | dev->max_mtu = bp->jumbo_max_len - ETH_HLEN - ETH_FCS_LEN; | |
44770e11 JW |
4918 | else |
4919 | dev->max_mtu = ETH_DATA_LEN; | |
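| /* jumbo_max_len is the full on-wire frame size, so the common |
| * 10240-byte limit yields an MTU of 10240 - 14 - 4 = 10222 bytes. |
| */ |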
4920 | ||
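| /* DCFG10 encodes the descriptor read prefetch depth as a power of |
| * two; (2 << (val - 1)) turns the field into a descriptor count, |
| * which is then scaled to bytes by the descriptor size. |
| */ |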
404cd086 HK |
4921 | if (bp->caps & MACB_CAPS_BD_RD_PREFETCH) { |
4922 | val = GEM_BFEXT(RXBD_RDBUFF, gem_readl(bp, DCFG10)); | |
4923 | if (val) | |
4924 | bp->rx_bd_rd_prefetch = (2 << (val - 1)) * | |
4925 | macb_dma_desc_get_size(bp); | |
4926 | ||
4927 | val = GEM_BFEXT(TXBD_RDBUFF, gem_readl(bp, DCFG10)); | |
4928 | if (val) | |
4929 | bp->tx_bd_rd_prefetch = (2 << (val - 1)) * | |
4930 | macb_dma_desc_get_size(bp); | |
4931 | } | |
4932 | ||
e501070e HK |
4933 | bp->rx_intr_mask = MACB_RX_INT_FLAGS; |
4934 | if (bp->caps & MACB_CAPS_NEEDS_RSTONUBR) | |
4935 | bp->rx_intr_mask |= MACB_BIT(RXUBR); | |
4936 | ||
9ca01b25 | 4937 | err = of_get_ethdev_address(np, bp->dev); |
83216e39 | 4938 | if (err == -EPROBE_DEFER) |
541ddc66 | 4939 | goto err_out_free_netdev; |
83216e39 | 4940 | else if (err) |
541ddc66 | 4941 | macb_get_hwaddr(bp); |
fb97a846 | 4942 | |
0c65b2b9 AL |
4943 | err = of_get_phy_mode(np, &interface); |
4944 | if (err) | |
8b952747 NF |
4945 | /* not found in DT, MII by default */ |
4946 | bp->phy_interface = PHY_INTERFACE_MODE_MII; | |
4947 | else | |
0c65b2b9 | 4948 | bp->phy_interface = interface; |
6c36a707 | 4949 | |
421d9df0 CP |
4950 | /* IP specific init */ |
4951 | err = init(pdev); | |
4952 | if (err) | |
4953 | goto err_out_free_netdev; | |
89e5785f | 4954 | |
cf669660 FF |
4955 | err = macb_mii_init(bp); |
4956 | if (err) | |
8b73fa3a | 4957 | goto err_out_phy_exit; |
cf669660 | 4958 | |
cf669660 FF |
4959 | netif_carrier_off(dev); |
4960 | ||
89e5785f HS |
4961 | err = register_netdev(dev); |
4962 | if (err) { | |
4963 | dev_err(&pdev->dev, "Cannot register net device, aborting.\n"); | |
cf669660 | 4964 | goto err_out_unregister_mdio; |
89e5785f HS |
4965 | } |
4966 | ||
e7412b83 | 4967 | tasklet_setup(&bp->hresp_err_tasklet, macb_hresp_error_task); |
032dc41b | 4968 | |
5879823f BS |
4969 | netdev_info(dev, "Cadence %s rev 0x%08x at 0x%08lx irq %d (%pM)\n", |
4970 | macb_is_gem(bp) ? "GEM" : "MACB", macb_readl(bp, MID), | |
4971 | dev->base_addr, dev->irq, dev->dev_addr); | |
89e5785f | 4972 | |
d54f89af HK |
4973 | pm_runtime_mark_last_busy(&bp->pdev->dev); |
4974 | pm_runtime_put_autosuspend(&bp->pdev->dev); | |
4975 | ||
89e5785f HS |
4976 | return 0; |
4977 | ||
cf669660 | 4978 | err_out_unregister_mdio: |
cf669660 FF |
4979 | mdiobus_unregister(bp->mii_bus); |
4980 | mdiobus_free(bp->mii_bus); | |
4981 | ||
8b73fa3a RH |
4982 | err_out_phy_exit: |
4983 | phy_exit(bp->sgmii_phy); | |
4984 | ||
cf250de0 | 4985 | err_out_free_netdev: |
02c958dd | 4986 | free_netdev(dev); |
421d9df0 | 4987 | |
c69618b3 | 4988 | err_disable_clocks: |
38493da4 | 4989 | macb_clks_disable(pclk, hclk, tx_clk, rx_clk, tsu_clk); |
d54f89af HK |
4990 | pm_runtime_disable(&pdev->dev); |
4991 | pm_runtime_set_suspended(&pdev->dev); | |
4992 | pm_runtime_dont_use_autosuspend(&pdev->dev); | |
c69618b3 | 4993 | |
89e5785f HS |
4994 | return err; |
4995 | } | |
4996 | ||
9e86d766 | 4997 | static int macb_remove(struct platform_device *pdev) |
89e5785f HS |
4998 | { |
4999 | struct net_device *dev; | |
5000 | struct macb *bp; | |
5001 | ||
5002 | dev = platform_get_drvdata(pdev); | |
5003 | ||
5004 | if (dev) { | |
5005 | bp = netdev_priv(dev); | |
8b73fa3a | 5006 | phy_exit(bp->sgmii_phy); |
298cf9be | 5007 | mdiobus_unregister(bp->mii_bus); |
298cf9be | 5008 | mdiobus_free(bp->mii_bus); |
5833e052 | 5009 | |
89e5785f | 5010 | unregister_netdev(dev); |
61183b05 | 5011 | tasklet_kill(&bp->hresp_err_tasklet); |
d54f89af HK |
5012 | pm_runtime_disable(&pdev->dev); |
5013 | pm_runtime_dont_use_autosuspend(&pdev->dev); | |
5014 | if (!pm_runtime_suspended(&pdev->dev)) { | |
38493da4 CB |
5015 | macb_clks_disable(bp->pclk, bp->hclk, bp->tx_clk, |
5016 | bp->rx_clk, bp->tsu_clk); | |
d54f89af HK |
5017 | pm_runtime_set_suspended(&pdev->dev); |
5018 | } | |
7897b071 | 5019 | phylink_destroy(bp->phylink); |
e965be7d | 5020 | free_netdev(dev); |
89e5785f HS |
5021 | } |
5022 | ||
5023 | return 0; | |
5024 | } | |
5025 | ||
d23823dd | 5026 | static int __maybe_unused macb_suspend(struct device *dev) |
c1f598fd | 5027 | { |
ce886a47 | 5028 | struct net_device *netdev = dev_get_drvdata(dev); |
c1f598fd | 5029 | struct macb *bp = netdev_priv(netdev); |
bbf6acea | 5030 | struct macb_queue *queue; |
de991c58 HK |
5031 | unsigned long flags; |
5032 | unsigned int q; | |
558e35cc | 5033 | int err; |
de991c58 HK |
5034 | |
5035 | if (!netif_running(netdev)) | |
5036 | return 0; | |
c1f598fd | 5037 | |
3e2a5e15 | 5038 | if (bp->wol & MACB_WOL_ENABLED) { |
558e35cc NF |
5039 | spin_lock_irqsave(&bp->lock, flags); |
5040 | /* Flush all status bits */ | |
5041 | macb_writel(bp, TSR, -1); | |
5042 | macb_writel(bp, RSR, -1); | |
de991c58 | 5043 | for (q = 0, queue = bp->queues; q < bp->num_queues; |
558e35cc NF |
5044 | ++q, ++queue) { |
5045 | /* Disable all interrupts */ | |
5046 | queue_writel(queue, IDR, -1); | |
5047 | queue_readl(queue, ISR); | |
5048 | if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) | |
5049 | queue_writel(queue, ISR, -1); | |
5050 | } | |
5051 | /* Swap in the WoL-only interrupt handler and |
5052 | * enable the WoL IRQ on queue 0 |
5053 | */ |
9d45c8e8 | 5054 | devm_free_irq(dev, bp->queues[0].irq, bp->queues); |
558e35cc | 5055 | if (macb_is_gem(bp)) { |
558e35cc NF |
5056 | err = devm_request_irq(dev, bp->queues[0].irq, gem_wol_interrupt, |
5057 | IRQF_SHARED, netdev->name, bp->queues); | |
5058 | if (err) { | |
5059 | dev_err(dev, | |
5060 | "Unable to request IRQ %d (error %d)\n", | |
5061 | bp->queues[0].irq, err); | |
5062 | spin_unlock_irqrestore(&bp->lock, flags); | |
5063 | return err; | |
5064 | } | |
5065 | queue_writel(bp->queues, IER, GEM_BIT(WOL)); | |
5066 | gem_writel(bp, WOL, MACB_BIT(MAG)); | |
5067 | } else { | |
9d45c8e8 NF |
5068 | err = devm_request_irq(dev, bp->queues[0].irq, macb_wol_interrupt, |
5069 | IRQF_SHARED, netdev->name, bp->queues); | |
5070 | if (err) { | |
5071 | dev_err(dev, | |
5072 | "Unable to request IRQ %d (error %d)\n", | |
5073 | bp->queues[0].irq, err); | |
5074 | spin_unlock_irqrestore(&bp->lock, flags); | |
5075 | return err; | |
5076 | } | |
558e35cc NF |
5077 | queue_writel(bp->queues, IER, MACB_BIT(WOL)); |
5078 | macb_writel(bp, WOL, MACB_BIT(MAG)); | |
5079 | } | |
5080 | spin_unlock_irqrestore(&bp->lock, flags); | |
5081 | ||
5082 | enable_irq_wake(bp->queues[0].irq); | |
5083 | } | |
5084 | ||
5085 | netif_device_detach(netdev); | |
5086 | for (q = 0, queue = bp->queues; q < bp->num_queues; | |
138badbc RH |
5087 | ++q, ++queue) { |
5088 | napi_disable(&queue->napi_rx); | |
5089 | napi_disable(&queue->napi_tx); | |
5090 | } | |
558e35cc NF |
5091 | |
5092 | if (!(bp->wol & MACB_WOL_ENABLED)) { | |
7897b071 AT |
5093 | rtnl_lock(); |
5094 | phylink_stop(bp->phylink); | |
5095 | rtnl_unlock(); | |
de991c58 HK |
5096 | spin_lock_irqsave(&bp->lock, flags); |
5097 | macb_reset_hw(bp); | |
5098 | spin_unlock_irqrestore(&bp->lock, flags); | |
558e35cc | 5099 | } |
c1e85c6c | 5100 | |
558e35cc NF |
5101 | if (!(bp->caps & MACB_CAPS_USRIO_DISABLED)) |
5102 | bp->pm_data.usrio = macb_or_gem_readl(bp, USRIO); | |
c1e85c6c | 5103 | |
558e35cc NF |
5104 | if (netdev->hw_features & NETIF_F_NTUPLE) |
5105 | bp->pm_data.scrt2 = gem_readl_n(bp, ETHT, SCRT2_ETHT); | |
d54f89af | 5106 | |
de991c58 HK |
5107 | if (bp->ptp_info) |
5108 | bp->ptp_info->ptp_remove(netdev); | |
6c8f85ca NF |
5109 | if (!device_may_wakeup(dev)) |
5110 | pm_runtime_force_suspend(dev); | |
c1f598fd HS |
5111 | |
5112 | return 0; | |
5113 | } | |
5114 | ||
d23823dd | 5115 | static int __maybe_unused macb_resume(struct device *dev) |
c1f598fd | 5116 | { |
ce886a47 | 5117 | struct net_device *netdev = dev_get_drvdata(dev); |
c1f598fd | 5118 | struct macb *bp = netdev_priv(netdev); |
bbf6acea | 5119 | struct macb_queue *queue; |
558e35cc | 5120 | unsigned long flags; |
de991c58 | 5121 | unsigned int q; |
558e35cc | 5122 | int err; |
de991c58 HK |
5123 | |
5124 | if (!netif_running(netdev)) | |
5125 | return 0; | |
c1f598fd | 5126 | |
6c8f85ca NF |
5127 | if (!device_may_wakeup(dev)) |
5128 | pm_runtime_force_resume(dev); | |
d54f89af | 5129 | |
3e2a5e15 | 5130 | if (bp->wol & MACB_WOL_ENABLED) { |
558e35cc NF |
5131 | spin_lock_irqsave(&bp->lock, flags); |
5132 | /* Disable WoL */ | |
5133 | if (macb_is_gem(bp)) { | |
5134 | queue_writel(bp->queues, IDR, GEM_BIT(WOL)); | |
5135 | gem_writel(bp, WOL, 0); | |
5136 | } else { | |
5137 | queue_writel(bp->queues, IDR, MACB_BIT(WOL)); | |
5138 | macb_writel(bp, WOL, 0); | |
5139 | } | |
5140 | /* Clear ISR on queue 0 */ | |
5141 | queue_readl(bp->queues, ISR); | |
5142 | if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) | |
5143 | queue_writel(bp->queues, ISR, -1); | |
5144 | /* Replace interrupt handler on queue 0 */ | |
5145 | devm_free_irq(dev, bp->queues[0].irq, bp->queues); | |
5146 | err = devm_request_irq(dev, bp->queues[0].irq, macb_interrupt, | |
5147 | IRQF_SHARED, netdev->name, bp->queues); | |
5148 | if (err) { | |
5149 | dev_err(dev, | |
5150 | "Unable to request IRQ %d (error %d)\n", | |
5151 | bp->queues[0].irq, err); | |
5152 | spin_unlock_irqrestore(&bp->lock, flags); | |
5153 | return err; | |
5154 | } | |
5155 | spin_unlock_irqrestore(&bp->lock, flags); | |
c1e85c6c | 5156 | |
558e35cc | 5157 | disable_irq_wake(bp->queues[0].irq); |
c1e85c6c | 5158 | |
558e35cc NF |
5159 | /* Now make sure the PHY is stopped before moving |
5160 | * on to the common restore path |
5161 | */ | |
7897b071 | 5162 | rtnl_lock(); |
558e35cc | 5163 | phylink_stop(bp->phylink); |
7897b071 | 5164 | rtnl_unlock(); |
d54f89af HK |
5165 | } |
5166 | ||
558e35cc | 5167 | for (q = 0, queue = bp->queues; q < bp->num_queues; |
138badbc RH |
5168 | ++q, ++queue) { |
5169 | napi_enable(&queue->napi_rx); | |
5170 | napi_enable(&queue->napi_tx); | |
5171 | } | |
558e35cc NF |
5172 | |
5173 | if (netdev->hw_features & NETIF_F_NTUPLE) | |
5174 | gem_writel_n(bp, ETHT, SCRT2_ETHT, bp->pm_data.scrt2); | |
5175 | ||
5176 | if (!(bp->caps & MACB_CAPS_USRIO_DISABLED)) | |
5177 | macb_or_gem_writel(bp, USRIO, bp->pm_data.usrio); | |
5178 | ||
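| /* Re-enable the management port (MDIO) before reprogramming the MAC */ |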
5179 | macb_writel(bp, NCR, MACB_BIT(MPE)); | |
de991c58 HK |
5180 | macb_init_hw(bp); |
5181 | macb_set_rx_mode(netdev); | |
c1e85c6c | 5182 | macb_restore_features(bp); |
558e35cc NF |
5183 | rtnl_lock(); |
5184 | phylink_start(bp->phylink); | |
5185 | rtnl_unlock(); | |
5186 | ||
d54f89af | 5187 | netif_device_attach(netdev); |
de991c58 HK |
5188 | if (bp->ptp_info) |
5189 | bp->ptp_info->ptp_init(netdev); | |
d54f89af HK |
5190 | |
5191 | return 0; | |
5192 | } | |
5193 | ||
5194 | static int __maybe_unused macb_runtime_suspend(struct device *dev) | |
5195 | { | |
f9cb7597 | 5196 | struct net_device *netdev = dev_get_drvdata(dev); |
d54f89af HK |
5197 | struct macb *bp = netdev_priv(netdev); |
5198 | ||
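| /* If the device may wake the system, the core clocks must stay |
| * running for WoL; only the TSU clock is gated in that case. |
| */ |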
38493da4 CB |
5199 | if (!(device_may_wakeup(dev))) |
5200 | macb_clks_disable(bp->pclk, bp->hclk, bp->tx_clk, bp->rx_clk, bp->tsu_clk); | |
5201 | else | |
5202 | macb_clks_disable(NULL, NULL, NULL, NULL, bp->tsu_clk); | |
d54f89af HK |
5203 | |
5204 | return 0; | |
5205 | } | |
5206 | ||
5207 | static int __maybe_unused macb_runtime_resume(struct device *dev) | |
5208 | { | |
f9cb7597 | 5209 | struct net_device *netdev = dev_get_drvdata(dev); |
d54f89af HK |
5210 | struct macb *bp = netdev_priv(netdev); |
5211 | ||
515a10a7 | 5212 | if (!(device_may_wakeup(dev))) { |
3e2a5e15 SP |
5213 | clk_prepare_enable(bp->pclk); |
5214 | clk_prepare_enable(bp->hclk); | |
5215 | clk_prepare_enable(bp->tx_clk); | |
aead88bd | 5216 | clk_prepare_enable(bp->rx_clk); |
3e2a5e15 | 5217 | } |
f5473d1d | 5218 | clk_prepare_enable(bp->tsu_clk); |
c1f598fd | 5219 | |
c1f598fd HS |
5220 | return 0; |
5221 | } | |
c1f598fd | 5222 | |
d54f89af HK |
5223 | static const struct dev_pm_ops macb_pm_ops = { |
5224 | SET_SYSTEM_SLEEP_PM_OPS(macb_suspend, macb_resume) | |
5225 | SET_RUNTIME_PM_OPS(macb_runtime_suspend, macb_runtime_resume, NULL) | |
5226 | }; | |
0dfc3e18 | 5227 | |
89e5785f | 5228 | static struct platform_driver macb_driver = { |
9e86d766 NR |
5229 | .probe = macb_probe, |
5230 | .remove = macb_remove, | |
89e5785f HS |
5231 | .driver = { |
5232 | .name = "macb", | |
fb97a846 | 5233 | .of_match_table = of_match_ptr(macb_dt_ids), |
0dfc3e18 | 5234 | .pm = &macb_pm_ops, |
89e5785f HS |
5235 | }, |
5236 | }; | |
5237 | ||
9e86d766 | 5238 | module_platform_driver(macb_driver); |
89e5785f HS |
5239 | |
5240 | MODULE_LICENSE("GPL"); | |
f75ba50b | 5241 | MODULE_DESCRIPTION("Cadence MACB/GEM Ethernet driver"); |
e05503ef | 5242 | MODULE_AUTHOR("Haavard Skinnemoen (Atmel)"); |
72abb461 | 5243 | MODULE_ALIAS("platform:macb"); |