// SPDX-License-Identifier: GPL-2.0
/*
 * Driver for Marvell PPv2 network controller for Armada 375 SoC.
 *
 * Copyright (C) 2014 Marvell
 *
 * Marcin Wojtas <mw@semihalf.com>
 */

#include <linux/acpi.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/platform_device.h>
#include <linux/skbuff.h>
#include <linux/inetdevice.h>
#include <linux/mbus.h>
#include <linux/module.h>
#include <linux/mfd/syscon.h>
#include <linux/interrupt.h>
#include <linux/cpumask.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/of_address.h>
#include <linux/phy.h>
#include <linux/phylink.h>
#include <linux/phy/phy.h>
#include <linux/ptp_classify.h>
#include <linux/clk.h>
#include <linux/hrtimer.h>
#include <linux/ktime.h>
#include <linux/regmap.h>
#include <uapi/linux/ppp_defs.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/page_pool/helpers.h>
#include <net/tso.h>
#include <linux/bpf_trace.h>

#include "mvpp2.h"
#include "mvpp2_prs.h"
#include "mvpp2_cls.h"

enum mvpp2_bm_pool_log_num {
	MVPP2_BM_SHORT,
	MVPP2_BM_LONG,
	MVPP2_BM_JUMBO,
	MVPP2_BM_POOLS_NUM
};

static struct {
	int pkt_size;
	int buf_num;
} mvpp2_pools[MVPP2_BM_POOLS_NUM];

/* The prototype is added here to be used in start_dev when using ACPI. This
 * will be removed once phylink is used for all modes (dt+ACPI).
 */
static void mvpp2_acpi_start(struct mvpp2_port *port);

/* Queue modes */
#define MVPP2_QDIST_SINGLE_MODE	0
#define MVPP2_QDIST_MULTI_MODE	1

static int queue_mode = MVPP2_QDIST_MULTI_MODE;

module_param(queue_mode, int, 0444);
MODULE_PARM_DESC(queue_mode, "Set queue_mode (single=0, multi=1)");
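
/* Illustrative only: assuming the driver is built as a module named mvpp2,
 * single-queue distribution mode can be requested at load time with
 * "modprobe mvpp2 queue_mode=0". The 0444 permission makes the parameter
 * read-only through sysfs once the module is loaded.
 */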

/* Utility/helper methods */

void mvpp2_write(struct mvpp2 *priv, u32 offset, u32 data)
{
	writel(data, priv->swth_base[0] + offset);
}

u32 mvpp2_read(struct mvpp2 *priv, u32 offset)
{
	return readl(priv->swth_base[0] + offset);
}

static u32 mvpp2_read_relaxed(struct mvpp2 *priv, u32 offset)
{
	return readl_relaxed(priv->swth_base[0] + offset);
}

static inline u32 mvpp2_cpu_to_thread(struct mvpp2 *priv, int cpu)
{
	return cpu % priv->nthreads;
}

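/* For example, with priv->nthreads == 4, CPUs 0-3 map to threads 0-3 and
 * CPU 4 wraps around to thread 0.
 */
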
static void mvpp2_cm3_write(struct mvpp2 *priv, u32 offset, u32 data)
{
	writel(data, priv->cm3_base + offset);
}

static u32 mvpp2_cm3_read(struct mvpp2 *priv, u32 offset)
{
	return readl(priv->cm3_base + offset);
}

static struct page_pool *
mvpp2_create_page_pool(struct device *dev, int num, int len,
		       enum dma_data_direction dma_dir)
{
	struct page_pool_params pp_params = {
		/* internal DMA mapping in page_pool */
		.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
		.pool_size = num,
		.nid = NUMA_NO_NODE,
		.dev = dev,
		.dma_dir = dma_dir,
		.offset = MVPP2_SKB_HEADROOM,
		.max_len = len,
	};

	return page_pool_create(&pp_params);
}

/* These accessors should be used to access:
 *
 * - per-thread registers, where each thread has its own copy of the
 *   register.
 *
 *   MVPP2_BM_VIRT_ALLOC_REG
 *   MVPP2_BM_ADDR_HIGH_ALLOC
 *   MVPP22_BM_ADDR_HIGH_RLS_REG
 *   MVPP2_BM_VIRT_RLS_REG
 *   MVPP2_ISR_RX_TX_CAUSE_REG
 *   MVPP2_ISR_RX_TX_MASK_REG
 *   MVPP2_TXQ_NUM_REG
 *   MVPP2_AGGR_TXQ_UPDATE_REG
 *   MVPP2_TXQ_RSVD_REQ_REG
 *   MVPP2_TXQ_RSVD_RSLT_REG
 *   MVPP2_TXQ_SENT_REG
 *   MVPP2_RXQ_NUM_REG
 *
 * - global registers that must be accessed through a specific thread
 *   window, because they are related to an access to a per-thread
 *   register.
 *
 *   MVPP2_BM_PHY_ALLOC_REG (related to MVPP2_BM_VIRT_ALLOC_REG)
 *   MVPP2_BM_PHY_RLS_REG (related to MVPP2_BM_VIRT_RLS_REG)
 *   MVPP2_RXQ_THRESH_REG (related to MVPP2_RXQ_NUM_REG)
 *   MVPP2_RXQ_DESC_ADDR_REG (related to MVPP2_RXQ_NUM_REG)
 *   MVPP2_RXQ_DESC_SIZE_REG (related to MVPP2_RXQ_NUM_REG)
 *   MVPP2_RXQ_INDEX_REG (related to MVPP2_RXQ_NUM_REG)
 *   MVPP2_TXQ_PENDING_REG (related to MVPP2_TXQ_NUM_REG)
 *   MVPP2_TXQ_DESC_ADDR_REG (related to MVPP2_TXQ_NUM_REG)
 *   MVPP2_TXQ_DESC_SIZE_REG (related to MVPP2_TXQ_NUM_REG)
 *   MVPP2_TXQ_INDEX_REG (related to MVPP2_TXQ_NUM_REG)
 *   MVPP2_TXQ_PREF_BUF_REG (related to MVPP2_TXQ_NUM_REG)
 */
static void mvpp2_thread_write(struct mvpp2 *priv, unsigned int thread,
			       u32 offset, u32 data)
{
	writel(data, priv->swth_base[thread] + offset);
}

static u32 mvpp2_thread_read(struct mvpp2 *priv, unsigned int thread,
			     u32 offset)
{
	return readl(priv->swth_base[thread] + offset);
}

static void mvpp2_thread_write_relaxed(struct mvpp2 *priv, unsigned int thread,
				       u32 offset, u32 data)
{
	writel_relaxed(data, priv->swth_base[thread] + offset);
}

static u32 mvpp2_thread_read_relaxed(struct mvpp2 *priv, unsigned int thread,
				     u32 offset)
{
	return readl_relaxed(priv->swth_base[thread] + offset);
}

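/* Illustrative sketch of the windowed-access pattern described above: to read
 * a per-queue register such as MVPP2_TXQ_PENDING_REG, the queue must first be
 * selected through the same thread window:
 *
 *	mvpp2_thread_write(priv, thread, MVPP2_TXQ_NUM_REG, txq->id);
 *	val = mvpp2_thread_read(priv, thread, MVPP2_TXQ_PENDING_REG);
 */
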
static dma_addr_t mvpp2_txdesc_dma_addr_get(struct mvpp2_port *port,
					    struct mvpp2_tx_desc *tx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return le32_to_cpu(tx_desc->pp21.buf_dma_addr);
	else
		return le64_to_cpu(tx_desc->pp22.buf_dma_addr_ptp) &
		       MVPP2_DESC_DMA_MASK;
}

static void mvpp2_txdesc_dma_addr_set(struct mvpp2_port *port,
				      struct mvpp2_tx_desc *tx_desc,
				      dma_addr_t dma_addr)
{
	dma_addr_t addr, offset;

	addr = dma_addr & ~MVPP2_TX_DESC_ALIGN;
	offset = dma_addr & MVPP2_TX_DESC_ALIGN;

	if (port->priv->hw_version == MVPP21) {
		tx_desc->pp21.buf_dma_addr = cpu_to_le32(addr);
		tx_desc->pp21.packet_offset = offset;
	} else {
		__le64 val = cpu_to_le64(addr);

		tx_desc->pp22.buf_dma_addr_ptp &= ~cpu_to_le64(MVPP2_DESC_DMA_MASK);
		tx_desc->pp22.buf_dma_addr_ptp |= val;
		tx_desc->pp22.packet_offset = offset;
	}
}

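/* Worked example (assuming MVPP2_TX_DESC_ALIGN == 0x3f, see mvpp2.h): for
 * dma_addr == 0x12345678 the descriptor stores addr == 0x12345640 and
 * packet_offset == 0x38, so the hardware still reads from the exact
 * original address.
 */
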
static size_t mvpp2_txdesc_size_get(struct mvpp2_port *port,
				    struct mvpp2_tx_desc *tx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return le16_to_cpu(tx_desc->pp21.data_size);
	else
		return le16_to_cpu(tx_desc->pp22.data_size);
}

static void mvpp2_txdesc_size_set(struct mvpp2_port *port,
				  struct mvpp2_tx_desc *tx_desc,
				  size_t size)
{
	if (port->priv->hw_version == MVPP21)
		tx_desc->pp21.data_size = cpu_to_le16(size);
	else
		tx_desc->pp22.data_size = cpu_to_le16(size);
}

static void mvpp2_txdesc_txq_set(struct mvpp2_port *port,
				 struct mvpp2_tx_desc *tx_desc,
				 unsigned int txq)
{
	if (port->priv->hw_version == MVPP21)
		tx_desc->pp21.phys_txq = txq;
	else
		tx_desc->pp22.phys_txq = txq;
}

static void mvpp2_txdesc_cmd_set(struct mvpp2_port *port,
				 struct mvpp2_tx_desc *tx_desc,
				 unsigned int command)
{
	if (port->priv->hw_version == MVPP21)
		tx_desc->pp21.command = cpu_to_le32(command);
	else
		tx_desc->pp22.command = cpu_to_le32(command);
}

static unsigned int mvpp2_txdesc_offset_get(struct mvpp2_port *port,
					    struct mvpp2_tx_desc *tx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return tx_desc->pp21.packet_offset;
	else
		return tx_desc->pp22.packet_offset;
}

static dma_addr_t mvpp2_rxdesc_dma_addr_get(struct mvpp2_port *port,
					    struct mvpp2_rx_desc *rx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return le32_to_cpu(rx_desc->pp21.buf_dma_addr);
	else
		return le64_to_cpu(rx_desc->pp22.buf_dma_addr_key_hash) &
		       MVPP2_DESC_DMA_MASK;
}

static unsigned long mvpp2_rxdesc_cookie_get(struct mvpp2_port *port,
					     struct mvpp2_rx_desc *rx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return le32_to_cpu(rx_desc->pp21.buf_cookie);
	else
		return le64_to_cpu(rx_desc->pp22.buf_cookie_misc) &
		       MVPP2_DESC_DMA_MASK;
}

static size_t mvpp2_rxdesc_size_get(struct mvpp2_port *port,
				    struct mvpp2_rx_desc *rx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return le16_to_cpu(rx_desc->pp21.data_size);
	else
		return le16_to_cpu(rx_desc->pp22.data_size);
}

static u32 mvpp2_rxdesc_status_get(struct mvpp2_port *port,
				   struct mvpp2_rx_desc *rx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return le32_to_cpu(rx_desc->pp21.status);
	else
		return le32_to_cpu(rx_desc->pp22.status);
}

static void mvpp2_txq_inc_get(struct mvpp2_txq_pcpu *txq_pcpu)
{
	txq_pcpu->txq_get_index++;
	if (txq_pcpu->txq_get_index == txq_pcpu->size)
		txq_pcpu->txq_get_index = 0;
}

static void mvpp2_txq_inc_put(struct mvpp2_port *port,
			      struct mvpp2_txq_pcpu *txq_pcpu,
			      void *data,
			      struct mvpp2_tx_desc *tx_desc,
			      enum mvpp2_tx_buf_type buf_type)
{
	struct mvpp2_txq_pcpu_buf *tx_buf =
		txq_pcpu->buffs + txq_pcpu->txq_put_index;
	tx_buf->type = buf_type;
	if (buf_type == MVPP2_TYPE_SKB)
		tx_buf->skb = data;
	else
		tx_buf->xdpf = data;
	tx_buf->size = mvpp2_txdesc_size_get(port, tx_desc);
	tx_buf->dma = mvpp2_txdesc_dma_addr_get(port, tx_desc) +
		mvpp2_txdesc_offset_get(port, tx_desc);
	txq_pcpu->txq_put_index++;
	if (txq_pcpu->txq_put_index == txq_pcpu->size)
		txq_pcpu->txq_put_index = 0;
}

/* Get the maximum number of RXQs */
static int mvpp2_get_nrxqs(struct mvpp2 *priv)
{
	unsigned int nrxqs;

	if (priv->hw_version >= MVPP22 && queue_mode == MVPP2_QDIST_SINGLE_MODE)
		return 1;

	/* According to the PPv2.2 datasheet and our experiments on
	 * PPv2.1, RX queues have an allocation granularity of 4 (when
	 * more than a single one on PPv2.2).
	 * Round up to the nearest multiple of 4.
	 */
	nrxqs = (num_possible_cpus() + 3) & ~0x3;
	if (nrxqs > MVPP2_PORT_MAX_RXQ)
		nrxqs = MVPP2_PORT_MAX_RXQ;

	return nrxqs;
}

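/* Worked example: on a system with six possible CPUs,
 * (6 + 3) & ~0x3 == 8, so eight RX queues are allocated (subject to the
 * MVPP2_PORT_MAX_RXQ cap).
 */
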
/* Get the physical egress port number */
static inline int mvpp2_egress_port(struct mvpp2_port *port)
{
	return MVPP2_MAX_TCONT + port->id;
}

/* Get the physical TXQ number */
static inline int mvpp2_txq_phys(int port, int txq)
{
	return (MVPP2_MAX_TCONT + port) * MVPP2_MAX_TXQ + txq;
}

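/* Purely illustrative values: if MVPP2_MAX_TCONT were 16 and MVPP2_MAX_TXQ
 * were 8 (see mvpp2.h for the real definitions), port 1 / txq 2 would map
 * to physical TXQ (16 + 1) * 8 + 2 == 138.
 */
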
/* Returns a struct page if page_pool is set, otherwise a buffer */
static void *mvpp2_frag_alloc(const struct mvpp2_bm_pool *pool,
			      struct page_pool *page_pool)
{
	if (page_pool)
		return page_pool_dev_alloc_pages(page_pool);

	if (likely(pool->frag_size <= PAGE_SIZE))
		return netdev_alloc_frag(pool->frag_size);

	return kmalloc(pool->frag_size, GFP_ATOMIC);
}

static void mvpp2_frag_free(const struct mvpp2_bm_pool *pool,
			    struct page_pool *page_pool, void *data)
{
	if (page_pool)
		page_pool_put_full_page(page_pool, virt_to_head_page(data), false);
	else if (likely(pool->frag_size <= PAGE_SIZE))
		skb_free_frag(data);
	else
		kfree(data);
}

/* Buffer Manager configuration routines */

/* Create pool */
static int mvpp2_bm_pool_create(struct device *dev, struct mvpp2 *priv,
				struct mvpp2_bm_pool *bm_pool, int size)
{
	u32 val;

	/* Number of buffer pointers must be a multiple of 16, as per
	 * hardware constraints
	 */
	if (!IS_ALIGNED(size, 16))
		return -EINVAL;

	/* PPv2.1 needs 8 bytes per buffer pointer, PPv2.2 and PPv2.3 need
	 * 16 bytes per buffer pointer
	 */
	if (priv->hw_version == MVPP21)
		bm_pool->size_bytes = 2 * sizeof(u32) * size;
	else
		bm_pool->size_bytes = 2 * sizeof(u64) * size;

	bm_pool->virt_addr = dma_alloc_coherent(dev, bm_pool->size_bytes,
						&bm_pool->dma_addr,
						GFP_KERNEL);
	if (!bm_pool->virt_addr)
		return -ENOMEM;

	if (!IS_ALIGNED((unsigned long)bm_pool->virt_addr,
			MVPP2_BM_POOL_PTR_ALIGN)) {
		dma_free_coherent(dev, bm_pool->size_bytes,
				  bm_pool->virt_addr, bm_pool->dma_addr);
		dev_err(dev, "BM pool %d is not %d bytes aligned\n",
			bm_pool->id, MVPP2_BM_POOL_PTR_ALIGN);
		return -ENOMEM;
	}

	mvpp2_write(priv, MVPP2_BM_POOL_BASE_REG(bm_pool->id),
		    lower_32_bits(bm_pool->dma_addr));
	mvpp2_write(priv, MVPP2_BM_POOL_SIZE_REG(bm_pool->id), size);

	val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id));
	val |= MVPP2_BM_START_MASK;

	val &= ~MVPP2_BM_LOW_THRESH_MASK;
	val &= ~MVPP2_BM_HIGH_THRESH_MASK;

	/* Set 8 Pools BPPI threshold for MVPP23 */
	if (priv->hw_version == MVPP23) {
		val |= MVPP2_BM_LOW_THRESH_VALUE(MVPP23_BM_BPPI_LOW_THRESH);
		val |= MVPP2_BM_HIGH_THRESH_VALUE(MVPP23_BM_BPPI_HIGH_THRESH);
	} else {
		val |= MVPP2_BM_LOW_THRESH_VALUE(MVPP2_BM_BPPI_LOW_THRESH);
		val |= MVPP2_BM_HIGH_THRESH_VALUE(MVPP2_BM_BPPI_HIGH_THRESH);
	}

	mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val);

	bm_pool->size = size;
	bm_pool->pkt_size = 0;
	bm_pool->buf_num = 0;

	return 0;
}

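/* Sizing example: on PPv2.2/PPv2.3, a pool with size == 2048 needs
 * 2 * sizeof(u64) * 2048 == 32 KiB of DMA-coherent pointer memory
 * (16 KiB on PPv2.1, which uses 32-bit pointer pairs).
 */
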
/* Set pool buffer size */
static void mvpp2_bm_pool_bufsize_set(struct mvpp2 *priv,
				      struct mvpp2_bm_pool *bm_pool,
				      int buf_size)
{
	u32 val;

	bm_pool->buf_size = buf_size;

	val = ALIGN(buf_size, 1 << MVPP2_POOL_BUF_SIZE_OFFSET);
	mvpp2_write(priv, MVPP2_POOL_BUF_SIZE_REG(bm_pool->id), val);
}

static void mvpp2_bm_bufs_get_addrs(struct device *dev, struct mvpp2 *priv,
				    struct mvpp2_bm_pool *bm_pool,
				    dma_addr_t *dma_addr,
				    phys_addr_t *phys_addr)
{
	unsigned int thread = mvpp2_cpu_to_thread(priv, get_cpu());

	*dma_addr = mvpp2_thread_read(priv, thread,
				      MVPP2_BM_PHY_ALLOC_REG(bm_pool->id));
	*phys_addr = mvpp2_thread_read(priv, thread, MVPP2_BM_VIRT_ALLOC_REG);

	if (priv->hw_version >= MVPP22) {
		u32 val;
		u32 dma_addr_highbits, phys_addr_highbits;

		val = mvpp2_thread_read(priv, thread, MVPP22_BM_ADDR_HIGH_ALLOC);
		dma_addr_highbits = (val & MVPP22_BM_ADDR_HIGH_PHYS_MASK);
		phys_addr_highbits = (val & MVPP22_BM_ADDR_HIGH_VIRT_MASK) >>
			MVPP22_BM_ADDR_HIGH_VIRT_SHIFT;

		if (sizeof(dma_addr_t) == 8)
			*dma_addr |= (u64)dma_addr_highbits << 32;

		if (sizeof(phys_addr_t) == 8)
			*phys_addr |= (u64)phys_addr_highbits << 32;
	}

	put_cpu();
}

/* Free all buffers from the pool */
static void mvpp2_bm_bufs_free(struct device *dev, struct mvpp2 *priv,
			       struct mvpp2_bm_pool *bm_pool, int buf_num)
{
	struct page_pool *pp = NULL;
	int i;

	if (buf_num > bm_pool->buf_num) {
		WARN(1, "Pool does not have so many bufs pool(%d) bufs(%d)\n",
		     bm_pool->id, buf_num);
		buf_num = bm_pool->buf_num;
	}

	if (priv->percpu_pools)
		pp = priv->page_pool[bm_pool->id];

	for (i = 0; i < buf_num; i++) {
		dma_addr_t buf_dma_addr;
		phys_addr_t buf_phys_addr;
		void *data;

		mvpp2_bm_bufs_get_addrs(dev, priv, bm_pool,
					&buf_dma_addr, &buf_phys_addr);

		if (!pp)
			dma_unmap_single(dev, buf_dma_addr,
					 bm_pool->buf_size, DMA_FROM_DEVICE);

		data = (void *)phys_to_virt(buf_phys_addr);
		if (!data)
			break;

		mvpp2_frag_free(bm_pool, pp, data);
	}

	/* Update BM driver with number of buffers removed from pool */
	bm_pool->buf_num -= i;
}

/* Check number of buffers in BM pool */
static int mvpp2_check_hw_buf_num(struct mvpp2 *priv, struct mvpp2_bm_pool *bm_pool)
{
	int buf_num = 0;

	buf_num += mvpp2_read(priv, MVPP2_BM_POOL_PTRS_NUM_REG(bm_pool->id)) &
		MVPP22_BM_POOL_PTRS_NUM_MASK;
	buf_num += mvpp2_read(priv, MVPP2_BM_BPPI_PTRS_NUM_REG(bm_pool->id)) &
		MVPP2_BM_BPPI_PTR_NUM_MASK;

	/* HW has one buffer ready which is not reflected in the counters */
	if (buf_num)
		buf_num += 1;

	return buf_num;
}

/* Cleanup pool */
static int mvpp2_bm_pool_destroy(struct device *dev, struct mvpp2 *priv,
				 struct mvpp2_bm_pool *bm_pool)
{
	int buf_num;
	u32 val;

	buf_num = mvpp2_check_hw_buf_num(priv, bm_pool);
	mvpp2_bm_bufs_free(dev, priv, bm_pool, buf_num);

	/* Check buffer counters after free */
	buf_num = mvpp2_check_hw_buf_num(priv, bm_pool);
	if (buf_num) {
		WARN(1, "cannot free all buffers in pool %d, buf_num left %d\n",
		     bm_pool->id, bm_pool->buf_num);
		return 0;
	}

	val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id));
	val |= MVPP2_BM_STOP_MASK;
	mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val);

	if (priv->percpu_pools) {
		page_pool_destroy(priv->page_pool[bm_pool->id]);
		priv->page_pool[bm_pool->id] = NULL;
	}

	dma_free_coherent(dev, bm_pool->size_bytes,
			  bm_pool->virt_addr,
			  bm_pool->dma_addr);
	return 0;
}

static int mvpp2_bm_pools_init(struct device *dev, struct mvpp2 *priv)
{
	int i, err, size, poolnum = MVPP2_BM_POOLS_NUM;
	struct mvpp2_bm_pool *bm_pool;

	if (priv->percpu_pools)
		poolnum = mvpp2_get_nrxqs(priv) * 2;

	/* Create all pools with maximum size */
	size = MVPP2_BM_POOL_SIZE_MAX;
	for (i = 0; i < poolnum; i++) {
		bm_pool = &priv->bm_pools[i];
		bm_pool->id = i;
		err = mvpp2_bm_pool_create(dev, priv, bm_pool, size);
		if (err)
			goto err_unroll_pools;
		mvpp2_bm_pool_bufsize_set(priv, bm_pool, 0);
	}
	return 0;

err_unroll_pools:
	dev_err(dev, "failed to create BM pool %d, size %d\n", i, size);
	for (i = i - 1; i >= 0; i--)
		mvpp2_bm_pool_destroy(dev, priv, &priv->bm_pools[i]);
	return err;
}

/* Enable the PPv23 8-pool mode */
static void mvpp23_bm_set_8pool_mode(struct mvpp2 *priv)
{
	int val;

	val = mvpp2_read(priv, MVPP22_BM_POOL_BASE_ADDR_HIGH_REG);
	val |= MVPP23_BM_8POOL_MODE;
	mvpp2_write(priv, MVPP22_BM_POOL_BASE_ADDR_HIGH_REG, val);
}

/* Cleanup pool before actual initialization in the OS */
static void mvpp2_bm_pool_cleanup(struct mvpp2 *priv, int pool_id)
{
	unsigned int thread = mvpp2_cpu_to_thread(priv, get_cpu());
	u32 val;
	int i;

	/* Drain the BM from all possible residues left by firmware */
	for (i = 0; i < MVPP2_BM_POOL_SIZE_MAX; i++)
		mvpp2_thread_read(priv, thread, MVPP2_BM_PHY_ALLOC_REG(pool_id));

	put_cpu();

	/* Stop the BM pool */
	val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(pool_id));
	val |= MVPP2_BM_STOP_MASK;
	mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(pool_id), val);
}

static int mvpp2_bm_init(struct device *dev, struct mvpp2 *priv)
{
	enum dma_data_direction dma_dir = DMA_FROM_DEVICE;
	int i, err, poolnum = MVPP2_BM_POOLS_NUM;
	struct mvpp2_port *port;

	if (priv->percpu_pools)
		poolnum = mvpp2_get_nrxqs(priv) * 2;

	/* Clean up the pool state in case it contains stale state */
	for (i = 0; i < poolnum; i++)
		mvpp2_bm_pool_cleanup(priv, i);

	if (priv->percpu_pools) {
		for (i = 0; i < priv->port_count; i++) {
			port = priv->port_list[i];
			if (port->xdp_prog) {
				dma_dir = DMA_BIDIRECTIONAL;
				break;
			}
		}

		for (i = 0; i < poolnum; i++) {
			/* the pool in use */
			int pn = i / (poolnum / 2);

			priv->page_pool[i] =
				mvpp2_create_page_pool(dev,
						       mvpp2_pools[pn].buf_num,
						       mvpp2_pools[pn].pkt_size,
						       dma_dir);
			if (IS_ERR(priv->page_pool[i])) {
				int j;

				for (j = 0; j < i; j++) {
					page_pool_destroy(priv->page_pool[j]);
					priv->page_pool[j] = NULL;
				}
				return PTR_ERR(priv->page_pool[i]);
			}
		}
	}

	dev_info(dev, "using %d %s buffers\n", poolnum,
		 priv->percpu_pools ? "per-cpu" : "shared");

	for (i = 0; i < poolnum; i++) {
		/* Mask BM all interrupts */
		mvpp2_write(priv, MVPP2_BM_INTR_MASK_REG(i), 0);
		/* Clear BM cause register */
		mvpp2_write(priv, MVPP2_BM_INTR_CAUSE_REG(i), 0);
	}

	/* Allocate and initialize BM pools */
	priv->bm_pools = devm_kcalloc(dev, poolnum,
				      sizeof(*priv->bm_pools), GFP_KERNEL);
	if (!priv->bm_pools)
		return -ENOMEM;

	if (priv->hw_version == MVPP23)
		mvpp23_bm_set_8pool_mode(priv);

	err = mvpp2_bm_pools_init(dev, priv);
	if (err < 0)
		return err;
	return 0;
}

static void mvpp2_setup_bm_pool(void)
{
	/* Short pool */
	mvpp2_pools[MVPP2_BM_SHORT].buf_num = MVPP2_BM_SHORT_BUF_NUM;
	mvpp2_pools[MVPP2_BM_SHORT].pkt_size = MVPP2_BM_SHORT_PKT_SIZE;

	/* Long pool */
	mvpp2_pools[MVPP2_BM_LONG].buf_num = MVPP2_BM_LONG_BUF_NUM;
	mvpp2_pools[MVPP2_BM_LONG].pkt_size = MVPP2_BM_LONG_PKT_SIZE;

	/* Jumbo pool */
	mvpp2_pools[MVPP2_BM_JUMBO].buf_num = MVPP2_BM_JUMBO_BUF_NUM;
	mvpp2_pools[MVPP2_BM_JUMBO].pkt_size = MVPP2_BM_JUMBO_PKT_SIZE;
}

/* Attach long pool to rxq */
static void mvpp2_rxq_long_pool_set(struct mvpp2_port *port,
				    int lrxq, int long_pool)
{
	u32 val, mask;
	int prxq;

	/* Get queue physical ID */
	prxq = port->rxqs[lrxq]->id;

	if (port->priv->hw_version == MVPP21)
		mask = MVPP21_RXQ_POOL_LONG_MASK;
	else
		mask = MVPP22_RXQ_POOL_LONG_MASK;

	val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
	val &= ~mask;
	val |= (long_pool << MVPP2_RXQ_POOL_LONG_OFFS) & mask;
	mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
}

/* Attach short pool to rxq */
static void mvpp2_rxq_short_pool_set(struct mvpp2_port *port,
				     int lrxq, int short_pool)
{
	u32 val, mask;
	int prxq;

	/* Get queue physical ID */
	prxq = port->rxqs[lrxq]->id;

	if (port->priv->hw_version == MVPP21)
		mask = MVPP21_RXQ_POOL_SHORT_MASK;
	else
		mask = MVPP22_RXQ_POOL_SHORT_MASK;

	val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
	val &= ~mask;
	val |= (short_pool << MVPP2_RXQ_POOL_SHORT_OFFS) & mask;
	mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
}

static void *mvpp2_buf_alloc(struct mvpp2_port *port,
			     struct mvpp2_bm_pool *bm_pool,
			     struct page_pool *page_pool,
			     dma_addr_t *buf_dma_addr,
			     phys_addr_t *buf_phys_addr,
			     gfp_t gfp_mask)
{
	dma_addr_t dma_addr;
	struct page *page;
	void *data;

	data = mvpp2_frag_alloc(bm_pool, page_pool);
	if (!data)
		return NULL;

	if (page_pool) {
		page = (struct page *)data;
		dma_addr = page_pool_get_dma_addr(page);
		data = page_to_virt(page);
	} else {
		dma_addr = dma_map_single(port->dev->dev.parent, data,
					  MVPP2_RX_BUF_SIZE(bm_pool->pkt_size),
					  DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(port->dev->dev.parent, dma_addr))) {
			mvpp2_frag_free(bm_pool, NULL, data);
			return NULL;
		}
	}
	*buf_dma_addr = dma_addr;
	*buf_phys_addr = virt_to_phys(data);

	return data;
}

/* Enable flow control for the port's RXQs */
static void mvpp2_rxq_enable_fc(struct mvpp2_port *port)
{
	int val, cm3_state, host_id, q;
	int fq = port->first_rxq;
	unsigned long flags;

	spin_lock_irqsave(&port->priv->mss_spinlock, flags);

	/* Clear the flow control enable bit to prevent a race between FW and
	 * kernel. If flow control was enabled, it will be re-enabled below.
	 */
	val = mvpp2_cm3_read(port->priv, MSS_FC_COM_REG);
	cm3_state = (val & FLOW_CONTROL_ENABLE_BIT);
	val &= ~FLOW_CONTROL_ENABLE_BIT;
	mvpp2_cm3_write(port->priv, MSS_FC_COM_REG, val);

	/* Set the same flow control for all RXQs */
	for (q = 0; q < port->nrxqs; q++) {
		/* Set stop and start flow control RXQ thresholds */
		val = MSS_THRESHOLD_START;
		val |= (MSS_THRESHOLD_STOP << MSS_RXQ_TRESH_STOP_OFFS);
		mvpp2_cm3_write(port->priv, MSS_RXQ_TRESH_REG(q, fq), val);

		val = mvpp2_cm3_read(port->priv, MSS_RXQ_ASS_REG(q, fq));
		/* Set RXQ port ID */
		val &= ~(MSS_RXQ_ASS_PORTID_MASK << MSS_RXQ_ASS_Q_BASE(q, fq));
		val |= (port->id << MSS_RXQ_ASS_Q_BASE(q, fq));
		val &= ~(MSS_RXQ_ASS_HOSTID_MASK << (MSS_RXQ_ASS_Q_BASE(q, fq)
			+ MSS_RXQ_ASS_HOSTID_OFFS));

		/* Calculate RXQ host ID:
		 * In single queue mode: host ID equals the host ID used for
		 * the shared RX interrupt
		 * In multi queue mode: host ID equals RXQ ID / number of
		 * CoS queues
		 * In single resource mode: host ID is always 0
		 */
		if (queue_mode == MVPP2_QDIST_SINGLE_MODE)
			host_id = port->nqvecs;
		else if (queue_mode == MVPP2_QDIST_MULTI_MODE)
			host_id = q;
		else
			host_id = 0;

		/* Set RXQ host ID */
		val |= (host_id << (MSS_RXQ_ASS_Q_BASE(q, fq)
			+ MSS_RXQ_ASS_HOSTID_OFFS));

		mvpp2_cm3_write(port->priv, MSS_RXQ_ASS_REG(q, fq), val);
	}

	/* Notify firmware that the flow control config space is ready for
	 * update
	 */
	val = mvpp2_cm3_read(port->priv, MSS_FC_COM_REG);
	val |= FLOW_CONTROL_UPDATE_COMMAND_BIT;
	val |= cm3_state;
	mvpp2_cm3_write(port->priv, MSS_FC_COM_REG, val);

	spin_unlock_irqrestore(&port->priv->mss_spinlock, flags);
}

/* Disable flow control for the port's RXQs */
static void mvpp2_rxq_disable_fc(struct mvpp2_port *port)
{
	int val, cm3_state, q;
	unsigned long flags;
	int fq = port->first_rxq;

	spin_lock_irqsave(&port->priv->mss_spinlock, flags);

	/* Clear the flow control enable bit to prevent a race between FW and
	 * kernel. If flow control was enabled, it will be re-enabled below.
	 */
	val = mvpp2_cm3_read(port->priv, MSS_FC_COM_REG);
	cm3_state = (val & FLOW_CONTROL_ENABLE_BIT);
	val &= ~FLOW_CONTROL_ENABLE_BIT;
	mvpp2_cm3_write(port->priv, MSS_FC_COM_REG, val);

	/* Disable flow control for all RXQs */
	for (q = 0; q < port->nrxqs; q++) {
		/* Set threshold 0 to disable flow control */
		val = 0;
		val |= (0 << MSS_RXQ_TRESH_STOP_OFFS);
		mvpp2_cm3_write(port->priv, MSS_RXQ_TRESH_REG(q, fq), val);

		val = mvpp2_cm3_read(port->priv, MSS_RXQ_ASS_REG(q, fq));

		val &= ~(MSS_RXQ_ASS_PORTID_MASK << MSS_RXQ_ASS_Q_BASE(q, fq));

		val &= ~(MSS_RXQ_ASS_HOSTID_MASK << (MSS_RXQ_ASS_Q_BASE(q, fq)
			+ MSS_RXQ_ASS_HOSTID_OFFS));

		mvpp2_cm3_write(port->priv, MSS_RXQ_ASS_REG(q, fq), val);
	}

	/* Notify firmware that the flow control config space is ready for
	 * update
	 */
	val = mvpp2_cm3_read(port->priv, MSS_FC_COM_REG);
	val |= FLOW_CONTROL_UPDATE_COMMAND_BIT;
	val |= cm3_state;
	mvpp2_cm3_write(port->priv, MSS_FC_COM_REG, val);

	spin_unlock_irqrestore(&port->priv->mss_spinlock, flags);
}

/* Disable/enable flow control for a BM pool */
static void mvpp2_bm_pool_update_fc(struct mvpp2_port *port,
				    struct mvpp2_bm_pool *pool,
				    bool en)
{
	int val, cm3_state;
	unsigned long flags;

	spin_lock_irqsave(&port->priv->mss_spinlock, flags);

	/* Clear the flow control enable bit to prevent a race between FW and
	 * kernel. If flow control was enabled, it will be re-enabled below.
	 */
	val = mvpp2_cm3_read(port->priv, MSS_FC_COM_REG);
	cm3_state = (val & FLOW_CONTROL_ENABLE_BIT);
	val &= ~FLOW_CONTROL_ENABLE_BIT;
	mvpp2_cm3_write(port->priv, MSS_FC_COM_REG, val);

	/* Check if the BM pool should be enabled/disabled */
	if (en) {
		/* Set BM pool start and stop thresholds per port */
		val = mvpp2_cm3_read(port->priv, MSS_BUF_POOL_REG(pool->id));
		val |= MSS_BUF_POOL_PORT_OFFS(port->id);
		val &= ~MSS_BUF_POOL_START_MASK;
		val |= (MSS_THRESHOLD_START << MSS_BUF_POOL_START_OFFS);
		val &= ~MSS_BUF_POOL_STOP_MASK;
		val |= MSS_THRESHOLD_STOP;
		mvpp2_cm3_write(port->priv, MSS_BUF_POOL_REG(pool->id), val);
	} else {
		/* Remove BM pool from the port */
		val = mvpp2_cm3_read(port->priv, MSS_BUF_POOL_REG(pool->id));
		val &= ~MSS_BUF_POOL_PORT_OFFS(port->id);

		/* Zero BM pool start and stop thresholds to disable pool
		 * flow control if the pool is empty (not used by any port)
		 */
		if (!pool->buf_num) {
			val &= ~MSS_BUF_POOL_START_MASK;
			val &= ~MSS_BUF_POOL_STOP_MASK;
		}

		mvpp2_cm3_write(port->priv, MSS_BUF_POOL_REG(pool->id), val);
	}

	/* Notify firmware that the flow control config space is ready for
	 * update
	 */
	val = mvpp2_cm3_read(port->priv, MSS_FC_COM_REG);
	val |= FLOW_CONTROL_UPDATE_COMMAND_BIT;
	val |= cm3_state;
	mvpp2_cm3_write(port->priv, MSS_FC_COM_REG, val);

	spin_unlock_irqrestore(&port->priv->mss_spinlock, flags);
}

/* Disable/enable flow control for the BM pools on all ports */
static void mvpp2_bm_pool_update_priv_fc(struct mvpp2 *priv, bool en)
{
	struct mvpp2_port *port;
	int i, j;

	for (i = 0; i < priv->port_count; i++) {
		port = priv->port_list[i];
		if (port->priv->percpu_pools) {
			for (j = 0; j < port->nrxqs; j++)
				mvpp2_bm_pool_update_fc(port, &port->priv->bm_pools[j],
							port->tx_fc & en);
		} else {
			mvpp2_bm_pool_update_fc(port, port->pool_long, port->tx_fc & en);
			mvpp2_bm_pool_update_fc(port, port->pool_short, port->tx_fc & en);
		}
	}
}

static int mvpp2_enable_global_fc(struct mvpp2 *priv)
{
	int val, timeout = 0;

	/* Enable global flow control. At this stage global flow control is
	 * enabled, but still disabled per port.
	 */
	val = mvpp2_cm3_read(priv, MSS_FC_COM_REG);
	val |= FLOW_CONTROL_ENABLE_BIT;
	mvpp2_cm3_write(priv, MSS_FC_COM_REG, val);

	/* Check if the firmware is running, and disable FC if not */
	val |= FLOW_CONTROL_UPDATE_COMMAND_BIT;
	mvpp2_cm3_write(priv, MSS_FC_COM_REG, val);

	while (timeout < MSS_FC_MAX_TIMEOUT) {
		val = mvpp2_cm3_read(priv, MSS_FC_COM_REG);

		if (!(val & FLOW_CONTROL_UPDATE_COMMAND_BIT))
			return 0;
		usleep_range(10, 20);
		timeout++;
	}

	priv->global_tx_fc = false;
	return -EOPNOTSUPP;
}

/* Release buffer to BM */
static inline void mvpp2_bm_pool_put(struct mvpp2_port *port, int pool,
				     dma_addr_t buf_dma_addr,
				     phys_addr_t buf_phys_addr)
{
	unsigned int thread = mvpp2_cpu_to_thread(port->priv, get_cpu());
	unsigned long flags = 0;

	if (test_bit(thread, &port->priv->lock_map))
		spin_lock_irqsave(&port->bm_lock[thread], flags);

	if (port->priv->hw_version >= MVPP22) {
		u32 val = 0;

		if (sizeof(dma_addr_t) == 8)
			val |= upper_32_bits(buf_dma_addr) &
				MVPP22_BM_ADDR_HIGH_PHYS_RLS_MASK;

		if (sizeof(phys_addr_t) == 8)
			val |= (upper_32_bits(buf_phys_addr)
				<< MVPP22_BM_ADDR_HIGH_VIRT_RLS_SHIFT) &
				MVPP22_BM_ADDR_HIGH_VIRT_RLS_MASK;

		mvpp2_thread_write_relaxed(port->priv, thread,
					   MVPP22_BM_ADDR_HIGH_RLS_REG, val);
	}

	/* MVPP2_BM_VIRT_RLS_REG is not interpreted by HW, and simply
	 * returned in the "cookie" field of the RX
	 * descriptor. Instead of storing the virtual address, we
	 * store the physical address.
	 */
	mvpp2_thread_write_relaxed(port->priv, thread,
				   MVPP2_BM_VIRT_RLS_REG, buf_phys_addr);
	mvpp2_thread_write_relaxed(port->priv, thread,
				   MVPP2_BM_PHY_RLS_REG(pool), buf_dma_addr);

	if (test_bit(thread, &port->priv->lock_map))
		spin_unlock_irqrestore(&port->bm_lock[thread], flags);

	put_cpu();
}

/* Allocate buffers for the pool */
static int mvpp2_bm_bufs_add(struct mvpp2_port *port,
			     struct mvpp2_bm_pool *bm_pool, int buf_num)
{
	int i, buf_size, total_size;
	dma_addr_t dma_addr;
	phys_addr_t phys_addr;
	struct page_pool *pp = NULL;
	void *buf;

	if (port->priv->percpu_pools &&
	    bm_pool->pkt_size > MVPP2_BM_LONG_PKT_SIZE) {
		netdev_err(port->dev,
			   "attempted to use jumbo frames with per-cpu pools");
		return 0;
	}

	buf_size = MVPP2_RX_BUF_SIZE(bm_pool->pkt_size);
	total_size = MVPP2_RX_TOTAL_SIZE(buf_size);

	if (buf_num < 0 ||
	    (buf_num + bm_pool->buf_num > bm_pool->size)) {
		netdev_err(port->dev,
			   "cannot allocate %d buffers for pool %d\n",
			   buf_num, bm_pool->id);
		return 0;
	}

	if (port->priv->percpu_pools)
		pp = port->priv->page_pool[bm_pool->id];
	for (i = 0; i < buf_num; i++) {
		buf = mvpp2_buf_alloc(port, bm_pool, pp, &dma_addr,
				      &phys_addr, GFP_KERNEL);
		if (!buf)
			break;

		mvpp2_bm_pool_put(port, bm_pool->id, dma_addr,
				  phys_addr);
	}

	/* Update BM driver with number of buffers added to pool */
	bm_pool->buf_num += i;

	netdev_dbg(port->dev,
		   "pool %d: pkt_size=%4d, buf_size=%4d, total_size=%4d\n",
		   bm_pool->id, bm_pool->pkt_size, buf_size, total_size);

	netdev_dbg(port->dev,
		   "pool %d: %d of %d buffers added\n",
		   bm_pool->id, i, buf_num);
	return i;
}

/* Notify the driver that BM pool is being used as specific type and return the
 * pool pointer on success
 */
static struct mvpp2_bm_pool *
mvpp2_bm_pool_use(struct mvpp2_port *port, unsigned pool, int pkt_size)
{
	struct mvpp2_bm_pool *new_pool = &port->priv->bm_pools[pool];
	int num;

	if ((port->priv->percpu_pools && pool > mvpp2_get_nrxqs(port->priv) * 2) ||
	    (!port->priv->percpu_pools && pool >= MVPP2_BM_POOLS_NUM)) {
		netdev_err(port->dev, "Invalid pool %d\n", pool);
		return NULL;
	}

	/* Allocate buffers in case BM pool is used as long pool, but packet
	 * size doesn't match MTU or BM pool hasn't been used yet
	 */
	if (new_pool->pkt_size == 0) {
		int pkts_num;

		/* Set default buffer number or free all the buffers in case
		 * the pool is not empty
		 */
		pkts_num = new_pool->buf_num;
		if (pkts_num == 0) {
			if (port->priv->percpu_pools) {
				if (pool < port->nrxqs)
					pkts_num = mvpp2_pools[MVPP2_BM_SHORT].buf_num;
				else
					pkts_num = mvpp2_pools[MVPP2_BM_LONG].buf_num;
			} else {
				pkts_num = mvpp2_pools[pool].buf_num;
			}
		} else {
			mvpp2_bm_bufs_free(port->dev->dev.parent,
					   port->priv, new_pool, pkts_num);
		}

		new_pool->pkt_size = pkt_size;
		new_pool->frag_size =
			SKB_DATA_ALIGN(MVPP2_RX_BUF_SIZE(pkt_size)) +
			MVPP2_SKB_SHINFO_SIZE;

		/* Allocate buffers for this pool */
		num = mvpp2_bm_bufs_add(port, new_pool, pkts_num);
		if (num != pkts_num) {
			WARN(1, "pool %d: %d of %d allocated\n",
			     new_pool->id, num, pkts_num);
			return NULL;
		}
	}

	mvpp2_bm_pool_bufsize_set(port->priv, new_pool,
				  MVPP2_RX_BUF_SIZE(new_pool->pkt_size));

	return new_pool;
}

static struct mvpp2_bm_pool *
mvpp2_bm_pool_use_percpu(struct mvpp2_port *port, int type,
			 unsigned int pool, int pkt_size)
{
	struct mvpp2_bm_pool *new_pool = &port->priv->bm_pools[pool];
	int num;

	if (pool > port->nrxqs * 2) {
		netdev_err(port->dev, "Invalid pool %d\n", pool);
		return NULL;
	}

	/* Allocate buffers in case BM pool is used as long pool, but packet
	 * size doesn't match MTU or BM pool hasn't been used yet
	 */
	if (new_pool->pkt_size == 0) {
		int pkts_num;

		/* Set default buffer number or free all the buffers in case
		 * the pool is not empty
		 */
		pkts_num = new_pool->buf_num;
		if (pkts_num == 0)
			pkts_num = mvpp2_pools[type].buf_num;
		else
			mvpp2_bm_bufs_free(port->dev->dev.parent,
					   port->priv, new_pool, pkts_num);

		new_pool->pkt_size = pkt_size;
		new_pool->frag_size =
			SKB_DATA_ALIGN(MVPP2_RX_BUF_SIZE(pkt_size)) +
			MVPP2_SKB_SHINFO_SIZE;

		/* Allocate buffers for this pool */
		num = mvpp2_bm_bufs_add(port, new_pool, pkts_num);
		if (num != pkts_num) {
			WARN(1, "pool %d: %d of %d allocated\n",
			     new_pool->id, num, pkts_num);
			return NULL;
		}
	}

	mvpp2_bm_pool_bufsize_set(port->priv, new_pool,
				  MVPP2_RX_BUF_SIZE(new_pool->pkt_size));

	return new_pool;
}

/* Initialize pools for swf, shared buffers variant */
static int mvpp2_swf_bm_pool_init_shared(struct mvpp2_port *port)
{
	enum mvpp2_bm_pool_log_num long_log_pool, short_log_pool;
	int rxq;

	/* If port pkt_size is higher than 1518B:
	 * HW Long pool - SW Jumbo pool, HW Short pool - SW Long pool
	 * else: HW Long pool - SW Long pool, HW Short pool - SW Short pool
	 */
	if (port->pkt_size > MVPP2_BM_LONG_PKT_SIZE) {
		long_log_pool = MVPP2_BM_JUMBO;
		short_log_pool = MVPP2_BM_LONG;
	} else {
		long_log_pool = MVPP2_BM_LONG;
		short_log_pool = MVPP2_BM_SHORT;
	}

	if (!port->pool_long) {
		port->pool_long =
			mvpp2_bm_pool_use(port, long_log_pool,
					  mvpp2_pools[long_log_pool].pkt_size);
		if (!port->pool_long)
			return -ENOMEM;

		port->pool_long->port_map |= BIT(port->id);

		for (rxq = 0; rxq < port->nrxqs; rxq++)
			mvpp2_rxq_long_pool_set(port, rxq, port->pool_long->id);
	}

	if (!port->pool_short) {
		port->pool_short =
			mvpp2_bm_pool_use(port, short_log_pool,
					  mvpp2_pools[short_log_pool].pkt_size);
		if (!port->pool_short)
			return -ENOMEM;

		port->pool_short->port_map |= BIT(port->id);

		for (rxq = 0; rxq < port->nrxqs; rxq++)
			mvpp2_rxq_short_pool_set(port, rxq,
						 port->pool_short->id);
	}

	return 0;
}

/* Initialize pools for swf, percpu buffers variant */
static int mvpp2_swf_bm_pool_init_percpu(struct mvpp2_port *port)
{
	struct mvpp2_bm_pool *bm_pool;
	int i;

	for (i = 0; i < port->nrxqs; i++) {
		bm_pool = mvpp2_bm_pool_use_percpu(port, MVPP2_BM_SHORT, i,
						   mvpp2_pools[MVPP2_BM_SHORT].pkt_size);
		if (!bm_pool)
			return -ENOMEM;

		bm_pool->port_map |= BIT(port->id);
		mvpp2_rxq_short_pool_set(port, i, bm_pool->id);
	}

	for (i = 0; i < port->nrxqs; i++) {
		bm_pool = mvpp2_bm_pool_use_percpu(port, MVPP2_BM_LONG, i + port->nrxqs,
						   mvpp2_pools[MVPP2_BM_LONG].pkt_size);
		if (!bm_pool)
			return -ENOMEM;

		bm_pool->port_map |= BIT(port->id);
		mvpp2_rxq_long_pool_set(port, i, bm_pool->id);
	}

	port->pool_long = NULL;
	port->pool_short = NULL;

	return 0;
}

static int mvpp2_swf_bm_pool_init(struct mvpp2_port *port)
{
	if (port->priv->percpu_pools)
		return mvpp2_swf_bm_pool_init_percpu(port);
	else
		return mvpp2_swf_bm_pool_init_shared(port);
}

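/* Per-CPU pool layout example: with port->nrxqs == 4, RXQ 2 is backed by
 * short pool 2 and long pool 6 (i + port->nrxqs), so every RX queue gets a
 * dedicated short/long pool pair.
 */
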
static void mvpp2_set_hw_csum(struct mvpp2_port *port,
			      enum mvpp2_bm_pool_log_num new_long_pool)
{
	const netdev_features_t csums = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	/* Update L4 checksum when jumbo enable/disable on port.
	 * Only port 0 supports hardware checksum offload due to
	 * the Tx FIFO size limitation.
	 * Also, don't set NETIF_F_HW_CSUM because L3_offset in TX descriptor
	 * has 7 bits, so the maximum L3 offset is 128.
	 */
	if (new_long_pool == MVPP2_BM_JUMBO && port->id != 0) {
		port->dev->features &= ~csums;
		port->dev->hw_features &= ~csums;
	} else {
		port->dev->features |= csums;
		port->dev->hw_features |= csums;
	}
}

static int mvpp2_bm_update_mtu(struct net_device *dev, int mtu)
{
	struct mvpp2_port *port = netdev_priv(dev);
	enum mvpp2_bm_pool_log_num new_long_pool;
	int pkt_size = MVPP2_RX_PKT_SIZE(mtu);

	if (port->priv->percpu_pools)
		goto out_set;

	/* If port MTU is higher than 1518B:
	 * HW Long pool - SW Jumbo pool, HW Short pool - SW Long pool
	 * else: HW Long pool - SW Long pool, HW Short pool - SW Short pool
	 */
	if (pkt_size > MVPP2_BM_LONG_PKT_SIZE)
		new_long_pool = MVPP2_BM_JUMBO;
	else
		new_long_pool = MVPP2_BM_LONG;

	if (new_long_pool != port->pool_long->id) {
		if (port->tx_fc) {
			if (pkt_size > MVPP2_BM_LONG_PKT_SIZE)
				mvpp2_bm_pool_update_fc(port,
							port->pool_short,
							false);
			else
				mvpp2_bm_pool_update_fc(port, port->pool_long,
							false);
		}

		/* Remove port from old short & long pool */
		port->pool_long = mvpp2_bm_pool_use(port, port->pool_long->id,
						    port->pool_long->pkt_size);
		port->pool_long->port_map &= ~BIT(port->id);
		port->pool_long = NULL;

		port->pool_short = mvpp2_bm_pool_use(port, port->pool_short->id,
						     port->pool_short->pkt_size);
		port->pool_short->port_map &= ~BIT(port->id);
		port->pool_short = NULL;

		port->pkt_size = pkt_size;

		/* Add port to new short & long pool */
		mvpp2_swf_bm_pool_init(port);

		mvpp2_set_hw_csum(port, new_long_pool);

		if (port->tx_fc) {
			if (pkt_size > MVPP2_BM_LONG_PKT_SIZE)
				mvpp2_bm_pool_update_fc(port, port->pool_long,
							true);
			else
				mvpp2_bm_pool_update_fc(port, port->pool_short,
							true);
		}

		/* Update L4 checksum when jumbo enable/disable on port */
		if (new_long_pool == MVPP2_BM_JUMBO && port->id != 0) {
			dev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
			dev->hw_features &= ~(NETIF_F_IP_CSUM |
					      NETIF_F_IPV6_CSUM);
		} else {
			dev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
			dev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
		}
	}

out_set:
	WRITE_ONCE(dev->mtu, mtu);
	dev->wanted_features = dev->features;

	netdev_update_features(dev);
	return 0;
}

static inline void mvpp2_interrupts_enable(struct mvpp2_port *port)
{
	int i, sw_thread_mask = 0;

	for (i = 0; i < port->nqvecs; i++)
		sw_thread_mask |= port->qvecs[i].sw_thread_mask;

	mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
		    MVPP2_ISR_ENABLE_INTERRUPT(sw_thread_mask));
}

static inline void mvpp2_interrupts_disable(struct mvpp2_port *port)
{
	int i, sw_thread_mask = 0;

	for (i = 0; i < port->nqvecs; i++)
		sw_thread_mask |= port->qvecs[i].sw_thread_mask;

	mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
		    MVPP2_ISR_DISABLE_INTERRUPT(sw_thread_mask));
}

static inline void mvpp2_qvec_interrupt_enable(struct mvpp2_queue_vector *qvec)
{
	struct mvpp2_port *port = qvec->port;

	mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
		    MVPP2_ISR_ENABLE_INTERRUPT(qvec->sw_thread_mask));
}

static inline void mvpp2_qvec_interrupt_disable(struct mvpp2_queue_vector *qvec)
{
	struct mvpp2_port *port = qvec->port;

	mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
		    MVPP2_ISR_DISABLE_INTERRUPT(qvec->sw_thread_mask));
}

/* Mask the current thread's Rx/Tx interrupts.
 * Called by on_each_cpu(), guaranteed to run with migration disabled,
 * using smp_processor_id() is OK.
 */
static void mvpp2_interrupts_mask(void *arg)
{
	struct mvpp2_port *port = arg;
	int cpu = smp_processor_id();
	u32 thread;

	/* If the thread isn't used, don't do anything */
	if (cpu >= port->priv->nthreads)
		return;

	thread = mvpp2_cpu_to_thread(port->priv, cpu);

	mvpp2_thread_write(port->priv, thread,
			   MVPP2_ISR_RX_TX_MASK_REG(port->id), 0);
	mvpp2_thread_write(port->priv, thread,
			   MVPP2_ISR_RX_ERR_CAUSE_REG(port->id), 0);
}

543ec376 | 1445 | /* Unmask the current thread's Rx/Tx interrupts. |
e0af22d9 TP |
1446 | * Called by on_each_cpu(), guaranteed to run with migration disabled, |
1447 | * using smp_processor_id() is OK. | |
1448 | */ | |
3f518509 MW |
1449 | static void mvpp2_interrupts_unmask(void *arg) |
1450 | { | |
1451 | struct mvpp2_port *port = arg; | |
bf270fa3 SC |
1452 | int cpu = smp_processor_id(); |
1453 | u32 val, thread; | |
213f428f | 1454 | |
e531f767 | 1455 | /* If the thread isn't used, don't do anything */ |
7867299c | 1456 | if (cpu >= port->priv->nthreads) |
e531f767 AT |
1457 | return; |
1458 | ||
bf270fa3 SC |
1459 | thread = mvpp2_cpu_to_thread(port->priv, cpu); |
1460 | ||
213f428f | 1461 | val = MVPP2_CAUSE_MISC_SUM_MASK | |
70afb58e | 1462 | MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK(port->priv->hw_version); |
213f428f TP |
1463 | if (port->has_tx_irqs) |
1464 | val |= MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK; | |
3f518509 | 1465 | |
bf270fa3 | 1466 | mvpp2_thread_write(port->priv, thread, |
213f428f | 1467 | MVPP2_ISR_RX_TX_MASK_REG(port->id), val); |
bf270fa3 SC |
1468 | mvpp2_thread_write(port->priv, thread, |
1469 | MVPP2_ISR_RX_ERR_CAUSE_REG(port->id), | |
1470 | MVPP2_ISR_RX_ERR_CAUSE_NONOCC_MASK); | |
213f428f TP |
1471 | } |
1472 | ||
1473 | static void | |
1474 | mvpp2_shared_interrupt_mask_unmask(struct mvpp2_port *port, bool mask) | |
1475 | { | |
1476 | u32 val; | |
1477 | int i; | |
1478 | ||
60dcd6b7 | 1479 | if (port->priv->hw_version == MVPP21) |
213f428f TP |
1480 | return; |
1481 | ||
1482 | if (mask) | |
1483 | val = 0; | |
1484 | else | |
70afb58e | 1485 | val = MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK(MVPP22); |
213f428f TP |
1486 | |
1487 | for (i = 0; i < port->nqvecs; i++) { | |
1488 | struct mvpp2_queue_vector *v = port->qvecs + i; | |
1489 | ||
1490 | if (v->type != MVPP2_QUEUE_VECTOR_SHARED) | |
1491 | continue; | |
1492 | ||
1068549c | 1493 | mvpp2_thread_write(port->priv, v->sw_thread_id, |
213f428f | 1494 | MVPP2_ISR_RX_TX_MASK_REG(port->id), val); |
bf270fa3 SC |
1495 | mvpp2_thread_write(port->priv, v->sw_thread_id, |
1496 | MVPP2_ISR_RX_ERR_CAUSE_REG(port->id), | |
1497 | MVPP2_ISR_RX_ERR_CAUSE_NONOCC_MASK); | |
213f428f | 1498 | } |
3f518509 MW |
1499 | } |
1500 | ||
a9a33202 RK |
1501 | /* Only GOP port 0 has an XLG MAC */ |
1502 | static bool mvpp2_port_supports_xlg(struct mvpp2_port *port) | |
1503 | { | |
1504 | return port->gop_id == 0; | |
1505 | } | |
1506 | ||
1507 | static bool mvpp2_port_supports_rgmii(struct mvpp2_port *port) | |
1508 | { | |
f704177e | 1509 | return !(port->priv->hw_version >= MVPP22 && port->gop_id == 0); |
a9a33202 RK |
1510 | } |
1511 | ||
3f518509 | 1512 | /* Port configuration routines */ |
b7d286f0 RK |
1513 | static bool mvpp2_is_xlg(phy_interface_t interface) |
1514 | { | |
e0f909bc | 1515 | return interface == PHY_INTERFACE_MODE_10GBASER || |
4043ec70 | 1516 | interface == PHY_INTERFACE_MODE_5GBASER || |
b7d286f0 RK |
1517 | interface == PHY_INTERFACE_MODE_XAUI; |
1518 | } | |
3f518509 | 1519 | |
bd45f644 RK |
1520 | static void mvpp2_modify(void __iomem *ptr, u32 mask, u32 set) |
1521 | { | |
1522 | u32 old, val; | |
1523 | ||
1524 | old = val = readl(ptr); | |
1525 | val &= ~mask; | |
1526 | val |= set; | |
1527 | if (old != val) | |
1528 | writel(val, ptr); | |
1529 | } | |
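
Editor's note: mvpp2_modify() is a read-modify-write helper that only issues the register write when the value actually changes, sparing redundant MMIO. A minimal, self-contained sketch of the same pattern against an ordinary memory word (the volatile stand-in and names are illustrative, not driver code):

	/* Editor's sketch, not part of the driver: read-modify-write that
	 * skips the write-back when nothing changed.
	 */
	#include <stdint.h>

	static void demo_modify(volatile uint32_t *ptr, uint32_t mask, uint32_t set)
	{
		uint32_t old, val;

		old = val = *ptr;	/* readl(ptr) in the driver */
		val &= ~mask;		/* clear the whole field */
		val |= set;		/* set the desired bits */
		if (old != val)
			*ptr = val;	/* writel(val, ptr), only if needed */
	}

Callers pass the full field as mask and the desired bits as set, so the helper is idempotent.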
1530 | ||
f84bf386 AT |
1531 | static void mvpp22_gop_init_rgmii(struct mvpp2_port *port) |
1532 | { | |
1533 | struct mvpp2 *priv = port->priv; | |
1534 | u32 val; | |
1535 | ||
1536 | regmap_read(priv->sysctrl_base, GENCONF_PORT_CTRL0, &val); | |
1537 | val |= GENCONF_PORT_CTRL0_BUS_WIDTH_SELECT; | |
1538 | regmap_write(priv->sysctrl_base, GENCONF_PORT_CTRL0, val); | |
1539 | ||
1540 | regmap_read(priv->sysctrl_base, GENCONF_CTRL0, &val); | |
1b666016 | 1541 | if (port->gop_id == 2) { |
935a1184 | 1542 | val |= GENCONF_CTRL0_PORT2_RGMII; |
1b666016 | 1543 | } else if (port->gop_id == 3) { |
935a1184 | 1544 | val |= GENCONF_CTRL0_PORT3_RGMII_MII; |
1b666016 SE |
1545 | |
1546 | /* According to the specification, GENCONF_CTRL0_PORT3_RGMII | |
1547 | * should be set to 1 for RGMII and 0 for MII. However, tests | |
1548 | * show that it is the other way around. This is also what | |
1549 | * U-Boot does for mvpp2, so it is assumed to be correct. | |
1550 | */ | |
1551 | if (port->phy_interface == PHY_INTERFACE_MODE_MII) | |
1552 | val |= GENCONF_CTRL0_PORT3_RGMII; | |
1553 | else | |
1554 | val &= ~GENCONF_CTRL0_PORT3_RGMII; | |
1555 | } | |
f84bf386 AT |
1556 | regmap_write(priv->sysctrl_base, GENCONF_CTRL0, val); |
1557 | } | |
1558 | ||
1559 | static void mvpp22_gop_init_sgmii(struct mvpp2_port *port) | |
1560 | { | |
1561 | struct mvpp2 *priv = port->priv; | |
1562 | u32 val; | |
1563 | ||
1564 | regmap_read(priv->sysctrl_base, GENCONF_PORT_CTRL0, &val); | |
1565 | val |= GENCONF_PORT_CTRL0_BUS_WIDTH_SELECT | | |
1566 | GENCONF_PORT_CTRL0_RX_DATA_SAMPLE; | |
1567 | regmap_write(priv->sysctrl_base, GENCONF_PORT_CTRL0, val); | |
1568 | ||
1569 | if (port->gop_id > 1) { | |
1570 | regmap_read(priv->sysctrl_base, GENCONF_CTRL0, &val); | |
1571 | if (port->gop_id == 2) | |
935a1184 | 1572 | val &= ~GENCONF_CTRL0_PORT2_RGMII; |
f84bf386 | 1573 | else if (port->gop_id == 3) |
935a1184 | 1574 | val &= ~GENCONF_CTRL0_PORT3_RGMII_MII; |
f84bf386 AT |
1575 | regmap_write(priv->sysctrl_base, GENCONF_CTRL0, val); |
1576 | } | |
1577 | } | |
1578 | ||
1579 | static void mvpp22_gop_init_10gkr(struct mvpp2_port *port) | |
1580 | { | |
1581 | struct mvpp2 *priv = port->priv; | |
1582 | void __iomem *mpcs = priv->iface_base + MVPP22_MPCS_BASE(port->gop_id); | |
1583 | void __iomem *xpcs = priv->iface_base + MVPP22_XPCS_BASE(port->gop_id); | |
1584 | u32 val; | |
1585 | ||
f84bf386 AT |
1586 | val = readl(xpcs + MVPP22_XPCS_CFG0); |
1587 | val &= ~(MVPP22_XPCS_CFG0_PCS_MODE(0x3) | | |
1588 | MVPP22_XPCS_CFG0_ACTIVE_LANE(0x3)); | |
1589 | val |= MVPP22_XPCS_CFG0_ACTIVE_LANE(2); | |
1590 | writel(val, xpcs + MVPP22_XPCS_CFG0); | |
1591 | ||
f84bf386 AT |
1592 | val = readl(mpcs + MVPP22_MPCS_CTRL); |
1593 | val &= ~MVPP22_MPCS_CTRL_FWD_ERR_CONN; | |
1594 | writel(val, mpcs + MVPP22_MPCS_CTRL); | |
1595 | ||
1596 | val = readl(mpcs + MVPP22_MPCS_CLK_RESET); | |
7409e66e | 1597 | val &= ~MVPP22_MPCS_CLK_RESET_DIV_RATIO(0x7); |
f84bf386 AT |
1598 | val |= MVPP22_MPCS_CLK_RESET_DIV_RATIO(1); |
1599 | writel(val, mpcs + MVPP22_MPCS_CLK_RESET); | |
f84bf386 AT |
1600 | } |
1601 | ||
2788d841 SC |
1602 | static void mvpp22_gop_fca_enable_periodic(struct mvpp2_port *port, bool en) |
1603 | { | |
1604 | struct mvpp2 *priv = port->priv; | |
1605 | void __iomem *fca = priv->iface_base + MVPP22_FCA_BASE(port->gop_id); | |
1606 | u32 val; | |
1607 | ||
1608 | val = readl(fca + MVPP22_FCA_CONTROL_REG); | |
1609 | val &= ~MVPP22_FCA_ENABLE_PERIODIC; | |
1610 | if (en) | |
1611 | val |= MVPP22_FCA_ENABLE_PERIODIC; | |
1612 | writel(val, fca + MVPP22_FCA_CONTROL_REG); | |
1613 | } | |
1614 | ||
1615 | static void mvpp22_gop_fca_set_timer(struct mvpp2_port *port, u32 timer) | |
1616 | { | |
1617 | struct mvpp2 *priv = port->priv; | |
1618 | void __iomem *fca = priv->iface_base + MVPP22_FCA_BASE(port->gop_id); | |
1619 | u32 lsb, msb; | |
1620 | ||
1621 | lsb = timer & MVPP22_FCA_REG_MASK; | |
1622 | msb = timer >> MVPP22_FCA_REG_SIZE; | |
1623 | ||
1624 | writel(lsb, fca + MVPP22_PERIODIC_COUNTER_LSB_REG); | |
1625 | writel(msb, fca + MVPP22_PERIODIC_COUNTER_MSB_REG); | |
1626 | } | |
1627 | ||
1628 | /* Set the Flow Control timer x100 faster than the pause quanta to ensure
1629 | * that the link partner won't send traffic while the port is in XOFF mode.
1630 | */ | |
1631 | static void mvpp22_gop_fca_set_periodic_timer(struct mvpp2_port *port) | |
1632 | { | |
1633 | u32 timer; | |
1634 | ||
1635 | timer = (port->priv->tclk / (USEC_PER_SEC * FC_CLK_DIVIDER)) | |
1636 | * FC_QUANTA; | |
1637 | ||
1638 | mvpp22_gop_fca_enable_periodic(port, false); | |
1639 | ||
1640 | mvpp22_gop_fca_set_timer(port, timer); | |
1641 | ||
1642 | mvpp22_gop_fca_enable_periodic(port, true); | |
1643 | } | |
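
Editor's note: for concreteness, a worked pass through the timer computation, assuming tclk = 333 MHz and the FC_CLK_DIVIDER = 100 / FC_QUANTA = 0xFFFF definitions from mvpp2.h (all three numbers are assumptions here, not taken from this file):

	/* Editor's worked example (assumed values):
	 *   tclk           = 333000000 Hz
	 *   FC_CLK_DIVIDER = 100
	 *   FC_QUANTA      = 0xFFFF = 65535
	 *
	 * timer = (tclk / (USEC_PER_SEC * FC_CLK_DIVIDER)) * FC_QUANTA
	 *       = (333000000 / (1000000 * 100)) * 65535
	 *       = 3 * 65535
	 *       = 196605 ticks
	 *
	 * so the periodic XOFF refresh fires well before the advertised
	 * pause time expires, as the comment above intends.
	 */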
1644 | ||
bb7bbb6e | 1645 | static int mvpp22_gop_init(struct mvpp2_port *port, phy_interface_t interface) |
f84bf386 AT |
1646 | { |
1647 | struct mvpp2 *priv = port->priv; | |
1648 | u32 val; | |
1649 | ||
1650 | if (!priv->sysctrl_base) | |
1651 | return 0; | |
1652 | ||
bb7bbb6e | 1653 | switch (interface) { |
1b666016 | 1654 | case PHY_INTERFACE_MODE_MII: |
f84bf386 AT |
1655 | case PHY_INTERFACE_MODE_RGMII: |
1656 | case PHY_INTERFACE_MODE_RGMII_ID: | |
1657 | case PHY_INTERFACE_MODE_RGMII_RXID: | |
1658 | case PHY_INTERFACE_MODE_RGMII_TXID: | |
a9a33202 | 1659 | if (!mvpp2_port_supports_rgmii(port)) |
f84bf386 AT |
1660 | goto invalid_conf; |
1661 | mvpp22_gop_init_rgmii(port); | |
1662 | break; | |
1663 | case PHY_INTERFACE_MODE_SGMII: | |
d97c9f4a | 1664 | case PHY_INTERFACE_MODE_1000BASEX: |
a6fe31de | 1665 | case PHY_INTERFACE_MODE_2500BASEX: |
f84bf386 AT |
1666 | mvpp22_gop_init_sgmii(port); |
1667 | break; | |
4043ec70 | 1668 | case PHY_INTERFACE_MODE_5GBASER: |
e0f909bc | 1669 | case PHY_INTERFACE_MODE_10GBASER: |
a9a33202 | 1670 | if (!mvpp2_port_supports_xlg(port)) |
f84bf386 AT |
1671 | goto invalid_conf; |
1672 | mvpp22_gop_init_10gkr(port); | |
1673 | break; | |
1674 | default: | |
1675 | goto unsupported_conf; | |
1676 | } | |
1677 | ||
1678 | regmap_read(priv->sysctrl_base, GENCONF_PORT_CTRL1, &val); | |
1679 | val |= GENCONF_PORT_CTRL1_RESET(port->gop_id) | | |
1680 | GENCONF_PORT_CTRL1_EN(port->gop_id); | |
1681 | regmap_write(priv->sysctrl_base, GENCONF_PORT_CTRL1, val); | |
1682 | ||
1683 | regmap_read(priv->sysctrl_base, GENCONF_PORT_CTRL0, &val); | |
1684 | val |= GENCONF_PORT_CTRL0_CLK_DIV_PHASE_CLR; | |
1685 | regmap_write(priv->sysctrl_base, GENCONF_PORT_CTRL0, val); | |
1686 | ||
1687 | regmap_read(priv->sysctrl_base, GENCONF_SOFT_RESET1, &val); | |
1688 | val |= GENCONF_SOFT_RESET1_GOP; | |
1689 | regmap_write(priv->sysctrl_base, GENCONF_SOFT_RESET1, val); | |
1690 | ||
2788d841 SC |
1691 | mvpp22_gop_fca_set_periodic_timer(port); |
1692 | ||
f84bf386 AT |
1693 | unsupported_conf: |
1694 | return 0; | |
1695 | ||
1696 | invalid_conf: | |
1697 | netdev_err(port->dev, "Invalid port configuration\n"); | |
1698 | return -EINVAL; | |
1699 | } | |
1700 | ||
fd3651b2 AT |
1701 | static void mvpp22_gop_unmask_irq(struct mvpp2_port *port) |
1702 | { | |
1703 | u32 val; | |
1704 | ||
1705 | if (phy_interface_mode_is_rgmii(port->phy_interface) || | |
4a4cec72 RK |
1706 | phy_interface_mode_is_8023z(port->phy_interface) || |
1707 | port->phy_interface == PHY_INTERFACE_MODE_SGMII) { | |
fd3651b2 AT |
1708 | /* Enable the GMAC link status irq for this port */ |
1709 | val = readl(port->base + MVPP22_GMAC_INT_SUM_MASK); | |
1710 | val |= MVPP22_GMAC_INT_SUM_MASK_LINK_STAT; | |
1711 | writel(val, port->base + MVPP22_GMAC_INT_SUM_MASK); | |
1712 | } | |
1713 | ||
a9a33202 | 1714 | if (mvpp2_port_supports_xlg(port)) { |
fd3651b2 AT |
1715 | /* Enable the XLG/GIG irqs for this port */ |
1716 | val = readl(port->base + MVPP22_XLG_EXT_INT_MASK); | |
1d9b041e | 1717 | if (mvpp2_is_xlg(port->phy_interface)) |
fd3651b2 AT |
1718 | val |= MVPP22_XLG_EXT_INT_MASK_XLG; |
1719 | else | |
1720 | val |= MVPP22_XLG_EXT_INT_MASK_GIG; | |
1721 | writel(val, port->base + MVPP22_XLG_EXT_INT_MASK); | |
1722 | } | |
1723 | } | |
1724 | ||
1725 | static void mvpp22_gop_mask_irq(struct mvpp2_port *port) | |
1726 | { | |
1727 | u32 val; | |
1728 | ||
a9a33202 | 1729 | if (mvpp2_port_supports_xlg(port)) { |
fd3651b2 AT |
1730 | val = readl(port->base + MVPP22_XLG_EXT_INT_MASK); |
1731 | val &= ~(MVPP22_XLG_EXT_INT_MASK_XLG | | |
a3302baa | 1732 | MVPP22_XLG_EXT_INT_MASK_GIG); |
fd3651b2 AT |
1733 | writel(val, port->base + MVPP22_XLG_EXT_INT_MASK); |
1734 | } | |
1735 | ||
1736 | if (phy_interface_mode_is_rgmii(port->phy_interface) || | |
4a4cec72 RK |
1737 | phy_interface_mode_is_8023z(port->phy_interface) || |
1738 | port->phy_interface == PHY_INTERFACE_MODE_SGMII) { | |
fd3651b2 AT |
1739 | val = readl(port->base + MVPP22_GMAC_INT_SUM_MASK); |
1740 | val &= ~MVPP22_GMAC_INT_SUM_MASK_LINK_STAT; | |
1741 | writel(val, port->base + MVPP22_GMAC_INT_SUM_MASK); | |
1742 | } | |
1743 | } | |
1744 | ||
1745 | static void mvpp22_gop_setup_irq(struct mvpp2_port *port) | |
1746 | { | |
1747 | u32 val; | |
1748 | ||
f5015a59 RK |
1749 | mvpp2_modify(port->base + MVPP22_GMAC_INT_SUM_MASK, |
1750 | MVPP22_GMAC_INT_SUM_MASK_PTP, | |
1751 | MVPP22_GMAC_INT_SUM_MASK_PTP); | |
1752 | ||
bf2fa125 RK |
1753 | if (port->phylink || |
1754 | phy_interface_mode_is_rgmii(port->phy_interface) || | |
4a4cec72 RK |
1755 | phy_interface_mode_is_8023z(port->phy_interface) || |
1756 | port->phy_interface == PHY_INTERFACE_MODE_SGMII) { | |
fd3651b2 AT |
1757 | val = readl(port->base + MVPP22_GMAC_INT_MASK); |
1758 | val |= MVPP22_GMAC_INT_MASK_LINK_STAT; | |
1759 | writel(val, port->base + MVPP22_GMAC_INT_MASK); | |
1760 | } | |
1761 | ||
a9a33202 | 1762 | if (mvpp2_port_supports_xlg(port)) { |
fd3651b2 AT |
1763 | val = readl(port->base + MVPP22_XLG_INT_MASK); |
1764 | val |= MVPP22_XLG_INT_MASK_LINK; | |
1765 | writel(val, port->base + MVPP22_XLG_INT_MASK); | |
f5015a59 RK |
1766 | |
1767 | mvpp2_modify(port->base + MVPP22_XLG_EXT_INT_MASK, | |
1768 | MVPP22_XLG_EXT_INT_MASK_PTP, | |
1769 | MVPP22_XLG_EXT_INT_MASK_PTP); | |
fd3651b2 AT |
1770 | } |
1771 | ||
1772 | mvpp22_gop_unmask_irq(port); | |
1773 | } | |
1774 | ||
a6fe31de AT |
1775 | /* Sets the PHY mode of the COMPHY (which configures the serdes lanes). |
1776 | * | |
1777 | * The PHY mode used by the PPv2 driver comes from the network subsystem, while | |
1778 | * the one given to the COMPHY comes from the generic PHY subsystem. Hence they | |
1779 | * differ. | |
1780 | * | |
1781 | * The COMPHY configures the serdes lanes regardless of the actual use of the | |
1782 | * lanes by the physical layer. This is why configurations like | |
1783 | * "PPv2 (2500BaseX) - COMPHY (2500SGMII)" are valid. | |
1784 | */ | |
bb7bbb6e MB |
1785 | static int mvpp22_comphy_init(struct mvpp2_port *port, |
1786 | phy_interface_t interface) | |
542897d9 | 1787 | { |
542897d9 AT |
1788 | int ret; |
1789 | ||
1790 | if (!port->comphy) | |
1791 | return 0; | |
1792 | ||
bb7bbb6e | 1793 | ret = phy_set_mode_ext(port->comphy, PHY_MODE_ETHERNET, interface); |
542897d9 AT |
1794 | if (ret) |
1795 | return ret; | |
1796 | ||
1797 | return phy_power_on(port->comphy); | |
1798 | } | |
1799 | ||
3f518509 MW |
1800 | static void mvpp2_port_enable(struct mvpp2_port *port) |
1801 | { | |
1802 | u32 val; | |
1803 | ||
a9a33202 RK |
1804 | if (mvpp2_port_supports_xlg(port) && |
1805 | mvpp2_is_xlg(port->phy_interface)) { | |
725757ae | 1806 | val = readl(port->base + MVPP22_XLG_CTRL0_REG); |
649e51d5 | 1807 | val |= MVPP22_XLG_CTRL0_PORT_EN; |
725757ae AT |
1808 | val &= ~MVPP22_XLG_CTRL0_MIB_CNT_DIS; |
1809 | writel(val, port->base + MVPP22_XLG_CTRL0_REG); | |
1810 | } else { | |
1811 | val = readl(port->base + MVPP2_GMAC_CTRL_0_REG); | |
1812 | val |= MVPP2_GMAC_PORT_EN_MASK; | |
1813 | val |= MVPP2_GMAC_MIB_CNTR_EN_MASK; | |
1814 | writel(val, port->base + MVPP2_GMAC_CTRL_0_REG); | |
1815 | } | |
3f518509 MW |
1816 | } |
1817 | ||
1818 | static void mvpp2_port_disable(struct mvpp2_port *port) | |
1819 | { | |
1820 | u32 val; | |
1821 | ||
a9a33202 RK |
1822 | if (mvpp2_port_supports_xlg(port) && |
1823 | mvpp2_is_xlg(port->phy_interface)) { | |
725757ae | 1824 | val = readl(port->base + MVPP22_XLG_CTRL0_REG); |
4bb04326 AT |
1825 | val &= ~MVPP22_XLG_CTRL0_PORT_EN; |
1826 | writel(val, port->base + MVPP22_XLG_CTRL0_REG); | |
725757ae | 1827 | } |
6b10bfc5 AT |
1828 | |
1829 | val = readl(port->base + MVPP2_GMAC_CTRL_0_REG); | |
1830 | val &= ~(MVPP2_GMAC_PORT_EN_MASK); | |
1831 | writel(val, port->base + MVPP2_GMAC_CTRL_0_REG); | |
3f518509 MW |
1832 | } |
1833 | ||
1834 | /* Set IEEE 802.3x Flow Control Xon Packet Transmission Mode */ | |
1835 | static void mvpp2_port_periodic_xon_disable(struct mvpp2_port *port) | |
1836 | { | |
1837 | u32 val; | |
1838 | ||
1839 | val = readl(port->base + MVPP2_GMAC_CTRL_1_REG) & | |
1840 | ~MVPP2_GMAC_PERIODIC_XON_EN_MASK; | |
1841 | writel(val, port->base + MVPP2_GMAC_CTRL_1_REG); | |
1842 | } | |
1843 | ||
1844 | /* Configure loopback port */ | |
4bb04326 AT |
1845 | static void mvpp2_port_loopback_set(struct mvpp2_port *port, |
1846 | const struct phylink_link_state *state) | |
3f518509 MW |
1847 | { |
1848 | u32 val; | |
1849 | ||
1850 | val = readl(port->base + MVPP2_GMAC_CTRL_1_REG); | |
1851 | ||
4bb04326 | 1852 | if (state->speed == 1000) |
3f518509 MW |
1853 | val |= MVPP2_GMAC_GMII_LB_EN_MASK; |
1854 | else | |
1855 | val &= ~MVPP2_GMAC_GMII_LB_EN_MASK; | |
1856 | ||
94bfe438 RK |
1857 | if (phy_interface_mode_is_8023z(state->interface) || |
1858 | state->interface == PHY_INTERFACE_MODE_SGMII) | |
3f518509 MW |
1859 | val |= MVPP2_GMAC_PCS_LB_EN_MASK; |
1860 | else | |
1861 | val &= ~MVPP2_GMAC_PCS_LB_EN_MASK; | |
1862 | ||
1863 | writel(val, port->base + MVPP2_GMAC_CTRL_1_REG); | |
1864 | } | |
1865 | ||
39b96315 SA |
1866 | enum { |
1867 | ETHTOOL_XDP_REDIRECT, | |
1868 | ETHTOOL_XDP_PASS, | |
1869 | ETHTOOL_XDP_DROP, | |
1870 | ETHTOOL_XDP_TX, | |
1871 | ETHTOOL_XDP_TX_ERR, | |
1872 | ETHTOOL_XDP_XMIT, | |
1873 | ETHTOOL_XDP_XMIT_ERR, | |
1874 | }; | |
1875 | ||
118d6298 MR |
1876 | struct mvpp2_ethtool_counter { |
1877 | unsigned int offset; | |
1878 | const char string[ETH_GSTRING_LEN]; | |
1879 | bool reg_is_64b; | |
1880 | }; | |
1881 | ||
1882 | static u64 mvpp2_read_count(struct mvpp2_port *port, | |
1883 | const struct mvpp2_ethtool_counter *counter) | |
1884 | { | |
1885 | u64 val; | |
1886 | ||
1887 | val = readl(port->stats_base + counter->offset); | |
1888 | if (counter->reg_is_64b) | |
1889 | val += (u64)readl(port->stats_base + counter->offset + 4) << 32; | |
1890 | ||
1891 | return val; | |
1892 | } | |
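
Editor's note: the 64-bit MIB counters are laid out as two consecutive 32-bit registers, low word at offset and high word at offset + 4. A self-contained model of the composition (a plain array stands in for the stats window, and word offsets are used instead of byte offsets):

	#include <stdint.h>

	/* Editor's sketch: rebuild a 64-bit counter from its two 32-bit
	 * halves, low half first, as mvpp2_read_count() does for the
	 * reg_is_64b counters.
	 */
	static uint64_t demo_read_count64(const uint32_t *stats, unsigned int off)
	{
		uint64_t val;

		val = stats[off];			/* low 32 bits */
		val += (uint64_t)stats[off + 1] << 32;	/* high 32 bits */
		return val;
	}

Whether low-then-high is torn-proof depends on how the hardware latches the counter pair; the driver assumes this ordering is safe.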
1893 | ||
9bea6897 MC |
1894 | /* Some counters are accessed indirectly by first writing an index to |
1895 | * MVPP2_CTRS_IDX. The index can represent various resources depending on the | |
1896 | * register we access: it can be a hit counter for some classification tables,
1897 | * or a counter specific to an rxq, a txq or a buffer pool.
1898 | */ | |
1899 | static u32 mvpp2_read_index(struct mvpp2 *priv, u32 index, u32 reg) | |
1900 | { | |
1901 | mvpp2_write(priv, MVPP2_CTRS_IDX, index); | |
1902 | return mvpp2_read(priv, reg); | |
1903 | } | |
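
Editor's note: the select-then-read idiom in miniature, with one index register steering what a shared counter register reports. A hedged userspace model (the struct and names are invented for illustration):

	#include <stdint.h>

	/* Editor's sketch of MVPP2_CTRS_IDX-style indirect access: write
	 * the resource index to a window register, then read the shared
	 * counter register. Concurrent users of such a window must be
	 * serialized, or the select and the read can interleave and
	 * return another resource's counter.
	 */
	struct demo_ctr_bank {
		uint32_t idx;		/* window: selects rxq/txq/pool/table entry */
		const uint32_t *ctrs;	/* per-index counters behind the window */
	};

	static uint32_t demo_read_index(struct demo_ctr_bank *b, uint32_t index)
	{
		b->idx = index;		/* mvpp2_write(priv, MVPP2_CTRS_IDX, index) */
		return b->ctrs[b->idx];	/* mvpp2_read(priv, reg) */
	}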
1904 | ||
118d6298 MR |
1905 | /* Software and hardware statistics are, by design, incremented at different
1906 | * points in the packet-processing chain. Incoming packets can therefore be
1907 | * dropped after being counted by hardware but before reaching the software
1908 | * statistics (most probably multicast packets), while in the opposite
e34be16b | 1909 | * direction, during transmission, FCS bytes are appended and TSO skbs are
118d6298 MR |
1910 | * split, adding extra header bytes. Hence, statistics gathered from
1911 | * userspace with ifconfig (software) and ethtool (hardware) cannot be
1912 | * compared.
1913 | */ | |
f9fa96b9 | 1914 | static const struct mvpp2_ethtool_counter mvpp2_ethtool_mib_regs[] = { |
118d6298 MR |
1915 | { MVPP2_MIB_GOOD_OCTETS_RCVD, "good_octets_received", true }, |
1916 | { MVPP2_MIB_BAD_OCTETS_RCVD, "bad_octets_received" }, | |
1917 | { MVPP2_MIB_CRC_ERRORS_SENT, "crc_errors_sent" }, | |
1918 | { MVPP2_MIB_UNICAST_FRAMES_RCVD, "unicast_frames_received" }, | |
1919 | { MVPP2_MIB_BROADCAST_FRAMES_RCVD, "broadcast_frames_received" }, | |
1920 | { MVPP2_MIB_MULTICAST_FRAMES_RCVD, "multicast_frames_received" }, | |
1921 | { MVPP2_MIB_FRAMES_64_OCTETS, "frames_64_octets" }, | |
1922 | { MVPP2_MIB_FRAMES_65_TO_127_OCTETS, "frames_65_to_127_octet" }, | |
1923 | { MVPP2_MIB_FRAMES_128_TO_255_OCTETS, "frames_128_to_255_octet" }, | |
1924 | { MVPP2_MIB_FRAMES_256_TO_511_OCTETS, "frames_256_to_511_octet" }, | |
1925 | { MVPP2_MIB_FRAMES_512_TO_1023_OCTETS, "frames_512_to_1023_octet" }, | |
1926 | { MVPP2_MIB_FRAMES_1024_TO_MAX_OCTETS, "frames_1024_to_max_octet" }, | |
1927 | { MVPP2_MIB_GOOD_OCTETS_SENT, "good_octets_sent", true }, | |
1928 | { MVPP2_MIB_UNICAST_FRAMES_SENT, "unicast_frames_sent" }, | |
1929 | { MVPP2_MIB_MULTICAST_FRAMES_SENT, "multicast_frames_sent" }, | |
1930 | { MVPP2_MIB_BROADCAST_FRAMES_SENT, "broadcast_frames_sent" }, | |
1931 | { MVPP2_MIB_FC_SENT, "fc_sent" }, | |
1932 | { MVPP2_MIB_FC_RCVD, "fc_received" }, | |
1933 | { MVPP2_MIB_RX_FIFO_OVERRUN, "rx_fifo_overrun" }, | |
1934 | { MVPP2_MIB_UNDERSIZE_RCVD, "undersize_received" }, | |
1935 | { MVPP2_MIB_FRAGMENTS_RCVD, "fragments_received" }, | |
1936 | { MVPP2_MIB_OVERSIZE_RCVD, "oversize_received" }, | |
1937 | { MVPP2_MIB_JABBER_RCVD, "jabber_received" }, | |
1938 | { MVPP2_MIB_MAC_RCV_ERROR, "mac_receive_error" }, | |
1939 | { MVPP2_MIB_BAD_CRC_EVENT, "bad_crc_event" }, | |
1940 | { MVPP2_MIB_COLLISION, "collision" }, | |
1941 | { MVPP2_MIB_LATE_COLLISION, "late_collision" }, | |
1942 | }; | |
1943 | ||
9bea6897 MC |
1944 | static const struct mvpp2_ethtool_counter mvpp2_ethtool_port_regs[] = { |
1945 | { MVPP2_OVERRUN_ETH_DROP, "rx_fifo_or_parser_overrun_drops" }, | |
1946 | { MVPP2_CLS_ETH_DROP, "rx_classifier_drops" }, | |
1947 | }; | |
1948 | ||
1949 | static const struct mvpp2_ethtool_counter mvpp2_ethtool_txq_regs[] = { | |
1950 | { MVPP2_TX_DESC_ENQ_CTR, "txq_%d_desc_enqueue" }, | |
1951 | { MVPP2_TX_DESC_ENQ_TO_DDR_CTR, "txq_%d_desc_enqueue_to_ddr" }, | |
1952 | { MVPP2_TX_BUFF_ENQ_TO_DDR_CTR, "txq_%d_buff_enqueue_to_ddr" }, | |
1953 | { MVPP2_TX_DESC_ENQ_HW_FWD_CTR, "txq_%d_desc_hardware_forwarded" }, | |
1954 | { MVPP2_TX_PKTS_DEQ_CTR, "txq_%d_packets_dequeued" }, | |
1955 | { MVPP2_TX_PKTS_FULL_QUEUE_DROP_CTR, "txq_%d_queue_full_drops" }, | |
1956 | { MVPP2_TX_PKTS_EARLY_DROP_CTR, "txq_%d_packets_early_drops" }, | |
1957 | { MVPP2_TX_PKTS_BM_DROP_CTR, "txq_%d_packets_bm_drops" }, | |
1958 | { MVPP2_TX_PKTS_BM_MC_DROP_CTR, "txq_%d_packets_rep_bm_drops" }, | |
1959 | }; | |
1960 | ||
1961 | static const struct mvpp2_ethtool_counter mvpp2_ethtool_rxq_regs[] = { | |
1962 | { MVPP2_RX_DESC_ENQ_CTR, "rxq_%d_desc_enqueue" }, | |
1963 | { MVPP2_RX_PKTS_FULL_QUEUE_DROP_CTR, "rxq_%d_queue_full_drops" }, | |
1964 | { MVPP2_RX_PKTS_EARLY_DROP_CTR, "rxq_%d_packets_early_drops" }, | |
1965 | { MVPP2_RX_PKTS_BM_DROP_CTR, "rxq_%d_packets_bm_drops" }, | |
1966 | }; | |
1967 | ||
39b96315 SA |
1968 | static const struct mvpp2_ethtool_counter mvpp2_ethtool_xdp[] = { |
1969 | { ETHTOOL_XDP_REDIRECT, "rx_xdp_redirect", }, | |
1970 | { ETHTOOL_XDP_PASS, "rx_xdp_pass", }, | |
1971 | { ETHTOOL_XDP_DROP, "rx_xdp_drop", }, | |
1972 | { ETHTOOL_XDP_TX, "rx_xdp_tx", }, | |
1973 | { ETHTOOL_XDP_TX_ERR, "rx_xdp_tx_errors", }, | |
1974 | { ETHTOOL_XDP_XMIT, "tx_xdp_xmit", }, | |
1975 | { ETHTOOL_XDP_XMIT_ERR, "tx_xdp_xmit_errors", }, | |
1976 | }; | |
1977 | ||
9bea6897 MC |
1978 | #define MVPP2_N_ETHTOOL_STATS(ntxqs, nrxqs) (ARRAY_SIZE(mvpp2_ethtool_mib_regs) + \ |
1979 | ARRAY_SIZE(mvpp2_ethtool_port_regs) + \ | |
1980 | (ARRAY_SIZE(mvpp2_ethtool_txq_regs) * (ntxqs)) + \ | |
39b96315 SA |
1981 | (ARRAY_SIZE(mvpp2_ethtool_rxq_regs) * (nrxqs)) + \ |
1982 | ARRAY_SIZE(mvpp2_ethtool_xdp)) | |
9bea6897 | 1983 | |
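
Editor's note: worked arithmetic for the macro above, using the array sizes as they stand (27 MIB, 2 port, 9 per-txq, 4 per-rxq and 7 XDP entries) and assumed queue counts of ntxqs = 8, nrxqs = 4:

	/* Editor's worked example (queue counts assumed):
	 *   MVPP2_N_ETHTOOL_STATS(8, 4) = 27 + 2 + (9 * 8) + (4 * 4) + 7
	 *                               = 27 + 2 + 72 + 16 + 7
	 *                               = 124 statistics
	 */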
118d6298 MR |
1984 | static void mvpp2_ethtool_get_strings(struct net_device *netdev, u32 sset, |
1985 | u8 *data) | |
1986 | { | |
9bea6897 | 1987 | struct mvpp2_port *port = netdev_priv(netdev); |
2d7dfe2d | 1988 | const char *str; |
9bea6897 | 1989 | int i, q; |
118d6298 | 1990 | |
9bea6897 MC |
1991 | if (sset != ETH_SS_STATS) |
1992 | return; | |
1993 | ||
2d7dfe2d RP |
1994 | for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_mib_regs); i++) |
1995 | ethtool_puts(&data, mvpp2_ethtool_mib_regs[i].string); | |
9bea6897 | 1996 | |
2d7dfe2d RP |
1997 | for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_port_regs); i++) |
1998 | ethtool_puts(&data, mvpp2_ethtool_port_regs[i].string); | |
9bea6897 | 1999 | |
2d7dfe2d | 2000 | for (q = 0; q < port->ntxqs; q++) |
9bea6897 | 2001 | for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_txq_regs); i++) { |
2d7dfe2d RP |
2002 | str = mvpp2_ethtool_txq_regs[i].string; |
2003 | ethtool_sprintf(&data, str, q); | |
9bea6897 | 2004 | } |
9bea6897 | 2005 | |
2d7dfe2d | 2006 | for (q = 0; q < port->nrxqs; q++) |
9bea6897 | 2007 | for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_rxq_regs); i++) { |
2d7dfe2d RP |
2008 | str = mvpp2_ethtool_rxq_regs[i].string; |
2009 | ethtool_sprintf(&data, str, q); | |
9bea6897 | 2010 | } |
39b96315 | 2011 | |
2d7dfe2d RP |
2012 | for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_xdp); i++) |
2013 | ethtool_puts(&data, mvpp2_ethtool_xdp[i].string); | |
39b96315 SA |
2014 | } |
2015 | ||
2016 | static void | |
2017 | mvpp2_get_xdp_stats(struct mvpp2_port *port, struct mvpp2_pcpu_stats *xdp_stats) | |
2018 | { | |
2019 | unsigned int start; | |
2020 | unsigned int cpu; | |
2021 | ||
2022 | /* Gather XDP Statistics */ | |
2023 | for_each_possible_cpu(cpu) { | |
2024 | struct mvpp2_pcpu_stats *cpu_stats; | |
2025 | u64 xdp_redirect; | |
2026 | u64 xdp_pass; | |
2027 | u64 xdp_drop; | |
2028 | u64 xdp_xmit; | |
2029 | u64 xdp_xmit_err; | |
2030 | u64 xdp_tx; | |
2031 | u64 xdp_tx_err; | |
2032 | ||
2033 | cpu_stats = per_cpu_ptr(port->stats, cpu); | |
2034 | do { | |
068c38ad | 2035 | start = u64_stats_fetch_begin(&cpu_stats->syncp); |
39b96315 SA |
2036 | xdp_redirect = cpu_stats->xdp_redirect; |
2037 | xdp_pass = cpu_stats->xdp_pass; | |
2038 | xdp_drop = cpu_stats->xdp_drop; | |
2039 | xdp_xmit = cpu_stats->xdp_xmit; | |
2040 | xdp_xmit_err = cpu_stats->xdp_xmit_err; | |
2041 | xdp_tx = cpu_stats->xdp_tx; | |
2042 | xdp_tx_err = cpu_stats->xdp_tx_err; | |
068c38ad | 2043 | } while (u64_stats_fetch_retry(&cpu_stats->syncp, start)); |
39b96315 SA |
2044 | |
2045 | xdp_stats->xdp_redirect += xdp_redirect; | |
2046 | xdp_stats->xdp_pass += xdp_pass; | |
2047 | xdp_stats->xdp_drop += xdp_drop; | |
2048 | xdp_stats->xdp_xmit += xdp_xmit; | |
2049 | xdp_stats->xdp_xmit_err += xdp_xmit_err; | |
2050 | xdp_stats->xdp_tx += xdp_tx; | |
2051 | xdp_stats->xdp_tx_err += xdp_tx_err; | |
2052 | } | |
118d6298 MR |
2053 | } |
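
Editor's note: the fetch_begin/fetch_retry pair above is the standard seqcount-style snapshot, re-copying the counters whenever a writer updated them mid-read. A minimal sketch of one such consumer, assuming the usual u64_stats API and an invented stats struct:

	#include <linux/u64_stats_sync.h>

	/* Editor's sketch, not driver code: take a torn-free snapshot of
	 * one per-CPU 64-bit counter. The loop retries whenever the
	 * writer side ran u64_stats_update_begin()/end() during our copy.
	 */
	struct demo_pcpu_stats {
		struct u64_stats_sync syncp;
		u64 xdp_drop;
	};

	static u64 demo_snapshot_xdp_drop(struct demo_pcpu_stats *s)
	{
		unsigned int start;
		u64 drop;

		do {
			start = u64_stats_fetch_begin(&s->syncp);
			drop = s->xdp_drop;
		} while (u64_stats_fetch_retry(&s->syncp, start));

		return drop;
	}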
2054 | ||
9bea6897 MC |
2055 | static void mvpp2_read_stats(struct mvpp2_port *port) |
2056 | { | |
39b96315 SA |
2057 | struct mvpp2_pcpu_stats xdp_stats = {}; |
2058 | const struct mvpp2_ethtool_counter *s; | |
9bea6897 MC |
2059 | u64 *pstats; |
2060 | int i, q; | |
2061 | ||
2062 | pstats = port->ethtool_stats; | |
2063 | ||
2064 | for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_mib_regs); i++) | |
2065 | *pstats++ += mvpp2_read_count(port, &mvpp2_ethtool_mib_regs[i]); | |
2066 | ||
2067 | for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_port_regs); i++) | |
2068 | *pstats++ += mvpp2_read(port->priv, | |
2069 | mvpp2_ethtool_port_regs[i].offset + | |
2070 | 4 * port->id); | |
2071 | ||
2072 | for (q = 0; q < port->ntxqs; q++) | |
2073 | for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_txq_regs); i++) | |
2074 | *pstats++ += mvpp2_read_index(port->priv, | |
cc970925 | 2075 | MVPP22_CTRS_TX_CTR(port->id, q), |
9bea6897 MC |
2076 | mvpp2_ethtool_txq_regs[i].offset); |
2077 | ||
2078 | /* Rxqs are numbered from 0 from the user standpoint, but not from the | |
2079 | * driver's. We need to add the port->first_rxq offset. | |
2080 | */ | |
2081 | for (q = 0; q < port->nrxqs; q++) | |
2082 | for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_rxq_regs); i++) | |
2083 | *pstats++ += mvpp2_read_index(port->priv, | |
cc970925 | 2084 | port->first_rxq + q, |
9bea6897 | 2085 | mvpp2_ethtool_rxq_regs[i].offset); |
39b96315 SA |
2086 | |
2087 | /* Gather XDP Statistics */ | |
2088 | mvpp2_get_xdp_stats(port, &xdp_stats); | |
2089 | ||
2090 | for (i = 0, s = mvpp2_ethtool_xdp; | |
2091 | s < mvpp2_ethtool_xdp + ARRAY_SIZE(mvpp2_ethtool_xdp); | |
2092 | s++, i++) { | |
2093 | switch (s->offset) { | |
2094 | case ETHTOOL_XDP_REDIRECT: | |
2095 | *pstats++ = xdp_stats.xdp_redirect; | |
2096 | break; | |
2097 | case ETHTOOL_XDP_PASS: | |
2098 | *pstats++ = xdp_stats.xdp_pass; | |
2099 | break; | |
2100 | case ETHTOOL_XDP_DROP: | |
2101 | *pstats++ = xdp_stats.xdp_drop; | |
2102 | break; | |
2103 | case ETHTOOL_XDP_TX: | |
2104 | *pstats++ = xdp_stats.xdp_tx; | |
2105 | break; | |
2106 | case ETHTOOL_XDP_TX_ERR: | |
2107 | *pstats++ = xdp_stats.xdp_tx_err; | |
2108 | break; | |
2109 | case ETHTOOL_XDP_XMIT: | |
2110 | *pstats++ = xdp_stats.xdp_xmit; | |
2111 | break; | |
2112 | case ETHTOOL_XDP_XMIT_ERR: | |
2113 | *pstats++ = xdp_stats.xdp_xmit_err; | |
2114 | break; | |
2115 | } | |
2116 | } | |
9bea6897 MC |
2117 | } |
2118 | ||
118d6298 MR |
2119 | static void mvpp2_gather_hw_statistics(struct work_struct *work) |
2120 | { | |
2121 | struct delayed_work *del_work = to_delayed_work(work); | |
e5c500eb MR |
2122 | struct mvpp2_port *port = container_of(del_work, struct mvpp2_port, |
2123 | stats_work); | |
118d6298 | 2124 | |
e5c500eb | 2125 | mutex_lock(&port->gather_stats_lock); |
118d6298 | 2126 | |
9bea6897 | 2127 | mvpp2_read_stats(port); |
118d6298 MR |
2128 | |
2129 | /* No need to read the counters again right after this function if it
2130 | * was called asynchronously by the user (i.e. via ethtool).
2131 | */ | |
e5c500eb MR |
2132 | cancel_delayed_work(&port->stats_work); |
2133 | queue_delayed_work(port->priv->stats_queue, &port->stats_work, | |
118d6298 MR |
2134 | MVPP2_MIB_COUNTERS_STATS_DELAY); |
2135 | ||
e5c500eb | 2136 | mutex_unlock(&port->gather_stats_lock); |
118d6298 MR |
2137 | } |
2138 | ||
2139 | static void mvpp2_ethtool_get_stats(struct net_device *dev, | |
2140 | struct ethtool_stats *stats, u64 *data) | |
2141 | { | |
2142 | struct mvpp2_port *port = netdev_priv(dev); | |
2143 | ||
e5c500eb MR |
2144 | /* Update statistics for the given port, then take the lock to avoid |
2145 | * concurrent accesses to the ethtool_stats structure during its copy.
2146 | */ | |
2147 | mvpp2_gather_hw_statistics(&port->stats_work.work); | |
118d6298 | 2148 | |
e5c500eb | 2149 | mutex_lock(&port->gather_stats_lock); |
118d6298 | 2150 | memcpy(data, port->ethtool_stats, |
9bea6897 | 2151 | sizeof(u64) * MVPP2_N_ETHTOOL_STATS(port->ntxqs, port->nrxqs)); |
e5c500eb | 2152 | mutex_unlock(&port->gather_stats_lock); |
118d6298 MR |
2153 | } |
2154 | ||
2155 | static int mvpp2_ethtool_get_sset_count(struct net_device *dev, int sset) | |
2156 | { | |
9bea6897 MC |
2157 | struct mvpp2_port *port = netdev_priv(dev); |
2158 | ||
118d6298 | 2159 | if (sset == ETH_SS_STATS) |
9bea6897 | 2160 | return MVPP2_N_ETHTOOL_STATS(port->ntxqs, port->nrxqs); |
118d6298 MR |
2161 | |
2162 | return -EOPNOTSUPP; | |
2163 | } | |
2164 | ||
649e51d5 | 2165 | static void mvpp2_mac_reset_assert(struct mvpp2_port *port) |
3f518509 | 2166 | { |
649e51d5 | 2167 | u32 val; |
118d6298 | 2168 | |
316734fd RK |
2169 | val = readl(port->base + MVPP2_GMAC_CTRL_2_REG) | |
2170 | MVPP2_GMAC_PORT_RESET_MASK; | |
3f518509 | 2171 | writel(val, port->base + MVPP2_GMAC_CTRL_2_REG); |
649e51d5 | 2172 | |
f704177e | 2173 | if (port->priv->hw_version >= MVPP22 && port->gop_id == 0) { |
649e51d5 AT |
2174 | val = readl(port->base + MVPP22_XLG_CTRL0_REG) & |
2175 | ~MVPP22_XLG_CTRL0_MAC_RESET_DIS; | |
2176 | writel(val, port->base + MVPP22_XLG_CTRL0_REG); | |
2177 | } | |
3f518509 MW |
2178 | } |
2179 | ||
7409e66e AT |
2180 | static void mvpp22_pcs_reset_assert(struct mvpp2_port *port) |
2181 | { | |
2182 | struct mvpp2 *priv = port->priv; | |
2183 | void __iomem *mpcs, *xpcs; | |
2184 | u32 val; | |
2185 | ||
60dcd6b7 | 2186 | if (port->priv->hw_version == MVPP21 || port->gop_id != 0) |
7409e66e AT |
2187 | return; |
2188 | ||
2189 | mpcs = priv->iface_base + MVPP22_MPCS_BASE(port->gop_id); | |
2190 | xpcs = priv->iface_base + MVPP22_XPCS_BASE(port->gop_id); | |
2191 | ||
2192 | val = readl(mpcs + MVPP22_MPCS_CLK_RESET); | |
2193 | val &= ~(MAC_CLK_RESET_MAC | MAC_CLK_RESET_SD_RX | MAC_CLK_RESET_SD_TX); | |
2194 | val |= MVPP22_MPCS_CLK_RESET_DIV_SET; | |
2195 | writel(val, mpcs + MVPP22_MPCS_CLK_RESET); | |
2196 | ||
2197 | val = readl(xpcs + MVPP22_XPCS_CFG0); | |
2198 | writel(val & ~MVPP22_XPCS_CFG0_RESET_DIS, xpcs + MVPP22_XPCS_CFG0); | |
2199 | } | |
2200 | ||
bb7bbb6e MB |
2201 | static void mvpp22_pcs_reset_deassert(struct mvpp2_port *port, |
2202 | phy_interface_t interface) | |
7409e66e AT |
2203 | { |
2204 | struct mvpp2 *priv = port->priv; | |
2205 | void __iomem *mpcs, *xpcs; | |
2206 | u32 val; | |
2207 | ||
60dcd6b7 | 2208 | if (port->priv->hw_version == MVPP21 || port->gop_id != 0) |
7409e66e AT |
2209 | return; |
2210 | ||
2211 | mpcs = priv->iface_base + MVPP22_MPCS_BASE(port->gop_id); | |
2212 | xpcs = priv->iface_base + MVPP22_XPCS_BASE(port->gop_id); | |
2213 | ||
bb7bbb6e | 2214 | switch (interface) { |
4043ec70 | 2215 | case PHY_INTERFACE_MODE_5GBASER: |
e0f909bc | 2216 | case PHY_INTERFACE_MODE_10GBASER: |
7409e66e AT |
2217 | val = readl(mpcs + MVPP22_MPCS_CLK_RESET); |
2218 | val |= MAC_CLK_RESET_MAC | MAC_CLK_RESET_SD_RX | | |
2219 | MAC_CLK_RESET_SD_TX; | |
2220 | val &= ~MVPP22_MPCS_CLK_RESET_DIV_SET; | |
2221 | writel(val, mpcs + MVPP22_MPCS_CLK_RESET); | |
2222 | break; | |
2223 | case PHY_INTERFACE_MODE_XAUI: | |
2224 | case PHY_INTERFACE_MODE_RXAUI: | |
2225 | val = readl(xpcs + MVPP22_XPCS_CFG0); | |
2226 | writel(val | MVPP22_XPCS_CFG0_RESET_DIS, xpcs + MVPP22_XPCS_CFG0); | |
2227 | break; | |
2228 | default: | |
2229 | break; | |
2230 | } | |
2231 | } | |
2232 | ||
3f518509 MW |
2233 | /* Change maximum receive size of the port */ |
2234 | static inline void mvpp2_gmac_max_rx_size_set(struct mvpp2_port *port) | |
2235 | { | |
2236 | u32 val; | |
2237 | ||
2238 | val = readl(port->base + MVPP2_GMAC_CTRL_0_REG); | |
2239 | val &= ~MVPP2_GMAC_MAX_RX_SIZE_MASK; | |
2240 | val |= (((port->pkt_size - MVPP2_MH_SIZE) / 2) << | |
2241 | MVPP2_GMAC_MAX_RX_SIZE_OFFS); | |
2242 | writel(val, port->base + MVPP2_GMAC_CTRL_0_REG); | |
2243 | } | |
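
Editor's note: the MAX_RX_SIZE field is stored in 2-byte units after subtracting the 2-byte Marvell header, hence the division. Worked numbers, with the packet size assumed:

	/* Editor's worked example (pkt_size assumed):
	 *   pkt_size      = 1518 bytes
	 *   MVPP2_MH_SIZE = 2 bytes (Marvell header)
	 *
	 *   field = (pkt_size - MVPP2_MH_SIZE) / 2
	 *         = (1518 - 2) / 2
	 *         = 758 two-byte units
	 *
	 * The same halving appears in mvpp2_xlg_max_rx_size_set() below
	 * for the XLG MAC's FRAMESIZELIMIT field.
	 */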
2244 | ||
76eb1b1d SC |
2245 | /* Change maximum receive size of the port */ |
2246 | static inline void mvpp2_xlg_max_rx_size_set(struct mvpp2_port *port) | |
2247 | { | |
2248 | u32 val; | |
2249 | ||
2250 | val = readl(port->base + MVPP22_XLG_CTRL1_REG); | |
2251 | val &= ~MVPP22_XLG_CTRL1_FRAMESIZELIMIT_MASK; | |
2252 | val |= ((port->pkt_size - MVPP2_MH_SIZE) / 2) << | |
ec15ecde | 2253 | MVPP22_XLG_CTRL1_FRAMESIZELIMIT_OFFS; |
76eb1b1d SC |
2254 | writel(val, port->base + MVPP22_XLG_CTRL1_REG); |
2255 | } | |
2256 | ||
3f518509 MW |
2257 | /* Set defaults to the MVPP2 port */ |
2258 | static void mvpp2_defaults_set(struct mvpp2_port *port) | |
2259 | { | |
21808437 | 2260 | int tx_port_num, val, queue, lrxq; |
3f518509 | 2261 | |
3d9017d9 | 2262 | if (port->priv->hw_version == MVPP21) { |
3d9017d9 TP |
2263 | /* Update TX FIFO MIN Threshold */ |
2264 | val = readl(port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG); | |
2265 | val &= ~MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK; | |
2266 | /* Min. TX threshold must be less than minimal packet length */ | |
2267 | val |= MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(64 - 4 - 2); | |
2268 | writel(val, port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG); | |
2269 | } | |
3f518509 MW |
2270 | |
2271 | /* Disable Legacy WRR, Disable EJP, Release from reset */ | |
2272 | tx_port_num = mvpp2_egress_port(port); | |
2273 | mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, | |
2274 | tx_port_num); | |
2275 | mvpp2_write(port->priv, MVPP2_TXP_SCHED_CMD_1_REG, 0); | |
2276 | ||
4251ea5b MC |
2277 | /* Set TXQ scheduling to Round-Robin */ |
2278 | mvpp2_write(port->priv, MVPP2_TXP_SCHED_FIXED_PRIO_REG, 0); | |
2279 | ||
3f518509 | 2280 | /* Close bandwidth for all queues */ |
21808437 | 2281 | for (queue = 0; queue < MVPP2_MAX_TXQ; queue++) |
3f518509 | 2282 | mvpp2_write(port->priv, |
21808437 | 2283 | MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(queue), 0); |
3f518509 MW |
2284 | |
2285 | /* Set refill period to 1 usec, refill tokens | |
2286 | * and bucket size to maximum | |
2287 | */ | |
2288 | mvpp2_write(port->priv, MVPP2_TXP_SCHED_PERIOD_REG, | |
2289 | port->priv->tclk / USEC_PER_SEC); | |
2290 | val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_REFILL_REG); | |
2291 | val &= ~MVPP2_TXP_REFILL_PERIOD_ALL_MASK; | |
2292 | val |= MVPP2_TXP_REFILL_PERIOD_MASK(1); | |
2293 | val |= MVPP2_TXP_REFILL_TOKENS_ALL_MASK; | |
2294 | mvpp2_write(port->priv, MVPP2_TXP_SCHED_REFILL_REG, val); | |
2295 | val = MVPP2_TXP_TOKEN_SIZE_MAX; | |
2296 | mvpp2_write(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val); | |
2297 | ||
2298 | /* Set MaximumLowLatencyPacketSize value to 256 */ | |
2299 | mvpp2_write(port->priv, MVPP2_RX_CTRL_REG(port->id), | |
2300 | MVPP2_RX_USE_PSEUDO_FOR_CSUM_MASK | | |
2301 | MVPP2_RX_LOW_LATENCY_PKT_SIZE(256)); | |
2302 | ||
2303 | /* Enable Rx cache snoop */ | |
09f83975 | 2304 | for (lrxq = 0; lrxq < port->nrxqs; lrxq++) { |
3f518509 MW |
2305 | queue = port->rxqs[lrxq]->id; |
2306 | val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue)); | |
2307 | val |= MVPP2_SNOOP_PKT_SIZE_MASK | | |
2308 | MVPP2_SNOOP_BUF_HDR_MASK; | |
2309 | mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val); | |
2310 | } | |
2311 | ||
2312 | /* By default, mask all interrupts for all present CPUs */
2313 | mvpp2_interrupts_disable(port); | |
2314 | } | |
2315 | ||
2316 | /* Enable/disable receiving packets */ | |
2317 | static void mvpp2_ingress_enable(struct mvpp2_port *port) | |
2318 | { | |
2319 | u32 val; | |
2320 | int lrxq, queue; | |
2321 | ||
09f83975 | 2322 | for (lrxq = 0; lrxq < port->nrxqs; lrxq++) { |
3f518509 MW |
2323 | queue = port->rxqs[lrxq]->id; |
2324 | val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue)); | |
2325 | val &= ~MVPP2_RXQ_DISABLE_MASK; | |
2326 | mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val); | |
2327 | } | |
2328 | } | |
2329 | ||
2330 | static void mvpp2_ingress_disable(struct mvpp2_port *port) | |
2331 | { | |
2332 | u32 val; | |
2333 | int lrxq, queue; | |
2334 | ||
09f83975 | 2335 | for (lrxq = 0; lrxq < port->nrxqs; lrxq++) { |
3f518509 MW |
2336 | queue = port->rxqs[lrxq]->id; |
2337 | val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue)); | |
2338 | val |= MVPP2_RXQ_DISABLE_MASK; | |
2339 | mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val); | |
2340 | } | |
2341 | } | |
2342 | ||
2343 | /* Enable transmit via physical egress queue | |
2344 | * - HW starts taking descriptors from DRAM
2345 | */ | |
2346 | static void mvpp2_egress_enable(struct mvpp2_port *port) | |
2347 | { | |
2348 | u32 qmap; | |
2349 | int queue; | |
2350 | int tx_port_num = mvpp2_egress_port(port); | |
2351 | ||
2352 | /* Enable all initialized TXQs. */
2353 | qmap = 0; | |
09f83975 | 2354 | for (queue = 0; queue < port->ntxqs; queue++) { |
3f518509 MW |
2355 | struct mvpp2_tx_queue *txq = port->txqs[queue]; |
2356 | ||
dbbb2f03 | 2357 | if (txq->descs) |
3f518509 MW |
2358 | qmap |= (1 << queue); |
2359 | } | |
2360 | ||
2361 | mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num); | |
2362 | mvpp2_write(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG, qmap); | |
2363 | } | |
2364 | ||
2365 | /* Disable transmit via physical egress queue | |
2366 | * - HW stops taking descriptors from DRAM
2367 | */ | |
2368 | static void mvpp2_egress_disable(struct mvpp2_port *port) | |
2369 | { | |
2370 | u32 reg_data; | |
2371 | int delay; | |
2372 | int tx_port_num = mvpp2_egress_port(port); | |
2373 | ||
2374 | /* Issue stop command for active channels only */ | |
2375 | mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num); | |
2376 | reg_data = (mvpp2_read(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG)) & | |
2377 | MVPP2_TXP_SCHED_ENQ_MASK; | |
2378 | if (reg_data != 0) | |
2379 | mvpp2_write(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG, | |
2380 | (reg_data << MVPP2_TXP_SCHED_DISQ_OFFSET)); | |
2381 | ||
2382 | /* Wait for all Tx activity to terminate. */ | |
2383 | delay = 0; | |
2384 | do { | |
2385 | if (delay >= MVPP2_TX_DISABLE_TIMEOUT_MSEC) { | |
2386 | netdev_warn(port->dev, | |
2387 | "Tx stop timed out, status=0x%08x\n", | |
2388 | reg_data); | |
2389 | break; | |
2390 | } | |
2391 | mdelay(1); | |
2392 | delay++; | |
2393 | ||
2394 | /* Check the port TX Command register to verify that all
2395 | * Tx queues have stopped
2396 | */ | |
2397 | reg_data = mvpp2_read(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG); | |
2398 | } while (reg_data & MVPP2_TXP_SCHED_ENQ_MASK); | |
2399 | } | |
2400 | ||
2401 | /* Rx descriptors helper methods */ | |
2402 | ||
2403 | /* Get number of Rx descriptors occupied by received packets */ | |
2404 | static inline int | |
2405 | mvpp2_rxq_received(struct mvpp2_port *port, int rxq_id) | |
2406 | { | |
2407 | u32 val = mvpp2_read(port->priv, MVPP2_RXQ_STATUS_REG(rxq_id)); | |
2408 | ||
2409 | return val & MVPP2_RXQ_OCCUPIED_MASK; | |
2410 | } | |
2411 | ||
2412 | /* Update Rx queue status with the number of occupied and available | |
2413 | * Rx descriptor slots. | |
2414 | */ | |
2415 | static inline void | |
2416 | mvpp2_rxq_status_update(struct mvpp2_port *port, int rxq_id, | |
2417 | int used_count, int free_count) | |
2418 | { | |
2419 | /* Decrement the number of used descriptors and increment
2420 | * the number of free descriptors.
2421 | */ | |
2422 | u32 val = used_count | (free_count << MVPP2_RXQ_NUM_NEW_OFFSET); | |
2423 | ||
2424 | mvpp2_write(port->priv, MVPP2_RXQ_STATUS_UPDATE_REG(rxq_id), val); | |
2425 | } | |
2426 | ||
2427 | /* Get pointer to next RX descriptor to be processed by SW */ | |
2428 | static inline struct mvpp2_rx_desc * | |
2429 | mvpp2_rxq_next_desc_get(struct mvpp2_rx_queue *rxq) | |
2430 | { | |
2431 | int rx_desc = rxq->next_desc_to_proc; | |
2432 | ||
2433 | rxq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(rxq, rx_desc); | |
2434 | prefetch(rxq->descs + rxq->next_desc_to_proc); | |
2435 | return rxq->descs + rx_desc; | |
2436 | } | |
2437 | ||
2438 | /* Set rx queue offset */ | |
2439 | static void mvpp2_rxq_offset_set(struct mvpp2_port *port, | |
2440 | int prxq, int offset) | |
2441 | { | |
2442 | u32 val; | |
2443 | ||
2444 | /* Convert offset from bytes to units of 32 bytes */ | |
2445 | offset = offset >> 5; | |
2446 | ||
2447 | val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq)); | |
2448 | val &= ~MVPP2_RXQ_PACKET_OFFSET_MASK; | |
2449 | ||
2450 | /* Offset is in units of 32 bytes */
2451 | val |= ((offset << MVPP2_RXQ_PACKET_OFFSET_OFFS) & | |
2452 | MVPP2_RXQ_PACKET_OFFSET_MASK); | |
2453 | ||
2454 | mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val); | |
2455 | } | |
2456 | ||
3f518509 MW |
2457 | /* Tx descriptors helper methods */ |
2458 | ||
3f518509 MW |
2459 | /* Get pointer to next Tx descriptor to be processed (send) by HW */ |
2460 | static struct mvpp2_tx_desc * | |
2461 | mvpp2_txq_next_desc_get(struct mvpp2_tx_queue *txq) | |
2462 | { | |
2463 | int tx_desc = txq->next_desc_to_proc; | |
2464 | ||
2465 | txq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(txq, tx_desc); | |
2466 | return txq->descs + tx_desc; | |
2467 | } | |
2468 | ||
e0af22d9 TP |
2469 | /* Update HW with number of aggregated Tx descriptors to be sent |
2470 | * | |
2471 | * Called only from mvpp2_tx(), so migration is disabled, using | |
2472 | * smp_processor_id() is OK. | |
2473 | */ | |
3f518509 MW |
2474 | static void mvpp2_aggr_txq_pend_desc_add(struct mvpp2_port *port, int pending) |
2475 | { | |
2476 | /* aggregated access - relevant TXQ number is written in TX desc */ | |
1068549c | 2477 | mvpp2_thread_write(port->priv, |
e531f767 | 2478 | mvpp2_cpu_to_thread(port->priv, smp_processor_id()), |
a786841d | 2479 | MVPP2_AGGR_TXQ_UPDATE_REG, pending); |
3f518509 MW |
2480 | } |
2481 | ||
3f518509 MW |
2482 | /* Check if there are enough free descriptors in aggregated txq. |
2483 | * If not, update the number of occupied descriptors and repeat the check. | |
e0af22d9 TP |
2484 | * |
2485 | * Called only from mvpp2_tx(), so migration is disabled, using | |
2486 | * smp_processor_id() is OK. | |
3f518509 | 2487 | */ |
e531f767 | 2488 | static int mvpp2_aggr_desc_num_check(struct mvpp2_port *port, |
3f518509 MW |
2489 | struct mvpp2_tx_queue *aggr_txq, int num) |
2490 | { | |
02856a3b | 2491 | if ((aggr_txq->count + num) > MVPP2_AGGR_TXQ_SIZE) { |
3f518509 | 2492 | /* Update number of occupied aggregated Tx descriptors */ |
e531f767 AT |
2493 | unsigned int thread = |
2494 | mvpp2_cpu_to_thread(port->priv, smp_processor_id()); | |
2495 | u32 val = mvpp2_read_relaxed(port->priv, | |
543ec376 | 2496 | MVPP2_AGGR_TXQ_STATUS_REG(thread)); |
3f518509 MW |
2497 | |
2498 | aggr_txq->count = val & MVPP2_AGGR_TXQ_PENDING_MASK; | |
3f518509 | 2499 | |
914365f1 YM |
2500 | if ((aggr_txq->count + num) > MVPP2_AGGR_TXQ_SIZE) |
2501 | return -ENOMEM; | |
2502 | } | |
3f518509 MW |
2503 | return 0; |
2504 | } | |
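
Editor's note: the check above is a lazy cache refresh, trusting the soft occupancy count on the fast path and re-reading the hardware status register only when the cache claims the ring is full. The same shape in a self-contained form (all names hypothetical; a stub stands in for the status register):

	#include <errno.h>

	/* Editor's sketch of the lazy-refresh pattern used above. */
	struct demo_aggr_txq {
		unsigned int count;	/* cached occupancy; can only overestimate */
		unsigned int size;	/* ring size */
	};

	/* Stub for MVPP2_AGGR_TXQ_STATUS_REG; pretend hardware drained it. */
	static unsigned int demo_hw_pending(void)
	{
		return 0;
	}

	static int demo_desc_num_check(struct demo_aggr_txq *q, unsigned int num)
	{
		if (q->count + num > q->size) {
			/* Cache says full: refresh from hardware, retry once. */
			q->count = demo_hw_pending();
			if (q->count + num > q->size)
				return -ENOMEM;	/* genuinely full */
		}
		return 0;
	}

The cached count can only overestimate (hardware drains descriptors between refreshes), so a pass on the cached value never needs the hardware read.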
2505 | ||
e0af22d9 TP |
2506 | /* Reserved Tx descriptors allocation request |
2507 | * | |
2508 | * Called only from mvpp2_txq_reserved_desc_num_proc(), itself called | |
2509 | * only by mvpp2_tx(), so migration is disabled, using | |
2510 | * smp_processor_id() is OK. | |
2511 | */ | |
e531f767 | 2512 | static int mvpp2_txq_alloc_reserved_desc(struct mvpp2_port *port, |
3f518509 MW |
2513 | struct mvpp2_tx_queue *txq, int num) |
2514 | { | |
e531f767 AT |
2515 | unsigned int thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id()); |
2516 | struct mvpp2 *priv = port->priv; | |
3f518509 MW |
2517 | u32 val; |
2518 | ||
2519 | val = (txq->id << MVPP2_TXQ_RSVD_REQ_Q_OFFSET) | num; | |
1068549c | 2520 | mvpp2_thread_write_relaxed(priv, thread, MVPP2_TXQ_RSVD_REQ_REG, val); |
3f518509 | 2521 | |
1068549c | 2522 | val = mvpp2_thread_read_relaxed(priv, thread, MVPP2_TXQ_RSVD_RSLT_REG); |
3f518509 MW |
2523 | |
2524 | return val & MVPP2_TXQ_RSVD_RSLT_MASK; | |
2525 | } | |
2526 | ||
2527 | /* Check if there are enough reserved descriptors for transmission. | |
2528 | * If not, request chunk of reserved descriptors and check again. | |
2529 | */ | |
074c74df | 2530 | static int mvpp2_txq_reserved_desc_num_proc(struct mvpp2_port *port, |
3f518509 MW |
2531 | struct mvpp2_tx_queue *txq, |
2532 | struct mvpp2_txq_pcpu *txq_pcpu, | |
2533 | int num) | |
2534 | { | |
850623b3 | 2535 | int req, desc_count; |
074c74df | 2536 | unsigned int thread; |
3f518509 MW |
2537 | |
2538 | if (txq_pcpu->reserved_num >= num) | |
2539 | return 0; | |
2540 | ||
2541 | /* Not enough descriptors reserved! Update the reserved descriptor | |
2542 | * count and check again. | |
2543 | */ | |
2544 | ||
2545 | desc_count = 0; | |
2546 | /* Compute total of used descriptors */ | |
e531f767 | 2547 | for (thread = 0; thread < port->priv->nthreads; thread++) { |
3f518509 MW |
2548 | struct mvpp2_txq_pcpu *txq_pcpu_aux; |
2549 | ||
074c74df | 2550 | txq_pcpu_aux = per_cpu_ptr(txq->pcpu, thread); |
3f518509 MW |
2551 | desc_count += txq_pcpu_aux->count; |
2552 | desc_count += txq_pcpu_aux->reserved_num; | |
2553 | } | |
2554 | ||
2555 | req = max(MVPP2_CPU_DESC_CHUNK, num - txq_pcpu->reserved_num); | |
2556 | desc_count += req; | |
2557 | ||
2558 | if (desc_count > | |
074c74df | 2559 | (txq->size - (MVPP2_MAX_THREADS * MVPP2_CPU_DESC_CHUNK))) |
3f518509 MW |
2560 | return -ENOMEM; |
2561 | ||
e531f767 | 2562 | txq_pcpu->reserved_num += mvpp2_txq_alloc_reserved_desc(port, txq, req); |
3f518509 | 2563 | |
a3302baa | 2564 | /* OK, the reserved count may have been updated: check again. */
3f518509 MW |
2565 | if (txq_pcpu->reserved_num < num) |
2566 | return -ENOMEM; | |
2567 | return 0; | |
2568 | } | |
2569 | ||
2570 | /* Release the last allocated Tx descriptor. Useful to handle DMA | |
2571 | * mapping failures in the Tx path. | |
2572 | */ | |
2573 | static void mvpp2_txq_desc_put(struct mvpp2_tx_queue *txq) | |
2574 | { | |
2575 | if (txq->next_desc_to_proc == 0) | |
2576 | txq->next_desc_to_proc = txq->last_desc - 1; | |
2577 | else | |
2578 | txq->next_desc_to_proc--; | |
2579 | } | |
2580 | ||
2581 | /* Set Tx descriptors fields relevant for CSUM calculation */ | |
35f3625c | 2582 | static u32 mvpp2_txq_desc_csum(int l3_offs, __be16 l3_proto, |
3f518509 MW |
2583 | int ip_hdr_len, int l4_proto) |
2584 | { | |
2585 | u32 command; | |
2586 | ||
2587 | /* fields: L3_offset, IP_hdrlen, L3_type, G_IPv4_chk, | |
2588 | * G_L4_chk, L4_type required only for checksum calculation | |
2589 | */ | |
2590 | command = (l3_offs << MVPP2_TXD_L3_OFF_SHIFT); | |
2591 | command |= (ip_hdr_len << MVPP2_TXD_IP_HLEN_SHIFT); | |
2592 | command |= MVPP2_TXD_IP_CSUM_DISABLE; | |
2593 | ||
dc734dbe | 2594 | if (l3_proto == htons(ETH_P_IP)) { |
3f518509 MW |
2595 | command &= ~MVPP2_TXD_IP_CSUM_DISABLE; /* enable IPv4 csum */ |
2596 | command &= ~MVPP2_TXD_L3_IP6; /* enable IPv4 */ | |
2597 | } else { | |
2598 | command |= MVPP2_TXD_L3_IP6; /* enable IPv6 */ | |
2599 | } | |
2600 | ||
2601 | if (l4_proto == IPPROTO_TCP) { | |
2602 | command &= ~MVPP2_TXD_L4_UDP; /* enable TCP */ | |
2603 | command &= ~MVPP2_TXD_L4_CSUM_FRAG; /* generate L4 csum */ | |
2604 | } else if (l4_proto == IPPROTO_UDP) { | |
2605 | command |= MVPP2_TXD_L4_UDP; /* enable UDP */ | |
2606 | command &= ~MVPP2_TXD_L4_CSUM_FRAG; /* generate L4 csum */ | |
2607 | } else { | |
2608 | command |= MVPP2_TXD_L4_CSUM_NOT; | |
2609 | } | |
2610 | ||
2611 | return command; | |
2612 | } | |
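
Editor's note: a hedged sketch of how a transmit path feeds this helper for a TCP/IPv4 skb, deriving the arguments from the generic skb accessors. This is broadly how a driver's transmit-checksum code uses it; the function below is illustrative, not a copy of this file's caller:

	#include <linux/ip.h>
	#include <linux/skbuff.h>

	/* Editor's sketch: build the TX csum command for an skb carrying a
	 * TCP/IPv4 packet with CHECKSUM_PARTIAL set.
	 */
	static u32 demo_tx_csum_cmd(struct sk_buff *skb)
	{
		int l3_offs = skb_network_offset(skb);	/* bytes to IP header */
		struct iphdr *iph = ip_hdr(skb);

		/* iph->ihl is the IPv4 header length in 32-bit words, the
		 * unit the MVPP2_TXD_IP_HLEN_SHIFT field expects.
		 */
		return mvpp2_txq_desc_csum(l3_offs, htons(ETH_P_IP),
					   iph->ihl, iph->protocol);
	}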
2613 | ||
2614 | /* Get number of sent descriptors and decrement counter. | |
2615 | * The number of sent descriptors is returned. | |
543ec376 | 2616 | * Per-thread access |
e0af22d9 TP |
2617 | * |
2618 | * Called only from mvpp2_txq_done(), called from mvpp2_tx() | |
2619 | * (migration disabled) and from the TX completion tasklet (migration | |
2620 | * disabled) so using smp_processor_id() is OK. | |
3f518509 MW |
2621 | */ |
2622 | static inline int mvpp2_txq_sent_desc_proc(struct mvpp2_port *port, | |
2623 | struct mvpp2_tx_queue *txq) | |
2624 | { | |
2625 | u32 val; | |
2626 | ||
2627 | /* Reading status reg resets transmitted descriptor counter */ | |
1068549c | 2628 | val = mvpp2_thread_read_relaxed(port->priv, |
e531f767 | 2629 | mvpp2_cpu_to_thread(port->priv, smp_processor_id()), |
cdcfeb0f | 2630 | MVPP2_TXQ_SENT_REG(txq->id)); |
3f518509 MW |
2631 | |
2632 | return (val & MVPP2_TRANSMITTED_COUNT_MASK) >> | |
2633 | MVPP2_TRANSMITTED_COUNT_OFFSET; | |
2634 | } | |
2635 | ||
e0af22d9 TP |
2636 | /* Called through on_each_cpu(), so runs on all CPUs, with migration |
2637 | * disabled, therefore using smp_processor_id() is OK. | |
2638 | */ | |
3f518509 MW |
2639 | static void mvpp2_txq_sent_counter_clear(void *arg) |
2640 | { | |
2641 | struct mvpp2_port *port = arg; | |
2642 | int queue; | |
2643 | ||
e531f767 | 2644 | /* If the thread isn't used, don't do anything */ |
7867299c | 2645 | if (smp_processor_id() >= port->priv->nthreads) |
e531f767 AT |
2646 | return; |
2647 | ||
09f83975 | 2648 | for (queue = 0; queue < port->ntxqs; queue++) { |
3f518509 MW |
2649 | int id = port->txqs[queue]->id; |
2650 | ||
1068549c | 2651 | mvpp2_thread_read(port->priv, |
e531f767 | 2652 | mvpp2_cpu_to_thread(port->priv, smp_processor_id()), |
a786841d | 2653 | MVPP2_TXQ_SENT_REG(id)); |
3f518509 MW |
2654 | } |
2655 | } | |
2656 | ||
2657 | /* Set max sizes for Tx queues */ | |
2658 | static void mvpp2_txp_max_tx_size_set(struct mvpp2_port *port) | |
2659 | { | |
2660 | u32 val, size, mtu; | |
2661 | int txq, tx_port_num; | |
2662 | ||
2663 | mtu = port->pkt_size * 8; | |
2664 | if (mtu > MVPP2_TXP_MTU_MAX) | |
2665 | mtu = MVPP2_TXP_MTU_MAX; | |
2666 | ||
2667 | /* Workaround for a wrong Token bucket update: set MTU value = 3 * real MTU value */
2668 | mtu = 3 * mtu; | |
2669 | ||
2670 | /* Indirect access to registers */ | |
2671 | tx_port_num = mvpp2_egress_port(port); | |
2672 | mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num); | |
2673 | ||
2674 | /* Set MTU */ | |
2675 | val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_MTU_REG); | |
2676 | val &= ~MVPP2_TXP_MTU_MAX; | |
2677 | val |= mtu; | |
2678 | mvpp2_write(port->priv, MVPP2_TXP_SCHED_MTU_REG, val); | |
2679 | ||
2680 | /* TXP token size and all TXQs token size must be larger than the MTU */
2681 | val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG); | |
2682 | size = val & MVPP2_TXP_TOKEN_SIZE_MAX; | |
2683 | if (size < mtu) { | |
2684 | size = mtu; | |
2685 | val &= ~MVPP2_TXP_TOKEN_SIZE_MAX; | |
2686 | val |= size; | |
2687 | mvpp2_write(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val); | |
2688 | } | |
2689 | ||
09f83975 | 2690 | for (txq = 0; txq < port->ntxqs; txq++) { |
3f518509 MW |
2691 | val = mvpp2_read(port->priv, |
2692 | MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq)); | |
2693 | size = val & MVPP2_TXQ_TOKEN_SIZE_MAX; | |
2694 | ||
2695 | if (size < mtu) { | |
2696 | size = mtu; | |
2697 | val &= ~MVPP2_TXQ_TOKEN_SIZE_MAX; | |
2698 | val |= size; | |
2699 | mvpp2_write(port->priv, | |
2700 | MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq), | |
2701 | val); | |
2702 | } | |
2703 | } | |
2704 | } | |
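
Editor's note: worked numbers for the sizing pass above, with the packet size assumed:

	/* Editor's worked example (pkt_size assumed):
	 *   pkt_size = 1518  =>  mtu = 1518 * 8 = 12144
	 *
	 * 12144 is below MVPP2_TXP_MTU_MAX here, so no clamping; the
	 * token-bucket workaround then triples it:
	 *   mtu = 3 * 12144 = 36432
	 *
	 * The TXP token size and every TXQ token size are raised to at
	 * least 36432, so a maximum-sized frame can always accumulate
	 * enough tokens to be scheduled in one refill period.
	 */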
2705 | ||
bf270fa3 SC |
2706 | /* Set the number of non-occupied descriptors threshold */ |
2707 | static void mvpp2_set_rxq_free_tresh(struct mvpp2_port *port, | |
2708 | struct mvpp2_rx_queue *rxq) | |
2709 | { | |
2710 | u32 val; | |
2711 | ||
2712 | mvpp2_write(port->priv, MVPP2_RXQ_NUM_REG, rxq->id); | |
2713 | ||
2714 | val = mvpp2_read(port->priv, MVPP2_RXQ_THRESH_REG); | |
2715 | val &= ~MVPP2_RXQ_NON_OCCUPIED_MASK; | |
2716 | val |= MSS_THRESHOLD_STOP << MVPP2_RXQ_NON_OCCUPIED_OFFSET; | |
2717 | mvpp2_write(port->priv, MVPP2_RXQ_THRESH_REG, val); | |
2718 | } | |
2719 | ||
3f518509 MW |
2720 | /* Set the number of packets that will be received before Rx interrupt |
2721 | * will be generated by HW. | |
2722 | */ | |
2723 | static void mvpp2_rx_pkts_coal_set(struct mvpp2_port *port, | |
d63f9e41 | 2724 | struct mvpp2_rx_queue *rxq) |
3f518509 | 2725 | { |
e531f767 | 2726 | unsigned int thread = mvpp2_cpu_to_thread(port->priv, get_cpu()); |
a786841d | 2727 | |
f8b0d5f8 TP |
2728 | if (rxq->pkts_coal > MVPP2_OCCUPIED_THRESH_MASK) |
2729 | rxq->pkts_coal = MVPP2_OCCUPIED_THRESH_MASK; | |
3f518509 | 2730 | |
1068549c AT |
2731 | mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_NUM_REG, rxq->id); |
2732 | mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_THRESH_REG, | |
a786841d | 2733 | rxq->pkts_coal); |
a704bb5c TP |
2734 | |
2735 | put_cpu(); | |
3f518509 MW |
2736 | } |
2737 | ||
213f428f TP |
2738 | /* For some reason in the LSP this is done on each CPU. Why? */
2739 | static void mvpp2_tx_pkts_coal_set(struct mvpp2_port *port, | |
2740 | struct mvpp2_tx_queue *txq) | |
2741 | { | |
4f374d2c | 2742 | unsigned int thread; |
213f428f TP |
2743 | u32 val; |
2744 | ||
2745 | if (txq->done_pkts_coal > MVPP2_TXQ_THRESH_MASK) | |
2746 | txq->done_pkts_coal = MVPP2_TXQ_THRESH_MASK; | |
2747 | ||
2748 | val = (txq->done_pkts_coal << MVPP2_TXQ_THRESH_OFFSET); | |
4f374d2c SC |
2749 | /* PKT-coalescing registers are per-queue + per-thread */ |
2750 | for (thread = 0; thread < MVPP2_MAX_THREADS; thread++) { | |
2751 | mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_NUM_REG, txq->id); | |
2752 | mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_THRESH_REG, val); | |
2753 | } | |
213f428f TP |
2754 | } |
2755 | ||
ba7a0f44 | 2756 | static u32 mvpp2_usec_to_cycles(u32 usec, unsigned long clk_hz) |
ab42676a TP |
2757 | { |
2758 | u64 tmp = (u64)clk_hz * usec; | |
2759 | ||
2760 | do_div(tmp, USEC_PER_SEC); | |
2761 | ||
ba7a0f44 | 2762 | return tmp > U32_MAX ? U32_MAX : tmp; |
ab42676a TP |
2763 | } |
2764 | ||
ba7a0f44 | 2765 | static u32 mvpp2_cycles_to_usec(u32 cycles, unsigned long clk_hz) |
ab42676a TP |
2766 | { |
2767 | u64 tmp = (u64)cycles * USEC_PER_SEC; | |
2768 | ||
2769 | do_div(tmp, clk_hz); | |
2770 | ||
ba7a0f44 | 2771 | return tmp > U32_MAX ? U32_MAX : tmp; |
ab42676a TP |
2772 | } |
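
Editor's note: worked numbers for the conversion pair, again assuming a 333 MHz tclk:

	/* Editor's worked example (tclk assumed):
	 *   mvpp2_usec_to_cycles(100, 333000000):
	 *     tmp = 333000000 * 100 = 33300000000
	 *     tmp / USEC_PER_SEC    = 33300 cycles
	 *
	 *   mvpp2_cycles_to_usec(33300, 333000000):
	 *     tmp = 33300 * 1000000 = 33300000000
	 *     tmp / 333000000       = 100 usec
	 *
	 * Both helpers clamp the result to U32_MAX, so an out-of-range
	 * request saturates instead of silently wrapping.
	 */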
2773 | ||
3f518509 MW |
2774 | /* Set the time delay in usec before Rx interrupt */ |
2775 | static void mvpp2_rx_time_coal_set(struct mvpp2_port *port, | |
d63f9e41 | 2776 | struct mvpp2_rx_queue *rxq) |
3f518509 | 2777 | { |
ba7a0f44 | 2778 | unsigned long freq = port->priv->tclk; |
ab42676a TP |
2779 | u32 val = mvpp2_usec_to_cycles(rxq->time_coal, freq); |
2780 | ||
2781 | if (val > MVPP2_MAX_ISR_RX_THRESHOLD) { | |
2782 | rxq->time_coal = | |
2783 | mvpp2_cycles_to_usec(MVPP2_MAX_ISR_RX_THRESHOLD, freq); | |
2784 | ||
2785 | /* re-evaluate to get actual register value */ | |
2786 | val = mvpp2_usec_to_cycles(rxq->time_coal, freq); | |
2787 | } | |
3f518509 | 2788 | |
3f518509 | 2789 | mvpp2_write(port->priv, MVPP2_ISR_RX_THRESHOLD_REG(rxq->id), val); |
3f518509 MW |
2790 | } |
2791 | ||
213f428f TP |
2792 | static void mvpp2_tx_time_coal_set(struct mvpp2_port *port) |
2793 | { | |
ba7a0f44 | 2794 | unsigned long freq = port->priv->tclk; |
213f428f TP |
2795 | u32 val = mvpp2_usec_to_cycles(port->tx_time_coal, freq); |
2796 | ||
2797 | if (val > MVPP2_MAX_ISR_TX_THRESHOLD) { | |
2798 | port->tx_time_coal = | |
2799 | mvpp2_cycles_to_usec(MVPP2_MAX_ISR_TX_THRESHOLD, freq); | |
2800 | ||
2801 | /* re-evaluate to get actual register value */ | |
2802 | val = mvpp2_usec_to_cycles(port->tx_time_coal, freq); | |
2803 | } | |
2804 | ||
2805 | mvpp2_write(port->priv, MVPP2_ISR_TX_THRESHOLD_REG(port->id), val); | |
2806 | } | |
2807 | ||
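/* Usage note (assumption, based on this driver's standard ethtool
 * coalescing support): the time_coal/pkts_coal values programmed by the
 * helpers above are normally configured from userspace, e.g. for a
 * hypothetical port eth0:
 *
 *	ethtool -C eth0 rx-usecs 100 rx-frames 32 tx-usecs 1000 tx-frames 30
 */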
3f518509 MW |
2808 | /* Free Tx queue buffers: skbs and XDP frames */ | |
2809 | static void mvpp2_txq_bufs_free(struct mvpp2_port *port, | |
2810 | struct mvpp2_tx_queue *txq, | |
2811 | struct mvpp2_txq_pcpu *txq_pcpu, int num) | |
2812 | { | |
dbef19cc | 2813 | struct xdp_frame_bulk bq; |
3f518509 MW |
2814 | int i; |
2815 | ||
dbef19cc LB |
2816 | xdp_frame_bulk_init(&bq); |
2817 | ||
2818 | rcu_read_lock(); /* need for xdp_return_frame_bulk */ | |
2819 | ||
3f518509 | 2820 | for (i = 0; i < num; i++) { |
8354491c TP |
2821 | struct mvpp2_txq_pcpu_buf *tx_buf = |
2822 | txq_pcpu->buffs + txq_pcpu->txq_get_index; | |
3f518509 | 2823 | |
c2d6fe61 MC |
2824 | if (!IS_TSO_HEADER(txq_pcpu, tx_buf->dma) && |
2825 | tx_buf->type != MVPP2_TYPE_XDP_TX) | |
20920267 AT |
2826 | dma_unmap_single(port->dev->dev.parent, tx_buf->dma, |
2827 | tx_buf->size, DMA_TO_DEVICE); | |
c2d6fe61 | 2828 | if (tx_buf->type == MVPP2_TYPE_SKB && tx_buf->skb) |
36fb7435 | 2829 | dev_kfree_skb_any(tx_buf->skb); |
c2d6fe61 MC |
2830 | else if (tx_buf->type == MVPP2_TYPE_XDP_TX || |
2831 | tx_buf->type == MVPP2_TYPE_XDP_NDO) | |
dbef19cc | 2832 | xdp_return_frame_bulk(tx_buf->xdpf, &bq); |
36fb7435 TP |
2833 | |
2834 | mvpp2_txq_inc_get(txq_pcpu); | |
3f518509 | 2835 | } |
dbef19cc LB |
2836 | xdp_flush_frame_bulk(&bq); |
2837 | ||
2838 | rcu_read_unlock(); | |
3f518509 MW |
2839 | } |
2840 | ||
2841 | static inline struct mvpp2_rx_queue *mvpp2_get_rx_queue(struct mvpp2_port *port, | |
2842 | u32 cause) | |
2843 | { | |
2844 | int queue = fls(cause) - 1; | |
2845 | ||
2846 | return port->rxqs[queue]; | |
2847 | } | |
2848 | ||
2849 | static inline struct mvpp2_tx_queue *mvpp2_get_tx_queue(struct mvpp2_port *port, | |
2850 | u32 cause) | |
2851 | { | |
edc660fa | 2852 | int queue = fls(cause) - 1; |
3f518509 MW |
2853 | |
2854 | return port->txqs[queue]; | |
2855 | } | |
2856 | ||
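/* Illustrative note: "cause" is a per-queue pending bitmap, so
 * fls(cause) - 1 selects the highest-numbered pending queue. For
 * example, cause = 0x5 (queues 0 and 2 pending) gives fls() == 3 and
 * queue 2; callers clear that bit and loop to service the rest.
 */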
2857 | /* Handle end of transmission */ | |
2858 | static void mvpp2_txq_done(struct mvpp2_port *port, struct mvpp2_tx_queue *txq, | |
2859 | struct mvpp2_txq_pcpu *txq_pcpu) | |
2860 | { | |
2861 | struct netdev_queue *nq = netdev_get_tx_queue(port->dev, txq->log_id); | |
2862 | int tx_done; | |
2863 | ||
e531f767 | 2864 | if (txq_pcpu->thread != mvpp2_cpu_to_thread(port->priv, smp_processor_id())) |
3f518509 MW |
2865 | netdev_err(port->dev, "wrong cpu on the end of Tx processing\n"); |
2866 | ||
2867 | tx_done = mvpp2_txq_sent_desc_proc(port, txq); | |
2868 | if (!tx_done) | |
2869 | return; | |
2870 | mvpp2_txq_bufs_free(port, txq, txq_pcpu, tx_done); | |
2871 | ||
2872 | txq_pcpu->count -= tx_done; | |
2873 | ||
2874 | if (netif_tx_queue_stopped(nq)) | |
1d17db08 | 2875 | if (txq_pcpu->count <= txq_pcpu->wake_threshold) |
3f518509 MW |
2876 | netif_tx_wake_queue(nq); |
2877 | } | |
2878 | ||
213f428f | 2879 | static unsigned int mvpp2_tx_done(struct mvpp2_port *port, u32 cause, |
543ec376 | 2880 | unsigned int thread) |
edc660fa MW |
2881 | { |
2882 | struct mvpp2_tx_queue *txq; | |
2883 | struct mvpp2_txq_pcpu *txq_pcpu; | |
2884 | unsigned int tx_todo = 0; | |
2885 | ||
2886 | while (cause) { | |
2887 | txq = mvpp2_get_tx_queue(port, cause); | |
2888 | if (!txq) | |
2889 | break; | |
2890 | ||
543ec376 | 2891 | txq_pcpu = per_cpu_ptr(txq->pcpu, thread); |
edc660fa MW |
2892 | |
2893 | if (txq_pcpu->count) { | |
2894 | mvpp2_txq_done(port, txq, txq_pcpu); | |
2895 | tx_todo += txq_pcpu->count; | |
2896 | } | |
2897 | ||
2898 | cause &= ~(1 << txq->log_id); | |
2899 | } | |
2900 | return tx_todo; | |
2901 | } | |
2902 | ||
3f518509 MW |
2903 | /* Rx/Tx queue initialization/cleanup methods */ |
2904 | ||
2905 | /* Allocate and initialize descriptors for aggr TXQ */ | |
2906 | static int mvpp2_aggr_txq_init(struct platform_device *pdev, | |
850623b3 | 2907 | struct mvpp2_tx_queue *aggr_txq, |
543ec376 | 2908 | unsigned int thread, struct mvpp2 *priv) |
3f518509 | 2909 | { |
b02f31fb TP |
2910 | u32 txq_dma; |
2911 | ||
3f518509 | 2912 | /* Allocate memory for TX descriptors */ |
750afb08 LC |
2913 | aggr_txq->descs = dma_alloc_coherent(&pdev->dev, |
2914 | MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE, | |
2915 | &aggr_txq->descs_dma, GFP_KERNEL); | |
3f518509 MW |
2916 | if (!aggr_txq->descs) |
2917 | return -ENOMEM; | |
2918 | ||
02856a3b | 2919 | aggr_txq->last_desc = MVPP2_AGGR_TXQ_SIZE - 1; |
3f518509 MW |
2920 | |
2921 | /* Workaround: the aggregated TXQ is not reset by HW, so resume from the current HW index */ | |
2922 | aggr_txq->next_desc_to_proc = mvpp2_read(priv, | |
543ec376 | 2923 | MVPP2_AGGR_TXQ_INDEX_REG(thread)); |
3f518509 | 2924 | |
b02f31fb TP |
2925 | /* Set Tx descriptors queue starting address - | |
2926 | * indirect access | |
2927 | */ | |
2928 | if (priv->hw_version == MVPP21) | |
2929 | txq_dma = aggr_txq->descs_dma; | |
2930 | else | |
2931 | txq_dma = aggr_txq->descs_dma >> | |
2932 | MVPP22_AGGR_TXQ_DESC_ADDR_OFFS; | |
2933 | ||
543ec376 AT |
2934 | mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_ADDR_REG(thread), txq_dma); |
2935 | mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_SIZE_REG(thread), | |
85affd7e | 2936 | MVPP2_AGGR_TXQ_SIZE); |
3f518509 MW |
2937 | |
2938 | return 0; | |
2939 | } | |
2940 | ||
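/* Illustrative note: on PPv2.2+ the base register holds only the upper
 * bits of the 40-bit descriptor DMA address, hence the shift by
 * MVPP22_AGGR_TXQ_DESC_ADDR_OFFS above. Assuming an 8-bit shift, a base
 * of 0x1_2345_0000 would be programmed as 0x0123_4500.
 */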
2941 | /* Create a specified Rx queue */ | |
2942 | static int mvpp2_rxq_init(struct mvpp2_port *port, | |
2943 | struct mvpp2_rx_queue *rxq) | |
3f518509 | 2944 | { |
b27db227 | 2945 | struct mvpp2 *priv = port->priv; |
543ec376 | 2946 | unsigned int thread; |
b02f31fb | 2947 | u32 rxq_dma; |
b27db227 | 2948 | int err; |
b02f31fb | 2949 | |
3f518509 MW |
2950 | rxq->size = port->rx_ring_size; |
2951 | ||
2952 | /* Allocate memory for RX descriptors */ | |
2953 | rxq->descs = dma_alloc_coherent(port->dev->dev.parent, | |
2954 | rxq->size * MVPP2_DESC_ALIGNED_SIZE, | |
20396136 | 2955 | &rxq->descs_dma, GFP_KERNEL); |
3f518509 MW |
2956 | if (!rxq->descs) |
2957 | return -ENOMEM; | |
2958 | ||
3f518509 MW |
2959 | rxq->last_desc = rxq->size - 1; |
2960 | ||
2961 | /* Zero occupied and non-occupied counters - direct access */ | |
2962 | mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0); | |
2963 | ||
2964 | /* Set Rx descriptors queue starting address - indirect access */ | |
e531f767 | 2965 | thread = mvpp2_cpu_to_thread(port->priv, get_cpu()); |
1068549c | 2966 | mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_NUM_REG, rxq->id); |
b02f31fb TP |
2967 | if (port->priv->hw_version == MVPP21) |
2968 | rxq_dma = rxq->descs_dma; | |
2969 | else | |
2970 | rxq_dma = rxq->descs_dma >> MVPP22_DESC_ADDR_OFFS; | |
1068549c AT |
2971 | mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_DESC_ADDR_REG, rxq_dma); |
2972 | mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_DESC_SIZE_REG, rxq->size); | |
2973 | mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_INDEX_REG, 0); | |
a704bb5c | 2974 | put_cpu(); |
3f518509 MW |
2975 | |
2976 | /* Set Offset */ | |
07dd0a7a | 2977 | mvpp2_rxq_offset_set(port, rxq->id, MVPP2_SKB_HEADROOM); |
3f518509 MW |
2978 | |
2979 | /* Set coalescing pkts and time */ | |
d63f9e41 TP |
2980 | mvpp2_rx_pkts_coal_set(port, rxq); |
2981 | mvpp2_rx_time_coal_set(port, rxq); | |
3f518509 | 2982 | |
bf270fa3 SC |
2983 | /* Set the non-occupied descriptors threshold */ | |
2984 | mvpp2_set_rxq_free_tresh(port, rxq); | |
2985 | ||
3f518509 MW |
2986 | /* Add number of descriptors ready for receiving packets */ |
2987 | mvpp2_rxq_status_update(port, rxq->id, 0, rxq->size); | |
2988 | ||
b27db227 | 2989 | if (priv->percpu_pools) { |
a50e659b | 2990 | err = xdp_rxq_info_reg(&rxq->xdp_rxq_short, port->dev, rxq->logic_rxq, 0); |
b27db227 MC |
2991 | if (err < 0) |
2992 | goto err_free_dma; | |
2993 | ||
a50e659b | 2994 | err = xdp_rxq_info_reg(&rxq->xdp_rxq_long, port->dev, rxq->logic_rxq, 0); |
b27db227 MC |
2995 | if (err < 0) |
2996 | goto err_unregister_rxq_short; | |
2997 | ||
2998 | /* Every RXQ has a pool for short and another for long packets */ | |
2999 | err = xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq_short, | |
3000 | MEM_TYPE_PAGE_POOL, | |
3001 | priv->page_pool[rxq->logic_rxq]); | |
3002 | if (err < 0) | |
3003 | goto err_unregister_rxq_long; | |
3004 | ||
3005 | err = xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq_long, | |
3006 | MEM_TYPE_PAGE_POOL, | |
3007 | priv->page_pool[rxq->logic_rxq + | |
3008 | port->nrxqs]); | |
3009 | if (err < 0) | |
3010 | goto err_unregister_mem_rxq_short; | |
3011 | } | |
3012 | ||
3f518509 | 3013 | return 0; |
b27db227 MC |
3014 | |
3015 | err_unregister_mem_rxq_short: | |
3016 | xdp_rxq_info_unreg_mem_model(&rxq->xdp_rxq_short); | |
3017 | err_unregister_rxq_long: | |
3018 | xdp_rxq_info_unreg(&rxq->xdp_rxq_long); | |
3019 | err_unregister_rxq_short: | |
3020 | xdp_rxq_info_unreg(&rxq->xdp_rxq_short); | |
3021 | err_free_dma: | |
3022 | dma_free_coherent(port->dev->dev.parent, | |
3023 | rxq->size * MVPP2_DESC_ALIGNED_SIZE, | |
3024 | rxq->descs, rxq->descs_dma); | |
3025 | return err; | |
3f518509 MW |
3026 | } |
3027 | ||
3028 | /* Push packets received by the RXQ to BM pool */ | |
3029 | static void mvpp2_rxq_drop_pkts(struct mvpp2_port *port, | |
3030 | struct mvpp2_rx_queue *rxq) | |
3031 | { | |
3032 | int rx_received, i; | |
3033 | ||
3034 | rx_received = mvpp2_rxq_received(port, rxq->id); | |
3035 | if (!rx_received) | |
3036 | return; | |
3037 | ||
3038 | for (i = 0; i < rx_received; i++) { | |
3039 | struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq); | |
56b8aae9 TP |
3040 | u32 status = mvpp2_rxdesc_status_get(port, rx_desc); |
3041 | int pool; | |
3042 | ||
3043 | pool = (status & MVPP2_RXD_BM_POOL_ID_MASK) >> | |
3044 | MVPP2_RXD_BM_POOL_ID_OFFS; | |
3f518509 | 3045 | |
7d7627ba | 3046 | mvpp2_bm_pool_put(port, pool, |
ac3dd277 TP |
3047 | mvpp2_rxdesc_dma_addr_get(port, rx_desc), |
3048 | mvpp2_rxdesc_cookie_get(port, rx_desc)); | |
3f518509 MW |
3049 | } |
3050 | mvpp2_rxq_status_update(port, rxq->id, rx_received, rx_received); | |
3051 | } | |
3052 | ||
3053 | /* Cleanup Rx queue */ | |
3054 | static void mvpp2_rxq_deinit(struct mvpp2_port *port, | |
3055 | struct mvpp2_rx_queue *rxq) | |
3056 | { | |
543ec376 | 3057 | unsigned int thread; |
a786841d | 3058 | |
b27db227 MC |
3059 | if (xdp_rxq_info_is_reg(&rxq->xdp_rxq_short)) |
3060 | xdp_rxq_info_unreg(&rxq->xdp_rxq_short); | |
3061 | ||
3062 | if (xdp_rxq_info_is_reg(&rxq->xdp_rxq_long)) | |
3063 | xdp_rxq_info_unreg(&rxq->xdp_rxq_long); | |
3064 | ||
3f518509 MW |
3065 | mvpp2_rxq_drop_pkts(port, rxq); |
3066 | ||
3067 | if (rxq->descs) | |
3068 | dma_free_coherent(port->dev->dev.parent, | |
3069 | rxq->size * MVPP2_DESC_ALIGNED_SIZE, | |
3070 | rxq->descs, | |
20396136 | 3071 | rxq->descs_dma); |
3f518509 MW |
3072 | |
3073 | rxq->descs = NULL; | |
3074 | rxq->last_desc = 0; | |
3075 | rxq->next_desc_to_proc = 0; | |
20396136 | 3076 | rxq->descs_dma = 0; |
3f518509 MW |
3077 | |
3078 | /* Clear Rx descriptors queue starting address and size; | |
3079 | * free descriptor number | |
3080 | */ | |
3081 | mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0); | |
e531f767 | 3082 | thread = mvpp2_cpu_to_thread(port->priv, get_cpu()); |
1068549c AT |
3083 | mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_NUM_REG, rxq->id); |
3084 | mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_DESC_ADDR_REG, 0); | |
3085 | mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_DESC_SIZE_REG, 0); | |
a704bb5c | 3086 | put_cpu(); |
3f518509 MW |
3087 | } |
3088 | ||
3089 | /* Create and initialize a Tx queue */ | |
3090 | static int mvpp2_txq_init(struct mvpp2_port *port, | |
3091 | struct mvpp2_tx_queue *txq) | |
3092 | { | |
3093 | u32 val; | |
074c74df | 3094 | unsigned int thread; |
850623b3 | 3095 | int desc, desc_per_txq, tx_port_num; |
3f518509 MW |
3096 | struct mvpp2_txq_pcpu *txq_pcpu; |
3097 | ||
3098 | txq->size = port->tx_ring_size; | |
3099 | ||
3100 | /* Allocate memory for Tx descriptors */ | |
3101 | txq->descs = dma_alloc_coherent(port->dev->dev.parent, | |
3102 | txq->size * MVPP2_DESC_ALIGNED_SIZE, | |
20396136 | 3103 | &txq->descs_dma, GFP_KERNEL); |
3f518509 MW |
3104 | if (!txq->descs) |
3105 | return -ENOMEM; | |
3106 | ||
3f518509 MW |
3107 | txq->last_desc = txq->size - 1; |
3108 | ||
3109 | /* Set Tx descriptors queue starting address - indirect access */ | |
e531f767 | 3110 | thread = mvpp2_cpu_to_thread(port->priv, get_cpu()); |
1068549c AT |
3111 | mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_NUM_REG, txq->id); |
3112 | mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_DESC_ADDR_REG, | |
a786841d | 3113 | txq->descs_dma); |
1068549c | 3114 | mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_DESC_SIZE_REG, |
a786841d | 3115 | txq->size & MVPP2_TXQ_DESC_SIZE_MASK); |
1068549c AT |
3116 | mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_INDEX_REG, 0); |
3117 | mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_RSVD_CLR_REG, | |
a786841d | 3118 | txq->id << MVPP2_TXQ_RSVD_CLR_OFFSET); |
1068549c | 3119 | val = mvpp2_thread_read(port->priv, thread, MVPP2_TXQ_PENDING_REG); |
3f518509 | 3120 | val &= ~MVPP2_TXQ_PENDING_MASK; |
1068549c | 3121 | mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_PENDING_REG, val); |
3f518509 MW |
3122 | |
3123 | /* Calculate base address in prefetch buffer. We reserve 16 descriptors | |
3124 | * for each existing TXQ. | |
3125 | * TCONTs for the PON port must be contiguous from 0 to MVPP2_MAX_TCONT; | |
a3302baa | 3126 | * GBE ports are assumed to be contiguous from 0 to MVPP2_MAX_PORTS. | |
3f518509 MW |
3127 | */ |
3128 | desc_per_txq = 16; | |
3129 | desc = (port->id * MVPP2_MAX_TXQ * desc_per_txq) + | |
3130 | (txq->log_id * desc_per_txq); | |
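	/* Worked example (assuming MVPP2_MAX_TXQ == 8): port 1, txq 3
	 * yields desc = 1 * 8 * 16 + 3 * 16 = 176, i.e. each port owns
	 * a 128-entry window of the prefetch buffer, 16 entries per TXQ.
	 */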
3131 | ||
1068549c | 3132 | mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_PREF_BUF_REG, |
a786841d TP |
3133 | MVPP2_PREF_BUF_PTR(desc) | MVPP2_PREF_BUF_SIZE_16 | |
3134 | MVPP2_PREF_BUF_THRESH(desc_per_txq / 2)); | |
a704bb5c | 3135 | put_cpu(); |
3f518509 MW |
3136 | |
3137 | /* WRR / EJP configuration - indirect access */ | |
3138 | tx_port_num = mvpp2_egress_port(port); | |
3139 | mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num); | |
3140 | ||
3141 | val = mvpp2_read(port->priv, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id)); | |
3142 | val &= ~MVPP2_TXQ_REFILL_PERIOD_ALL_MASK; | |
3143 | val |= MVPP2_TXQ_REFILL_PERIOD_MASK(1); | |
3144 | val |= MVPP2_TXQ_REFILL_TOKENS_ALL_MASK; | |
3145 | mvpp2_write(port->priv, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id), val); | |
3146 | ||
3147 | val = MVPP2_TXQ_TOKEN_SIZE_MAX; | |
3148 | mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq->log_id), | |
3149 | val); | |
3150 | ||
e531f767 | 3151 | for (thread = 0; thread < port->priv->nthreads; thread++) { |
074c74df | 3152 | txq_pcpu = per_cpu_ptr(txq->pcpu, thread); |
3f518509 | 3153 | txq_pcpu->size = txq->size; |
02c91ece ME |
3154 | txq_pcpu->buffs = kmalloc_array(txq_pcpu->size, |
3155 | sizeof(*txq_pcpu->buffs), | |
3156 | GFP_KERNEL); | |
8354491c | 3157 | if (!txq_pcpu->buffs) |
ba2d8d88 | 3158 | return -ENOMEM; |
3f518509 MW |
3159 | |
3160 | txq_pcpu->count = 0; | |
3161 | txq_pcpu->reserved_num = 0; | |
3162 | txq_pcpu->txq_put_index = 0; | |
3163 | txq_pcpu->txq_get_index = 0; | |
b70d4a51 | 3164 | txq_pcpu->tso_headers = NULL; |
186cd4d4 | 3165 | |
1d17db08 AT |
3166 | txq_pcpu->stop_threshold = txq->size - MVPP2_MAX_SKB_DESCS; |
3167 | txq_pcpu->wake_threshold = txq_pcpu->stop_threshold / 2; | |
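		/* Worked example (hypothetical numbers): with a
		 * 1024-descriptor ring and MVPP2_MAX_SKB_DESCS == 21,
		 * the queue stops once 1003 descriptors are in flight
		 * and is woken again when it drains to 501 or fewer.
		 */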
3168 | ||
186cd4d4 AT |
3169 | txq_pcpu->tso_headers = |
3170 | dma_alloc_coherent(port->dev->dev.parent, | |
822eaf7c | 3171 | txq_pcpu->size * TSO_HEADER_SIZE, |
186cd4d4 AT |
3172 | &txq_pcpu->tso_headers_dma, |
3173 | GFP_KERNEL); | |
3174 | if (!txq_pcpu->tso_headers) | |
ba2d8d88 | 3175 | return -ENOMEM; |
3f518509 MW |
3176 | } |
3177 | ||
3178 | return 0; | |
3179 | } | |
3180 | ||
3181 | /* Free allocated TXQ resources */ | |
3182 | static void mvpp2_txq_deinit(struct mvpp2_port *port, | |
3183 | struct mvpp2_tx_queue *txq) | |
3184 | { | |
3185 | struct mvpp2_txq_pcpu *txq_pcpu; | |
074c74df | 3186 | unsigned int thread; |
3f518509 | 3187 | |
e531f767 | 3188 | for (thread = 0; thread < port->priv->nthreads; thread++) { |
074c74df | 3189 | txq_pcpu = per_cpu_ptr(txq->pcpu, thread); |
8354491c | 3190 | kfree(txq_pcpu->buffs); |
186cd4d4 | 3191 | |
b70d4a51 AT |
3192 | if (txq_pcpu->tso_headers) |
3193 | dma_free_coherent(port->dev->dev.parent, | |
3194 | txq_pcpu->size * TSO_HEADER_SIZE, | |
3195 | txq_pcpu->tso_headers, | |
3196 | txq_pcpu->tso_headers_dma); | |
3197 | ||
3198 | txq_pcpu->tso_headers = NULL; | |
3f518509 MW |
3199 | } |
3200 | ||
3201 | if (txq->descs) | |
3202 | dma_free_coherent(port->dev->dev.parent, | |
3203 | txq->size * MVPP2_DESC_ALIGNED_SIZE, | |
20396136 | 3204 | txq->descs, txq->descs_dma); |
3f518509 MW |
3205 | |
3206 | txq->descs = NULL; | |
3207 | txq->last_desc = 0; | |
3208 | txq->next_desc_to_proc = 0; | |
20396136 | 3209 | txq->descs_dma = 0; |
3f518509 MW |
3210 | |
3211 | /* Set minimum bandwidth for disabled TXQs */ | |
21808437 | 3212 | mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(txq->log_id), 0); |
3f518509 MW |
3213 | |
3214 | /* Set Tx descriptors queue starting address and size */ | |
e531f767 | 3215 | thread = mvpp2_cpu_to_thread(port->priv, get_cpu()); |
1068549c AT |
3216 | mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_NUM_REG, txq->id); |
3217 | mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_DESC_ADDR_REG, 0); | |
3218 | mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_DESC_SIZE_REG, 0); | |
a704bb5c | 3219 | put_cpu(); |
3f518509 MW |
3220 | } |
3221 | ||
3222 | /* Clean a Tx queue: drain pending descriptors and free queued buffers */ | |
3223 | static void mvpp2_txq_clean(struct mvpp2_port *port, struct mvpp2_tx_queue *txq) | |
3224 | { | |
3225 | struct mvpp2_txq_pcpu *txq_pcpu; | |
850623b3 | 3226 | int delay, pending; |
e531f767 | 3227 | unsigned int thread = mvpp2_cpu_to_thread(port->priv, get_cpu()); |
3f518509 MW |
3228 | u32 val; |
3229 | ||
1068549c AT |
3230 | mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_NUM_REG, txq->id); |
3231 | val = mvpp2_thread_read(port->priv, thread, MVPP2_TXQ_PREF_BUF_REG); | |
3f518509 | 3232 | val |= MVPP2_TXQ_DRAIN_EN_MASK; |
1068549c | 3233 | mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_PREF_BUF_REG, val); |
3f518509 MW |
3234 | |
3235 | /* The NAPI queue has been stopped, so wait for all packets | |
3236 | * to be transmitted. | |
3237 | */ | |
3238 | delay = 0; | |
3239 | do { | |
3240 | if (delay >= MVPP2_TX_PENDING_TIMEOUT_MSEC) { | |
3241 | netdev_warn(port->dev, | |
3242 | "port %d: cleaning queue %d timed out\n", | |
3243 | port->id, txq->log_id); | |
3244 | break; | |
3245 | } | |
3246 | mdelay(1); | |
3247 | delay++; | |
3248 | ||
1068549c | 3249 | pending = mvpp2_thread_read(port->priv, thread, |
a786841d TP |
3250 | MVPP2_TXQ_PENDING_REG); |
3251 | pending &= MVPP2_TXQ_PENDING_MASK; | |
3f518509 MW |
3252 | } while (pending); |
3253 | ||
3254 | val &= ~MVPP2_TXQ_DRAIN_EN_MASK; | |
1068549c | 3255 | mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_PREF_BUF_REG, val); |
a704bb5c | 3256 | put_cpu(); |
3f518509 | 3257 | |
e531f767 | 3258 | for (thread = 0; thread < port->priv->nthreads; thread++) { |
074c74df | 3259 | txq_pcpu = per_cpu_ptr(txq->pcpu, thread); |
3f518509 MW |
3260 | |
3261 | /* Release all packets */ | |
3262 | mvpp2_txq_bufs_free(port, txq, txq_pcpu, txq_pcpu->count); | |
3263 | ||
3264 | /* Reset queue */ | |
3265 | txq_pcpu->count = 0; | |
3266 | txq_pcpu->txq_put_index = 0; | |
3267 | txq_pcpu->txq_get_index = 0; | |
3268 | } | |
3269 | } | |
3270 | ||
3271 | /* Cleanup all Tx queues */ | |
3272 | static void mvpp2_cleanup_txqs(struct mvpp2_port *port) | |
3273 | { | |
3274 | struct mvpp2_tx_queue *txq; | |
3275 | int queue; | |
3276 | u32 val; | |
3277 | ||
3278 | val = mvpp2_read(port->priv, MVPP2_TX_PORT_FLUSH_REG); | |
3279 | ||
3280 | /* Reset Tx ports and delete Tx queues */ | |
3281 | val |= MVPP2_TX_PORT_FLUSH_MASK(port->id); | |
3282 | mvpp2_write(port->priv, MVPP2_TX_PORT_FLUSH_REG, val); | |
3283 | ||
09f83975 | 3284 | for (queue = 0; queue < port->ntxqs; queue++) { |
3f518509 MW |
3285 | txq = port->txqs[queue]; |
3286 | mvpp2_txq_clean(port, txq); | |
3287 | mvpp2_txq_deinit(port, txq); | |
3288 | } | |
3289 | ||
3290 | on_each_cpu(mvpp2_txq_sent_counter_clear, port, 1); | |
3291 | ||
3292 | val &= ~MVPP2_TX_PORT_FLUSH_MASK(port->id); | |
3293 | mvpp2_write(port->priv, MVPP2_TX_PORT_FLUSH_REG, val); | |
3294 | } | |
3295 | ||
3296 | /* Cleanup all Rx queues */ | |
3297 | static void mvpp2_cleanup_rxqs(struct mvpp2_port *port) | |
3298 | { | |
3299 | int queue; | |
3300 | ||
09f83975 | 3301 | for (queue = 0; queue < port->nrxqs; queue++) |
3f518509 | 3302 | mvpp2_rxq_deinit(port, port->rxqs[queue]); |
3bd17fdc SC |
3303 | |
3304 | if (port->tx_fc) | |
3305 | mvpp2_rxq_disable_fc(port); | |
3f518509 MW |
3306 | } |
3307 | ||
3308 | /* Init all Rx queues for port */ | |
3309 | static int mvpp2_setup_rxqs(struct mvpp2_port *port) | |
3310 | { | |
3311 | int queue, err; | |
3312 | ||
09f83975 | 3313 | for (queue = 0; queue < port->nrxqs; queue++) { |
3f518509 MW |
3314 | err = mvpp2_rxq_init(port, port->rxqs[queue]); |
3315 | if (err) | |
3316 | goto err_cleanup; | |
3317 | } | |
3bd17fdc SC |
3318 | |
3319 | if (port->tx_fc) | |
3320 | mvpp2_rxq_enable_fc(port); | |
3321 | ||
3f518509 MW |
3322 | return 0; |
3323 | ||
3324 | err_cleanup: | |
3325 | mvpp2_cleanup_rxqs(port); | |
3326 | return err; | |
3327 | } | |
3328 | ||
3329 | /* Init all Tx queues for port */ | |
3330 | static int mvpp2_setup_txqs(struct mvpp2_port *port) | |
3331 | { | |
3332 | struct mvpp2_tx_queue *txq; | |
c2d6fe61 | 3333 | int queue, err; |
3f518509 | 3334 | |
09f83975 | 3335 | for (queue = 0; queue < port->ntxqs; queue++) { |
3f518509 MW |
3336 | txq = port->txqs[queue]; |
3337 | err = mvpp2_txq_init(port, txq); | |
3338 | if (err) | |
3339 | goto err_cleanup; | |
0d283ab5 MC |
3340 | |
3341 | /* Assign this queue to a CPU */ | |
c2d6fe61 MC |
3342 | if (queue < num_possible_cpus()) |
3343 | netif_set_xps_queue(port->dev, cpumask_of(queue), queue); | |
3f518509 MW |
3344 | } |
3345 | ||
213f428f TP |
3346 | if (port->has_tx_irqs) { |
3347 | mvpp2_tx_time_coal_set(port); | |
3348 | for (queue = 0; queue < port->ntxqs; queue++) { | |
3349 | txq = port->txqs[queue]; | |
3350 | mvpp2_tx_pkts_coal_set(port, txq); | |
3351 | } | |
3352 | } | |
3353 | ||
3f518509 MW |
3354 | on_each_cpu(mvpp2_txq_sent_counter_clear, port, 1); |
3355 | return 0; | |
3356 | ||
3357 | err_cleanup: | |
3358 | mvpp2_cleanup_txqs(port); | |
3359 | return err; | |
3360 | } | |
3361 | ||
3362 | /* The callback for per-port interrupt */ | |
3363 | static irqreturn_t mvpp2_isr(int irq, void *dev_id) | |
3364 | { | |
591f4cfa | 3365 | struct mvpp2_queue_vector *qv = dev_id; |
3f518509 | 3366 | |
591f4cfa | 3367 | mvpp2_qvec_interrupt_disable(qv); |
3f518509 | 3368 | |
591f4cfa | 3369 | napi_schedule(&qv->napi); |
3f518509 MW |
3370 | |
3371 | return IRQ_HANDLED; | |
3372 | } | |
3373 | ||
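/* Illustrative note: this follows the standard NAPI pattern - the hard
 * IRQ only masks the vector's interrupt and schedules the poll; the
 * poll routine is expected to re-enable the interrupt once it completes
 * under budget (assumed here, the poll loop lives elsewhere in this
 * file).
 */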
f5015a59 RK |
3374 | static void mvpp2_isr_handle_ptp_queue(struct mvpp2_port *port, int nq) |
3375 | { | |
3376 | struct skb_shared_hwtstamps shhwtstamps; | |
3377 | struct mvpp2_hwtstamp_queue *queue; | |
3378 | struct sk_buff *skb; | |
3379 | void __iomem *ptp_q; | |
3380 | unsigned int id; | |
3381 | u32 r0, r1, r2; | |
3382 | ||
3383 | ptp_q = port->priv->iface_base + MVPP22_PTP_BASE(port->gop_id); | |
3384 | if (nq) | |
3385 | ptp_q += MVPP22_PTP_TX_Q1_R0 - MVPP22_PTP_TX_Q0_R0; | |
3386 | ||
3387 | queue = &port->tx_hwtstamp_queue[nq]; | |
3388 | ||
3389 | while (1) { | |
3390 | r0 = readl_relaxed(ptp_q + MVPP22_PTP_TX_Q0_R0) & 0xffff; | |
3391 | if (!r0) | |
3392 | break; | |
3393 | ||
3394 | r1 = readl_relaxed(ptp_q + MVPP22_PTP_TX_Q0_R1) & 0xffff; | |
3395 | r2 = readl_relaxed(ptp_q + MVPP22_PTP_TX_Q0_R2) & 0xffff; | |
3396 | ||
3397 | id = (r0 >> 1) & 31; | |
3398 | ||
3399 | skb = queue->skb[id]; | |
3400 | queue->skb[id] = NULL; | |
3401 | if (skb) { | |
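			/* Descriptive note (derived from the expression
			 * below): the 32-bit timestamp is stitched from
			 * three 16-bit registers: r0[15:13] -> ts[2:0],
			 * r1[15:0] -> ts[18:3], r2[12:0] -> ts[31:19].
			 */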
3402 | u32 ts = r2 << 19 | r1 << 3 | r0 >> 13; | |
3403 | ||
3404 | mvpp22_tai_tstamp(port->priv->tai, ts, &shhwtstamps); | |
3405 | skb_tstamp_tx(skb, &shhwtstamps); | |
3406 | dev_kfree_skb_any(skb); | |
3407 | } | |
3408 | } | |
3409 | } | |
3410 | ||
3411 | static void mvpp2_isr_handle_ptp(struct mvpp2_port *port) | |
3412 | { | |
3413 | void __iomem *ptp; | |
3414 | u32 val; | |
3415 | ||
3416 | ptp = port->priv->iface_base + MVPP22_PTP_BASE(port->gop_id); | |
3417 | val = readl(ptp + MVPP22_PTP_INT_CAUSE); | |
3418 | if (val & MVPP22_PTP_INT_CAUSE_QUEUE0) | |
3419 | mvpp2_isr_handle_ptp_queue(port, 0); | |
3420 | if (val & MVPP22_PTP_INT_CAUSE_QUEUE1) | |
3421 | mvpp2_isr_handle_ptp_queue(port, 1); | |
3422 | } | |
3423 | ||
45f54a91 RKO |
3424 | static void mvpp2_isr_handle_link(struct mvpp2_port *port, |
3425 | struct phylink_pcs *pcs, bool link) | |
fd3651b2 | 3426 | { |
fd3651b2 | 3427 | struct net_device *dev = port->dev; |
fd3651b2 | 3428 | |
4bb04326 | 3429 | if (port->phylink) { |
45f54a91 | 3430 | phylink_pcs_change(pcs, link); |
36cfd3a6 | 3431 | return; |
4bb04326 AT |
3432 | } |
3433 | ||
36cfd3a6 RK |
3434 | if (!netif_running(dev)) |
3435 | return; | |
fd3651b2 AT |
3436 | |
3437 | if (link) { | |
3438 | mvpp2_interrupts_enable(port); | |
3439 | ||
3440 | mvpp2_egress_enable(port); | |
3441 | mvpp2_ingress_enable(port); | |
3442 | netif_carrier_on(dev); | |
3443 | netif_tx_wake_all_queues(dev); | |
3444 | } else { | |
3445 | netif_tx_stop_all_queues(dev); | |
3446 | netif_carrier_off(dev); | |
3447 | mvpp2_ingress_disable(port); | |
3448 | mvpp2_egress_disable(port); | |
3449 | ||
3450 | mvpp2_interrupts_disable(port); | |
3451 | } | |
36cfd3a6 RK |
3452 | } |
3453 | ||
3454 | static void mvpp2_isr_handle_xlg(struct mvpp2_port *port) | |
3455 | { | |
3456 | bool link; | |
3457 | u32 val; | |
3458 | ||
3459 | val = readl(port->base + MVPP22_XLG_INT_STAT); | |
3460 | if (val & MVPP22_XLG_INT_STAT_LINK) { | |
3461 | val = readl(port->base + MVPP22_XLG_STATUS); | |
cdd0a379 | 3462 | link = (val & MVPP22_XLG_STATUS_LINK_UP); |
45f54a91 | 3463 | mvpp2_isr_handle_link(port, &port->pcs_xlg, link); |
36cfd3a6 RK |
3464 | } |
3465 | } | |
3466 | ||
3467 | static void mvpp2_isr_handle_gmac_internal(struct mvpp2_port *port) | |
3468 | { | |
3469 | bool link; | |
3470 | u32 val; | |
3471 | ||
3472 | if (phy_interface_mode_is_rgmii(port->phy_interface) || | |
3473 | phy_interface_mode_is_8023z(port->phy_interface) || | |
3474 | port->phy_interface == PHY_INTERFACE_MODE_SGMII) { | |
3475 | val = readl(port->base + MVPP22_GMAC_INT_STAT); | |
3476 | if (val & MVPP22_GMAC_INT_STAT_LINK) { | |
3477 | val = readl(port->base + MVPP2_GMAC_STATUS0); | |
cdd0a379 | 3478 | link = (val & MVPP2_GMAC_STATUS0_LINK_UP); |
45f54a91 | 3479 | mvpp2_isr_handle_link(port, &port->pcs_gmac, link); |
36cfd3a6 RK |
3480 | } |
3481 | } | |
3482 | } | |
3483 | ||
3484 | /* Per-port interrupt for link status changes */ | |
89141972 | 3485 | static irqreturn_t mvpp2_port_isr(int irq, void *dev_id) |
36cfd3a6 RK |
3486 | { |
3487 | struct mvpp2_port *port = (struct mvpp2_port *)dev_id; | |
b4b17714 | 3488 | u32 val; |
36cfd3a6 RK |
3489 | |
3490 | mvpp22_gop_mask_irq(port); | |
3491 | ||
3492 | if (mvpp2_port_supports_xlg(port) && | |
3493 | mvpp2_is_xlg(port->phy_interface)) { | |
b4b17714 RK |
3494 | /* Check the external status register */ |
3495 | val = readl(port->base + MVPP22_XLG_EXT_INT_STAT); | |
3496 | if (val & MVPP22_XLG_EXT_INT_STAT_XLG) | |
3497 | mvpp2_isr_handle_xlg(port); | |
f5015a59 RK |
3498 | if (val & MVPP22_XLG_EXT_INT_STAT_PTP) |
3499 | mvpp2_isr_handle_ptp(port); | |
36cfd3a6 | 3500 | } else { |
b4b17714 RK |
3501 | /* If it's not the XLG, we must be using the GMAC. |
3502 | * Check the summary status. | |
3503 | */ | |
3504 | val = readl(port->base + MVPP22_GMAC_INT_SUM_STAT); | |
3505 | if (val & MVPP22_GMAC_INT_SUM_STAT_INTERNAL) | |
3506 | mvpp2_isr_handle_gmac_internal(port); | |
f5015a59 RK |
3507 | if (val & MVPP22_GMAC_INT_SUM_STAT_PTP) |
3508 | mvpp2_isr_handle_ptp(port); | |
36cfd3a6 | 3509 | } |
fd3651b2 | 3510 | |
fd3651b2 AT |
3511 | mvpp22_gop_unmask_irq(port); |
3512 | return IRQ_HANDLED; | |
3513 | } | |
3514 | ||
ecb9f80d | 3515 | static enum hrtimer_restart mvpp2_hr_timer_cb(struct hrtimer *timer) |
edc660fa | 3516 | { |
ecb9f80d TG |
3517 | struct net_device *dev; |
3518 | struct mvpp2_port *port; | |
074c74df | 3519 | struct mvpp2_port_pcpu *port_pcpu; |
edc660fa MW |
3520 | unsigned int tx_todo, cause; |
3521 | ||
ecb9f80d TG |
3522 | port_pcpu = container_of(timer, struct mvpp2_port_pcpu, tx_done_timer); |
3523 | dev = port_pcpu->dev; | |
074c74df | 3524 | |
edc660fa | 3525 | if (!netif_running(dev)) |
ecb9f80d TG |
3526 | return HRTIMER_NORESTART; |
3527 | ||
edc660fa | 3528 | port_pcpu->timer_scheduled = false; |
ecb9f80d | 3529 | port = netdev_priv(dev); |
edc660fa MW |
3530 | |
3531 | /* Process all the Tx queues */ | |
09f83975 | 3532 | cause = (1 << port->ntxqs) - 1; |
074c74df | 3533 | tx_todo = mvpp2_tx_done(port, cause, |
e531f767 | 3534 | mvpp2_cpu_to_thread(port->priv, smp_processor_id())); |
edc660fa MW |
3535 | |
3536 | /* Set the timer in case not all the packets were processed */ | |
ecb9f80d TG |
3537 | if (tx_todo && !port_pcpu->timer_scheduled) { |
3538 | port_pcpu->timer_scheduled = true; | |
3539 | hrtimer_forward_now(&port_pcpu->tx_done_timer, | |
3540 | MVPP2_TXDONE_HRTIMER_PERIOD_NS); | |
edc660fa | 3541 | |
ecb9f80d TG |
3542 | return HRTIMER_RESTART; |
3543 | } | |
edc660fa MW |
3544 | return HRTIMER_NORESTART; |
3545 | } | |
3546 | ||
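/* Illustrative note: on ports without per-queue Tx-done interrupts,
 * completion is polled from this hrtimer instead - the callback
 * forwards and restarts itself for as long as mvpp2_tx_done() reports
 * outstanding work, and otherwise lets the timer expire until the Tx
 * path re-arms it.
 */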
3f518509 MW |
3547 | /* Main RX/TX processing routines */ |
3548 | ||
3549 | /* Display more error info */ | |
3550 | static void mvpp2_rx_error(struct mvpp2_port *port, | |
3551 | struct mvpp2_rx_desc *rx_desc) | |
3552 | { | |
ac3dd277 TP |
3553 | u32 status = mvpp2_rxdesc_status_get(port, rx_desc); |
3554 | size_t sz = mvpp2_rxdesc_size_get(port, rx_desc); | |
934e0f83 | 3555 | char *err_str = NULL; |
3f518509 MW |
3556 | |
3557 | switch (status & MVPP2_RXD_ERR_CODE_MASK) { | |
3558 | case MVPP2_RXD_ERR_CRC: | |
934e0f83 | 3559 | err_str = "crc"; |
3f518509 MW |
3560 | break; |
3561 | case MVPP2_RXD_ERR_OVERRUN: | |
934e0f83 | 3562 | err_str = "overrun"; |
3f518509 MW |
3563 | break; |
3564 | case MVPP2_RXD_ERR_RESOURCE: | |
934e0f83 | 3565 | err_str = "resource"; |
3f518509 MW |
3566 | break; |
3567 | } | |
934e0f83 YM |
3568 | if (err_str && net_ratelimit()) |
3569 | netdev_err(port->dev, | |
3570 | "bad rx status %08x (%s error), size=%zu\n", | |
3571 | status, err_str, sz); | |
3f518509 MW |
3572 | } |
3573 | ||
3574 | /* Handle RX checksum offload */ | |
aff0824d | 3575 | static int mvpp2_rx_csum(struct mvpp2_port *port, u32 status) |
3f518509 MW |
3576 | { |
3577 | if (((status & MVPP2_RXD_L3_IP4) && | |
3578 | !(status & MVPP2_RXD_IP4_HEADER_ERR)) || | |
3579 | (status & MVPP2_RXD_L3_IP6)) | |
3580 | if (((status & MVPP2_RXD_L4_UDP) || | |
3581 | (status & MVPP2_RXD_L4_TCP)) && | |
aff0824d LB |
3582 | (status & MVPP2_RXD_L4_CSUM_OK)) |
3583 | return CHECKSUM_UNNECESSARY; | |
3f518509 | 3584 | |
aff0824d | 3585 | return CHECKSUM_NONE; |
3f518509 MW |
3586 | } |
3587 | ||
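/* Illustrative note on the conditions above: CHECKSUM_UNNECESSARY is
 * only returned for an IPv4 frame without an IP-header error, or an
 * IPv6 frame, that is also TCP or UDP with the HW L4 checksum-OK bit
 * set; every other combination falls through to CHECKSUM_NONE so the
 * stack verifies the checksum in software.
 */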
80f60a91 | 3588 | /* Allocate a new skb and add it to BM pool */ |
3f518509 | 3589 | static int mvpp2_rx_refill(struct mvpp2_port *port, |
b27db227 MC |
3590 | struct mvpp2_bm_pool *bm_pool, |
3591 | struct page_pool *page_pool, int pool) | |
3f518509 | 3592 | { |
20396136 | 3593 | dma_addr_t dma_addr; |
4e4a105f | 3594 | phys_addr_t phys_addr; |
0e037281 | 3595 | void *buf; |
3f518509 | 3596 | |
b27db227 MC |
3597 | buf = mvpp2_buf_alloc(port, bm_pool, page_pool, |
3598 | &dma_addr, &phys_addr, GFP_ATOMIC); | |
0e037281 | 3599 | if (!buf) |
3f518509 MW |
3600 | return -ENOMEM; |
3601 | ||
7d7627ba | 3602 | mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr); |
7ef7e1d9 | 3603 | |
3f518509 MW |
3604 | return 0; |
3605 | } | |
3606 | ||
3607 | /* Handle tx checksum */ | |
3608 | static u32 mvpp2_skb_tx_csum(struct mvpp2_port *port, struct sk_buff *skb) | |
3609 | { | |
3610 | if (skb->ip_summed == CHECKSUM_PARTIAL) { | |
3611 | int ip_hdr_len = 0; | |
3612 | u8 l4_proto; | |
35f3625c | 3613 | __be16 l3_proto = vlan_get_protocol(skb); |
3f518509 | 3614 | |
35f3625c | 3615 | if (l3_proto == htons(ETH_P_IP)) { |
3f518509 MW |
3616 | struct iphdr *ip4h = ip_hdr(skb); |
3617 | ||
3618 | /* Calculate IPv4 checksum and L4 checksum */ | |
3619 | ip_hdr_len = ip4h->ihl; | |
3620 | l4_proto = ip4h->protocol; | |
35f3625c | 3621 | } else if (l3_proto == htons(ETH_P_IPV6)) { |
3f518509 MW |
3622 | struct ipv6hdr *ip6h = ipv6_hdr(skb); |
3623 | ||
3624 | /* Read l4_protocol from one of IPv6 extra headers */ | |
3625 | if (skb_network_header_len(skb) > 0) | |
3626 | ip_hdr_len = (skb_network_header_len(skb) >> 2); | |
3627 | l4_proto = ip6h->nexthdr; | |
3628 | } else { | |
3629 | return MVPP2_TXD_L4_CSUM_NOT; | |
3630 | } | |
3631 | ||
3632 | return mvpp2_txq_desc_csum(skb_network_offset(skb), | |
35f3625c | 3633 | l3_proto, ip_hdr_len, l4_proto); |
3f518509 MW |
3634 | } |
3635 | ||
3636 | return MVPP2_TXD_L4_CSUM_NOT | MVPP2_TXD_IP_CSUM_DISABLE; | |
3637 | } | |
3638 | ||
c2d6fe61 MC |
3639 | static void mvpp2_xdp_finish_tx(struct mvpp2_port *port, u16 txq_id, int nxmit, int nxmit_byte) |
3640 | { | |
3641 | unsigned int thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id()); | |
c2d6fe61 MC |
3642 | struct mvpp2_tx_queue *aggr_txq; |
3643 | struct mvpp2_txq_pcpu *txq_pcpu; | |
3644 | struct mvpp2_tx_queue *txq; | |
3645 | struct netdev_queue *nq; | |
3646 | ||
3647 | txq = port->txqs[txq_id]; | |
3648 | txq_pcpu = per_cpu_ptr(txq->pcpu, thread); | |
3649 | nq = netdev_get_tx_queue(port->dev, txq_id); | |
3650 | aggr_txq = &port->priv->aggr_txqs[thread]; | |
3651 | ||
3652 | txq_pcpu->reserved_num -= nxmit; | |
3653 | txq_pcpu->count += nxmit; | |
3654 | aggr_txq->count += nxmit; | |
3655 | ||
3656 | /* Enable transmit */ | |
3657 | wmb(); | |
3658 | mvpp2_aggr_txq_pend_desc_add(port, nxmit); | |
3659 | ||
3660 | if (txq_pcpu->count >= txq_pcpu->stop_threshold) | |
3661 | netif_tx_stop_queue(nq); | |
3662 | ||
c2d6fe61 MC |
3663 | /* Finalize TX processing */ |
3664 | if (!port->has_tx_irqs && txq_pcpu->count >= txq->done_pkts_coal) | |
3665 | mvpp2_txq_done(port, txq, txq_pcpu); | |
3666 | } | |
3667 | ||
3668 | static int | |
3669 | mvpp2_xdp_submit_frame(struct mvpp2_port *port, u16 txq_id, | |
3670 | struct xdp_frame *xdpf, bool dma_map) | |
3671 | { | |
3672 | unsigned int thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id()); | |
3673 | u32 tx_cmd = MVPP2_TXD_L4_CSUM_NOT | MVPP2_TXD_IP_CSUM_DISABLE | | |
3674 | MVPP2_TXD_F_DESC | MVPP2_TXD_L_DESC; | |
3675 | enum mvpp2_tx_buf_type buf_type; | |
3676 | struct mvpp2_txq_pcpu *txq_pcpu; | |
3677 | struct mvpp2_tx_queue *aggr_txq; | |
3678 | struct mvpp2_tx_desc *tx_desc; | |
3679 | struct mvpp2_tx_queue *txq; | |
3680 | int ret = MVPP2_XDP_TX; | |
3681 | dma_addr_t dma_addr; | |
3682 | ||
3683 | txq = port->txqs[txq_id]; | |
3684 | txq_pcpu = per_cpu_ptr(txq->pcpu, thread); | |
3685 | aggr_txq = &port->priv->aggr_txqs[thread]; | |
3686 | ||
3687 | /* Check number of available descriptors */ | |
3688 | if (mvpp2_aggr_desc_num_check(port, aggr_txq, 1) || | |
3689 | mvpp2_txq_reserved_desc_num_proc(port, txq, txq_pcpu, 1)) { | |
3690 | ret = MVPP2_XDP_DROPPED; | |
3691 | goto out; | |
3692 | } | |
3693 | ||
3694 | /* Get a descriptor for the first part of the packet */ | |
3695 | tx_desc = mvpp2_txq_next_desc_get(aggr_txq); | |
3696 | mvpp2_txdesc_txq_set(port, tx_desc, txq->id); | |
3697 | mvpp2_txdesc_size_set(port, tx_desc, xdpf->len); | |
3698 | ||
3699 | if (dma_map) { | |
3700 | /* XDP_REDIRECT or AF_XDP */ | |
3701 | dma_addr = dma_map_single(port->dev->dev.parent, xdpf->data, | |
3702 | xdpf->len, DMA_TO_DEVICE); | |
3703 | ||
3704 | if (unlikely(dma_mapping_error(port->dev->dev.parent, dma_addr))) { | |
3705 | mvpp2_txq_desc_put(txq); | |
3706 | ret = MVPP2_XDP_DROPPED; | |
3707 | goto out; | |
3708 | } | |
3709 | ||
3710 | buf_type = MVPP2_TYPE_XDP_NDO; | |
3711 | } else { | |
3712 | /* XDP_TX */ | |
3713 | struct page *page = virt_to_page(xdpf->data); | |
3714 | ||
3715 | dma_addr = page_pool_get_dma_addr(page) + | |
3716 | sizeof(*xdpf) + xdpf->headroom; | |
3717 | dma_sync_single_for_device(port->dev->dev.parent, dma_addr, | |
3718 | xdpf->len, DMA_BIDIRECTIONAL); | |
3719 | ||
3720 | buf_type = MVPP2_TYPE_XDP_TX; | |
3721 | } | |
3722 | ||
3723 | mvpp2_txdesc_dma_addr_set(port, tx_desc, dma_addr); | |
3724 | ||
3725 | mvpp2_txdesc_cmd_set(port, tx_desc, tx_cmd); | |
3726 | mvpp2_txq_inc_put(port, txq_pcpu, xdpf, tx_desc, buf_type); | |
3727 | ||
3728 | out: | |
3729 | return ret; | |
3730 | } | |
3731 | ||
3732 | static int | |
3733 | mvpp2_xdp_xmit_back(struct mvpp2_port *port, struct xdp_buff *xdp) | |
3734 | { | |
39b96315 | 3735 | struct mvpp2_pcpu_stats *stats = this_cpu_ptr(port->stats); |
c2d6fe61 MC |
3736 | struct xdp_frame *xdpf; |
3737 | u16 txq_id; | |
3738 | int ret; | |
3739 | ||
3740 | xdpf = xdp_convert_buff_to_frame(xdp); | |
3741 | if (unlikely(!xdpf)) | |
3742 | return MVPP2_XDP_DROPPED; | |
3743 | ||
3744 | /* The first half of the TX queues is used for XPS, | |
3745 | * the second half for XDP_TX | |
3746 | */ | |
3747 | txq_id = mvpp2_cpu_to_thread(port->priv, smp_processor_id()) + (port->ntxqs / 2); | |
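	/* Worked example (hypothetical sizes): with ntxqs == 8 and four
	 * threads, thread 1 sends XDP_TX frames on queue 1 + 8/2 = 5,
	 * leaving queues 0-3 to regular XPS traffic.
	 */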
3748 | ||
3749 | ret = mvpp2_xdp_submit_frame(port, txq_id, xdpf, false); | |
39b96315 SA |
3750 | if (ret == MVPP2_XDP_TX) { |
3751 | u64_stats_update_begin(&stats->syncp); | |
3752 | stats->tx_bytes += xdpf->len; | |
3753 | stats->tx_packets++; | |
3754 | stats->xdp_tx++; | |
3755 | u64_stats_update_end(&stats->syncp); | |
3756 | ||
c2d6fe61 | 3757 | mvpp2_xdp_finish_tx(port, txq_id, 1, xdpf->len); |
39b96315 SA |
3758 | } else { |
3759 | u64_stats_update_begin(&stats->syncp); | |
3760 | stats->xdp_tx_err++; | |
3761 | u64_stats_update_end(&stats->syncp); | |
3762 | } | |
c2d6fe61 MC |
3763 | |
3764 | return ret; | |
3765 | } | |
3766 | ||
3767 | static int | |
3768 | mvpp2_xdp_xmit(struct net_device *dev, int num_frame, | |
3769 | struct xdp_frame **frames, u32 flags) | |
3770 | { | |
3771 | struct mvpp2_port *port = netdev_priv(dev); | |
fdc13979 | 3772 | int i, nxmit_byte = 0, nxmit = 0; |
39b96315 | 3773 | struct mvpp2_pcpu_stats *stats; |
c2d6fe61 MC |
3774 | u16 txq_id; |
3775 | u32 ret; | |
3776 | ||
3777 | if (unlikely(test_bit(0, &port->state))) | |
3778 | return -ENETDOWN; | |
3779 | ||
3780 | if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) | |
3781 | return -EINVAL; | |
3782 | ||
3783 | /* The first half of the TX queues is used for XPS, | |
3784 | * the second half for XDP_TX | |
3785 | */ | |
3786 | txq_id = mvpp2_cpu_to_thread(port->priv, smp_processor_id()) + (port->ntxqs / 2); | |
3787 | ||
3788 | for (i = 0; i < num_frame; i++) { | |
3789 | ret = mvpp2_xdp_submit_frame(port, txq_id, frames[i], true); | |
fdc13979 LB |
3790 | if (ret != MVPP2_XDP_TX) |
3791 | break; | |
3792 | ||
3793 | nxmit_byte += frames[i]->len; | |
3794 | nxmit++; | |
c2d6fe61 MC |
3795 | } |
3796 | ||
39b96315 | 3797 | if (likely(nxmit > 0)) |
c2d6fe61 MC |
3798 | mvpp2_xdp_finish_tx(port, txq_id, nxmit, nxmit_byte); |
3799 | ||
39b96315 SA |
3800 | stats = this_cpu_ptr(port->stats); |
3801 | u64_stats_update_begin(&stats->syncp); | |
3802 | stats->tx_bytes += nxmit_byte; | |
3803 | stats->tx_packets += nxmit; | |
3804 | stats->xdp_xmit += nxmit; | |
3805 | stats->xdp_xmit_err += num_frame - nxmit; | |
3806 | u64_stats_update_end(&stats->syncp); | |
3807 | ||
c2d6fe61 MC |
3808 | return nxmit; |
3809 | } | |
3810 | ||
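/* Usage note (standard ndo_xdp_xmit contract, assumed rather than
 * shown here): the return value is the number of frames accepted, and
 * the XDP core is responsible for freeing the rest, which is why the
 * loop above simply stops at the first frame that fails to submit.
 */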
07dd0a7a | 3811 | static int |
376d6892 MC |
3812 | mvpp2_run_xdp(struct mvpp2_port *port, struct bpf_prog *prog, |
3813 | struct xdp_buff *xdp, struct page_pool *pp, | |
3814 | struct mvpp2_pcpu_stats *stats) | |
07dd0a7a MC |
3815 | { |
3816 | unsigned int len, sync, err; | |
3817 | struct page *page; | |
3818 | u32 ret, act; | |
3819 | ||
3820 | len = xdp->data_end - xdp->data_hard_start - MVPP2_SKB_HEADROOM; | |
3821 | act = bpf_prog_run_xdp(prog, xdp); | |
3822 | ||
3823 | /* Due to xdp_adjust_tail: the for-device DMA sync must cover the max length the CPU may have touched */ | |
3824 | sync = xdp->data_end - xdp->data_hard_start - MVPP2_SKB_HEADROOM; | |
3825 | sync = max(sync, len); | |
3826 | ||
3827 | switch (act) { | |
3828 | case XDP_PASS: | |
39b96315 | 3829 | stats->xdp_pass++; |
07dd0a7a MC |
3830 | ret = MVPP2_XDP_PASS; |
3831 | break; | |
3832 | case XDP_REDIRECT: | |
3833 | err = xdp_do_redirect(port->dev, xdp, prog); | |
3834 | if (unlikely(err)) { | |
3835 | ret = MVPP2_XDP_DROPPED; | |
3836 | page = virt_to_head_page(xdp->data); | |
3837 | page_pool_put_page(pp, page, sync, true); | |
3838 | } else { | |
3839 | ret = MVPP2_XDP_REDIR; | |
39b96315 | 3840 | stats->xdp_redirect++; |
07dd0a7a MC |
3841 | } |
3842 | break; | |
c2d6fe61 MC |
3843 | case XDP_TX: |
3844 | ret = mvpp2_xdp_xmit_back(port, xdp); | |
3845 | if (ret != MVPP2_XDP_TX) { | |
3846 | page = virt_to_head_page(xdp->data); | |
3847 | page_pool_put_page(pp, page, sync, true); | |
3848 | } | |
3849 | break; | |
07dd0a7a | 3850 | default: |
c8064e5b | 3851 | bpf_warn_invalid_xdp_action(port->dev, prog, act); |
07dd0a7a MC |
3852 | fallthrough; |
3853 | case XDP_ABORTED: | |
3854 | trace_xdp_exception(port->dev, prog, act); | |
3855 | fallthrough; | |
3856 | case XDP_DROP: | |
3857 | page = virt_to_head_page(xdp->data); | |
3858 | page_pool_put_page(pp, page, sync, true); | |
3859 | ret = MVPP2_XDP_DROPPED; | |
39b96315 | 3860 | stats->xdp_drop++; |
07dd0a7a MC |
3861 | break; |
3862 | } | |
3863 | ||
3864 | return ret; | |
3865 | } | |
3866 | ||
17f9c1b6 SC |
3867 | static void mvpp2_buff_hdr_pool_put(struct mvpp2_port *port, struct mvpp2_rx_desc *rx_desc, |
3868 | int pool, u32 rx_status) | |
3869 | { | |
3870 | phys_addr_t phys_addr, phys_addr_next; | |
3871 | dma_addr_t dma_addr, dma_addr_next; | |
3872 | struct mvpp2_buff_hdr *buff_hdr; | |
3873 | ||
3874 | phys_addr = mvpp2_rxdesc_dma_addr_get(port, rx_desc); | |
3875 | dma_addr = mvpp2_rxdesc_cookie_get(port, rx_desc); | |
3876 | ||
3877 | do { | |
3878 | buff_hdr = (struct mvpp2_buff_hdr *)phys_to_virt(phys_addr); | |
3879 | ||
3880 | phys_addr_next = le32_to_cpu(buff_hdr->next_phys_addr); | |
3881 | dma_addr_next = le32_to_cpu(buff_hdr->next_dma_addr); | |
3882 | ||
3883 | if (port->priv->hw_version >= MVPP22) { | |
3884 | phys_addr_next |= ((u64)buff_hdr->next_phys_addr_high << 32); | |
3885 | dma_addr_next |= ((u64)buff_hdr->next_dma_addr_high << 32); | |
3886 | } | |
3887 | ||
3888 | mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr); | |
3889 | ||
3890 | phys_addr = phys_addr_next; | |
3891 | dma_addr = dma_addr_next; | |
3892 | ||
3893 | } while (!MVPP2_B_HDR_INFO_IS_LAST(le16_to_cpu(buff_hdr->info))); | |
3894 | } | |
3895 | ||
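/* Illustrative note: a packet spanning several BM buffers arrives as a
 * chain of buffer headers, each holding the address of the next buffer;
 * the loop above walks the chain and returns every buffer to its pool,
 * stopping at the header flagged as last.
 */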
3f518509 | 3896 | /* Main rx processing */ |
591f4cfa TP |
3897 | static int mvpp2_rx(struct mvpp2_port *port, struct napi_struct *napi, |
3898 | int rx_todo, struct mvpp2_rx_queue *rxq) | |
3f518509 MW |
3899 | { |
3900 | struct net_device *dev = port->dev; | |
39b96315 | 3901 | struct mvpp2_pcpu_stats ps = {}; |
c2d6fe61 | 3902 | enum dma_data_direction dma_dir; |
07dd0a7a MC |
3903 | struct bpf_prog *xdp_prog; |
3904 | struct xdp_buff xdp; | |
b5015854 MW |
3905 | int rx_received; |
3906 | int rx_done = 0; | |
07dd0a7a | 3907 | u32 xdp_ret = 0; |
3f518509 | 3908 | |
07dd0a7a MC |
3909 | xdp_prog = READ_ONCE(port->xdp_prog); |
3910 | ||
3f518509 MW |
3911 | /* Get the number of received packets and clamp the to-do count */ | |
3912 | rx_received = mvpp2_rxq_received(port, rxq->id); | |
3913 | if (rx_todo > rx_received) | |
3914 | rx_todo = rx_received; | |
3915 | ||
b5015854 | 3916 | while (rx_done < rx_todo) { |
3f518509 | 3917 | struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq); |
9a45e193 | 3918 | u32 rx_status, timestamp, metasize = 0; |
3f518509 | 3919 | struct mvpp2_bm_pool *bm_pool; |
b27db227 | 3920 | struct page_pool *pp = NULL; |
3f518509 | 3921 | struct sk_buff *skb; |
0e037281 | 3922 | unsigned int frag_size; |
20396136 | 3923 | dma_addr_t dma_addr; |
ac3dd277 | 3924 | phys_addr_t phys_addr; |
07dd0a7a | 3925 | int pool, rx_bytes, err, ret; |
2f128eb3 | 3926 | struct page *page; |
0e037281 | 3927 | void *data; |
3f518509 | 3928 | |
2f128eb3 MC |
3929 | phys_addr = mvpp2_rxdesc_cookie_get(port, rx_desc); |
3930 | data = (void *)phys_to_virt(phys_addr); | |
3931 | page = virt_to_page(data); | |
3932 | prefetch(page); | |
3933 | ||
b5015854 | 3934 | rx_done++; |
ac3dd277 TP |
3935 | rx_status = mvpp2_rxdesc_status_get(port, rx_desc); |
3936 | rx_bytes = mvpp2_rxdesc_size_get(port, rx_desc); | |
3937 | rx_bytes -= MVPP2_MH_SIZE; | |
3938 | dma_addr = mvpp2_rxdesc_dma_addr_get(port, rx_desc); | |
ac3dd277 | 3939 | |
56b8aae9 TP |
3940 | pool = (rx_status & MVPP2_RXD_BM_POOL_ID_MASK) >> |
3941 | MVPP2_RXD_BM_POOL_ID_OFFS; | |
3f518509 | 3942 | bm_pool = &port->priv->bm_pools[pool]; |
3f518509 | 3943 | |
c2d6fe61 MC |
3944 | if (port->priv->percpu_pools) { |
3945 | pp = port->priv->page_pool[pool]; | |
3946 | dma_dir = page_pool_get_dma_dir(pp); | |
3947 | } else { | |
3948 | dma_dir = DMA_FROM_DEVICE; | |
3949 | } | |
3950 | ||
e1921168 MC |
3951 | dma_sync_single_for_cpu(dev->dev.parent, dma_addr, |
3952 | rx_bytes + MVPP2_MH_SIZE, | |
c2d6fe61 | 3953 | dma_dir); |
e1921168 | 3954 | |
17f9c1b6 SC |
3955 | /* Buffer header not supported */ |
3956 | if (rx_status & MVPP2_RXD_BUF_HDR) | |
3957 | goto err_drop_frame; | |
3958 | ||
3959 | /* In case of an error, release the requested buffer pointer | |
3960 | * to the Buffer Manager. This request process is controlled | |
3961 | * by the hardware, and the information about the buffer is | |
3962 | * carried in the RX descriptor. | |
3963 | */ | |
3964 | if (rx_status & MVPP2_RXD_ERR_SUMMARY) | |
3965 | goto err_drop_frame; | |
3966 | ||
c2d6fe61 | 3967 | /* Prefetch header */ |
d8ea89fe | 3968 | prefetch(data + MVPP2_MH_SIZE + MVPP2_SKB_HEADROOM); |
b27db227 | 3969 | |
0e037281 TP |
3970 | if (bm_pool->frag_size > PAGE_SIZE) |
3971 | frag_size = 0; | |
3972 | else | |
3973 | frag_size = bm_pool->frag_size; | |
3974 | ||
07dd0a7a | 3975 | if (xdp_prog) { |
43b5169d | 3976 | struct xdp_rxq_info *xdp_rxq; |
07dd0a7a MC |
3977 | |
3978 | if (bm_pool->pkt_size == MVPP2_BM_SHORT_PKT_SIZE) | |
43b5169d | 3979 | xdp_rxq = &rxq->xdp_rxq_short; |
07dd0a7a | 3980 | else |
43b5169d | 3981 | xdp_rxq = &rxq->xdp_rxq_long; |
07dd0a7a | 3982 | |
43b5169d | 3983 | xdp_init_buff(&xdp, PAGE_SIZE, xdp_rxq); |
be9df4af LB |
3984 | xdp_prepare_buff(&xdp, data, |
3985 | MVPP2_MH_SIZE + MVPP2_SKB_HEADROOM, | |
9a45e193 | 3986 | rx_bytes, true); |
07dd0a7a | 3987 | |
376d6892 | 3988 | ret = mvpp2_run_xdp(port, xdp_prog, &xdp, pp, &ps); |
07dd0a7a MC |
3989 | |
3990 | if (ret) { | |
3991 | xdp_ret |= ret; | |
3992 | err = mvpp2_rx_refill(port, bm_pool, pp, pool); | |
3993 | if (err) { | |
3994 | netdev_err(port->dev, "failed to refill BM pools\n"); | |
3995 | goto err_drop_frame; | |
3996 | } | |
3997 | ||
39b96315 SA |
3998 | ps.rx_packets++; |
3999 | ps.rx_bytes += rx_bytes; | |
07dd0a7a MC |
4000 | continue; |
4001 | } | |
9a45e193 LB |
4002 | |
4003 | metasize = xdp.data - xdp.data_meta; | |
07dd0a7a MC |
4004 | } |
4005 | ||
4467c09b AS |
4006 | if (frag_size) |
4007 | skb = build_skb(data, frag_size); | |
4008 | else | |
4009 | skb = slab_build_skb(data); | |
0e037281 TP |
4010 | if (!skb) { |
4011 | netdev_warn(port->dev, "skb build failed\n"); | |
4012 | goto err_drop_frame; | |
4013 | } | |
3f518509 | 4014 | |
ce3497e2 RK |
4015 | /* If we have RX hardware timestamping enabled, grab the |
4016 | * timestamp from the descriptor and convert it. | |
4017 | */ | |
4018 | if (mvpp22_rx_hwtstamping(port)) { | |
4019 | timestamp = le32_to_cpu(rx_desc->pp22.timestamp); | |
4020 | mvpp22_tai_tstamp(port->priv->tai, timestamp, | |
4021 | skb_hwtstamps(skb)); | |
4022 | } | |
4023 | ||
b27db227 | 4024 | err = mvpp2_rx_refill(port, bm_pool, pp, pool); |
b5015854 MW |
4025 | if (err) { |
4026 | netdev_err(port->dev, "failed to refill BM pools\n"); | |
d6526926 | 4027 | dev_kfree_skb_any(skb); |
b5015854 MW |
4028 | goto err_drop_frame; |
4029 | } | |
4030 | ||
b27db227 | 4031 | if (pp) |
57f05bc2 | 4032 | skb_mark_for_recycle(skb); |
b27db227 MC |
4033 | else |
4034 | dma_unmap_single_attrs(dev->dev.parent, dma_addr, | |
4035 | bm_pool->buf_size, DMA_FROM_DEVICE, | |
4036 | DMA_ATTR_SKIP_CPU_SYNC); | |
4229d502 | 4037 | |
39b96315 SA |
4038 | ps.rx_packets++; |
4039 | ps.rx_bytes += rx_bytes; | |
3f518509 | 4040 | |
07dd0a7a | 4041 | skb_reserve(skb, MVPP2_MH_SIZE + MVPP2_SKB_HEADROOM); |
3f518509 | 4042 | skb_put(skb, rx_bytes); |
9a45e193 LB |
4043 | if (metasize) |
4044 | skb_metadata_set(skb, metasize); | |
aff0824d | 4045 | skb->ip_summed = mvpp2_rx_csum(port, rx_status); |
d8ea89fe | 4046 | skb->protocol = eth_type_trans(skb, dev); |
3f518509 | 4047 | |
591f4cfa | 4048 | napi_gro_receive(napi, skb); |
7f7183af MC |
4049 | continue; |
4050 | ||
4051 | err_drop_frame: | |
4052 | dev->stats.rx_errors++; | |
4053 | mvpp2_rx_error(port, rx_desc); | |
4054 | /* Return the buffer to the pool */ | |
17f9c1b6 SC |
4055 | if (rx_status & MVPP2_RXD_BUF_HDR) |
4056 | mvpp2_buff_hdr_pool_put(port, rx_desc, pool, rx_status); | |
4057 | else | |
4058 | mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr); | |
3f518509 MW |
4059 | } |
4060 | ||
c2d6fe61 | 4061 | if (xdp_ret & MVPP2_XDP_REDIR) |
7f04bd10 | 4062 | xdp_do_flush(); |
c2d6fe61 | 4063 | |
39b96315 | 4064 | if (ps.rx_packets) { |
3f518509 MW |
4065 | struct mvpp2_pcpu_stats *stats = this_cpu_ptr(port->stats); |
4066 | ||
4067 | u64_stats_update_begin(&stats->syncp); | |
39b96315 SA |
4068 | stats->rx_packets += ps.rx_packets; |
4069 | stats->rx_bytes += ps.rx_bytes; | |
4070 | /* xdp */ | |
4071 | stats->xdp_redirect += ps.xdp_redirect; | |
4072 | stats->xdp_pass += ps.xdp_pass; | |
4073 | stats->xdp_drop += ps.xdp_drop; | |
3f518509 MW |
4074 | u64_stats_update_end(&stats->syncp); |
4075 | } | |
4076 | ||
4077 | /* Update Rx queue management counters */ | |
4078 | wmb(); | |
b5015854 | 4079 | mvpp2_rxq_status_update(port, rxq->id, rx_done, rx_done); |
3f518509 MW |
4080 | |
4081 | return rx_todo; | |
4082 | } | |
4083 | ||
4084 | static inline void | |
ac3dd277 | 4085 | tx_desc_unmap_put(struct mvpp2_port *port, struct mvpp2_tx_queue *txq, |
3f518509 MW |
4086 | struct mvpp2_tx_desc *desc) |
4087 | { | |
e531f767 | 4088 | unsigned int thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id()); |
074c74df | 4089 | struct mvpp2_txq_pcpu *txq_pcpu = per_cpu_ptr(txq->pcpu, thread); |
20920267 | 4090 | |
ac3dd277 TP |
4091 | dma_addr_t buf_dma_addr = |
4092 | mvpp2_txdesc_dma_addr_get(port, desc); | |
4093 | size_t buf_sz = | |
4094 | mvpp2_txdesc_size_get(port, desc); | |
20920267 AT |
4095 | if (!IS_TSO_HEADER(txq_pcpu, buf_dma_addr)) |
4096 | dma_unmap_single(port->dev->dev.parent, buf_dma_addr, | |
4097 | buf_sz, DMA_TO_DEVICE); | |
3f518509 MW |
4098 | mvpp2_txq_desc_put(txq); |
4099 | } | |
4100 | ||
f5015a59 RK |
4101 | static void mvpp2_txdesc_clear_ptp(struct mvpp2_port *port, |
4102 | struct mvpp2_tx_desc *desc) | |
4103 | { | |
4104 | /* We only need to clear the low bits */ | |
f704177e | 4105 | if (port->priv->hw_version >= MVPP22) |
f5015a59 RK |
4106 | desc->pp22.ptp_descriptor &= |
4107 | cpu_to_le32(~MVPP22_PTP_DESC_MASK_LOW); | |
4108 | } | |
4109 | ||
4110 | static bool mvpp2_tx_hw_tstamp(struct mvpp2_port *port, | |
4111 | struct mvpp2_tx_desc *tx_desc, | |
4112 | struct sk_buff *skb) | |
4113 | { | |
4114 | struct mvpp2_hwtstamp_queue *queue; | |
4115 | unsigned int mtype, type, i; | |
4116 | struct ptp_header *hdr; | |
4117 | u64 ptpdesc; | |
4118 | ||
4119 | if (port->priv->hw_version == MVPP21 || | |
4120 | port->tx_hwtstamp_type == HWTSTAMP_TX_OFF) | |
4121 | return false; | |
4122 | ||
4123 | type = ptp_classify_raw(skb); | |
4124 | if (!type) | |
4125 | return false; | |
4126 | ||
4127 | hdr = ptp_parse_header(skb, type); | |
4128 | if (!hdr) | |
4129 | return false; | |
4130 | ||
068b6214 RK |
4131 | skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; |
4132 | ||
f5015a59 RK |
4133 | ptpdesc = MVPP22_PTP_MACTIMESTAMPINGEN | |
4134 | MVPP22_PTP_ACTION_CAPTURE; | |
4135 | queue = &port->tx_hwtstamp_queue[0]; | |
4136 | ||
4137 | switch (type & PTP_CLASS_VMASK) { | |
4138 | case PTP_CLASS_V1: | |
4139 | ptpdesc |= MVPP22_PTP_PACKETFORMAT(MVPP22_PTP_PKT_FMT_PTPV1); | |
4140 | break; | |
4141 | ||
4142 | case PTP_CLASS_V2: | |
4143 | ptpdesc |= MVPP22_PTP_PACKETFORMAT(MVPP22_PTP_PKT_FMT_PTPV2); | |
4144 | mtype = hdr->tsmt & 15; | |
4145 | /* Direct PTP Sync messages to queue 1 */ | |
4146 | if (mtype == 0) { | |
4147 | ptpdesc |= MVPP22_PTP_TIMESTAMPQUEUESELECT; | |
4148 | queue = &port->tx_hwtstamp_queue[1]; | |
4149 | } | |
4150 | break; | |
4151 | } | |
4152 | ||
4153 | /* Take a reference on the skb and insert into our queue */ | |
4154 | i = queue->next; | |
4155 | queue->next = (i + 1) & 31; | |
4156 | if (queue->skb[i]) | |
4157 | dev_kfree_skb_any(queue->skb[i]); | |
4158 | queue->skb[i] = skb_get(skb); | |
4159 | ||
4160 | ptpdesc |= MVPP22_PTP_TIMESTAMPENTRYID(i); | |
4161 | ||
4162 | /* | |
4163 | * 3:0 - PTPAction | |
4164 | * 6:4 - PTPPacketFormat | |
4165 | * 7 - PTP_CF_WraparoundCheckEn | |
4166 | * 9:8 - IngressTimestampSeconds[1:0] | |
4167 | * 10 - Reserved | |
4168 | * 11 - MACTimestampingEn | |
4169 | * 17:12 - PTP_TimestampQueueEntryID[5:0] | |
4170 | * 18 - PTPTimestampQueueSelect | |
4171 | * 19 - UDPChecksumUpdateEn | |
4172 | * 27:20 - TimestampOffset | |
4173 | * PTP, NTPTransmit, OWAMP/TWAMP - L3 to PTP header | |
4174 | * NTPTs, Y.1731 - L3 to timestamp entry | |
4175 | * 35:28 - UDP Checksum Offset | |
4176 | * | |
4177 | * stored in tx descriptor bits 75:64 (11:0) and 191:168 (35:12) | |
4178 | */ | |
4179 | tx_desc->pp22.ptp_descriptor &= | |
4180 | cpu_to_le32(~MVPP22_PTP_DESC_MASK_LOW); | |
4181 | tx_desc->pp22.ptp_descriptor |= | |
4182 | cpu_to_le32(ptpdesc & MVPP22_PTP_DESC_MASK_LOW); | |
4183 | tx_desc->pp22.buf_dma_addr_ptp &= cpu_to_le64(~0xffffff0000000000ULL); | |
4184 | tx_desc->pp22.buf_dma_addr_ptp |= cpu_to_le64((ptpdesc >> 12) << 40); | |
4185 | ||
4186 | return true; | |
4187 | } | |
4188 | ||
3f518509 MW |
4189 | /* Handle tx fragmentation processing */ |
4190 | static int mvpp2_tx_frag_process(struct mvpp2_port *port, struct sk_buff *skb, | |
4191 | struct mvpp2_tx_queue *aggr_txq, | |
4192 | struct mvpp2_tx_queue *txq) | |
4193 | { | |
e531f767 | 4194 | unsigned int thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id()); |
074c74df | 4195 | struct mvpp2_txq_pcpu *txq_pcpu = per_cpu_ptr(txq->pcpu, thread); |
3f518509 MW |
4196 | struct mvpp2_tx_desc *tx_desc; |
4197 | int i; | |
20396136 | 4198 | dma_addr_t buf_dma_addr; |
3f518509 MW |
4199 | |
4200 | for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { | |
4201 | skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; | |
d7840976 | 4202 | void *addr = skb_frag_address(frag); |
3f518509 MW |
4203 | |
4204 | tx_desc = mvpp2_txq_next_desc_get(aggr_txq); | |
f5015a59 | 4205 | mvpp2_txdesc_clear_ptp(port, tx_desc); |
ac3dd277 | 4206 | mvpp2_txdesc_txq_set(port, tx_desc, txq->id); |
d7840976 | 4207 | mvpp2_txdesc_size_set(port, tx_desc, skb_frag_size(frag)); |
3f518509 | 4208 | |
20396136 | 4209 | buf_dma_addr = dma_map_single(port->dev->dev.parent, addr, |
d7840976 MWO |
4210 | skb_frag_size(frag), |
4211 | DMA_TO_DEVICE); | |
20396136 | 4212 | if (dma_mapping_error(port->dev->dev.parent, buf_dma_addr)) { |
3f518509 | 4213 | mvpp2_txq_desc_put(txq); |
32bae631 | 4214 | goto cleanup; |
3f518509 MW |
4215 | } |
4216 | ||
6eb5d375 | 4217 | mvpp2_txdesc_dma_addr_set(port, tx_desc, buf_dma_addr); |
3f518509 MW |
4218 | |
4219 | if (i == (skb_shinfo(skb)->nr_frags - 1)) { | |
4220 | /* Last descriptor */ | |
ac3dd277 TP |
4221 | mvpp2_txdesc_cmd_set(port, tx_desc, |
4222 | MVPP2_TXD_L_DESC); | |
c2d6fe61 | 4223 | mvpp2_txq_inc_put(port, txq_pcpu, skb, tx_desc, MVPP2_TYPE_SKB); |
3f518509 MW |
4224 | } else { |
4225 | /* Descriptor in the middle: Not First, Not Last */ | |
ac3dd277 | 4226 | mvpp2_txdesc_cmd_set(port, tx_desc, 0); |
c2d6fe61 | 4227 | mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc, MVPP2_TYPE_SKB); |
3f518509 MW |
4228 | } |
4229 | } | |
4230 | ||
4231 | return 0; | |
32bae631 | 4232 | cleanup: |
3f518509 MW |
4233 | /* Release all descriptors that were used to map fragments of |
4234 | * this packet, as well as the corresponding DMA mappings | |
4235 | */ | |
4236 | for (i = i - 1; i >= 0; i--) { | |
4237 | tx_desc = txq->descs + i; | |
ac3dd277 | 4238 | tx_desc_unmap_put(port, txq, tx_desc); |
3f518509 MW |
4239 | } |
4240 | ||
4241 | return -ENOMEM; | |
4242 | } | |
4243 | ||
186cd4d4 AT |
4244 | static inline void mvpp2_tso_put_hdr(struct sk_buff *skb, |
4245 | struct net_device *dev, | |
4246 | struct mvpp2_tx_queue *txq, | |
4247 | struct mvpp2_tx_queue *aggr_txq, | |
4248 | struct mvpp2_txq_pcpu *txq_pcpu, | |
4249 | int hdr_sz) | |
4250 | { | |
4251 | struct mvpp2_port *port = netdev_priv(dev); | |
4252 | struct mvpp2_tx_desc *tx_desc = mvpp2_txq_next_desc_get(aggr_txq); | |
4253 | dma_addr_t addr; | |
4254 | ||
f5015a59 | 4255 | mvpp2_txdesc_clear_ptp(port, tx_desc); |
186cd4d4 AT |
4256 | mvpp2_txdesc_txq_set(port, tx_desc, txq->id); |
4257 | mvpp2_txdesc_size_set(port, tx_desc, hdr_sz); | |
4258 | ||
4259 | addr = txq_pcpu->tso_headers_dma + | |
4260 | txq_pcpu->txq_put_index * TSO_HEADER_SIZE; | |
6eb5d375 | 4261 | mvpp2_txdesc_dma_addr_set(port, tx_desc, addr); |
186cd4d4 AT |
4262 | |
4263 | mvpp2_txdesc_cmd_set(port, tx_desc, mvpp2_skb_tx_csum(port, skb) | | |
4264 | MVPP2_TXD_F_DESC | | |
4265 | MVPP2_TXD_PADDING_DISABLE); | |
c2d6fe61 | 4266 | mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc, MVPP2_TYPE_SKB); |
186cd4d4 AT |
4267 | } |
4268 | ||
4269 | static inline int mvpp2_tso_put_data(struct sk_buff *skb, | |
4270 | struct net_device *dev, struct tso_t *tso, | |
4271 | struct mvpp2_tx_queue *txq, | |
4272 | struct mvpp2_tx_queue *aggr_txq, | |
4273 | struct mvpp2_txq_pcpu *txq_pcpu, | |
4274 | int sz, bool left, bool last) | |
4275 | { | |
4276 | struct mvpp2_port *port = netdev_priv(dev); | |
4277 | struct mvpp2_tx_desc *tx_desc = mvpp2_txq_next_desc_get(aggr_txq); | |
4278 | dma_addr_t buf_dma_addr; | |
4279 | ||
f5015a59 | 4280 | mvpp2_txdesc_clear_ptp(port, tx_desc); |
186cd4d4 AT |
4281 | mvpp2_txdesc_txq_set(port, tx_desc, txq->id); |
4282 | mvpp2_txdesc_size_set(port, tx_desc, sz); | |
4283 | ||
4284 | buf_dma_addr = dma_map_single(dev->dev.parent, tso->data, sz, | |
4285 | DMA_TO_DEVICE); | |
4286 | if (unlikely(dma_mapping_error(dev->dev.parent, buf_dma_addr))) { | |
4287 | mvpp2_txq_desc_put(txq); | |
4288 | return -ENOMEM; | |
4289 | } | |
4290 | ||
6eb5d375 | 4291 | mvpp2_txdesc_dma_addr_set(port, tx_desc, buf_dma_addr); |
186cd4d4 AT |
4292 | |
4293 | if (!left) { | |
4294 | mvpp2_txdesc_cmd_set(port, tx_desc, MVPP2_TXD_L_DESC); | |
4295 | if (last) { | |
c2d6fe61 | 4296 | mvpp2_txq_inc_put(port, txq_pcpu, skb, tx_desc, MVPP2_TYPE_SKB); |
186cd4d4 AT |
4297 | return 0; |
4298 | } | |
4299 | } else { | |
4300 | mvpp2_txdesc_cmd_set(port, tx_desc, 0); | |
4301 | } | |
4302 | ||
c2d6fe61 | 4303 | mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc, MVPP2_TYPE_SKB); |
186cd4d4 AT |
4304 | return 0; |
4305 | } | |
4306 | ||
4307 | static int mvpp2_tx_tso(struct sk_buff *skb, struct net_device *dev, | |
4308 | struct mvpp2_tx_queue *txq, | |
4309 | struct mvpp2_tx_queue *aggr_txq, | |
4310 | struct mvpp2_txq_pcpu *txq_pcpu) | |
4311 | { | |
4312 | struct mvpp2_port *port = netdev_priv(dev); | |
761b331c | 4313 | int hdr_sz, i, len, descs = 0; |
186cd4d4 | 4314 | struct tso_t tso; |
186cd4d4 AT |
4315 | |
4316 | /* Check number of available descriptors */ | |
e531f767 | 4317 | if (mvpp2_aggr_desc_num_check(port, aggr_txq, tso_count_descs(skb)) || |
074c74df | 4318 | mvpp2_txq_reserved_desc_num_proc(port, txq, txq_pcpu, |
186cd4d4 AT |
4319 | tso_count_descs(skb))) |
4320 | return 0; | |
4321 | ||
761b331c ED |
4322 | hdr_sz = tso_start(skb, &tso); |
4323 | ||
186cd4d4 AT |
4324 | len = skb->len - hdr_sz; |
4325 | while (len > 0) { | |
4326 | int left = min_t(int, skb_shinfo(skb)->gso_size, len); | |
4327 | char *hdr = txq_pcpu->tso_headers + | |
4328 | txq_pcpu->txq_put_index * TSO_HEADER_SIZE; | |
4329 | ||
4330 | len -= left; | |
4331 | descs++; | |
4332 | ||
4333 | tso_build_hdr(skb, hdr, &tso, left, len == 0); | |
4334 | mvpp2_tso_put_hdr(skb, dev, txq, aggr_txq, txq_pcpu, hdr_sz); | |
4335 | ||
4336 | while (left > 0) { | |
4337 | int sz = min_t(int, tso.size, left); | |
4338 | left -= sz; | |
4339 | descs++; | |
4340 | ||
4341 | if (mvpp2_tso_put_data(skb, dev, &tso, txq, aggr_txq, | |
4342 | txq_pcpu, sz, left, len == 0)) | |
4343 | goto release; | |
4344 | tso_build_data(skb, &tso, sz); | |
4345 | } | |
4346 | } | |
4347 | ||
4348 | return descs; | |
4349 | ||
4350 | release: | |
4351 | for (i = descs - 1; i >= 0; i--) { | |
4352 | struct mvpp2_tx_desc *tx_desc = txq->descs + i; | |
4353 | tx_desc_unmap_put(port, txq, tx_desc); | |
4354 | } | |
4355 | return 0; | |
4356 | } | |
4357 | ||
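/* Editor's aside: the descriptor arithmetic behind mvpp2_tx_tso(), as a
 * stand-alone sketch. Each gso_size segment consumes one header descriptor
 * plus one data descriptor per chunk; "chunk" stands in for tso.size, which
 * in the real code depends on page boundaries, so this is only an
 * approximation of tso_count_descs(). */
#include <stdio.h>

static int count_tso_descs(int payload, int gso_size, int chunk)
{
	int descs = 0;

	while (payload > 0) {
		int seg = payload < gso_size ? payload : gso_size;

		payload -= seg;
		descs++;			/* header descriptor */
		while (seg > 0) {
			int sz = seg < chunk ? seg : chunk;

			seg -= sz;
			descs++;		/* data descriptor */
		}
	}
	return descs;
}

int main(void)
{
	/* 64 KiB of payload, 1448-byte MSS, 4 KiB data chunks */
	printf("%d descriptors\n", count_tso_descs(65536, 1448, 4096));
	return 0;
}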
3f518509 | 4358 | /* Main tx processing */ |
f03508ce | 4359 | static netdev_tx_t mvpp2_tx(struct sk_buff *skb, struct net_device *dev) |
3f518509 MW |
4360 | { |
4361 | struct mvpp2_port *port = netdev_priv(dev); | |
4362 | struct mvpp2_tx_queue *txq, *aggr_txq; | |
4363 | struct mvpp2_txq_pcpu *txq_pcpu; | |
4364 | struct mvpp2_tx_desc *tx_desc; | |
20396136 | 4365 | dma_addr_t buf_dma_addr; |
e531f767 | 4366 | unsigned long flags = 0; |
074c74df | 4367 | unsigned int thread; |
3f518509 MW |
4368 | int frags = 0; |
4369 | u16 txq_id; | |
4370 | u32 tx_cmd; | |
4371 | ||
e531f767 | 4372 | thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id()); |
074c74df | 4373 | |
3f518509 MW |
4374 | txq_id = skb_get_queue_mapping(skb); |
4375 | txq = port->txqs[txq_id]; | |
074c74df AT |
4376 | txq_pcpu = per_cpu_ptr(txq->pcpu, thread); |
4377 | aggr_txq = &port->priv->aggr_txqs[thread]; | |
3f518509 | 4378 | |
e531f767 AT |
4379 | if (test_bit(thread, &port->priv->lock_map)) |
4380 | spin_lock_irqsave(&port->tx_lock[thread], flags); | |
4381 | ||
186cd4d4 AT |
4382 | if (skb_is_gso(skb)) { |
4383 | frags = mvpp2_tx_tso(skb, dev, txq, aggr_txq, txq_pcpu); | |
4384 | goto out; | |
4385 | } | |
3f518509 MW |
4386 | frags = skb_shinfo(skb)->nr_frags + 1; |
4387 | ||
4388 | /* Check number of available descriptors */ | |
e531f767 | 4389 | if (mvpp2_aggr_desc_num_check(port, aggr_txq, frags) || |
074c74df | 4390 | mvpp2_txq_reserved_desc_num_proc(port, txq, txq_pcpu, frags)) { |
3f518509 MW |
4391 | frags = 0; |
4392 | goto out; | |
4393 | } | |
4394 | ||
4395 | /* Get a descriptor for the first part of the packet */ | |
4396 | tx_desc = mvpp2_txq_next_desc_get(aggr_txq); | |
f5015a59 RK |
4397 | if (!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) || |
4398 | !mvpp2_tx_hw_tstamp(port, tx_desc, skb)) | |
4399 | mvpp2_txdesc_clear_ptp(port, tx_desc); | |
ac3dd277 TP |
4400 | mvpp2_txdesc_txq_set(port, tx_desc, txq->id); |
4401 | mvpp2_txdesc_size_set(port, tx_desc, skb_headlen(skb)); | |
3f518509 | 4402 | |
20396136 | 4403 | buf_dma_addr = dma_map_single(dev->dev.parent, skb->data, |
ac3dd277 | 4404 | skb_headlen(skb), DMA_TO_DEVICE); |
20396136 | 4405 | if (unlikely(dma_mapping_error(dev->dev.parent, buf_dma_addr))) { |
3f518509 MW |
4406 | mvpp2_txq_desc_put(txq); |
4407 | frags = 0; | |
4408 | goto out; | |
4409 | } | |
ac3dd277 | 4410 | |
6eb5d375 | 4411 | mvpp2_txdesc_dma_addr_set(port, tx_desc, buf_dma_addr); |
3f518509 MW |
4412 | |
4413 | tx_cmd = mvpp2_skb_tx_csum(port, skb); | |
4414 | ||
4415 | if (frags == 1) { | |
4416 | /* First and Last descriptor */ | |
4417 | tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_L_DESC; | |
ac3dd277 | 4418 | mvpp2_txdesc_cmd_set(port, tx_desc, tx_cmd); |
c2d6fe61 | 4419 | mvpp2_txq_inc_put(port, txq_pcpu, skb, tx_desc, MVPP2_TYPE_SKB); |
3f518509 MW |
4420 | } else { |
4421 | /* First but not Last */ | |
4422 | tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_PADDING_DISABLE; | |
ac3dd277 | 4423 | mvpp2_txdesc_cmd_set(port, tx_desc, tx_cmd); |
c2d6fe61 | 4424 | mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc, MVPP2_TYPE_SKB); |
3f518509 MW |
4425 | |
4426 | /* Continue with other skb fragments */ | |
4427 | if (mvpp2_tx_frag_process(port, skb, aggr_txq, txq)) { | |
ac3dd277 | 4428 | tx_desc_unmap_put(port, txq, tx_desc); |
3f518509 | 4429 | frags = 0; |
3f518509 MW |
4430 | } |
4431 | } | |
4432 | ||
3f518509 MW |
4433 | out: |
4434 | if (frags > 0) { | |
074c74df | 4435 | struct mvpp2_pcpu_stats *stats = per_cpu_ptr(port->stats, thread); |
186cd4d4 AT |
4436 | struct netdev_queue *nq = netdev_get_tx_queue(dev, txq_id); |
4437 | ||
4438 | txq_pcpu->reserved_num -= frags; | |
4439 | txq_pcpu->count += frags; | |
4440 | aggr_txq->count += frags; | |
4441 | ||
4442 | /* Enable transmit */ | |
4443 | wmb(); | |
4444 | mvpp2_aggr_txq_pend_desc_add(port, frags); | |
4445 | ||
1d17db08 | 4446 | if (txq_pcpu->count >= txq_pcpu->stop_threshold) |
186cd4d4 | 4447 | netif_tx_stop_queue(nq); |
3f518509 MW |
4448 | |
4449 | u64_stats_update_begin(&stats->syncp); | |
4450 | stats->tx_packets++; | |
4451 | stats->tx_bytes += skb->len; | |
4452 | u64_stats_update_end(&stats->syncp); | |
4453 | } else { | |
4454 | dev->stats.tx_dropped++; | |
4455 | dev_kfree_skb_any(skb); | |
4456 | } | |
4457 | ||
edc660fa | 4458 | /* Finalize TX processing */ |
082297e6 | 4459 | if (!port->has_tx_irqs && txq_pcpu->count >= txq->done_pkts_coal) |
edc660fa MW |
4460 | mvpp2_txq_done(port, txq, txq_pcpu); |
4461 | ||
4462 | /* Set the timer in case not all frags were processed */ | |
213f428f TP |
4463 | if (!port->has_tx_irqs && txq_pcpu->count <= frags && |
4464 | txq_pcpu->count > 0) { | |
074c74df | 4465 | struct mvpp2_port_pcpu *port_pcpu = per_cpu_ptr(port->pcpu, thread); |
edc660fa | 4466 | |
ecb9f80d TG |
4467 | if (!port_pcpu->timer_scheduled) { |
4468 | port_pcpu->timer_scheduled = true; | |
4469 | hrtimer_start(&port_pcpu->tx_done_timer, | |
4470 | MVPP2_TXDONE_HRTIMER_PERIOD_NS, | |
4471 | HRTIMER_MODE_REL_PINNED_SOFT); | |
4472 | } | |
edc660fa MW |
4473 | } |
4474 | ||
e531f767 AT |
4475 | if (test_bit(thread, &port->priv->lock_map)) |
4476 | spin_unlock_irqrestore(&port->tx_lock[thread], flags); | |
4477 | ||
3f518509 MW |
4478 | return NETDEV_TX_OK; |
4479 | } | |
4480 | ||
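/* Editor's aside: the wmb()-before-doorbell pattern used at the end of
 * mvpp2_tx() ("Enable transmit"), sketched in stand-alone C. The store
 * fence guarantees the descriptor contents are globally visible before the
 * pending-descriptor write that hands them to hardware;
 * __sync_synchronize() stands in for the kernel's wmb(), and "doorbell" is
 * an ordinary variable here, not a device register. */
#include <stdio.h>

struct desc { unsigned long addr; unsigned int len; };

static struct desc ring[8];
static volatile unsigned int doorbell;	/* pending descriptor count */

static void publish(int first, int n)
{
	int i;

	for (i = 0; i < n; i++) {
		ring[(first + i) % 8].addr = 0x1000 + 0x100 * i;
		ring[(first + i) % 8].len = 64;
	}
	__sync_synchronize();	/* order descriptor stores before doorbell */
	doorbell = n;		/* consumer may now process n descriptors */
}

int main(void)
{
	publish(0, 3);
	printf("doorbell = %u\n", doorbell);
	return 0;
}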
4481 | static inline void mvpp2_cause_error(struct net_device *dev, int cause) | |
4482 | { | |
4483 | if (cause & MVPP2_CAUSE_FCS_ERR_MASK) | |
4484 | netdev_err(dev, "FCS error\n"); | |
4485 | if (cause & MVPP2_CAUSE_RX_FIFO_OVERRUN_MASK) | |
4486 | netdev_err(dev, "rx fifo overrun error\n"); | |
4487 | if (cause & MVPP2_CAUSE_TX_FIFO_UNDERRUN_MASK) | |
4488 | netdev_err(dev, "tx fifo underrun error\n"); | |
4489 | } | |
4490 | ||
edc660fa | 4491 | static int mvpp2_poll(struct napi_struct *napi, int budget) |
3f518509 | 4492 | { |
213f428f | 4493 | u32 cause_rx_tx, cause_rx, cause_tx, cause_misc; |
edc660fa MW |
4494 | int rx_done = 0; |
4495 | struct mvpp2_port *port = netdev_priv(napi->dev); | |
591f4cfa | 4496 | struct mvpp2_queue_vector *qv; |
e531f767 | 4497 | unsigned int thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id()); |
3f518509 | 4498 | |
591f4cfa TP |
4499 | qv = container_of(napi, struct mvpp2_queue_vector, napi); |
4500 | ||
3f518509 MW |
4501 | /* Rx/Tx cause register |
4502 | * | |
4503 | * Bits 0-15: each bit indicates received packets on the Rx queue | |
4504 | * (bit 0 is for Rx queue 0). | |
4505 | * | |
4506 | * Bits 16-23: each bit indicates transmitted packets on the Tx queue | |
4507 | * (bit 16 is for Tx queue 0). | |
4508 | * | |
4509 | * Each CPU has its own Rx/Tx cause register | |
4510 | */ | |
1068549c | 4511 | cause_rx_tx = mvpp2_thread_read_relaxed(port->priv, qv->sw_thread_id, |
cdcfeb0f | 4512 | MVPP2_ISR_RX_TX_CAUSE_REG(port->id)); |
3f518509 | 4513 | |
213f428f | 4514 | cause_misc = cause_rx_tx & MVPP2_CAUSE_MISC_SUM_MASK; |
3f518509 MW |
4515 | if (cause_misc) { |
4516 | mvpp2_cause_error(port->dev, cause_misc); | |
4517 | ||
4518 | /* Clear the cause register */ | |
4519 | mvpp2_write(port->priv, MVPP2_ISR_MISC_CAUSE_REG, 0); | |
1068549c | 4520 | mvpp2_thread_write(port->priv, thread, |
a786841d TP |
4521 | MVPP2_ISR_RX_TX_CAUSE_REG(port->id), |
4522 | cause_rx_tx & ~MVPP2_CAUSE_MISC_SUM_MASK); | |
3f518509 MW |
4523 | } |
4524 | ||
774268f3 AT |
4525 | if (port->has_tx_irqs) { |
4526 | cause_tx = cause_rx_tx & MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK; | |
4527 | if (cause_tx) { | |
4528 | cause_tx >>= MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_OFFSET; | |
4529 | mvpp2_tx_done(port, cause_tx, qv->sw_thread_id); | |
4530 | } | |
213f428f | 4531 | } |
3f518509 MW |
4532 | |
4533 | /* Process RX packets */ | |
70afb58e AT |
4534 | cause_rx = cause_rx_tx & |
4535 | MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK(port->priv->hw_version); | |
213f428f | 4536 | cause_rx <<= qv->first_rxq; |
591f4cfa | 4537 | cause_rx |= qv->pending_cause_rx; |
3f518509 MW |
4538 | while (cause_rx && budget > 0) { |
4539 | int count; | |
4540 | struct mvpp2_rx_queue *rxq; | |
4541 | ||
4542 | rxq = mvpp2_get_rx_queue(port, cause_rx); | |
4543 | if (!rxq) | |
4544 | break; | |
4545 | ||
591f4cfa | 4546 | count = mvpp2_rx(port, napi, budget, rxq); |
3f518509 MW |
4547 | rx_done += count; |
4548 | budget -= count; | |
4549 | if (budget > 0) { | |
4550 | /* Clear the bit associated with this Rx queue | |
4551 | * so that the next iteration will continue from | |
4552 | * the next Rx queue. | |
4553 | */ | |
4554 | cause_rx &= ~(1 << rxq->logic_rxq); | |
4555 | } | |
4556 | } | |
4557 | ||
4558 | if (budget > 0) { | |
4559 | cause_rx = 0; | |
6ad20165 | 4560 | napi_complete_done(napi, rx_done); |
3f518509 | 4561 | |
591f4cfa | 4562 | mvpp2_qvec_interrupt_enable(qv); |
3f518509 | 4563 | } |
591f4cfa | 4564 | qv->pending_cause_rx = cause_rx; |
3f518509 MW |
4565 | return rx_done; |
4566 | } | |
4567 | ||
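/* Editor's aside: the cause-bitmask walk in mvpp2_poll(), reduced to a
 * stand-alone sketch: service the lowest-numbered RX queue with a pending
 * bit, clear its bit only once it is fully drained within the budget, and
 * carry the remaining bits over to the next poll. The per-queue packet
 * count is invented for illustration. */
#include <stdio.h>

static int service_queue(int q, int budget)
{
	int pending = 10;	/* pretend each queue has 10 packets */

	printf("queue %d: budget %d\n", q, budget);
	return budget < pending ? budget : pending;
}

int main(void)
{
	unsigned int cause_rx = 0x15;	/* queues 0, 2 and 4 have work */
	int budget = 25;

	while (cause_rx && budget > 0) {
		int q = __builtin_ctz(cause_rx);	/* lowest set bit */

		budget -= service_queue(q, budget);
		if (budget > 0)
			cause_rx &= ~(1u << q);	/* queue fully drained */
	}
	printf("carried-over causes: 0x%x\n", cause_rx);
	return 0;
}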
bb7bbb6e MB |
4568 | static void mvpp22_mode_reconfigure(struct mvpp2_port *port, |
4569 | phy_interface_t interface) | |
3f518509 | 4570 | { |
4bb04326 AT |
4571 | u32 ctrl3; |
4572 | ||
5434e8fa AT |
4573 | /* Set the GMAC & XLG MAC in reset */ |
4574 | mvpp2_mac_reset_assert(port); | |
4575 | ||
7409e66e AT |
4576 | /* Set the MPCS and XPCS in reset */ |
4577 | mvpp22_pcs_reset_assert(port); | |
4578 | ||
4bb04326 | 4579 | /* comphy reconfiguration */ |
bb7bbb6e | 4580 | mvpp22_comphy_init(port, interface); |
4bb04326 AT |
4581 | |
4582 | /* gop reconfiguration */ | |
bb7bbb6e | 4583 | mvpp22_gop_init(port, interface); |
4bb04326 | 4584 | |
bb7bbb6e | 4585 | mvpp22_pcs_reset_deassert(port, interface); |
7409e66e | 4586 | |
a9a33202 | 4587 | if (mvpp2_port_supports_xlg(port)) { |
4bb04326 AT |
4588 | ctrl3 = readl(port->base + MVPP22_XLG_CTRL3_REG); |
4589 | ctrl3 &= ~MVPP22_XLG_CTRL3_MACMODESELECT_MASK; | |
4590 | ||
bb7bbb6e | 4591 | if (mvpp2_is_xlg(interface)) |
4bb04326 AT |
4592 | ctrl3 |= MVPP22_XLG_CTRL3_MACMODESELECT_10G; |
4593 | else | |
4594 | ctrl3 |= MVPP22_XLG_CTRL3_MACMODESELECT_GMAC; | |
4595 | ||
4596 | writel(ctrl3, port->base + MVPP22_XLG_CTRL3_REG); | |
4597 | } | |
8e07269d | 4598 | |
bb7bbb6e | 4599 | if (mvpp2_port_supports_xlg(port) && mvpp2_is_xlg(interface)) |
76eb1b1d SC |
4600 | mvpp2_xlg_max_rx_size_set(port); |
4601 | else | |
4602 | mvpp2_gmac_max_rx_size_set(port); | |
4bb04326 AT |
4603 | } |
4604 | ||
4605 | /* Set hw internals when starting port */ | |
4606 | static void mvpp2_start_dev(struct mvpp2_port *port) | |
4607 | { | |
4608 | int i; | |
76eb1b1d | 4609 | |
3f518509 MW |
4610 | mvpp2_txp_max_tx_size_set(port); |
4611 | ||
591f4cfa TP |
4612 | for (i = 0; i < port->nqvecs; i++) |
4613 | napi_enable(&port->qvecs[i].napi); | |
3f518509 | 4614 | |
543ec376 | 4615 | /* Enable interrupts on all threads */ |
3f518509 MW |
4616 | mvpp2_interrupts_enable(port); |
4617 | ||
f704177e | 4618 | if (port->priv->hw_version >= MVPP22) |
bb7bbb6e | 4619 | mvpp22_mode_reconfigure(port, port->phy_interface); |
4bb04326 AT |
4620 | |
4621 | if (port->phylink) { | |
4622 | phylink_start(port->phylink); | |
4623 | } else { | |
87745c74 | 4624 | mvpp2_acpi_start(port); |
542897d9 | 4625 | } |
f84bf386 | 4626 | |
3f518509 | 4627 | netif_tx_start_all_queues(port->dev); |
07dd0a7a MC |
4628 | |
4629 | clear_bit(0, &port->state); | |
3f518509 MW |
4630 | } |
4631 | ||
4632 | /* Set hw internals when stopping port */ | |
4633 | static void mvpp2_stop_dev(struct mvpp2_port *port) | |
4634 | { | |
591f4cfa | 4635 | int i; |
8e07269d | 4636 | |
07dd0a7a MC |
4637 | set_bit(0, &port->state); |
4638 | ||
543ec376 | 4639 | /* Disable interrupts on all threads */ |
3f518509 MW |
4640 | mvpp2_interrupts_disable(port); |
4641 | ||
591f4cfa TP |
4642 | for (i = 0; i < port->nqvecs; i++) |
4643 | napi_disable(&port->qvecs[i].napi); | |
3f518509 | 4644 | |
4bb04326 AT |
4645 | if (port->phylink) |
4646 | phylink_stop(port->phylink); | |
542897d9 | 4647 | phy_power_off(port->comphy); |
3f518509 MW |
4648 | } |
4649 | ||
3f518509 MW |
4650 | static int mvpp2_check_ringparam_valid(struct net_device *dev, |
4651 | struct ethtool_ringparam *ring) | |
4652 | { | |
4653 | u16 new_rx_pending = ring->rx_pending; | |
4654 | u16 new_tx_pending = ring->tx_pending; | |
4655 | ||
4656 | if (ring->rx_pending == 0 || ring->tx_pending == 0) | |
4657 | return -EINVAL; | |
4658 | ||
7cf87e4a YM |
4659 | if (ring->rx_pending > MVPP2_MAX_RXD_MAX) |
4660 | new_rx_pending = MVPP2_MAX_RXD_MAX; | |
3bd17fdc SC |
4661 | else if (ring->rx_pending < MSS_THRESHOLD_START) |
4662 | new_rx_pending = MSS_THRESHOLD_START; | |
3f518509 MW |
4663 | else if (!IS_ALIGNED(ring->rx_pending, 16)) |
4664 | new_rx_pending = ALIGN(ring->rx_pending, 16); | |
4665 | ||
7cf87e4a YM |
4666 | if (ring->tx_pending > MVPP2_MAX_TXD_MAX) |
4667 | new_tx_pending = MVPP2_MAX_TXD_MAX; | |
3f518509 MW |
4668 | else if (!IS_ALIGNED(ring->tx_pending, 32)) |
4669 | new_tx_pending = ALIGN(ring->tx_pending, 32); | |
4670 | ||
76e583c5 AT |
4671 | /* The Tx ring size cannot be smaller than the minimum number of |
4672 | * descriptors needed for TSO. | |
4673 | */ | |
4674 | if (new_tx_pending < MVPP2_MAX_SKB_DESCS) | |
4675 | new_tx_pending = ALIGN(MVPP2_MAX_SKB_DESCS, 32); | |
4676 | ||
3f518509 MW |
4677 | if (ring->rx_pending != new_rx_pending) { |
4678 | netdev_info(dev, "illegal Rx ring size value %d, round to %d\n", | |
4679 | ring->rx_pending, new_rx_pending); | |
4680 | ring->rx_pending = new_rx_pending; | |
4681 | } | |
4682 | ||
4683 | if (ring->tx_pending != new_tx_pending) { | |
4684 | netdev_info(dev, "illegal Tx ring size value %d, round to %d\n", | |
4685 | ring->tx_pending, new_tx_pending); | |
4686 | ring->tx_pending = new_tx_pending; | |
4687 | } | |
4688 | ||
4689 | return 0; | |
4690 | } | |
4691 | ||
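/* Editor's aside: the clamp-and-align rule that
 * mvpp2_check_ringparam_valid() enforces, as a stand-alone sketch: ring
 * sizes are capped at a hardware maximum and rounded up to the descriptor
 * granularity (16 for RX, 32 for TX). ALIGN() mirrors the kernel macro for
 * power-of-two alignments; the maxima below are illustrative. */
#include <stdio.h>

#define ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))

static unsigned int fixup_ring(unsigned int req, unsigned int max,
			       unsigned int gran)
{
	if (req > max)
		return max;
	return ALIGN(req, gran);
}

int main(void)
{
	printf("tx 1000 -> %u\n", fixup_ring(1000, 2048, 32)); /* 1024 */
	printf("rx 100  -> %u\n", fixup_ring(100, 1024, 16));  /* 112 */
	return 0;
}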
26975821 | 4692 | static void mvpp21_get_mac_address(struct mvpp2_port *port, unsigned char *addr) |
3f518509 MW |
4693 | { |
4694 | u32 mac_addr_l, mac_addr_m, mac_addr_h; | |
4695 | ||
4696 | mac_addr_l = readl(port->base + MVPP2_GMAC_CTRL_1_REG); | |
4697 | mac_addr_m = readl(port->priv->lms_base + MVPP2_SRC_ADDR_MIDDLE); | |
4698 | mac_addr_h = readl(port->priv->lms_base + MVPP2_SRC_ADDR_HIGH); | |
4699 | addr[0] = (mac_addr_h >> 24) & 0xFF; | |
4700 | addr[1] = (mac_addr_h >> 16) & 0xFF; | |
4701 | addr[2] = (mac_addr_h >> 8) & 0xFF; | |
4702 | addr[3] = mac_addr_h & 0xFF; | |
4703 | addr[4] = mac_addr_m & 0xFF; | |
4704 | addr[5] = (mac_addr_l >> MVPP2_GMAC_SA_LOW_OFFS) & 0xFF; | |
4705 | } | |
4706 | ||
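/* Editor's aside: the register unpacking done by mvpp21_get_mac_address(),
 * as a stand-alone sketch. The six address bytes live in a "high" word
 * (bytes 0-3, MSB first), a "middle" word (byte 4) and a "low" word
 * (byte 5). The register values and the low-word shift of 8 (standing in
 * for MVPP2_GMAC_SA_LOW_OFFS) are assumptions for illustration. */
#include <stdio.h>

int main(void)
{
	unsigned int mac_h = 0x001122aa;	/* bytes 0..3 */
	unsigned int mac_m = 0x000000bb;	/* byte 4 in bits 7:0 */
	unsigned int mac_l = 0x0000cc00;	/* byte 5 in bits 15:8 */
	unsigned char addr[6];

	addr[0] = (mac_h >> 24) & 0xff;
	addr[1] = (mac_h >> 16) & 0xff;
	addr[2] = (mac_h >> 8) & 0xff;
	addr[3] = mac_h & 0xff;
	addr[4] = mac_m & 0xff;
	addr[5] = (mac_l >> 8) & 0xff;

	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
	return 0;
}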
591f4cfa TP |
4707 | static int mvpp2_irqs_init(struct mvpp2_port *port) |
4708 | { | |
4709 | int err, i; | |
4710 | ||
4711 | for (i = 0; i < port->nqvecs; i++) { | |
4712 | struct mvpp2_queue_vector *qv = port->qvecs + i; | |
4713 | ||
a6b3a3fa MZ |
4714 | if (qv->type == MVPP2_QUEUE_VECTOR_PRIVATE) { |
4715 | qv->mask = kzalloc(cpumask_size(), GFP_KERNEL); | |
4716 | if (!qv->mask) { | |
4717 | err = -ENOMEM; | |
4718 | goto err; | |
4719 | } | |
4720 | ||
13c249a9 | 4721 | irq_set_status_flags(qv->irq, IRQ_NO_BALANCING); |
a6b3a3fa | 4722 | } |
13c249a9 | 4723 | |
591f4cfa TP |
4724 | err = request_irq(qv->irq, mvpp2_isr, 0, port->dev->name, qv); |
4725 | if (err) | |
4726 | goto err; | |
213f428f | 4727 | |
e531f767 | 4728 | if (qv->type == MVPP2_QUEUE_VECTOR_PRIVATE) { |
e531f767 AT |
4729 | unsigned int cpu; |
4730 | ||
4731 | for_each_present_cpu(cpu) { | |
4732 | if (mvpp2_cpu_to_thread(port->priv, cpu) == | |
4733 | qv->sw_thread_id) | |
a6b3a3fa | 4734 | cpumask_set_cpu(cpu, qv->mask); |
e531f767 AT |
4735 | } |
4736 | ||
a6b3a3fa | 4737 | irq_set_affinity_hint(qv->irq, qv->mask); |
e531f767 | 4738 | } |
591f4cfa TP |
4739 | } |
4740 | ||
4741 | return 0; | |
4742 | err: | |
4743 | for (i = 0; i < port->nqvecs; i++) { | |
4744 | struct mvpp2_queue_vector *qv = port->qvecs + i; | |
4745 | ||
213f428f | 4746 | irq_set_affinity_hint(qv->irq, NULL); |
a6b3a3fa MZ |
4747 | kfree(qv->mask); |
4748 | qv->mask = NULL; | |
591f4cfa TP |
4749 | free_irq(qv->irq, qv); |
4750 | } | |
4751 | ||
4752 | return err; | |
4753 | } | |
4754 | ||
4755 | static void mvpp2_irqs_deinit(struct mvpp2_port *port) | |
4756 | { | |
4757 | int i; | |
4758 | ||
4759 | for (i = 0; i < port->nqvecs; i++) { | |
4760 | struct mvpp2_queue_vector *qv = port->qvecs + i; | |
4761 | ||
213f428f | 4762 | irq_set_affinity_hint(qv->irq, NULL); |
a6b3a3fa MZ |
4763 | kfree(qv->mask); |
4764 | qv->mask = NULL; | |
13c249a9 | 4765 | irq_clear_status_flags(qv->irq, IRQ_NO_BALANCING); |
591f4cfa TP |
4766 | free_irq(qv->irq, qv); |
4767 | } | |
4768 | } | |
4769 | ||
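/* Editor's aside: the affinity computation inside mvpp2_irqs_init(),
 * sketched stand-alone: a private queue vector's IRQ is hinted onto every
 * CPU whose thread mapping (cpu % nthreads, as in mvpp2_cpu_to_thread())
 * equals the vector's sw_thread_id. CPU and thread counts are examples. */
#include <stdio.h>

int main(void)
{
	int nthreads = 4, sw_thread_id = 1, ncpus = 8, cpu;
	unsigned long mask = 0;

	for (cpu = 0; cpu < ncpus; cpu++)
		if (cpu % nthreads == sw_thread_id)
			mask |= 1ul << cpu;

	/* CPUs 1 and 5 -> 0x22 */
	printf("affinity mask for sw thread %d: 0x%lx\n", sw_thread_id, mask);
	return 0;
}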
0a8a8000 | 4770 | static bool mvpp22_rss_is_supported(struct mvpp2_port *port) |
4c4a5686 | 4771 | { |
0a8a8000 SC |
4772 | return (queue_mode == MVPP2_QDIST_MULTI_MODE) && |
4773 | !(port->flags & MVPP2_F_LOOPBACK); | |
4c4a5686 YM |
4774 | } |
4775 | ||
3f518509 MW |
4776 | static int mvpp2_open(struct net_device *dev) |
4777 | { | |
4778 | struct mvpp2_port *port = netdev_priv(dev); | |
fd3651b2 | 4779 | struct mvpp2 *priv = port->priv; |
3f518509 MW |
4780 | unsigned char mac_bcast[ETH_ALEN] = { |
4781 | 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; | |
4bb04326 | 4782 | bool valid = false; |
3f518509 MW |
4783 | int err; |
4784 | ||
ce2a27c7 | 4785 | err = mvpp2_prs_mac_da_accept(port, mac_bcast, true); |
3f518509 MW |
4786 | if (err) { |
4787 | netdev_err(dev, "mvpp2_prs_mac_da_accept BC failed\n"); | |
4788 | return err; | |
4789 | } | |
ce2a27c7 | 4790 | err = mvpp2_prs_mac_da_accept(port, dev->dev_addr, true); |
3f518509 | 4791 | if (err) { |
ce2a27c7 | 4792 | netdev_err(dev, "mvpp2_prs_mac_da_accept own addr failed\n"); |
3f518509 MW |
4793 | return err; |
4794 | } | |
4795 | err = mvpp2_prs_tag_mode_set(port->priv, port->id, MVPP2_TAG_TYPE_MH); | |
4796 | if (err) { | |
4797 | netdev_err(dev, "mvpp2_prs_tag_mode_set failed\n"); | |
4798 | return err; | |
4799 | } | |
4800 | err = mvpp2_prs_def_flow(port); | |
4801 | if (err) { | |
4802 | netdev_err(dev, "mvpp2_prs_def_flow failed\n"); | |
4803 | return err; | |
4804 | } | |
4805 | ||
4806 | /* Allocate the Rx/Tx queues */ | |
4807 | err = mvpp2_setup_rxqs(port); | |
4808 | if (err) { | |
4809 | netdev_err(port->dev, "cannot allocate Rx queues\n"); | |
4810 | return err; | |
4811 | } | |
4812 | ||
4813 | err = mvpp2_setup_txqs(port); | |
4814 | if (err) { | |
4815 | netdev_err(port->dev, "cannot allocate Tx queues\n"); | |
4816 | goto err_cleanup_rxqs; | |
4817 | } | |
4818 | ||
591f4cfa | 4819 | err = mvpp2_irqs_init(port); |
3f518509 | 4820 | if (err) { |
591f4cfa | 4821 | netdev_err(port->dev, "cannot init IRQs\n"); |
3f518509 MW |
4822 | goto err_cleanup_txqs; |
4823 | } | |
4824 | ||
dfce1bab MW |
4825 | if (port->phylink) { |
4826 | err = phylink_fwnode_phy_connect(port->phylink, port->fwnode, 0); | |
4bb04326 AT |
4827 | if (err) { |
4828 | netdev_err(port->dev, "could not attach PHY (%d)\n", | |
4829 | err); | |
4830 | goto err_free_irq; | |
4831 | } | |
4832 | ||
4833 | valid = true; | |
4834 | } | |
4835 | ||
f704177e | 4836 | if (priv->hw_version >= MVPP22 && port->port_irq) { |
89141972 | 4837 | err = request_irq(port->port_irq, mvpp2_port_isr, 0, |
fd3651b2 AT |
4838 | dev->name, port); |
4839 | if (err) { | |
89141972 RK |
4840 | netdev_err(port->dev, |
4841 | "cannot request port link/ptp IRQ %d\n", | |
4842 | port->port_irq); | |
fd3651b2 AT |
4843 | goto err_free_irq; |
4844 | } | |
4845 | ||
4846 | mvpp22_gop_setup_irq(port); | |
fd3651b2 | 4847 | |
4bb04326 AT |
4848 | /* By default the link is down */ |
4849 | netif_carrier_off(port->dev); | |
3f518509 | 4850 | |
4bb04326 AT |
4851 | valid = true; |
4852 | } else { | |
89141972 | 4853 | port->port_irq = 0; |
4bb04326 AT |
4854 | } |
4855 | ||
4856 | if (!valid) { | |
4857 | netdev_err(port->dev, | |
4858 | "invalid configuration: no dt or link IRQ"); | |
82a10dc7 | 4859 | err = -ENOENT; |
4bb04326 AT |
4860 | goto err_free_irq; |
4861 | } | |
3f518509 MW |
4862 | |
4863 | /* Unmask interrupts on all CPUs */ | |
4864 | on_each_cpu(mvpp2_interrupts_unmask, port, 1); | |
213f428f | 4865 | mvpp2_shared_interrupt_mask_unmask(port, false); |
3f518509 MW |
4866 | |
4867 | mvpp2_start_dev(port); | |
4868 | ||
118d6298 | 4869 | /* Start hardware statistics gathering */ |
e5c500eb | 4870 | queue_delayed_work(priv->stats_queue, &port->stats_work, |
118d6298 MR |
4871 | MVPP2_MIB_COUNTERS_STATS_DELAY); |
4872 | ||
3f518509 MW |
4873 | return 0; |
4874 | ||
4875 | err_free_irq: | |
591f4cfa | 4876 | mvpp2_irqs_deinit(port); |
3f518509 MW |
4877 | err_cleanup_txqs: |
4878 | mvpp2_cleanup_txqs(port); | |
4879 | err_cleanup_rxqs: | |
4880 | mvpp2_cleanup_rxqs(port); | |
4881 | return err; | |
4882 | } | |
4883 | ||
4884 | static int mvpp2_stop(struct net_device *dev) | |
4885 | { | |
4886 | struct mvpp2_port *port = netdev_priv(dev); | |
edc660fa | 4887 | struct mvpp2_port_pcpu *port_pcpu; |
074c74df | 4888 | unsigned int thread; |
3f518509 MW |
4889 | |
4890 | mvpp2_stop_dev(port); | |
3f518509 | 4891 | |
e531f767 | 4892 | /* Mask interrupts on all threads */ |
3f518509 | 4893 | on_each_cpu(mvpp2_interrupts_mask, port, 1); |
213f428f | 4894 | mvpp2_shared_interrupt_mask_unmask(port, true); |
3f518509 | 4895 | |
4bb04326 AT |
4896 | if (port->phylink) |
4897 | phylink_disconnect_phy(port->phylink); | |
89141972 RK |
4898 | if (port->port_irq) |
4899 | free_irq(port->port_irq, port); | |
fd3651b2 | 4900 | |
591f4cfa | 4901 | mvpp2_irqs_deinit(port); |
213f428f | 4902 | if (!port->has_tx_irqs) { |
e531f767 | 4903 | for (thread = 0; thread < port->priv->nthreads; thread++) { |
074c74df | 4904 | port_pcpu = per_cpu_ptr(port->pcpu, thread); |
edc660fa | 4905 | |
213f428f TP |
4906 | hrtimer_cancel(&port_pcpu->tx_done_timer); |
4907 | port_pcpu->timer_scheduled = false; | |
213f428f | 4908 | } |
edc660fa | 4909 | } |
3f518509 MW |
4910 | mvpp2_cleanup_rxqs(port); |
4911 | mvpp2_cleanup_txqs(port); | |
4912 | ||
e5c500eb | 4913 | cancel_delayed_work_sync(&port->stats_work); |
118d6298 | 4914 | |
1f69afce AT |
4915 | mvpp2_mac_reset_assert(port); |
4916 | mvpp22_pcs_reset_assert(port); | |
4917 | ||
3f518509 MW |
4918 | return 0; |
4919 | } | |
4920 | ||
10fea26c MC |
4921 | static int mvpp2_prs_mac_da_accept_list(struct mvpp2_port *port, |
4922 | struct netdev_hw_addr_list *list) | |
3f518509 | 4923 | { |
3f518509 | 4924 | struct netdev_hw_addr *ha; |
10fea26c MC |
4925 | int ret; |
4926 | ||
4927 | netdev_hw_addr_list_for_each(ha, list) { | |
4928 | ret = mvpp2_prs_mac_da_accept(port, ha->addr, true); | |
4929 | if (ret) | |
4930 | return ret; | |
3f518509 | 4931 | } |
56beda3d | 4932 | |
10fea26c MC |
4933 | return 0; |
4934 | } | |
4935 | ||
4936 | static void mvpp2_set_rx_promisc(struct mvpp2_port *port, bool enable) | |
4937 | { | |
4938 | if (!enable && (port->dev->features & NETIF_F_HW_VLAN_CTAG_FILTER)) | |
56beda3d | 4939 | mvpp2_prs_vid_enable_filtering(port); |
10fea26c MC |
4940 | else |
4941 | mvpp2_prs_vid_disable_filtering(port); | |
4942 | ||
4943 | mvpp2_prs_mac_promisc_set(port->priv, port->id, | |
4944 | MVPP2_PRS_L2_UNI_CAST, enable); | |
4945 | ||
4946 | mvpp2_prs_mac_promisc_set(port->priv, port->id, | |
4947 | MVPP2_PRS_L2_MULTI_CAST, enable); | |
4948 | } | |
4949 | ||
4950 | static void mvpp2_set_rx_mode(struct net_device *dev) | |
4951 | { | |
4952 | struct mvpp2_port *port = netdev_priv(dev); | |
4953 | ||
4954 | /* Clear the whole UC and MC list */ | |
4955 | mvpp2_prs_mac_del_all(port); | |
4956 | ||
4957 | if (dev->flags & IFF_PROMISC) { | |
4958 | mvpp2_set_rx_promisc(port, true); | |
4959 | return; | |
4960 | } | |
4961 | ||
4962 | mvpp2_set_rx_promisc(port, false); | |
4963 | ||
4964 | if (netdev_uc_count(dev) > MVPP2_PRS_MAC_UC_FILT_MAX || | |
4965 | mvpp2_prs_mac_da_accept_list(port, &dev->uc)) | |
4966 | mvpp2_prs_mac_promisc_set(port->priv, port->id, | |
4967 | MVPP2_PRS_L2_UNI_CAST, true); | |
4968 | ||
4969 | if (dev->flags & IFF_ALLMULTI) { | |
4970 | mvpp2_prs_mac_promisc_set(port->priv, port->id, | |
4971 | MVPP2_PRS_L2_MULTI_CAST, true); | |
4972 | return; | |
4973 | } | |
4974 | ||
4975 | if (netdev_mc_count(dev) > MVPP2_PRS_MAC_MC_FILT_MAX || | |
4976 | mvpp2_prs_mac_da_accept_list(port, &dev->mc)) | |
4977 | mvpp2_prs_mac_promisc_set(port->priv, port->id, | |
4978 | MVPP2_PRS_L2_MULTI_CAST, true); | |
3f518509 MW |
4979 | } |
4980 | ||
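/* Editor's aside: the fallback ladder of mvpp2_set_rx_mode() as a
 * stand-alone sketch: unicast/multicast promiscuity is enabled whenever
 * the corresponding parser filter capacity would be exceeded (the real
 * limits are MVPP2_PRS_MAC_UC/MC_FILT_MAX; the values below are made up). */
#include <stdbool.h>
#include <stdio.h>

#define UC_FILT_MAX 4	/* illustrative capacities */
#define MC_FILT_MAX 21

static void rx_mode(bool promisc, bool allmulti, int n_uc, int n_mc)
{
	if (promisc) {
		puts("uc+mc: promisc");
		return;
	}
	printf("uc: %s\n", n_uc > UC_FILT_MAX ? "promisc" : "filtered");
	printf("mc: %s\n", (allmulti || n_mc > MC_FILT_MAX) ?
	       "promisc" : "filtered");
}

int main(void)
{
	rx_mode(false, false, 2, 30);	/* uc filtered, mc promisc */
	return 0;
}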
4981 | static int mvpp2_set_mac_address(struct net_device *dev, void *p) | |
4982 | { | |
3f518509 MW |
4983 | const struct sockaddr *addr = p; |
4984 | int err; | |
4985 | ||
5b0ab2f4 YM |
4986 | if (!is_valid_ether_addr(addr->sa_data)) |
4987 | return -EADDRNOTAVAIL; | |
3f518509 MW |
4988 | |
4989 | err = mvpp2_prs_update_mac_da(dev, addr->sa_data); | |
5b0ab2f4 YM |
4990 | if (err) { |
4991 | /* Reconfigure the parser to accept the original MAC address */ |
4992 | mvpp2_prs_update_mac_da(dev, dev->dev_addr); | |
4993 | netdev_err(dev, "failed to change MAC address\n"); | |
4994 | } | |
3f518509 MW |
4995 | return err; |
4996 | } | |
4997 | ||
7d04b0b1 MC |
4998 | /* Shut down all the ports, reconfigure the pools as percpu or shared, |
4999 | * then bring all the ports back up. |
5000 | */ | |
5001 | static int mvpp2_bm_switch_buffers(struct mvpp2 *priv, bool percpu) | |
5002 | { | |
3a616b92 | 5003 | bool change_percpu = (percpu != priv->percpu_pools); |
7d04b0b1 MC |
5004 | int numbufs = MVPP2_BM_POOLS_NUM, i; |
5005 | struct mvpp2_port *port = NULL; | |
5006 | bool status[MVPP2_MAX_PORTS]; | |
5007 | ||
5008 | for (i = 0; i < priv->port_count; i++) { | |
5009 | port = priv->port_list[i]; | |
5010 | status[i] = netif_running(port->dev); | |
5011 | if (status[i]) | |
5012 | mvpp2_stop(port->dev); | |
5013 | } | |
5014 | ||
5015 | /* nrxqs is the same for all ports */ | |
5016 | if (priv->percpu_pools) | |
5017 | numbufs = port->nrxqs * 2; | |
5018 | ||
3a616b92 SC |
5019 | if (change_percpu) |
5020 | mvpp2_bm_pool_update_priv_fc(priv, false); | |
5021 | ||
7d04b0b1 MC |
5022 | for (i = 0; i < numbufs; i++) |
5023 | mvpp2_bm_pool_destroy(port->dev->dev.parent, priv, &priv->bm_pools[i]); | |
5024 | ||
5025 | devm_kfree(port->dev->dev.parent, priv->bm_pools); | |
5026 | priv->percpu_pools = percpu; | |
5027 | mvpp2_bm_init(port->dev->dev.parent, priv); | |
5028 | ||
5029 | for (i = 0; i < priv->port_count; i++) { | |
5030 | port = priv->port_list[i]; | |
481e96fc MC |
5031 | if (percpu && port->ntxqs >= num_possible_cpus() * 2) |
5032 | xdp_set_features_flag(port->dev, | |
5033 | NETDEV_XDP_ACT_BASIC | | |
5034 | NETDEV_XDP_ACT_REDIRECT | | |
5035 | NETDEV_XDP_ACT_NDO_XMIT); | |
5036 | else | |
5037 | xdp_clear_features_flag(port->dev); | |
5038 | ||
7d04b0b1 MC |
5039 | mvpp2_swf_bm_pool_init(port); |
5040 | if (status[i]) | |
5041 | mvpp2_open(port->dev); | |
5042 | } | |
5043 | ||
3a616b92 SC |
5044 | if (change_percpu) |
5045 | mvpp2_bm_pool_update_priv_fc(priv, true); | |
5046 | ||
7d04b0b1 MC |
5047 | return 0; |
5048 | } | |
5049 | ||
3f518509 MW |
5050 | static int mvpp2_change_mtu(struct net_device *dev, int mtu) |
5051 | { | |
5052 | struct mvpp2_port *port = netdev_priv(dev); | |
230bd958 | 5053 | bool running = netif_running(dev); |
7d04b0b1 | 5054 | struct mvpp2 *priv = port->priv; |
3f518509 MW |
5055 | int err; |
5056 | ||
5777987e JW |
5057 | if (!IS_ALIGNED(MVPP2_RX_PKT_SIZE(mtu), 8)) { |
5058 | netdev_info(dev, "illegal MTU value %d, round to %d\n", mtu, | |
5059 | ALIGN(MVPP2_RX_PKT_SIZE(mtu), 8)); | |
5060 | mtu = ALIGN(MVPP2_RX_PKT_SIZE(mtu), 8); | |
3f518509 MW |
5061 | } |
5062 | ||
7b1b62bc MB |
5063 | if (port->xdp_prog && mtu > MVPP2_MAX_RX_BUF_SIZE) { |
5064 | netdev_err(dev, "Illegal MTU value %d (> %d) for XDP mode\n", | |
5065 | mtu, (int)MVPP2_MAX_RX_BUF_SIZE); | |
5066 | return -EINVAL; | |
5067 | } | |
5068 | ||
7d04b0b1 MC |
5069 | if (MVPP2_RX_PKT_SIZE(mtu) > MVPP2_BM_LONG_PKT_SIZE) { |
5070 | if (priv->percpu_pools) { | |
5071 | netdev_warn(dev, "mtu %d too high, switching to shared buffers", mtu); | |
5072 | mvpp2_bm_switch_buffers(priv, false); | |
5073 | } | |
5074 | } else { | |
5075 | bool jumbo = false; | |
5076 | int i; | |
5077 | ||
5078 | for (i = 0; i < priv->port_count; i++) | |
5079 | if (priv->port_list[i] != port && | |
5080 | MVPP2_RX_PKT_SIZE(priv->port_list[i]->dev->mtu) > | |
5081 | MVPP2_BM_LONG_PKT_SIZE) { | |
5082 | jumbo = true; | |
5083 | break; | |
5084 | } | |
5085 | ||
5086 | /* No port is using jumbo frames */ | |
5087 | if (!jumbo) { | |
5088 | dev_info(port->dev->dev.parent, | |
5089 | "all ports have a low MTU, switching to per-cpu buffers"); | |
5090 | mvpp2_bm_switch_buffers(priv, true); | |
5091 | } | |
5092 | } | |
5093 | ||
230bd958 MC |
5094 | if (running) |
5095 | mvpp2_stop_dev(port); | |
3f518509 MW |
5096 | |
5097 | err = mvpp2_bm_update_mtu(dev, mtu); | |
230bd958 MC |
5098 | if (err) { |
5099 | netdev_err(dev, "failed to change MTU\n"); | |
5100 | /* Reconfigure BM to the original MTU */ | |
5101 | mvpp2_bm_update_mtu(dev, dev->mtu); | |
5102 | } else { | |
3f518509 | 5103 | port->pkt_size = MVPP2_RX_PKT_SIZE(mtu); |
3f518509 MW |
5104 | } |
5105 | ||
230bd958 MC |
5106 | if (running) { |
5107 | mvpp2_start_dev(port); | |
5108 | mvpp2_egress_enable(port); | |
5109 | mvpp2_ingress_enable(port); | |
5110 | } | |
3f518509 | 5111 | |
3f518509 MW |
5112 | return err; |
5113 | } | |
5114 | ||
c2d6fe61 MC |
5115 | static int mvpp2_check_pagepool_dma(struct mvpp2_port *port) |
5116 | { | |
5117 | enum dma_data_direction dma_dir = DMA_FROM_DEVICE; | |
5118 | struct mvpp2 *priv = port->priv; | |
5119 | int err = -1, i; | |
5120 | ||
5121 | if (!priv->percpu_pools) | |
5122 | return err; | |
5123 | ||
4e48978c | 5124 | if (!priv->page_pool[0]) |
c2d6fe61 MC |
5125 | return -ENOMEM; |
5126 | ||
5127 | for (i = 0; i < priv->port_count; i++) { | |
5128 | port = priv->port_list[i]; | |
5129 | if (port->xdp_prog) { | |
5130 | dma_dir = DMA_BIDIRECTIONAL; | |
5131 | break; | |
5132 | } | |
5133 | } | |
5134 | ||
5135 | /* All pools are equal in terms of DMA direction */ | |
5136 | if (priv->page_pool[0]->p.dma_dir != dma_dir) | |
5137 | err = mvpp2_bm_switch_buffers(priv, true); | |
5138 | ||
5139 | return err; | |
5140 | } | |
5141 | ||
bc1f4470 | 5142 | static void |
3f518509 MW |
5143 | mvpp2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) |
5144 | { | |
5145 | struct mvpp2_port *port = netdev_priv(dev); | |
5146 | unsigned int start; | |
850623b3 | 5147 | unsigned int cpu; |
3f518509 MW |
5148 | |
5149 | for_each_possible_cpu(cpu) { | |
5150 | struct mvpp2_pcpu_stats *cpu_stats; | |
5151 | u64 rx_packets; | |
5152 | u64 rx_bytes; | |
5153 | u64 tx_packets; | |
5154 | u64 tx_bytes; | |
5155 | ||
5156 | cpu_stats = per_cpu_ptr(port->stats, cpu); | |
5157 | do { | |
068c38ad | 5158 | start = u64_stats_fetch_begin(&cpu_stats->syncp); |
3f518509 MW |
5159 | rx_packets = cpu_stats->rx_packets; |
5160 | rx_bytes = cpu_stats->rx_bytes; | |
5161 | tx_packets = cpu_stats->tx_packets; | |
5162 | tx_bytes = cpu_stats->tx_bytes; | |
068c38ad | 5163 | } while (u64_stats_fetch_retry(&cpu_stats->syncp, start)); |
3f518509 MW |
5164 | |
5165 | stats->rx_packets += rx_packets; | |
5166 | stats->rx_bytes += rx_bytes; | |
5167 | stats->tx_packets += tx_packets; | |
5168 | stats->tx_bytes += tx_bytes; | |
5169 | } | |
5170 | ||
5171 | stats->rx_errors = dev->stats.rx_errors; | |
5172 | stats->rx_dropped = dev->stats.rx_dropped; | |
5173 | stats->tx_dropped = dev->stats.tx_dropped; | |
3f518509 MW |
5174 | } |
5175 | ||
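/* Editor's aside: the seqcount retry loop that u64_stats_fetch_begin() and
 * u64_stats_fetch_retry() implement for mvpp2_get_stats64(), sketched
 * single-threaded: re-read the snapshot until the writer's sequence number
 * is even and unchanged, so the 64-bit counters are mutually consistent
 * even on 32-bit machines. */
#include <stdio.h>

struct pcpu_stats {
	unsigned int seq;	/* odd while a writer is mid-update */
	unsigned long long rx_packets, rx_bytes;
};

static void read_stats(const struct pcpu_stats *s,
		       unsigned long long *pkts, unsigned long long *bytes)
{
	unsigned int start;

	do {
		start = s->seq;		/* u64_stats_fetch_begin() */
		*pkts = s->rx_packets;
		*bytes = s->rx_bytes;
	} while ((start & 1) || start != s->seq); /* ...fetch_retry() */
}

int main(void)
{
	struct pcpu_stats s = { .seq = 2, .rx_packets = 5, .rx_bytes = 300 };
	unsigned long long p, b;

	read_stats(&s, &p, &b);
	printf("%llu packets, %llu bytes\n", p, b);
	return 0;
}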
3c9ff6eb VO |
5176 | static int mvpp2_hwtstamp_set(struct net_device *dev, |
5177 | struct kernel_hwtstamp_config *config, | |
5178 | struct netlink_ext_ack *extack) | |
ce3497e2 | 5179 | { |
3c9ff6eb | 5180 | struct mvpp2_port *port = netdev_priv(dev); |
ce3497e2 | 5181 | void __iomem *ptp; |
f5015a59 | 5182 | u32 gcr, int_mask; |
ce3497e2 | 5183 | |
3c9ff6eb VO |
5184 | if (!port->hwtstamp) |
5185 | return -EOPNOTSUPP; | |
ce3497e2 | 5186 | |
3c9ff6eb VO |
5187 | if (config->tx_type != HWTSTAMP_TX_OFF && |
5188 | config->tx_type != HWTSTAMP_TX_ON) | |
ce3497e2 RK |
5189 | return -ERANGE; |
5190 | ||
5191 | ptp = port->priv->iface_base + MVPP22_PTP_BASE(port->gop_id); | |
f5015a59 RK |
5192 | |
5193 | int_mask = gcr = 0; | |
3c9ff6eb | 5194 | if (config->tx_type != HWTSTAMP_TX_OFF) { |
f5015a59 RK |
5195 | gcr |= MVPP22_PTP_GCR_TSU_ENABLE | MVPP22_PTP_GCR_TX_RESET; |
5196 | int_mask |= MVPP22_PTP_INT_MASK_QUEUE1 | | |
5197 | MVPP22_PTP_INT_MASK_QUEUE0; | |
5198 | } | |
5199 | ||
5200 | /* It seems we must also release the TX reset when enabling the TSU */ | |
3c9ff6eb | 5201 | if (config->rx_filter != HWTSTAMP_FILTER_NONE) |
f5015a59 RK |
5202 | gcr |= MVPP22_PTP_GCR_TSU_ENABLE | MVPP22_PTP_GCR_RX_RESET | |
5203 | MVPP22_PTP_GCR_TX_RESET; | |
5204 | ||
5205 | if (gcr & MVPP22_PTP_GCR_TSU_ENABLE) | |
5206 | mvpp22_tai_start(port->priv->tai); | |
5207 | ||
3c9ff6eb VO |
5208 | if (config->rx_filter != HWTSTAMP_FILTER_NONE) { |
5209 | config->rx_filter = HWTSTAMP_FILTER_ALL; | |
ce3497e2 RK |
5210 | mvpp2_modify(ptp + MVPP22_PTP_GCR, |
5211 | MVPP22_PTP_GCR_RX_RESET | | |
5212 | MVPP22_PTP_GCR_TX_RESET | | |
f5015a59 | 5213 | MVPP22_PTP_GCR_TSU_ENABLE, gcr); |
ce3497e2 RK |
5214 | port->rx_hwtstamp = true; |
5215 | } else { | |
5216 | port->rx_hwtstamp = false; | |
5217 | mvpp2_modify(ptp + MVPP22_PTP_GCR, | |
5218 | MVPP22_PTP_GCR_RX_RESET | | |
5219 | MVPP22_PTP_GCR_TX_RESET | | |
f5015a59 | 5220 | MVPP22_PTP_GCR_TSU_ENABLE, gcr); |
ce3497e2 RK |
5221 | } |
5222 | ||
f5015a59 RK |
5223 | mvpp2_modify(ptp + MVPP22_PTP_INT_MASK, |
5224 | MVPP22_PTP_INT_MASK_QUEUE1 | | |
5225 | MVPP22_PTP_INT_MASK_QUEUE0, int_mask); | |
5226 | ||
5227 | if (!(gcr & MVPP22_PTP_GCR_TSU_ENABLE)) | |
5228 | mvpp22_tai_stop(port->priv->tai); | |
5229 | ||
3c9ff6eb | 5230 | port->tx_hwtstamp_type = config->tx_type; |
ce3497e2 RK |
5231 | |
5232 | return 0; | |
5233 | } | |
5234 | ||
3c9ff6eb VO |
5235 | static int mvpp2_hwtstamp_get(struct net_device *dev, |
5236 | struct kernel_hwtstamp_config *config) | |
ce3497e2 | 5237 | { |
3c9ff6eb | 5238 | struct mvpp2_port *port = netdev_priv(dev); |
ce3497e2 | 5239 | |
3c9ff6eb VO |
5240 | if (!port->hwtstamp) |
5241 | return -EOPNOTSUPP; | |
ce3497e2 | 5242 | |
3c9ff6eb VO |
5243 | config->tx_type = port->tx_hwtstamp_type; |
5244 | config->rx_filter = port->rx_hwtstamp ? HWTSTAMP_FILTER_ALL : | |
5245 | HWTSTAMP_FILTER_NONE; | |
ce3497e2 RK |
5246 | |
5247 | return 0; | |
5248 | } | |
5249 | ||
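/* Editor's aside: a hedged sketch of the userspace side of
 * mvpp2_hwtstamp_set()/mvpp2_hwtstamp_get(), via the standard
 * SIOCSHWTSTAMP ioctl (see Documentation/networking/timestamping.rst).
 * The interface name "eth0" is an assumption; the driver reports
 * HWTSTAMP_FILTER_ALL back when RX timestamping is enabled. */
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

int main(void)
{
	struct hwtstamp_config cfg = {
		.tx_type   = HWTSTAMP_TX_ON,
		.rx_filter = HWTSTAMP_FILTER_ALL,
	};
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&cfg;

	if (fd < 0 || ioctl(fd, SIOCSHWTSTAMP, &ifr) < 0) {
		perror("SIOCSHWTSTAMP");
		return 1;
	}
	printf("rx_filter granted: %d\n", cfg.rx_filter);
	return 0;
}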
5250 | static int mvpp2_ethtool_get_ts_info(struct net_device *dev, | |
2111375b | 5251 | struct kernel_ethtool_ts_info *info) |
ce3497e2 RK |
5252 | { |
5253 | struct mvpp2_port *port = netdev_priv(dev); | |
5254 | ||
5255 | if (!port->hwtstamp) | |
5256 | return -EOPNOTSUPP; | |
5257 | ||
5258 | info->phc_index = mvpp22_tai_ptp_clock_index(port->priv->tai); | |
5259 | info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE | | |
f5015a59 | 5260 | SOF_TIMESTAMPING_TX_HARDWARE | |
ce3497e2 RK |
5261 | SOF_TIMESTAMPING_RX_HARDWARE | |
5262 | SOF_TIMESTAMPING_RAW_HARDWARE; | |
f5015a59 RK |
5263 | info->tx_types = BIT(HWTSTAMP_TX_OFF) | |
5264 | BIT(HWTSTAMP_TX_ON); | |
ce3497e2 RK |
5265 | info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) | |
5266 | BIT(HWTSTAMP_FILTER_ALL); | |
5267 | ||
5268 | return 0; | |
5269 | } | |
5270 | ||
bd695a5f TP |
5271 | static int mvpp2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) |
5272 | { | |
4bb04326 | 5273 | struct mvpp2_port *port = netdev_priv(dev); |
bd695a5f | 5274 | |
4bb04326 | 5275 | if (!port->phylink) |
bd695a5f TP |
5276 | return -ENOTSUPP; |
5277 | ||
4bb04326 | 5278 | return phylink_mii_ioctl(port->phylink, ifr, cmd); |
bd695a5f TP |
5279 | } |
5280 | ||
56beda3d MC |
5281 | static int mvpp2_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid) |
5282 | { | |
5283 | struct mvpp2_port *port = netdev_priv(dev); | |
5284 | int ret; | |
5285 | ||
5286 | ret = mvpp2_prs_vid_entry_add(port, vid); | |
5287 | if (ret) | |
5288 | netdev_err(dev, "rx-vlan-filter offloading cannot accept more than %d VIDs per port\n", | |
5289 | MVPP2_PRS_VLAN_FILT_MAX - 1); | |
5290 | return ret; | |
5291 | } | |
5292 | ||
5293 | static int mvpp2_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid) | |
5294 | { | |
5295 | struct mvpp2_port *port = netdev_priv(dev); | |
5296 | ||
5297 | mvpp2_prs_vid_entry_remove(port, vid); | |
5298 | return 0; | |
5299 | } | |
5300 | ||
5301 | static int mvpp2_set_features(struct net_device *dev, | |
5302 | netdev_features_t features) | |
5303 | { | |
5304 | netdev_features_t changed = dev->features ^ features; | |
5305 | struct mvpp2_port *port = netdev_priv(dev); | |
5306 | ||
5307 | if (changed & NETIF_F_HW_VLAN_CTAG_FILTER) { | |
5308 | if (features & NETIF_F_HW_VLAN_CTAG_FILTER) { | |
5309 | mvpp2_prs_vid_enable_filtering(port); | |
5310 | } else { | |
5311 | /* Invalidate all registered VID filters for this | |
5312 | * port | |
5313 | */ | |
5314 | mvpp2_prs_vid_remove_all(port); | |
5315 | ||
5316 | mvpp2_prs_vid_disable_filtering(port); | |
5317 | } | |
5318 | } | |
5319 | ||
d33ec452 MC |
5320 | if (changed & NETIF_F_RXHASH) { |
5321 | if (features & NETIF_F_RXHASH) | |
6310f77d | 5322 | mvpp22_port_rss_enable(port); |
d33ec452 | 5323 | else |
6310f77d | 5324 | mvpp22_port_rss_disable(port); |
d33ec452 MC |
5325 | } |
5326 | ||
56beda3d MC |
5327 | return 0; |
5328 | } | |
5329 | ||
07dd0a7a MC |
5330 | static int mvpp2_xdp_setup(struct mvpp2_port *port, struct netdev_bpf *bpf) |
5331 | { | |
5332 | struct bpf_prog *prog = bpf->prog, *old_prog; | |
5333 | bool running = netif_running(port->dev); | |
5334 | bool reset = !prog != !port->xdp_prog; | |
5335 | ||
7b1b62bc MB |
5336 | if (port->dev->mtu > MVPP2_MAX_RX_BUF_SIZE) { |
5337 | NL_SET_ERR_MSG_MOD(bpf->extack, "MTU too large for XDP"); | |
07dd0a7a MC |
5338 | return -EOPNOTSUPP; |
5339 | } | |
5340 | ||
5341 | if (!port->priv->percpu_pools) { | |
5342 | NL_SET_ERR_MSG_MOD(bpf->extack, "Per CPU Pools required for XDP"); | |
5343 | return -EOPNOTSUPP; | |
5344 | } | |
5345 | ||
c2d6fe61 MC |
5346 | if (port->ntxqs < num_possible_cpus() * 2) { |
5347 | NL_SET_ERR_MSG_MOD(bpf->extack, "XDP_TX needs two TX queues per CPU"); | |
5348 | return -EOPNOTSUPP; | |
07dd0a7a MC |
5349 | } |
5350 | ||
c2d6fe61 MC |
5351 | /* device is up and a bpf program is added/removed, so the RX queues must be set up again */ |
5352 | if (running && reset) | |
5353 | mvpp2_stop(port->dev); | |
5354 | ||
07dd0a7a MC |
5355 | old_prog = xchg(&port->xdp_prog, prog); |
5356 | if (old_prog) | |
5357 | bpf_prog_put(old_prog); | |
5358 | ||
5359 | /* the bpf program is merely replaced; RXQs and MTU are already set up */ |
5360 | if (!reset) | |
5361 | return 0; | |
5362 | ||
5363 | /* device was up, restore the link */ | |
c2d6fe61 MC |
5364 | if (running) |
5365 | mvpp2_open(port->dev); | |
07dd0a7a | 5366 | |
c2d6fe61 MC |
5367 | /* Check Page Pool DMA Direction */ |
5368 | mvpp2_check_pagepool_dma(port); | |
07dd0a7a MC |
5369 | |
5370 | return 0; | |
5371 | } | |
5372 | ||
5373 | static int mvpp2_xdp(struct net_device *dev, struct netdev_bpf *xdp) | |
5374 | { | |
5375 | struct mvpp2_port *port = netdev_priv(dev); | |
5376 | ||
5377 | switch (xdp->command) { | |
5378 | case XDP_SETUP_PROG: | |
5379 | return mvpp2_xdp_setup(port, xdp); | |
07dd0a7a MC |
5380 | default: |
5381 | return -EINVAL; | |
5382 | } | |
5383 | } | |
5384 | ||
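/* Editor's aside: a hedged userspace sketch of exercising the
 * XDP_SETUP_PROG path handled by mvpp2_xdp(), using libbpf's
 * bpf_xdp_attach() (libbpf >= 0.8). The object file and interface name
 * are assumptions; mvpp2 refuses the attach with -EOPNOTSUPP if the MTU
 * is too large, percpu pools are off, or fewer than two TX queues per
 * CPU are available. */
#include <stdio.h>
#include <net/if.h>
#include <bpf/libbpf.h>

int main(void)
{
	struct bpf_object *obj = bpf_object__open_file("xdp_prog.o", NULL);
	struct bpf_program *prog;
	int ifindex = if_nametoindex("eth0");	/* assumed interface */

	if (!obj || !ifindex || bpf_object__load(obj))
		return 1;

	prog = bpf_object__next_program(obj, NULL);
	if (!prog)
		return 1;

	/* lands in ndo_bpf() -> mvpp2_xdp(XDP_SETUP_PROG) */
	if (bpf_xdp_attach(ifindex, bpf_program__fd(prog), 0, NULL) < 0) {
		perror("bpf_xdp_attach");
		return 1;
	}
	return 0;
}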
3f518509 MW |
5385 | /* Ethtool methods */ |
5386 | ||
4bb04326 AT |
5387 | static int mvpp2_ethtool_nway_reset(struct net_device *dev) |
5388 | { | |
5389 | struct mvpp2_port *port = netdev_priv(dev); | |
5390 | ||
5391 | if (!port->phylink) | |
5392 | return -ENOTSUPP; | |
5393 | ||
5394 | return phylink_ethtool_nway_reset(port->phylink); | |
5395 | } | |
5396 | ||
3f518509 | 5397 | /* Set interrupt coalescing for ethtools */ |
f3ccfda1 YM |
5398 | static int |
5399 | mvpp2_ethtool_set_coalesce(struct net_device *dev, | |
5400 | struct ethtool_coalesce *c, | |
5401 | struct kernel_ethtool_coalesce *kernel_coal, | |
5402 | struct netlink_ext_ack *extack) | |
3f518509 MW |
5403 | { |
5404 | struct mvpp2_port *port = netdev_priv(dev); | |
5405 | int queue; | |
5406 | ||
09f83975 | 5407 | for (queue = 0; queue < port->nrxqs; queue++) { |
3f518509 MW |
5408 | struct mvpp2_rx_queue *rxq = port->rxqs[queue]; |
5409 | ||
5410 | rxq->time_coal = c->rx_coalesce_usecs; | |
5411 | rxq->pkts_coal = c->rx_max_coalesced_frames; | |
d63f9e41 TP |
5412 | mvpp2_rx_pkts_coal_set(port, rxq); |
5413 | mvpp2_rx_time_coal_set(port, rxq); | |
3f518509 MW |
5414 | } |
5415 | ||
213f428f TP |
5416 | if (port->has_tx_irqs) { |
5417 | port->tx_time_coal = c->tx_coalesce_usecs; | |
5418 | mvpp2_tx_time_coal_set(port); | |
5419 | } | |
5420 | ||
09f83975 | 5421 | for (queue = 0; queue < port->ntxqs; queue++) { |
3f518509 MW |
5422 | struct mvpp2_tx_queue *txq = port->txqs[queue]; |
5423 | ||
5424 | txq->done_pkts_coal = c->tx_max_coalesced_frames; | |
213f428f TP |
5425 | |
5426 | if (port->has_tx_irqs) | |
5427 | mvpp2_tx_pkts_coal_set(port, txq); | |
3f518509 MW |
5428 | } |
5429 | ||
3f518509 MW |
5430 | return 0; |
5431 | } | |
5432 | ||
5433 | /* get coalescing for ethtools */ | |
f3ccfda1 YM |
5434 | static int |
5435 | mvpp2_ethtool_get_coalesce(struct net_device *dev, | |
5436 | struct ethtool_coalesce *c, | |
5437 | struct kernel_ethtool_coalesce *kernel_coal, | |
5438 | struct netlink_ext_ack *extack) | |
3f518509 MW |
5439 | { |
5440 | struct mvpp2_port *port = netdev_priv(dev); | |
5441 | ||
385c284f AT |
5442 | c->rx_coalesce_usecs = port->rxqs[0]->time_coal; |
5443 | c->rx_max_coalesced_frames = port->rxqs[0]->pkts_coal; | |
5444 | c->tx_max_coalesced_frames = port->txqs[0]->done_pkts_coal; | |
24b28ccb | 5445 | c->tx_coalesce_usecs = port->tx_time_coal; |
3f518509 MW |
5446 | return 0; |
5447 | } | |
5448 | ||
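/* Editor's aside: a hedged sketch of the userspace side of the coalesce
 * handlers above, using the legacy SIOCETHTOOL ioctl (the modern route is
 * ethtool netlink). Equivalent CLI: ethtool -C eth0 rx-usecs 100
 * rx-frames 32. The interface name is an assumption. */
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	struct ethtool_coalesce ec = {
		.cmd = ETHTOOL_SCOALESCE,
		.rx_coalesce_usecs = 100,	/* -> rxq->time_coal */
		.rx_max_coalesced_frames = 32,	/* -> rxq->pkts_coal */
	};
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&ec;

	if (fd < 0 || ioctl(fd, SIOCETHTOOL, &ifr) < 0) {
		perror("ETHTOOL_SCOALESCE");
		return 1;
	}
	return 0;
}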
5449 | static void mvpp2_ethtool_get_drvinfo(struct net_device *dev, | |
5450 | struct ethtool_drvinfo *drvinfo) | |
5451 | { | |
f029c781 | 5452 | strscpy(drvinfo->driver, MVPP2_DRIVER_NAME, |
3f518509 | 5453 | sizeof(drvinfo->driver)); |
f029c781 | 5454 | strscpy(drvinfo->version, MVPP2_DRIVER_VERSION, |
3f518509 | 5455 | sizeof(drvinfo->version)); |
f029c781 | 5456 | strscpy(drvinfo->bus_info, dev_name(&dev->dev), |
3f518509 MW |
5457 | sizeof(drvinfo->bus_info)); |
5458 | } | |
5459 | ||
74624944 HC |
5460 | static void |
5461 | mvpp2_ethtool_get_ringparam(struct net_device *dev, | |
5462 | struct ethtool_ringparam *ring, | |
5463 | struct kernel_ethtool_ringparam *kernel_ring, | |
5464 | struct netlink_ext_ack *extack) | |
3f518509 MW |
5465 | { |
5466 | struct mvpp2_port *port = netdev_priv(dev); | |
5467 | ||
7cf87e4a YM |
5468 | ring->rx_max_pending = MVPP2_MAX_RXD_MAX; |
5469 | ring->tx_max_pending = MVPP2_MAX_TXD_MAX; | |
3f518509 MW |
5470 | ring->rx_pending = port->rx_ring_size; |
5471 | ring->tx_pending = port->tx_ring_size; | |
5472 | } | |
5473 | ||
74624944 HC |
5474 | static int |
5475 | mvpp2_ethtool_set_ringparam(struct net_device *dev, | |
5476 | struct ethtool_ringparam *ring, | |
5477 | struct kernel_ethtool_ringparam *kernel_ring, | |
5478 | struct netlink_ext_ack *extack) | |
3f518509 MW |
5479 | { |
5480 | struct mvpp2_port *port = netdev_priv(dev); | |
5481 | u16 prev_rx_ring_size = port->rx_ring_size; | |
5482 | u16 prev_tx_ring_size = port->tx_ring_size; | |
5483 | int err; | |
5484 | ||
5485 | err = mvpp2_check_ringparam_valid(dev, ring); | |
5486 | if (err) | |
5487 | return err; | |
5488 | ||
5489 | if (!netif_running(dev)) { | |
5490 | port->rx_ring_size = ring->rx_pending; | |
5491 | port->tx_ring_size = ring->tx_pending; | |
5492 | return 0; | |
5493 | } | |
5494 | ||
5495 | /* The interface is running, so we have to force a | |
5496 | * reallocation of the queues | |
5497 | */ | |
5498 | mvpp2_stop_dev(port); | |
5499 | mvpp2_cleanup_rxqs(port); | |
5500 | mvpp2_cleanup_txqs(port); | |
5501 | ||
5502 | port->rx_ring_size = ring->rx_pending; | |
5503 | port->tx_ring_size = ring->tx_pending; | |
5504 | ||
5505 | err = mvpp2_setup_rxqs(port); | |
5506 | if (err) { | |
5507 | /* Reallocate Rx queues with the original ring size */ | |
5508 | port->rx_ring_size = prev_rx_ring_size; | |
5509 | ring->rx_pending = prev_rx_ring_size; | |
5510 | err = mvpp2_setup_rxqs(port); | |
5511 | if (err) | |
5512 | goto err_out; | |
5513 | } | |
5514 | err = mvpp2_setup_txqs(port); | |
5515 | if (err) { | |
5516 | /* Reallocate Tx queues with the original ring size */ | |
5517 | port->tx_ring_size = prev_tx_ring_size; | |
5518 | ring->tx_pending = prev_tx_ring_size; | |
5519 | err = mvpp2_setup_txqs(port); | |
5520 | if (err) | |
5521 | goto err_clean_rxqs; | |
5522 | } | |
5523 | ||
5524 | mvpp2_start_dev(port); | |
5525 | mvpp2_egress_enable(port); | |
5526 | mvpp2_ingress_enable(port); | |
5527 | ||
5528 | return 0; | |
5529 | ||
5530 | err_clean_rxqs: | |
5531 | mvpp2_cleanup_rxqs(port); | |
5532 | err_out: | |
dfd4240a | 5533 | netdev_err(dev, "failed to change ring parameters"); |
3f518509 MW |
5534 | return err; |
5535 | } | |
5536 | ||
4bb04326 AT |
5537 | static void mvpp2_ethtool_get_pause_param(struct net_device *dev, |
5538 | struct ethtool_pauseparam *pause) | |
5539 | { | |
5540 | struct mvpp2_port *port = netdev_priv(dev); | |
5541 | ||
5542 | if (!port->phylink) | |
5543 | return; | |
5544 | ||
5545 | phylink_ethtool_get_pauseparam(port->phylink, pause); | |
5546 | } | |
5547 | ||
5548 | static int mvpp2_ethtool_set_pause_param(struct net_device *dev, | |
5549 | struct ethtool_pauseparam *pause) | |
5550 | { | |
5551 | struct mvpp2_port *port = netdev_priv(dev); | |
5552 | ||
5553 | if (!port->phylink) | |
5554 | return -ENOTSUPP; | |
5555 | ||
5556 | return phylink_ethtool_set_pauseparam(port->phylink, pause); | |
5557 | } | |
5558 | ||
5559 | static int mvpp2_ethtool_get_link_ksettings(struct net_device *dev, | |
5560 | struct ethtool_link_ksettings *cmd) | |
5561 | { | |
5562 | struct mvpp2_port *port = netdev_priv(dev); | |
5563 | ||
5564 | if (!port->phylink) | |
5565 | return -ENOTSUPP; | |
5566 | ||
5567 | return phylink_ethtool_ksettings_get(port->phylink, cmd); | |
5568 | } | |
5569 | ||
5570 | static int mvpp2_ethtool_set_link_ksettings(struct net_device *dev, | |
5571 | const struct ethtool_link_ksettings *cmd) | |
5572 | { | |
5573 | struct mvpp2_port *port = netdev_priv(dev); | |
5574 | ||
5575 | if (!port->phylink) | |
5576 | return -ENOTSUPP; | |
5577 | ||
5578 | return phylink_ethtool_ksettings_set(port->phylink, cmd); | |
5579 | } | |
5580 | ||
8179642b AT |
5581 | static int mvpp2_ethtool_get_rxnfc(struct net_device *dev, |
5582 | struct ethtool_rxnfc *info, u32 *rules) | |
5583 | { | |
5584 | struct mvpp2_port *port = netdev_priv(dev); | |
90b509b3 | 5585 | int ret = 0, i, loc = 0; |
8179642b | 5586 | |
0a8a8000 | 5587 | if (!mvpp22_rss_is_supported(port)) |
8179642b AT |
5588 | return -EOPNOTSUPP; |
5589 | ||
5590 | switch (info->cmd) { | |
436d4fdb MC |
5591 | case ETHTOOL_GRXFH: |
5592 | ret = mvpp2_ethtool_rxfh_get(port, info); | |
5593 | break; | |
8179642b AT |
5594 | case ETHTOOL_GRXRINGS: |
5595 | info->data = port->nrxqs; | |
5596 | break; | |
90b509b3 MC |
5597 | case ETHTOOL_GRXCLSRLCNT: |
5598 | info->rule_cnt = port->n_rfs_rules; | |
5599 | break; | |
5600 | case ETHTOOL_GRXCLSRULE: | |
5601 | ret = mvpp2_ethtool_cls_rule_get(port, info); | |
5602 | break; | |
5603 | case ETHTOOL_GRXCLSRLALL: | |
ae8e1d5e | 5604 | for (i = 0; i < MVPP2_N_RFS_ENTRIES_PER_FLOW; i++) { |
51fe0a47 HH |
5605 | if (loc == info->rule_cnt) { |
5606 | ret = -EMSGSIZE; | |
5607 | break; | |
5608 | } | |
5609 | ||
90b509b3 MC |
5610 | if (port->rfs_rules[i]) |
5611 | rules[loc++] = i; | |
5612 | } | |
5613 | break; | |
8179642b AT |
5614 | default: |
5615 | return -ENOTSUPP; | |
5616 | } | |
5617 | ||
436d4fdb MC |
5618 | return ret; |
5619 | } | |
5620 | ||
5621 | static int mvpp2_ethtool_set_rxnfc(struct net_device *dev, | |
5622 | struct ethtool_rxnfc *info) | |
5623 | { | |
5624 | struct mvpp2_port *port = netdev_priv(dev); | |
5625 | int ret = 0; | |
5626 | ||
0a8a8000 | 5627 | if (!mvpp22_rss_is_supported(port)) |
436d4fdb MC |
5628 | return -EOPNOTSUPP; |
5629 | ||
5630 | switch (info->cmd) { | |
5631 | case ETHTOOL_SRXFH: | |
5632 | ret = mvpp2_ethtool_rxfh_set(port, info); | |
5633 | break; | |
90b509b3 MC |
5634 | case ETHTOOL_SRXCLSRLINS: |
5635 | ret = mvpp2_ethtool_cls_rule_ins(port, info); | |
5636 | break; | |
5637 | case ETHTOOL_SRXCLSRLDEL: | |
5638 | ret = mvpp2_ethtool_cls_rule_del(port, info); | |
5639 | break; | |
436d4fdb MC |
5640 | default: |
5641 | return -EOPNOTSUPP; | |
5642 | } | |
5643 | return ret; | |
8179642b AT |
5644 | } |
5645 | ||
5646 | static u32 mvpp2_ethtool_get_rxfh_indir_size(struct net_device *dev) | |
5647 | { | |
0a8a8000 SC |
5648 | struct mvpp2_port *port = netdev_priv(dev); |
5649 | ||
5650 | return mvpp22_rss_is_supported(port) ? MVPP22_RSS_TABLE_ENTRIES : 0; | |
8179642b AT |
5651 | } |
5652 | ||
fb6e30a7 AZ |
5653 | static int mvpp2_ethtool_get_rxfh(struct net_device *dev, |
5654 | struct ethtool_rxfh_param *rxfh) | |
8179642b AT |
5655 | { |
5656 | struct mvpp2_port *port = netdev_priv(dev); | |
dcd8dbf9 | 5657 | u32 rss_context = rxfh->rss_context; |
895586d5 MC |
5658 | int ret = 0; |
5659 | ||
0a8a8000 | 5660 | if (!mvpp22_rss_is_supported(port)) |
895586d5 | 5661 | return -EOPNOTSUPP; |
39bd16df DC |
5662 | if (rss_context >= MVPP22_N_RSS_TABLES) |
5663 | return -EINVAL; | |
895586d5 | 5664 | |
fb6e30a7 | 5665 | rxfh->hfunc = ETH_RSS_HASH_CRC32; |
895586d5 | 5666 | |
fb6e30a7 AZ |
5667 | if (rxfh->indir) |
5668 | ret = mvpp22_port_rss_ctx_indir_get(port, rss_context, | |
5669 | rxfh->indir); | |
895586d5 MC |
5670 | |
5671 | return ret; | |
5672 | } | |
5673 | ||
f203fd85 JK |
5674 | static bool mvpp2_ethtool_rxfh_okay(struct mvpp2_port *port, |
5675 | const struct ethtool_rxfh_param *rxfh) | |
895586d5 | 5676 | { |
0a8a8000 | 5677 | if (!mvpp22_rss_is_supported(port)) |
f203fd85 | 5678 | return false; |
895586d5 | 5679 | |
fb6e30a7 AZ |
5680 | if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE && |
5681 | rxfh->hfunc != ETH_RSS_HASH_CRC32) | |
f203fd85 | 5682 | return false; |
895586d5 | 5683 | |
fb6e30a7 | 5684 | if (rxfh->key) |
f203fd85 JK |
5685 | return false; |
5686 | ||
5687 | return true; | |
5688 | } | |
5689 | ||
5690 | static int mvpp2_create_rxfh_context(struct net_device *dev, | |
5691 | struct ethtool_rxfh_context *ctx, | |
5692 | const struct ethtool_rxfh_param *rxfh, | |
5693 | struct netlink_ext_ack *extack) | |
5694 | { | |
5695 | struct mvpp2_port *port = netdev_priv(dev); | |
5696 | int ret = 0; | |
5697 | ||
5698 | if (!mvpp2_ethtool_rxfh_okay(port, rxfh)) | |
895586d5 MC |
5699 | return -EOPNOTSUPP; |
5700 | ||
f203fd85 | 5701 | ctx->hfunc = ETH_RSS_HASH_CRC32; |
895586d5 | 5702 | |
f203fd85 JK |
5703 | ret = mvpp22_port_rss_ctx_create(port, rxfh->rss_context); |
5704 | if (ret) | |
5705 | return ret; | |
895586d5 | 5706 | |
f203fd85 JK |
5707 | if (!rxfh->indir) |
5708 | ret = mvpp22_port_rss_ctx_indir_get(port, rxfh->rss_context, | |
5709 | ethtool_rxfh_context_indir(ctx)); | |
5710 | else | |
5711 | ret = mvpp22_port_rss_ctx_indir_set(port, rxfh->rss_context, | |
dcd8dbf9 | 5712 | rxfh->indir); |
f203fd85 JK |
5713 | return ret; |
5714 | } | |
5715 | ||
5716 | static int mvpp2_modify_rxfh_context(struct net_device *dev, | |
5717 | struct ethtool_rxfh_context *ctx, | |
5718 | const struct ethtool_rxfh_param *rxfh, | |
5719 | struct netlink_ext_ack *extack) | |
5720 | { | |
5721 | struct mvpp2_port *port = netdev_priv(dev); | |
5722 | int ret = 0; | |
5723 | ||
5724 | if (!mvpp2_ethtool_rxfh_okay(port, rxfh)) | |
5725 | return -EOPNOTSUPP; | |
dcd8dbf9 | 5726 | |
f203fd85 JK |
5727 | if (rxfh->indir) |
5728 | ret = mvpp22_port_rss_ctx_indir_set(port, rxfh->rss_context, | |
5729 | rxfh->indir); | |
dcd8dbf9 | 5730 | return ret; |
895586d5 | 5731 | } |
dcd8dbf9 | 5732 | |
f203fd85 JK |
5733 | static int mvpp2_remove_rxfh_context(struct net_device *dev, |
5734 | struct ethtool_rxfh_context *ctx, | |
5735 | u32 rss_context, | |
5736 | struct netlink_ext_ack *extack) | |
5737 | { | |
5738 | struct mvpp2_port *port = netdev_priv(dev); | |
5739 | ||
5740 | return mvpp22_port_rss_ctx_delete(port, rss_context); | |
5741 | } | |
5742 | ||
5743 | static int mvpp2_ethtool_set_rxfh(struct net_device *dev, | |
5744 | struct ethtool_rxfh_param *rxfh, | |
5745 | struct netlink_ext_ack *extack) | |
5746 | { | |
5747 | return mvpp2_modify_rxfh_context(dev, NULL, rxfh, extack); | |
5748 | } | |
5749 | ||
b53b1478 RKO |
5750 | static int mvpp2_ethtool_get_eee(struct net_device *dev, |
5751 | struct ethtool_keee *eee) | |
5752 | { | |
5753 | struct mvpp2_port *port = netdev_priv(dev); | |
5754 | ||
5755 | if (!port->phylink) | |
5756 | return -EOPNOTSUPP; | |
5757 | ||
5758 | return phylink_ethtool_get_eee(port->phylink, eee); | |
5759 | } | |
5760 | ||
5761 | static int mvpp2_ethtool_set_eee(struct net_device *dev, | |
5762 | struct ethtool_keee *eee) | |
5763 | { | |
5764 | struct mvpp2_port *port = netdev_priv(dev); | |
5765 | ||
5766 | if (!port->phylink) | |
5767 | return -EOPNOTSUPP; | |
5768 | ||
5769 | return phylink_ethtool_set_eee(port->phylink, eee); | |
5770 | } | |
5771 | ||
3f518509 MW |
5772 | /* Device ops */ |
5773 | ||
5774 | static const struct net_device_ops mvpp2_netdev_ops = { | |
5775 | .ndo_open = mvpp2_open, | |
5776 | .ndo_stop = mvpp2_stop, | |
5777 | .ndo_start_xmit = mvpp2_tx, | |
5778 | .ndo_set_rx_mode = mvpp2_set_rx_mode, | |
5779 | .ndo_set_mac_address = mvpp2_set_mac_address, | |
5780 | .ndo_change_mtu = mvpp2_change_mtu, | |
5781 | .ndo_get_stats64 = mvpp2_get_stats64, | |
a7605370 | 5782 | .ndo_eth_ioctl = mvpp2_ioctl, |
56beda3d MC |
5783 | .ndo_vlan_rx_add_vid = mvpp2_vlan_rx_add_vid, |
5784 | .ndo_vlan_rx_kill_vid = mvpp2_vlan_rx_kill_vid, | |
5785 | .ndo_set_features = mvpp2_set_features, | |
07dd0a7a | 5786 | .ndo_bpf = mvpp2_xdp, |
c2d6fe61 | 5787 | .ndo_xdp_xmit = mvpp2_xdp_xmit, |
3c9ff6eb VO |
5788 | .ndo_hwtstamp_get = mvpp2_hwtstamp_get, |
5789 | .ndo_hwtstamp_set = mvpp2_hwtstamp_set, | |
3f518509 MW |
5790 | }; |
5791 | ||
5792 | static const struct ethtool_ops mvpp2_eth_tool_ops = { | |
f203fd85 | 5793 | .rxfh_max_num_contexts = MVPP22_N_RSS_TABLES, |
078db9a3 JK |
5794 | .supported_coalesce_params = ETHTOOL_COALESCE_USECS | |
5795 | ETHTOOL_COALESCE_MAX_FRAMES, | |
4bb04326 | 5796 | .nway_reset = mvpp2_ethtool_nway_reset, |
dcd3e73a | 5797 | .get_link = ethtool_op_get_link, |
ce3497e2 | 5798 | .get_ts_info = mvpp2_ethtool_get_ts_info, |
dcd3e73a AT |
5799 | .set_coalesce = mvpp2_ethtool_set_coalesce, |
5800 | .get_coalesce = mvpp2_ethtool_get_coalesce, | |
5801 | .get_drvinfo = mvpp2_ethtool_get_drvinfo, | |
5802 | .get_ringparam = mvpp2_ethtool_get_ringparam, | |
5803 | .set_ringparam = mvpp2_ethtool_set_ringparam, | |
5804 | .get_strings = mvpp2_ethtool_get_strings, | |
5805 | .get_ethtool_stats = mvpp2_ethtool_get_stats, | |
5806 | .get_sset_count = mvpp2_ethtool_get_sset_count, | |
4bb04326 AT |
5807 | .get_pauseparam = mvpp2_ethtool_get_pause_param, |
5808 | .set_pauseparam = mvpp2_ethtool_set_pause_param, | |
5809 | .get_link_ksettings = mvpp2_ethtool_get_link_ksettings, | |
5810 | .set_link_ksettings = mvpp2_ethtool_set_link_ksettings, | |
8179642b | 5811 | .get_rxnfc = mvpp2_ethtool_get_rxnfc, |
436d4fdb | 5812 | .set_rxnfc = mvpp2_ethtool_set_rxnfc, |
8179642b AT |
5813 | .get_rxfh_indir_size = mvpp2_ethtool_get_rxfh_indir_size, |
5814 | .get_rxfh = mvpp2_ethtool_get_rxfh, | |
5815 | .set_rxfh = mvpp2_ethtool_set_rxfh, | |
f203fd85 JK |
5816 | .create_rxfh_context = mvpp2_create_rxfh_context, |
5817 | .modify_rxfh_context = mvpp2_modify_rxfh_context, | |
5818 | .remove_rxfh_context = mvpp2_remove_rxfh_context, | |
b53b1478 RKO |
5819 | .get_eee = mvpp2_ethtool_get_eee, |
5820 | .set_eee = mvpp2_ethtool_set_eee, | |
3f518509 MW |
5821 | }; |
5822 | ||
213f428f TP |
5823 | /* Used for PPv2.1, or PPv2.2 with the old Device Tree binding that |
5824 | * had a single IRQ defined per-port. | |
5825 | */ | |
5826 | static int mvpp2_simple_queue_vectors_init(struct mvpp2_port *port, | |
5827 | struct device_node *port_node) | |
591f4cfa TP |
5828 | { |
5829 | struct mvpp2_queue_vector *v = &port->qvecs[0]; | |
5830 | ||
5831 | v->first_rxq = 0; | |
5832 | v->nrxqs = port->nrxqs; | |
5833 | v->type = MVPP2_QUEUE_VECTOR_SHARED; | |
5834 | v->sw_thread_id = 0; | |
5835 | v->sw_thread_mask = *cpumask_bits(cpu_online_mask); | |
5836 | v->port = port; | |
5837 | v->irq = irq_of_parse_and_map(port_node, 0); | |
5838 | if (v->irq <= 0) | |
5839 | return -EINVAL; | |
b48b89f9 | 5840 | netif_napi_add(port->dev, &v->napi, mvpp2_poll); |
591f4cfa TP |
5841 | |
5842 | port->nqvecs = 1; | |
5843 | ||
5844 | return 0; | |
5845 | } | |
5846 | ||
213f428f TP |
5847 | static int mvpp2_multi_queue_vectors_init(struct mvpp2_port *port, |
5848 | struct device_node *port_node) | |
5849 | { | |
e531f767 | 5850 | struct mvpp2 *priv = port->priv; |
213f428f TP |
5851 | struct mvpp2_queue_vector *v; |
5852 | int i, ret; | |
5853 | ||
e531f767 AT |
5854 | switch (queue_mode) { |
5855 | case MVPP2_QDIST_SINGLE_MODE: | |
5856 | port->nqvecs = priv->nthreads + 1; | |
5857 | break; | |
5858 | case MVPP2_QDIST_MULTI_MODE: | |
5859 | port->nqvecs = priv->nthreads; | |
5860 | break; | |
5861 | } | |
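/* Resulting vector layout (illustration, assuming nthreads == 4):
 * - single mode: qvec0..qvec3 are per-thread private vectors (TX-done
 *   handling), and the extra qvec4 becomes the shared vector owning all
 *   RX queues - see the SHARED branch in the loop below.
 * - multi mode: qvec0..qvec3 are all private, each owning one RX queue.
 */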
213f428f TP |
5862 | |
5863 | for (i = 0; i < port->nqvecs; i++) { | |
5864 | char irqname[16]; | |
5865 | ||
5866 | v = port->qvecs + i; | |
5867 | ||
5868 | v->port = port; | |
5869 | v->type = MVPP2_QUEUE_VECTOR_PRIVATE; | |
5870 | v->sw_thread_id = i; | |
5871 | v->sw_thread_mask = BIT(i); | |
5872 | ||
a9aac385 AT |
5873 | if (port->flags & MVPP2_F_DT_COMPAT) |
5874 | snprintf(irqname, sizeof(irqname), "tx-cpu%d", i); | |
5875 | else | |
5876 | snprintf(irqname, sizeof(irqname), "hif%d", i); | |
213f428f TP |
5877 | |
5878 | if (queue_mode == MVPP2_QDIST_MULTI_MODE) { | |
3f136849 AT |
5879 | v->first_rxq = i; |
5880 | v->nrxqs = 1; | |
213f428f TP |
5881 | } else if (queue_mode == MVPP2_QDIST_SINGLE_MODE && |
5882 | i == (port->nqvecs - 1)) { | |
5883 | v->first_rxq = 0; | |
5884 | v->nrxqs = port->nrxqs; | |
5885 | v->type = MVPP2_QUEUE_VECTOR_SHARED; | |
a9aac385 AT |
5886 | |
5887 | if (port->flags & MVPP2_F_DT_COMPAT) | |
fda9e465 | 5888 | strscpy(irqname, "rx-shared", sizeof(irqname)); |
213f428f TP |
5889 | } |
5890 | ||
a75edc7c MW |
5891 | if (port_node) |
5892 | v->irq = of_irq_get_byname(port_node, irqname); | |
5893 | else | |
5894 | v->irq = fwnode_irq_get(port->fwnode, i); | |
213f428f TP |
5895 | if (v->irq <= 0) { |
5896 | ret = -EINVAL; | |
5897 | goto err; | |
5898 | } | |
5899 | ||
b48b89f9 | 5900 | netif_napi_add(port->dev, &v->napi, mvpp2_poll); |
213f428f TP |
5901 | } |
5902 | ||
5903 | return 0; | |
5904 | ||
5905 | err: | |
5906 | for (i = 0; i < port->nqvecs; i++) | |
5907 | irq_dispose_mapping(port->qvecs[i].irq); | |
5908 | return ret; | |
5909 | } | |
5910 | ||
5911 | static int mvpp2_queue_vectors_init(struct mvpp2_port *port, | |
5912 | struct device_node *port_node) | |
5913 | { | |
5914 | if (port->has_tx_irqs) | |
5915 | return mvpp2_multi_queue_vectors_init(port, port_node); | |
5916 | else | |
5917 | return mvpp2_simple_queue_vectors_init(port, port_node); | |
5918 | } | |
5919 | ||
591f4cfa TP |
5920 | static void mvpp2_queue_vectors_deinit(struct mvpp2_port *port) |
5921 | { | |
5922 | int i; | |
5923 | ||
5924 | for (i = 0; i < port->nqvecs; i++) | |
5925 | irq_dispose_mapping(port->qvecs[i].irq); | |
5926 | } | |
5927 | ||
5928 | /* Configure Rx queue group interrupt for this port */ | |
5929 | static void mvpp2_rx_irqs_setup(struct mvpp2_port *port) | |
5930 | { | |
5931 | struct mvpp2 *priv = port->priv; | |
5932 | u32 val; | |
5933 | int i; | |
5934 | ||
5935 | if (priv->hw_version == MVPP21) { | |
5936 | mvpp2_write(priv, MVPP21_ISR_RXQ_GROUP_REG(port->id), | |
5937 | port->nrxqs); | |
5938 | return; | |
5939 | } | |
5940 | ||
6af27a1d | 5941 | /* Handle the more complicated PPv2.2 and PPv2.3 case */ |
591f4cfa TP |
5942 | for (i = 0; i < port->nqvecs; i++) { |
5943 | struct mvpp2_queue_vector *qv = port->qvecs + i; | |
5944 | ||
5945 | if (!qv->nrxqs) | |
5946 | continue; | |
5947 | ||
5948 | val = qv->sw_thread_id; | |
5949 | val |= port->id << MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_OFFSET; | |
5950 | mvpp2_write(priv, MVPP22_ISR_RXQ_GROUP_INDEX_REG, val); | |
5951 | ||
5952 | val = qv->first_rxq; | |
5953 | val |= qv->nrxqs << MVPP22_ISR_RXQ_SUB_GROUP_SIZE_OFFSET; | |
5954 | mvpp2_write(priv, MVPP22_ISR_RXQ_SUB_GROUP_CONFIG_REG, val); | |
5955 | } | |
5956 | } | |
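/* Example of the indirect write pair above, with hypothetical values
 * (port 1, sw_thread_id 2, first_rxq 2, nrxqs 1):
 *
 *   GROUP_INDEX_REG      <- 2 | (1 << MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_OFFSET)
 *   SUB_GROUP_CONFIG_REG <- 2 | (1 << MVPP22_ISR_RXQ_SUB_GROUP_SIZE_OFFSET)
 *
 * The first write selects the (port, thread) slot, the second programs
 * that slot's starting RX queue and queue count.
 */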
5957 | ||
3f518509 MW |
5958 | /* Initialize port HW */ |
5959 | static int mvpp2_port_init(struct mvpp2_port *port) | |
5960 | { | |
5961 | struct device *dev = port->dev->dev.parent; | |
5962 | struct mvpp2 *priv = port->priv; | |
5963 | struct mvpp2_txq_pcpu *txq_pcpu; | |
074c74df | 5964 | unsigned int thread; |
87508224 | 5965 | int queue, err, val; |
3f518509 | 5966 | |
09f83975 TP |
5967 | /* Checks for hardware constraints */ |
5968 | if (port->first_rxq + port->nrxqs > | |
59b9a31e | 5969 | MVPP2_MAX_PORTS * priv->max_port_rxqs) |
3f518509 MW |
5970 | return -EINVAL; |
5971 | ||
3f136849 | 5972 | if (port->nrxqs > priv->max_port_rxqs || port->ntxqs > MVPP2_MAX_TXQ) |
09f83975 TP |
5973 | return -EINVAL; |
5974 | ||
3f518509 MW |
5975 | /* Disable port */ |
5976 | mvpp2_egress_disable(port); | |
5977 | mvpp2_port_disable(port); | |
5978 | ||
87508224 SC |
5979 | if (mvpp2_is_xlg(port->phy_interface)) { |
5980 | val = readl(port->base + MVPP22_XLG_CTRL0_REG); | |
5981 | val &= ~MVPP22_XLG_CTRL0_FORCE_LINK_PASS; | |
5982 | val |= MVPP22_XLG_CTRL0_FORCE_LINK_DOWN; | |
5983 | writel(val, port->base + MVPP22_XLG_CTRL0_REG); | |
5984 | } else { | |
5985 | val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG); | |
5986 | val &= ~MVPP2_GMAC_FORCE_LINK_PASS; | |
5987 | val |= MVPP2_GMAC_FORCE_LINK_DOWN; | |
5988 | writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG); | |
5989 | } | |
5990 | ||
213f428f TP |
5991 | port->tx_time_coal = MVPP2_TXDONE_COAL_USEC; |
5992 | ||
09f83975 | 5993 | port->txqs = devm_kcalloc(dev, port->ntxqs, sizeof(*port->txqs), |
3f518509 MW |
5994 | GFP_KERNEL); |
5995 | if (!port->txqs) | |
5996 | return -ENOMEM; | |
5997 | ||
5998 | /* Associate physical Tx queues to this port and initialize. | |
5999 | * The mapping is predefined. | |
6000 | */ | |
09f83975 | 6001 | for (queue = 0; queue < port->ntxqs; queue++) { |
3f518509 MW |
6002 | int queue_phy_id = mvpp2_txq_phys(port->id, queue); |
6003 | struct mvpp2_tx_queue *txq; | |
6004 | ||
6005 | txq = devm_kzalloc(dev, sizeof(*txq), GFP_KERNEL); | |
177c8d1c CJ |
6006 | if (!txq) { |
6007 | err = -ENOMEM; | |
6008 | goto err_free_percpu; | |
6009 | } | |
3f518509 MW |
6010 | |
6011 | txq->pcpu = alloc_percpu(struct mvpp2_txq_pcpu); | |
6012 | if (!txq->pcpu) { | |
6013 | err = -ENOMEM; | |
6014 | goto err_free_percpu; | |
6015 | } | |
6016 | ||
6017 | txq->id = queue_phy_id; | |
6018 | txq->log_id = queue; | |
6019 | txq->done_pkts_coal = MVPP2_TXDONE_COAL_PKTS_THRESH; | |
e531f767 | 6020 | for (thread = 0; thread < priv->nthreads; thread++) { |
074c74df AT |
6021 | txq_pcpu = per_cpu_ptr(txq->pcpu, thread); |
6022 | txq_pcpu->thread = thread; | |
3f518509 MW |
6023 | } |
6024 | ||
6025 | port->txqs[queue] = txq; | |
6026 | } | |
6027 | ||
09f83975 | 6028 | port->rxqs = devm_kcalloc(dev, port->nrxqs, sizeof(*port->rxqs), |
3f518509 MW |
6029 | GFP_KERNEL); |
6030 | if (!port->rxqs) { | |
6031 | err = -ENOMEM; | |
6032 | goto err_free_percpu; | |
6033 | } | |
6034 | ||
6035 | /* Allocate and initialize Rx queue for this port */ | |
09f83975 | 6036 | for (queue = 0; queue < port->nrxqs; queue++) { |
3f518509 MW |
6037 | struct mvpp2_rx_queue *rxq; |
6038 | ||
6039 | /* Map physical Rx queue to port's logical Rx queue */ | |
6040 | rxq = devm_kzalloc(dev, sizeof(*rxq), GFP_KERNEL); | |
d82b0c21 JZ |
6041 | if (!rxq) { |
6042 | err = -ENOMEM; | |
3f518509 | 6043 | goto err_free_percpu; |
d82b0c21 | 6044 | } |
3f518509 MW |
6045 | /* Map this Rx queue to a physical queue */ |
6046 | rxq->id = port->first_rxq + queue; | |
6047 | rxq->port = port->id; | |
6048 | rxq->logic_rxq = queue; | |
6049 | ||
6050 | port->rxqs[queue] = rxq; | |
6051 | } | |
6052 | ||
591f4cfa | 6053 | mvpp2_rx_irqs_setup(port); |
3f518509 MW |
6054 | |
6055 | /* Create Rx descriptor rings */ | |
09f83975 | 6056 | for (queue = 0; queue < port->nrxqs; queue++) { |
3f518509 MW |
6057 | struct mvpp2_rx_queue *rxq = port->rxqs[queue]; |
6058 | ||
6059 | rxq->size = port->rx_ring_size; | |
6060 | rxq->pkts_coal = MVPP2_RX_COAL_PKTS; | |
6061 | rxq->time_coal = MVPP2_RX_COAL_USEC; | |
6062 | } | |
6063 | ||
6064 | mvpp2_ingress_disable(port); | |
6065 | ||
6066 | /* Port default configuration */ | |
6067 | mvpp2_defaults_set(port); | |
6068 | ||
6069 | /* Port's classifier configuration */ | |
6070 | mvpp2_cls_oversize_rxq_set(port); | |
6071 | mvpp2_cls_port_config(port); | |
6072 | ||
0a8a8000 | 6073 | if (mvpp22_rss_is_supported(port)) |
6310f77d | 6074 | mvpp22_port_rss_init(port); |
e6e21c02 | 6075 | |
3f518509 MW |
6076 | /* Provide an initial Rx packet size */ |
6077 | port->pkt_size = MVPP2_RX_PKT_SIZE(port->dev->mtu); | |
6078 | ||
6079 | /* Initialize pools for swf */ | |
6080 | err = mvpp2_swf_bm_pool_init(port); | |
6081 | if (err) | |
6082 | goto err_free_percpu; | |
6083 | ||
9bea6897 MC |
6084 | /* Clear all port stats */ |
6085 | mvpp2_read_stats(port); | |
6086 | memset(port->ethtool_stats, 0, | |
6087 | MVPP2_N_ETHTOOL_STATS(port->ntxqs, port->nrxqs) * sizeof(u64)); | |
6410c139 | 6088 | |
3f518509 MW |
6089 | return 0; |
6090 | ||
6091 | err_free_percpu: | |
09f83975 | 6092 | for (queue = 0; queue < port->ntxqs; queue++) { |
3f518509 MW |
6093 | if (!port->txqs[queue]) |
6094 | continue; | |
6095 | free_percpu(port->txqs[queue]->pcpu); | |
6096 | } | |
6097 | return err; | |
6098 | } | |
6099 | ||
a9aac385 AT |
6100 | static bool mvpp22_port_has_legacy_tx_irqs(struct device_node *port_node, |
6101 | unsigned long *flags) | |
6102 | { | |
6103 | char *irqs[5] = { "rx-shared", "tx-cpu0", "tx-cpu1", "tx-cpu2", | |
6104 | "tx-cpu3" }; | |
6105 | int i; | |
6106 | ||
6107 | for (i = 0; i < 5; i++) | |
6108 | if (of_property_match_string(port_node, "interrupt-names", | |
6109 | irqs[i]) < 0) | |
6110 | return false; | |
6111 | ||
6112 | *flags |= MVPP2_F_DT_COMPAT; | |
6113 | return true; | |
6114 | } | |
6115 | ||
6116 | /* Checks if the port dt description has the required Tx interrupts: | |
6117 | * - PPv2.1: there are no such interrupts. | |
6af27a1d | 6118 | * - PPv2.2 and PPv2.3: |
a9aac385 AT |
6119 | * - The old DTs have: "rx-shared", "tx-cpuX" with X in [0..3]
6120 | * - The new ones have: "hifX" with X in [0..8] | |
6121 | * | |
6122 | * All those variants are supported to keep the backward compatibility. | |
213f428f | 6123 | */ |
a9aac385 AT |
6124 | static bool mvpp2_port_has_irqs(struct mvpp2 *priv, |
6125 | struct device_node *port_node, | |
6126 | unsigned long *flags) | |
213f428f | 6127 | { |
a9aac385 AT |
6128 | char name[5]; |
6129 | int i; | |
213f428f | 6130 | |
fd4a1056 AT |
6131 | /* ACPI */ |
6132 | if (!port_node) | |
6133 | return true; | |
6134 | ||
213f428f TP |
6135 | if (priv->hw_version == MVPP21) |
6136 | return false; | |
6137 | ||
a9aac385 AT |
6138 | if (mvpp22_port_has_legacy_tx_irqs(port_node, flags)) |
6139 | return true; | |
6140 | ||
6141 | for (i = 0; i < MVPP2_MAX_THREADS; i++) { | |
6142 | snprintf(name, 5, "hif%d", i); | |
6143 | if (of_property_match_string(port_node, "interrupt-names", | |
6144 | name) < 0) | |
213f428f TP |
6145 | return false; |
6146 | } | |
6147 | ||
6148 | return true; | |
6149 | } | |
6150 | ||
cc4342f6 MR |
6151 | static int mvpp2_port_copy_mac_addr(struct net_device *dev, struct mvpp2 *priv, |
6152 | struct fwnode_handle *fwnode, | |
6153 | char **mac_from) | |
3ba8c81e AT |
6154 | { |
6155 | struct mvpp2_port *port = netdev_priv(dev); | |
6156 | char hw_mac_addr[ETH_ALEN] = {0}; | |
24812221 | 6157 | char fw_mac_addr[ETH_ALEN]; |
cc4342f6 | 6158 | int ret; |
3ba8c81e | 6159 | |
0a14501e | 6160 | if (!fwnode_get_mac_address(fwnode, fw_mac_addr)) { |
24812221 | 6161 | *mac_from = "firmware node"; |
f3956ebb | 6162 | eth_hw_addr_set(dev, fw_mac_addr); |
cc4342f6 | 6163 | return 0; |
688cbaf2 | 6164 | } |
d2a6e48e | 6165 | |
688cbaf2 AT |
6166 | if (priv->hw_version == MVPP21) { |
6167 | mvpp21_get_mac_address(port, hw_mac_addr); | |
6168 | if (is_valid_ether_addr(hw_mac_addr)) { | |
6169 | *mac_from = "hardware"; | |
f3956ebb | 6170 | eth_hw_addr_set(dev, hw_mac_addr); |
cc4342f6 | 6171 | return 0; |
688cbaf2 | 6172 | } |
3ba8c81e | 6173 | } |
688cbaf2 | 6174 | |
7a74c126 | 6175 | /* Only valid on OF enabled platforms */ |
cc4342f6 MR |
6176 | ret = of_get_mac_address_nvmem(to_of_node(fwnode), fw_mac_addr); |
6177 | if (ret == -EPROBE_DEFER) | |
6178 | return ret; | |
6179 | if (!ret) { | |
7a74c126 MR |
6180 | *mac_from = "nvmem cell"; |
6181 | eth_hw_addr_set(dev, fw_mac_addr); | |
cc4342f6 | 6182 | return 0; |
7a74c126 MR |
6183 | } |
6184 | ||
688cbaf2 AT |
6185 | *mac_from = "random"; |
6186 | eth_hw_addr_random(dev); | |
cc4342f6 MR |
6187 | |
6188 | return 0; | |
3ba8c81e AT |
6189 | } |
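/* Address selection order implemented above: firmware node property
 * first, then (PPv2.1 only) the address already programmed into the
 * hardware, then an nvmem cell, and a random address as the last resort.
 */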
6190 | ||
6c2b49eb RK |
6191 | static struct mvpp2_port *mvpp2_phylink_to_port(struct phylink_config *config) |
6192 | { | |
6193 | return container_of(config, struct mvpp2_port, phylink_config); | |
6194 | } | |
6195 | ||
cff05632 | 6196 | static struct mvpp2_port *mvpp2_pcs_xlg_to_port(struct phylink_pcs *pcs) |
94bfe438 | 6197 | { |
cff05632 RKO |
6198 | return container_of(pcs, struct mvpp2_port, pcs_xlg); |
6199 | } | |
6200 | ||
6201 | static struct mvpp2_port *mvpp2_pcs_gmac_to_port(struct phylink_pcs *pcs) | |
6202 | { | |
6203 | return container_of(pcs, struct mvpp2_port, pcs_gmac); | |
94bfe438 RK |
6204 | } |
6205 | ||
c596d2cd | 6206 | static void mvpp2_xlg_pcs_get_state(struct phylink_pcs *pcs, |
c6739623 | 6207 | unsigned int neg_mode, |
c596d2cd | 6208 | struct phylink_link_state *state) |
94bfe438 | 6209 | { |
cff05632 | 6210 | struct mvpp2_port *port = mvpp2_pcs_xlg_to_port(pcs); |
94bfe438 RK |
6211 | u32 val; |
6212 | ||
4043ec70 MB |
6213 | if (port->phy_interface == PHY_INTERFACE_MODE_5GBASER) |
6214 | state->speed = SPEED_5000; | |
6215 | else | |
6216 | state->speed = SPEED_10000; | |
94bfe438 RK |
6217 | state->duplex = 1; |
6218 | state->an_complete = 1; | |
6219 | ||
6220 | val = readl(port->base + MVPP22_XLG_STATUS); | |
6221 | state->link = !!(val & MVPP22_XLG_STATUS_LINK_UP); | |
6222 | ||
6223 | state->pause = 0; | |
6224 | val = readl(port->base + MVPP22_XLG_CTRL0_REG); | |
6225 | if (val & MVPP22_XLG_CTRL0_TX_FLOW_CTRL_EN) | |
6226 | state->pause |= MLO_PAUSE_TX; | |
6227 | if (val & MVPP22_XLG_CTRL0_RX_FLOW_CTRL_EN) | |
6228 | state->pause |= MLO_PAUSE_RX; | |
6229 | } | |
6230 | ||
d5b16264 | 6231 | static int mvpp2_xlg_pcs_config(struct phylink_pcs *pcs, unsigned int neg_mode, |
c596d2cd RK |
6232 | phy_interface_t interface, |
6233 | const unsigned long *advertising, | |
6234 | bool permit_pause_to_mac) | |
6235 | { | |
6236 | return 0; | |
6237 | } | |
6238 | ||
6239 | static const struct phylink_pcs_ops mvpp2_phylink_xlg_pcs_ops = { | |
6240 | .pcs_get_state = mvpp2_xlg_pcs_get_state, | |
6241 | .pcs_config = mvpp2_xlg_pcs_config, | |
6242 | }; | |
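/* The XLG PCS runs fixed-configuration serdes modes (5G/10G, full
 * duplex), so pcs_config has nothing to program and simply reports
 * success; the live link state is read back through pcs_get_state.
 */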
6243 | ||
d4169f0c RKO |
6244 | static unsigned int mvpp2_gmac_pcs_inband_caps(struct phylink_pcs *pcs, |
6245 | phy_interface_t interface) | |
85e3e0eb | 6246 | { |
d4169f0c | 6247 | /* When operating in an 802.3z mode, we must have AN enabled: |
85e3e0eb RKO |
6248 | * Bit 2 Field InBandAnEn In-band Auto-Negotiation enable. ... |
6249 | * When <PortType> = 1 (1000BASE-X) this field must be set to 1. | |
d4169f0c | 6250 | * Therefore, inband is "required". |
85e3e0eb | 6251 | */ |
d4169f0c RKO |
6252 | if (phy_interface_mode_is_8023z(interface)) |
6253 | return LINK_INBAND_ENABLE; | |
85e3e0eb | 6254 | |
d4169f0c RKO |
6255 | /* SGMII and RGMII can be configured to use inband signalling of the |
6256 | * AN result. Indicate these as "possible". | |
6257 | */ | |
6258 | if (interface == PHY_INTERFACE_MODE_SGMII || | |
6259 | phy_interface_mode_is_rgmii(interface)) | |
6260 | return LINK_INBAND_DISABLE | LINK_INBAND_ENABLE; | |
6261 | ||
6262 | /* For any other modes, indicate that inband is not supported. */ | |
6263 | return LINK_INBAND_DISABLE; | |
85e3e0eb RKO |
6264 | } |
6265 | ||
c596d2cd | 6266 | static void mvpp2_gmac_pcs_get_state(struct phylink_pcs *pcs, |
c6739623 | 6267 | unsigned int neg_mode, |
94bfe438 RK |
6268 | struct phylink_link_state *state) |
6269 | { | |
cff05632 | 6270 | struct mvpp2_port *port = mvpp2_pcs_gmac_to_port(pcs); |
94bfe438 RK |
6271 | u32 val; |
6272 | ||
6273 | val = readl(port->base + MVPP2_GMAC_STATUS0); | |
6274 | ||
6275 | state->an_complete = !!(val & MVPP2_GMAC_STATUS0_AN_COMPLETE); | |
6276 | state->link = !!(val & MVPP2_GMAC_STATUS0_LINK_UP); | |
6277 | state->duplex = !!(val & MVPP2_GMAC_STATUS0_FULL_DUPLEX); | |
6278 | ||
6279 | switch (port->phy_interface) { | |
6280 | case PHY_INTERFACE_MODE_1000BASEX: | |
6281 | state->speed = SPEED_1000; | |
6282 | break; | |
6283 | case PHY_INTERFACE_MODE_2500BASEX: | |
6284 | state->speed = SPEED_2500; | |
6285 | break; | |
6286 | default: | |
6287 | if (val & MVPP2_GMAC_STATUS0_GMII_SPEED) | |
6288 | state->speed = SPEED_1000; | |
6289 | else if (val & MVPP2_GMAC_STATUS0_MII_SPEED) | |
6290 | state->speed = SPEED_100; | |
6291 | else | |
6292 | state->speed = SPEED_10; | |
6293 | } | |
6294 | ||
6295 | state->pause = 0; | |
6296 | if (val & MVPP2_GMAC_STATUS0_RX_PAUSE) | |
6297 | state->pause |= MLO_PAUSE_RX; | |
6298 | if (val & MVPP2_GMAC_STATUS0_TX_PAUSE) | |
6299 | state->pause |= MLO_PAUSE_TX; | |
6300 | } | |
6301 | ||
d5b16264 | 6302 | static int mvpp2_gmac_pcs_config(struct phylink_pcs *pcs, unsigned int neg_mode, |
94bfe438 RK |
6303 | phy_interface_t interface, |
6304 | const unsigned long *advertising, | |
6305 | bool permit_pause_to_mac) | |
6306 | { | |
cff05632 | 6307 | struct mvpp2_port *port = mvpp2_pcs_gmac_to_port(pcs); |
94bfe438 RK |
6308 | u32 mask, val, an, old_an, changed; |
6309 | ||
6310 | mask = MVPP2_GMAC_IN_BAND_AUTONEG_BYPASS | | |
6311 | MVPP2_GMAC_IN_BAND_AUTONEG | | |
6312 | MVPP2_GMAC_AN_SPEED_EN | | |
6313 | MVPP2_GMAC_FLOW_CTRL_AUTONEG | | |
6314 | MVPP2_GMAC_AN_DUPLEX_EN; | |
6315 | ||
d5b16264 | 6316 | if (neg_mode == PHYLINK_PCS_NEG_INBAND_ENABLED) { |
94bfe438 RK |
6317 | mask |= MVPP2_GMAC_CONFIG_MII_SPEED | |
6318 | MVPP2_GMAC_CONFIG_GMII_SPEED | | |
6319 | MVPP2_GMAC_CONFIG_FULL_DUPLEX; | |
6320 | val = MVPP2_GMAC_IN_BAND_AUTONEG; | |
6321 | ||
6322 | if (interface == PHY_INTERFACE_MODE_SGMII) { | |
6323 | /* SGMII mode receives the speed and duplex from PHY */ | |
6324 | val |= MVPP2_GMAC_AN_SPEED_EN | | |
6325 | MVPP2_GMAC_AN_DUPLEX_EN; | |
6326 | } else { | |
6327 | /* 802.3z mode has fixed speed and duplex */ | |
6328 | val |= MVPP2_GMAC_CONFIG_GMII_SPEED | | |
6329 | MVPP2_GMAC_CONFIG_FULL_DUPLEX; | |
6330 | ||
6331 | /* The FLOW_CTRL_AUTONEG bit selects whether the GMAC pause modes
6332 | * are controlled automatically by the hardware or manually via the
6333 | * bits in MVPP22_GMAC_CTRL_4_REG.
6334 | */ | |
6335 | if (permit_pause_to_mac) | |
6336 | val |= MVPP2_GMAC_FLOW_CTRL_AUTONEG; | |
6337 | ||
6338 | /* Configure advertisement bits */ | |
6339 | mask |= MVPP2_GMAC_FC_ADV_EN | MVPP2_GMAC_FC_ADV_ASM_EN; | |
6340 | if (phylink_test(advertising, Pause)) | |
6341 | val |= MVPP2_GMAC_FC_ADV_EN; | |
6342 | if (phylink_test(advertising, Asym_Pause)) | |
6343 | val |= MVPP2_GMAC_FC_ADV_ASM_EN; | |
6344 | } | |
6345 | } else { | |
6346 | val = 0; | |
6347 | } | |
6348 | ||
6349 | old_an = an = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG); | |
6350 | an = (an & ~mask) | val; | |
6351 | changed = an ^ old_an; | |
6352 | if (changed) | |
6353 | writel(an, port->base + MVPP2_GMAC_AUTONEG_CONFIG); | |
6354 | ||
6355 | /* We are only interested in the advertisement bits changing */ | |
6356 | return changed & (MVPP2_GMAC_FC_ADV_EN | MVPP2_GMAC_FC_ADV_ASM_EN); | |
6357 | } | |
6358 | ||
c596d2cd | 6359 | static void mvpp2_gmac_pcs_an_restart(struct phylink_pcs *pcs) |
94bfe438 | 6360 | { |
cff05632 | 6361 | struct mvpp2_port *port = mvpp2_pcs_gmac_to_port(pcs); |
94bfe438 RK |
6362 | u32 val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG); |
6363 | ||
6364 | writel(val | MVPP2_GMAC_IN_BAND_RESTART_AN, | |
6365 | port->base + MVPP2_GMAC_AUTONEG_CONFIG); | |
6366 | writel(val & ~MVPP2_GMAC_IN_BAND_RESTART_AN, | |
6367 | port->base + MVPP2_GMAC_AUTONEG_CONFIG); | |
6368 | } | |
6369 | ||
c596d2cd | 6370 | static const struct phylink_pcs_ops mvpp2_phylink_gmac_pcs_ops = { |
d4169f0c | 6371 | .pcs_inband_caps = mvpp2_gmac_pcs_inband_caps, |
c596d2cd RK |
6372 | .pcs_get_state = mvpp2_gmac_pcs_get_state, |
6373 | .pcs_config = mvpp2_gmac_pcs_config, | |
6374 | .pcs_an_restart = mvpp2_gmac_pcs_an_restart, | |
94bfe438 RK |
6375 | }; |
6376 | ||
4bb04326 AT |
6377 | static void mvpp2_xlg_config(struct mvpp2_port *port, unsigned int mode, |
6378 | const struct phylink_link_state *state) | |
6379 | { | |
bd45f644 | 6380 | u32 val; |
649e51d5 | 6381 | |
bd45f644 | 6382 | mvpp2_modify(port->base + MVPP22_XLG_CTRL0_REG, |
63d78cc9 RK |
6383 | MVPP22_XLG_CTRL0_MAC_RESET_DIS, |
6384 | MVPP22_XLG_CTRL0_MAC_RESET_DIS); | |
bd45f644 RK |
6385 | mvpp2_modify(port->base + MVPP22_XLG_CTRL4_REG, |
6386 | MVPP22_XLG_CTRL4_MACMODSELECT_GMAC | | |
6387 | MVPP22_XLG_CTRL4_EN_IDLE_CHECK | | |
6388 | MVPP22_XLG_CTRL4_FWD_FC | MVPP22_XLG_CTRL4_FWD_PFC, | |
6389 | MVPP22_XLG_CTRL4_FWD_FC | MVPP22_XLG_CTRL4_FWD_PFC); | |
6390 | ||
6391 | /* Wait for reset to deassert */ | |
6392 | do { | |
6393 | val = readl(port->base + MVPP22_XLG_CTRL0_REG); | |
6394 | } while (!(val & MVPP22_XLG_CTRL0_MAC_RESET_DIS)); | |
4bb04326 AT |
6395 | } |
6396 | ||
6397 | static void mvpp2_gmac_config(struct mvpp2_port *port, unsigned int mode, | |
6398 | const struct phylink_link_state *state) | |
6399 | { | |
d14e078f RK |
6400 | u32 old_ctrl0, ctrl0; |
6401 | u32 old_ctrl2, ctrl2; | |
6402 | u32 old_ctrl4, ctrl4; | |
4bb04326 | 6403 | |
d14e078f RK |
6404 | old_ctrl0 = ctrl0 = readl(port->base + MVPP2_GMAC_CTRL_0_REG); |
6405 | old_ctrl2 = ctrl2 = readl(port->base + MVPP2_GMAC_CTRL_2_REG); | |
6406 | old_ctrl4 = ctrl4 = readl(port->base + MVPP22_GMAC_CTRL_4_REG); | |
4bb04326 | 6407 | |
4bb04326 | 6408 | ctrl0 &= ~MVPP2_GMAC_PORT_TYPE_MASK; |
262412d5 | 6409 | ctrl2 &= ~(MVPP2_GMAC_INBAND_AN_MASK | MVPP2_GMAC_PCS_ENABLE_MASK | MVPP2_GMAC_FLOW_CTRL_MASK); |
4bb04326 | 6410 | |
388ca27f | 6411 | /* Configure port type */ |
4a4cec72 | 6412 | if (phy_interface_mode_is_8023z(state->interface)) { |
388ca27f RK |
6413 | ctrl2 |= MVPP2_GMAC_PCS_ENABLE_MASK; |
6414 | ctrl4 &= ~MVPP22_CTRL4_EXT_PIN_GMII_SEL; | |
6415 | ctrl4 |= MVPP22_CTRL4_SYNC_BYPASS_DIS | | |
6416 | MVPP22_CTRL4_DP_CLK_SEL | | |
6417 | MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE; | |
6418 | } else if (state->interface == PHY_INTERFACE_MODE_SGMII) { | |
6419 | ctrl2 |= MVPP2_GMAC_PCS_ENABLE_MASK | MVPP2_GMAC_INBAND_AN_MASK; | |
6420 | ctrl4 &= ~MVPP22_CTRL4_EXT_PIN_GMII_SEL; | |
6421 | ctrl4 |= MVPP22_CTRL4_SYNC_BYPASS_DIS | | |
6422 | MVPP22_CTRL4_DP_CLK_SEL | | |
6423 | MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE; | |
6424 | } else if (phy_interface_mode_is_rgmii(state->interface)) { | |
6425 | ctrl4 &= ~MVPP22_CTRL4_DP_CLK_SEL; | |
6426 | ctrl4 |= MVPP22_CTRL4_EXT_PIN_GMII_SEL | | |
6427 | MVPP22_CTRL4_SYNC_BYPASS_DIS | | |
6428 | MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE; | |
d97c9f4a | 6429 | } |
4bb04326 | 6430 | |
388ca27f RK |
6431 | /* Configure negotiation style */ |
6432 | if (!phylink_autoneg_inband(mode)) { | |
24cb72df RK |
6433 | /* Phy or fixed speed - no in-band AN, nothing to do, leave the |
6434 | * configured speed, duplex and flow control as-is. | |
6435 | */ | |
388ca27f RK |
6436 | } else if (state->interface == PHY_INTERFACE_MODE_SGMII) { |
6437 | /* SGMII in-band mode receives the speed and duplex from | |
6438 | * the PHY. Flow control information is not received. */ | |
388ca27f RK |
6439 | } else if (phy_interface_mode_is_8023z(state->interface)) { |
6440 | /* 1000BaseX and 2500BaseX ports cannot negotiate speed nor can | |
6441 | * they negotiate duplex: they are always operating with a fixed | |
6442 | * speed of 1000/2500Mbps in full duplex, so force 1000/2500 | |
6443 | * speed and full duplex here. | |
6444 | */ | |
6445 | ctrl0 |= MVPP2_GMAC_PORT_TYPE_MASK; | |
4bb04326 AT |
6446 | } |
6447 | ||
d14e078f RK |
6448 | if (old_ctrl0 != ctrl0) |
6449 | writel(ctrl0, port->base + MVPP2_GMAC_CTRL_0_REG); | |
6450 | if (old_ctrl2 != ctrl2) | |
6451 | writel(ctrl2, port->base + MVPP2_GMAC_CTRL_2_REG); | |
6452 | if (old_ctrl4 != ctrl4) | |
6453 | writel(ctrl4, port->base + MVPP22_GMAC_CTRL_4_REG); | |
4bb04326 AT |
6454 | } |
6455 | ||
cff05632 RKO |
6456 | static struct phylink_pcs *mvpp2_select_pcs(struct phylink_config *config, |
6457 | phy_interface_t interface) | |
6458 | { | |
6459 | struct mvpp2_port *port = mvpp2_phylink_to_port(config); | |
6460 | ||
6461 | /* Select the appropriate PCS operations depending on the | |
6462 | * configured interface mode. We will only switch to a mode | |
6463 | * that has already passed the validate() checks.
6464 | */ | |
6465 | if (mvpp2_is_xlg(interface)) | |
6466 | return &port->pcs_xlg; | |
6467 | else | |
6468 | return &port->pcs_gmac; | |
6469 | } | |
6470 | ||
6471 | static int mvpp2_mac_prepare(struct phylink_config *config, unsigned int mode, | |
6472 | phy_interface_t interface) | |
4bb04326 | 6473 | { |
6c2b49eb | 6474 | struct mvpp2_port *port = mvpp2_phylink_to_port(config); |
4bb04326 AT |
6475 | |
6476 | /* Check for invalid configuration */ | |
bfe301eb | 6477 | if (mvpp2_is_xlg(interface) && port->gop_id != 0) { |
6c2b49eb | 6478 | netdev_err(port->dev, "Invalid mode on %s\n", port->dev->name); |
bfe301eb | 6479 | return -EINVAL; |
4bb04326 AT |
6480 | } |
6481 | ||
fefeae73 RK |
6482 | if (port->phy_interface != interface || |
6483 | phylink_autoneg_inband(mode)) { | |
6484 | /* Force the link down when changing the interface or if in | |
6485 | * in-band mode to ensure we do not change the configuration | |
6486 | * while the hardware is indicating link is up. We force both | |
6487 | * XLG and GMAC down to ensure that they're both in a known | |
6488 | * state. | |
6489 | */ | |
6490 | mvpp2_modify(port->base + MVPP2_GMAC_AUTONEG_CONFIG, | |
6491 | MVPP2_GMAC_FORCE_LINK_PASS | | |
6492 | MVPP2_GMAC_FORCE_LINK_DOWN, | |
6493 | MVPP2_GMAC_FORCE_LINK_DOWN); | |
6494 | ||
6495 | if (mvpp2_port_supports_xlg(port)) | |
6496 | mvpp2_modify(port->base + MVPP22_XLG_CTRL0_REG, | |
6497 | MVPP22_XLG_CTRL0_FORCE_LINK_PASS | | |
6498 | MVPP22_XLG_CTRL0_FORCE_LINK_DOWN, | |
6499 | MVPP22_XLG_CTRL0_FORCE_LINK_DOWN); | |
6500 | } | |
6501 | ||
4bb04326 AT |
6502 | /* Make sure the port is disabled when reconfiguring the mode */ |
6503 | mvpp2_port_disable(port); | |
1970ee96 | 6504 | |
82b1c8fa RK |
6505 | if (port->phy_interface != interface) { |
6506 | /* Place GMAC into reset */ | |
6507 | mvpp2_modify(port->base + MVPP2_GMAC_CTRL_2_REG, | |
6508 | MVPP2_GMAC_PORT_RESET_MASK, | |
6509 | MVPP2_GMAC_PORT_RESET_MASK); | |
6510 | ||
f704177e | 6511 | if (port->priv->hw_version >= MVPP22) { |
82b1c8fa | 6512 | mvpp22_gop_mask_irq(port); |
4bb04326 | 6513 | |
82b1c8fa | 6514 | phy_power_off(port->comphy); |
bb7bbb6e MB |
6515 | |
6516 | /* Reconfigure the serdes lanes */ | |
6517 | mvpp22_mode_reconfigure(port, interface); | |
82b1c8fa | 6518 | } |
4bb04326 AT |
6519 | } |
6520 | ||
bfe301eb RK |
6521 | return 0; |
6522 | } | |
6523 | ||
6524 | static void mvpp2_mac_config(struct phylink_config *config, unsigned int mode, | |
6525 | const struct phylink_link_state *state) | |
6526 | { | |
6527 | struct mvpp2_port *port = mvpp2_phylink_to_port(config); | |
6528 | ||
4bb04326 | 6529 | /* mac (re)configuration */ |
1d9b041e | 6530 | if (mvpp2_is_xlg(state->interface)) |
4bb04326 AT |
6531 | mvpp2_xlg_config(port, mode, state); |
6532 | else if (phy_interface_mode_is_rgmii(state->interface) || | |
4a4cec72 RK |
6533 | phy_interface_mode_is_8023z(state->interface) || |
6534 | state->interface == PHY_INTERFACE_MODE_SGMII) | |
4bb04326 AT |
6535 | mvpp2_gmac_config(port, mode, state); |
6536 | ||
6537 | if (port->priv->hw_version == MVPP21 && port->flags & MVPP2_F_LOOPBACK) | |
6538 | mvpp2_port_loopback_set(port, state); | |
bfe301eb RK |
6539 | } |
6540 | ||
6541 | static int mvpp2_mac_finish(struct phylink_config *config, unsigned int mode, | |
6542 | phy_interface_t interface) | |
6543 | { | |
6544 | struct mvpp2_port *port = mvpp2_phylink_to_port(config); | |
6545 | ||
f704177e | 6546 | if (port->priv->hw_version >= MVPP22 && |
bfe301eb RK |
6547 | port->phy_interface != interface) { |
6548 | port->phy_interface = interface; | |
4bb04326 | 6549 | |
bfe301eb | 6550 | /* Unmask interrupts */ |
bf2fa125 | 6551 | mvpp22_gop_unmask_irq(port); |
bfe301eb | 6552 | } |
bf2fa125 | 6553 | |
82b1c8fa RK |
6554 | if (!mvpp2_is_xlg(interface)) { |
6555 | /* Release GMAC reset and wait */ | |
6556 | mvpp2_modify(port->base + MVPP2_GMAC_CTRL_2_REG, | |
6557 | MVPP2_GMAC_PORT_RESET_MASK, 0); | |
6558 | ||
6559 | while (readl(port->base + MVPP2_GMAC_CTRL_2_REG) & | |
6560 | MVPP2_GMAC_PORT_RESET_MASK) | |
6561 | continue; | |
6562 | } | |
6563 | ||
41948ccb | 6564 | mvpp2_port_enable(port); |
bfe301eb | 6565 | |
fefeae73 RK |
6566 | /* Allow the link to come up if in in-band mode, otherwise the |
6567 | * link is forced via mac_link_down()/mac_link_up() | |
6568 | */ | |
6569 | if (phylink_autoneg_inband(mode)) { | |
6570 | if (mvpp2_is_xlg(interface)) | |
6571 | mvpp2_modify(port->base + MVPP22_XLG_CTRL0_REG, | |
6572 | MVPP22_XLG_CTRL0_FORCE_LINK_PASS | | |
6573 | MVPP22_XLG_CTRL0_FORCE_LINK_DOWN, 0); | |
6574 | else | |
6575 | mvpp2_modify(port->base + MVPP2_GMAC_AUTONEG_CONFIG, | |
6576 | MVPP2_GMAC_FORCE_LINK_PASS | | |
6577 | MVPP2_GMAC_FORCE_LINK_DOWN, 0); | |
6578 | } | |
6579 | ||
bfe301eb | 6580 | return 0; |
4bb04326 AT |
6581 | } |
6582 | ||
91a208f2 RK |
6583 | static void mvpp2_mac_link_up(struct phylink_config *config, |
6584 | struct phy_device *phy, | |
6585 | unsigned int mode, phy_interface_t interface, | |
6586 | int speed, int duplex, | |
6587 | bool tx_pause, bool rx_pause) | |
4bb04326 | 6588 | { |
6c2b49eb | 6589 | struct mvpp2_port *port = mvpp2_phylink_to_port(config); |
4bb04326 | 6590 | u32 val; |
76055831 | 6591 | int i; |
4bb04326 | 6592 | |
24cb72df RK |
6593 | if (mvpp2_is_xlg(interface)) { |
6594 | if (!phylink_autoneg_inband(mode)) { | |
63d78cc9 RK |
6595 | val = MVPP22_XLG_CTRL0_FORCE_LINK_PASS; |
6596 | if (tx_pause) | |
6597 | val |= MVPP22_XLG_CTRL0_TX_FLOW_CTRL_EN; | |
6598 | if (rx_pause) | |
6599 | val |= MVPP22_XLG_CTRL0_RX_FLOW_CTRL_EN; | |
6600 | ||
bd45f644 RK |
6601 | mvpp2_modify(port->base + MVPP22_XLG_CTRL0_REG, |
6602 | MVPP22_XLG_CTRL0_FORCE_LINK_DOWN | | |
63d78cc9 RK |
6603 | MVPP22_XLG_CTRL0_FORCE_LINK_PASS | |
6604 | MVPP22_XLG_CTRL0_TX_FLOW_CTRL_EN | | |
6605 | MVPP22_XLG_CTRL0_RX_FLOW_CTRL_EN, val); | |
24cb72df RK |
6606 | } |
6607 | } else { | |
6608 | if (!phylink_autoneg_inband(mode)) { | |
bd45f644 | 6609 | val = MVPP2_GMAC_FORCE_LINK_PASS; |
24cb72df RK |
6610 | |
6611 | if (speed == SPEED_1000 || speed == SPEED_2500) | |
6612 | val |= MVPP2_GMAC_CONFIG_GMII_SPEED; | |
6613 | else if (speed == SPEED_100) | |
6614 | val |= MVPP2_GMAC_CONFIG_MII_SPEED; | |
6615 | ||
6616 | if (duplex == DUPLEX_FULL) | |
6617 | val |= MVPP2_GMAC_CONFIG_FULL_DUPLEX; | |
6618 | ||
bd45f644 RK |
6619 | mvpp2_modify(port->base + MVPP2_GMAC_AUTONEG_CONFIG, |
6620 | MVPP2_GMAC_FORCE_LINK_DOWN | | |
6621 | MVPP2_GMAC_FORCE_LINK_PASS | | |
6622 | MVPP2_GMAC_CONFIG_MII_SPEED | | |
6623 | MVPP2_GMAC_CONFIG_GMII_SPEED | | |
6624 | MVPP2_GMAC_CONFIG_FULL_DUPLEX, val); | |
1970ee96 | 6625 | } |
24cb72df RK |
6626 | |
6627 | /* We can always update the flow control enable bits; | |
6628 | * these will only be effective if flow control AN | |
6629 | * (MVPP2_GMAC_FLOW_CTRL_AUTONEG) is disabled. | |
6630 | */ | |
bd45f644 | 6631 | val = 0; |
24cb72df RK |
6632 | if (tx_pause) |
6633 | val |= MVPP22_CTRL4_TX_FC_EN; | |
6634 | if (rx_pause) | |
6635 | val |= MVPP22_CTRL4_RX_FC_EN; | |
bd45f644 RK |
6636 | |
6637 | mvpp2_modify(port->base + MVPP22_GMAC_CTRL_4_REG, | |
6638 | MVPP22_CTRL4_RX_FC_EN | MVPP22_CTRL4_TX_FC_EN, | |
6639 | val); | |
4bb04326 AT |
6640 | } |
6641 | ||
76055831 SC |
6642 | if (port->priv->global_tx_fc) { |
6643 | port->tx_fc = tx_pause; | |
6644 | if (tx_pause) | |
6645 | mvpp2_rxq_enable_fc(port); | |
6646 | else | |
6647 | mvpp2_rxq_disable_fc(port); | |
6648 | if (port->priv->percpu_pools) { | |
6649 | for (i = 0; i < port->nrxqs; i++) | |
6650 | mvpp2_bm_pool_update_fc(port, &port->priv->bm_pools[i], tx_pause); | |
6651 | } else { | |
6652 | mvpp2_bm_pool_update_fc(port, port->pool_long, tx_pause); | |
6653 | mvpp2_bm_pool_update_fc(port, port->pool_short, tx_pause); | |
6654 | } | |
aca0e235 SC |
6655 | if (port->priv->hw_version == MVPP23) |
6656 | mvpp23_rx_fifo_fc_en(port->priv, port->id, tx_pause); | |
76055831 SC |
6657 | } |
6658 | ||
4bb04326 AT |
6659 | mvpp2_port_enable(port); |
6660 | ||
6661 | mvpp2_egress_enable(port); | |
6662 | mvpp2_ingress_enable(port); | |
6c2b49eb | 6663 | netif_tx_wake_all_queues(port->dev); |
4bb04326 AT |
6664 | } |
6665 | ||
44cc27e4 IC |
6666 | static void mvpp2_mac_link_down(struct phylink_config *config, |
6667 | unsigned int mode, phy_interface_t interface) | |
4bb04326 | 6668 | { |
6c2b49eb | 6669 | struct mvpp2_port *port = mvpp2_phylink_to_port(config); |
4bb04326 AT |
6670 | u32 val; |
6671 | ||
1970ee96 AT |
6672 | if (!phylink_autoneg_inband(mode)) { |
6673 | if (mvpp2_is_xlg(interface)) { | |
6674 | val = readl(port->base + MVPP22_XLG_CTRL0_REG); | |
6675 | val &= ~MVPP22_XLG_CTRL0_FORCE_LINK_PASS; | |
6676 | val |= MVPP22_XLG_CTRL0_FORCE_LINK_DOWN; | |
6677 | writel(val, port->base + MVPP22_XLG_CTRL0_REG); | |
6678 | } else { | |
6679 | val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG); | |
6680 | val &= ~MVPP2_GMAC_FORCE_LINK_PASS; | |
6681 | val |= MVPP2_GMAC_FORCE_LINK_DOWN; | |
6682 | writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG); | |
6683 | } | |
4bb04326 AT |
6684 | } |
6685 | ||
6c2b49eb | 6686 | netif_tx_stop_all_queues(port->dev); |
4bb04326 AT |
6687 | mvpp2_egress_disable(port); |
6688 | mvpp2_ingress_disable(port); | |
6689 | ||
4bb04326 AT |
6690 | mvpp2_port_disable(port); |
6691 | } | |
6692 | ||
b53b1478 RKO |
6693 | static void mvpp2_mac_disable_tx_lpi(struct phylink_config *config) |
6694 | { | |
6695 | struct mvpp2_port *port = mvpp2_phylink_to_port(config); | |
6696 | ||
6697 | mvpp2_modify(port->base + MVPP2_GMAC_LPI_CTRL1, | |
6698 | MVPP2_GMAC_LPI_CTRL1_REQ_EN, 0); | |
6699 | } | |
6700 | ||
6701 | static int mvpp2_mac_enable_tx_lpi(struct phylink_config *config, u32 timer, | |
6702 | bool tx_clk_stop) | |
6703 | { | |
6704 | struct mvpp2_port *port = mvpp2_phylink_to_port(config); | |
6705 | u32 ts, tw, lpi1, status; | |
6706 | ||
6707 | status = readl(port->base + MVPP2_GMAC_STATUS0); | |
6708 | if (status & MVPP2_GMAC_STATUS0_GMII_SPEED) { | |
6709 | /* At 1G speeds, the timer resolution is 1us, and
6710 | * 802.3 says tw is 16.5us. Round up to 17us. | |
6711 | */ | |
6712 | tw = 17; | |
6713 | ts = timer; | |
6714 | } else { | |
6715 | /* At 100M speeds, the timer resolution is 10us, and
6716 | * 802.3 says tw is 30us. | |
6717 | */ | |
6718 | tw = 3; | |
6719 | ts = DIV_ROUND_UP(timer, 10); | |
6720 | } | |
6721 | ||
6722 | if (ts > 255) | |
6723 | ts = 255; | |
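/* Worked example with the 250us default timer: at 1G, ts = 250 (1us
 * units) and tw = 17 (~17us); at 100M, ts = DIV_ROUND_UP(250, 10) = 25
 * (10us units) and tw = 3 (30us). ts saturates at the 8-bit field
 * maximum of 255.
 */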
6724 | ||
6725 | /* Configure ts */ | |
6726 | mvpp2_modify(port->base + MVPP2_GMAC_LPI_CTRL0, | |
6727 | MVPP2_GMAC_LPI_CTRL0_TS_MASK, | |
6728 | FIELD_PREP(MVPP2_GMAC_LPI_CTRL0_TS_MASK, ts)); | |
6729 | ||
6730 | lpi1 = readl(port->base + MVPP2_GMAC_LPI_CTRL1); | |
6731 | ||
6732 | /* Configure tw */ | |
6733 | lpi1 = u32_replace_bits(lpi1, tw, MVPP2_GMAC_LPI_CTRL1_TW_MASK); | |
6734 | ||
6735 | /* Enable LPI generation */ | |
6736 | writel(lpi1 | MVPP2_GMAC_LPI_CTRL1_REQ_EN, | |
6737 | port->base + MVPP2_GMAC_LPI_CTRL1); | |
6738 | ||
6739 | return 0; | |
6740 | } | |
6741 | ||
4bb04326 | 6742 | static const struct phylink_mac_ops mvpp2_phylink_ops = { |
cff05632 | 6743 | .mac_select_pcs = mvpp2_select_pcs, |
bfe301eb | 6744 | .mac_prepare = mvpp2_mac_prepare, |
4bb04326 | 6745 | .mac_config = mvpp2_mac_config, |
bfe301eb | 6746 | .mac_finish = mvpp2_mac_finish, |
4bb04326 AT |
6747 | .mac_link_up = mvpp2_mac_link_up, |
6748 | .mac_link_down = mvpp2_mac_link_down, | |
b53b1478 RKO |
6749 | .mac_enable_tx_lpi = mvpp2_mac_enable_tx_lpi, |
6750 | .mac_disable_tx_lpi = mvpp2_mac_disable_tx_lpi, | |
4bb04326 AT |
6751 | }; |
6752 | ||
87745c74 RK |
6753 | /* Work-around for ACPI */ |
6754 | static void mvpp2_acpi_start(struct mvpp2_port *port) | |
6755 | { | |
6756 | /* Phylink isn't used for ACPI as of now, so the MAC has to be
6757 | * configured manually when the interface is started. This will
6758 | * be removed as soon as phylink ACPI support lands.
6759 | */ | |
6760 | struct phylink_link_state state = { | |
6761 | .interface = port->phy_interface, | |
6762 | }; | |
cff05632 RKO |
6763 | struct phylink_pcs *pcs; |
6764 | ||
6765 | pcs = mvpp2_select_pcs(&port->phylink_config, port->phy_interface); | |
6766 | ||
6767 | mvpp2_mac_prepare(&port->phylink_config, MLO_AN_INBAND, | |
6768 | port->phy_interface); | |
87745c74 | 6769 | mvpp2_mac_config(&port->phylink_config, MLO_AN_INBAND, &state); |
d5b16264 RKO |
6770 | pcs->ops->pcs_config(pcs, PHYLINK_PCS_NEG_INBAND_ENABLED, |
6771 | port->phy_interface, state.advertising, | |
6772 | false); | |
bfe301eb RK |
6773 | mvpp2_mac_finish(&port->phylink_config, MLO_AN_INBAND, |
6774 | port->phy_interface); | |
87745c74 RK |
6775 | mvpp2_mac_link_up(&port->phylink_config, NULL, |
6776 | MLO_AN_INBAND, port->phy_interface, | |
6777 | SPEED_UNKNOWN, DUPLEX_UNKNOWN, false, false); | |
6778 | } | |
6779 | ||
dfce1bab MW |
6780 | /* In order to ensure backward compatibility for ACPI, check if the port |
6781 | * firmware node comprises the necessary description allowing to use phylink. | |
6782 | */ | |
6783 | static bool mvpp2_use_acpi_compat_mode(struct fwnode_handle *port_fwnode) | |
6784 | { | |
6785 | if (!is_acpi_node(port_fwnode)) | |
6786 | return false; | |
6787 | ||
6788 | return (!fwnode_property_present(port_fwnode, "phy-handle") && | |
6789 | !fwnode_property_present(port_fwnode, "managed") && | |
6790 | !fwnode_get_named_child_node(port_fwnode, "fixed-link")); | |
6791 | } | |
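/* For example, an ACPI node that supplies "phy-handle" or "managed", or
 * that has a "fixed-link" child, is treated as phylink-capable; a node
 * with none of the three keeps the legacy ACPI compat path (and its
 * link IRQs) selected in mvpp2_port_probe() below.
 */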
6792 | ||
3f518509 MW |
6793 | /* Ports initialization */ |
6794 | static int mvpp2_port_probe(struct platform_device *pdev, | |
24812221 | 6795 | struct fwnode_handle *port_fwnode, |
bf147153 | 6796 | struct mvpp2 *priv) |
3f518509 | 6797 | { |
a75edc7c | 6798 | struct phy *comphy = NULL; |
3f518509 | 6799 | struct mvpp2_port *port; |
edc660fa | 6800 | struct mvpp2_port_pcpu *port_pcpu; |
24812221 | 6801 | struct device_node *port_node = to_of_node(port_fwnode); |
c9dbb6cf | 6802 | netdev_features_t features; |
3f518509 | 6803 | struct net_device *dev; |
4bb04326 | 6804 | struct phylink *phylink; |
3ba8c81e | 6805 | char *mac_from = ""; |
074c74df | 6806 | unsigned int ntxqs, nrxqs, thread; |
a9aac385 | 6807 | unsigned long flags = 0; |
213f428f | 6808 | bool has_tx_irqs; |
3f518509 | 6809 | u32 id; |
3f518509 | 6810 | int phy_mode; |
850623b3 | 6811 | int err, i; |
3f518509 | 6812 | |
fd4a1056 AT |
6813 | has_tx_irqs = mvpp2_port_has_irqs(priv, port_node, &flags); |
6814 | if (!has_tx_irqs && queue_mode == MVPP2_QDIST_MULTI_MODE) { | |
6815 | dev_err(&pdev->dev, | |
6816 | "not enough IRQs to support multi queue mode\n"); | |
6817 | return -EINVAL; | |
a75edc7c | 6818 | } |
213f428f | 6819 | |
09f83975 | 6820 | ntxqs = MVPP2_MAX_TXQ; |
7d04b0b1 | 6821 | nrxqs = mvpp2_get_nrxqs(priv); |
09f83975 TP |
6822 | |
6823 | dev = alloc_etherdev_mqs(sizeof(*port), ntxqs, nrxqs); | |
3f518509 MW |
6824 | if (!dev) |
6825 | return -ENOMEM; | |
6826 | ||
24812221 | 6827 | phy_mode = fwnode_get_phy_mode(port_fwnode); |
3f518509 MW |
6828 | if (phy_mode < 0) { |
6829 | dev_err(&pdev->dev, "incorrect phy mode\n"); | |
6830 | err = phy_mode; | |
6831 | goto err_free_netdev; | |
6832 | } | |
6833 | ||
e0f909bc RK |
6834 | /* |
6835 | * Rewrite 10GBASE-KR to 10GBASE-R for compatibility with existing DT. | |
6836 | * Existing usage of 10GBASE-KR is not correct; no backplane | |
6837 | * negotiation is done, and this driver does not actually support | |
6838 | * 10GBASE-KR. | |
6839 | */ | |
6840 | if (phy_mode == PHY_INTERFACE_MODE_10GKR) | |
6841 | phy_mode = PHY_INTERFACE_MODE_10GBASER; | |
6842 | ||
a75edc7c MW |
6843 | if (port_node) { |
6844 | comphy = devm_of_phy_get(&pdev->dev, port_node, NULL); | |
6845 | if (IS_ERR(comphy)) { | |
6846 | if (PTR_ERR(comphy) == -EPROBE_DEFER) { | |
6847 | err = -EPROBE_DEFER; | |
6848 | goto err_free_netdev; | |
6849 | } | |
6850 | comphy = NULL; | |
542897d9 | 6851 | } |
542897d9 AT |
6852 | } |
6853 | ||
24812221 | 6854 | if (fwnode_property_read_u32(port_fwnode, "port-id", &id)) { |
3f518509 MW |
6855 | err = -EINVAL; |
6856 | dev_err(&pdev->dev, "missing port-id value\n"); | |
6857 | goto err_free_netdev; | |
6858 | } | |
6859 | ||
7cf87e4a | 6860 | dev->tx_queue_len = MVPP2_MAX_TXD_MAX; |
3f518509 MW |
6861 | dev->watchdog_timeo = 5 * HZ; |
6862 | dev->netdev_ops = &mvpp2_netdev_ops; | |
6863 | dev->ethtool_ops = &mvpp2_eth_tool_ops; | |
6864 | ||
6865 | port = netdev_priv(dev); | |
591f4cfa | 6866 | port->dev = dev; |
a75edc7c | 6867 | port->fwnode = port_fwnode; |
09f83975 TP |
6868 | port->ntxqs = ntxqs; |
6869 | port->nrxqs = nrxqs; | |
213f428f TP |
6870 | port->priv = priv; |
6871 | port->has_tx_irqs = has_tx_irqs; | |
a9aac385 | 6872 | port->flags = flags; |
3f518509 | 6873 | |
591f4cfa TP |
6874 | err = mvpp2_queue_vectors_init(port, port_node); |
6875 | if (err) | |
3f518509 | 6876 | goto err_free_netdev; |
3f518509 | 6877 | |
a75edc7c | 6878 | if (port_node) |
89141972 | 6879 | port->port_irq = of_irq_get_byname(port_node, "link"); |
a75edc7c | 6880 | else |
89141972 RK |
6881 | port->port_irq = fwnode_irq_get(port_fwnode, port->nqvecs + 1); |
6882 | if (port->port_irq == -EPROBE_DEFER) { | |
fd3651b2 AT |
6883 | err = -EPROBE_DEFER; |
6884 | goto err_deinit_qvecs; | |
6885 | } | |
89141972 | 6886 | if (port->port_irq <= 0) |
fd3651b2 | 6887 | /* the link irq is optional */ |
89141972 | 6888 | port->port_irq = 0; |
fd3651b2 | 6889 | |
24812221 | 6890 | if (fwnode_property_read_bool(port_fwnode, "marvell,loopback")) |
3f518509 MW |
6891 | port->flags |= MVPP2_F_LOOPBACK; |
6892 | ||
3f518509 | 6893 | port->id = id; |
59b9a31e | 6894 | if (priv->hw_version == MVPP21) |
09f83975 | 6895 | port->first_rxq = port->id * port->nrxqs; |
59b9a31e TP |
6896 | else |
6897 | port->first_rxq = port->id * priv->max_port_rxqs; | |
6898 | ||
4bb04326 | 6899 | port->of_node = port_node; |
3f518509 | 6900 | port->phy_interface = phy_mode; |
542897d9 | 6901 | port->comphy = comphy; |
3f518509 | 6902 | |
a786841d | 6903 | if (priv->hw_version == MVPP21) { |
3230a55b | 6904 | port->base = devm_platform_ioremap_resource(pdev, 2 + id); |
a786841d TP |
6905 | if (IS_ERR(port->base)) { |
6906 | err = PTR_ERR(port->base); | |
fd3651b2 | 6907 | goto err_free_irq; |
a786841d | 6908 | } |
118d6298 MR |
6909 | |
6910 | port->stats_base = port->priv->lms_base + | |
6911 | MVPP21_MIB_COUNTERS_OFFSET + | |
6912 | port->gop_id * MVPP21_MIB_COUNTERS_PORT_SZ; | |
a786841d | 6913 | } else { |
24812221 MW |
6914 | if (fwnode_property_read_u32(port_fwnode, "gop-port-id", |
6915 | &port->gop_id)) { | |
a786841d TP |
6916 | err = -EINVAL; |
6917 | dev_err(&pdev->dev, "missing gop-port-id value\n"); | |
591f4cfa | 6918 | goto err_deinit_qvecs; |
a786841d TP |
6919 | } |
6920 | ||
6921 | port->base = priv->iface_base + MVPP22_GMAC_BASE(port->gop_id); | |
118d6298 MR |
6922 | port->stats_base = port->priv->iface_base + |
6923 | MVPP22_MIB_COUNTERS_OFFSET + | |
6924 | port->gop_id * MVPP22_MIB_COUNTERS_PORT_SZ; | |
ce3497e2 RK |
6925 | |
6926 | /* We may want a property to describe whether we should use | |
6927 | * MAC hardware timestamping. | |
6928 | */ | |
6929 | if (priv->tai) | |
6930 | port->hwtstamp = true; | |
3f518509 MW |
6931 | } |
6932 | ||
118d6298 | 6933 | /* Alloc per-cpu and ethtool stats */ |
3f518509 MW |
6934 | port->stats = netdev_alloc_pcpu_stats(struct mvpp2_pcpu_stats); |
6935 | if (!port->stats) { | |
6936 | err = -ENOMEM; | |
fd3651b2 | 6937 | goto err_free_irq; |
3f518509 MW |
6938 | } |
6939 | ||
118d6298 | 6940 | port->ethtool_stats = devm_kcalloc(&pdev->dev, |
9bea6897 | 6941 | MVPP2_N_ETHTOOL_STATS(ntxqs, nrxqs), |
118d6298 MR |
6942 | sizeof(u64), GFP_KERNEL); |
6943 | if (!port->ethtool_stats) { | |
6944 | err = -ENOMEM; | |
6945 | goto err_free_stats; | |
6946 | } | |
6947 | ||
e5c500eb MR |
6948 | mutex_init(&port->gather_stats_lock); |
6949 | INIT_DELAYED_WORK(&port->stats_work, mvpp2_gather_hw_statistics); | |
6950 | ||
cc4342f6 MR |
6951 | err = mvpp2_port_copy_mac_addr(dev, priv, port_fwnode, &mac_from); |
6952 | if (err < 0) | |
6953 | goto err_free_stats; | |
3f518509 | 6954 | |
7cf87e4a YM |
6955 | port->tx_ring_size = MVPP2_MAX_TXD_DFLT; |
6956 | port->rx_ring_size = MVPP2_MAX_RXD_DFLT; | |
3f518509 MW |
6957 | SET_NETDEV_DEV(dev, &pdev->dev); |
6958 | ||
6959 | err = mvpp2_port_init(port); | |
6960 | if (err < 0) { | |
6961 | dev_err(&pdev->dev, "failed to init port %d\n", id); | |
6962 | goto err_free_stats; | |
6963 | } | |
26975821 | 6964 | |
26975821 TP |
6965 | mvpp2_port_periodic_xon_disable(port); |
6966 | ||
649e51d5 | 6967 | mvpp2_mac_reset_assert(port); |
7409e66e | 6968 | mvpp22_pcs_reset_assert(port); |
3f518509 | 6969 | |
edc660fa MW |
6970 | port->pcpu = alloc_percpu(struct mvpp2_port_pcpu); |
6971 | if (!port->pcpu) { | |
6972 | err = -ENOMEM; | |
6973 | goto err_free_txq_pcpu; | |
6974 | } | |
6975 | ||
213f428f | 6976 | if (!port->has_tx_irqs) { |
e531f767 | 6977 | for (thread = 0; thread < priv->nthreads; thread++) { |
074c74df | 6978 | port_pcpu = per_cpu_ptr(port->pcpu, thread); |
edc660fa | 6979 | |
47815994 NC |
6980 | hrtimer_setup(&port_pcpu->tx_done_timer, mvpp2_hr_timer_cb, CLOCK_MONOTONIC, |
6981 | HRTIMER_MODE_REL_PINNED_SOFT); | |
213f428f | 6982 | port_pcpu->timer_scheduled = false; |
ecb9f80d | 6983 | port_pcpu->dev = dev; |
213f428f | 6984 | } |
edc660fa MW |
6985 | } |
6986 | ||
381c5671 AT |
6987 | features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | |
6988 | NETIF_F_TSO; | |
3f518509 | 6989 | dev->features = features | NETIF_F_RXCSUM; |
56beda3d MC |
6990 | dev->hw_features |= features | NETIF_F_RXCSUM | NETIF_F_GRO | |
6991 | NETIF_F_HW_VLAN_CTAG_FILTER; | |
576193f2 | 6992 | |
0a8a8000 | 6993 | if (mvpp22_rss_is_supported(port)) { |
d33ec452 | 6994 | dev->hw_features |= NETIF_F_RXHASH; |
da86f59f MC |
6995 | dev->features |= NETIF_F_NTUPLE; |
6996 | } | |
d33ec452 | 6997 | |
7d04b0b1 MC |
6998 | if (!port->priv->percpu_pools) |
6999 | mvpp2_set_hw_csum(port, port->pool_long->id); | |
481e96fc MC |
7000 | else if (port->ntxqs >= num_possible_cpus() * 2) |
7001 | dev->xdp_features = NETDEV_XDP_ACT_BASIC | | |
7002 | NETDEV_XDP_ACT_REDIRECT | | |
7003 | NETDEV_XDP_ACT_NDO_XMIT; | |
576193f2 | 7004 | |
3f518509 | 7005 | dev->vlan_features |= features; |
ee8b7a11 | 7006 | netif_set_tso_max_segs(dev, MVPP2_MAX_TSO_SEGS); |
66c0e13a | 7007 | |
10fea26c | 7008 | dev->priv_flags |= IFF_UNICAST_FLT; |
3f518509 | 7009 | |
576193f2 | 7010 | /* MTU range: 68 - 9704 */ |
5777987e | 7011 | dev->min_mtu = ETH_MIN_MTU; |
576193f2 SC |
7012 | /* 9704 == 9728 - 20 and rounding to 8 */ |
7013 | dev->max_mtu = MVPP2_BM_JUMBO_PKT_SIZE; | |
5fe65375 | 7014 | device_set_node(&dev->dev, port_fwnode); |
00418d55 | 7015 | dev->dev_port = port->id; |
5777987e | 7016 | |
5a2aba71 JL |
7017 | port->pcs_gmac.ops = &mvpp2_phylink_gmac_pcs_ops; |
7018 | port->pcs_xlg.ops = &mvpp2_phylink_xlg_pcs_ops; | |
7019 | ||
dfce1bab | 7020 | if (!mvpp2_use_acpi_compat_mode(port_fwnode)) { |
44cc27e4 IC |
7021 | port->phylink_config.dev = &dev->dev; |
7022 | port->phylink_config.type = PHYLINK_NETDEV; | |
5038ffea RKO |
7023 | port->phylink_config.mac_capabilities = |
7024 | MAC_2500FD | MAC_1000FD | MAC_100 | MAC_10; | |
7025 | ||
b53b1478 RKO |
7026 | __set_bit(PHY_INTERFACE_MODE_SGMII, |
7027 | port->phylink_config.lpi_interfaces); | |
7028 | ||
7029 | port->phylink_config.lpi_capabilities = MAC_1000FD | MAC_100FD; | |
7030 | ||
7031 | /* Setup EEE. Choose 250us idle. */ | |
7032 | port->phylink_config.lpi_timer_default = 250; | |
7033 | port->phylink_config.eee_enabled_default = true; | |
7034 | ||
5038ffea RKO |
7035 | if (port->priv->global_tx_fc) |
7036 | port->phylink_config.mac_capabilities |= | |
7037 | MAC_SYM_PAUSE | MAC_ASYM_PAUSE; | |
44cc27e4 | 7038 | |
8498e17e | 7039 | if (mvpp2_port_supports_xlg(port)) { |
4043ec70 MB |
7040 | /* If a COMPHY is present, we can support any of |
7041 | * the serdes modes and switch between them. | |
7042 | */ | |
7043 | if (comphy) { | |
7044 | __set_bit(PHY_INTERFACE_MODE_5GBASER, | |
7045 | port->phylink_config.supported_interfaces); | |
7046 | __set_bit(PHY_INTERFACE_MODE_10GBASER, | |
7047 | port->phylink_config.supported_interfaces); | |
7048 | __set_bit(PHY_INTERFACE_MODE_XAUI, | |
7049 | port->phylink_config.supported_interfaces); | |
7050 | } else if (phy_mode == PHY_INTERFACE_MODE_5GBASER) { | |
7051 | __set_bit(PHY_INTERFACE_MODE_5GBASER, | |
7052 | port->phylink_config.supported_interfaces); | |
7053 | } else if (phy_mode == PHY_INTERFACE_MODE_10GBASER) { | |
7054 | __set_bit(PHY_INTERFACE_MODE_10GBASER, | |
7055 | port->phylink_config.supported_interfaces); | |
7056 | } else if (phy_mode == PHY_INTERFACE_MODE_XAUI) { | |
7057 | __set_bit(PHY_INTERFACE_MODE_XAUI, | |
7058 | port->phylink_config.supported_interfaces); | |
7059 | } | |
7060 | ||
7061 | if (comphy) | |
7062 | port->phylink_config.mac_capabilities |= | |
7063 | MAC_10000FD | MAC_5000FD; | |
7064 | else if (phy_mode == PHY_INTERFACE_MODE_5GBASER) | |
7065 | port->phylink_config.mac_capabilities |= | |
7066 | MAC_5000FD; | |
7067 | else | |
7068 | port->phylink_config.mac_capabilities |= | |
7069 | MAC_10000FD; | |
8498e17e RK |
7070 | } |
7071 | ||
1b666016 | 7072 | if (mvpp2_port_supports_rgmii(port)) { |
8498e17e | 7073 | phy_interface_set_rgmii(port->phylink_config.supported_interfaces); |
1b666016 SE |
7074 | __set_bit(PHY_INTERFACE_MODE_MII, |
7075 | port->phylink_config.supported_interfaces); | |
7076 | } | |
8498e17e RK |
7077 | |
7078 | if (comphy) { | |
7079 | /* If a COMPHY is present, we can support any of the | |
7080 | * serdes modes and switch between them. | |
7081 | */ | |
7082 | __set_bit(PHY_INTERFACE_MODE_SGMII, | |
7083 | port->phylink_config.supported_interfaces); | |
7084 | __set_bit(PHY_INTERFACE_MODE_1000BASEX, | |
7085 | port->phylink_config.supported_interfaces); | |
7086 | __set_bit(PHY_INTERFACE_MODE_2500BASEX, | |
7087 | port->phylink_config.supported_interfaces); | |
7088 | } else if (phy_mode == PHY_INTERFACE_MODE_2500BASEX) { | |
7089 | /* No COMPHY, with only 2500BASE-X mode supported */ | |
7090 | __set_bit(PHY_INTERFACE_MODE_2500BASEX, | |
7091 | port->phylink_config.supported_interfaces); | |
7092 | } else if (phy_mode == PHY_INTERFACE_MODE_1000BASEX || | |
7093 | phy_mode == PHY_INTERFACE_MODE_SGMII) { | |
7094 | /* No COMPHY, we can switch between 1000BASE-X and SGMII | |
7095 | */ | |
7096 | __set_bit(PHY_INTERFACE_MODE_1000BASEX, | |
7097 | port->phylink_config.supported_interfaces); | |
7098 | __set_bit(PHY_INTERFACE_MODE_SGMII, | |
7099 | port->phylink_config.supported_interfaces); | |
7100 | } | |
7101 | ||
44cc27e4 IC |
7102 | phylink = phylink_create(&port->phylink_config, port_fwnode, |
7103 | phy_mode, &mvpp2_phylink_ops); | |
4bb04326 AT |
7104 | if (IS_ERR(phylink)) { |
7105 | err = PTR_ERR(phylink); | |
7106 | goto err_free_port_pcpu; | |
7107 | } | |
7108 | port->phylink = phylink; | |
b53b1478 RKO |
7109 | |
7110 | mvpp2_mac_disable_tx_lpi(&port->phylink_config); | |
4bb04326 | 7111 | } else { |
dfce1bab | 7112 | dev_warn(&pdev->dev, "Use link irqs for port#%d. FW update required\n", port->id); |
4bb04326 AT |
7113 | port->phylink = NULL; |
7114 | } | |
7115 | ||
6791c102 RK |
7116 | /* Cycle the comphy to power it down, saving 270mW per port - |
7117 | * don't worry about an error powering it up. When the comphy | |
7118 | * driver does this, we can remove this code. | |
7119 | */ | |
7120 | if (port->comphy) { | |
bb7bbb6e | 7121 | err = mvpp22_comphy_init(port, port->phy_interface); |
6791c102 RK |
7122 | if (err == 0) |
7123 | phy_power_off(port->comphy); | |
7124 | } | |
7125 | ||
3f518509 MW |
7126 | err = register_netdev(dev); |
7127 | if (err < 0) { | |
7128 | dev_err(&pdev->dev, "failed to register netdev\n"); | |
4bb04326 | 7129 | goto err_phylink; |
3f518509 MW |
7130 | } |
7131 | netdev_info(dev, "Using %s mac address %pM\n", mac_from, dev->dev_addr); | |
7132 | ||
bf147153 MW |
7133 | priv->port_list[priv->port_count++] = port; |
7134 | ||
3f518509 MW |
7135 | return 0; |
7136 | ||
4bb04326 AT |
7137 | err_phylink: |
7138 | if (port->phylink) | |
7139 | phylink_destroy(port->phylink); | |
edc660fa MW |
7140 | err_free_port_pcpu: |
7141 | free_percpu(port->pcpu); | |
3f518509 | 7142 | err_free_txq_pcpu: |
09f83975 | 7143 | for (i = 0; i < port->ntxqs; i++) |
3f518509 MW |
7144 | free_percpu(port->txqs[i]->pcpu); |
7145 | err_free_stats: | |
7146 | free_percpu(port->stats); | |
fd3651b2 | 7147 | err_free_irq: |
89141972 RK |
7148 | if (port->port_irq) |
7149 | irq_dispose_mapping(port->port_irq); | |
591f4cfa TP |
7150 | err_deinit_qvecs: |
7151 | mvpp2_queue_vectors_deinit(port); | |
3f518509 MW |
7152 | err_free_netdev: |
7153 | free_netdev(dev); | |
7154 | return err; | |
7155 | } | |
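/* Editor's sketch (not driver code): the probe path above tells phylink
 * which interface modes may be selected at runtime. A simplified
 * standalone model using a plain bitmask instead of the kernel's
 * interface bitmaps; all names below are illustrative. (The real driver
 * additionally allows 1000BASE-X/SGMII switching without a COMPHY, and
 * RGMII/MII where the port supports them.)
 */
#include <stdio.h>

enum { IF_SGMII, IF_1000BASEX, IF_2500BASEX };

static unsigned long selectable_modes(int has_comphy, int fw_mode)
{
	if (has_comphy)	/* serdes reconfigurable at runtime: all modes */
		return (1UL << IF_SGMII) | (1UL << IF_1000BASEX) |
		       (1UL << IF_2500BASEX);
	return 1UL << fw_mode;	/* otherwise only the firmware-set mode */
}

int main(void)
{
	printf("with comphy: %#lx\n", selectable_modes(1, IF_SGMII));
	printf("without:     %#lx\n", selectable_modes(0, IF_2500BASEX));
	return 0;
}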
7156 | ||
7157 | /* Ports removal routine */ | |
7158 | static void mvpp2_port_remove(struct mvpp2_port *port) | |
7159 | { | |
7160 | int i; | |
7161 | ||
7162 | unregister_netdev(port->dev); | |
4bb04326 AT |
7163 | if (port->phylink) |
7164 | phylink_destroy(port->phylink); | |
edc660fa | 7165 | free_percpu(port->pcpu); |
3f518509 | 7166 | free_percpu(port->stats); |
09f83975 | 7167 | for (i = 0; i < port->ntxqs; i++) |
3f518509 | 7168 | free_percpu(port->txqs[i]->pcpu); |
591f4cfa | 7169 | mvpp2_queue_vectors_deinit(port); |
89141972 RK |
7170 | if (port->port_irq) |
7171 | irq_dispose_mapping(port->port_irq); | |
3f518509 MW |
7172 | free_netdev(port->dev); |
7173 | } | |
7174 | ||
7175 | /* Initialize MBus address decoding windows */ | |
7176 | static void mvpp2_conf_mbus_windows(const struct mbus_dram_target_info *dram, | |
7177 | struct mvpp2 *priv) | |
7178 | { | |
7179 | u32 win_enable; | |
7180 | int i; | |
7181 | ||
7182 | for (i = 0; i < 6; i++) { | |
7183 | mvpp2_write(priv, MVPP2_WIN_BASE(i), 0); | |
7184 | mvpp2_write(priv, MVPP2_WIN_SIZE(i), 0); | |
7185 | ||
7186 | if (i < 4) | |
7187 | mvpp2_write(priv, MVPP2_WIN_REMAP(i), 0); | |
7188 | } | |
7189 | ||
7190 | win_enable = 0; | |
7191 | ||
7192 | for (i = 0; i < dram->num_cs; i++) { | |
7193 | const struct mbus_dram_window *cs = dram->cs + i; | |
7194 | ||
7195 | mvpp2_write(priv, MVPP2_WIN_BASE(i), | |
7196 | (cs->base & 0xffff0000) | (cs->mbus_attr << 8) | | |
7197 | dram->mbus_dram_target_id); | |
7198 | ||
7199 | mvpp2_write(priv, MVPP2_WIN_SIZE(i), | |
7200 | (cs->size - 1) & 0xffff0000); | |
7201 | ||
7202 | win_enable |= (1 << i); | |
7203 | } | |
7204 | ||
7205 | mvpp2_write(priv, MVPP2_BASE_ADDR_ENABLE, win_enable); | |
7206 | } | |
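/* Editor's illustration (not part of the driver): how the BASE/SIZE
 * register values in mvpp2_conf_mbus_windows() above are packed. A
 * minimal userspace sketch; the example base, attribute and target
 * values below are hypothetical.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t pack_win_base(uint32_t base, uint8_t mbus_attr, uint8_t target)
{
	/* bits 31:16 window base, bits 15:8 MBus attribute, bits 7:0 target */
	return (base & 0xffff0000u) | ((uint32_t)mbus_attr << 8) | target;
}

int main(void)
{
	uint32_t cs_base = 0x40000000u, cs_size = 0x10000000u; /* 256MB CS */

	printf("WIN_BASE = 0x%08x\n", pack_win_base(cs_base, 0x0e, 0x00));
	printf("WIN_SIZE = 0x%08x\n", (cs_size - 1) & 0xffff0000u);
	return 0;
}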
7207 | ||
7208 | /* Initialize Rx FIFOs */ | |
7209 | static void mvpp2_rx_fifo_init(struct mvpp2 *priv) | |
7210 | { | |
7211 | int port; | |
7212 | ||
7213 | for (port = 0; port < MVPP2_MAX_PORTS; port++) { | |
7214 | mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(port), | |
2d1d7df8 | 7215 | MVPP2_RX_FIFO_PORT_DATA_SIZE_4KB); |
3f518509 | 7216 | mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(port), |
2d1d7df8 AT |
7217 | MVPP2_RX_FIFO_PORT_ATTR_SIZE_4KB); |
7218 | } | |
7219 | ||
7220 | mvpp2_write(priv, MVPP2_RX_MIN_PKT_SIZE_REG, | |
7221 | MVPP2_RX_FIFO_PORT_MIN_PKT); | |
7222 | mvpp2_write(priv, MVPP2_RX_FIFO_INIT_REG, 0x1); | |
7223 | } | |
7224 | ||
9a71baf7 | 7225 | static void mvpp22_rx_fifo_set_hw(struct mvpp2 *priv, int port, int data_size) |
2d1d7df8 | 7226 | { |
9a71baf7 | 7227 | int attr_size = MVPP2_RX_FIFO_PORT_ATTR_SIZE(data_size); |
2d1d7df8 | 7228 | |
9a71baf7 SC |
7229 | mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(port), data_size); |
7230 | mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(port), attr_size); | |
7231 | } | |
2d1d7df8 | 7232 | |
6af27a1d | 7233 | /* Initialize Rx FIFOs: the total FIFO size is 48kB on PPv2.2 and PPv2.3. |
9a71baf7 SC |
7234 | * 4kB of fixed space must be assigned to the loopback port. | |
7235 | * Redistribute the remaining available 44kB among all active ports. | |
7236 | * Guarantee a minimum of 32kB for the 10G port and 8kB for port 1, which is | |
7237 | * capable of a 2.5G SGMII link. | |
7238 | */ | |
7239 | static void mvpp22_rx_fifo_init(struct mvpp2 *priv) | |
7240 | { | |
7241 | int remaining_ports_count; | |
7242 | unsigned long port_map; | |
7243 | int size_remainder; | |
7244 | int port, size; | |
7245 | ||
7246 | /* The loopback port requires a fixed 4kB of FIFO space. */ | |
7247 | mvpp22_rx_fifo_set_hw(priv, MVPP2_LOOPBACK_PORT_INDEX, | |
7248 | MVPP2_RX_FIFO_PORT_DATA_SIZE_4KB); | |
7249 | port_map = priv->port_map & ~BIT(MVPP2_LOOPBACK_PORT_INDEX); | |
7250 | ||
7251 | /* Set RX FIFO size to 0 for inactive ports. */ | |
7252 | for_each_clear_bit(port, &port_map, MVPP2_LOOPBACK_PORT_INDEX) | |
7253 | mvpp22_rx_fifo_set_hw(priv, port, 0); | |
7254 | ||
7255 | /* Assign remaining RX FIFO space among all active ports. */ | |
7256 | size_remainder = MVPP2_RX_FIFO_PORT_DATA_SIZE_44KB; | |
7257 | remaining_ports_count = hweight_long(port_map); | |
7258 | ||
7259 | for_each_set_bit(port, &port_map, MVPP2_LOOPBACK_PORT_INDEX) { | |
7260 | if (remaining_ports_count == 1) | |
7261 | size = size_remainder; | |
7262 | else if (port == 0) | |
7263 | size = max(size_remainder / remaining_ports_count, | |
7264 | MVPP2_RX_FIFO_PORT_DATA_SIZE_32KB); | |
7265 | else if (port == 1) | |
7266 | size = max(size_remainder / remaining_ports_count, | |
7267 | MVPP2_RX_FIFO_PORT_DATA_SIZE_8KB); | |
7268 | else | |
7269 | size = size_remainder / remaining_ports_count; | |
2d1d7df8 | 7270 | |
9a71baf7 SC |
7271 | size_remainder -= size; |
7272 | remaining_ports_count--; | |
2d1d7df8 | 7273 | |
9a71baf7 | 7274 | mvpp22_rx_fifo_set_hw(priv, port, size); |
3f518509 MW |
7275 | } |
7276 | ||
7277 | mvpp2_write(priv, MVPP2_RX_MIN_PKT_SIZE_REG, | |
7278 | MVPP2_RX_FIFO_PORT_MIN_PKT); | |
7279 | mvpp2_write(priv, MVPP2_RX_FIFO_INIT_REG, 0x1); | |
7280 | } | |
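/* Editor's sketch (not driver code): the RX FIFO split performed by
 * mvpp22_rx_fifo_init() above, modelled in standalone C. Sizes are in
 * kB, the port map is a plain bitmask, and all names are illustrative.
 */
#include <stdio.h>

#define TOTAL_KB	44	/* 48kB total minus 4kB for loopback */
#define PORT0_MIN_KB	32	/* 10G-capable port */
#define PORT1_MIN_KB	8	/* 2.5G-capable port */

static int max_int(int a, int b) { return a > b ? a : b; }

static void rx_fifo_split(unsigned long port_map, int nports, int *sizes)
{
	int remaining = TOTAL_KB, count = 0, port, size;

	for (port = 0; port < nports; port++)
		if (port_map & (1UL << port))
			count++;

	for (port = 0; port < nports; port++) {
		if (!(port_map & (1UL << port))) {
			sizes[port] = 0;	/* inactive: no FIFO space */
			continue;
		}
		if (count == 1)
			size = remaining;	/* last port takes the rest */
		else if (port == 0)
			size = max_int(remaining / count, PORT0_MIN_KB);
		else if (port == 1)
			size = max_int(remaining / count, PORT1_MIN_KB);
		else
			size = remaining / count;
		remaining -= size;
		count--;
		sizes[port] = size;
	}
}

int main(void)
{
	int s[3];

	rx_fifo_split(0x7, 3, s);	/* ports 0, 1 and 2 active */
	printf("port0=%dkB port1=%dkB port2=%dkB\n", s[0], s[1], s[2]); /* 32/8/4 */
	return 0;
}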
7281 | ||
aca0e235 SC |
7282 | /* Configure Rx FIFO flow control thresholds */ | |
7283 | static void mvpp23_rx_fifo_fc_set_tresh(struct mvpp2 *priv) | |
7284 | { | |
7285 | int port, val; | |
7286 | ||
7287 | /* Port 0: maximum speed 10Gb/s; | |
7288 | * the spec requires an RX FIFO threshold of 9kB. | |
7289 | * Port 1: maximum speed 5Gb/s; | |
7290 | * the spec requires an RX FIFO threshold of 4kB. | |
7291 | * Port 2: maximum speed 1Gb/s; | |
7292 | * the spec requires an RX FIFO threshold of 2kB. | |
7293 | */ | |
7294 | ||
7295 | /* Without loopback port */ | |
7296 | for (port = 0; port < (MVPP2_MAX_PORTS - 1); port++) { | |
7297 | if (port == 0) { | |
7298 | val = (MVPP23_PORT0_FIFO_TRSH / MVPP2_RX_FC_TRSH_UNIT) | |
7299 | << MVPP2_RX_FC_TRSH_OFFS; | |
7300 | val &= MVPP2_RX_FC_TRSH_MASK; | |
7301 | mvpp2_write(priv, MVPP2_RX_FC_REG(port), val); | |
7302 | } else if (port == 1) { | |
7303 | val = (MVPP23_PORT1_FIFO_TRSH / MVPP2_RX_FC_TRSH_UNIT) | |
7304 | << MVPP2_RX_FC_TRSH_OFFS; | |
7305 | val &= MVPP2_RX_FC_TRSH_MASK; | |
7306 | mvpp2_write(priv, MVPP2_RX_FC_REG(port), val); | |
7307 | } else { | |
7308 | val = (MVPP23_PORT2_FIFO_TRSH / MVPP2_RX_FC_TRSH_UNIT) | |
7309 | << MVPP2_RX_FC_TRSH_OFFS; | |
7310 | val &= MVPP2_RX_FC_TRSH_MASK; | |
7311 | mvpp2_write(priv, MVPP2_RX_FC_REG(port), val); | |
7312 | } | |
7313 | } | |
7314 | } | |
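/* Editor's illustration: each write above encodes a byte threshold as
 * (threshold / unit) << offset, masked to the field. A standalone
 * sketch; the 256B unit and the bits 31:16 field placement are
 * assumptions here (the authoritative MVPP2_RX_FC_TRSH_* values live in
 * mvpp2.h).
 */
#include <stdint.h>
#include <stdio.h>

#define FC_TRSH_UNIT	256		/* assumed granularity, in bytes */
#define FC_TRSH_OFFS	16		/* assumed field offset */
#define FC_TRSH_MASK	0xffff0000u	/* assumed field mask */

static uint32_t fc_thresh_reg(uint32_t thresh_bytes)
{
	return ((thresh_bytes / FC_TRSH_UNIT) << FC_TRSH_OFFS) & FC_TRSH_MASK;
}

int main(void)
{
	printf("port0 9kB -> 0x%08x\n", fc_thresh_reg(9 * 1024));
	printf("port1 4kB -> 0x%08x\n", fc_thresh_reg(4 * 1024));
	printf("port2 2kB -> 0x%08x\n", fc_thresh_reg(2 * 1024));
	return 0;
}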
7315 | ||
7316 | /* Enable/disable Rx FIFO flow control for a given port */ | |
7317 | void mvpp23_rx_fifo_fc_en(struct mvpp2 *priv, int port, bool en) | |
7318 | { | |
7319 | int val; | |
7320 | ||
7321 | val = mvpp2_read(priv, MVPP2_RX_FC_REG(port)); | |
7322 | ||
7323 | if (en) | |
7324 | val |= MVPP2_RX_FC_EN; | |
7325 | else | |
7326 | val &= ~MVPP2_RX_FC_EN; | |
7327 | ||
7328 | mvpp2_write(priv, MVPP2_RX_FC_REG(port), val); | |
7329 | } | |
7330 | ||
9a71baf7 SC |
7331 | static void mvpp22_tx_fifo_set_hw(struct mvpp2 *priv, int port, int size) |
7332 | { | |
7333 | int threshold = MVPP2_TX_FIFO_THRESHOLD(size); | |
7334 | ||
7335 | mvpp2_write(priv, MVPP22_TX_FIFO_SIZE_REG(port), size); | |
7336 | mvpp2_write(priv, MVPP22_TX_FIFO_THRESH_REG(port), threshold); | |
7337 | } | |
7338 | ||
6af27a1d | 7339 | /* Initialize Tx FIFOs: the total FIFO size is 19kB on PPv2.2 and PPv2.3. |
7c294515 SC |
7340 | * 1kB of fixed space must be assigned to the loopback port. | |
7341 | * Redistribute the remaining available 18kB among all active ports. | |
9a71baf7 SC |
7342 | * The 10G interface should use 10kB (the maximum possible size | |
7343 | * for a single port). | |
93ff130f | 7344 | */ |
7c10f974 AT |
7345 | static void mvpp22_tx_fifo_init(struct mvpp2 *priv) |
7346 | { | |
9a71baf7 SC |
7347 | int remaining_ports_count; |
7348 | unsigned long port_map; | |
7349 | int size_remainder; | |
7350 | int port, size; | |
7351 | ||
7c294515 | 7352 | /* The loopback port requires a fixed 1kB of FIFO space. */ |
9a71baf7 | 7353 | mvpp22_tx_fifo_set_hw(priv, MVPP2_LOOPBACK_PORT_INDEX, |
7c294515 | 7354 | MVPP22_TX_FIFO_DATA_SIZE_1KB); |
9a71baf7 SC |
7355 | port_map = priv->port_map & ~BIT(MVPP2_LOOPBACK_PORT_INDEX); |
7356 | ||
7357 | /* Set TX FIFO size to 0 for inactive ports. */ | |
7358 | for_each_clear_bit(port, &port_map, MVPP2_LOOPBACK_PORT_INDEX) | |
7359 | mvpp22_tx_fifo_set_hw(priv, port, 0); | |
7360 | ||
7361 | /* Assign remaining TX FIFO space among all active ports. */ | |
7c294515 | 7362 | size_remainder = MVPP22_TX_FIFO_DATA_SIZE_18KB; |
9a71baf7 SC |
7363 | remaining_ports_count = hweight_long(port_map); |
7364 | ||
7365 | for_each_set_bit(port, &port_map, MVPP2_LOOPBACK_PORT_INDEX) { | |
7366 | if (remaining_ports_count == 1) | |
7367 | size = min(size_remainder, | |
7368 | MVPP22_TX_FIFO_DATA_SIZE_10KB); | |
7369 | else if (port == 0) | |
93ff130f | 7370 | size = MVPP22_TX_FIFO_DATA_SIZE_10KB; |
9a71baf7 SC |
7371 | else |
7372 | size = size_remainder / remaining_ports_count; | |
7373 | ||
7374 | size_remainder -= size; | |
7375 | remaining_ports_count--; | |
7376 | ||
7377 | mvpp22_tx_fifo_set_hw(priv, port, size); | |
93ff130f | 7378 | } |
7c10f974 AT |
7379 | } |
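/* Editor's worked example (derived from the loop above): with ports 0
 * and 2 active, the 18kB splits as 10kB + 8kB. Port 0 is not the last
 * remaining port, so it takes its fixed 10kB; port 2, being last, takes
 * min(8kB remaining, 10kB cap) = 8kB. With a single active port the
 * min() against 10kB stops one port from claiming the whole 18kB.
 */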
7380 | ||
6763ce31 TP |
7381 | static void mvpp2_axi_init(struct mvpp2 *priv) |
7382 | { | |
7383 | u32 val, rdval, wrval; | |
7384 | ||
7385 | mvpp2_write(priv, MVPP22_BM_ADDR_HIGH_RLS_REG, 0x0); | |
7386 | ||
7387 | /* AXI Bridge Configuration */ | |
7388 | ||
7389 | rdval = MVPP22_AXI_CODE_CACHE_RD_CACHE | |
7390 | << MVPP22_AXI_ATTR_CACHE_OFFS; | |
7391 | rdval |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM | |
7392 | << MVPP22_AXI_ATTR_DOMAIN_OFFS; | |
7393 | ||
7394 | wrval = MVPP22_AXI_CODE_CACHE_WR_CACHE | |
7395 | << MVPP22_AXI_ATTR_CACHE_OFFS; | |
7396 | wrval |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM | |
7397 | << MVPP22_AXI_ATTR_DOMAIN_OFFS; | |
7398 | ||
7399 | /* BM */ | |
7400 | mvpp2_write(priv, MVPP22_AXI_BM_WR_ATTR_REG, wrval); | |
7401 | mvpp2_write(priv, MVPP22_AXI_BM_RD_ATTR_REG, rdval); | |
7402 | ||
7403 | /* Descriptors */ | |
7404 | mvpp2_write(priv, MVPP22_AXI_AGGRQ_DESCR_RD_ATTR_REG, rdval); | |
7405 | mvpp2_write(priv, MVPP22_AXI_TXQ_DESCR_WR_ATTR_REG, wrval); | |
7406 | mvpp2_write(priv, MVPP22_AXI_TXQ_DESCR_RD_ATTR_REG, rdval); | |
7407 | mvpp2_write(priv, MVPP22_AXI_RXQ_DESCR_WR_ATTR_REG, wrval); | |
7408 | ||
7409 | /* Buffer Data */ | |
7410 | mvpp2_write(priv, MVPP22_AXI_TX_DATA_RD_ATTR_REG, rdval); | |
7411 | mvpp2_write(priv, MVPP22_AXI_RX_DATA_WR_ATTR_REG, wrval); | |
7412 | ||
7413 | val = MVPP22_AXI_CODE_CACHE_NON_CACHE | |
7414 | << MVPP22_AXI_CODE_CACHE_OFFS; | |
7415 | val |= MVPP22_AXI_CODE_DOMAIN_SYSTEM | |
7416 | << MVPP22_AXI_CODE_DOMAIN_OFFS; | |
7417 | mvpp2_write(priv, MVPP22_AXI_RD_NORMAL_CODE_REG, val); | |
7418 | mvpp2_write(priv, MVPP22_AXI_WR_NORMAL_CODE_REG, val); | |
7419 | ||
7420 | val = MVPP22_AXI_CODE_CACHE_RD_CACHE | |
7421 | << MVPP22_AXI_CODE_CACHE_OFFS; | |
7422 | val |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM | |
7423 | << MVPP22_AXI_CODE_DOMAIN_OFFS; | |
7424 | ||
7425 | mvpp2_write(priv, MVPP22_AXI_RD_SNOOP_CODE_REG, val); | |
7426 | ||
7427 | val = MVPP22_AXI_CODE_CACHE_WR_CACHE | |
7428 | << MVPP22_AXI_CODE_CACHE_OFFS; | |
7429 | val |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM | |
7430 | << MVPP22_AXI_CODE_DOMAIN_OFFS; | |
7431 | ||
7432 | mvpp2_write(priv, MVPP22_AXI_WR_SNOOP_CODE_REG, val); | |
7433 | } | |
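/* Editor's note: every attribute value written above is composed the
 * same way: a cache code and a domain code shifted into adjacent fields
 * of one register, i.e. val = (cache << CACHE_OFFS) | (domain <<
 * DOMAIN_OFFS). Snooped accesses get read/write-allocate cache codes in
 * the outer-shareable domain; the "normal" codes use non-cacheable,
 * system-domain attributes.
 */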
7434 | ||
3f518509 MW |
7435 | /* Initialize network controller common part HW */ |
7436 | static int mvpp2_init(struct platform_device *pdev, struct mvpp2 *priv) | |
7437 | { | |
7438 | const struct mbus_dram_target_info *dram_target_info; | |
7439 | int err, i; | |
08a23755 | 7440 | u32 val; |
3f518509 | 7441 | |
3f518509 MW |
7442 | /* MBUS windows configuration */ |
7443 | dram_target_info = mv_mbus_dram_info(); | |
7444 | if (dram_target_info) | |
7445 | mvpp2_conf_mbus_windows(dram_target_info, priv); | |
7446 | ||
f704177e | 7447 | if (priv->hw_version >= MVPP22) |
6763ce31 TP |
7448 | mvpp2_axi_init(priv); |
7449 | ||
08a23755 | 7450 | /* Disable HW PHY polling */ |
26975821 TP |
7451 | if (priv->hw_version == MVPP21) { |
7452 | val = readl(priv->lms_base + MVPP2_PHY_AN_CFG0_REG); | |
7453 | val |= MVPP2_PHY_AN_STOP_SMI0_MASK; | |
7454 | writel(val, priv->lms_base + MVPP2_PHY_AN_CFG0_REG); | |
7455 | } else { | |
7456 | val = readl(priv->iface_base + MVPP22_SMI_MISC_CFG_REG); | |
7457 | val &= ~MVPP22_SMI_POLLING_EN; | |
7458 | writel(val, priv->iface_base + MVPP22_SMI_MISC_CFG_REG); | |
7459 | } | |
08a23755 | 7460 | |
3f518509 | 7461 | /* Allocate and initialize aggregated TXQs */ |
074c74df | 7462 | priv->aggr_txqs = devm_kcalloc(&pdev->dev, MVPP2_MAX_THREADS, |
d7ce3cec | 7463 | sizeof(*priv->aggr_txqs), |
3f518509 MW |
7464 | GFP_KERNEL); |
7465 | if (!priv->aggr_txqs) | |
7466 | return -ENOMEM; | |
7467 | ||
074c74df | 7468 | for (i = 0; i < MVPP2_MAX_THREADS; i++) { |
3f518509 MW |
7469 | priv->aggr_txqs[i].id = i; |
7470 | priv->aggr_txqs[i].size = MVPP2_AGGR_TXQ_SIZE; | |
85affd7e | 7471 | err = mvpp2_aggr_txq_init(pdev, &priv->aggr_txqs[i], i, priv); |
3f518509 MW |
7472 | if (err < 0) |
7473 | return err; | |
7474 | } | |
7475 | ||
7c10f974 AT |
7476 | /* FIFO init */ | |
7477 | if (priv->hw_version == MVPP21) { | |
2d1d7df8 | 7478 | mvpp2_rx_fifo_init(priv); |
7c10f974 | 7479 | } else { |
2d1d7df8 | 7480 | mvpp22_rx_fifo_init(priv); |
7c10f974 | 7481 | mvpp22_tx_fifo_init(priv); |
aca0e235 SC |
7482 | if (priv->hw_version == MVPP23) |
7483 | mvpp23_rx_fifo_fc_set_tresh(priv); | |
7c10f974 | 7484 | } |
3f518509 | 7485 | |
26975821 TP |
7486 | if (priv->hw_version == MVPP21) |
7487 | writel(MVPP2_EXT_GLOBAL_CTRL_DEFAULT, | |
7488 | priv->lms_base + MVPP2_MNG_EXTENDED_GLOBAL_CTRL_REG); | |
3f518509 MW |
7489 | |
7490 | /* Allow cache snoop when transmitting packets */ | |
7491 | mvpp2_write(priv, MVPP2_TX_SNOOP_REG, 0x1); | |
7492 | ||
7493 | /* Buffer Manager initialization */ | |
13616361 | 7494 | err = mvpp2_bm_init(&pdev->dev, priv); |
3f518509 MW |
7495 | if (err < 0) |
7496 | return err; | |
7497 | ||
7498 | /* Parser default initialization */ | |
7499 | err = mvpp2_prs_default_init(pdev, priv); | |
7500 | if (err < 0) | |
7501 | return err; | |
7502 | ||
7503 | /* Classifier default initialization */ | |
7504 | mvpp2_cls_init(priv); | |
7505 | ||
7506 | return 0; | |
7507 | } | |
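/* Editor's summary (derived from mvpp2_init() above): the common HW init
 * proceeds as MBUS windows (if provided) -> AXI attributes (PPv2.2+) ->
 * disable HW PHY polling -> per-thread aggregated TXQs -> RX/TX FIFO
 * layout -> TX cache snoop -> buffer manager -> parser -> classifier.
 */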
7508 | ||
e54ad1e0 SC |
7509 | static int mvpp2_get_sram(struct platform_device *pdev, |
7510 | struct mvpp2 *priv) | |
7511 | { | |
7512 | struct resource *res; | |
cbe86768 | 7513 | void __iomem *base; |
e54ad1e0 SC |
7514 | |
7515 | res = platform_get_resource(pdev, IORESOURCE_MEM, 2); | |
7516 | if (!res) { | |
7517 | if (has_acpi_companion(&pdev->dev)) | |
7518 | dev_warn(&pdev->dev, "ACPI is too old, Flow control not supported\n"); | |
7519 | else | |
7520 | dev_warn(&pdev->dev, "DT is too old, Flow control not supported\n"); | |
7521 | return 0; | |
7522 | } | |
7523 | ||
cbe86768 HT |
7524 | base = devm_ioremap_resource(&pdev->dev, res); |
7525 | if (IS_ERR(base)) | |
7526 | return PTR_ERR(base); | |
e54ad1e0 | 7527 | |
cbe86768 HT |
7528 | priv->cm3_base = base; |
7529 | return 0; | |
e54ad1e0 SC |
7530 | } |
7531 | ||
3f518509 MW |
7532 | static int mvpp2_probe(struct platform_device *pdev) |
7533 | { | |
3f518509 MW |
7534 | struct mvpp2 *priv; |
7535 | struct resource *res; | |
a786841d | 7536 | void __iomem *base; |
e531f767 | 7537 | int i, shared; |
9ca5e767 | 7538 | int err; |
3f518509 | 7539 | |
0b92e594 | 7540 | priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); |
3f518509 MW |
7541 | if (!priv) |
7542 | return -ENOMEM; | |
7543 | ||
692b82c5 | 7544 | priv->hw_version = (unsigned long)device_get_match_data(&pdev->dev); |
faca9247 | 7545 | |
1e27a628 MC |
7546 | /* Multi-queue mode isn't supported on PPv2.1, fall back to single | |
7547 | * queue mode | |
7548 | */ | |
7549 | if (priv->hw_version == MVPP21) | |
7550 | queue_mode = MVPP2_QDIST_SINGLE_MODE; | |
7551 | ||
3230a55b | 7552 | base = devm_platform_ioremap_resource(pdev, 0); |
a786841d TP |
7553 | if (IS_ERR(base)) |
7554 | return PTR_ERR(base); | |
7555 | ||
7556 | if (priv->hw_version == MVPP21) { | |
3230a55b | 7557 | priv->lms_base = devm_platform_ioremap_resource(pdev, 1); |
a786841d TP |
7558 | if (IS_ERR(priv->lms_base)) |
7559 | return PTR_ERR(priv->lms_base); | |
7560 | } else { | |
7561 | res = platform_get_resource(pdev, IORESOURCE_MEM, 1); | |
0bb51a3a YY |
7562 | if (!res) { |
7563 | dev_err(&pdev->dev, "Invalid resource\n"); | |
7564 | return -EINVAL; | |
7565 | } | |
a75edc7c MW |
7566 | if (has_acpi_companion(&pdev->dev)) { |
7567 | /* In case the MDIO memory region is declared in | |
7568 | * the ACPI tables, it can already appear as 'in use' | |
7569 | * in the OS. Because it is overlapped by the second | |
7570 | * region of the network controller, make | |
7571 | * sure it is released before requesting it again. | |
7572 | * The mvpp2 driver takes care to avoid | |
7573 | * concurrent access to this memory region. | |
7574 | */ | |
7575 | release_resource(res); | |
7576 | } | |
a786841d TP |
7577 | priv->iface_base = devm_ioremap_resource(&pdev->dev, res); |
7578 | if (IS_ERR(priv->iface_base)) | |
7579 | return PTR_ERR(priv->iface_base); | |
e54ad1e0 SC |
7580 | |
7581 | /* Map CM3 SRAM */ | |
7582 | err = mvpp2_get_sram(pdev, priv); | |
7583 | if (err) | |
7584 | dev_warn(&pdev->dev, "Failed to allocate CM3 SRAM\n"); |
a59d3542 SC |
7585 | |
7586 | /* Enable global flow control only if the CM3 SRAM was successfully mapped */ | |
7587 | if (priv->cm3_base) | |
7588 | priv->global_tx_fc = true; | |
a75edc7c | 7589 | } |
f84bf386 | 7590 | |
f704177e | 7591 | if (priv->hw_version >= MVPP22 && dev_of_node(&pdev->dev)) { |
f84bf386 AT |
7592 | priv->sysctrl_base = |
7593 | syscon_regmap_lookup_by_phandle(pdev->dev.of_node, | |
7594 | "marvell,system-controller"); | |
7595 | if (IS_ERR(priv->sysctrl_base)) | |
7596 | /* The system controller regmap is optional for dt | |
7597 | * compatibility reasons. When not provided, the | |
7598 | * configuration of the GoP relies on the | |
7599 | * firmware/bootloader. | |
7600 | */ | |
7601 | priv->sysctrl_base = NULL; | |
a786841d TP |
7602 | } |
7603 | ||
f704177e | 7604 | if (priv->hw_version >= MVPP22 && |
7d04b0b1 MC |
7605 | mvpp2_get_nrxqs(priv) * 2 <= MVPP2_BM_MAX_POOLS) |
7606 | priv->percpu_pools = 1; | |
7607 | ||
01d04936 SC |
7608 | mvpp2_setup_bm_pool(); |
7609 | ||
e531f767 AT |
7610 | |
7611 | priv->nthreads = min_t(unsigned int, num_present_cpus(), | |
7612 | MVPP2_MAX_THREADS); | |
7613 | ||
7614 | shared = num_present_cpus() - priv->nthreads; | |
7615 | if (shared > 0) | |
b83f5ac7 | 7616 | bitmap_set(&priv->lock_map, 0, |
e531f767 AT |
7617 | min_t(int, shared, MVPP2_MAX_THREADS)); |
7618 | ||
df089aa0 | 7619 | for (i = 0; i < MVPP2_MAX_THREADS; i++) { |
a786841d TP |
7620 | u32 addr_space_sz; |
7621 | ||
7622 | addr_space_sz = (priv->hw_version == MVPP21 ? | |
7623 | MVPP21_ADDR_SPACE_SZ : MVPP22_ADDR_SPACE_SZ); | |
df089aa0 | 7624 | priv->swth_base[i] = base + i * addr_space_sz; |
a786841d | 7625 | } |
3f518509 | 7626 | |
59b9a31e TP |
7627 | if (priv->hw_version == MVPP21) |
7628 | priv->max_port_rxqs = 8; | |
7629 | else | |
7630 | priv->max_port_rxqs = 32; | |
7631 | ||
a75edc7c MW |
7632 | if (dev_of_node(&pdev->dev)) { |
7633 | priv->pp_clk = devm_clk_get(&pdev->dev, "pp_clk"); | |
7634 | if (IS_ERR(priv->pp_clk)) | |
7635 | return PTR_ERR(priv->pp_clk); | |
7636 | err = clk_prepare_enable(priv->pp_clk); | |
7637 | if (err < 0) | |
7638 | return err; | |
3f518509 | 7639 | |
a75edc7c MW |
7640 | priv->gop_clk = devm_clk_get(&pdev->dev, "gop_clk"); |
7641 | if (IS_ERR(priv->gop_clk)) { | |
7642 | err = PTR_ERR(priv->gop_clk); | |
7643 | goto err_pp_clk; | |
fceb55d4 | 7644 | } |
a75edc7c | 7645 | err = clk_prepare_enable(priv->gop_clk); |
fceb55d4 | 7646 | if (err < 0) |
a75edc7c MW |
7647 | goto err_pp_clk; |
7648 | ||
f704177e | 7649 | if (priv->hw_version >= MVPP22) { |
a75edc7c MW |
7650 | priv->mg_clk = devm_clk_get(&pdev->dev, "mg_clk"); |
7651 | if (IS_ERR(priv->mg_clk)) { | |
7652 | err = PTR_ERR(priv->mg_clk); | |
7653 | goto err_gop_clk; | |
7654 | } | |
7655 | ||
7656 | err = clk_prepare_enable(priv->mg_clk); | |
7657 | if (err < 0) | |
7658 | goto err_gop_clk; | |
9af771ce | 7659 | |
cf3399b7 | 7660 | priv->mg_core_clk = devm_clk_get_optional(&pdev->dev, "mg_core_clk"); |
9af771ce | 7661 | if (IS_ERR(priv->mg_core_clk)) { |
cf3399b7 AS |
7662 | err = PTR_ERR(priv->mg_core_clk); |
7663 | goto err_mg_clk; | |
9af771ce | 7664 | } |
cf3399b7 AS |
7665 | |
7666 | err = clk_prepare_enable(priv->mg_core_clk); | |
7667 | if (err < 0) | |
7668 | goto err_mg_clk; | |
a75edc7c | 7669 | } |
4792ea04 | 7670 | |
cf3399b7 | 7671 | priv->axi_clk = devm_clk_get_optional(&pdev->dev, "axi_clk"); |
4792ea04 GC |
7672 | if (IS_ERR(priv->axi_clk)) { |
7673 | err = PTR_ERR(priv->axi_clk); | |
cf3399b7 | 7674 | goto err_mg_core_clk; |
4792ea04 | 7675 | } |
fceb55d4 | 7676 | |
cf3399b7 AS |
7677 | err = clk_prepare_enable(priv->axi_clk); |
7678 | if (err < 0) | |
7679 | goto err_mg_core_clk; | |
7680 | ||
a75edc7c MW |
7681 | /* Get system's tclk rate */ |
7682 | priv->tclk = clk_get_rate(priv->pp_clk); | |
58452555 AS |
7683 | } else { |
7684 | err = device_property_read_u32(&pdev->dev, "clock-frequency", &priv->tclk); | |
7685 | if (err) { | |
7686 | dev_err(&pdev->dev, "missing clock-frequency value\n"); | |
7687 | return err; | |
7688 | } | |
a75edc7c | 7689 | } |
3f518509 | 7690 | |
f704177e | 7691 | if (priv->hw_version >= MVPP22) { |
da42bb27 | 7692 | err = dma_set_mask(&pdev->dev, MVPP2_DESC_DMA_MASK); |
2067e0a1 | 7693 | if (err) |
45f972ad | 7694 | goto err_axi_clk; |
2067e0a1 TP |
7695 | /* Sadly, the BM pools all share the same register to |
7696 | * store the high 32 bits of their address. So they | |
7697 | * must all have the same high 32 bits, which forces | |
7698 | * us to restrict coherent memory to DMA_BIT_MASK(32). | |
7699 | */ | |
7700 | err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)); | |
7701 | if (err) | |
45f972ad | 7702 | goto err_axi_clk; |
2067e0a1 TP |
7703 | } |
7704 | ||
9a71baf7 | 7705 | /* Map the ports that are active in the DT/ACPI description. Must be done before the FIFO setup in mvpp2_init() */ |
a7b32744 | 7706 | device_for_each_child_node_scoped(&pdev->dev, port_fwnode) { |
9a71baf7 SC |
7707 | if (!fwnode_property_read_u32(port_fwnode, "port-id", &i)) |
7708 | priv->port_map |= BIT(i); | |
7709 | } | |
7710 | ||
8b986866 SC |
7711 | if (mvpp2_read(priv, MVPP2_VER_ID_REG) == MVPP2_VER_PP23) |
7712 | priv->hw_version = MVPP23; | |
6af27a1d | 7713 | |
96844075 | 7714 | /* Init locks for shared packet processor resources */ |
3bd17fdc | 7715 | spin_lock_init(&priv->mss_spinlock); |
96844075 | 7716 | spin_lock_init(&priv->prs_spinlock); |
3bd17fdc | 7717 | |
3f518509 MW |
7718 | /* Initialize network controller */ |
7719 | err = mvpp2_init(pdev, priv); | |
7720 | if (err < 0) { | |
7721 | dev_err(&pdev->dev, "failed to initialize controller\n"); | |
45f972ad | 7722 | goto err_axi_clk; |
3f518509 MW |
7723 | } |
7724 | ||
91dd7195 RK |
7725 | err = mvpp22_tai_probe(&pdev->dev, priv); |
7726 | if (err < 0) | |
7727 | goto err_axi_clk; | |
7728 | ||
3f518509 | 7729 | /* Initialize ports */ |
a7b32744 | 7730 | device_for_each_child_node_scoped(&pdev->dev, port_fwnode) { |
24812221 | 7731 | err = mvpp2_port_probe(pdev, port_fwnode, priv); |
3f518509 | 7732 | if (err < 0) |
26146b0e | 7733 | goto err_port_probe; |
bf147153 MW |
7734 | } |
7735 | ||
7736 | if (priv->port_count == 0) { | |
7737 | dev_err(&pdev->dev, "no ports enabled\n"); | |
7738 | err = -ENODEV; | |
45f972ad | 7739 | goto err_axi_clk; |
3f518509 MW |
7740 | } |
7741 | ||
118d6298 MR |
7742 | /* Statistics must be gathered regularly because some of them (like |
7743 | * packet counters) are 32-bit registers and could overflow quite | |
7744 | * quickly. For instance, a 10Gb link used at full bandwidth with the | |
7745 | * smallest packets (64B) will overflow a 32-bit counter in less than | |
7746 | * 30 seconds. Hence, use a workqueue to fill 64-bit counters. | |
7747 | */ | |
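	/* Editor's arithmetic for the comment above (illustrative): at
	 * 10 Gb/s line rate a 32-bit octet counter sees 1.25 GB/s and
	 * wraps after 2^32 / 1.25e9 ~= 3.4 s; a packet counter at 64B
	 * frames (84B on the wire, ~14.88 Mpps) wraps after ~289 s.
	 */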
118d6298 MR |
7748 | snprintf(priv->queue_name, sizeof(priv->queue_name), |
7749 | "stats-wq-%s%s", netdev_name(priv->port_list[0]->dev), | |
7750 | priv->port_count > 1 ? "+" : ""); | |
7751 | priv->stats_queue = create_singlethread_workqueue(priv->queue_name); | |
7752 | if (!priv->stats_queue) { | |
7753 | err = -ENOMEM; | |
26146b0e | 7754 | goto err_port_probe; |
118d6298 MR |
7755 | } |
7756 | ||
f704177e | 7757 | if (priv->global_tx_fc && priv->hw_version >= MVPP22) { |
9ca5e767 SC |
7758 | err = mvpp2_enable_global_fc(priv); |
7759 | if (err) | |
7760 | dev_warn(&pdev->dev, "Minimum of CM3 firmware 18.09 and chip revision B0 required for flow control\n"); | |
a59d3542 SC |
7761 | } |
7762 | ||
21da57a2 MC |
7763 | mvpp2_dbgfs_init(priv, pdev->name); |
7764 | ||
3f518509 MW |
7765 | platform_set_drvdata(pdev, priv); |
7766 | return 0; | |
7767 | ||
26146b0e | 7768 | err_port_probe: |
e81d00a6 JC |
7769 | for (i = 0; i < priv->port_count; i++) |
7770 | mvpp2_port_remove(priv->port_list[i]); | |
45f972ad | 7771 | err_axi_clk: |
4792ea04 | 7772 | clk_disable_unprepare(priv->axi_clk); |
9af771ce | 7773 | err_mg_core_clk: |
cf3399b7 | 7774 | clk_disable_unprepare(priv->mg_core_clk); |
45f972ad | 7775 | err_mg_clk: |
cf3399b7 | 7776 | clk_disable_unprepare(priv->mg_clk); |
3f518509 MW |
7777 | err_gop_clk: |
7778 | clk_disable_unprepare(priv->gop_clk); | |
7779 | err_pp_clk: | |
7780 | clk_disable_unprepare(priv->pp_clk); | |
7781 | return err; | |
7782 | } | |
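/* Editor's sketch (not driver code): the error path above is the usual
 * kernel goto-ladder, unwinding resources in strict reverse order of
 * acquisition. A minimal generic model of the pattern, reusing the clock
 * names from the probe function purely as labels:
 */
#include <stdio.h>

static int acquire(const char *name) { printf("acquire %s\n", name); return 0; }
static void release(const char *name) { printf("release %s\n", name); }

static int probe_sketch(void)
{
	int err;

	err = acquire("pp_clk");
	if (err)
		return err;
	err = acquire("gop_clk");
	if (err)
		goto err_pp_clk;
	err = acquire("mg_clk");
	if (err)
		goto err_gop_clk;
	return 0;

err_gop_clk:
	release("gop_clk");
err_pp_clk:
	release("pp_clk");
	return err;
}

int main(void)
{
	return probe_sketch();
}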
7783 | ||
fee02f49 | 7784 | static void mvpp2_remove(struct platform_device *pdev) |
3f518509 MW |
7785 | { |
7786 | struct mvpp2 *priv = platform_get_drvdata(pdev); | |
e81d00a6 | 7787 | int i, poolnum = MVPP2_BM_POOLS_NUM; |
3f518509 | 7788 | |
21da57a2 MC |
7789 | mvpp2_dbgfs_cleanup(priv); |
7790 | ||
e81d00a6 JC |
7791 | for (i = 0; i < priv->port_count; i++) { |
7792 | mutex_destroy(&priv->port_list[i]->gather_stats_lock); | |
7793 | mvpp2_port_remove(priv->port_list[i]); | |
3f518509 MW |
7794 | } |
7795 | ||
944a83a2 MC |
7796 | destroy_workqueue(priv->stats_queue); |
7797 | ||
807eaf99 SA |
7798 | if (priv->percpu_pools) |
7799 | poolnum = mvpp2_get_nrxqs(priv) * 2; | |
7800 | ||
7801 | for (i = 0; i < poolnum; i++) { | |
3f518509 MW |
7802 | struct mvpp2_bm_pool *bm_pool = &priv->bm_pools[i]; |
7803 | ||
13616361 | 7804 | mvpp2_bm_pool_destroy(&pdev->dev, priv, bm_pool); |
3f518509 MW |
7805 | } |
7806 | ||
074c74df | 7807 | for (i = 0; i < MVPP2_MAX_THREADS; i++) { |
3f518509 MW |
7808 | struct mvpp2_tx_queue *aggr_txq = &priv->aggr_txqs[i]; |
7809 | ||
7810 | dma_free_coherent(&pdev->dev, | |
7811 | MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE, | |
7812 | aggr_txq->descs, | |
20396136 | 7813 | aggr_txq->descs_dma); |
3f518509 MW |
7814 | } |
7815 | ||
e81d00a6 | 7816 | if (!dev_of_node(&pdev->dev)) |
fee02f49 | 7817 | return; |
a75edc7c | 7818 | |
4792ea04 | 7819 | clk_disable_unprepare(priv->axi_clk); |
9af771ce | 7820 | clk_disable_unprepare(priv->mg_core_clk); |
fceb55d4 | 7821 | clk_disable_unprepare(priv->mg_clk); |
3f518509 MW |
7822 | clk_disable_unprepare(priv->pp_clk); |
7823 | clk_disable_unprepare(priv->gop_clk); | |
3f518509 MW |
7824 | } |
7825 | ||
7826 | static const struct of_device_id mvpp2_match[] = { | |
faca9247 TP |
7827 | { |
7828 | .compatible = "marvell,armada-375-pp2", | |
7829 | .data = (void *)MVPP21, | |
7830 | }, | |
fc5e1550 TP |
7831 | { |
7832 | .compatible = "marvell,armada-7k-pp22", | |
7833 | .data = (void *)MVPP22, | |
7834 | }, | |
3f518509 MW |
7835 | { } |
7836 | }; | |
7837 | MODULE_DEVICE_TABLE(of, mvpp2_match); | |
7838 | ||
36563ce6 | 7839 | #ifdef CONFIG_ACPI |
a75edc7c MW |
7840 | static const struct acpi_device_id mvpp2_acpi_match[] = { |
7841 | { "MRVL0110", MVPP22 }, | |
7842 | { }, | |
7843 | }; | |
7844 | MODULE_DEVICE_TABLE(acpi, mvpp2_acpi_match); | |
36563ce6 | 7845 | #endif |
a75edc7c | 7846 | |
3f518509 MW |
7847 | static struct platform_driver mvpp2_driver = { |
7848 | .probe = mvpp2_probe, | |
e96321fa | 7849 | .remove = mvpp2_remove, |
3f518509 MW |
7850 | .driver = { |
7851 | .name = MVPP2_DRIVER_NAME, | |
7852 | .of_match_table = mvpp2_match, | |
a75edc7c | 7853 | .acpi_match_table = ACPI_PTR(mvpp2_acpi_match), |
3f518509 MW |
7854 | }, |
7855 | }; | |
7856 | ||
0152dfee RKO |
7857 | static int __init mvpp2_driver_init(void) |
7858 | { | |
7859 | return platform_driver_register(&mvpp2_driver); | |
7860 | } | |
7861 | module_init(mvpp2_driver_init); | |
7862 | ||
7863 | static void __exit mvpp2_driver_exit(void) | |
7864 | { | |
7865 | platform_driver_unregister(&mvpp2_driver); | |
7866 | mvpp2_dbgfs_exit(); | |
7867 | } | |
7868 | module_exit(mvpp2_driver_exit); | |
3f518509 MW |
7869 | |
7870 | MODULE_DESCRIPTION("Marvell PPv2 Ethernet Driver - www.marvell.com"); | |
7871 | MODULE_AUTHOR("Marcin Wojtas <mw@semihalf.com>"); | |
c634099d | 7872 | MODULE_LICENSE("GPL v2"); |