// SPDX-License-Identifier: GPL-2.0
/*
 * Driver for Marvell PPv2 network controller for Armada 375 SoC.
 *
 * Copyright (C) 2014 Marvell
 *
 * Marcin Wojtas <mw@semihalf.com>
 */

#include <linux/acpi.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/platform_device.h>
#include <linux/skbuff.h>
#include <linux/inetdevice.h>
#include <linux/mbus.h>
#include <linux/module.h>
#include <linux/mfd/syscon.h>
#include <linux/interrupt.h>
#include <linux/cpumask.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/of_address.h>
#include <linux/phy.h>
#include <linux/phylink.h>
#include <linux/phy/phy.h>
#include <linux/ptp_classify.h>
#include <linux/clk.h>
#include <linux/hrtimer.h>
#include <linux/ktime.h>
#include <linux/regmap.h>
#include <uapi/linux/ppp_defs.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/page_pool/helpers.h>
#include <net/tso.h>
#include <linux/bpf_trace.h>

#include "mvpp2.h"
#include "mvpp2_prs.h"
#include "mvpp2_cls.h"
enum mvpp2_bm_pool_log_num {
	MVPP2_BM_SHORT,
	MVPP2_BM_LONG,
	MVPP2_BM_JUMBO,
	MVPP2_BM_POOLS_NUM
};

static struct {
	int pkt_size;
	int buf_num;
} mvpp2_pools[MVPP2_BM_POOLS_NUM];
/* The prototype is added here to be used in start_dev when using ACPI. This
 * will be removed once phylink is used for all modes (dt+ACPI).
 */
static void mvpp2_acpi_start(struct mvpp2_port *port);
/* Queue modes */
#define MVPP2_QDIST_SINGLE_MODE	0
#define MVPP2_QDIST_MULTI_MODE	1

static int queue_mode = MVPP2_QDIST_MULTI_MODE;

module_param(queue_mode, int, 0444);
MODULE_PARM_DESC(queue_mode, "Set queue_mode (single=0, multi=1)");
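/* Usage note (illustrative, not from the original sources): queue_mode is
 * read-only at runtime (permissions 0444), so it can only be chosen when
 * the module is loaded, e.g.:
 *
 *	modprobe mvpp2 queue_mode=0
 *
 * and inspected afterwards via /sys/module/mvpp2/parameters/queue_mode.
 */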
/* Utility/helper methods */

void mvpp2_write(struct mvpp2 *priv, u32 offset, u32 data)
{
	writel(data, priv->swth_base[0] + offset);
}

u32 mvpp2_read(struct mvpp2 *priv, u32 offset)
{
	return readl(priv->swth_base[0] + offset);
}

static u32 mvpp2_read_relaxed(struct mvpp2 *priv, u32 offset)
{
	return readl_relaxed(priv->swth_base[0] + offset);
}

static inline u32 mvpp2_cpu_to_thread(struct mvpp2 *priv, int cpu)
{
	return cpu % priv->nthreads;
}

static void mvpp2_cm3_write(struct mvpp2 *priv, u32 offset, u32 data)
{
	writel(data, priv->cm3_base + offset);
}

static u32 mvpp2_cm3_read(struct mvpp2 *priv, u32 offset)
{
	return readl(priv->cm3_base + offset);
}
static struct page_pool *
mvpp2_create_page_pool(struct device *dev, int num, int len,
		       enum dma_data_direction dma_dir)
{
	struct page_pool_params pp_params = {
		/* internal DMA mapping in page_pool */
		.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
		.pool_size = num,
		.nid = NUMA_NO_NODE,
		.dev = dev,
		.dma_dir = dma_dir,
		.offset = MVPP2_SKB_HEADROOM,
		.max_len = len,
	};

	return page_pool_create(&pp_params);
}
/* These accessors should be used to access:
 *
 * - per-thread registers, where each thread has its own copy of the
 *   register.
 *
 *   MVPP2_BM_VIRT_ALLOC_REG
 *   MVPP2_BM_ADDR_HIGH_ALLOC
 *   MVPP22_BM_ADDR_HIGH_RLS_REG
 *   MVPP2_BM_VIRT_RLS_REG
 *   MVPP2_ISR_RX_TX_CAUSE_REG
 *   MVPP2_ISR_RX_TX_MASK_REG
 *   MVPP2_TXQ_SENT_REG
 *   MVPP2_AGGR_TXQ_UPDATE_REG
 *   MVPP2_TXQ_RSVD_REQ_REG
 *   MVPP2_TXQ_RSVD_RSLT_REG
 *
 * - global registers that must be accessed through a specific thread
 *   window, because they are related to an access to a per-thread
 *   register
 *
 *   MVPP2_BM_PHY_ALLOC_REG    (related to MVPP2_BM_VIRT_ALLOC_REG)
 *   MVPP2_BM_PHY_RLS_REG      (related to MVPP2_BM_VIRT_RLS_REG)
 *   MVPP2_RXQ_THRESH_REG      (related to MVPP2_RXQ_NUM_REG)
 *   MVPP2_RXQ_DESC_ADDR_REG   (related to MVPP2_RXQ_NUM_REG)
 *   MVPP2_RXQ_DESC_SIZE_REG   (related to MVPP2_RXQ_NUM_REG)
 *   MVPP2_RXQ_INDEX_REG       (related to MVPP2_RXQ_NUM_REG)
 *   MVPP2_TXQ_PENDING_REG     (related to MVPP2_TXQ_NUM_REG)
 *   MVPP2_TXQ_DESC_ADDR_REG   (related to MVPP2_TXQ_NUM_REG)
 *   MVPP2_TXQ_DESC_SIZE_REG   (related to MVPP2_TXQ_NUM_REG)
 *   MVPP2_TXQ_INDEX_REG       (related to MVPP2_TXQ_NUM_REG)
 *   MVPP2_TXQ_PREF_BUF_REG    (related to MVPP2_TXQ_NUM_REG)
 */
static void mvpp2_thread_write(struct mvpp2 *priv, unsigned int thread,
			       u32 offset, u32 data)
{
	writel(data, priv->swth_base[thread] + offset);
}

static u32 mvpp2_thread_read(struct mvpp2 *priv, unsigned int thread,
			     u32 offset)
{
	return readl(priv->swth_base[thread] + offset);
}

static void mvpp2_thread_write_relaxed(struct mvpp2 *priv, unsigned int thread,
				       u32 offset, u32 data)
{
	writel_relaxed(data, priv->swth_base[thread] + offset);
}

static u32 mvpp2_thread_read_relaxed(struct mvpp2 *priv, unsigned int thread,
				     u32 offset)
{
	return readl_relaxed(priv->swth_base[thread] + offset);
}
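/* Illustrative sketch (not part of the driver): accessing one of the
 * "global registers behind a thread window" listed above. The queue number
 * is first latched into MVPP2_RXQ_NUM_REG through a given thread's window,
 * then the related register is accessed through the same window, exactly as
 * the RXQ init code does later in this file. Locking is omitted here.
 */
static inline u32 mvpp2_example_rxq_reg_read(struct mvpp2 *priv,
					     unsigned int thread,
					     int rxq, u32 reg)
{
	/* Select the RX queue, then read the queue-related register */
	mvpp2_thread_write(priv, thread, MVPP2_RXQ_NUM_REG, rxq);
	return mvpp2_thread_read(priv, thread, reg);
}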
static dma_addr_t mvpp2_txdesc_dma_addr_get(struct mvpp2_port *port,
					    struct mvpp2_tx_desc *tx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return le32_to_cpu(tx_desc->pp21.buf_dma_addr);
	else
		return le64_to_cpu(tx_desc->pp22.buf_dma_addr_ptp) &
		       MVPP2_DESC_DMA_MASK;
}

static void mvpp2_txdesc_dma_addr_set(struct mvpp2_port *port,
				      struct mvpp2_tx_desc *tx_desc,
				      dma_addr_t dma_addr)
{
	dma_addr_t addr, offset;

	addr = dma_addr & ~MVPP2_TX_DESC_ALIGN;
	offset = dma_addr & MVPP2_TX_DESC_ALIGN;

	if (port->priv->hw_version == MVPP21) {
		tx_desc->pp21.buf_dma_addr = cpu_to_le32(addr);
		tx_desc->pp21.packet_offset = offset;
	} else {
		__le64 val = cpu_to_le64(addr);

		tx_desc->pp22.buf_dma_addr_ptp &= ~cpu_to_le64(MVPP2_DESC_DMA_MASK);
		tx_desc->pp22.buf_dma_addr_ptp |= val;
		tx_desc->pp22.packet_offset = offset;
	}
}

static size_t mvpp2_txdesc_size_get(struct mvpp2_port *port,
				    struct mvpp2_tx_desc *tx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return le16_to_cpu(tx_desc->pp21.data_size);
	else
		return le16_to_cpu(tx_desc->pp22.data_size);
}

static void mvpp2_txdesc_size_set(struct mvpp2_port *port,
				  struct mvpp2_tx_desc *tx_desc,
				  size_t size)
{
	if (port->priv->hw_version == MVPP21)
		tx_desc->pp21.data_size = cpu_to_le16(size);
	else
		tx_desc->pp22.data_size = cpu_to_le16(size);
}

static void mvpp2_txdesc_txq_set(struct mvpp2_port *port,
				 struct mvpp2_tx_desc *tx_desc,
				 unsigned int txq)
{
	if (port->priv->hw_version == MVPP21)
		tx_desc->pp21.phys_txq = txq;
	else
		tx_desc->pp22.phys_txq = txq;
}

static void mvpp2_txdesc_cmd_set(struct mvpp2_port *port,
				 struct mvpp2_tx_desc *tx_desc,
				 unsigned int command)
{
	if (port->priv->hw_version == MVPP21)
		tx_desc->pp21.command = cpu_to_le32(command);
	else
		tx_desc->pp22.command = cpu_to_le32(command);
}

static unsigned int mvpp2_txdesc_offset_get(struct mvpp2_port *port,
					    struct mvpp2_tx_desc *tx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return tx_desc->pp21.packet_offset;
	else
		return tx_desc->pp22.packet_offset;
}

static dma_addr_t mvpp2_rxdesc_dma_addr_get(struct mvpp2_port *port,
					    struct mvpp2_rx_desc *rx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return le32_to_cpu(rx_desc->pp21.buf_dma_addr);
	else
		return le64_to_cpu(rx_desc->pp22.buf_dma_addr_key_hash) &
		       MVPP2_DESC_DMA_MASK;
}

static unsigned long mvpp2_rxdesc_cookie_get(struct mvpp2_port *port,
					     struct mvpp2_rx_desc *rx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return le32_to_cpu(rx_desc->pp21.buf_cookie);
	else
		return le64_to_cpu(rx_desc->pp22.buf_cookie_misc) &
		       MVPP2_DESC_DMA_MASK;
}

static size_t mvpp2_rxdesc_size_get(struct mvpp2_port *port,
				    struct mvpp2_rx_desc *rx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return le16_to_cpu(rx_desc->pp21.data_size);
	else
		return le16_to_cpu(rx_desc->pp22.data_size);
}

static u32 mvpp2_rxdesc_status_get(struct mvpp2_port *port,
				   struct mvpp2_rx_desc *rx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return le32_to_cpu(rx_desc->pp21.status);
	else
		return le32_to_cpu(rx_desc->pp22.status);
}
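/* Taken together, the accessors above hide the PPv2.1 vs PPv2.2+ descriptor
 * layouts (32-bit vs 40-bit DMA addresses, the latter sharing a field with
 * PTP bits). A typical TX-path caller fills a descriptor purely through
 * them, e.g. (illustrative, mirroring what the xmit code in this driver
 * does):
 *
 *	mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
 *	mvpp2_txdesc_size_set(port, tx_desc, skb_headlen(skb));
 *	mvpp2_txdesc_dma_addr_set(port, tx_desc, buf_dma_addr);
 *	mvpp2_txdesc_cmd_set(port, tx_desc, tx_cmd);
 */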
static void mvpp2_txq_inc_get(struct mvpp2_txq_pcpu *txq_pcpu)
{
	txq_pcpu->txq_get_index++;
	if (txq_pcpu->txq_get_index == txq_pcpu->size)
		txq_pcpu->txq_get_index = 0;
}

static void mvpp2_txq_inc_put(struct mvpp2_port *port,
			      struct mvpp2_txq_pcpu *txq_pcpu,
			      void *data,
			      struct mvpp2_tx_desc *tx_desc,
			      enum mvpp2_tx_buf_type buf_type)
{
	struct mvpp2_txq_pcpu_buf *tx_buf =
		txq_pcpu->buffs + txq_pcpu->txq_put_index;
	tx_buf->type = buf_type;
	if (buf_type == MVPP2_TYPE_SKB)
		tx_buf->skb = data;
	else
		tx_buf->xdpf = data;
	tx_buf->size = mvpp2_txdesc_size_get(port, tx_desc);
	tx_buf->dma = mvpp2_txdesc_dma_addr_get(port, tx_desc) +
		mvpp2_txdesc_offset_get(port, tx_desc);
	txq_pcpu->txq_put_index++;
	if (txq_pcpu->txq_put_index == txq_pcpu->size)
		txq_pcpu->txq_put_index = 0;
}
/* Get number of maximum RXQ */
static int mvpp2_get_nrxqs(struct mvpp2 *priv)
{
	unsigned int nrxqs;

	if (priv->hw_version >= MVPP22 && queue_mode == MVPP2_QDIST_SINGLE_MODE)
		return 1;

	/* According to the PPv2.2 datasheet and our experiments on
	 * PPv2.1, RX queues have an allocation granularity of 4 (when
	 * more than a single one on PPv2.2).
	 * Round up to nearest multiple of 4.
	 */
	nrxqs = (num_possible_cpus() + 3) & ~0x3;
	if (nrxqs > MVPP2_PORT_MAX_RXQ)
		nrxqs = MVPP2_PORT_MAX_RXQ;

	return nrxqs;
}
/* Get number of physical egress port */
static inline int mvpp2_egress_port(struct mvpp2_port *port)
{
	return MVPP2_MAX_TCONT + port->id;
}

/* Get number of physical TXQ */
static inline int mvpp2_txq_phys(int port, int txq)
{
	return (MVPP2_MAX_TCONT + port) * MVPP2_MAX_TXQ + txq;
}
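/* Worked example (hedged: assumes the MVPP2_MAX_TCONT and MVPP2_MAX_TXQ
 * values defined in mvpp2.h): with MVPP2_MAX_TCONT = 16 and
 * MVPP2_MAX_TXQ = 8, logical txq 3 of port 1 maps to physical TXQ
 * (16 + 1) * 8 + 3 = 139.
 */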
/* Returns a struct page if page_pool is set, otherwise a buffer */
static void *mvpp2_frag_alloc(const struct mvpp2_bm_pool *pool,
			      struct page_pool *page_pool)
{
	if (page_pool)
		return page_pool_dev_alloc_pages(page_pool);

	if (likely(pool->frag_size <= PAGE_SIZE))
		return netdev_alloc_frag(pool->frag_size);

	return kmalloc(pool->frag_size, GFP_ATOMIC);
}

static void mvpp2_frag_free(const struct mvpp2_bm_pool *pool,
			    struct page_pool *page_pool, void *data)
{
	if (page_pool)
		page_pool_put_full_page(page_pool, virt_to_head_page(data), false);
	else if (likely(pool->frag_size <= PAGE_SIZE))
		skb_free_frag(data);
	else
		kfree(data);
}
/* Buffer Manager configuration routines */

/* Create pool */
static int mvpp2_bm_pool_create(struct device *dev, struct mvpp2 *priv,
				struct mvpp2_bm_pool *bm_pool, int size)
{
	u32 val;

	/* Number of buffer pointers must be a multiple of 16, as per
	 * hardware constraints
	 */
	if (!IS_ALIGNED(size, 16))
		return -EINVAL;

	/* PPv2.1 needs 8 bytes per buffer pointer, PPv2.2 and PPv2.3 need 16
	 * bytes per buffer pointer
	 */
	if (priv->hw_version == MVPP21)
		bm_pool->size_bytes = 2 * sizeof(u32) * size;
	else
		bm_pool->size_bytes = 2 * sizeof(u64) * size;

	bm_pool->virt_addr = dma_alloc_coherent(dev, bm_pool->size_bytes,
						&bm_pool->dma_addr,
						GFP_KERNEL);
	if (!bm_pool->virt_addr)
		return -ENOMEM;

	if (!IS_ALIGNED((unsigned long)bm_pool->virt_addr,
			MVPP2_BM_POOL_PTR_ALIGN)) {
		dma_free_coherent(dev, bm_pool->size_bytes,
				  bm_pool->virt_addr, bm_pool->dma_addr);
		dev_err(dev, "BM pool %d is not %d bytes aligned\n",
			bm_pool->id, MVPP2_BM_POOL_PTR_ALIGN);
		return -ENOMEM;
	}

	mvpp2_write(priv, MVPP2_BM_POOL_BASE_REG(bm_pool->id),
		    lower_32_bits(bm_pool->dma_addr));
	mvpp2_write(priv, MVPP2_BM_POOL_SIZE_REG(bm_pool->id), size);

	val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id));
	val |= MVPP2_BM_START_MASK;

	val &= ~MVPP2_BM_LOW_THRESH_MASK;
	val &= ~MVPP2_BM_HIGH_THRESH_MASK;

	/* Set 8 Pools BPPI threshold for MVPP23 */
	if (priv->hw_version == MVPP23) {
		val |= MVPP2_BM_LOW_THRESH_VALUE(MVPP23_BM_BPPI_LOW_THRESH);
		val |= MVPP2_BM_HIGH_THRESH_VALUE(MVPP23_BM_BPPI_HIGH_THRESH);
	} else {
		val |= MVPP2_BM_LOW_THRESH_VALUE(MVPP2_BM_BPPI_LOW_THRESH);
		val |= MVPP2_BM_HIGH_THRESH_VALUE(MVPP2_BM_BPPI_HIGH_THRESH);
	}

	mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val);

	bm_pool->size = size;
	bm_pool->pkt_size = 0;
	bm_pool->buf_num = 0;

	return 0;
}
/* Set pool buffer size */
static void mvpp2_bm_pool_bufsize_set(struct mvpp2 *priv,
				      struct mvpp2_bm_pool *bm_pool,
				      int buf_size)
{
	u32 val;

	bm_pool->buf_size = buf_size;

	val = ALIGN(buf_size, 1 << MVPP2_POOL_BUF_SIZE_OFFSET);
	mvpp2_write(priv, MVPP2_POOL_BUF_SIZE_REG(bm_pool->id), val);
}
static void mvpp2_bm_bufs_get_addrs(struct device *dev, struct mvpp2 *priv,
				    struct mvpp2_bm_pool *bm_pool,
				    dma_addr_t *dma_addr,
				    phys_addr_t *phys_addr)
{
	unsigned int thread = mvpp2_cpu_to_thread(priv, get_cpu());

	*dma_addr = mvpp2_thread_read(priv, thread,
				      MVPP2_BM_PHY_ALLOC_REG(bm_pool->id));
	*phys_addr = mvpp2_thread_read(priv, thread, MVPP2_BM_VIRT_ALLOC_REG);

	if (priv->hw_version >= MVPP22) {
		u32 val;
		u32 dma_addr_highbits, phys_addr_highbits;

		val = mvpp2_thread_read(priv, thread, MVPP22_BM_ADDR_HIGH_ALLOC);
		dma_addr_highbits = (val & MVPP22_BM_ADDR_HIGH_PHYS_MASK);
		phys_addr_highbits = (val & MVPP22_BM_ADDR_HIGH_VIRT_MASK) >>
			MVPP22_BM_ADDR_HIGH_VIRT_SHIFT;

		if (sizeof(dma_addr_t) == 8)
			*dma_addr |= (u64)dma_addr_highbits << 32;

		if (sizeof(phys_addr_t) == 8)
			*phys_addr |= (u64)phys_addr_highbits << 32;
	}

	put_cpu();
}
/* Free all buffers from the pool */
static void mvpp2_bm_bufs_free(struct device *dev, struct mvpp2 *priv,
			       struct mvpp2_bm_pool *bm_pool, int buf_num)
{
	struct page_pool *pp = NULL;
	int i;

	if (buf_num > bm_pool->buf_num) {
		WARN(1, "Pool does not have so many bufs pool(%d) bufs(%d)\n",
		     bm_pool->id, buf_num);
		buf_num = bm_pool->buf_num;
	}

	if (priv->percpu_pools)
		pp = priv->page_pool[bm_pool->id];

	for (i = 0; i < buf_num; i++) {
		dma_addr_t buf_dma_addr;
		phys_addr_t buf_phys_addr;
		void *data;

		mvpp2_bm_bufs_get_addrs(dev, priv, bm_pool,
					&buf_dma_addr, &buf_phys_addr);

		if (!pp)
			dma_unmap_single(dev, buf_dma_addr,
					 bm_pool->buf_size, DMA_FROM_DEVICE);

		data = (void *)phys_to_virt(buf_phys_addr);
		if (!data)
			break;

		mvpp2_frag_free(bm_pool, pp, data);
	}

	/* Update BM driver with number of buffers removed from pool */
	bm_pool->buf_num -= i;
}
/* Check number of buffers in BM pool */
static int mvpp2_check_hw_buf_num(struct mvpp2 *priv, struct mvpp2_bm_pool *bm_pool)
{
	int buf_num = 0;

	buf_num += mvpp2_read(priv, MVPP2_BM_POOL_PTRS_NUM_REG(bm_pool->id)) &
				    MVPP22_BM_POOL_PTRS_NUM_MASK;
	buf_num += mvpp2_read(priv, MVPP2_BM_BPPI_PTRS_NUM_REG(bm_pool->id)) &
				    MVPP2_BM_BPPI_PTR_NUM_MASK;

	/* HW has one buffer ready which is not reflected in the counters */
	if (buf_num)
		buf_num += 1;

	return buf_num;
}
/* Cleanup pool */
static int mvpp2_bm_pool_destroy(struct device *dev, struct mvpp2 *priv,
				 struct mvpp2_bm_pool *bm_pool)
{
	int buf_num;
	u32 val;

	buf_num = mvpp2_check_hw_buf_num(priv, bm_pool);
	mvpp2_bm_bufs_free(dev, priv, bm_pool, buf_num);

	/* Check buffer counters after free */
	buf_num = mvpp2_check_hw_buf_num(priv, bm_pool);
	if (buf_num) {
		WARN(1, "cannot free all buffers in pool %d, buf_num left %d\n",
		     bm_pool->id, bm_pool->buf_num);
		return 0;
	}

	val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id));
	val |= MVPP2_BM_STOP_MASK;
	mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val);

	if (priv->percpu_pools) {
		page_pool_destroy(priv->page_pool[bm_pool->id]);
		priv->page_pool[bm_pool->id] = NULL;
	}

	dma_free_coherent(dev, bm_pool->size_bytes,
			  bm_pool->virt_addr,
			  bm_pool->dma_addr);
	return 0;
}
static int mvpp2_bm_pools_init(struct device *dev, struct mvpp2 *priv)
{
	int i, err, size, poolnum = MVPP2_BM_POOLS_NUM;
	struct mvpp2_bm_pool *bm_pool;

	if (priv->percpu_pools)
		poolnum = mvpp2_get_nrxqs(priv) * 2;

	/* Create all pools with maximum size */
	size = MVPP2_BM_POOL_SIZE_MAX;
	for (i = 0; i < poolnum; i++) {
		bm_pool = &priv->bm_pools[i];
		bm_pool->id = i;
		err = mvpp2_bm_pool_create(dev, priv, bm_pool, size);
		if (err)
			goto err_unroll_pools;
		mvpp2_bm_pool_bufsize_set(priv, bm_pool, 0);
	}
	return 0;

err_unroll_pools:
	dev_err(dev, "failed to create BM pool %d, size %d\n", i, size);
	for (i = i - 1; i >= 0; i--)
		mvpp2_bm_pool_destroy(dev, priv, &priv->bm_pools[i]);
	return err;
}
/* Enable PPv23 8-pool mode */
static void mvpp23_bm_set_8pool_mode(struct mvpp2 *priv)
{
	int val;

	val = mvpp2_read(priv, MVPP22_BM_POOL_BASE_ADDR_HIGH_REG);
	val |= MVPP23_BM_8POOL_MODE;
	mvpp2_write(priv, MVPP22_BM_POOL_BASE_ADDR_HIGH_REG, val);
}
static int mvpp2_bm_init(struct device *dev, struct mvpp2 *priv)
{
	enum dma_data_direction dma_dir = DMA_FROM_DEVICE;
	int i, err, poolnum = MVPP2_BM_POOLS_NUM;
	struct mvpp2_port *port;

	if (priv->percpu_pools) {
		for (i = 0; i < priv->port_count; i++) {
			port = priv->port_list[i];
			if (port->xdp_prog) {
				dma_dir = DMA_BIDIRECTIONAL;
				break;
			}
		}

		poolnum = mvpp2_get_nrxqs(priv) * 2;
		for (i = 0; i < poolnum; i++) {
			/* the pool in use */
			int pn = i / (poolnum / 2);

			priv->page_pool[i] =
				mvpp2_create_page_pool(dev,
						       mvpp2_pools[pn].buf_num,
						       mvpp2_pools[pn].pkt_size,
						       dma_dir);
			if (IS_ERR(priv->page_pool[i])) {
				int j;

				for (j = 0; j < i; j++) {
					page_pool_destroy(priv->page_pool[j]);
					priv->page_pool[j] = NULL;
				}
				return PTR_ERR(priv->page_pool[i]);
			}
		}
	}

	dev_info(dev, "using %d %s buffers\n", poolnum,
		 priv->percpu_pools ? "per-cpu" : "shared");

	for (i = 0; i < poolnum; i++) {
		/* Mask BM all interrupts */
		mvpp2_write(priv, MVPP2_BM_INTR_MASK_REG(i), 0);
		/* Clear BM cause register */
		mvpp2_write(priv, MVPP2_BM_INTR_CAUSE_REG(i), 0);
	}

	/* Allocate and initialize BM pools */
	priv->bm_pools = devm_kcalloc(dev, poolnum,
				      sizeof(*priv->bm_pools), GFP_KERNEL);
	if (!priv->bm_pools)
		return -ENOMEM;

	if (priv->hw_version == MVPP23)
		mvpp23_bm_set_8pool_mode(priv);

	err = mvpp2_bm_pools_init(dev, priv);
	if (err < 0)
		return err;
	return 0;
}
static void mvpp2_setup_bm_pool(void)
{
	/* Short pool */
	mvpp2_pools[MVPP2_BM_SHORT].buf_num  = MVPP2_BM_SHORT_BUF_NUM;
	mvpp2_pools[MVPP2_BM_SHORT].pkt_size = MVPP2_BM_SHORT_PKT_SIZE;

	/* Long pool */
	mvpp2_pools[MVPP2_BM_LONG].buf_num  = MVPP2_BM_LONG_BUF_NUM;
	mvpp2_pools[MVPP2_BM_LONG].pkt_size = MVPP2_BM_LONG_PKT_SIZE;

	/* Jumbo pool */
	mvpp2_pools[MVPP2_BM_JUMBO].buf_num  = MVPP2_BM_JUMBO_BUF_NUM;
	mvpp2_pools[MVPP2_BM_JUMBO].pkt_size = MVPP2_BM_JUMBO_PKT_SIZE;
}
/* Attach long pool to rxq */
static void mvpp2_rxq_long_pool_set(struct mvpp2_port *port,
				    int lrxq, int long_pool)
{
	u32 val, mask;
	int prxq;

	/* Get queue physical ID */
	prxq = port->rxqs[lrxq]->id;

	if (port->priv->hw_version == MVPP21)
		mask = MVPP21_RXQ_POOL_LONG_MASK;
	else
		mask = MVPP22_RXQ_POOL_LONG_MASK;

	val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
	val &= ~mask;
	val |= (long_pool << MVPP2_RXQ_POOL_LONG_OFFS) & mask;
	mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
}

/* Attach short pool to rxq */
static void mvpp2_rxq_short_pool_set(struct mvpp2_port *port,
				     int lrxq, int short_pool)
{
	u32 val, mask;
	int prxq;

	/* Get queue physical ID */
	prxq = port->rxqs[lrxq]->id;

	if (port->priv->hw_version == MVPP21)
		mask = MVPP21_RXQ_POOL_SHORT_MASK;
	else
		mask = MVPP22_RXQ_POOL_SHORT_MASK;

	val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
	val &= ~mask;
	val |= (short_pool << MVPP2_RXQ_POOL_SHORT_OFFS) & mask;
	mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
}
static void *mvpp2_buf_alloc(struct mvpp2_port *port,
			     struct mvpp2_bm_pool *bm_pool,
			     struct page_pool *page_pool,
			     dma_addr_t *buf_dma_addr,
			     phys_addr_t *buf_phys_addr,
			     gfp_t gfp_mask)
{
	dma_addr_t dma_addr;
	struct page *page;
	void *data;

	data = mvpp2_frag_alloc(bm_pool, page_pool);
	if (!data)
		return NULL;

	if (page_pool) {
		page = (struct page *)data;
		dma_addr = page_pool_get_dma_addr(page);
		data = page_to_virt(page);
	} else {
		dma_addr = dma_map_single(port->dev->dev.parent, data,
					  MVPP2_RX_BUF_SIZE(bm_pool->pkt_size),
					  DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(port->dev->dev.parent, dma_addr))) {
			mvpp2_frag_free(bm_pool, NULL, data);
			return NULL;
		}
	}
	*buf_dma_addr = dma_addr;
	*buf_phys_addr = virt_to_phys(data);

	return data;
}
/* Enable flow control for RXQs */
static void mvpp2_rxq_enable_fc(struct mvpp2_port *port)
{
	int val, cm3_state, host_id, q;
	int fq = port->first_rxq;
	unsigned long flags;

	spin_lock_irqsave(&port->priv->mss_spinlock, flags);

	/* Remove the Flow control enable bit to prevent a race between FW and
	 * the kernel. If Flow control was enabled, it is re-enabled below.
	 */
	val = mvpp2_cm3_read(port->priv, MSS_FC_COM_REG);
	cm3_state = (val & FLOW_CONTROL_ENABLE_BIT);
	val &= ~FLOW_CONTROL_ENABLE_BIT;
	mvpp2_cm3_write(port->priv, MSS_FC_COM_REG, val);

	/* Set same Flow control for all RXQs */
	for (q = 0; q < port->nrxqs; q++) {
		/* Set stop and start Flow control RXQ thresholds */
		val = MSS_THRESHOLD_START;
		val |= (MSS_THRESHOLD_STOP << MSS_RXQ_TRESH_STOP_OFFS);
		mvpp2_cm3_write(port->priv, MSS_RXQ_TRESH_REG(q, fq), val);

		val = mvpp2_cm3_read(port->priv, MSS_RXQ_ASS_REG(q, fq));
		/* Set RXQ port ID */
		val &= ~(MSS_RXQ_ASS_PORTID_MASK << MSS_RXQ_ASS_Q_BASE(q, fq));
		val |= (port->id << MSS_RXQ_ASS_Q_BASE(q, fq));
		val &= ~(MSS_RXQ_ASS_HOSTID_MASK << (MSS_RXQ_ASS_Q_BASE(q, fq)
			+ MSS_RXQ_ASS_HOSTID_OFFS));

		/* Calculate RXQ host ID:
		 * In Single queue mode: Host ID equal to Host ID used for
		 * shared RX interrupt
		 * In Multi queue mode: Host ID equal to number of
		 * RXQ ID / number of CoS queues
		 * In Single resource mode: Host ID always equal to 0
		 */
		if (queue_mode == MVPP2_QDIST_SINGLE_MODE)
			host_id = port->nqvecs;
		else if (queue_mode == MVPP2_QDIST_MULTI_MODE)
			host_id = q;
		else
			host_id = 0;

		/* Set RXQ host ID */
		val |= (host_id << (MSS_RXQ_ASS_Q_BASE(q, fq)
			+ MSS_RXQ_ASS_HOSTID_OFFS));

		mvpp2_cm3_write(port->priv, MSS_RXQ_ASS_REG(q, fq), val);
	}

	/* Notify Firmware that Flow control config space ready for update */
	val = mvpp2_cm3_read(port->priv, MSS_FC_COM_REG);
	val |= FLOW_CONTROL_UPDATE_COMMAND_BIT;
	val |= cm3_state;
	mvpp2_cm3_write(port->priv, MSS_FC_COM_REG, val);

	spin_unlock_irqrestore(&port->priv->mss_spinlock, flags);
}
/* Disable flow control for RXQs */
static void mvpp2_rxq_disable_fc(struct mvpp2_port *port)
{
	int val, cm3_state, q;
	unsigned long flags;
	int fq = port->first_rxq;

	spin_lock_irqsave(&port->priv->mss_spinlock, flags);

	/* Remove the Flow control enable bit to prevent a race between FW and
	 * the kernel. If Flow control was enabled, it is re-enabled below.
	 */
	val = mvpp2_cm3_read(port->priv, MSS_FC_COM_REG);
	cm3_state = (val & FLOW_CONTROL_ENABLE_BIT);
	val &= ~FLOW_CONTROL_ENABLE_BIT;
	mvpp2_cm3_write(port->priv, MSS_FC_COM_REG, val);

	/* Disable Flow control for all RXQs */
	for (q = 0; q < port->nrxqs; q++) {
		/* Set threshold 0 to disable Flow control */
		val = 0;
		val |= (0 << MSS_RXQ_TRESH_STOP_OFFS);
		mvpp2_cm3_write(port->priv, MSS_RXQ_TRESH_REG(q, fq), val);

		val = mvpp2_cm3_read(port->priv, MSS_RXQ_ASS_REG(q, fq));

		val &= ~(MSS_RXQ_ASS_PORTID_MASK << MSS_RXQ_ASS_Q_BASE(q, fq));

		val &= ~(MSS_RXQ_ASS_HOSTID_MASK << (MSS_RXQ_ASS_Q_BASE(q, fq)
			+ MSS_RXQ_ASS_HOSTID_OFFS));

		mvpp2_cm3_write(port->priv, MSS_RXQ_ASS_REG(q, fq), val);
	}

	/* Notify Firmware that Flow control config space ready for update */
	val = mvpp2_cm3_read(port->priv, MSS_FC_COM_REG);
	val |= FLOW_CONTROL_UPDATE_COMMAND_BIT;
	val |= cm3_state;
	mvpp2_cm3_write(port->priv, MSS_FC_COM_REG, val);

	spin_unlock_irqrestore(&port->priv->mss_spinlock, flags);
}
/* Enable/disable flow control for a BM pool */
static void mvpp2_bm_pool_update_fc(struct mvpp2_port *port,
				    struct mvpp2_bm_pool *pool,
				    bool en)
{
	int val, cm3_state;
	unsigned long flags;

	spin_lock_irqsave(&port->priv->mss_spinlock, flags);

	/* Remove the Flow control enable bit to prevent a race between FW and
	 * the kernel. If Flow control was enabled, it is re-enabled below.
	 */
	val = mvpp2_cm3_read(port->priv, MSS_FC_COM_REG);
	cm3_state = (val & FLOW_CONTROL_ENABLE_BIT);
	val &= ~FLOW_CONTROL_ENABLE_BIT;
	mvpp2_cm3_write(port->priv, MSS_FC_COM_REG, val);

	/* Check if the BM pool should be enabled or disabled */
	if (en) {
		/* Set BM pool start and stop thresholds per port */
		val = mvpp2_cm3_read(port->priv, MSS_BUF_POOL_REG(pool->id));
		val |= MSS_BUF_POOL_PORT_OFFS(port->id);
		val &= ~MSS_BUF_POOL_START_MASK;
		val |= (MSS_THRESHOLD_START << MSS_BUF_POOL_START_OFFS);
		val &= ~MSS_BUF_POOL_STOP_MASK;
		val |= MSS_THRESHOLD_STOP;
		mvpp2_cm3_write(port->priv, MSS_BUF_POOL_REG(pool->id), val);
	} else {
		/* Remove BM pool from the port */
		val = mvpp2_cm3_read(port->priv, MSS_BUF_POOL_REG(pool->id));
		val &= ~MSS_BUF_POOL_PORT_OFFS(port->id);

		/* Zero BM pool start and stop thresholds to disable pool
		 * flow control if pool empty (not used by any port)
		 */
		if (!pool->buf_num) {
			val &= ~MSS_BUF_POOL_START_MASK;
			val &= ~MSS_BUF_POOL_STOP_MASK;
		}

		mvpp2_cm3_write(port->priv, MSS_BUF_POOL_REG(pool->id), val);
	}

	/* Notify Firmware that Flow control config space ready for update */
	val = mvpp2_cm3_read(port->priv, MSS_FC_COM_REG);
	val |= FLOW_CONTROL_UPDATE_COMMAND_BIT;
	val |= cm3_state;
	mvpp2_cm3_write(port->priv, MSS_FC_COM_REG, val);

	spin_unlock_irqrestore(&port->priv->mss_spinlock, flags);
}
/* Enable/disable flow control for BM pool on all ports */
static void mvpp2_bm_pool_update_priv_fc(struct mvpp2 *priv, bool en)
{
	struct mvpp2_port *port;
	int i, j;

	for (i = 0; i < priv->port_count; i++) {
		port = priv->port_list[i];
		if (port->priv->percpu_pools) {
			/* Use a separate index so the outer port loop is
			 * not clobbered.
			 */
			for (j = 0; j < port->nrxqs; j++)
				mvpp2_bm_pool_update_fc(port, &port->priv->bm_pools[j],
							port->tx_fc & en);
		} else {
			mvpp2_bm_pool_update_fc(port, port->pool_long, port->tx_fc & en);
			mvpp2_bm_pool_update_fc(port, port->pool_short, port->tx_fc & en);
		}
	}
}
static int mvpp2_enable_global_fc(struct mvpp2 *priv)
{
	int val, timeout = 0;

	/* Enable global flow control. At this stage global flow control is
	 * enabled, but still disabled per port.
	 */
	val = mvpp2_cm3_read(priv, MSS_FC_COM_REG);
	val |= FLOW_CONTROL_ENABLE_BIT;
	mvpp2_cm3_write(priv, MSS_FC_COM_REG, val);

	/* Check if the firmware is running, and disable FC if not */
	val |= FLOW_CONTROL_UPDATE_COMMAND_BIT;
	mvpp2_cm3_write(priv, MSS_FC_COM_REG, val);

	while (timeout < MSS_FC_MAX_TIMEOUT) {
		val = mvpp2_cm3_read(priv, MSS_FC_COM_REG);

		if (!(val & FLOW_CONTROL_UPDATE_COMMAND_BIT))
			return 0;
		usleep_range(10, 20);
		timeout++;
	}

	priv->global_tx_fc = false;

	return -EOPNOTSUPP;
}
/* Release buffer to BM */
static inline void mvpp2_bm_pool_put(struct mvpp2_port *port, int pool,
				     dma_addr_t buf_dma_addr,
				     phys_addr_t buf_phys_addr)
{
	unsigned int thread = mvpp2_cpu_to_thread(port->priv, get_cpu());
	unsigned long flags = 0;

	if (test_bit(thread, &port->priv->lock_map))
		spin_lock_irqsave(&port->bm_lock[thread], flags);

	if (port->priv->hw_version >= MVPP22) {
		u32 val = 0;

		if (sizeof(dma_addr_t) == 8)
			val |= upper_32_bits(buf_dma_addr) &
				MVPP22_BM_ADDR_HIGH_PHYS_RLS_MASK;

		if (sizeof(phys_addr_t) == 8)
			val |= (upper_32_bits(buf_phys_addr)
				<< MVPP22_BM_ADDR_HIGH_VIRT_RLS_SHIFT) &
				MVPP22_BM_ADDR_HIGH_VIRT_RLS_MASK;

		mvpp2_thread_write_relaxed(port->priv, thread,
					   MVPP22_BM_ADDR_HIGH_RLS_REG, val);
	}

	/* MVPP2_BM_VIRT_RLS_REG is not interpreted by HW, and simply
	 * returned in the "cookie" field of the RX
	 * descriptor. Instead of storing the virtual address, we
	 * store the physical address
	 */
	mvpp2_thread_write_relaxed(port->priv, thread,
				   MVPP2_BM_VIRT_RLS_REG, buf_phys_addr);
	mvpp2_thread_write_relaxed(port->priv, thread,
				   MVPP2_BM_PHY_RLS_REG(pool), buf_dma_addr);

	if (test_bit(thread, &port->priv->lock_map))
		spin_unlock_irqrestore(&port->bm_lock[thread], flags);

	put_cpu();
}
/* Allocate buffers for the pool */
static int mvpp2_bm_bufs_add(struct mvpp2_port *port,
			     struct mvpp2_bm_pool *bm_pool, int buf_num)
{
	int i, buf_size, total_size;
	dma_addr_t dma_addr;
	phys_addr_t phys_addr;
	struct page_pool *pp = NULL;
	void *buf;

	if (port->priv->percpu_pools &&
	    bm_pool->pkt_size > MVPP2_BM_LONG_PKT_SIZE) {
		netdev_err(port->dev,
			   "attempted to use jumbo frames with per-cpu pools");
		return 0;
	}

	buf_size = MVPP2_RX_BUF_SIZE(bm_pool->pkt_size);
	total_size = MVPP2_RX_TOTAL_SIZE(buf_size);

	if (buf_num < 0 ||
	    (buf_num + bm_pool->buf_num > bm_pool->size)) {
		netdev_err(port->dev,
			   "cannot allocate %d buffers for pool %d\n",
			   buf_num, bm_pool->id);
		return 0;
	}

	if (port->priv->percpu_pools)
		pp = port->priv->page_pool[bm_pool->id];
	for (i = 0; i < buf_num; i++) {
		buf = mvpp2_buf_alloc(port, bm_pool, pp, &dma_addr,
				      &phys_addr, GFP_KERNEL);
		if (!buf)
			break;

		mvpp2_bm_pool_put(port, bm_pool->id, dma_addr,
				  phys_addr);
	}

	/* Update BM driver with number of buffers added to pool */
	bm_pool->buf_num += i;

	netdev_dbg(port->dev,
		   "pool %d: pkt_size=%4d, buf_size=%4d, total_size=%4d\n",
		   bm_pool->id, bm_pool->pkt_size, buf_size, total_size);

	netdev_dbg(port->dev,
		   "pool %d: %d of %d buffers added\n",
		   bm_pool->id, i, buf_num);
	return i;
}
/* Notify the driver that BM pool is being used as specific type and return the
 * pool pointer on success
 */
static struct mvpp2_bm_pool *
mvpp2_bm_pool_use(struct mvpp2_port *port, unsigned int pool, int pkt_size)
{
	struct mvpp2_bm_pool *new_pool = &port->priv->bm_pools[pool];
	int num;

	if ((port->priv->percpu_pools && pool > mvpp2_get_nrxqs(port->priv) * 2) ||
	    (!port->priv->percpu_pools && pool >= MVPP2_BM_POOLS_NUM)) {
		netdev_err(port->dev, "Invalid pool %d\n", pool);
		return NULL;
	}

	/* Allocate buffers in case BM pool is used as long pool, but packet
	 * size doesn't match MTU or BM pool hasn't been used yet
	 */
	if (new_pool->pkt_size == 0) {
		int pkts_num;

		/* Set default buffer number or free all the buffers in case
		 * the pool is not empty
		 */
		pkts_num = new_pool->buf_num;
		if (pkts_num == 0) {
			if (port->priv->percpu_pools) {
				if (pool < port->nrxqs)
					pkts_num = mvpp2_pools[MVPP2_BM_SHORT].buf_num;
				else
					pkts_num = mvpp2_pools[MVPP2_BM_LONG].buf_num;
			} else {
				pkts_num = mvpp2_pools[pool].buf_num;
			}
		} else {
			mvpp2_bm_bufs_free(port->dev->dev.parent,
					   port->priv, new_pool, pkts_num);
		}

		new_pool->pkt_size = pkt_size;
		new_pool->frag_size =
			SKB_DATA_ALIGN(MVPP2_RX_BUF_SIZE(pkt_size)) +
			MVPP2_SKB_SHINFO_SIZE;

		/* Allocate buffers for this pool */
		num = mvpp2_bm_bufs_add(port, new_pool, pkts_num);
		if (num != pkts_num) {
			WARN(1, "pool %d: %d of %d allocated\n",
			     new_pool->id, num, pkts_num);
			return NULL;
		}
	}

	mvpp2_bm_pool_bufsize_set(port->priv, new_pool,
				  MVPP2_RX_BUF_SIZE(new_pool->pkt_size));

	return new_pool;
}
static struct mvpp2_bm_pool *
mvpp2_bm_pool_use_percpu(struct mvpp2_port *port, int type,
			 unsigned int pool, int pkt_size)
{
	struct mvpp2_bm_pool *new_pool = &port->priv->bm_pools[pool];
	int num;

	if (pool > port->nrxqs * 2) {
		netdev_err(port->dev, "Invalid pool %d\n", pool);
		return NULL;
	}

	/* Allocate buffers in case BM pool is used as long pool, but packet
	 * size doesn't match MTU or BM pool hasn't been used yet
	 */
	if (new_pool->pkt_size == 0) {
		int pkts_num;

		/* Set default buffer number or free all the buffers in case
		 * the pool is not empty
		 */
		pkts_num = new_pool->buf_num;
		if (pkts_num == 0)
			pkts_num = mvpp2_pools[type].buf_num;
		else
			mvpp2_bm_bufs_free(port->dev->dev.parent,
					   port->priv, new_pool, pkts_num);

		new_pool->pkt_size = pkt_size;
		new_pool->frag_size =
			SKB_DATA_ALIGN(MVPP2_RX_BUF_SIZE(pkt_size)) +
			MVPP2_SKB_SHINFO_SIZE;

		/* Allocate buffers for this pool */
		num = mvpp2_bm_bufs_add(port, new_pool, pkts_num);
		if (num != pkts_num) {
			WARN(1, "pool %d: %d of %d allocated\n",
			     new_pool->id, num, pkts_num);
			return NULL;
		}
	}

	mvpp2_bm_pool_bufsize_set(port->priv, new_pool,
				  MVPP2_RX_BUF_SIZE(new_pool->pkt_size));

	return new_pool;
}
/* Initialize pools for swf, shared buffers variant */
static int mvpp2_swf_bm_pool_init_shared(struct mvpp2_port *port)
{
	enum mvpp2_bm_pool_log_num long_log_pool, short_log_pool;
	int rxq;

	/* If port pkt_size is higher than 1518B:
	 * HW Long pool - SW Jumbo pool, HW Short pool - SW Long pool
	 * else: HW Long pool - SW Long pool, HW Short pool - SW Short pool
	 */
	if (port->pkt_size > MVPP2_BM_LONG_PKT_SIZE) {
		long_log_pool = MVPP2_BM_JUMBO;
		short_log_pool = MVPP2_BM_LONG;
	} else {
		long_log_pool = MVPP2_BM_LONG;
		short_log_pool = MVPP2_BM_SHORT;
	}

	if (!port->pool_long) {
		port->pool_long =
			mvpp2_bm_pool_use(port, long_log_pool,
					  mvpp2_pools[long_log_pool].pkt_size);
		if (!port->pool_long)
			return -ENOMEM;

		port->pool_long->port_map |= BIT(port->id);

		for (rxq = 0; rxq < port->nrxqs; rxq++)
			mvpp2_rxq_long_pool_set(port, rxq, port->pool_long->id);
	}

	if (!port->pool_short) {
		port->pool_short =
			mvpp2_bm_pool_use(port, short_log_pool,
					  mvpp2_pools[short_log_pool].pkt_size);
		if (!port->pool_short)
			return -ENOMEM;

		port->pool_short->port_map |= BIT(port->id);

		for (rxq = 0; rxq < port->nrxqs; rxq++)
			mvpp2_rxq_short_pool_set(port, rxq,
						 port->pool_short->id);
	}

	return 0;
}
/* Initialize pools for swf, percpu buffers variant */
static int mvpp2_swf_bm_pool_init_percpu(struct mvpp2_port *port)
{
	struct mvpp2_bm_pool *bm_pool;
	int i;

	for (i = 0; i < port->nrxqs; i++) {
		bm_pool = mvpp2_bm_pool_use_percpu(port, MVPP2_BM_SHORT, i,
						   mvpp2_pools[MVPP2_BM_SHORT].pkt_size);
		if (!bm_pool)
			return -ENOMEM;

		bm_pool->port_map |= BIT(port->id);
		mvpp2_rxq_short_pool_set(port, i, bm_pool->id);
	}

	for (i = 0; i < port->nrxqs; i++) {
		bm_pool = mvpp2_bm_pool_use_percpu(port, MVPP2_BM_LONG, i + port->nrxqs,
						   mvpp2_pools[MVPP2_BM_LONG].pkt_size);
		if (!bm_pool)
			return -ENOMEM;

		bm_pool->port_map |= BIT(port->id);
		mvpp2_rxq_long_pool_set(port, i, bm_pool->id);
	}

	port->pool_long = NULL;
	port->pool_short = NULL;

	return 0;
}
static int mvpp2_swf_bm_pool_init(struct mvpp2_port *port)
{
	if (port->priv->percpu_pools)
		return mvpp2_swf_bm_pool_init_percpu(port);
	else
		return mvpp2_swf_bm_pool_init_shared(port);
}
static void mvpp2_set_hw_csum(struct mvpp2_port *port,
			      enum mvpp2_bm_pool_log_num new_long_pool)
{
	const netdev_features_t csums = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	/* Update L4 checksum when jumbo enable/disable on port.
	 * Only port 0 supports hardware checksum offload due to
	 * the Tx FIFO size limitation.
	 * Also, don't set NETIF_F_HW_CSUM because L3_offset in TX descriptor
	 * has 7 bits, so the maximum L3 offset is 127.
	 */
	if (new_long_pool == MVPP2_BM_JUMBO && port->id != 0) {
		port->dev->features &= ~csums;
		port->dev->hw_features &= ~csums;
	} else {
		port->dev->features |= csums;
		port->dev->hw_features |= csums;
	}
}
static int mvpp2_bm_update_mtu(struct net_device *dev, int mtu)
{
	struct mvpp2_port *port = netdev_priv(dev);
	enum mvpp2_bm_pool_log_num new_long_pool;
	int pkt_size = MVPP2_RX_PKT_SIZE(mtu);

	if (port->priv->percpu_pools)
		goto out_set;

	/* If port MTU is higher than 1518B:
	 * HW Long pool - SW Jumbo pool, HW Short pool - SW Long pool
	 * else: HW Long pool - SW Long pool, HW Short pool - SW Short pool
	 */
	if (pkt_size > MVPP2_BM_LONG_PKT_SIZE)
		new_long_pool = MVPP2_BM_JUMBO;
	else
		new_long_pool = MVPP2_BM_LONG;

	if (new_long_pool != port->pool_long->id) {
		if (port->tx_fc) {
			if (pkt_size > MVPP2_BM_LONG_PKT_SIZE)
				mvpp2_bm_pool_update_fc(port,
							port->pool_short,
							false);
			else
				mvpp2_bm_pool_update_fc(port, port->pool_long,
							false);
		}

		/* Remove port from old short & long pool */
		port->pool_long = mvpp2_bm_pool_use(port, port->pool_long->id,
						    port->pool_long->pkt_size);
		port->pool_long->port_map &= ~BIT(port->id);
		port->pool_long = NULL;

		port->pool_short = mvpp2_bm_pool_use(port, port->pool_short->id,
						     port->pool_short->pkt_size);
		port->pool_short->port_map &= ~BIT(port->id);
		port->pool_short = NULL;

		port->pkt_size = pkt_size;

		/* Add port to new short & long pool */
		mvpp2_swf_bm_pool_init(port);

		mvpp2_set_hw_csum(port, new_long_pool);

		if (port->tx_fc) {
			if (pkt_size > MVPP2_BM_LONG_PKT_SIZE)
				mvpp2_bm_pool_update_fc(port, port->pool_long,
							true);
			else
				mvpp2_bm_pool_update_fc(port, port->pool_short,
							true);
		}

		/* Update L4 checksum when jumbo enable/disable on port */
		if (new_long_pool == MVPP2_BM_JUMBO && port->id != 0) {
			dev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
			dev->hw_features &= ~(NETIF_F_IP_CSUM |
					      NETIF_F_IPV6_CSUM);
		} else {
			dev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
			dev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
		}
	}

out_set:
	dev->mtu = mtu;
	dev->wanted_features = dev->features;

	netdev_update_features(dev);
	return 0;
}
static inline void mvpp2_interrupts_enable(struct mvpp2_port *port)
{
	int i, sw_thread_mask = 0;

	for (i = 0; i < port->nqvecs; i++)
		sw_thread_mask |= port->qvecs[i].sw_thread_mask;

	mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
		    MVPP2_ISR_ENABLE_INTERRUPT(sw_thread_mask));
}

static inline void mvpp2_interrupts_disable(struct mvpp2_port *port)
{
	int i, sw_thread_mask = 0;

	for (i = 0; i < port->nqvecs; i++)
		sw_thread_mask |= port->qvecs[i].sw_thread_mask;

	mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
		    MVPP2_ISR_DISABLE_INTERRUPT(sw_thread_mask));
}

static inline void mvpp2_qvec_interrupt_enable(struct mvpp2_queue_vector *qvec)
{
	struct mvpp2_port *port = qvec->port;

	mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
		    MVPP2_ISR_ENABLE_INTERRUPT(qvec->sw_thread_mask));
}

static inline void mvpp2_qvec_interrupt_disable(struct mvpp2_queue_vector *qvec)
{
	struct mvpp2_port *port = qvec->port;

	mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
		    MVPP2_ISR_DISABLE_INTERRUPT(qvec->sw_thread_mask));
}
/* Mask the current thread's Rx/Tx interrupts.
 * Called by on_each_cpu(), guaranteed to run with migration disabled,
 * using smp_processor_id() is OK.
 */
static void mvpp2_interrupts_mask(void *arg)
{
	struct mvpp2_port *port = arg;
	int cpu = smp_processor_id();
	u32 thread;

	/* If the thread isn't used, don't do anything */
	if (cpu >= port->priv->nthreads)
		return;

	thread = mvpp2_cpu_to_thread(port->priv, cpu);

	mvpp2_thread_write(port->priv, thread,
			   MVPP2_ISR_RX_TX_MASK_REG(port->id), 0);
	mvpp2_thread_write(port->priv, thread,
			   MVPP2_ISR_RX_ERR_CAUSE_REG(port->id), 0);
}

/* Unmask the current thread's Rx/Tx interrupts.
 * Called by on_each_cpu(), guaranteed to run with migration disabled,
 * using smp_processor_id() is OK.
 */
static void mvpp2_interrupts_unmask(void *arg)
{
	struct mvpp2_port *port = arg;
	int cpu = smp_processor_id();
	u32 val, thread;

	/* If the thread isn't used, don't do anything */
	if (cpu >= port->priv->nthreads)
		return;

	thread = mvpp2_cpu_to_thread(port->priv, cpu);

	val = MVPP2_CAUSE_MISC_SUM_MASK |
		MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK(port->priv->hw_version);
	if (port->has_tx_irqs)
		val |= MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK;

	mvpp2_thread_write(port->priv, thread,
			   MVPP2_ISR_RX_TX_MASK_REG(port->id), val);
	mvpp2_thread_write(port->priv, thread,
			   MVPP2_ISR_RX_ERR_CAUSE_REG(port->id),
			   MVPP2_ISR_RX_ERR_CAUSE_NONOCC_MASK);
}
static void
mvpp2_shared_interrupt_mask_unmask(struct mvpp2_port *port, bool mask)
{
	u32 val;
	int i;

	if (port->priv->hw_version == MVPP21)
		return;

	if (mask)
		val = 0;
	else
		val = MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK(MVPP22);

	for (i = 0; i < port->nqvecs; i++) {
		struct mvpp2_queue_vector *v = port->qvecs + i;

		if (v->type != MVPP2_QUEUE_VECTOR_SHARED)
			continue;

		mvpp2_thread_write(port->priv, v->sw_thread_id,
				   MVPP2_ISR_RX_TX_MASK_REG(port->id), val);
		mvpp2_thread_write(port->priv, v->sw_thread_id,
				   MVPP2_ISR_RX_ERR_CAUSE_REG(port->id),
				   MVPP2_ISR_RX_ERR_CAUSE_NONOCC_MASK);
	}
}
/* Only GOP port 0 has an XLG MAC */
static bool mvpp2_port_supports_xlg(struct mvpp2_port *port)
{
	return port->gop_id == 0;
}

static bool mvpp2_port_supports_rgmii(struct mvpp2_port *port)
{
	return !(port->priv->hw_version >= MVPP22 && port->gop_id == 0);
}

/* Port configuration routines */
static bool mvpp2_is_xlg(phy_interface_t interface)
{
	return interface == PHY_INTERFACE_MODE_10GBASER ||
	       interface == PHY_INTERFACE_MODE_5GBASER ||
	       interface == PHY_INTERFACE_MODE_XAUI;
}

static void mvpp2_modify(void __iomem *ptr, u32 mask, u32 set)
{
	u32 old, val;

	old = val = readl(ptr);
	val &= ~mask;
	val |= set;
	if (old != val)
		writel(val, ptr);
}
static void mvpp22_gop_init_rgmii(struct mvpp2_port *port)
{
	struct mvpp2 *priv = port->priv;
	u32 val;

	regmap_read(priv->sysctrl_base, GENCONF_PORT_CTRL0, &val);
	val |= GENCONF_PORT_CTRL0_BUS_WIDTH_SELECT;
	regmap_write(priv->sysctrl_base, GENCONF_PORT_CTRL0, val);

	regmap_read(priv->sysctrl_base, GENCONF_CTRL0, &val);
	if (port->gop_id == 2) {
		val |= GENCONF_CTRL0_PORT2_RGMII;
	} else if (port->gop_id == 3) {
		val |= GENCONF_CTRL0_PORT3_RGMII_MII;

		/* According to the specification, GENCONF_CTRL0_PORT3_RGMII
		 * should be set to 1 for RGMII and 0 for MII. However, tests
		 * show that it is the other way around. This is also what
		 * U-Boot does for mvpp2, so it is assumed to be correct.
		 */
		if (port->phy_interface == PHY_INTERFACE_MODE_MII)
			val |= GENCONF_CTRL0_PORT3_RGMII;
		else
			val &= ~GENCONF_CTRL0_PORT3_RGMII;
	}
	regmap_write(priv->sysctrl_base, GENCONF_CTRL0, val);
}
static void mvpp22_gop_init_sgmii(struct mvpp2_port *port)
{
	struct mvpp2 *priv = port->priv;
	u32 val;

	regmap_read(priv->sysctrl_base, GENCONF_PORT_CTRL0, &val);
	val |= GENCONF_PORT_CTRL0_BUS_WIDTH_SELECT |
	       GENCONF_PORT_CTRL0_RX_DATA_SAMPLE;
	regmap_write(priv->sysctrl_base, GENCONF_PORT_CTRL0, val);

	if (port->gop_id > 1) {
		regmap_read(priv->sysctrl_base, GENCONF_CTRL0, &val);
		if (port->gop_id == 2)
			val &= ~GENCONF_CTRL0_PORT2_RGMII;
		else if (port->gop_id == 3)
			val &= ~GENCONF_CTRL0_PORT3_RGMII_MII;
		regmap_write(priv->sysctrl_base, GENCONF_CTRL0, val);
	}
}
static void mvpp22_gop_init_10gkr(struct mvpp2_port *port)
{
	struct mvpp2 *priv = port->priv;
	void __iomem *mpcs = priv->iface_base + MVPP22_MPCS_BASE(port->gop_id);
	void __iomem *xpcs = priv->iface_base + MVPP22_XPCS_BASE(port->gop_id);
	u32 val;

	val = readl(xpcs + MVPP22_XPCS_CFG0);
	val &= ~(MVPP22_XPCS_CFG0_PCS_MODE(0x3) |
		 MVPP22_XPCS_CFG0_ACTIVE_LANE(0x3));
	val |= MVPP22_XPCS_CFG0_ACTIVE_LANE(2);
	writel(val, xpcs + MVPP22_XPCS_CFG0);

	val = readl(mpcs + MVPP22_MPCS_CTRL);
	val &= ~MVPP22_MPCS_CTRL_FWD_ERR_CONN;
	writel(val, mpcs + MVPP22_MPCS_CTRL);

	val = readl(mpcs + MVPP22_MPCS_CLK_RESET);
	val &= ~MVPP22_MPCS_CLK_RESET_DIV_RATIO(0x7);
	val |= MVPP22_MPCS_CLK_RESET_DIV_RATIO(1);
	writel(val, mpcs + MVPP22_MPCS_CLK_RESET);
}
static void mvpp22_gop_fca_enable_periodic(struct mvpp2_port *port, bool en)
{
	struct mvpp2 *priv = port->priv;
	void __iomem *fca = priv->iface_base + MVPP22_FCA_BASE(port->gop_id);
	u32 val;

	val = readl(fca + MVPP22_FCA_CONTROL_REG);
	val &= ~MVPP22_FCA_ENABLE_PERIODIC;
	if (en)
		val |= MVPP22_FCA_ENABLE_PERIODIC;
	writel(val, fca + MVPP22_FCA_CONTROL_REG);
}

static void mvpp22_gop_fca_set_timer(struct mvpp2_port *port, u32 timer)
{
	struct mvpp2 *priv = port->priv;
	void __iomem *fca = priv->iface_base + MVPP22_FCA_BASE(port->gop_id);
	u32 lsb, msb;

	lsb = timer & MVPP22_FCA_REG_MASK;
	msb = timer >> MVPP22_FCA_REG_SIZE;

	writel(lsb, fca + MVPP22_PERIODIC_COUNTER_LSB_REG);
	writel(msb, fca + MVPP22_PERIODIC_COUNTER_MSB_REG);
}

/* Set Flow Control timer x100 faster than pause quanta to ensure that link
 * partner won't send traffic if port is in XOFF mode.
 */
static void mvpp22_gop_fca_set_periodic_timer(struct mvpp2_port *port)
{
	u32 timer;

	timer = (port->priv->tclk / (USEC_PER_SEC * FC_CLK_DIVIDER))
		* FC_QUANTA;

	mvpp22_gop_fca_enable_periodic(port, false);

	mvpp22_gop_fca_set_timer(port, timer);

	mvpp22_gop_fca_enable_periodic(port, true);
}
static int mvpp22_gop_init(struct mvpp2_port *port, phy_interface_t interface)
{
	struct mvpp2 *priv = port->priv;
	u32 val;

	if (!priv->sysctrl_base)
		return 0;

	switch (interface) {
	case PHY_INTERFACE_MODE_MII:
	case PHY_INTERFACE_MODE_RGMII:
	case PHY_INTERFACE_MODE_RGMII_ID:
	case PHY_INTERFACE_MODE_RGMII_RXID:
	case PHY_INTERFACE_MODE_RGMII_TXID:
		if (!mvpp2_port_supports_rgmii(port))
			goto invalid_conf;
		mvpp22_gop_init_rgmii(port);
		break;
	case PHY_INTERFACE_MODE_SGMII:
	case PHY_INTERFACE_MODE_1000BASEX:
	case PHY_INTERFACE_MODE_2500BASEX:
		mvpp22_gop_init_sgmii(port);
		break;
	case PHY_INTERFACE_MODE_5GBASER:
	case PHY_INTERFACE_MODE_10GBASER:
		if (!mvpp2_port_supports_xlg(port))
			goto invalid_conf;
		mvpp22_gop_init_10gkr(port);
		break;
	default:
		goto unsupported_conf;
	}

	regmap_read(priv->sysctrl_base, GENCONF_PORT_CTRL1, &val);
	val |= GENCONF_PORT_CTRL1_RESET(port->gop_id) |
	       GENCONF_PORT_CTRL1_EN(port->gop_id);
	regmap_write(priv->sysctrl_base, GENCONF_PORT_CTRL1, val);

	regmap_read(priv->sysctrl_base, GENCONF_PORT_CTRL0, &val);
	val |= GENCONF_PORT_CTRL0_CLK_DIV_PHASE_CLR;
	regmap_write(priv->sysctrl_base, GENCONF_PORT_CTRL0, val);

	regmap_read(priv->sysctrl_base, GENCONF_SOFT_RESET1, &val);
	val |= GENCONF_SOFT_RESET1_GOP;
	regmap_write(priv->sysctrl_base, GENCONF_SOFT_RESET1, val);

	mvpp22_gop_fca_set_periodic_timer(port);

unsupported_conf:
	return 0;

invalid_conf:
	netdev_err(port->dev, "Invalid port configuration\n");
	return -EINVAL;
}
static void mvpp22_gop_unmask_irq(struct mvpp2_port *port)
{
	u32 val;

	if (phy_interface_mode_is_rgmii(port->phy_interface) ||
	    phy_interface_mode_is_8023z(port->phy_interface) ||
	    port->phy_interface == PHY_INTERFACE_MODE_SGMII) {
		/* Enable the GMAC link status irq for this port */
		val = readl(port->base + MVPP22_GMAC_INT_SUM_MASK);
		val |= MVPP22_GMAC_INT_SUM_MASK_LINK_STAT;
		writel(val, port->base + MVPP22_GMAC_INT_SUM_MASK);
	}

	if (mvpp2_port_supports_xlg(port)) {
		/* Enable the XLG/GIG irqs for this port */
		val = readl(port->base + MVPP22_XLG_EXT_INT_MASK);
		if (mvpp2_is_xlg(port->phy_interface))
			val |= MVPP22_XLG_EXT_INT_MASK_XLG;
		else
			val |= MVPP22_XLG_EXT_INT_MASK_GIG;
		writel(val, port->base + MVPP22_XLG_EXT_INT_MASK);
	}
}

static void mvpp22_gop_mask_irq(struct mvpp2_port *port)
{
	u32 val;

	if (mvpp2_port_supports_xlg(port)) {
		val = readl(port->base + MVPP22_XLG_EXT_INT_MASK);
		val &= ~(MVPP22_XLG_EXT_INT_MASK_XLG |
			 MVPP22_XLG_EXT_INT_MASK_GIG);
		writel(val, port->base + MVPP22_XLG_EXT_INT_MASK);
	}

	if (phy_interface_mode_is_rgmii(port->phy_interface) ||
	    phy_interface_mode_is_8023z(port->phy_interface) ||
	    port->phy_interface == PHY_INTERFACE_MODE_SGMII) {
		val = readl(port->base + MVPP22_GMAC_INT_SUM_MASK);
		val &= ~MVPP22_GMAC_INT_SUM_MASK_LINK_STAT;
		writel(val, port->base + MVPP22_GMAC_INT_SUM_MASK);
	}
}
static void mvpp22_gop_setup_irq(struct mvpp2_port *port)
{
	u32 val;

	mvpp2_modify(port->base + MVPP22_GMAC_INT_SUM_MASK,
		     MVPP22_GMAC_INT_SUM_MASK_PTP,
		     MVPP22_GMAC_INT_SUM_MASK_PTP);

	if (port->phylink ||
	    phy_interface_mode_is_rgmii(port->phy_interface) ||
	    phy_interface_mode_is_8023z(port->phy_interface) ||
	    port->phy_interface == PHY_INTERFACE_MODE_SGMII) {
		val = readl(port->base + MVPP22_GMAC_INT_MASK);
		val |= MVPP22_GMAC_INT_MASK_LINK_STAT;
		writel(val, port->base + MVPP22_GMAC_INT_MASK);
	}

	if (mvpp2_port_supports_xlg(port)) {
		val = readl(port->base + MVPP22_XLG_INT_MASK);
		val |= MVPP22_XLG_INT_MASK_LINK;
		writel(val, port->base + MVPP22_XLG_INT_MASK);

		mvpp2_modify(port->base + MVPP22_XLG_EXT_INT_MASK,
			     MVPP22_XLG_EXT_INT_MASK_PTP,
			     MVPP22_XLG_EXT_INT_MASK_PTP);
	}

	mvpp22_gop_unmask_irq(port);
}
/* Sets the PHY mode of the COMPHY (which configures the serdes lanes).
 *
 * The PHY mode used by the PPv2 driver comes from the network subsystem, while
 * the one given to the COMPHY comes from the generic PHY subsystem. Hence they
 * differ.
 *
 * The COMPHY configures the serdes lanes regardless of the actual use of the
 * lanes by the physical layer. This is why configurations like
 * "PPv2 (2500BaseX) - COMPHY (2500SGMII)" are valid.
 */
static int mvpp22_comphy_init(struct mvpp2_port *port,
			      phy_interface_t interface)
{
	int ret;

	if (!port->comphy)
		return 0;

	ret = phy_set_mode_ext(port->comphy, PHY_MODE_ETHERNET, interface);
	if (ret)
		return ret;

	return phy_power_on(port->comphy);
}
static void mvpp2_port_enable(struct mvpp2_port *port)
{
	u32 val;

	if (mvpp2_port_supports_xlg(port) &&
	    mvpp2_is_xlg(port->phy_interface)) {
		val = readl(port->base + MVPP22_XLG_CTRL0_REG);
		val |= MVPP22_XLG_CTRL0_PORT_EN;
		val &= ~MVPP22_XLG_CTRL0_MIB_CNT_DIS;
		writel(val, port->base + MVPP22_XLG_CTRL0_REG);
	} else {
		val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
		val |= MVPP2_GMAC_PORT_EN_MASK;
		val |= MVPP2_GMAC_MIB_CNTR_EN_MASK;
		writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
	}
}

static void mvpp2_port_disable(struct mvpp2_port *port)
{
	u32 val;

	if (mvpp2_port_supports_xlg(port) &&
	    mvpp2_is_xlg(port->phy_interface)) {
		val = readl(port->base + MVPP22_XLG_CTRL0_REG);
		val &= ~MVPP22_XLG_CTRL0_PORT_EN;
		writel(val, port->base + MVPP22_XLG_CTRL0_REG);
	}

	val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
	val &= ~(MVPP2_GMAC_PORT_EN_MASK);
	writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
}
/* Set IEEE 802.3x Flow Control Xon Packet Transmission Mode */
static void mvpp2_port_periodic_xon_disable(struct mvpp2_port *port)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_1_REG) &
		    ~MVPP2_GMAC_PERIODIC_XON_EN_MASK;
	writel(val, port->base + MVPP2_GMAC_CTRL_1_REG);
}

/* Configure loopback port */
static void mvpp2_port_loopback_set(struct mvpp2_port *port,
				    const struct phylink_link_state *state)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_1_REG);

	if (state->speed == 1000)
		val |= MVPP2_GMAC_GMII_LB_EN_MASK;
	else
		val &= ~MVPP2_GMAC_GMII_LB_EN_MASK;

	if (phy_interface_mode_is_8023z(state->interface) ||
	    state->interface == PHY_INTERFACE_MODE_SGMII)
		val |= MVPP2_GMAC_PCS_LB_EN_MASK;
	else
		val &= ~MVPP2_GMAC_PCS_LB_EN_MASK;

	writel(val, port->base + MVPP2_GMAC_CTRL_1_REG);
}
enum {
	ETHTOOL_XDP_REDIRECT,
	ETHTOOL_XDP_PASS,
	ETHTOOL_XDP_DROP,
	ETHTOOL_XDP_TX,
	ETHTOOL_XDP_TX_ERR,
	ETHTOOL_XDP_XMIT,
	ETHTOOL_XDP_XMIT_ERR,
};

struct mvpp2_ethtool_counter {
	unsigned int offset;
	const char string[ETH_GSTRING_LEN];
	bool reg_is_64b;
};
static u64 mvpp2_read_count(struct mvpp2_port *port,
			    const struct mvpp2_ethtool_counter *counter)
{
	u64 val;

	val = readl(port->stats_base + counter->offset);
	if (counter->reg_is_64b)
		val += (u64)readl(port->stats_base + counter->offset + 4) << 32;

	return val;
}
/* Some counters are accessed indirectly by first writing an index to
 * MVPP2_CTRS_IDX. The index can represent various resources depending on the
 * register we access, it can be a hit counter for some classification tables,
 * a counter specific to a rxq, a txq or a buffer pool.
 */
static u32 mvpp2_read_index(struct mvpp2 *priv, u32 index, u32 reg)
{
	mvpp2_write(priv, MVPP2_CTRS_IDX, index);
	return mvpp2_read(priv, reg);
}
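/* For instance, mvpp2_read_stats() below fetches a per-TXQ counter by first
 * latching the queue through MVPP2_CTRS_IDX:
 *
 *	*pstats++ += mvpp2_read_index(port->priv,
 *				      MVPP22_CTRS_TX_CTR(port->id, q),
 *				      mvpp2_ethtool_txq_regs[i].offset);
 */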
/* Software and hardware statistics are, by design, incremented at different
 * points in the packet processing chain. Incoming packets may therefore be
 * dropped after being counted by hardware but before reaching the software
 * statistics (most probably multicast packets), and on transmission FCS
 * bytes are added in between, and TSO skbs are split with header bytes
 * added. Hence, statistics gathered from userspace with ifconfig (software)
 * and ethtool (hardware) cannot be compared.
 */
static const struct mvpp2_ethtool_counter mvpp2_ethtool_mib_regs[] = {
	{ MVPP2_MIB_GOOD_OCTETS_RCVD, "good_octets_received", true },
	{ MVPP2_MIB_BAD_OCTETS_RCVD, "bad_octets_received" },
	{ MVPP2_MIB_CRC_ERRORS_SENT, "crc_errors_sent" },
	{ MVPP2_MIB_UNICAST_FRAMES_RCVD, "unicast_frames_received" },
	{ MVPP2_MIB_BROADCAST_FRAMES_RCVD, "broadcast_frames_received" },
	{ MVPP2_MIB_MULTICAST_FRAMES_RCVD, "multicast_frames_received" },
	{ MVPP2_MIB_FRAMES_64_OCTETS, "frames_64_octets" },
	{ MVPP2_MIB_FRAMES_65_TO_127_OCTETS, "frames_65_to_127_octet" },
	{ MVPP2_MIB_FRAMES_128_TO_255_OCTETS, "frames_128_to_255_octet" },
	{ MVPP2_MIB_FRAMES_256_TO_511_OCTETS, "frames_256_to_511_octet" },
	{ MVPP2_MIB_FRAMES_512_TO_1023_OCTETS, "frames_512_to_1023_octet" },
	{ MVPP2_MIB_FRAMES_1024_TO_MAX_OCTETS, "frames_1024_to_max_octet" },
	{ MVPP2_MIB_GOOD_OCTETS_SENT, "good_octets_sent", true },
	{ MVPP2_MIB_UNICAST_FRAMES_SENT, "unicast_frames_sent" },
	{ MVPP2_MIB_MULTICAST_FRAMES_SENT, "multicast_frames_sent" },
	{ MVPP2_MIB_BROADCAST_FRAMES_SENT, "broadcast_frames_sent" },
	{ MVPP2_MIB_FC_SENT, "fc_sent" },
	{ MVPP2_MIB_FC_RCVD, "fc_received" },
	{ MVPP2_MIB_RX_FIFO_OVERRUN, "rx_fifo_overrun" },
	{ MVPP2_MIB_UNDERSIZE_RCVD, "undersize_received" },
	{ MVPP2_MIB_FRAGMENTS_RCVD, "fragments_received" },
	{ MVPP2_MIB_OVERSIZE_RCVD, "oversize_received" },
	{ MVPP2_MIB_JABBER_RCVD, "jabber_received" },
	{ MVPP2_MIB_MAC_RCV_ERROR, "mac_receive_error" },
	{ MVPP2_MIB_BAD_CRC_EVENT, "bad_crc_event" },
	{ MVPP2_MIB_COLLISION, "collision" },
	{ MVPP2_MIB_LATE_COLLISION, "late_collision" },
};
static const struct mvpp2_ethtool_counter mvpp2_ethtool_port_regs[] = {
	{ MVPP2_OVERRUN_ETH_DROP, "rx_fifo_or_parser_overrun_drops" },
	{ MVPP2_CLS_ETH_DROP, "rx_classifier_drops" },
};

static const struct mvpp2_ethtool_counter mvpp2_ethtool_txq_regs[] = {
	{ MVPP2_TX_DESC_ENQ_CTR, "txq_%d_desc_enqueue" },
	{ MVPP2_TX_DESC_ENQ_TO_DDR_CTR, "txq_%d_desc_enqueue_to_ddr" },
	{ MVPP2_TX_BUFF_ENQ_TO_DDR_CTR, "txq_%d_buff_enqueue_to_ddr" },
	{ MVPP2_TX_DESC_ENQ_HW_FWD_CTR, "txq_%d_desc_hardware_forwarded" },
	{ MVPP2_TX_PKTS_DEQ_CTR, "txq_%d_packets_dequeued" },
	{ MVPP2_TX_PKTS_FULL_QUEUE_DROP_CTR, "txq_%d_queue_full_drops" },
	{ MVPP2_TX_PKTS_EARLY_DROP_CTR, "txq_%d_packets_early_drops" },
	{ MVPP2_TX_PKTS_BM_DROP_CTR, "txq_%d_packets_bm_drops" },
	{ MVPP2_TX_PKTS_BM_MC_DROP_CTR, "txq_%d_packets_rep_bm_drops" },
};

static const struct mvpp2_ethtool_counter mvpp2_ethtool_rxq_regs[] = {
	{ MVPP2_RX_DESC_ENQ_CTR, "rxq_%d_desc_enqueue" },
	{ MVPP2_RX_PKTS_FULL_QUEUE_DROP_CTR, "rxq_%d_queue_full_drops" },
	{ MVPP2_RX_PKTS_EARLY_DROP_CTR, "rxq_%d_packets_early_drops" },
	{ MVPP2_RX_PKTS_BM_DROP_CTR, "rxq_%d_packets_bm_drops" },
};

static const struct mvpp2_ethtool_counter mvpp2_ethtool_xdp[] = {
	{ ETHTOOL_XDP_REDIRECT, "rx_xdp_redirect", },
	{ ETHTOOL_XDP_PASS, "rx_xdp_pass", },
	{ ETHTOOL_XDP_DROP, "rx_xdp_drop", },
	{ ETHTOOL_XDP_TX, "rx_xdp_tx", },
	{ ETHTOOL_XDP_TX_ERR, "rx_xdp_tx_errors", },
	{ ETHTOOL_XDP_XMIT, "tx_xdp_xmit", },
	{ ETHTOOL_XDP_XMIT_ERR, "tx_xdp_xmit_errors", },
};
#define MVPP2_N_ETHTOOL_STATS(ntxqs, nrxqs)	(ARRAY_SIZE(mvpp2_ethtool_mib_regs) + \
						 ARRAY_SIZE(mvpp2_ethtool_port_regs) + \
						 (ARRAY_SIZE(mvpp2_ethtool_txq_regs) * (ntxqs)) + \
						 (ARRAY_SIZE(mvpp2_ethtool_rxq_regs) * (nrxqs)) + \
						 ARRAY_SIZE(mvpp2_ethtool_xdp))
static void mvpp2_ethtool_get_strings(struct net_device *netdev, u32 sset,
				      u8 *data)
{
	struct mvpp2_port *port = netdev_priv(netdev);
	int i, q;

	if (sset != ETH_SS_STATS)
		return;

	for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_mib_regs); i++) {
		strscpy(data, mvpp2_ethtool_mib_regs[i].string,
			ETH_GSTRING_LEN);
		data += ETH_GSTRING_LEN;
	}

	for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_port_regs); i++) {
		strscpy(data, mvpp2_ethtool_port_regs[i].string,
			ETH_GSTRING_LEN);
		data += ETH_GSTRING_LEN;
	}

	for (q = 0; q < port->ntxqs; q++) {
		for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_txq_regs); i++) {
			snprintf(data, ETH_GSTRING_LEN,
				 mvpp2_ethtool_txq_regs[i].string, q);
			data += ETH_GSTRING_LEN;
		}
	}

	for (q = 0; q < port->nrxqs; q++) {
		for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_rxq_regs); i++) {
			snprintf(data, ETH_GSTRING_LEN,
				 mvpp2_ethtool_rxq_regs[i].string,
				 q);
			data += ETH_GSTRING_LEN;
		}
	}

	for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_xdp); i++) {
		strscpy(data, mvpp2_ethtool_xdp[i].string,
			ETH_GSTRING_LEN);
		data += ETH_GSTRING_LEN;
	}
}
static void
mvpp2_get_xdp_stats(struct mvpp2_port *port, struct mvpp2_pcpu_stats *xdp_stats)
{
	unsigned int start;
	unsigned int cpu;

	/* Gather XDP Statistics */
	for_each_possible_cpu(cpu) {
		struct mvpp2_pcpu_stats *cpu_stats;
		u64 xdp_redirect;
		u64 xdp_pass;
		u64 xdp_drop;
		u64 xdp_xmit;
		u64 xdp_xmit_err;
		u64 xdp_tx;
		u64 xdp_tx_err;

		cpu_stats = per_cpu_ptr(port->stats, cpu);
		do {
			start = u64_stats_fetch_begin(&cpu_stats->syncp);
			xdp_redirect = cpu_stats->xdp_redirect;
			xdp_pass = cpu_stats->xdp_pass;
			xdp_drop = cpu_stats->xdp_drop;
			xdp_xmit = cpu_stats->xdp_xmit;
			xdp_xmit_err = cpu_stats->xdp_xmit_err;
			xdp_tx = cpu_stats->xdp_tx;
			xdp_tx_err = cpu_stats->xdp_tx_err;
		} while (u64_stats_fetch_retry(&cpu_stats->syncp, start));

		xdp_stats->xdp_redirect += xdp_redirect;
		xdp_stats->xdp_pass += xdp_pass;
		xdp_stats->xdp_drop += xdp_drop;
		xdp_stats->xdp_xmit += xdp_xmit;
		xdp_stats->xdp_xmit_err += xdp_xmit_err;
		xdp_stats->xdp_tx += xdp_tx;
		xdp_stats->xdp_tx_err += xdp_tx_err;
	}
}
2043 static void mvpp2_read_stats(struct mvpp2_port *port)
2045 struct mvpp2_pcpu_stats xdp_stats = {};
2046 const struct mvpp2_ethtool_counter *s;
2050 pstats = port->ethtool_stats;
2052 for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_mib_regs); i++)
2053 *pstats++ += mvpp2_read_count(port, &mvpp2_ethtool_mib_regs[i]);
2055 for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_port_regs); i++)
2056 *pstats++ += mvpp2_read(port->priv,
2057 mvpp2_ethtool_port_regs[i].offset +
2060 for (q = 0; q < port->ntxqs; q++)
2061 for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_txq_regs); i++)
2062 *pstats++ += mvpp2_read_index(port->priv,
2063 MVPP22_CTRS_TX_CTR(port->id, q),
2064 mvpp2_ethtool_txq_regs[i].offset);
2066 /* Rxqs are numbered from 0 from the user standpoint, but not from the
2067 * driver's. We need to add the port->first_rxq offset.
2069 for (q = 0; q < port->nrxqs; q++)
2070 for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_rxq_regs); i++)
2071 *pstats++ += mvpp2_read_index(port->priv,
2072 port->first_rxq + q,
2073 mvpp2_ethtool_rxq_regs[i].offset);
2075 /* Gather XDP Statistics */
2076 mvpp2_get_xdp_stats(port, &xdp_stats);
2078 for (i = 0, s = mvpp2_ethtool_xdp;
2079 s < mvpp2_ethtool_xdp + ARRAY_SIZE(mvpp2_ethtool_xdp);
2081 switch (s->offset) {
2082 case ETHTOOL_XDP_REDIRECT:
2083 *pstats++ = xdp_stats.xdp_redirect;
2084 break;
2085 case ETHTOOL_XDP_PASS:
2086 *pstats++ = xdp_stats.xdp_pass;
2087 break;
2088 case ETHTOOL_XDP_DROP:
2089 *pstats++ = xdp_stats.xdp_drop;
2090 break;
2091 case ETHTOOL_XDP_TX:
2092 *pstats++ = xdp_stats.xdp_tx;
2093 break;
2094 case ETHTOOL_XDP_TX_ERR:
2095 *pstats++ = xdp_stats.xdp_tx_err;
2096 break;
2097 case ETHTOOL_XDP_XMIT:
2098 *pstats++ = xdp_stats.xdp_xmit;
2099 break;
2100 case ETHTOOL_XDP_XMIT_ERR:
2101 *pstats++ = xdp_stats.xdp_xmit_err;
2102 break;
2107 static void mvpp2_gather_hw_statistics(struct work_struct *work)
2109 struct delayed_work *del_work = to_delayed_work(work);
2110 struct mvpp2_port *port = container_of(del_work, struct mvpp2_port,
2113 mutex_lock(&port->gather_stats_lock);
2115 mvpp2_read_stats(port);
2117 /* No need to read the counters again right after this function if it
2118 * was called asynchronously by the user (i.e., via ethtool).
2120 cancel_delayed_work(&port->stats_work);
2121 queue_delayed_work(port->priv->stats_queue, &port->stats_work,
2122 MVPP2_MIB_COUNTERS_STATS_DELAY);
2124 mutex_unlock(&port->gather_stats_lock);
2127 static void mvpp2_ethtool_get_stats(struct net_device *dev,
2128 struct ethtool_stats *stats, u64 *data)
2130 struct mvpp2_port *port = netdev_priv(dev);
2132 /* Update statistics for the given port, then take the lock to avoid
2133 * concurrent accesses on the ethtool_stats structure during its copy.
2135 mvpp2_gather_hw_statistics(&port->stats_work.work);
2137 mutex_lock(&port->gather_stats_lock);
2138 memcpy(data, port->ethtool_stats,
2139 sizeof(u64) * MVPP2_N_ETHTOOL_STATS(port->ntxqs, port->nrxqs));
2140 mutex_unlock(&port->gather_stats_lock);
2143 static int mvpp2_ethtool_get_sset_count(struct net_device *dev, int sset)
2145 struct mvpp2_port *port = netdev_priv(dev);
2147 if (sset == ETH_SS_STATS)
2148 return MVPP2_N_ETHTOOL_STATS(port->ntxqs, port->nrxqs);
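/* A minimal sketch of how the three ethtool callbacks above are typically
 * wired together; the real driver registers them (alongside many other
 * callbacks) in its struct ethtool_ops. The instance name below is
 * illustrative only.
 */
#if 0
static const struct ethtool_ops mvpp2_demo_ethtool_ops = {
	.get_strings		= mvpp2_ethtool_get_strings,
	.get_ethtool_stats	= mvpp2_ethtool_get_stats,
	.get_sset_count		= mvpp2_ethtool_get_sset_count,
};
#endif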
2153 static void mvpp2_mac_reset_assert(struct mvpp2_port *port)
2157 val = readl(port->base + MVPP2_GMAC_CTRL_2_REG) |
2158 MVPP2_GMAC_PORT_RESET_MASK;
2159 writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);
2161 if (port->priv->hw_version >= MVPP22 && port->gop_id == 0) {
2162 val = readl(port->base + MVPP22_XLG_CTRL0_REG) &
2163 ~MVPP22_XLG_CTRL0_MAC_RESET_DIS;
2164 writel(val, port->base + MVPP22_XLG_CTRL0_REG);
2168 static void mvpp22_pcs_reset_assert(struct mvpp2_port *port)
2170 struct mvpp2 *priv = port->priv;
2171 void __iomem *mpcs, *xpcs;
2174 if (port->priv->hw_version == MVPP21 || port->gop_id != 0)
2177 mpcs = priv->iface_base + MVPP22_MPCS_BASE(port->gop_id);
2178 xpcs = priv->iface_base + MVPP22_XPCS_BASE(port->gop_id);
2180 val = readl(mpcs + MVPP22_MPCS_CLK_RESET);
2181 val &= ~(MAC_CLK_RESET_MAC | MAC_CLK_RESET_SD_RX | MAC_CLK_RESET_SD_TX);
2182 val |= MVPP22_MPCS_CLK_RESET_DIV_SET;
2183 writel(val, mpcs + MVPP22_MPCS_CLK_RESET);
2185 val = readl(xpcs + MVPP22_XPCS_CFG0);
2186 writel(val & ~MVPP22_XPCS_CFG0_RESET_DIS, xpcs + MVPP22_XPCS_CFG0);
2189 static void mvpp22_pcs_reset_deassert(struct mvpp2_port *port,
2190 phy_interface_t interface)
2192 struct mvpp2 *priv = port->priv;
2193 void __iomem *mpcs, *xpcs;
2196 if (port->priv->hw_version == MVPP21 || port->gop_id != 0)
2199 mpcs = priv->iface_base + MVPP22_MPCS_BASE(port->gop_id);
2200 xpcs = priv->iface_base + MVPP22_XPCS_BASE(port->gop_id);
2202 switch (interface) {
2203 case PHY_INTERFACE_MODE_5GBASER:
2204 case PHY_INTERFACE_MODE_10GBASER:
2205 val = readl(mpcs + MVPP22_MPCS_CLK_RESET);
2206 val |= MAC_CLK_RESET_MAC | MAC_CLK_RESET_SD_RX |
2207 MAC_CLK_RESET_SD_TX;
2208 val &= ~MVPP22_MPCS_CLK_RESET_DIV_SET;
2209 writel(val, mpcs + MVPP22_MPCS_CLK_RESET);
2211 case PHY_INTERFACE_MODE_XAUI:
2212 case PHY_INTERFACE_MODE_RXAUI:
2213 val = readl(xpcs + MVPP22_XPCS_CFG0);
2214 writel(val | MVPP22_XPCS_CFG0_RESET_DIS, xpcs + MVPP22_XPCS_CFG0);
2221 /* Change maximum receive size of the port */
2222 static inline void mvpp2_gmac_max_rx_size_set(struct mvpp2_port *port)
2226 val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
2227 val &= ~MVPP2_GMAC_MAX_RX_SIZE_MASK;
2228 val |= (((port->pkt_size - MVPP2_MH_SIZE) / 2) <<
2229 MVPP2_GMAC_MAX_RX_SIZE_OFFS);
2230 writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
2233 /* Change maximum receive size of the port */
2234 static inline void mvpp2_xlg_max_rx_size_set(struct mvpp2_port *port)
2238 val = readl(port->base + MVPP22_XLG_CTRL1_REG);
2239 val &= ~MVPP22_XLG_CTRL1_FRAMESIZELIMIT_MASK;
2240 val |= ((port->pkt_size - MVPP2_MH_SIZE) / 2) <<
2241 MVPP22_XLG_CTRL1_FRAMESIZELIMIT_OFFS;
2242 writel(val, port->base + MVPP22_XLG_CTRL1_REG);
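/* Note on both helpers above: the limit is programmed as
 * (pkt_size - MVPP2_MH_SIZE) / 2. The Marvell header is not counted, and
 * the hardware field appears to be expressed in 16-bit units, hence the
 * halved byte count.
 */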
2245 /* Set defaults to the MVPP2 port */
2246 static void mvpp2_defaults_set(struct mvpp2_port *port)
2248 int tx_port_num, val, queue, lrxq;
2250 if (port->priv->hw_version == MVPP21) {
2251 /* Update TX FIFO MIN Threshold */
2252 val = readl(port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
2253 val &= ~MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK;
2254 /* Min. TX threshold must be less than minimal packet length */
2255 val |= MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(64 - 4 - 2);
2256 writel(val, port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
2259 /* Disable Legacy WRR, Disable EJP, Release from reset */
2260 tx_port_num = mvpp2_egress_port(port);
2261 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG,
2263 mvpp2_write(port->priv, MVPP2_TXP_SCHED_CMD_1_REG, 0);
2265 /* Set TXQ scheduling to Round-Robin */
2266 mvpp2_write(port->priv, MVPP2_TXP_SCHED_FIXED_PRIO_REG, 0);
2268 /* Close bandwidth for all queues */
2269 for (queue = 0; queue < MVPP2_MAX_TXQ; queue++)
2270 mvpp2_write(port->priv,
2271 MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(queue), 0);
2273 /* Set refill period to 1 usec, refill tokens
2274 * and bucket size to maximum
2276 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PERIOD_REG,
2277 port->priv->tclk / USEC_PER_SEC);
2278 val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_REFILL_REG);
2279 val &= ~MVPP2_TXP_REFILL_PERIOD_ALL_MASK;
2280 val |= MVPP2_TXP_REFILL_PERIOD_MASK(1);
2281 val |= MVPP2_TXP_REFILL_TOKENS_ALL_MASK;
2282 mvpp2_write(port->priv, MVPP2_TXP_SCHED_REFILL_REG, val);
2283 val = MVPP2_TXP_TOKEN_SIZE_MAX;
2284 mvpp2_write(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val);
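/* Worked example for the refill programming above, assuming tclk = 250 MHz:
 * MVPP2_TXP_SCHED_PERIOD_REG is written with 250000000 / USEC_PER_SEC = 250,
 * i.e. one refill period corresponds to 250 clock cycles = 1 usec.
 */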
2286 /* Set MaximumLowLatencyPacketSize value to 256 */
2287 mvpp2_write(port->priv, MVPP2_RX_CTRL_REG(port->id),
2288 MVPP2_RX_USE_PSEUDO_FOR_CSUM_MASK |
2289 MVPP2_RX_LOW_LATENCY_PKT_SIZE(256));
2291 /* Enable Rx cache snoop */
2292 for (lrxq = 0; lrxq < port->nrxqs; lrxq++) {
2293 queue = port->rxqs[lrxq]->id;
2294 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
2295 val |= MVPP2_SNOOP_PKT_SIZE_MASK |
2296 MVPP2_SNOOP_BUF_HDR_MASK;
2297 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
2300 /* By default, mask all interrupts on all present CPUs */
2301 mvpp2_interrupts_disable(port);
2304 /* Enable/disable receiving packets */
2305 static void mvpp2_ingress_enable(struct mvpp2_port *port)
2310 for (lrxq = 0; lrxq < port->nrxqs; lrxq++) {
2311 queue = port->rxqs[lrxq]->id;
2312 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
2313 val &= ~MVPP2_RXQ_DISABLE_MASK;
2314 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
2318 static void mvpp2_ingress_disable(struct mvpp2_port *port)
2323 for (lrxq = 0; lrxq < port->nrxqs; lrxq++) {
2324 queue = port->rxqs[lrxq]->id;
2325 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
2326 val |= MVPP2_RXQ_DISABLE_MASK;
2327 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
2331 /* Enable transmit via physical egress queue
2332 * - HW starts taking descriptors from DRAM
2334 static void mvpp2_egress_enable(struct mvpp2_port *port)
2338 int tx_port_num = mvpp2_egress_port(port);
2340 /* Enable all initialized TXs. */
2342 for (queue = 0; queue < port->ntxqs; queue++) {
2343 struct mvpp2_tx_queue *txq = port->txqs[queue];
2346 qmap |= (1 << queue);
2349 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
2350 mvpp2_write(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG, qmap);
2353 /* Disable transmit via physical egress queue
2354 * - HW stops taking descriptors from DRAM
2356 static void mvpp2_egress_disable(struct mvpp2_port *port)
2360 int tx_port_num = mvpp2_egress_port(port);
2362 /* Issue stop command for active channels only */
2363 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
2364 reg_data = (mvpp2_read(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG)) &
2365 MVPP2_TXP_SCHED_ENQ_MASK;
2367 mvpp2_write(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG,
2368 (reg_data << MVPP2_TXP_SCHED_DISQ_OFFSET));
2370 /* Wait for all Tx activity to terminate. */
2373 if (delay >= MVPP2_TX_DISABLE_TIMEOUT_MSEC) {
2374 netdev_warn(port->dev,
2375 "Tx stop timed out, status=0x%08x\n",
2382 /* Check the port TX Command register to verify that all
2383 * Tx queues are stopped
2385 reg_data = mvpp2_read(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG);
2386 } while (reg_data & MVPP2_TXP_SCHED_ENQ_MASK);
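/* Example of the qmap encoding used by the enable/disable pair above, with
 * four initialized TXQs: enable writes qmap = 0xf to
 * MVPP2_TXP_SCHED_Q_CMD_REG; disable reads back the enabled bits and writes
 * them shifted by MVPP2_TXP_SCHED_DISQ_OFFSET to stop those same queues.
 */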
2389 /* Rx descriptors helper methods */
2391 /* Get number of Rx descriptors occupied by received packets */
2393 mvpp2_rxq_received(struct mvpp2_port *port, int rxq_id)
2395 u32 val = mvpp2_read(port->priv, MVPP2_RXQ_STATUS_REG(rxq_id));
2397 return val & MVPP2_RXQ_OCCUPIED_MASK;
2400 /* Update Rx queue status with the number of occupied and available
2401 * Rx descriptor slots.
2404 mvpp2_rxq_status_update(struct mvpp2_port *port, int rxq_id,
2405 int used_count, int free_count)
2407 /* Decrement the number of used descriptors and increment
2408 * the number of free descriptors.
2410 u32 val = used_count | (free_count << MVPP2_RXQ_NUM_NEW_OFFSET);
2412 mvpp2_write(port->priv, MVPP2_RXQ_STATUS_UPDATE_REG(rxq_id), val);
2415 /* Get pointer to next RX descriptor to be processed by SW */
2416 static inline struct mvpp2_rx_desc *
2417 mvpp2_rxq_next_desc_get(struct mvpp2_rx_queue *rxq)
2419 int rx_desc = rxq->next_desc_to_proc;
2421 rxq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(rxq, rx_desc);
2422 prefetch(rxq->descs + rxq->next_desc_to_proc);
2423 return rxq->descs + rx_desc;
2426 /* Set rx queue offset */
2427 static void mvpp2_rxq_offset_set(struct mvpp2_port *port,
2428 int prxq, int offset)
2432 /* Convert offset from bytes to units of 32 bytes */
2433 offset = offset >> 5;
2435 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
2436 val &= ~MVPP2_RXQ_PACKET_OFFSET_MASK;
2439 val |= ((offset << MVPP2_RXQ_PACKET_OFFSET_OFFS) &
2440 MVPP2_RXQ_PACKET_OFFSET_MASK);
2442 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
2445 /* Tx descriptors helper methods */
2447 /* Get pointer to next Tx descriptor to be processed (sent) by HW */
2448 static struct mvpp2_tx_desc *
2449 mvpp2_txq_next_desc_get(struct mvpp2_tx_queue *txq)
2451 int tx_desc = txq->next_desc_to_proc;
2453 txq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(txq, tx_desc);
2454 return txq->descs + tx_desc;
2457 /* Update HW with number of aggregated Tx descriptors to be sent
2459 * Called only from mvpp2_tx(), so migration is disabled, using
2460 * smp_processor_id() is OK.
2462 static void mvpp2_aggr_txq_pend_desc_add(struct mvpp2_port *port, int pending)
2464 /* aggregated access - relevant TXQ number is written in TX desc */
2465 mvpp2_thread_write(port->priv,
2466 mvpp2_cpu_to_thread(port->priv, smp_processor_id()),
2467 MVPP2_AGGR_TXQ_UPDATE_REG, pending);
2470 /* Check if there are enough free descriptors in aggregated txq.
2471 * If not, update the number of occupied descriptors and repeat the check.
2473 * Called only from mvpp2_tx(), so migration is disabled, using
2474 * smp_processor_id() is OK.
2476 static int mvpp2_aggr_desc_num_check(struct mvpp2_port *port,
2477 struct mvpp2_tx_queue *aggr_txq, int num)
2479 if ((aggr_txq->count + num) > MVPP2_AGGR_TXQ_SIZE) {
2480 /* Update number of occupied aggregated Tx descriptors */
2481 unsigned int thread =
2482 mvpp2_cpu_to_thread(port->priv, smp_processor_id());
2483 u32 val = mvpp2_read_relaxed(port->priv,
2484 MVPP2_AGGR_TXQ_STATUS_REG(thread));
2486 aggr_txq->count = val & MVPP2_AGGR_TXQ_PENDING_MASK;
2488 if ((aggr_txq->count + num) > MVPP2_AGGR_TXQ_SIZE)
2494 /* Reserved Tx descriptors allocation request
2496 * Called only from mvpp2_txq_reserved_desc_num_proc(), itself called
2497 * only by mvpp2_tx(), so migration is disabled, using
2498 * smp_processor_id() is OK.
2500 static int mvpp2_txq_alloc_reserved_desc(struct mvpp2_port *port,
2501 struct mvpp2_tx_queue *txq, int num)
2503 unsigned int thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id());
2504 struct mvpp2 *priv = port->priv;
2507 val = (txq->id << MVPP2_TXQ_RSVD_REQ_Q_OFFSET) | num;
2508 mvpp2_thread_write_relaxed(priv, thread, MVPP2_TXQ_RSVD_REQ_REG, val);
2510 val = mvpp2_thread_read_relaxed(priv, thread, MVPP2_TXQ_RSVD_RSLT_REG);
2512 return val & MVPP2_TXQ_RSVD_RSLT_MASK;
2515 /* Check if there are enough reserved descriptors for transmission.
2516 * If not, request chunk of reserved descriptors and check again.
2518 static int mvpp2_txq_reserved_desc_num_proc(struct mvpp2_port *port,
2519 struct mvpp2_tx_queue *txq,
2520 struct mvpp2_txq_pcpu *txq_pcpu,
2523 int req, desc_count;
2524 unsigned int thread;
2526 if (txq_pcpu->reserved_num >= num)
2529 /* Not enough descriptors reserved! Update the reserved descriptor
2530 * count and check again.
2533 desc_count = 0;
2534 /* Compute total of used descriptors */
2535 for (thread = 0; thread < port->priv->nthreads; thread++) {
2536 struct mvpp2_txq_pcpu *txq_pcpu_aux;
2538 txq_pcpu_aux = per_cpu_ptr(txq->pcpu, thread);
2539 desc_count += txq_pcpu_aux->count;
2540 desc_count += txq_pcpu_aux->reserved_num;
2543 req = max(MVPP2_CPU_DESC_CHUNK, num - txq_pcpu->reserved_num);
2544 desc_count += req;
2546 if (desc_count >
2547 (txq->size - (MVPP2_MAX_THREADS * MVPP2_CPU_DESC_CHUNK)))
2550 txq_pcpu->reserved_num += mvpp2_txq_alloc_reserved_desc(port, txq, req);
2552 /* OK, the descriptor could have been updated: check again. */
2553 if (txq_pcpu->reserved_num < num)
2558 /* Release the last allocated Tx descriptor. Useful to handle DMA
2559 * mapping failures in the Tx path.
2561 static void mvpp2_txq_desc_put(struct mvpp2_tx_queue *txq)
2563 if (txq->next_desc_to_proc == 0)
2564 txq->next_desc_to_proc = txq->last_desc - 1;
2565 else
2566 txq->next_desc_to_proc--;
2569 /* Set Tx descriptors fields relevant for CSUM calculation */
2570 static u32 mvpp2_txq_desc_csum(int l3_offs, __be16 l3_proto,
2571 int ip_hdr_len, int l4_proto)
2575 /* fields: L3_offset, IP_hdrlen, L3_type, G_IPv4_chk,
2576 * G_L4_chk, L4_type required only for checksum calculation
2578 command = (l3_offs << MVPP2_TXD_L3_OFF_SHIFT);
2579 command |= (ip_hdr_len << MVPP2_TXD_IP_HLEN_SHIFT);
2580 command |= MVPP2_TXD_IP_CSUM_DISABLE;
2582 if (l3_proto == htons(ETH_P_IP)) {
2583 command &= ~MVPP2_TXD_IP_CSUM_DISABLE; /* enable IPv4 csum */
2584 command &= ~MVPP2_TXD_L3_IP6; /* enable IPv4 */
2585 } else {
2586 command |= MVPP2_TXD_L3_IP6; /* enable IPv6 */
2589 if (l4_proto == IPPROTO_TCP) {
2590 command &= ~MVPP2_TXD_L4_UDP; /* enable TCP */
2591 command &= ~MVPP2_TXD_L4_CSUM_FRAG; /* generate L4 csum */
2592 } else if (l4_proto == IPPROTO_UDP) {
2593 command |= MVPP2_TXD_L4_UDP; /* enable UDP */
2594 command &= ~MVPP2_TXD_L4_CSUM_FRAG; /* generate L4 csum */
2595 } else {
2596 command |= MVPP2_TXD_L4_CSUM_NOT;
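/* Worked example for the command built above (values are illustrative):
 * an IPv4/TCP frame with a 14-byte Ethernet header and a 20-byte IP header
 * gives l3_offs = 14 and ip_hdr_len = 5 (32-bit words). The descriptor then
 * carries the L3 offset and header length, keeps IPv4 checksum generation
 * enabled (MVPP2_TXD_IP_CSUM_DISABLE cleared), and clears MVPP2_TXD_L4_UDP
 * to select TCP checksumming.
 */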
2602 /* Get number of sent descriptors and decrement counter.
2603 * The number of sent descriptors is returned.
2606 * Called only from mvpp2_txq_done(), called from mvpp2_tx()
2607 * (migration disabled) and from the TX completion tasklet (migration
2608 * disabled) so using smp_processor_id() is OK.
2610 static inline int mvpp2_txq_sent_desc_proc(struct mvpp2_port *port,
2611 struct mvpp2_tx_queue *txq)
2615 /* Reading status reg resets transmitted descriptor counter */
2616 val = mvpp2_thread_read_relaxed(port->priv,
2617 mvpp2_cpu_to_thread(port->priv, smp_processor_id()),
2618 MVPP2_TXQ_SENT_REG(txq->id));
2620 return (val & MVPP2_TRANSMITTED_COUNT_MASK) >>
2621 MVPP2_TRANSMITTED_COUNT_OFFSET;
2624 /* Called through on_each_cpu(), so runs on all CPUs, with migration
2625 * disabled, therefore using smp_processor_id() is OK.
2627 static void mvpp2_txq_sent_counter_clear(void *arg)
2629 struct mvpp2_port *port = arg;
2632 /* If the thread isn't used, don't do anything */
2633 if (smp_processor_id() >= port->priv->nthreads)
2636 for (queue = 0; queue < port->ntxqs; queue++) {
2637 int id = port->txqs[queue]->id;
2639 mvpp2_thread_read(port->priv,
2640 mvpp2_cpu_to_thread(port->priv, smp_processor_id()),
2641 MVPP2_TXQ_SENT_REG(id));
2645 /* Set max sizes for Tx queues */
2646 static void mvpp2_txp_max_tx_size_set(struct mvpp2_port *port)
2649 int txq, tx_port_num;
2651 mtu = port->pkt_size * 8;
2652 if (mtu > MVPP2_TXP_MTU_MAX)
2653 mtu = MVPP2_TXP_MTU_MAX;
2655 /* Workaround for a wrong Token bucket update: set MTU value to 3x the real MTU */
2656 mtu = 3 * mtu;
2658 /* Indirect access to registers */
2659 tx_port_num = mvpp2_egress_port(port);
2660 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
2663 val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_MTU_REG);
2664 val &= ~MVPP2_TXP_MTU_MAX;
2666 mvpp2_write(port->priv, MVPP2_TXP_SCHED_MTU_REG, val);
2668 /* TXP token size and all TXQs token size must be larger than the MTU */
2669 val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG);
2670 size = val & MVPP2_TXP_TOKEN_SIZE_MAX;
2673 val &= ~MVPP2_TXP_TOKEN_SIZE_MAX;
2675 mvpp2_write(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val);
2678 for (txq = 0; txq < port->ntxqs; txq++) {
2679 val = mvpp2_read(port->priv,
2680 MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq));
2681 size = val & MVPP2_TXQ_TOKEN_SIZE_MAX;
2685 val &= ~MVPP2_TXQ_TOKEN_SIZE_MAX;
2687 mvpp2_write(port->priv,
2688 MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq),
2694 /* Set the number of non-occupied descriptors threshold */
2695 static void mvpp2_set_rxq_free_tresh(struct mvpp2_port *port,
2696 struct mvpp2_rx_queue *rxq)
2700 mvpp2_write(port->priv, MVPP2_RXQ_NUM_REG, rxq->id);
2702 val = mvpp2_read(port->priv, MVPP2_RXQ_THRESH_REG);
2703 val &= ~MVPP2_RXQ_NON_OCCUPIED_MASK;
2704 val |= MSS_THRESHOLD_STOP << MVPP2_RXQ_NON_OCCUPIED_OFFSET;
2705 mvpp2_write(port->priv, MVPP2_RXQ_THRESH_REG, val);
2708 /* Set the number of packets that will be received before an Rx
2709 * interrupt is generated by the HW.
2711 static void mvpp2_rx_pkts_coal_set(struct mvpp2_port *port,
2712 struct mvpp2_rx_queue *rxq)
2714 unsigned int thread = mvpp2_cpu_to_thread(port->priv, get_cpu());
2716 if (rxq->pkts_coal > MVPP2_OCCUPIED_THRESH_MASK)
2717 rxq->pkts_coal = MVPP2_OCCUPIED_THRESH_MASK;
2719 mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_NUM_REG, rxq->id);
2720 mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_THRESH_REG,
2726 /* For some reason in the LSP this is done on each CPU. Why? */
2727 static void mvpp2_tx_pkts_coal_set(struct mvpp2_port *port,
2728 struct mvpp2_tx_queue *txq)
2730 unsigned int thread;
2733 if (txq->done_pkts_coal > MVPP2_TXQ_THRESH_MASK)
2734 txq->done_pkts_coal = MVPP2_TXQ_THRESH_MASK;
2736 val = (txq->done_pkts_coal << MVPP2_TXQ_THRESH_OFFSET);
2737 /* PKT-coalescing registers are per-queue + per-thread */
2738 for (thread = 0; thread < MVPP2_MAX_THREADS; thread++) {
2739 mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_NUM_REG, txq->id);
2740 mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_THRESH_REG, val);
2744 static u32 mvpp2_usec_to_cycles(u32 usec, unsigned long clk_hz)
2746 u64 tmp = (u64)clk_hz * usec;
2748 do_div(tmp, USEC_PER_SEC);
2750 return tmp > U32_MAX ? U32_MAX : tmp;
2753 static u32 mvpp2_cycles_to_usec(u32 cycles, unsigned long clk_hz)
2755 u64 tmp = (u64)cycles * USEC_PER_SEC;
2757 do_div(tmp, clk_hz);
2759 return tmp > U32_MAX ? U32_MAX : tmp;
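/* Worked conversion example, assuming clk_hz = 250 MHz: 100 usec maps to
 * 250000000 * 100 / USEC_PER_SEC = 25000 cycles, and 25000 cycles map back
 * to 25000 * USEC_PER_SEC / 250000000 = 100 usec. Both helpers clamp the
 * result to U32_MAX.
 */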
2762 /* Set the time delay in usec before Rx interrupt */
2763 static void mvpp2_rx_time_coal_set(struct mvpp2_port *port,
2764 struct mvpp2_rx_queue *rxq)
2766 unsigned long freq = port->priv->tclk;
2767 u32 val = mvpp2_usec_to_cycles(rxq->time_coal, freq);
2769 if (val > MVPP2_MAX_ISR_RX_THRESHOLD) {
2771 mvpp2_cycles_to_usec(MVPP2_MAX_ISR_RX_THRESHOLD, freq);
2773 /* re-evaluate to get actual register value */
2774 val = mvpp2_usec_to_cycles(rxq->time_coal, freq);
2777 mvpp2_write(port->priv, MVPP2_ISR_RX_THRESHOLD_REG(rxq->id), val);
2780 static void mvpp2_tx_time_coal_set(struct mvpp2_port *port)
2782 unsigned long freq = port->priv->tclk;
2783 u32 val = mvpp2_usec_to_cycles(port->tx_time_coal, freq);
2785 if (val > MVPP2_MAX_ISR_TX_THRESHOLD) {
2786 port->tx_time_coal =
2787 mvpp2_cycles_to_usec(MVPP2_MAX_ISR_TX_THRESHOLD, freq);
2789 /* re-evaluate to get actual register value */
2790 val = mvpp2_usec_to_cycles(port->tx_time_coal, freq);
2793 mvpp2_write(port->priv, MVPP2_ISR_TX_THRESHOLD_REG(port->id), val);
2796 /* Free Tx queue skbuffs */
2797 static void mvpp2_txq_bufs_free(struct mvpp2_port *port,
2798 struct mvpp2_tx_queue *txq,
2799 struct mvpp2_txq_pcpu *txq_pcpu, int num)
2801 struct xdp_frame_bulk bq;
2804 xdp_frame_bulk_init(&bq);
2806 rcu_read_lock(); /* needed for xdp_return_frame_bulk */
2808 for (i = 0; i < num; i++) {
2809 struct mvpp2_txq_pcpu_buf *tx_buf =
2810 txq_pcpu->buffs + txq_pcpu->txq_get_index;
2812 if (!IS_TSO_HEADER(txq_pcpu, tx_buf->dma) &&
2813 tx_buf->type != MVPP2_TYPE_XDP_TX)
2814 dma_unmap_single(port->dev->dev.parent, tx_buf->dma,
2815 tx_buf->size, DMA_TO_DEVICE);
2816 if (tx_buf->type == MVPP2_TYPE_SKB && tx_buf->skb)
2817 dev_kfree_skb_any(tx_buf->skb);
2818 else if (tx_buf->type == MVPP2_TYPE_XDP_TX ||
2819 tx_buf->type == MVPP2_TYPE_XDP_NDO)
2820 xdp_return_frame_bulk(tx_buf->xdpf, &bq);
2822 mvpp2_txq_inc_get(txq_pcpu);
2824 xdp_flush_frame_bulk(&bq);
2829 static inline struct mvpp2_rx_queue *mvpp2_get_rx_queue(struct mvpp2_port *port,
2832 int queue = fls(cause) - 1;
2834 return port->rxqs[queue];
2837 static inline struct mvpp2_tx_queue *mvpp2_get_tx_queue(struct mvpp2_port *port,
2840 int queue = fls(cause) - 1;
2842 return port->txqs[queue];
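/* Example of the cause-to-queue mapping above: fls() returns the position
 * of the most significant set bit, counting from 1, so a cause bitmap of
 * 0b0100 gives fls(cause) - 1 = 2, i.e. queue 2 is serviced first and its
 * bit is then cleared by the caller.
 */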
2845 /* Handle end of transmission */
2846 static void mvpp2_txq_done(struct mvpp2_port *port, struct mvpp2_tx_queue *txq,
2847 struct mvpp2_txq_pcpu *txq_pcpu)
2849 struct netdev_queue *nq = netdev_get_tx_queue(port->dev, txq->log_id);
2852 if (txq_pcpu->thread != mvpp2_cpu_to_thread(port->priv, smp_processor_id()))
2853 netdev_err(port->dev, "wrong cpu on the end of Tx processing\n");
2855 tx_done = mvpp2_txq_sent_desc_proc(port, txq);
2858 mvpp2_txq_bufs_free(port, txq, txq_pcpu, tx_done);
2860 txq_pcpu->count -= tx_done;
2862 if (netif_tx_queue_stopped(nq))
2863 if (txq_pcpu->count <= txq_pcpu->wake_threshold)
2864 netif_tx_wake_queue(nq);
2867 static unsigned int mvpp2_tx_done(struct mvpp2_port *port, u32 cause,
2868 unsigned int thread)
2870 struct mvpp2_tx_queue *txq;
2871 struct mvpp2_txq_pcpu *txq_pcpu;
2872 unsigned int tx_todo = 0;
2875 txq = mvpp2_get_tx_queue(port, cause);
2879 txq_pcpu = per_cpu_ptr(txq->pcpu, thread);
2881 if (txq_pcpu->count) {
2882 mvpp2_txq_done(port, txq, txq_pcpu);
2883 tx_todo += txq_pcpu->count;
2886 cause &= ~(1 << txq->log_id);
2891 /* Rx/Tx queue initialization/cleanup methods */
2893 /* Allocate and initialize descriptors for aggr TXQ */
2894 static int mvpp2_aggr_txq_init(struct platform_device *pdev,
2895 struct mvpp2_tx_queue *aggr_txq,
2896 unsigned int thread, struct mvpp2 *priv)
2900 /* Allocate memory for TX descriptors */
2901 aggr_txq->descs = dma_alloc_coherent(&pdev->dev,
2902 MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE,
2903 &aggr_txq->descs_dma, GFP_KERNEL);
2904 if (!aggr_txq->descs)
2907 aggr_txq->last_desc = MVPP2_AGGR_TXQ_SIZE - 1;
2909 /* Workaround: the aggregated TXQ is not reset, resume from the HW index */
2910 aggr_txq->next_desc_to_proc = mvpp2_read(priv,
2911 MVPP2_AGGR_TXQ_INDEX_REG(thread));
2913 /* Set Tx descriptors queue starting address - indirect access */
2916 if (priv->hw_version == MVPP21)
2917 txq_dma = aggr_txq->descs_dma;
2919 txq_dma = aggr_txq->descs_dma >>
2920 MVPP22_AGGR_TXQ_DESC_ADDR_OFFS;
2922 mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_ADDR_REG(thread), txq_dma);
2923 mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_SIZE_REG(thread),
2924 MVPP2_AGGR_TXQ_SIZE);
2929 /* Create a specified Rx queue */
2930 static int mvpp2_rxq_init(struct mvpp2_port *port,
2931 struct mvpp2_rx_queue *rxq)
2933 struct mvpp2 *priv = port->priv;
2934 unsigned int thread;
2938 rxq->size = port->rx_ring_size;
2940 /* Allocate memory for RX descriptors */
2941 rxq->descs = dma_alloc_coherent(port->dev->dev.parent,
2942 rxq->size * MVPP2_DESC_ALIGNED_SIZE,
2943 &rxq->descs_dma, GFP_KERNEL);
2947 rxq->last_desc = rxq->size - 1;
2949 /* Zero occupied and non-occupied counters - direct access */
2950 mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0);
2952 /* Set Rx descriptors queue starting address - indirect access */
2953 thread = mvpp2_cpu_to_thread(port->priv, get_cpu());
2954 mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_NUM_REG, rxq->id);
2955 if (port->priv->hw_version == MVPP21)
2956 rxq_dma = rxq->descs_dma;
2958 rxq_dma = rxq->descs_dma >> MVPP22_DESC_ADDR_OFFS;
2959 mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_DESC_ADDR_REG, rxq_dma);
2960 mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_DESC_SIZE_REG, rxq->size);
2961 mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_INDEX_REG, 0);
2965 mvpp2_rxq_offset_set(port, rxq->id, MVPP2_SKB_HEADROOM);
2967 /* Set coalescing pkts and time */
2968 mvpp2_rx_pkts_coal_set(port, rxq);
2969 mvpp2_rx_time_coal_set(port, rxq);
2971 /* Set the number of non-occupied descriptors threshold */
2972 mvpp2_set_rxq_free_tresh(port, rxq);
2974 /* Add number of descriptors ready for receiving packets */
2975 mvpp2_rxq_status_update(port, rxq->id, 0, rxq->size);
2977 if (priv->percpu_pools) {
2978 err = xdp_rxq_info_reg(&rxq->xdp_rxq_short, port->dev, rxq->logic_rxq, 0);
2982 err = xdp_rxq_info_reg(&rxq->xdp_rxq_long, port->dev, rxq->logic_rxq, 0);
2984 goto err_unregister_rxq_short;
2986 /* Every RXQ has a pool for short and another for long packets */
2987 err = xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq_short,
2989 priv->page_pool[rxq->logic_rxq]);
2991 goto err_unregister_rxq_long;
2993 err = xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq_long,
2995 priv->page_pool[rxq->logic_rxq +
2998 goto err_unregister_mem_rxq_short;
3003 err_unregister_mem_rxq_short:
3004 xdp_rxq_info_unreg_mem_model(&rxq->xdp_rxq_short);
3005 err_unregister_rxq_long:
3006 xdp_rxq_info_unreg(&rxq->xdp_rxq_long);
3007 err_unregister_rxq_short:
3008 xdp_rxq_info_unreg(&rxq->xdp_rxq_short);
3010 dma_free_coherent(port->dev->dev.parent,
3011 rxq->size * MVPP2_DESC_ALIGNED_SIZE,
3012 rxq->descs, rxq->descs_dma);
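/* The error unwinding above follows the usual LIFO pattern: each label
 * undoes exactly the registrations that succeeded before the failure
 * (memory model before rxq info, long before short), and the descriptor
 * ring allocated at the top is freed last.
 */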
3016 /* Push packets received by the RXQ to BM pool */
3017 static void mvpp2_rxq_drop_pkts(struct mvpp2_port *port,
3018 struct mvpp2_rx_queue *rxq)
3022 rx_received = mvpp2_rxq_received(port, rxq->id);
3026 for (i = 0; i < rx_received; i++) {
3027 struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq);
3028 u32 status = mvpp2_rxdesc_status_get(port, rx_desc);
3031 pool = (status & MVPP2_RXD_BM_POOL_ID_MASK) >>
3032 MVPP2_RXD_BM_POOL_ID_OFFS;
3034 mvpp2_bm_pool_put(port, pool,
3035 mvpp2_rxdesc_dma_addr_get(port, rx_desc),
3036 mvpp2_rxdesc_cookie_get(port, rx_desc));
3038 mvpp2_rxq_status_update(port, rxq->id, rx_received, rx_received);
3041 /* Cleanup Rx queue */
3042 static void mvpp2_rxq_deinit(struct mvpp2_port *port,
3043 struct mvpp2_rx_queue *rxq)
3045 unsigned int thread;
3047 if (xdp_rxq_info_is_reg(&rxq->xdp_rxq_short))
3048 xdp_rxq_info_unreg(&rxq->xdp_rxq_short);
3050 if (xdp_rxq_info_is_reg(&rxq->xdp_rxq_long))
3051 xdp_rxq_info_unreg(&rxq->xdp_rxq_long);
3053 mvpp2_rxq_drop_pkts(port, rxq);
3056 dma_free_coherent(port->dev->dev.parent,
3057 rxq->size * MVPP2_DESC_ALIGNED_SIZE,
3063 rxq->next_desc_to_proc = 0;
3066 /* Clear Rx descriptors queue starting address and size;
3067 * free descriptor number
3069 mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0);
3070 thread = mvpp2_cpu_to_thread(port->priv, get_cpu());
3071 mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_NUM_REG, rxq->id);
3072 mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_DESC_ADDR_REG, 0);
3073 mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_DESC_SIZE_REG, 0);
3077 /* Create and initialize a Tx queue */
3078 static int mvpp2_txq_init(struct mvpp2_port *port,
3079 struct mvpp2_tx_queue *txq)
3082 unsigned int thread;
3083 int desc, desc_per_txq, tx_port_num;
3084 struct mvpp2_txq_pcpu *txq_pcpu;
3086 txq->size = port->tx_ring_size;
3088 /* Allocate memory for Tx descriptors */
3089 txq->descs = dma_alloc_coherent(port->dev->dev.parent,
3090 txq->size * MVPP2_DESC_ALIGNED_SIZE,
3091 &txq->descs_dma, GFP_KERNEL);
3095 txq->last_desc = txq->size - 1;
3097 /* Set Tx descriptors queue starting address - indirect access */
3098 thread = mvpp2_cpu_to_thread(port->priv, get_cpu());
3099 mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_NUM_REG, txq->id);
3100 mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_DESC_ADDR_REG,
3102 mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_DESC_SIZE_REG,
3103 txq->size & MVPP2_TXQ_DESC_SIZE_MASK);
3104 mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_INDEX_REG, 0);
3105 mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_RSVD_CLR_REG,
3106 txq->id << MVPP2_TXQ_RSVD_CLR_OFFSET);
3107 val = mvpp2_thread_read(port->priv, thread, MVPP2_TXQ_PENDING_REG);
3108 val &= ~MVPP2_TXQ_PENDING_MASK;
3109 mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_PENDING_REG, val);
3111 /* Calculate base address in prefetch buffer. We reserve 16 descriptors
3112 * for each existing TXQ.
3113 * TCONTs for the PON port must be contiguous from 0 to MVPP2_MAX_TCONT;
3114 * GBE ports are assumed to be contiguous from 0 to MVPP2_MAX_PORTS.
3117 desc = (port->id * MVPP2_MAX_TXQ * desc_per_txq) +
3118 (txq->log_id * desc_per_txq);
3120 mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_PREF_BUF_REG,
3121 MVPP2_PREF_BUF_PTR(desc) | MVPP2_PREF_BUF_SIZE_16 |
3122 MVPP2_PREF_BUF_THRESH(desc_per_txq / 2));
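/* Worked example for the prefetch-buffer base above, with the 16
 * descriptors reserved per TXQ: for port 1, txq log_id 3 and
 * MVPP2_MAX_TXQ = 8 (assumed value), desc = 1 * 8 * 16 + 3 * 16 = 176, so
 * each (port, txq) pair gets a disjoint 16-descriptor window.
 */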
3125 /* WRR / EJP configuration - indirect access */
3126 tx_port_num = mvpp2_egress_port(port);
3127 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
3129 val = mvpp2_read(port->priv, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id));
3130 val &= ~MVPP2_TXQ_REFILL_PERIOD_ALL_MASK;
3131 val |= MVPP2_TXQ_REFILL_PERIOD_MASK(1);
3132 val |= MVPP2_TXQ_REFILL_TOKENS_ALL_MASK;
3133 mvpp2_write(port->priv, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id), val);
3135 val = MVPP2_TXQ_TOKEN_SIZE_MAX;
3136 mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq->log_id),
3139 for (thread = 0; thread < port->priv->nthreads; thread++) {
3140 txq_pcpu = per_cpu_ptr(txq->pcpu, thread);
3141 txq_pcpu->size = txq->size;
3142 txq_pcpu->buffs = kmalloc_array(txq_pcpu->size,
3143 sizeof(*txq_pcpu->buffs),
3145 if (!txq_pcpu->buffs)
3148 txq_pcpu->count = 0;
3149 txq_pcpu->reserved_num = 0;
3150 txq_pcpu->txq_put_index = 0;
3151 txq_pcpu->txq_get_index = 0;
3152 txq_pcpu->tso_headers = NULL;
3154 txq_pcpu->stop_threshold = txq->size - MVPP2_MAX_SKB_DESCS;
3155 txq_pcpu->wake_threshold = txq_pcpu->stop_threshold / 2;
3157 txq_pcpu->tso_headers =
3158 dma_alloc_coherent(port->dev->dev.parent,
3159 txq_pcpu->size * TSO_HEADER_SIZE,
3160 &txq_pcpu->tso_headers_dma,
3162 if (!txq_pcpu->tso_headers)
3169 /* Free allocated TXQ resources */
3170 static void mvpp2_txq_deinit(struct mvpp2_port *port,
3171 struct mvpp2_tx_queue *txq)
3173 struct mvpp2_txq_pcpu *txq_pcpu;
3174 unsigned int thread;
3176 for (thread = 0; thread < port->priv->nthreads; thread++) {
3177 txq_pcpu = per_cpu_ptr(txq->pcpu, thread);
3178 kfree(txq_pcpu->buffs);
3180 if (txq_pcpu->tso_headers)
3181 dma_free_coherent(port->dev->dev.parent,
3182 txq_pcpu->size * TSO_HEADER_SIZE,
3183 txq_pcpu->tso_headers,
3184 txq_pcpu->tso_headers_dma);
3186 txq_pcpu->tso_headers = NULL;
3190 dma_free_coherent(port->dev->dev.parent,
3191 txq->size * MVPP2_DESC_ALIGNED_SIZE,
3192 txq->descs, txq->descs_dma);
3196 txq->next_desc_to_proc = 0;
3199 /* Set minimum bandwidth for disabled TXQs */
3200 mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(txq->log_id), 0);
3202 /* Set Tx descriptors queue starting address and size */
3203 thread = mvpp2_cpu_to_thread(port->priv, get_cpu());
3204 mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_NUM_REG, txq->id);
3205 mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_DESC_ADDR_REG, 0);
3206 mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_DESC_SIZE_REG, 0);
3210 /* Cleanup Tx ports */
3211 static void mvpp2_txq_clean(struct mvpp2_port *port, struct mvpp2_tx_queue *txq)
3213 struct mvpp2_txq_pcpu *txq_pcpu;
3215 unsigned int thread = mvpp2_cpu_to_thread(port->priv, get_cpu());
3218 mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_NUM_REG, txq->id);
3219 val = mvpp2_thread_read(port->priv, thread, MVPP2_TXQ_PREF_BUF_REG);
3220 val |= MVPP2_TXQ_DRAIN_EN_MASK;
3221 mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_PREF_BUF_REG, val);
3223 /* The napi queue has been stopped so wait for all packets
3224 * to be transmitted.
3228 if (delay >= MVPP2_TX_PENDING_TIMEOUT_MSEC) {
3229 netdev_warn(port->dev,
3230 "port %d: cleaning queue %d timed out\n",
3231 port->id, txq->log_id);
3237 pending = mvpp2_thread_read(port->priv, thread,
3238 MVPP2_TXQ_PENDING_REG);
3239 pending &= MVPP2_TXQ_PENDING_MASK;
3242 val &= ~MVPP2_TXQ_DRAIN_EN_MASK;
3243 mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_PREF_BUF_REG, val);
3246 for (thread = 0; thread < port->priv->nthreads; thread++) {
3247 txq_pcpu = per_cpu_ptr(txq->pcpu, thread);
3249 /* Release all packets */
3250 mvpp2_txq_bufs_free(port, txq, txq_pcpu, txq_pcpu->count);
3253 txq_pcpu->count = 0;
3254 txq_pcpu->txq_put_index = 0;
3255 txq_pcpu->txq_get_index = 0;
3259 /* Cleanup all Tx queues */
3260 static void mvpp2_cleanup_txqs(struct mvpp2_port *port)
3262 struct mvpp2_tx_queue *txq;
3266 val = mvpp2_read(port->priv, MVPP2_TX_PORT_FLUSH_REG);
3268 /* Reset Tx ports and delete Tx queues */
3269 val |= MVPP2_TX_PORT_FLUSH_MASK(port->id);
3270 mvpp2_write(port->priv, MVPP2_TX_PORT_FLUSH_REG, val);
3272 for (queue = 0; queue < port->ntxqs; queue++) {
3273 txq = port->txqs[queue];
3274 mvpp2_txq_clean(port, txq);
3275 mvpp2_txq_deinit(port, txq);
3278 on_each_cpu(mvpp2_txq_sent_counter_clear, port, 1);
3280 val &= ~MVPP2_TX_PORT_FLUSH_MASK(port->id);
3281 mvpp2_write(port->priv, MVPP2_TX_PORT_FLUSH_REG, val);
3284 /* Cleanup all Rx queues */
3285 static void mvpp2_cleanup_rxqs(struct mvpp2_port *port)
3289 for (queue = 0; queue < port->nrxqs; queue++)
3290 mvpp2_rxq_deinit(port, port->rxqs[queue]);
3293 mvpp2_rxq_disable_fc(port);
3296 /* Init all Rx queues for port */
3297 static int mvpp2_setup_rxqs(struct mvpp2_port *port)
3301 for (queue = 0; queue < port->nrxqs; queue++) {
3302 err = mvpp2_rxq_init(port, port->rxqs[queue]);
3308 mvpp2_rxq_enable_fc(port);
3313 mvpp2_cleanup_rxqs(port);
3317 /* Init all tx queues for port */
3318 static int mvpp2_setup_txqs(struct mvpp2_port *port)
3320 struct mvpp2_tx_queue *txq;
3323 for (queue = 0; queue < port->ntxqs; queue++) {
3324 txq = port->txqs[queue];
3325 err = mvpp2_txq_init(port, txq);
3329 /* Assign this queue to a CPU */
3330 if (queue < num_possible_cpus())
3331 netif_set_xps_queue(port->dev, cpumask_of(queue), queue);
3334 if (port->has_tx_irqs) {
3335 mvpp2_tx_time_coal_set(port);
3336 for (queue = 0; queue < port->ntxqs; queue++) {
3337 txq = port->txqs[queue];
3338 mvpp2_tx_pkts_coal_set(port, txq);
3342 on_each_cpu(mvpp2_txq_sent_counter_clear, port, 1);
3346 mvpp2_cleanup_txqs(port);
3350 /* The callback for per-port interrupt */
3351 static irqreturn_t mvpp2_isr(int irq, void *dev_id)
3353 struct mvpp2_queue_vector *qv = dev_id;
3355 mvpp2_qvec_interrupt_disable(qv);
3357 napi_schedule(&qv->napi);
3362 static void mvpp2_isr_handle_ptp_queue(struct mvpp2_port *port, int nq)
3364 struct skb_shared_hwtstamps shhwtstamps;
3365 struct mvpp2_hwtstamp_queue *queue;
3366 struct sk_buff *skb;
3367 void __iomem *ptp_q;
3371 ptp_q = port->priv->iface_base + MVPP22_PTP_BASE(port->gop_id);
3372 if (nq)
3373 ptp_q += MVPP22_PTP_TX_Q1_R0 - MVPP22_PTP_TX_Q0_R0;
3375 queue = &port->tx_hwtstamp_queue[nq];
3378 r0 = readl_relaxed(ptp_q + MVPP22_PTP_TX_Q0_R0) & 0xffff;
3382 r1 = readl_relaxed(ptp_q + MVPP22_PTP_TX_Q0_R1) & 0xffff;
3383 r2 = readl_relaxed(ptp_q + MVPP22_PTP_TX_Q0_R2) & 0xffff;
3385 id = (r0 >> 1) & 31;
3387 skb = queue->skb[id];
3388 queue->skb[id] = NULL;
3389 if (skb) {
3390 u32 ts = r2 << 19 | r1 << 3 | r0 >> 13;
3392 mvpp22_tai_tstamp(port->priv->tai, ts, &shhwtstamps);
3393 skb_tstamp_tx(skb, &shhwtstamps);
3394 dev_kfree_skb_any(skb);
3399 static void mvpp2_isr_handle_ptp(struct mvpp2_port *port)
3404 ptp = port->priv->iface_base + MVPP22_PTP_BASE(port->gop_id);
3405 val = readl(ptp + MVPP22_PTP_INT_CAUSE);
3406 if (val & MVPP22_PTP_INT_CAUSE_QUEUE0)
3407 mvpp2_isr_handle_ptp_queue(port, 0);
3408 if (val & MVPP22_PTP_INT_CAUSE_QUEUE1)
3409 mvpp2_isr_handle_ptp_queue(port, 1);
3412 static void mvpp2_isr_handle_link(struct mvpp2_port *port, bool link)
3414 struct net_device *dev = port->dev;
3416 if (port->phylink) {
3417 phylink_mac_change(port->phylink, link);
3421 if (!netif_running(dev))
3425 mvpp2_interrupts_enable(port);
3427 mvpp2_egress_enable(port);
3428 mvpp2_ingress_enable(port);
3429 netif_carrier_on(dev);
3430 netif_tx_wake_all_queues(dev);
3432 netif_tx_stop_all_queues(dev);
3433 netif_carrier_off(dev);
3434 mvpp2_ingress_disable(port);
3435 mvpp2_egress_disable(port);
3437 mvpp2_interrupts_disable(port);
3441 static void mvpp2_isr_handle_xlg(struct mvpp2_port *port)
3446 val = readl(port->base + MVPP22_XLG_INT_STAT);
3447 if (val & MVPP22_XLG_INT_STAT_LINK) {
3448 val = readl(port->base + MVPP22_XLG_STATUS);
3449 link = (val & MVPP22_XLG_STATUS_LINK_UP);
3450 mvpp2_isr_handle_link(port, link);
3454 static void mvpp2_isr_handle_gmac_internal(struct mvpp2_port *port)
3459 if (phy_interface_mode_is_rgmii(port->phy_interface) ||
3460 phy_interface_mode_is_8023z(port->phy_interface) ||
3461 port->phy_interface == PHY_INTERFACE_MODE_SGMII) {
3462 val = readl(port->base + MVPP22_GMAC_INT_STAT);
3463 if (val & MVPP22_GMAC_INT_STAT_LINK) {
3464 val = readl(port->base + MVPP2_GMAC_STATUS0);
3465 link = (val & MVPP2_GMAC_STATUS0_LINK_UP);
3466 mvpp2_isr_handle_link(port, link);
3471 /* Per-port interrupt for link status changes */
3472 static irqreturn_t mvpp2_port_isr(int irq, void *dev_id)
3474 struct mvpp2_port *port = (struct mvpp2_port *)dev_id;
3477 mvpp22_gop_mask_irq(port);
3479 if (mvpp2_port_supports_xlg(port) &&
3480 mvpp2_is_xlg(port->phy_interface)) {
3481 /* Check the external status register */
3482 val = readl(port->base + MVPP22_XLG_EXT_INT_STAT);
3483 if (val & MVPP22_XLG_EXT_INT_STAT_XLG)
3484 mvpp2_isr_handle_xlg(port);
3485 if (val & MVPP22_XLG_EXT_INT_STAT_PTP)
3486 mvpp2_isr_handle_ptp(port);
3488 /* If it's not the XLG, we must be using the GMAC.
3489 * Check the summary status.
3491 val = readl(port->base + MVPP22_GMAC_INT_SUM_STAT);
3492 if (val & MVPP22_GMAC_INT_SUM_STAT_INTERNAL)
3493 mvpp2_isr_handle_gmac_internal(port);
3494 if (val & MVPP22_GMAC_INT_SUM_STAT_PTP)
3495 mvpp2_isr_handle_ptp(port);
3498 mvpp22_gop_unmask_irq(port);
3502 static enum hrtimer_restart mvpp2_hr_timer_cb(struct hrtimer *timer)
3504 struct net_device *dev;
3505 struct mvpp2_port *port;
3506 struct mvpp2_port_pcpu *port_pcpu;
3507 unsigned int tx_todo, cause;
3509 port_pcpu = container_of(timer, struct mvpp2_port_pcpu, tx_done_timer);
3510 dev = port_pcpu->dev;
3512 if (!netif_running(dev))
3513 return HRTIMER_NORESTART;
3515 port_pcpu->timer_scheduled = false;
3516 port = netdev_priv(dev);
3518 /* Process all the Tx queues */
3519 cause = (1 << port->ntxqs) - 1;
3520 tx_todo = mvpp2_tx_done(port, cause,
3521 mvpp2_cpu_to_thread(port->priv, smp_processor_id()));
3523 /* Set the timer in case not all the packets were processed */
3524 if (tx_todo && !port_pcpu->timer_scheduled) {
3525 port_pcpu->timer_scheduled = true;
3526 hrtimer_forward_now(&port_pcpu->tx_done_timer,
3527 MVPP2_TXDONE_HRTIMER_PERIOD_NS);
3529 return HRTIMER_RESTART;
3531 return HRTIMER_NORESTART;
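/* Note on the hrtimer contract above: returning HRTIMER_RESTART only makes
 * sense if the callback first moved the expiry forward, hence the
 * hrtimer_forward_now() call with MVPP2_TXDONE_HRTIMER_PERIOD_NS before
 * restarting; otherwise the timer would fire again immediately.
 */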
3534 /* Main RX/TX processing routines */
3536 /* Display more error info */
3537 static void mvpp2_rx_error(struct mvpp2_port *port,
3538 struct mvpp2_rx_desc *rx_desc)
3540 u32 status = mvpp2_rxdesc_status_get(port, rx_desc);
3541 size_t sz = mvpp2_rxdesc_size_get(port, rx_desc);
3542 char *err_str = NULL;
3544 switch (status & MVPP2_RXD_ERR_CODE_MASK) {
3545 case MVPP2_RXD_ERR_CRC:
3548 case MVPP2_RXD_ERR_OVERRUN:
3549 err_str = "overrun";
3551 case MVPP2_RXD_ERR_RESOURCE:
3552 err_str = "resource";
3555 if (err_str && net_ratelimit())
3556 netdev_err(port->dev,
3557 "bad rx status %08x (%s error), size=%zu\n",
3558 status, err_str, sz);
3561 /* Handle RX checksum offload */
3562 static int mvpp2_rx_csum(struct mvpp2_port *port, u32 status)
3564 if (((status & MVPP2_RXD_L3_IP4) &&
3565 !(status & MVPP2_RXD_IP4_HEADER_ERR)) ||
3566 (status & MVPP2_RXD_L3_IP6))
3567 if (((status & MVPP2_RXD_L4_UDP) ||
3568 (status & MVPP2_RXD_L4_TCP)) &&
3569 (status & MVPP2_RXD_L4_CSUM_OK))
3570 return CHECKSUM_UNNECESSARY;
3572 return CHECKSUM_NONE;
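/* Example of the decision above: a packet flagged MVPP2_RXD_L3_IP4 without
 * MVPP2_RXD_IP4_HEADER_ERR, with MVPP2_RXD_L4_TCP and MVPP2_RXD_L4_CSUM_OK
 * set, yields CHECKSUM_UNNECESSARY; anything else (e.g. a non-TCP/UDP L4)
 * falls back to CHECKSUM_NONE and the stack verifies the checksum itself.
 */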
3575 /* Allocate a new skb and add it to BM pool */
3576 static int mvpp2_rx_refill(struct mvpp2_port *port,
3577 struct mvpp2_bm_pool *bm_pool,
3578 struct page_pool *page_pool, int pool)
3580 dma_addr_t dma_addr;
3581 phys_addr_t phys_addr;
3584 buf = mvpp2_buf_alloc(port, bm_pool, page_pool,
3585 &dma_addr, &phys_addr, GFP_ATOMIC);
3589 mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr);
3594 /* Handle tx checksum */
3595 static u32 mvpp2_skb_tx_csum(struct mvpp2_port *port, struct sk_buff *skb)
3597 if (skb->ip_summed == CHECKSUM_PARTIAL) {
3600 __be16 l3_proto = vlan_get_protocol(skb);
3602 if (l3_proto == htons(ETH_P_IP)) {
3603 struct iphdr *ip4h = ip_hdr(skb);
3605 /* Calculate IPv4 checksum and L4 checksum */
3606 ip_hdr_len = ip4h->ihl;
3607 l4_proto = ip4h->protocol;
3608 } else if (l3_proto == htons(ETH_P_IPV6)) {
3609 struct ipv6hdr *ip6h = ipv6_hdr(skb);
3611 /* Read l4_protocol from one of IPv6 extra headers */
3612 if (skb_network_header_len(skb) > 0)
3613 ip_hdr_len = (skb_network_header_len(skb) >> 2);
3614 l4_proto = ip6h->nexthdr;
3616 return MVPP2_TXD_L4_CSUM_NOT;
3619 return mvpp2_txq_desc_csum(skb_network_offset(skb),
3620 l3_proto, ip_hdr_len, l4_proto);
3623 return MVPP2_TXD_L4_CSUM_NOT | MVPP2_TXD_IP_CSUM_DISABLE;
3626 static void mvpp2_xdp_finish_tx(struct mvpp2_port *port, u16 txq_id, int nxmit, int nxmit_byte)
3628 unsigned int thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id());
3629 struct mvpp2_tx_queue *aggr_txq;
3630 struct mvpp2_txq_pcpu *txq_pcpu;
3631 struct mvpp2_tx_queue *txq;
3632 struct netdev_queue *nq;
3634 txq = port->txqs[txq_id];
3635 txq_pcpu = per_cpu_ptr(txq->pcpu, thread);
3636 nq = netdev_get_tx_queue(port->dev, txq_id);
3637 aggr_txq = &port->priv->aggr_txqs[thread];
3639 txq_pcpu->reserved_num -= nxmit;
3640 txq_pcpu->count += nxmit;
3641 aggr_txq->count += nxmit;
3643 /* Enable transmit */
3645 mvpp2_aggr_txq_pend_desc_add(port, nxmit);
3647 if (txq_pcpu->count >= txq_pcpu->stop_threshold)
3648 netif_tx_stop_queue(nq);
3650 /* Finalize TX processing */
3651 if (!port->has_tx_irqs && txq_pcpu->count >= txq->done_pkts_coal)
3652 mvpp2_txq_done(port, txq, txq_pcpu);
3656 mvpp2_xdp_submit_frame(struct mvpp2_port *port, u16 txq_id,
3657 struct xdp_frame *xdpf, bool dma_map)
3659 unsigned int thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id());
3660 u32 tx_cmd = MVPP2_TXD_L4_CSUM_NOT | MVPP2_TXD_IP_CSUM_DISABLE |
3661 MVPP2_TXD_F_DESC | MVPP2_TXD_L_DESC;
3662 enum mvpp2_tx_buf_type buf_type;
3663 struct mvpp2_txq_pcpu *txq_pcpu;
3664 struct mvpp2_tx_queue *aggr_txq;
3665 struct mvpp2_tx_desc *tx_desc;
3666 struct mvpp2_tx_queue *txq;
3667 int ret = MVPP2_XDP_TX;
3668 dma_addr_t dma_addr;
3670 txq = port->txqs[txq_id];
3671 txq_pcpu = per_cpu_ptr(txq->pcpu, thread);
3672 aggr_txq = &port->priv->aggr_txqs[thread];
3674 /* Check number of available descriptors */
3675 if (mvpp2_aggr_desc_num_check(port, aggr_txq, 1) ||
3676 mvpp2_txq_reserved_desc_num_proc(port, txq, txq_pcpu, 1)) {
3677 ret = MVPP2_XDP_DROPPED;
3681 /* Get a descriptor for the first part of the packet */
3682 tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
3683 mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
3684 mvpp2_txdesc_size_set(port, tx_desc, xdpf->len);
3687 /* XDP_REDIRECT or AF_XDP */
3688 dma_addr = dma_map_single(port->dev->dev.parent, xdpf->data,
3689 xdpf->len, DMA_TO_DEVICE);
3691 if (unlikely(dma_mapping_error(port->dev->dev.parent, dma_addr))) {
3692 mvpp2_txq_desc_put(txq);
3693 ret = MVPP2_XDP_DROPPED;
3697 buf_type = MVPP2_TYPE_XDP_NDO;
3700 struct page *page = virt_to_page(xdpf->data);
3702 dma_addr = page_pool_get_dma_addr(page) +
3703 sizeof(*xdpf) + xdpf->headroom;
3704 dma_sync_single_for_device(port->dev->dev.parent, dma_addr,
3705 xdpf->len, DMA_BIDIRECTIONAL);
3707 buf_type = MVPP2_TYPE_XDP_TX;
3710 mvpp2_txdesc_dma_addr_set(port, tx_desc, dma_addr);
3712 mvpp2_txdesc_cmd_set(port, tx_desc, tx_cmd);
3713 mvpp2_txq_inc_put(port, txq_pcpu, xdpf, tx_desc, buf_type);
3720 mvpp2_xdp_xmit_back(struct mvpp2_port *port, struct xdp_buff *xdp)
3722 struct mvpp2_pcpu_stats *stats = this_cpu_ptr(port->stats);
3723 struct xdp_frame *xdpf;
3727 xdpf = xdp_convert_buff_to_frame(xdp);
3728 if (unlikely(!xdpf))
3729 return MVPP2_XDP_DROPPED;
3731 /* The first half of the TX queues is used for XPS,
3732 * the second half for XDP_TX
3734 txq_id = mvpp2_cpu_to_thread(port->priv, smp_processor_id()) + (port->ntxqs / 2);
3736 ret = mvpp2_xdp_submit_frame(port, txq_id, xdpf, false);
3737 if (ret == MVPP2_XDP_TX) {
3738 u64_stats_update_begin(&stats->syncp);
3739 stats->tx_bytes += xdpf->len;
3740 stats->tx_packets++;
3742 u64_stats_update_end(&stats->syncp);
3744 mvpp2_xdp_finish_tx(port, txq_id, 1, xdpf->len);
3746 u64_stats_update_begin(&stats->syncp);
3747 stats->xdp_tx_err++;
3748 u64_stats_update_end(&stats->syncp);
3755 mvpp2_xdp_xmit(struct net_device *dev, int num_frame,
3756 struct xdp_frame **frames, u32 flags)
3758 struct mvpp2_port *port = netdev_priv(dev);
3759 int i, nxmit_byte = 0, nxmit = 0;
3760 struct mvpp2_pcpu_stats *stats;
3764 if (unlikely(test_bit(0, &port->state)))
3767 if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
3770 /* The first half of the TX queues is used for XPS,
3771 * the second half for XDP_TX
3773 txq_id = mvpp2_cpu_to_thread(port->priv, smp_processor_id()) + (port->ntxqs / 2);
3775 for (i = 0; i < num_frame; i++) {
3776 ret = mvpp2_xdp_submit_frame(port, txq_id, frames[i], true);
3777 if (ret != MVPP2_XDP_TX)
3780 nxmit_byte += frames[i]->len;
3784 if (likely(nxmit > 0))
3785 mvpp2_xdp_finish_tx(port, txq_id, nxmit, nxmit_byte);
3787 stats = this_cpu_ptr(port->stats);
3788 u64_stats_update_begin(&stats->syncp);
3789 stats->tx_bytes += nxmit_byte;
3790 stats->tx_packets += nxmit;
3791 stats->xdp_xmit += nxmit;
3792 stats->xdp_xmit_err += num_frame - nxmit;
3793 u64_stats_update_end(&stats->syncp);
3799 mvpp2_run_xdp(struct mvpp2_port *port, struct bpf_prog *prog,
3800 struct xdp_buff *xdp, struct page_pool *pp,
3801 struct mvpp2_pcpu_stats *stats)
3803 unsigned int len, sync, err;
3807 len = xdp->data_end - xdp->data_hard_start - MVPP2_SKB_HEADROOM;
3808 act = bpf_prog_run_xdp(prog, xdp);
3810 /* Due to xdp_adjust_tail, the DMA sync for_device must cover the max length the CPU touched */
3811 sync = xdp->data_end - xdp->data_hard_start - MVPP2_SKB_HEADROOM;
3812 sync = max(sync, len);
3817 ret = MVPP2_XDP_PASS;
3820 err = xdp_do_redirect(port->dev, xdp, prog);
3821 if (unlikely(err)) {
3822 ret = MVPP2_XDP_DROPPED;
3823 page = virt_to_head_page(xdp->data);
3824 page_pool_put_page(pp, page, sync, true);
3826 ret = MVPP2_XDP_REDIR;
3827 stats->xdp_redirect++;
3831 ret = mvpp2_xdp_xmit_back(port, xdp);
3832 if (ret != MVPP2_XDP_TX) {
3833 page = virt_to_head_page(xdp->data);
3834 page_pool_put_page(pp, page, sync, true);
3838 bpf_warn_invalid_xdp_action(port->dev, prog, act);
3841 trace_xdp_exception(port->dev, prog, act);
3844 page = virt_to_head_page(xdp->data);
3845 page_pool_put_page(pp, page, sync, true);
3846 ret = MVPP2_XDP_DROPPED;
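/* Summary of the verdict mapping above: XDP_PASS lets the frame continue to
 * the normal receive path, XDP_REDIRECT hands it to another device via
 * xdp_do_redirect(), XDP_TX bounces it back out through the XDP TX queues,
 * and every failure or unknown action recycles the page into its page_pool
 * and counts the frame as dropped.
 */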
3854 static void mvpp2_buff_hdr_pool_put(struct mvpp2_port *port, struct mvpp2_rx_desc *rx_desc,
3855 int pool, u32 rx_status)
3857 phys_addr_t phys_addr, phys_addr_next;
3858 dma_addr_t dma_addr, dma_addr_next;
3859 struct mvpp2_buff_hdr *buff_hdr;
3861 phys_addr = mvpp2_rxdesc_dma_addr_get(port, rx_desc);
3862 dma_addr = mvpp2_rxdesc_cookie_get(port, rx_desc);
3865 buff_hdr = (struct mvpp2_buff_hdr *)phys_to_virt(phys_addr);
3867 phys_addr_next = le32_to_cpu(buff_hdr->next_phys_addr);
3868 dma_addr_next = le32_to_cpu(buff_hdr->next_dma_addr);
3870 if (port->priv->hw_version >= MVPP22) {
3871 phys_addr_next |= ((u64)buff_hdr->next_phys_addr_high << 32);
3872 dma_addr_next |= ((u64)buff_hdr->next_dma_addr_high << 32);
3875 mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr);
3877 phys_addr = phys_addr_next;
3878 dma_addr = dma_addr_next;
3880 } while (!MVPP2_B_HDR_INFO_IS_LAST(le16_to_cpu(buff_hdr->info)));
3883 /* Main rx processing */
3884 static int mvpp2_rx(struct mvpp2_port *port, struct napi_struct *napi,
3885 int rx_todo, struct mvpp2_rx_queue *rxq)
3887 struct net_device *dev = port->dev;
3888 struct mvpp2_pcpu_stats ps = {};
3889 enum dma_data_direction dma_dir;
3890 struct bpf_prog *xdp_prog;
3891 struct xdp_buff xdp;
3896 xdp_prog = READ_ONCE(port->xdp_prog);
3898 /* Get the number of received packets and clamp the to-do count */
3899 rx_received = mvpp2_rxq_received(port, rxq->id);
3900 if (rx_todo > rx_received)
3901 rx_todo = rx_received;
3903 while (rx_done < rx_todo) {
3904 struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq);
3905 struct mvpp2_bm_pool *bm_pool;
3906 struct page_pool *pp = NULL;
3907 struct sk_buff *skb;
3908 unsigned int frag_size;
3909 dma_addr_t dma_addr;
3910 phys_addr_t phys_addr;
3911 u32 rx_status, timestamp;
3912 int pool, rx_bytes, err, ret;
3916 phys_addr = mvpp2_rxdesc_cookie_get(port, rx_desc);
3917 data = (void *)phys_to_virt(phys_addr);
3918 page = virt_to_page(data);
3922 rx_status = mvpp2_rxdesc_status_get(port, rx_desc);
3923 rx_bytes = mvpp2_rxdesc_size_get(port, rx_desc);
3924 rx_bytes -= MVPP2_MH_SIZE;
3925 dma_addr = mvpp2_rxdesc_dma_addr_get(port, rx_desc);
3927 pool = (rx_status & MVPP2_RXD_BM_POOL_ID_MASK) >>
3928 MVPP2_RXD_BM_POOL_ID_OFFS;
3929 bm_pool = &port->priv->bm_pools[pool];
3931 if (port->priv->percpu_pools) {
3932 pp = port->priv->page_pool[pool];
3933 dma_dir = page_pool_get_dma_dir(pp);
3935 dma_dir = DMA_FROM_DEVICE;
3938 dma_sync_single_for_cpu(dev->dev.parent, dma_addr,
3939 rx_bytes + MVPP2_MH_SIZE,
3942 /* Buffer header not supported */
3943 if (rx_status & MVPP2_RXD_BUF_HDR)
3944 goto err_drop_frame;
3946 /* In case of an error, release the requested buffer pointer
3947 * to the Buffer Manager. This request process is controlled
3948 * by the hardware, and the information about the buffer is
3949 * carried in the RX descriptor.
3951 if (rx_status & MVPP2_RXD_ERR_SUMMARY)
3952 goto err_drop_frame;
3954 /* Prefetch header */
3955 prefetch(data + MVPP2_MH_SIZE + MVPP2_SKB_HEADROOM);
3957 if (bm_pool->frag_size > PAGE_SIZE)
3960 frag_size = bm_pool->frag_size;
3963 struct xdp_rxq_info *xdp_rxq;
3965 if (bm_pool->pkt_size == MVPP2_BM_SHORT_PKT_SIZE)
3966 xdp_rxq = &rxq->xdp_rxq_short;
3968 xdp_rxq = &rxq->xdp_rxq_long;
3970 xdp_init_buff(&xdp, PAGE_SIZE, xdp_rxq);
3971 xdp_prepare_buff(&xdp, data,
3972 MVPP2_MH_SIZE + MVPP2_SKB_HEADROOM,
3975 ret = mvpp2_run_xdp(port, xdp_prog, &xdp, pp, &ps);
3979 err = mvpp2_rx_refill(port, bm_pool, pp, pool);
3981 netdev_err(port->dev, "failed to refill BM pools\n");
3982 goto err_drop_frame;
3986 ps.rx_bytes += rx_bytes;
3991 skb = build_skb(data, frag_size);
3993 netdev_warn(port->dev, "skb build failed\n");
3994 goto err_drop_frame;
3997 /* If we have RX hardware timestamping enabled, grab the
3998 * timestamp from the queue and convert.
4000 if (mvpp22_rx_hwtstamping(port)) {
4001 timestamp = le32_to_cpu(rx_desc->pp22.timestamp);
4002 mvpp22_tai_tstamp(port->priv->tai, timestamp,
4003 skb_hwtstamps(skb));
4006 err = mvpp2_rx_refill(port, bm_pool, pp, pool);
4008 netdev_err(port->dev, "failed to refill BM pools\n");
4009 dev_kfree_skb_any(skb);
4010 goto err_drop_frame;
4014 skb_mark_for_recycle(skb);
4016 dma_unmap_single_attrs(dev->dev.parent, dma_addr,
4017 bm_pool->buf_size, DMA_FROM_DEVICE,
4018 DMA_ATTR_SKIP_CPU_SYNC);
4021 ps.rx_bytes += rx_bytes;
4023 skb_reserve(skb, MVPP2_MH_SIZE + MVPP2_SKB_HEADROOM);
4024 skb_put(skb, rx_bytes);
4025 skb->ip_summed = mvpp2_rx_csum(port, rx_status);
4026 skb->protocol = eth_type_trans(skb, dev);
4028 napi_gro_receive(napi, skb);
4032 dev->stats.rx_errors++;
4033 mvpp2_rx_error(port, rx_desc);
4034 /* Return the buffer to the pool */
4035 if (rx_status & MVPP2_RXD_BUF_HDR)
4036 mvpp2_buff_hdr_pool_put(port, rx_desc, pool, rx_status);
4038 mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr);
4041 if (xdp_ret & MVPP2_XDP_REDIR)
4044 if (ps.rx_packets) {
4045 struct mvpp2_pcpu_stats *stats = this_cpu_ptr(port->stats);
4047 u64_stats_update_begin(&stats->syncp);
4048 stats->rx_packets += ps.rx_packets;
4049 stats->rx_bytes += ps.rx_bytes;
4051 stats->xdp_redirect += ps.xdp_redirect;
4052 stats->xdp_pass += ps.xdp_pass;
4053 stats->xdp_drop += ps.xdp_drop;
4054 u64_stats_update_end(&stats->syncp);
4057 /* Update Rx queue management counters */
4059 mvpp2_rxq_status_update(port, rxq->id, rx_done, rx_done);
4065 tx_desc_unmap_put(struct mvpp2_port *port, struct mvpp2_tx_queue *txq,
4066 struct mvpp2_tx_desc *desc)
4068 unsigned int thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id());
4069 struct mvpp2_txq_pcpu *txq_pcpu = per_cpu_ptr(txq->pcpu, thread);
4071 dma_addr_t buf_dma_addr =
4072 mvpp2_txdesc_dma_addr_get(port, desc);
4074 mvpp2_txdesc_size_get(port, desc);
4075 if (!IS_TSO_HEADER(txq_pcpu, buf_dma_addr))
4076 dma_unmap_single(port->dev->dev.parent, buf_dma_addr,
4077 buf_sz, DMA_TO_DEVICE);
4078 mvpp2_txq_desc_put(txq);
4081 static void mvpp2_txdesc_clear_ptp(struct mvpp2_port *port,
4082 struct mvpp2_tx_desc *desc)
4084 /* We only need to clear the low bits */
4085 if (port->priv->hw_version >= MVPP22)
4086 desc->pp22.ptp_descriptor &=
4087 cpu_to_le32(~MVPP22_PTP_DESC_MASK_LOW);
4090 static bool mvpp2_tx_hw_tstamp(struct mvpp2_port *port,
4091 struct mvpp2_tx_desc *tx_desc,
4092 struct sk_buff *skb)
4094 struct mvpp2_hwtstamp_queue *queue;
4095 unsigned int mtype, type, i;
4096 struct ptp_header *hdr;
4099 if (port->priv->hw_version == MVPP21 ||
4100 port->tx_hwtstamp_type == HWTSTAMP_TX_OFF)
4103 type = ptp_classify_raw(skb);
4107 hdr = ptp_parse_header(skb, type);
4111 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
4113 ptpdesc = MVPP22_PTP_MACTIMESTAMPINGEN |
4114 MVPP22_PTP_ACTION_CAPTURE;
4115 queue = &port->tx_hwtstamp_queue[0];
4117 switch (type & PTP_CLASS_VMASK) {
4119 ptpdesc |= MVPP22_PTP_PACKETFORMAT(MVPP22_PTP_PKT_FMT_PTPV1);
4123 ptpdesc |= MVPP22_PTP_PACKETFORMAT(MVPP22_PTP_PKT_FMT_PTPV2);
4124 mtype = hdr->tsmt & 15;
4125 /* Direct PTP Sync messages to queue 1 */
4127 ptpdesc |= MVPP22_PTP_TIMESTAMPQUEUESELECT;
4128 queue = &port->tx_hwtstamp_queue[1];
4133 /* Take a reference on the skb and insert into our queue */
4134 i = queue->next;
4135 queue->next = (i + 1) & 31;
4136 if (queue->skb[i])
4137 dev_kfree_skb_any(queue->skb[i]);
4138 queue->skb[i] = skb_get(skb);
4140 ptpdesc |= MVPP22_PTP_TIMESTAMPENTRYID(i);
4144 * 6:4 - PTPPacketFormat
4145 * 7 - PTP_CF_WraparoundCheckEn
4146 * 9:8 - IngressTimestampSeconds[1:0]
4148 * 11 - MACTimestampingEn
4149 * 17:12 - PTP_TimestampQueueEntryID[5:0]
4150 * 18 - PTPTimestampQueueSelect
4151 * 19 - UDPChecksumUpdateEn
4152 * 27:20 - TimestampOffset
4153 * PTP, NTPTransmit, OWAMP/TWAMP - L3 to PTP header
4154 * NTPTs, Y.1731 - L3 to timestamp entry
4155 * 35:28 - UDP Checksum Offset
4157 * stored in tx descriptor bits 75:64 (11:0) and 191:168 (35:12)
4159 tx_desc->pp22.ptp_descriptor &=
4160 cpu_to_le32(~MVPP22_PTP_DESC_MASK_LOW);
4161 tx_desc->pp22.ptp_descriptor |=
4162 cpu_to_le32(ptpdesc & MVPP22_PTP_DESC_MASK_LOW);
4163 tx_desc->pp22.buf_dma_addr_ptp &= cpu_to_le64(~0xffffff0000000000ULL);
4164 tx_desc->pp22.buf_dma_addr_ptp |= cpu_to_le64((ptpdesc >> 12) << 40);
4169 /* Handle tx fragmentation processing */
4170 static int mvpp2_tx_frag_process(struct mvpp2_port *port, struct sk_buff *skb,
4171 struct mvpp2_tx_queue *aggr_txq,
4172 struct mvpp2_tx_queue *txq)
4174 unsigned int thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id());
4175 struct mvpp2_txq_pcpu *txq_pcpu = per_cpu_ptr(txq->pcpu, thread);
4176 struct mvpp2_tx_desc *tx_desc;
4178 dma_addr_t buf_dma_addr;
4180 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
4181 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4182 void *addr = skb_frag_address(frag);
4184 tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
4185 mvpp2_txdesc_clear_ptp(port, tx_desc);
4186 mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
4187 mvpp2_txdesc_size_set(port, tx_desc, skb_frag_size(frag));
		buf_dma_addr = dma_map_single(port->dev->dev.parent, addr,
					      skb_frag_size(frag),
					      DMA_TO_DEVICE);
		if (dma_mapping_error(port->dev->dev.parent, buf_dma_addr)) {
			mvpp2_txq_desc_put(txq);
			goto cleanup;
		}
4197 mvpp2_txdesc_dma_addr_set(port, tx_desc, buf_dma_addr);
4199 if (i == (skb_shinfo(skb)->nr_frags - 1)) {
4200 /* Last descriptor */
			mvpp2_txdesc_cmd_set(port, tx_desc,
					     MVPP2_TXD_L_DESC);
			mvpp2_txq_inc_put(port, txq_pcpu, skb, tx_desc, MVPP2_TYPE_SKB);
		} else {
			/* Descriptor in the middle: Not First, Not Last */
			mvpp2_txdesc_cmd_set(port, tx_desc, 0);
			mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc, MVPP2_TYPE_SKB);
		}
	}

	return 0;

cleanup:
	/* Release all descriptors that were used to map fragments of
	 * this packet, as well as the corresponding DMA mappings
	 */
	for (i = i - 1; i >= 0; i--) {
		tx_desc = txq->descs + i;
		tx_desc_unmap_put(port, txq, tx_desc);
	}

	return -ENOMEM;
}
4224 static inline void mvpp2_tso_put_hdr(struct sk_buff *skb,
4225 struct net_device *dev,
4226 struct mvpp2_tx_queue *txq,
4227 struct mvpp2_tx_queue *aggr_txq,
				     struct mvpp2_txq_pcpu *txq_pcpu,
				     int hdr_sz)
{
	struct mvpp2_port *port = netdev_priv(dev);
	struct mvpp2_tx_desc *tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
	dma_addr_t addr;

	mvpp2_txdesc_clear_ptp(port, tx_desc);
4236 mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
4237 mvpp2_txdesc_size_set(port, tx_desc, hdr_sz);
4239 addr = txq_pcpu->tso_headers_dma +
4240 txq_pcpu->txq_put_index * TSO_HEADER_SIZE;
4241 mvpp2_txdesc_dma_addr_set(port, tx_desc, addr);
	mvpp2_txdesc_cmd_set(port, tx_desc, mvpp2_skb_tx_csum(port, skb) |
			     MVPP2_TXD_F_DESC |
			     MVPP2_TXD_PADDING_DISABLE);
	mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc, MVPP2_TYPE_SKB);
}
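/* The header built here lives in the per-thread tso_headers region, which
 * is DMA-mapped once when the queue is initialised; this is why
 * tx_desc_unmap_put() skips dma_unmap_single() for addresses matching
 * IS_TSO_HEADER().
 */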
4249 static inline int mvpp2_tso_put_data(struct sk_buff *skb,
4250 struct net_device *dev, struct tso_t *tso,
4251 struct mvpp2_tx_queue *txq,
4252 struct mvpp2_tx_queue *aggr_txq,
4253 struct mvpp2_txq_pcpu *txq_pcpu,
				     int sz, bool left, bool last)
{
	struct mvpp2_port *port = netdev_priv(dev);
	struct mvpp2_tx_desc *tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
	dma_addr_t buf_dma_addr;
4260 mvpp2_txdesc_clear_ptp(port, tx_desc);
4261 mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
4262 mvpp2_txdesc_size_set(port, tx_desc, sz);
	buf_dma_addr = dma_map_single(dev->dev.parent, tso->data, sz,
				      DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev->dev.parent, buf_dma_addr))) {
		mvpp2_txq_desc_put(txq);
		return -ENOMEM;
	}
4271 mvpp2_txdesc_dma_addr_set(port, tx_desc, buf_dma_addr);
	if (!left) {
		mvpp2_txdesc_cmd_set(port, tx_desc, MVPP2_TXD_L_DESC);
		if (last) {
			mvpp2_txq_inc_put(port, txq_pcpu, skb, tx_desc, MVPP2_TYPE_SKB);
			return 0;
		}
	} else {
		mvpp2_txdesc_cmd_set(port, tx_desc, 0);
	}

	mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc, MVPP2_TYPE_SKB);
	return 0;
}
4287 static int mvpp2_tx_tso(struct sk_buff *skb, struct net_device *dev,
4288 struct mvpp2_tx_queue *txq,
4289 struct mvpp2_tx_queue *aggr_txq,
			struct mvpp2_txq_pcpu *txq_pcpu)
{
	struct mvpp2_port *port = netdev_priv(dev);
	int hdr_sz, i, len, descs = 0;
	struct tso_t tso;
4296 /* Check number of available descriptors */
4297 if (mvpp2_aggr_desc_num_check(port, aggr_txq, tso_count_descs(skb)) ||
4298 mvpp2_txq_reserved_desc_num_proc(port, txq, txq_pcpu,
					 tso_count_descs(skb)))
		return 0;
4302 hdr_sz = tso_start(skb, &tso);
	len = skb->len - hdr_sz;
	while (len > 0) {
		int left = min_t(int, skb_shinfo(skb)->gso_size, len);
		char *hdr = txq_pcpu->tso_headers +
			    txq_pcpu->txq_put_index * TSO_HEADER_SIZE;

		len -= left;
		descs++;

		tso_build_hdr(skb, hdr, &tso, left, len == 0);
		mvpp2_tso_put_hdr(skb, dev, txq, aggr_txq, txq_pcpu, hdr_sz);

		while (left > 0) {
			int sz = min_t(int, tso.size, left);

			left -= sz;
			descs++;

			if (mvpp2_tso_put_data(skb, dev, &tso, txq, aggr_txq,
					       txq_pcpu, sz, left, len == 0))
				goto release;
			tso_build_data(skb, &tso, sz);
		}
	}

	return descs;

release:
	for (i = descs - 1; i >= 0; i--) {
		struct mvpp2_tx_desc *tx_desc = txq->descs + i;

		tx_desc_unmap_put(port, txq, tx_desc);
	}
	return 0;
}
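/* Rough descriptor budget for a TSO skb: tso_count_descs() reserves two
 * descriptors per GSO segment (header + data) plus one per page fragment.
 * E.g. a linear 9000 byte payload with gso_size 1448 splits into
 * ceil(9000 / 1448) = 7 segments, so 14 descriptors are reserved up front.
 */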
4338 /* Main tx processing */
static netdev_tx_t mvpp2_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct mvpp2_port *port = netdev_priv(dev);
4342 struct mvpp2_tx_queue *txq, *aggr_txq;
4343 struct mvpp2_txq_pcpu *txq_pcpu;
4344 struct mvpp2_tx_desc *tx_desc;
4345 dma_addr_t buf_dma_addr;
4346 unsigned long flags = 0;
	unsigned int thread;
	int frags = 0;
	u16 txq_id;
	u32 tx_cmd;
4352 thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id());
4354 txq_id = skb_get_queue_mapping(skb);
4355 txq = port->txqs[txq_id];
4356 txq_pcpu = per_cpu_ptr(txq->pcpu, thread);
4357 aggr_txq = &port->priv->aggr_txqs[thread];
4359 if (test_bit(thread, &port->priv->lock_map))
4360 spin_lock_irqsave(&port->tx_lock[thread], flags);
4362 if (skb_is_gso(skb)) {
		frags = mvpp2_tx_tso(skb, dev, txq, aggr_txq, txq_pcpu);
		goto out;
	}
	frags = skb_shinfo(skb)->nr_frags + 1;
4368 /* Check number of available descriptors */
4369 if (mvpp2_aggr_desc_num_check(port, aggr_txq, frags) ||
	    mvpp2_txq_reserved_desc_num_proc(port, txq, txq_pcpu, frags)) {
		frags = 0;
		goto out;
	}
4375 /* Get a descriptor for the first part of the packet */
4376 tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
4377 if (!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) ||
4378 !mvpp2_tx_hw_tstamp(port, tx_desc, skb))
4379 mvpp2_txdesc_clear_ptp(port, tx_desc);
4380 mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
4381 mvpp2_txdesc_size_set(port, tx_desc, skb_headlen(skb));
4383 buf_dma_addr = dma_map_single(dev->dev.parent, skb->data,
4384 skb_headlen(skb), DMA_TO_DEVICE);
4385 if (unlikely(dma_mapping_error(dev->dev.parent, buf_dma_addr))) {
		mvpp2_txq_desc_put(txq);
		frags = 0;
		goto out;
	}
4391 mvpp2_txdesc_dma_addr_set(port, tx_desc, buf_dma_addr);
	tx_cmd = mvpp2_skb_tx_csum(port, skb);

	if (frags == 1) {
		/* First and Last descriptor */
4397 tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_L_DESC;
4398 mvpp2_txdesc_cmd_set(port, tx_desc, tx_cmd);
4399 mvpp2_txq_inc_put(port, txq_pcpu, skb, tx_desc, MVPP2_TYPE_SKB);
	} else {
		/* First but not Last */
4402 tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_PADDING_DISABLE;
4403 mvpp2_txdesc_cmd_set(port, tx_desc, tx_cmd);
4404 mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc, MVPP2_TYPE_SKB);
4406 /* Continue with other skb fragments */
4407 if (mvpp2_tx_frag_process(port, skb, aggr_txq, txq)) {
			tx_desc_unmap_put(port, txq, tx_desc);
			frags = 0;
		}
	}
out:
	if (frags > 0) {
		struct mvpp2_pcpu_stats *stats = per_cpu_ptr(port->stats, thread);
4416 struct netdev_queue *nq = netdev_get_tx_queue(dev, txq_id);
4418 txq_pcpu->reserved_num -= frags;
4419 txq_pcpu->count += frags;
4420 aggr_txq->count += frags;
4422 /* Enable transmit */
		wmb();
		mvpp2_aggr_txq_pend_desc_add(port, frags);
4426 if (txq_pcpu->count >= txq_pcpu->stop_threshold)
4427 netif_tx_stop_queue(nq);
4429 u64_stats_update_begin(&stats->syncp);
4430 stats->tx_packets++;
4431 stats->tx_bytes += skb->len;
4432 u64_stats_update_end(&stats->syncp);
	} else {
		dev->stats.tx_dropped++;
		dev_kfree_skb_any(skb);
	}
4438 /* Finalize TX processing */
4439 if (!port->has_tx_irqs && txq_pcpu->count >= txq->done_pkts_coal)
4440 mvpp2_txq_done(port, txq, txq_pcpu);
4442 /* Set the timer in case not all frags were processed */
4443 if (!port->has_tx_irqs && txq_pcpu->count <= frags &&
4444 txq_pcpu->count > 0) {
4445 struct mvpp2_port_pcpu *port_pcpu = per_cpu_ptr(port->pcpu, thread);
4447 if (!port_pcpu->timer_scheduled) {
4448 port_pcpu->timer_scheduled = true;
4449 hrtimer_start(&port_pcpu->tx_done_timer,
4450 MVPP2_TXDONE_HRTIMER_PERIOD_NS,
				      HRTIMER_MODE_REL_PINNED_SOFT);
		}
	}
4455 if (test_bit(thread, &port->priv->lock_map))
4456 spin_unlock_irqrestore(&port->tx_lock[thread], flags);
	return NETDEV_TX_OK;
}
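/* Note: priv->lock_map flags software threads that serve more than one CPU;
 * only for those does the xmit path above take the per-thread spinlock, so
 * on systems where each CPU has its own thread the hot path runs lock-free.
 */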
4461 static inline void mvpp2_cause_error(struct net_device *dev, int cause)
4463 if (cause & MVPP2_CAUSE_FCS_ERR_MASK)
4464 netdev_err(dev, "FCS error\n");
4465 if (cause & MVPP2_CAUSE_RX_FIFO_OVERRUN_MASK)
4466 netdev_err(dev, "rx fifo overrun error\n");
4467 if (cause & MVPP2_CAUSE_TX_FIFO_UNDERRUN_MASK)
4468 netdev_err(dev, "tx fifo underrun error\n");
static int mvpp2_poll(struct napi_struct *napi, int budget)
{
	u32 cause_rx_tx, cause_rx, cause_tx, cause_misc;
	int rx_done = 0;
4475 struct mvpp2_port *port = netdev_priv(napi->dev);
4476 struct mvpp2_queue_vector *qv;
4477 unsigned int thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id());
4479 qv = container_of(napi, struct mvpp2_queue_vector, napi);
4481 /* Rx/Tx cause register
4483 * Bits 0-15: each bit indicates received packets on the Rx queue
4484 * (bit 0 is for Rx queue 0).
4486 * Bits 16-23: each bit indicates transmitted packets on the Tx queue
4487 * (bit 16 is for Tx queue 0).
4489 * Each CPU has its own Rx/Tx cause register
4491 cause_rx_tx = mvpp2_thread_read_relaxed(port->priv, qv->sw_thread_id,
4492 MVPP2_ISR_RX_TX_CAUSE_REG(port->id));
	cause_misc = cause_rx_tx & MVPP2_CAUSE_MISC_SUM_MASK;
	if (cause_misc) {
		mvpp2_cause_error(port->dev, cause_misc);

		/* Clear the cause register */
		mvpp2_write(port->priv, MVPP2_ISR_MISC_CAUSE_REG, 0);
		mvpp2_thread_write(port->priv, thread,
				   MVPP2_ISR_RX_TX_CAUSE_REG(port->id),
				   cause_rx_tx & ~MVPP2_CAUSE_MISC_SUM_MASK);
	}
	if (port->has_tx_irqs) {
		cause_tx = cause_rx_tx & MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK;
		if (cause_tx) {
			cause_tx >>= MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_OFFSET;
			mvpp2_tx_done(port, cause_tx, qv->sw_thread_id);
		}
	}
4513 /* Process RX packets */
4514 cause_rx = cause_rx_tx &
4515 MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK(port->priv->hw_version);
4516 cause_rx <<= qv->first_rxq;
4517 cause_rx |= qv->pending_cause_rx;
	while (cause_rx && budget > 0) {
		int count;
		struct mvpp2_rx_queue *rxq;

		rxq = mvpp2_get_rx_queue(port, cause_rx);
		if (!rxq)
			break;

		count = mvpp2_rx(port, napi, budget, rxq);
		rx_done += count;
		budget -= count;
		if (budget > 0) {
			/* Clear the bit associated to this Rx queue
			 * so that next iteration will continue from
			 * the next Rx queue.
			 */
			cause_rx &= ~(1 << rxq->logic_rxq);
		}
	}
	if (budget > 0) {
		cause_rx = 0;
		napi_complete_done(napi, rx_done);

		mvpp2_qvec_interrupt_enable(qv);
	}

	qv->pending_cause_rx = cause_rx;
	return rx_done;
}
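/* Example decode, per the bit layout described at the top of this function:
 * cause_rx_tx = 0x00010003 means TX queue 0 (bit 16) and RX queues 0 and 1
 * (bits 0-1) have work pending.
 */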
4548 static void mvpp22_mode_reconfigure(struct mvpp2_port *port,
				    phy_interface_t interface)
{
	u32 ctrl3;
4553 /* Set the GMAC & XLG MAC in reset */
4554 mvpp2_mac_reset_assert(port);
4556 /* Set the MPCS and XPCS in reset */
4557 mvpp22_pcs_reset_assert(port);
4559 /* comphy reconfiguration */
4560 mvpp22_comphy_init(port, interface);
4562 /* gop reconfiguration */
4563 mvpp22_gop_init(port, interface);
4565 mvpp22_pcs_reset_deassert(port, interface);
4567 if (mvpp2_port_supports_xlg(port)) {
4568 ctrl3 = readl(port->base + MVPP22_XLG_CTRL3_REG);
4569 ctrl3 &= ~MVPP22_XLG_CTRL3_MACMODESELECT_MASK;
4571 if (mvpp2_is_xlg(interface))
4572 ctrl3 |= MVPP22_XLG_CTRL3_MACMODESELECT_10G;
		else
			ctrl3 |= MVPP22_XLG_CTRL3_MACMODESELECT_GMAC;

		writel(ctrl3, port->base + MVPP22_XLG_CTRL3_REG);
	}

	if (mvpp2_port_supports_xlg(port) && mvpp2_is_xlg(interface))
		mvpp2_xlg_max_rx_size_set(port);
	else
		mvpp2_gmac_max_rx_size_set(port);
}
4585 /* Set hw internals when starting port */
static void mvpp2_start_dev(struct mvpp2_port *port)
{
	int i;
4590 mvpp2_txp_max_tx_size_set(port);
4592 for (i = 0; i < port->nqvecs; i++)
4593 napi_enable(&port->qvecs[i].napi);
4595 /* Enable interrupts on all threads */
4596 mvpp2_interrupts_enable(port);
4598 if (port->priv->hw_version >= MVPP22)
4599 mvpp22_mode_reconfigure(port, port->phy_interface);
4601 if (port->phylink) {
4602 phylink_start(port->phylink);
	} else {
		mvpp2_acpi_start(port);
	}
4607 netif_tx_start_all_queues(port->dev);
	clear_bit(0, &port->state);
}
4612 /* Set hw internals when stopping port */
static void mvpp2_stop_dev(struct mvpp2_port *port)
{
	int i;
4617 set_bit(0, &port->state);
4619 /* Disable interrupts on all threads */
4620 mvpp2_interrupts_disable(port);
4622 for (i = 0; i < port->nqvecs; i++)
4623 napi_disable(&port->qvecs[i].napi);
	if (port->phylink)
		phylink_stop(port->phylink);
	phy_power_off(port->comphy);
}
4630 static int mvpp2_check_ringparam_valid(struct net_device *dev,
4631 struct ethtool_ringparam *ring)
4633 u16 new_rx_pending = ring->rx_pending;
4634 u16 new_tx_pending = ring->tx_pending;
	if (ring->rx_pending == 0 || ring->tx_pending == 0)
		return -EINVAL;
4639 if (ring->rx_pending > MVPP2_MAX_RXD_MAX)
4640 new_rx_pending = MVPP2_MAX_RXD_MAX;
4641 else if (ring->rx_pending < MSS_THRESHOLD_START)
4642 new_rx_pending = MSS_THRESHOLD_START;
4643 else if (!IS_ALIGNED(ring->rx_pending, 16))
4644 new_rx_pending = ALIGN(ring->rx_pending, 16);
4646 if (ring->tx_pending > MVPP2_MAX_TXD_MAX)
4647 new_tx_pending = MVPP2_MAX_TXD_MAX;
4648 else if (!IS_ALIGNED(ring->tx_pending, 32))
4649 new_tx_pending = ALIGN(ring->tx_pending, 32);
4651 /* The Tx ring size cannot be smaller than the minimum number of
4652 * descriptors needed for TSO.
4654 if (new_tx_pending < MVPP2_MAX_SKB_DESCS)
4655 new_tx_pending = ALIGN(MVPP2_MAX_SKB_DESCS, 32);
4657 if (ring->rx_pending != new_rx_pending) {
4658 netdev_info(dev, "illegal Rx ring size value %d, round to %d\n",
4659 ring->rx_pending, new_rx_pending);
4660 ring->rx_pending = new_rx_pending;
4663 if (ring->tx_pending != new_tx_pending) {
4664 netdev_info(dev, "illegal Tx ring size value %d, round to %d\n",
4665 ring->tx_pending, new_tx_pending);
		ring->tx_pending = new_tx_pending;
	}

	return 0;
}
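/* Example: "ethtool -G <iface> rx 1500 tx 1500" (assuming 1500 is above the
 * driver's minimum) is rounded up to rx 1504 (16-descriptor aligned) and
 * tx 1504 (32-descriptor aligned), and the adjusted values are reported
 * back through *ring.
 */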
4672 static void mvpp21_get_mac_address(struct mvpp2_port *port, unsigned char *addr)
4674 u32 mac_addr_l, mac_addr_m, mac_addr_h;
4676 mac_addr_l = readl(port->base + MVPP2_GMAC_CTRL_1_REG);
4677 mac_addr_m = readl(port->priv->lms_base + MVPP2_SRC_ADDR_MIDDLE);
4678 mac_addr_h = readl(port->priv->lms_base + MVPP2_SRC_ADDR_HIGH);
4679 addr[0] = (mac_addr_h >> 24) & 0xFF;
4680 addr[1] = (mac_addr_h >> 16) & 0xFF;
4681 addr[2] = (mac_addr_h >> 8) & 0xFF;
4682 addr[3] = mac_addr_h & 0xFF;
4683 addr[4] = mac_addr_m & 0xFF;
4684 addr[5] = (mac_addr_l >> MVPP2_GMAC_SA_LOW_OFFS) & 0xFF;
static int mvpp2_irqs_init(struct mvpp2_port *port)
{
	int err, i;
4691 for (i = 0; i < port->nqvecs; i++) {
4692 struct mvpp2_queue_vector *qv = port->qvecs + i;
4694 if (qv->type == MVPP2_QUEUE_VECTOR_PRIVATE) {
			qv->mask = kzalloc(cpumask_size(), GFP_KERNEL);
			if (!qv->mask) {
				err = -ENOMEM;
				goto err;
			}

			irq_set_status_flags(qv->irq, IRQ_NO_BALANCING);
		}

		err = request_irq(qv->irq, mvpp2_isr, 0, port->dev->name, qv);
		if (err)
			goto err;
4708 if (qv->type == MVPP2_QUEUE_VECTOR_PRIVATE) {
			unsigned int cpu;

			for_each_present_cpu(cpu) {
				if (mvpp2_cpu_to_thread(port->priv, cpu) ==
				    qv->sw_thread_id)
					cpumask_set_cpu(cpu, qv->mask);
			}

			irq_set_affinity_hint(qv->irq, qv->mask);
		}
	}

	return 0;
err:
4723 for (i = 0; i < port->nqvecs; i++) {
4724 struct mvpp2_queue_vector *qv = port->qvecs + i;
4726 irq_set_affinity_hint(qv->irq, NULL);
		kfree(qv->mask);
		qv->mask = NULL;
		free_irq(qv->irq, qv);
	}

	return err;
}
static void mvpp2_irqs_deinit(struct mvpp2_port *port)
{
	int i;
4739 for (i = 0; i < port->nqvecs; i++) {
4740 struct mvpp2_queue_vector *qv = port->qvecs + i;
		irq_set_affinity_hint(qv->irq, NULL);
		kfree(qv->mask);
		qv->mask = NULL;
		irq_clear_status_flags(qv->irq, IRQ_NO_BALANCING);
		free_irq(qv->irq, qv);
	}
}
4750 static bool mvpp22_rss_is_supported(struct mvpp2_port *port)
4752 return (queue_mode == MVPP2_QDIST_MULTI_MODE) &&
4753 !(port->flags & MVPP2_F_LOOPBACK);
static int mvpp2_open(struct net_device *dev)
{
	struct mvpp2_port *port = netdev_priv(dev);
	struct mvpp2 *priv = port->priv;
	unsigned char mac_bcast[ETH_ALEN] = {
			0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
	bool valid = false;
	int err;
	err = mvpp2_prs_mac_da_accept(port, mac_bcast, true);
	if (err) {
		netdev_err(dev, "mvpp2_prs_mac_da_accept BC failed\n");
		return err;
	}
	err = mvpp2_prs_mac_da_accept(port, dev->dev_addr, true);
	if (err) {
		netdev_err(dev, "mvpp2_prs_mac_da_accept own addr failed\n");
		return err;
	}
	err = mvpp2_prs_tag_mode_set(port->priv, port->id, MVPP2_TAG_TYPE_MH);
	if (err) {
		netdev_err(dev, "mvpp2_prs_tag_mode_set failed\n");
		return err;
	}
	err = mvpp2_prs_def_flow(port);
	if (err) {
		netdev_err(dev, "mvpp2_prs_def_flow failed\n");
		return err;
	}
	/* Allocate the Rx/Tx queues */
	err = mvpp2_setup_rxqs(port);
	if (err) {
		netdev_err(port->dev, "cannot allocate Rx queues\n");
		return err;
	}

	err = mvpp2_setup_txqs(port);
	if (err) {
		netdev_err(port->dev, "cannot allocate Tx queues\n");
		goto err_cleanup_rxqs;
	}

	err = mvpp2_irqs_init(port);
	if (err) {
		netdev_err(port->dev, "cannot init IRQs\n");
		goto err_cleanup_txqs;
	}
4805 if (port->phylink) {
		err = phylink_fwnode_phy_connect(port->phylink, port->fwnode, 0);
		if (err) {
			netdev_err(port->dev, "could not attach PHY (%d)\n",
				   err);
			goto err_free_irq;
		}

		valid = true;
	}
4816 if (priv->hw_version >= MVPP22 && port->port_irq) {
		err = request_irq(port->port_irq, mvpp2_port_isr, 0,
				  dev->name, port);
		if (err) {
			netdev_err(port->dev,
				   "cannot request port link/ptp IRQ %d\n",
				   port->port_irq);
			goto err_free_irq;
		}
4826 mvpp22_gop_setup_irq(port);
		/* In default link is down */
		netif_carrier_off(port->dev);

		valid = true;
	} else {
		port->port_irq = 0;
	}

	if (!valid) {
		netdev_err(port->dev,
			   "invalid configuration: no dt or link IRQ");
		err = -ENOENT;
		goto err_free_irq;
	}
4843 /* Unmask interrupts on all CPUs */
4844 on_each_cpu(mvpp2_interrupts_unmask, port, 1);
4845 mvpp2_shared_interrupt_mask_unmask(port, false);
4847 mvpp2_start_dev(port);
4849 /* Start hardware statistics gathering */
4850 queue_delayed_work(priv->stats_queue, &port->stats_work,
			   MVPP2_MIB_COUNTERS_STATS_DELAY);

	return 0;

err_free_irq:
	mvpp2_irqs_deinit(port);
err_cleanup_txqs:
	mvpp2_cleanup_txqs(port);
err_cleanup_rxqs:
	mvpp2_cleanup_rxqs(port);
	return err;
}
static int mvpp2_stop(struct net_device *dev)
{
	struct mvpp2_port *port = netdev_priv(dev);
4867 struct mvpp2_port_pcpu *port_pcpu;
4868 unsigned int thread;
4870 mvpp2_stop_dev(port);
4872 /* Mask interrupts on all threads */
4873 on_each_cpu(mvpp2_interrupts_mask, port, 1);
4874 mvpp2_shared_interrupt_mask_unmask(port, true);
	if (port->phylink)
		phylink_disconnect_phy(port->phylink);
	if (port->port_irq)
		free_irq(port->port_irq, port);
4881 mvpp2_irqs_deinit(port);
4882 if (!port->has_tx_irqs) {
4883 for (thread = 0; thread < port->priv->nthreads; thread++) {
4884 port_pcpu = per_cpu_ptr(port->pcpu, thread);
4886 hrtimer_cancel(&port_pcpu->tx_done_timer);
			port_pcpu->timer_scheduled = false;
		}
	}
4890 mvpp2_cleanup_rxqs(port);
4891 mvpp2_cleanup_txqs(port);
4893 cancel_delayed_work_sync(&port->stats_work);
4895 mvpp2_mac_reset_assert(port);
	mvpp22_pcs_reset_assert(port);

	return 0;
}
static int mvpp2_prs_mac_da_accept_list(struct mvpp2_port *port,
					struct netdev_hw_addr_list *list)
{
	struct netdev_hw_addr *ha;
	int ret;
4907 netdev_hw_addr_list_for_each(ha, list) {
		ret = mvpp2_prs_mac_da_accept(port, ha->addr, true);
		if (ret)
			return ret;
	}

	return 0;
}
4916 static void mvpp2_set_rx_promisc(struct mvpp2_port *port, bool enable)
4918 if (!enable && (port->dev->features & NETIF_F_HW_VLAN_CTAG_FILTER))
4919 mvpp2_prs_vid_enable_filtering(port);
	else
		mvpp2_prs_vid_disable_filtering(port);
4923 mvpp2_prs_mac_promisc_set(port->priv, port->id,
4924 MVPP2_PRS_L2_UNI_CAST, enable);
4926 mvpp2_prs_mac_promisc_set(port->priv, port->id,
4927 MVPP2_PRS_L2_MULTI_CAST, enable);
4930 static void mvpp2_set_rx_mode(struct net_device *dev)
4932 struct mvpp2_port *port = netdev_priv(dev);
4934 /* Clear the whole UC and MC list */
4935 mvpp2_prs_mac_del_all(port);
4937 if (dev->flags & IFF_PROMISC) {
		mvpp2_set_rx_promisc(port, true);
		return;
	}
4942 mvpp2_set_rx_promisc(port, false);
4944 if (netdev_uc_count(dev) > MVPP2_PRS_MAC_UC_FILT_MAX ||
4945 mvpp2_prs_mac_da_accept_list(port, &dev->uc))
4946 mvpp2_prs_mac_promisc_set(port->priv, port->id,
4947 MVPP2_PRS_L2_UNI_CAST, true);
4949 if (dev->flags & IFF_ALLMULTI) {
4950 mvpp2_prs_mac_promisc_set(port->priv, port->id,
					  MVPP2_PRS_L2_MULTI_CAST, true);
		return;
	}
4955 if (netdev_mc_count(dev) > MVPP2_PRS_MAC_MC_FILT_MAX ||
4956 mvpp2_prs_mac_da_accept_list(port, &dev->mc))
4957 mvpp2_prs_mac_promisc_set(port->priv, port->id,
4958 MVPP2_PRS_L2_MULTI_CAST, true);
static int mvpp2_set_mac_address(struct net_device *dev, void *p)
{
	const struct sockaddr *addr = p;
	int err;
4966 if (!is_valid_ether_addr(addr->sa_data))
4967 return -EADDRNOTAVAIL;
	err = mvpp2_prs_update_mac_da(dev, addr->sa_data);
	if (err) {
		/* Reconfigure parser to accept the original MAC address */
		mvpp2_prs_update_mac_da(dev, dev->dev_addr);
		netdev_err(dev, "failed to change MAC address\n");
	}

	return err;
}
4978 /* Shut down all the ports, reconfigure the pools as percpu or shared,
4979 * then bring up again all ports.
4981 static int mvpp2_bm_switch_buffers(struct mvpp2 *priv, bool percpu)
4983 bool change_percpu = (percpu != priv->percpu_pools);
4984 int numbufs = MVPP2_BM_POOLS_NUM, i;
4985 struct mvpp2_port *port = NULL;
4986 bool status[MVPP2_MAX_PORTS];
4988 for (i = 0; i < priv->port_count; i++) {
4989 port = priv->port_list[i];
4990 status[i] = netif_running(port->dev);
		if (status[i])
			mvpp2_stop(port->dev);
	}
4995 /* nrxqs is the same for all ports */
4996 if (priv->percpu_pools)
4997 numbufs = port->nrxqs * 2;
	if (change_percpu)
		mvpp2_bm_pool_update_priv_fc(priv, false);
5002 for (i = 0; i < numbufs; i++)
5003 mvpp2_bm_pool_destroy(port->dev->dev.parent, priv, &priv->bm_pools[i]);
5005 devm_kfree(port->dev->dev.parent, priv->bm_pools);
5006 priv->percpu_pools = percpu;
5007 mvpp2_bm_init(port->dev->dev.parent, priv);
5009 for (i = 0; i < priv->port_count; i++) {
5010 port = priv->port_list[i];
5011 if (percpu && port->ntxqs >= num_possible_cpus() * 2)
5012 xdp_set_features_flag(port->dev,
5013 NETDEV_XDP_ACT_BASIC |
5014 NETDEV_XDP_ACT_REDIRECT |
5015 NETDEV_XDP_ACT_NDO_XMIT);
		else
			xdp_clear_features_flag(port->dev);
		mvpp2_swf_bm_pool_init(port);
		if (status[i])
			mvpp2_open(port->dev);
	}

	if (change_percpu)
		mvpp2_bm_pool_update_priv_fc(priv, true);

	return 0;
}
static int mvpp2_change_mtu(struct net_device *dev, int mtu)
{
	struct mvpp2_port *port = netdev_priv(dev);
	bool running = netif_running(dev);
	struct mvpp2 *priv = port->priv;
	int err;
5037 if (!IS_ALIGNED(MVPP2_RX_PKT_SIZE(mtu), 8)) {
5038 netdev_info(dev, "illegal MTU value %d, round to %d\n", mtu,
5039 ALIGN(MVPP2_RX_PKT_SIZE(mtu), 8));
5040 mtu = ALIGN(MVPP2_RX_PKT_SIZE(mtu), 8);
5043 if (port->xdp_prog && mtu > MVPP2_MAX_RX_BUF_SIZE) {
5044 netdev_err(dev, "Illegal MTU value %d (> %d) for XDP mode\n",
			   mtu, (int)MVPP2_MAX_RX_BUF_SIZE);
		return -EINVAL;
	}
5049 if (MVPP2_RX_PKT_SIZE(mtu) > MVPP2_BM_LONG_PKT_SIZE) {
5050 if (priv->percpu_pools) {
5051 netdev_warn(dev, "mtu %d too high, switching to shared buffers", mtu);
			mvpp2_bm_switch_buffers(priv, false);
		}
	} else {
		bool jumbo = false;
		int i;
5058 for (i = 0; i < priv->port_count; i++)
5059 if (priv->port_list[i] != port &&
5060 MVPP2_RX_PKT_SIZE(priv->port_list[i]->dev->mtu) >
			    MVPP2_BM_LONG_PKT_SIZE) {
				jumbo = true;
				break;
			}

		/* No port is using jumbo frames */
		if (!jumbo) {
			dev_info(port->dev->dev.parent,
				 "all ports have a low MTU, switching to per-cpu buffers");
			mvpp2_bm_switch_buffers(priv, true);
		}
	}
	if (running)
		mvpp2_stop_dev(port);
	err = mvpp2_bm_update_mtu(dev, mtu);
	if (err) {
		netdev_err(dev, "failed to change MTU\n");
		/* Reconfigure BM to the original MTU */
		mvpp2_bm_update_mtu(dev, dev->mtu);
	} else {
		port->pkt_size = MVPP2_RX_PKT_SIZE(mtu);
	}
	if (running) {
		mvpp2_start_dev(port);
		mvpp2_egress_enable(port);
		mvpp2_ingress_enable(port);
	}

	return err;
}
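/* Note: raising one port's MTU beyond MVPP2_BM_LONG_PKT_SIZE switches the
 * whole controller from per-cpu to shared buffer pools, and lowering it
 * again only reverts once no other port still needs jumbo buffers (see the
 * loop above).
 */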
static int mvpp2_check_pagepool_dma(struct mvpp2_port *port)
{
	enum dma_data_direction dma_dir = DMA_FROM_DEVICE;
	struct mvpp2 *priv = port->priv;
	int err = 0, i;
	if (!priv->percpu_pools)
		return err;

	if (!priv->page_pool[0])
		return -ENOMEM;
5107 for (i = 0; i < priv->port_count; i++) {
5108 port = priv->port_list[i];
5109 if (port->xdp_prog) {
			dma_dir = DMA_BIDIRECTIONAL;
			break;
		}
	}
5115 /* All pools are equal in terms of DMA direction */
5116 if (priv->page_pool[0]->p.dma_dir != dma_dir)
		err = mvpp2_bm_switch_buffers(priv, true);

	return err;
}
static void
mvpp2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
	struct mvpp2_port *port = netdev_priv(dev);
	unsigned int start;
	unsigned int cpu;

	for_each_possible_cpu(cpu) {
		struct mvpp2_pcpu_stats *cpu_stats;
		u64 rx_packets;
		u64 rx_bytes;
		u64 tx_packets;
		u64 tx_bytes;

		cpu_stats = per_cpu_ptr(port->stats, cpu);
		do {
			start = u64_stats_fetch_begin(&cpu_stats->syncp);
5139 rx_packets = cpu_stats->rx_packets;
5140 rx_bytes = cpu_stats->rx_bytes;
5141 tx_packets = cpu_stats->tx_packets;
5142 tx_bytes = cpu_stats->tx_bytes;
5143 } while (u64_stats_fetch_retry(&cpu_stats->syncp, start));
5145 stats->rx_packets += rx_packets;
5146 stats->rx_bytes += rx_bytes;
5147 stats->tx_packets += tx_packets;
		stats->tx_bytes += tx_bytes;
	}
5151 stats->rx_errors = dev->stats.rx_errors;
5152 stats->rx_dropped = dev->stats.rx_dropped;
	stats->tx_dropped = dev->stats.tx_dropped;
}
static int mvpp2_set_ts_config(struct mvpp2_port *port, struct ifreq *ifr)
{
	struct hwtstamp_config config;
	void __iomem *ptp;
	u32 gcr, int_mask;

	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
		return -EFAULT;
5165 if (config.tx_type != HWTSTAMP_TX_OFF &&
	    config.tx_type != HWTSTAMP_TX_ON)
		return -ERANGE;
5169 ptp = port->priv->iface_base + MVPP22_PTP_BASE(port->gop_id);
	int_mask = gcr = 0;
	if (config.tx_type != HWTSTAMP_TX_OFF) {
5173 gcr |= MVPP22_PTP_GCR_TSU_ENABLE | MVPP22_PTP_GCR_TX_RESET;
5174 int_mask |= MVPP22_PTP_INT_MASK_QUEUE1 |
			    MVPP22_PTP_INT_MASK_QUEUE0;
	}
5178 /* It seems we must also release the TX reset when enabling the TSU */
5179 if (config.rx_filter != HWTSTAMP_FILTER_NONE)
5180 gcr |= MVPP22_PTP_GCR_TSU_ENABLE | MVPP22_PTP_GCR_RX_RESET |
5181 MVPP22_PTP_GCR_TX_RESET;
5183 if (gcr & MVPP22_PTP_GCR_TSU_ENABLE)
5184 mvpp22_tai_start(port->priv->tai);
5186 if (config.rx_filter != HWTSTAMP_FILTER_NONE) {
5187 config.rx_filter = HWTSTAMP_FILTER_ALL;
5188 mvpp2_modify(ptp + MVPP22_PTP_GCR,
5189 MVPP22_PTP_GCR_RX_RESET |
5190 MVPP22_PTP_GCR_TX_RESET |
5191 MVPP22_PTP_GCR_TSU_ENABLE, gcr);
5192 port->rx_hwtstamp = true;
	} else {
		port->rx_hwtstamp = false;
5195 mvpp2_modify(ptp + MVPP22_PTP_GCR,
5196 MVPP22_PTP_GCR_RX_RESET |
5197 MVPP22_PTP_GCR_TX_RESET |
			     MVPP22_PTP_GCR_TSU_ENABLE, gcr);
	}
5201 mvpp2_modify(ptp + MVPP22_PTP_INT_MASK,
5202 MVPP22_PTP_INT_MASK_QUEUE1 |
5203 MVPP22_PTP_INT_MASK_QUEUE0, int_mask);
5205 if (!(gcr & MVPP22_PTP_GCR_TSU_ENABLE))
5206 mvpp22_tai_stop(port->priv->tai);
5208 port->tx_hwtstamp_type = config.tx_type;
	if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
		return -EFAULT;

	return 0;
}
5216 static int mvpp2_get_ts_config(struct mvpp2_port *port, struct ifreq *ifr)
5218 struct hwtstamp_config config;
5220 memset(&config, 0, sizeof(config));
5222 config.tx_type = port->tx_hwtstamp_type;
5223 config.rx_filter = port->rx_hwtstamp ?
5224 HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE;
	if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
		return -EFAULT;

	return 0;
}
5232 static int mvpp2_ethtool_get_ts_info(struct net_device *dev,
				     struct ethtool_ts_info *info)
{
	struct mvpp2_port *port = netdev_priv(dev);

	if (!port->hwtstamp)
		return -EOPNOTSUPP;
5240 info->phc_index = mvpp22_tai_ptp_clock_index(port->priv->tai);
5241 info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
5242 SOF_TIMESTAMPING_RX_SOFTWARE |
5243 SOF_TIMESTAMPING_SOFTWARE |
5244 SOF_TIMESTAMPING_TX_HARDWARE |
5245 SOF_TIMESTAMPING_RX_HARDWARE |
5246 SOF_TIMESTAMPING_RAW_HARDWARE;
5247 info->tx_types = BIT(HWTSTAMP_TX_OFF) |
5248 BIT(HWTSTAMP_TX_ON);
5249 info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) |
			   BIT(HWTSTAMP_FILTER_ALL);

	return 0;
}
static int mvpp2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mvpp2_port *port = netdev_priv(dev);

	switch (cmd) {
	case SIOCSHWTSTAMP:
		if (port->hwtstamp)
			return mvpp2_set_ts_config(port, ifr);
		break;

	case SIOCGHWTSTAMP:
		if (port->hwtstamp)
			return mvpp2_get_ts_config(port, ifr);
		break;
	}

	if (!port->phylink)
		return -ENOTSUPP;

	return phylink_mii_ioctl(port->phylink, ifr, cmd);
}
static int mvpp2_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
{
	struct mvpp2_port *port = netdev_priv(dev);
	int ret;

	ret = mvpp2_prs_vid_entry_add(port, vid);
	if (ret)
		netdev_err(dev, "rx-vlan-filter offloading cannot accept more than %d VIDs per port\n",
			   MVPP2_PRS_VLAN_FILT_MAX - 1);
	return ret;
}
static int mvpp2_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
{
	struct mvpp2_port *port = netdev_priv(dev);

	mvpp2_prs_vid_entry_remove(port, vid);
	return 0;
}
5297 static int mvpp2_set_features(struct net_device *dev,
5298 netdev_features_t features)
5300 netdev_features_t changed = dev->features ^ features;
5301 struct mvpp2_port *port = netdev_priv(dev);
5303 if (changed & NETIF_F_HW_VLAN_CTAG_FILTER) {
5304 if (features & NETIF_F_HW_VLAN_CTAG_FILTER) {
			mvpp2_prs_vid_enable_filtering(port);
		} else {
			/* Invalidate all registered VID filters for this
			 * port
			 */
			mvpp2_prs_vid_remove_all(port);

			mvpp2_prs_vid_disable_filtering(port);
		}
	}
5316 if (changed & NETIF_F_RXHASH) {
5317 if (features & NETIF_F_RXHASH)
5318 mvpp22_port_rss_enable(port);
		else
			mvpp22_port_rss_disable(port);
	}

	return 0;
}
5326 static int mvpp2_xdp_setup(struct mvpp2_port *port, struct netdev_bpf *bpf)
5328 struct bpf_prog *prog = bpf->prog, *old_prog;
5329 bool running = netif_running(port->dev);
5330 bool reset = !prog != !port->xdp_prog;
5332 if (port->dev->mtu > MVPP2_MAX_RX_BUF_SIZE) {
5333 NL_SET_ERR_MSG_MOD(bpf->extack, "MTU too large for XDP");
5337 if (!port->priv->percpu_pools) {
5338 NL_SET_ERR_MSG_MOD(bpf->extack, "Per CPU Pools required for XDP");
5342 if (port->ntxqs < num_possible_cpus() * 2) {
5343 NL_SET_ERR_MSG_MOD(bpf->extack, "XDP_TX needs two TX queues per CPU");
5347 /* device is up and bpf is added/removed, must setup the RX queues */
5348 if (running && reset)
5349 mvpp2_stop(port->dev);
5351 old_prog = xchg(&port->xdp_prog, prog);
	if (old_prog)
		bpf_prog_put(old_prog);
	/* bpf is just replaced, RXQ and MTU are already setup */
	if (!reset)
		return 0;

	/* device was up, restore the link */
	if (running)
		mvpp2_open(port->dev);
5363 /* Check Page Pool DMA Direction */
	mvpp2_check_pagepool_dma(port);

	return 0;
}
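/* XDP attach therefore requires per-cpu pools, at least two TX queues per
 * possible CPU (one for XDP_TX, one for the regular stack), and an MTU no
 * larger than MVPP2_MAX_RX_BUF_SIZE; e.g. with 4 CPUs, ntxqs must be at
 * least 8.
 */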
5369 static int mvpp2_xdp(struct net_device *dev, struct netdev_bpf *xdp)
5371 struct mvpp2_port *port = netdev_priv(dev);
5373 switch (xdp->command) {
5374 case XDP_SETUP_PROG:
		return mvpp2_xdp_setup(port, xdp);
	default:
		return -EINVAL;
	}
}
5381 /* Ethtool methods */
static int mvpp2_ethtool_nway_reset(struct net_device *dev)
{
	struct mvpp2_port *port = netdev_priv(dev);

	if (!port->phylink)
		return -ENOTSUPP;

	return phylink_ethtool_nway_reset(port->phylink);
}
5393 /* Set interrupt coalescing for ethtools */
static int
mvpp2_ethtool_set_coalesce(struct net_device *dev,
5396 struct ethtool_coalesce *c,
5397 struct kernel_ethtool_coalesce *kernel_coal,
			   struct netlink_ext_ack *extack)
{
	struct mvpp2_port *port = netdev_priv(dev);
	int queue;
5403 for (queue = 0; queue < port->nrxqs; queue++) {
5404 struct mvpp2_rx_queue *rxq = port->rxqs[queue];
5406 rxq->time_coal = c->rx_coalesce_usecs;
5407 rxq->pkts_coal = c->rx_max_coalesced_frames;
5408 mvpp2_rx_pkts_coal_set(port, rxq);
5409 mvpp2_rx_time_coal_set(port, rxq);
5412 if (port->has_tx_irqs) {
5413 port->tx_time_coal = c->tx_coalesce_usecs;
5414 mvpp2_tx_time_coal_set(port);
5417 for (queue = 0; queue < port->ntxqs; queue++) {
5418 struct mvpp2_tx_queue *txq = port->txqs[queue];
5420 txq->done_pkts_coal = c->tx_max_coalesced_frames;
5422 if (port->has_tx_irqs)
			mvpp2_tx_pkts_coal_set(port, txq);
	}

	return 0;
}
5429 /* get coalescing for ethtools */
static int
mvpp2_ethtool_get_coalesce(struct net_device *dev,
5432 struct ethtool_coalesce *c,
5433 struct kernel_ethtool_coalesce *kernel_coal,
5434 struct netlink_ext_ack *extack)
5436 struct mvpp2_port *port = netdev_priv(dev);
5438 c->rx_coalesce_usecs = port->rxqs[0]->time_coal;
5439 c->rx_max_coalesced_frames = port->rxqs[0]->pkts_coal;
5440 c->tx_max_coalesced_frames = port->txqs[0]->done_pkts_coal;
	c->tx_coalesce_usecs = port->tx_time_coal;
	return 0;
}
5445 static void mvpp2_ethtool_get_drvinfo(struct net_device *dev,
5446 struct ethtool_drvinfo *drvinfo)
5448 strscpy(drvinfo->driver, MVPP2_DRIVER_NAME,
5449 sizeof(drvinfo->driver));
5450 strscpy(drvinfo->version, MVPP2_DRIVER_VERSION,
5451 sizeof(drvinfo->version));
5452 strscpy(drvinfo->bus_info, dev_name(&dev->dev),
5453 sizeof(drvinfo->bus_info));
static void
mvpp2_ethtool_get_ringparam(struct net_device *dev,
5458 struct ethtool_ringparam *ring,
5459 struct kernel_ethtool_ringparam *kernel_ring,
5460 struct netlink_ext_ack *extack)
5462 struct mvpp2_port *port = netdev_priv(dev);
5464 ring->rx_max_pending = MVPP2_MAX_RXD_MAX;
5465 ring->tx_max_pending = MVPP2_MAX_TXD_MAX;
5466 ring->rx_pending = port->rx_ring_size;
5467 ring->tx_pending = port->tx_ring_size;
static int
mvpp2_ethtool_set_ringparam(struct net_device *dev,
5472 struct ethtool_ringparam *ring,
5473 struct kernel_ethtool_ringparam *kernel_ring,
			    struct netlink_ext_ack *extack)
{
	struct mvpp2_port *port = netdev_priv(dev);
	u16 prev_rx_ring_size = port->rx_ring_size;
	u16 prev_tx_ring_size = port->tx_ring_size;
	int err;
	err = mvpp2_check_ringparam_valid(dev, ring);
	if (err)
		return err;
5485 if (!netif_running(dev)) {
5486 port->rx_ring_size = ring->rx_pending;
		port->tx_ring_size = ring->tx_pending;
		return 0;
	}
5491 /* The interface is running, so we have to force a
5492 * reallocation of the queues
5494 mvpp2_stop_dev(port);
5495 mvpp2_cleanup_rxqs(port);
5496 mvpp2_cleanup_txqs(port);
5498 port->rx_ring_size = ring->rx_pending;
5499 port->tx_ring_size = ring->tx_pending;
	err = mvpp2_setup_rxqs(port);
	if (err) {
		/* Reallocate Rx queues with the original ring size */
		port->rx_ring_size = prev_rx_ring_size;
		ring->rx_pending = prev_rx_ring_size;
		err = mvpp2_setup_rxqs(port);
		if (err)
			goto err_out;
	}
	err = mvpp2_setup_txqs(port);
	if (err) {
		/* Reallocate Tx queues with the original ring size */
		port->tx_ring_size = prev_tx_ring_size;
		ring->tx_pending = prev_tx_ring_size;
		err = mvpp2_setup_txqs(port);
		if (err)
			goto err_clean_rxqs;
	}
5520 mvpp2_start_dev(port);
5521 mvpp2_egress_enable(port);
	mvpp2_ingress_enable(port);

	return 0;

err_clean_rxqs:
	mvpp2_cleanup_rxqs(port);
err_out:
	netdev_err(dev, "failed to change ring parameters");
	return err;
}
5533 static void mvpp2_ethtool_get_pause_param(struct net_device *dev,
					  struct ethtool_pauseparam *pause)
{
	struct mvpp2_port *port = netdev_priv(dev);

	if (!port->phylink)
		return;

	phylink_ethtool_get_pauseparam(port->phylink, pause);
}
5544 static int mvpp2_ethtool_set_pause_param(struct net_device *dev,
					 struct ethtool_pauseparam *pause)
{
	struct mvpp2_port *port = netdev_priv(dev);

	if (!port->phylink)
		return -ENOTSUPP;

	return phylink_ethtool_set_pauseparam(port->phylink, pause);
}
5555 static int mvpp2_ethtool_get_link_ksettings(struct net_device *dev,
					    struct ethtool_link_ksettings *cmd)
{
	struct mvpp2_port *port = netdev_priv(dev);

	if (!port->phylink)
		return -ENOTSUPP;

	return phylink_ethtool_ksettings_get(port->phylink, cmd);
}
5566 static int mvpp2_ethtool_set_link_ksettings(struct net_device *dev,
					    const struct ethtool_link_ksettings *cmd)
{
	struct mvpp2_port *port = netdev_priv(dev);

	if (!port->phylink)
		return -ENOTSUPP;

	return phylink_ethtool_ksettings_set(port->phylink, cmd);
}
5577 static int mvpp2_ethtool_get_rxnfc(struct net_device *dev,
				   struct ethtool_rxnfc *info, u32 *rules)
{
	struct mvpp2_port *port = netdev_priv(dev);
	int ret = 0, i, loc = 0;

	if (!mvpp22_rss_is_supported(port))
		return -EOPNOTSUPP;
	switch (info->cmd) {
	case ETHTOOL_GRXFH:
		ret = mvpp2_ethtool_rxfh_get(port, info);
		break;
	case ETHTOOL_GRXRINGS:
		info->data = port->nrxqs;
		break;
	case ETHTOOL_GRXCLSRLCNT:
		info->rule_cnt = port->n_rfs_rules;
		break;
	case ETHTOOL_GRXCLSRULE:
		ret = mvpp2_ethtool_cls_rule_get(port, info);
		break;
	case ETHTOOL_GRXCLSRLALL:
		for (i = 0; i < MVPP2_N_RFS_ENTRIES_PER_FLOW; i++) {
			if (loc == info->rule_cnt) {
				ret = -EMSGSIZE;
				break;
			}

			if (port->rfs_rules[i])
				rules[loc++] = i;
		}
		break;
	default:
		return -ENOTSUPP;
	}

	return ret;
}
5617 static int mvpp2_ethtool_set_rxnfc(struct net_device *dev,
				   struct ethtool_rxnfc *info)
{
	struct mvpp2_port *port = netdev_priv(dev);
	int ret = 0;

	if (!mvpp22_rss_is_supported(port))
		return -EOPNOTSUPP;

	switch (info->cmd) {
	case ETHTOOL_SRXFH:
		ret = mvpp2_ethtool_rxfh_set(port, info);
		break;
	case ETHTOOL_SRXCLSRLINS:
		ret = mvpp2_ethtool_cls_rule_ins(port, info);
		break;
	case ETHTOOL_SRXCLSRLDEL:
		ret = mvpp2_ethtool_cls_rule_del(port, info);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return ret;
}
5642 static u32 mvpp2_ethtool_get_rxfh_indir_size(struct net_device *dev)
5644 struct mvpp2_port *port = netdev_priv(dev);
5646 return mvpp22_rss_is_supported(port) ? MVPP22_RSS_TABLE_ENTRIES : 0;
5649 static int mvpp2_ethtool_get_rxfh(struct net_device *dev,
				  struct ethtool_rxfh_param *rxfh)
{
	struct mvpp2_port *port = netdev_priv(dev);
	u32 rss_context = rxfh->rss_context;
	int ret = 0;

	if (!mvpp22_rss_is_supported(port))
		return -EOPNOTSUPP;
	if (rss_context >= MVPP22_N_RSS_TABLES)
		return -EINVAL;
5661 rxfh->hfunc = ETH_RSS_HASH_CRC32;
	if (rxfh->indir)
		ret = mvpp22_port_rss_ctx_indir_get(port, rss_context,
						    rxfh->indir);

	return ret;
}
5670 static int mvpp2_ethtool_set_rxfh(struct net_device *dev,
5671 struct ethtool_rxfh_param *rxfh,
				  struct netlink_ext_ack *extack)
{
	struct mvpp2_port *port = netdev_priv(dev);
	u32 *rss_context = &rxfh->rss_context;
	int ret = 0;

	if (!mvpp22_rss_is_supported(port))
		return -EOPNOTSUPP;
5681 if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
	    rxfh->hfunc != ETH_RSS_HASH_CRC32)
		return -EOPNOTSUPP;

	if (rxfh->key)
		return -EOPNOTSUPP;

	if (*rss_context && rxfh->rss_delete)
5689 return mvpp22_port_rss_ctx_delete(port, *rss_context);
5691 if (*rss_context == ETH_RXFH_CONTEXT_ALLOC) {
		ret = mvpp22_port_rss_ctx_create(port, rss_context);
		if (ret)
			return ret;
	}

	if (rxfh->indir)
		ret = mvpp22_port_rss_ctx_indir_set(port, *rss_context,
						    rxfh->indir);

	return ret;
}
5706 static const struct net_device_ops mvpp2_netdev_ops = {
5707 .ndo_open = mvpp2_open,
5708 .ndo_stop = mvpp2_stop,
5709 .ndo_start_xmit = mvpp2_tx,
5710 .ndo_set_rx_mode = mvpp2_set_rx_mode,
5711 .ndo_set_mac_address = mvpp2_set_mac_address,
5712 .ndo_change_mtu = mvpp2_change_mtu,
5713 .ndo_get_stats64 = mvpp2_get_stats64,
5714 .ndo_eth_ioctl = mvpp2_ioctl,
5715 .ndo_vlan_rx_add_vid = mvpp2_vlan_rx_add_vid,
5716 .ndo_vlan_rx_kill_vid = mvpp2_vlan_rx_kill_vid,
5717 .ndo_set_features = mvpp2_set_features,
5718 .ndo_bpf = mvpp2_xdp,
	.ndo_xdp_xmit = mvpp2_xdp_xmit,
};
5722 static const struct ethtool_ops mvpp2_eth_tool_ops = {
5723 .cap_rss_ctx_supported = true,
5724 .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
5725 ETHTOOL_COALESCE_MAX_FRAMES,
5726 .nway_reset = mvpp2_ethtool_nway_reset,
5727 .get_link = ethtool_op_get_link,
5728 .get_ts_info = mvpp2_ethtool_get_ts_info,
5729 .set_coalesce = mvpp2_ethtool_set_coalesce,
5730 .get_coalesce = mvpp2_ethtool_get_coalesce,
5731 .get_drvinfo = mvpp2_ethtool_get_drvinfo,
5732 .get_ringparam = mvpp2_ethtool_get_ringparam,
5733 .set_ringparam = mvpp2_ethtool_set_ringparam,
5734 .get_strings = mvpp2_ethtool_get_strings,
5735 .get_ethtool_stats = mvpp2_ethtool_get_stats,
5736 .get_sset_count = mvpp2_ethtool_get_sset_count,
5737 .get_pauseparam = mvpp2_ethtool_get_pause_param,
5738 .set_pauseparam = mvpp2_ethtool_set_pause_param,
5739 .get_link_ksettings = mvpp2_ethtool_get_link_ksettings,
5740 .set_link_ksettings = mvpp2_ethtool_set_link_ksettings,
5741 .get_rxnfc = mvpp2_ethtool_get_rxnfc,
5742 .set_rxnfc = mvpp2_ethtool_set_rxnfc,
5743 .get_rxfh_indir_size = mvpp2_ethtool_get_rxfh_indir_size,
5744 .get_rxfh = mvpp2_ethtool_get_rxfh,
	.set_rxfh = mvpp2_ethtool_set_rxfh,
};
5748 /* Used for PPv2.1, or PPv2.2 with the old Device Tree binding that
5749 * had a single IRQ defined per-port.
5751 static int mvpp2_simple_queue_vectors_init(struct mvpp2_port *port,
					   struct device_node *port_node)
{
	struct mvpp2_queue_vector *v = &port->qvecs[0];

	v->first_rxq = 0;
5757 v->nrxqs = port->nrxqs;
5758 v->type = MVPP2_QUEUE_VECTOR_SHARED;
5759 v->sw_thread_id = 0;
5760 v->sw_thread_mask = *cpumask_bits(cpu_online_mask);
	v->port = port;
	v->irq = irq_of_parse_and_map(port_node, 0);
	if (v->irq <= 0)
		return -EINVAL;
	netif_napi_add(port->dev, &v->napi, mvpp2_poll);

	port->nqvecs = 1;

	return 0;
}
5772 static int mvpp2_multi_queue_vectors_init(struct mvpp2_port *port,
					  struct device_node *port_node)
{
	struct mvpp2 *priv = port->priv;
	struct mvpp2_queue_vector *v;
	int i, ret;
5780 case MVPP2_QDIST_SINGLE_MODE:
		port->nqvecs = priv->nthreads + 1;
		break;
5783 case MVPP2_QDIST_MULTI_MODE:
		port->nqvecs = priv->nthreads;
		break;
	}
5788 for (i = 0; i < port->nqvecs; i++) {
		char irqname[16];

		v = port->qvecs + i;

		v->port = port;
		v->type = MVPP2_QUEUE_VECTOR_PRIVATE;
5795 v->sw_thread_id = i;
5796 v->sw_thread_mask = BIT(i);
5798 if (port->flags & MVPP2_F_DT_COMPAT)
5799 snprintf(irqname, sizeof(irqname), "tx-cpu%d", i);
5801 snprintf(irqname, sizeof(irqname), "hif%d", i);
		if (queue_mode == MVPP2_QDIST_MULTI_MODE) {
			v->first_rxq = i;
			v->nrxqs = 1;
5806 } else if (queue_mode == MVPP2_QDIST_SINGLE_MODE &&
5807 i == (port->nqvecs - 1)) {
			v->first_rxq = 0;
			v->nrxqs = port->nrxqs;
5810 v->type = MVPP2_QUEUE_VECTOR_SHARED;
5812 if (port->flags & MVPP2_F_DT_COMPAT)
5813 strscpy(irqname, "rx-shared", sizeof(irqname));
		if (port_node)
			v->irq = of_irq_get_byname(port_node, irqname);
		else
			v->irq = fwnode_irq_get(port->fwnode, i);
		if (v->irq <= 0) {
			ret = -EINVAL;
			goto err;
		}

		netif_napi_add(port->dev, &v->napi, mvpp2_poll);
	}

	return 0;
err:
	for (i = 0; i < port->nqvecs; i++)
		irq_dispose_mapping(port->qvecs[i].irq);
	return ret;
}
5836 static int mvpp2_queue_vectors_init(struct mvpp2_port *port,
				    struct device_node *port_node)
{
	if (port->has_tx_irqs)
		return mvpp2_multi_queue_vectors_init(port, port_node);
	else
		return mvpp2_simple_queue_vectors_init(port, port_node);
}
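/* Resulting layout, e.g. with nthreads = 4:
 * - multi mode:  4 private vectors, one per thread, each owning a single
 *   RX queue ("hif0".."hif3");
 * - single mode: 4 private TX vectors plus a 5th shared vector that owns
 *   all RX queues.
 */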
5845 static void mvpp2_queue_vectors_deinit(struct mvpp2_port *port)
5849 for (i = 0; i < port->nqvecs; i++)
5850 irq_dispose_mapping(port->qvecs[i].irq);
5853 /* Configure Rx queue group interrupt for this port */
static void mvpp2_rx_irqs_setup(struct mvpp2_port *port)
{
	struct mvpp2 *priv = port->priv;
	u32 val;
	int i;
5860 if (priv->hw_version == MVPP21) {
		mvpp2_write(priv, MVPP21_ISR_RXQ_GROUP_REG(port->id),
			    port->nrxqs);
		return;
	}
5866 /* Handle the more complicated PPv2.2 and PPv2.3 case */
5867 for (i = 0; i < port->nqvecs; i++) {
5868 struct mvpp2_queue_vector *qv = port->qvecs + i;
		if (!qv->nrxqs)
			continue;

		val = qv->sw_thread_id;
5874 val |= port->id << MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_OFFSET;
5875 mvpp2_write(priv, MVPP22_ISR_RXQ_GROUP_INDEX_REG, val);
5877 val = qv->first_rxq;
5878 val |= qv->nrxqs << MVPP22_ISR_RXQ_SUB_GROUP_SIZE_OFFSET;
		mvpp2_write(priv, MVPP22_ISR_RXQ_SUB_GROUP_CONFIG_REG, val);
	}
}
5883 /* Initialize port HW */
5884 static int mvpp2_port_init(struct mvpp2_port *port)
5886 struct device *dev = port->dev->dev.parent;
5887 struct mvpp2 *priv = port->priv;
5888 struct mvpp2_txq_pcpu *txq_pcpu;
5889 unsigned int thread;
5890 int queue, err, val;
5892 /* Checks for hardware constraints */
5893 if (port->first_rxq + port->nrxqs >
	    MVPP2_MAX_PORTS * priv->max_port_rxqs)
		return -EINVAL;

	if (port->nrxqs > priv->max_port_rxqs || port->ntxqs > MVPP2_MAX_TXQ)
		return -EINVAL;
5901 mvpp2_egress_disable(port);
5902 mvpp2_port_disable(port);
5904 if (mvpp2_is_xlg(port->phy_interface)) {
5905 val = readl(port->base + MVPP22_XLG_CTRL0_REG);
5906 val &= ~MVPP22_XLG_CTRL0_FORCE_LINK_PASS;
5907 val |= MVPP22_XLG_CTRL0_FORCE_LINK_DOWN;
5908 writel(val, port->base + MVPP22_XLG_CTRL0_REG);
	} else {
		val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
5911 val &= ~MVPP2_GMAC_FORCE_LINK_PASS;
5912 val |= MVPP2_GMAC_FORCE_LINK_DOWN;
		writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
	}
5916 port->tx_time_coal = MVPP2_TXDONE_COAL_USEC;
	port->txqs = devm_kcalloc(dev, port->ntxqs, sizeof(*port->txqs),
				  GFP_KERNEL);
	if (!port->txqs)
		return -ENOMEM;
5923 /* Associate physical Tx queues to this port and initialize.
5924 * The mapping is predefined.
5926 for (queue = 0; queue < port->ntxqs; queue++) {
5927 int queue_phy_id = mvpp2_txq_phys(port->id, queue);
5928 struct mvpp2_tx_queue *txq;
5930 txq = devm_kzalloc(dev, sizeof(*txq), GFP_KERNEL);
		if (!txq) {
			err = -ENOMEM;
			goto err_free_percpu;
		}
5936 txq->pcpu = alloc_percpu(struct mvpp2_txq_pcpu);
		if (!txq->pcpu) {
			err = -ENOMEM;
			goto err_free_percpu;
		}
5942 txq->id = queue_phy_id;
5943 txq->log_id = queue;
5944 txq->done_pkts_coal = MVPP2_TXDONE_COAL_PKTS_THRESH;
5945 for (thread = 0; thread < priv->nthreads; thread++) {
5946 txq_pcpu = per_cpu_ptr(txq->pcpu, thread);
			txq_pcpu->thread = thread;
		}

		port->txqs[queue] = txq;
	}
	port->rxqs = devm_kcalloc(dev, port->nrxqs, sizeof(*port->rxqs),
				  GFP_KERNEL);
	if (!port->rxqs) {
		err = -ENOMEM;
		goto err_free_percpu;
	}
5960 /* Allocate and initialize Rx queue for this port */
5961 for (queue = 0; queue < port->nrxqs; queue++) {
5962 struct mvpp2_rx_queue *rxq;
5964 /* Map physical Rx queue to port's logical Rx queue */
5965 rxq = devm_kzalloc(dev, sizeof(*rxq), GFP_KERNEL);
		if (!rxq) {
			err = -ENOMEM;
			goto err_free_percpu;
		}
5970 /* Map this Rx queue to a physical queue */
5971 rxq->id = port->first_rxq + queue;
5972 rxq->port = port->id;
5973 rxq->logic_rxq = queue;
		port->rxqs[queue] = rxq;
	}
5978 mvpp2_rx_irqs_setup(port);
5980 /* Create Rx descriptor rings */
5981 for (queue = 0; queue < port->nrxqs; queue++) {
5982 struct mvpp2_rx_queue *rxq = port->rxqs[queue];
5984 rxq->size = port->rx_ring_size;
5985 rxq->pkts_coal = MVPP2_RX_COAL_PKTS;
		rxq->time_coal = MVPP2_RX_COAL_USEC;
	}
5989 mvpp2_ingress_disable(port);
5991 /* Port default configuration */
5992 mvpp2_defaults_set(port);
5994 /* Port's classifier configuration */
5995 mvpp2_cls_oversize_rxq_set(port);
5996 mvpp2_cls_port_config(port);
5998 if (mvpp22_rss_is_supported(port))
5999 mvpp22_port_rss_init(port);
6001 /* Provide an initial Rx packet size */
6002 port->pkt_size = MVPP2_RX_PKT_SIZE(port->dev->mtu);
6004 /* Initialize pools for swf */
6005 err = mvpp2_swf_bm_pool_init(port);
	if (err)
		goto err_free_percpu;
6009 /* Clear all port stats */
6010 mvpp2_read_stats(port);
6011 memset(port->ethtool_stats, 0,
	       MVPP2_N_ETHTOOL_STATS(port->ntxqs, port->nrxqs) * sizeof(u64));

	return 0;

err_free_percpu:
	for (queue = 0; queue < port->ntxqs; queue++) {
		if (!port->txqs[queue])
			continue;
		free_percpu(port->txqs[queue]->pcpu);
	}
	return err;
}
6025 static bool mvpp22_port_has_legacy_tx_irqs(struct device_node *port_node,
					   unsigned long *flags)
{
	char *irqs[5] = { "rx-shared", "tx-cpu0", "tx-cpu1", "tx-cpu2",
			  "tx-cpu3" };
	int i;

	for (i = 0; i < 5; i++)
		if (of_property_match_string(port_node, "interrupt-names",
					     irqs[i]) < 0)
			return false;

	*flags |= MVPP2_F_DT_COMPAT;
	return true;
}
6041 /* Checks if the port dt description has the required Tx interrupts:
6042 * - PPv2.1: there are no such interrupts.
6043 * - PPv2.2 and PPv2.3:
6044 * - The old DTs have: "rx-shared", "tx-cpuX" with X in [0...3]
6045 * - The new ones have: "hifX" with X in [0..8]
6047 * All those variants are supported to keep the backward compatibility.
6049 static bool mvpp2_port_has_irqs(struct mvpp2 *priv,
6050 struct device_node *port_node,
				unsigned long *flags)
{
	char name[5];
	int i;

	/* ACPI */
	if (!port_node)
		return true;

	if (priv->hw_version == MVPP21)
		return false;

	if (mvpp22_port_has_legacy_tx_irqs(port_node, flags))
		return true;

	for (i = 0; i < MVPP2_MAX_THREADS; i++) {
		snprintf(name, 5, "hif%d", i);
		if (of_property_match_string(port_node, "interrupt-names",
					     name) < 0)
			return false;
	}

	return true;
}
6076 static int mvpp2_port_copy_mac_addr(struct net_device *dev, struct mvpp2 *priv,
				    struct fwnode_handle *fwnode,
				    char **mac_from)
{
	struct mvpp2_port *port = netdev_priv(dev);
	char hw_mac_addr[ETH_ALEN] = {0};
	char fw_mac_addr[ETH_ALEN];
	int ret;
6085 if (!fwnode_get_mac_address(fwnode, fw_mac_addr)) {
6086 *mac_from = "firmware node";
		eth_hw_addr_set(dev, fw_mac_addr);
		return 0;
	}
6091 if (priv->hw_version == MVPP21) {
6092 mvpp21_get_mac_address(port, hw_mac_addr);
6093 if (is_valid_ether_addr(hw_mac_addr)) {
6094 *mac_from = "hardware";
			eth_hw_addr_set(dev, hw_mac_addr);
			return 0;
		}
	}
6100 /* Only valid on OF enabled platforms */
6101 ret = of_get_mac_address_nvmem(to_of_node(fwnode), fw_mac_addr);
	if (ret == -EPROBE_DEFER)
		return -EPROBE_DEFER;
	if (!ret) {
		*mac_from = "nvmem cell";
		eth_hw_addr_set(dev, fw_mac_addr);
		return 0;
	}

	*mac_from = "random";
	eth_hw_addr_random(dev);

	return 0;
}
6116 static struct mvpp2_port *mvpp2_phylink_to_port(struct phylink_config *config)
6118 return container_of(config, struct mvpp2_port, phylink_config);
6121 static struct mvpp2_port *mvpp2_pcs_xlg_to_port(struct phylink_pcs *pcs)
6123 return container_of(pcs, struct mvpp2_port, pcs_xlg);
6126 static struct mvpp2_port *mvpp2_pcs_gmac_to_port(struct phylink_pcs *pcs)
6128 return container_of(pcs, struct mvpp2_port, pcs_gmac);
6131 static void mvpp2_xlg_pcs_get_state(struct phylink_pcs *pcs,
				    struct phylink_link_state *state)
{
	struct mvpp2_port *port = mvpp2_pcs_xlg_to_port(pcs);
	u32 val;
6137 if (port->phy_interface == PHY_INTERFACE_MODE_5GBASER)
6138 state->speed = SPEED_5000;
	else
		state->speed = SPEED_10000;
	state->duplex = 1;
6142 state->an_complete = 1;
6144 val = readl(port->base + MVPP22_XLG_STATUS);
6145 state->link = !!(val & MVPP22_XLG_STATUS_LINK_UP);
6148 val = readl(port->base + MVPP22_XLG_CTRL0_REG);
6149 if (val & MVPP22_XLG_CTRL0_TX_FLOW_CTRL_EN)
6150 state->pause |= MLO_PAUSE_TX;
6151 if (val & MVPP22_XLG_CTRL0_RX_FLOW_CTRL_EN)
6152 state->pause |= MLO_PAUSE_RX;
6155 static int mvpp2_xlg_pcs_config(struct phylink_pcs *pcs, unsigned int neg_mode,
6156 phy_interface_t interface,
6157 const unsigned long *advertising,
				bool permit_pause_to_mac)
{
	return 0;
}
6163 static const struct phylink_pcs_ops mvpp2_phylink_xlg_pcs_ops = {
6164 .pcs_get_state = mvpp2_xlg_pcs_get_state,
6165 .pcs_config = mvpp2_xlg_pcs_config,
6168 static int mvpp2_gmac_pcs_validate(struct phylink_pcs *pcs,
6169 unsigned long *supported,
				   const struct phylink_link_state *state)
{
	/* When in 802.3z mode, we must have AN enabled:
	 * Bit 2 Field InBandAnEn In-band Auto-Negotiation enable. ...
	 * When <PortType> = 1 (1000BASE-X) this field must be set to 1.
	 */
	if (phy_interface_mode_is_8023z(state->interface) &&
	    !phylink_test(state->advertising, Autoneg))
		return -EINVAL;

	return 0;
}
6183 static void mvpp2_gmac_pcs_get_state(struct phylink_pcs *pcs,
				     struct phylink_link_state *state)
{
	struct mvpp2_port *port = mvpp2_pcs_gmac_to_port(pcs);
	u32 val;
6189 val = readl(port->base + MVPP2_GMAC_STATUS0);
6191 state->an_complete = !!(val & MVPP2_GMAC_STATUS0_AN_COMPLETE);
6192 state->link = !!(val & MVPP2_GMAC_STATUS0_LINK_UP);
6193 state->duplex = !!(val & MVPP2_GMAC_STATUS0_FULL_DUPLEX);
6195 switch (port->phy_interface) {
6196 case PHY_INTERFACE_MODE_1000BASEX:
		state->speed = SPEED_1000;
		break;
6199 case PHY_INTERFACE_MODE_2500BASEX:
		state->speed = SPEED_2500;
		break;
	default:
6203 if (val & MVPP2_GMAC_STATUS0_GMII_SPEED)
6204 state->speed = SPEED_1000;
6205 else if (val & MVPP2_GMAC_STATUS0_MII_SPEED)
6206 state->speed = SPEED_100;
		else
			state->speed = SPEED_10;
	}
6212 if (val & MVPP2_GMAC_STATUS0_RX_PAUSE)
6213 state->pause |= MLO_PAUSE_RX;
6214 if (val & MVPP2_GMAC_STATUS0_TX_PAUSE)
6215 state->pause |= MLO_PAUSE_TX;
6218 static int mvpp2_gmac_pcs_config(struct phylink_pcs *pcs, unsigned int neg_mode,
6219 phy_interface_t interface,
6220 const unsigned long *advertising,
6221 bool permit_pause_to_mac)
6223 struct mvpp2_port *port = mvpp2_pcs_gmac_to_port(pcs);
6224 u32 mask, val, an, old_an, changed;
6226 mask = MVPP2_GMAC_IN_BAND_AUTONEG_BYPASS |
6227 MVPP2_GMAC_IN_BAND_AUTONEG |
6228 MVPP2_GMAC_AN_SPEED_EN |
6229 MVPP2_GMAC_FLOW_CTRL_AUTONEG |
6230 MVPP2_GMAC_AN_DUPLEX_EN;
6232 if (neg_mode == PHYLINK_PCS_NEG_INBAND_ENABLED) {
6233 mask |= MVPP2_GMAC_CONFIG_MII_SPEED |
6234 MVPP2_GMAC_CONFIG_GMII_SPEED |
6235 MVPP2_GMAC_CONFIG_FULL_DUPLEX;
6236 val = MVPP2_GMAC_IN_BAND_AUTONEG;
6238 if (interface == PHY_INTERFACE_MODE_SGMII) {
6239 /* SGMII mode receives the speed and duplex from PHY */
6240 val |= MVPP2_GMAC_AN_SPEED_EN |
6241 MVPP2_GMAC_AN_DUPLEX_EN;
6243 /* 802.3z mode has fixed speed and duplex */
6244 val |= MVPP2_GMAC_CONFIG_GMII_SPEED |
6245 MVPP2_GMAC_CONFIG_FULL_DUPLEX;
6247 /* The FLOW_CTRL_AUTONEG bit selects either the hardware
6248 * automatically or the bits in MVPP22_GMAC_CTRL_4_REG
6249 * manually controls the GMAC pause modes.
6251 if (permit_pause_to_mac)
6252 val |= MVPP2_GMAC_FLOW_CTRL_AUTONEG;
6254 /* Configure advertisement bits */
6255 mask |= MVPP2_GMAC_FC_ADV_EN | MVPP2_GMAC_FC_ADV_ASM_EN;
6256 if (phylink_test(advertising, Pause))
6257 val |= MVPP2_GMAC_FC_ADV_EN;
6258 if (phylink_test(advertising, Asym_Pause))
			val |= MVPP2_GMAC_FC_ADV_ASM_EN;
	} else {
		val = 0;
	}
6265 old_an = an = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
6266 an = (an & ~mask) | val;
6267 changed = an ^ old_an;
	if (changed)
		writel(an, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
6271 /* We are only interested in the advertisement bits changing */
	return changed & (MVPP2_GMAC_FC_ADV_EN | MVPP2_GMAC_FC_ADV_ASM_EN);
}
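/* Returning a non-zero value here when the advertisement bits change is
 * what prompts phylink to restart in-band AN, via the pcs_an_restart
 * method implemented just below.
 */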
6275 static void mvpp2_gmac_pcs_an_restart(struct phylink_pcs *pcs)
6277 struct mvpp2_port *port = mvpp2_pcs_gmac_to_port(pcs);
6278 u32 val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
6280 writel(val | MVPP2_GMAC_IN_BAND_RESTART_AN,
6281 port->base + MVPP2_GMAC_AUTONEG_CONFIG);
6282 writel(val & ~MVPP2_GMAC_IN_BAND_RESTART_AN,
6283 port->base + MVPP2_GMAC_AUTONEG_CONFIG);
6286 static const struct phylink_pcs_ops mvpp2_phylink_gmac_pcs_ops = {
6287 .pcs_validate = mvpp2_gmac_pcs_validate,
6288 .pcs_get_state = mvpp2_gmac_pcs_get_state,
6289 .pcs_config = mvpp2_gmac_pcs_config,
6290 .pcs_an_restart = mvpp2_gmac_pcs_an_restart,
6293 static void mvpp2_xlg_config(struct mvpp2_port *port, unsigned int mode,
			     const struct phylink_link_state *state)
{
	u32 val;
6298 mvpp2_modify(port->base + MVPP22_XLG_CTRL0_REG,
6299 MVPP22_XLG_CTRL0_MAC_RESET_DIS,
6300 MVPP22_XLG_CTRL0_MAC_RESET_DIS);
6301 mvpp2_modify(port->base + MVPP22_XLG_CTRL4_REG,
6302 MVPP22_XLG_CTRL4_MACMODSELECT_GMAC |
6303 MVPP22_XLG_CTRL4_EN_IDLE_CHECK |
6304 MVPP22_XLG_CTRL4_FWD_FC | MVPP22_XLG_CTRL4_FWD_PFC,
6305 MVPP22_XLG_CTRL4_FWD_FC | MVPP22_XLG_CTRL4_FWD_PFC);
	/* Wait for reset to deassert */
	do {
		val = readl(port->base + MVPP22_XLG_CTRL0_REG);
	} while (!(val & MVPP22_XLG_CTRL0_MAC_RESET_DIS));
}
6313 static void mvpp2_gmac_config(struct mvpp2_port *port, unsigned int mode,
6314 const struct phylink_link_state *state)
6316 u32 old_ctrl0, ctrl0;
6317 u32 old_ctrl2, ctrl2;
6318 u32 old_ctrl4, ctrl4;
6320 old_ctrl0 = ctrl0 = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
6321 old_ctrl2 = ctrl2 = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
6322 old_ctrl4 = ctrl4 = readl(port->base + MVPP22_GMAC_CTRL_4_REG);
6324 ctrl0 &= ~MVPP2_GMAC_PORT_TYPE_MASK;
6325 ctrl2 &= ~(MVPP2_GMAC_INBAND_AN_MASK | MVPP2_GMAC_PCS_ENABLE_MASK | MVPP2_GMAC_FLOW_CTRL_MASK);
6327 /* Configure port type */
6328 if (phy_interface_mode_is_8023z(state->interface)) {
6329 ctrl2 |= MVPP2_GMAC_PCS_ENABLE_MASK;
6330 ctrl4 &= ~MVPP22_CTRL4_EXT_PIN_GMII_SEL;
6331 ctrl4 |= MVPP22_CTRL4_SYNC_BYPASS_DIS |
6332 MVPP22_CTRL4_DP_CLK_SEL |
6333 MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE;
6334 } else if (state->interface == PHY_INTERFACE_MODE_SGMII) {
6335 ctrl2 |= MVPP2_GMAC_PCS_ENABLE_MASK | MVPP2_GMAC_INBAND_AN_MASK;
6336 ctrl4 &= ~MVPP22_CTRL4_EXT_PIN_GMII_SEL;
6337 ctrl4 |= MVPP22_CTRL4_SYNC_BYPASS_DIS |
6338 MVPP22_CTRL4_DP_CLK_SEL |
6339 MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE;
6340 } else if (phy_interface_mode_is_rgmii(state->interface)) {
6341 ctrl4 &= ~MVPP22_CTRL4_DP_CLK_SEL;
6342 ctrl4 |= MVPP22_CTRL4_EXT_PIN_GMII_SEL |
6343 MVPP22_CTRL4_SYNC_BYPASS_DIS |
6344 MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE;
6347 /* Configure negotiation style */
6348 if (!phylink_autoneg_inband(mode)) {
6349 /* Phy or fixed speed - no in-band AN, nothing to do, leave the
6350 * configured speed, duplex and flow control as-is.
6352 } else if (state->interface == PHY_INTERFACE_MODE_SGMII) {
6353 /* SGMII in-band mode receives the speed and duplex from
6354 * the PHY. Flow control information is not received. */
6355 } else if (phy_interface_mode_is_8023z(state->interface)) {
6356 /* 1000BaseX and 2500BaseX ports cannot negotiate speed nor can
6357 * they negotiate duplex: they are always operating with a fixed
6358 * speed of 1000/2500Mbps in full duplex, so force 1000/2500
6359 * speed and full duplex here.
6361 ctrl0 |= MVPP2_GMAC_PORT_TYPE_MASK;
6364 if (old_ctrl0 != ctrl0)
6365 writel(ctrl0, port->base + MVPP2_GMAC_CTRL_0_REG);
6366 if (old_ctrl2 != ctrl2)
6367 writel(ctrl2, port->base + MVPP2_GMAC_CTRL_2_REG);
6368 if (old_ctrl4 != ctrl4)
6369 writel(ctrl4, port->base + MVPP22_GMAC_CTRL_4_REG);
6372 static struct phylink_pcs *mvpp2_select_pcs(struct phylink_config *config,
6373 phy_interface_t interface)
6375 struct mvpp2_port *port = mvpp2_phylink_to_port(config);
6377 /* Select the appropriate PCS operations depending on the
6378 * configured interface mode. We will only switch to a mode
6379 * that the validate() checks have already passed.
6381 if (mvpp2_is_xlg(interface))
6382 return &port->pcs_xlg;
	else
		return &port->pcs_gmac;
}
6387 static int mvpp2_mac_prepare(struct phylink_config *config, unsigned int mode,
6388 phy_interface_t interface)
6390 struct mvpp2_port *port = mvpp2_phylink_to_port(config);
6392 /* Check for invalid configuration */
6393 if (mvpp2_is_xlg(interface) && port->gop_id != 0) {
6394 netdev_err(port->dev, "Invalid mode on %s\n", port->dev->name);
6398 if (port->phy_interface != interface ||
6399 phylink_autoneg_inband(mode)) {
6400 /* Force the link down when changing the interface or if in
6401 * in-band mode to ensure we do not change the configuration
6402 * while the hardware is indicating link is up. We force both
6403 * XLG and GMAC down to ensure that they're both in a known
6406 mvpp2_modify(port->base + MVPP2_GMAC_AUTONEG_CONFIG,
6407 MVPP2_GMAC_FORCE_LINK_PASS |
6408 MVPP2_GMAC_FORCE_LINK_DOWN,
6409 MVPP2_GMAC_FORCE_LINK_DOWN);
6411 if (mvpp2_port_supports_xlg(port))
6412 mvpp2_modify(port->base + MVPP22_XLG_CTRL0_REG,
6413 MVPP22_XLG_CTRL0_FORCE_LINK_PASS |
6414 MVPP22_XLG_CTRL0_FORCE_LINK_DOWN,
6415 MVPP22_XLG_CTRL0_FORCE_LINK_DOWN);
6418 /* Make sure the port is disabled when reconfiguring the mode */
6419 mvpp2_port_disable(port);
6421 if (port->phy_interface != interface) {
6422 /* Place GMAC into reset */
6423 mvpp2_modify(port->base + MVPP2_GMAC_CTRL_2_REG,
6424 MVPP2_GMAC_PORT_RESET_MASK,
6425 MVPP2_GMAC_PORT_RESET_MASK);
6427 if (port->priv->hw_version >= MVPP22) {
6428 mvpp22_gop_mask_irq(port);
6430 phy_power_off(port->comphy);
6432 /* Reconfigure the serdes lanes */
			mvpp22_mode_reconfigure(port, interface);
		}
	}

	return 0;
}
6440 static void mvpp2_mac_config(struct phylink_config *config, unsigned int mode,
6441 const struct phylink_link_state *state)
6443 struct mvpp2_port *port = mvpp2_phylink_to_port(config);
6445 /* mac (re)configuration */
6446 if (mvpp2_is_xlg(state->interface))
6447 mvpp2_xlg_config(port, mode, state);
6448 else if (phy_interface_mode_is_rgmii(state->interface) ||
6449 phy_interface_mode_is_8023z(state->interface) ||
6450 state->interface == PHY_INTERFACE_MODE_SGMII)
6451 mvpp2_gmac_config(port, mode, state);
6453 if (port->priv->hw_version == MVPP21 && port->flags & MVPP2_F_LOOPBACK)
6454 mvpp2_port_loopback_set(port, state);
static int mvpp2_mac_finish(struct phylink_config *config, unsigned int mode,
			    phy_interface_t interface)
{
	struct mvpp2_port *port = mvpp2_phylink_to_port(config);

	if (port->priv->hw_version >= MVPP22 &&
	    port->phy_interface != interface) {
		port->phy_interface = interface;

		/* Unmask interrupts */
		mvpp22_gop_unmask_irq(port);
	}

	if (!mvpp2_is_xlg(interface)) {
		/* Release GMAC reset and wait */
		mvpp2_modify(port->base + MVPP2_GMAC_CTRL_2_REG,
			     MVPP2_GMAC_PORT_RESET_MASK, 0);

		while (readl(port->base + MVPP2_GMAC_CTRL_2_REG) &
		       MVPP2_GMAC_PORT_RESET_MASK)
			continue;
	}

	mvpp2_port_enable(port);

	/* Allow the link to come up if in in-band mode, otherwise the
	 * link is forced via mac_link_down()/mac_link_up()
	 */
	if (phylink_autoneg_inband(mode)) {
		if (mvpp2_is_xlg(interface))
			mvpp2_modify(port->base + MVPP22_XLG_CTRL0_REG,
				     MVPP22_XLG_CTRL0_FORCE_LINK_PASS |
				     MVPP22_XLG_CTRL0_FORCE_LINK_DOWN, 0);
		else
			mvpp2_modify(port->base + MVPP2_GMAC_AUTONEG_CONFIG,
				     MVPP2_GMAC_FORCE_LINK_PASS |
				     MVPP2_GMAC_FORCE_LINK_DOWN, 0);
	}

	return 0;
}

static void mvpp2_mac_link_up(struct phylink_config *config,
			      struct phy_device *phy,
			      unsigned int mode, phy_interface_t interface,
			      int speed, int duplex,
			      bool tx_pause, bool rx_pause)
{
	struct mvpp2_port *port = mvpp2_phylink_to_port(config);
	u32 val;
	int i;

	if (mvpp2_is_xlg(interface)) {
		if (!phylink_autoneg_inband(mode)) {
			val = MVPP22_XLG_CTRL0_FORCE_LINK_PASS;
			if (tx_pause)
				val |= MVPP22_XLG_CTRL0_TX_FLOW_CTRL_EN;
			if (rx_pause)
				val |= MVPP22_XLG_CTRL0_RX_FLOW_CTRL_EN;

			mvpp2_modify(port->base + MVPP22_XLG_CTRL0_REG,
				     MVPP22_XLG_CTRL0_FORCE_LINK_DOWN |
				     MVPP22_XLG_CTRL0_FORCE_LINK_PASS |
				     MVPP22_XLG_CTRL0_TX_FLOW_CTRL_EN |
				     MVPP22_XLG_CTRL0_RX_FLOW_CTRL_EN, val);
		}
	} else {
		if (!phylink_autoneg_inband(mode)) {
			val = MVPP2_GMAC_FORCE_LINK_PASS;

			if (speed == SPEED_1000 || speed == SPEED_2500)
				val |= MVPP2_GMAC_CONFIG_GMII_SPEED;
			else if (speed == SPEED_100)
				val |= MVPP2_GMAC_CONFIG_MII_SPEED;

			if (duplex == DUPLEX_FULL)
				val |= MVPP2_GMAC_CONFIG_FULL_DUPLEX;

			mvpp2_modify(port->base + MVPP2_GMAC_AUTONEG_CONFIG,
				     MVPP2_GMAC_FORCE_LINK_DOWN |
				     MVPP2_GMAC_FORCE_LINK_PASS |
				     MVPP2_GMAC_CONFIG_MII_SPEED |
				     MVPP2_GMAC_CONFIG_GMII_SPEED |
				     MVPP2_GMAC_CONFIG_FULL_DUPLEX, val);
		}

		/* We can always update the flow control enable bits;
		 * these will only be effective if flow control AN
		 * (MVPP2_GMAC_FLOW_CTRL_AUTONEG) is disabled.
		 */
		val = 0;
		if (tx_pause)
			val |= MVPP22_CTRL4_TX_FC_EN;
		if (rx_pause)
			val |= MVPP22_CTRL4_RX_FC_EN;

		mvpp2_modify(port->base + MVPP22_GMAC_CTRL_4_REG,
			     MVPP22_CTRL4_RX_FC_EN | MVPP22_CTRL4_TX_FC_EN,
			     val);
	}

	if (port->priv->global_tx_fc) {
		port->tx_fc = tx_pause;
		if (tx_pause)
			mvpp2_rxq_enable_fc(port);
		else
			mvpp2_rxq_disable_fc(port);
		if (port->priv->percpu_pools) {
			for (i = 0; i < port->nrxqs; i++)
				mvpp2_bm_pool_update_fc(port, &port->priv->bm_pools[i], tx_pause);
		} else {
			mvpp2_bm_pool_update_fc(port, port->pool_long, tx_pause);
			mvpp2_bm_pool_update_fc(port, port->pool_short, tx_pause);
		}
		if (port->priv->hw_version == MVPP23)
			mvpp23_rx_fifo_fc_en(port->priv, port->id, tx_pause);
	}

	mvpp2_port_enable(port);

	mvpp2_egress_enable(port);
	mvpp2_ingress_enable(port);
	netif_tx_wake_all_queues(port->dev);
}

static void mvpp2_mac_link_down(struct phylink_config *config,
				unsigned int mode, phy_interface_t interface)
{
	struct mvpp2_port *port = mvpp2_phylink_to_port(config);
	u32 val;

	if (!phylink_autoneg_inband(mode)) {
		if (mvpp2_is_xlg(interface)) {
			val = readl(port->base + MVPP22_XLG_CTRL0_REG);
			val &= ~MVPP22_XLG_CTRL0_FORCE_LINK_PASS;
			val |= MVPP22_XLG_CTRL0_FORCE_LINK_DOWN;
			writel(val, port->base + MVPP22_XLG_CTRL0_REG);
		} else {
			val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
			val &= ~MVPP2_GMAC_FORCE_LINK_PASS;
			val |= MVPP2_GMAC_FORCE_LINK_DOWN;
			writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
		}
	}

	netif_tx_stop_all_queues(port->dev);
	mvpp2_egress_disable(port);
	mvpp2_ingress_disable(port);

	mvpp2_port_disable(port);
}

static const struct phylink_mac_ops mvpp2_phylink_ops = {
	.mac_select_pcs = mvpp2_select_pcs,
	.mac_prepare = mvpp2_mac_prepare,
	.mac_config = mvpp2_mac_config,
	.mac_finish = mvpp2_mac_finish,
	.mac_link_up = mvpp2_mac_link_up,
	.mac_link_down = mvpp2_mac_link_down,
};

/* Work-around for ACPI */
static void mvpp2_acpi_start(struct mvpp2_port *port)
{
	/* Phylink isn't used as of now for ACPI, so the MAC has to be
	 * configured manually when the interface is started. This will
	 * be removed as soon as the phylink ACPI support lands in.
	 */
	struct phylink_link_state state = {
		.interface = port->phy_interface,
	};
	struct phylink_pcs *pcs;

	pcs = mvpp2_select_pcs(&port->phylink_config, port->phy_interface);

	mvpp2_mac_prepare(&port->phylink_config, MLO_AN_INBAND,
			  port->phy_interface);
	mvpp2_mac_config(&port->phylink_config, MLO_AN_INBAND, &state);
	pcs->ops->pcs_config(pcs, PHYLINK_PCS_NEG_INBAND_ENABLED,
			     port->phy_interface, state.advertising,
			     false);
	mvpp2_mac_finish(&port->phylink_config, MLO_AN_INBAND,
			 port->phy_interface);
	mvpp2_mac_link_up(&port->phylink_config, NULL,
			  MLO_AN_INBAND, port->phy_interface,
			  SPEED_UNKNOWN, DUPLEX_UNKNOWN, false, false);
}

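/* Editor's note: the sequence above deliberately mirrors the phylink
 * callback order sketched before mvpp2_mac_config() (prepare -> config ->
 * pcs_config -> finish -> link_up), so the manual ACPI path stays
 * behaviourally equivalent until proper phylink ACPI support replaces it.
 */
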
/* In order to ensure backward compatibility for ACPI, check if the port
 * firmware node comprises the necessary description allowing to use phylink.
 */
static bool mvpp2_use_acpi_compat_mode(struct fwnode_handle *port_fwnode)
{
	if (!is_acpi_node(port_fwnode))
		return false;

	return (!fwnode_property_present(port_fwnode, "phy-handle") &&
		!fwnode_property_present(port_fwnode, "managed") &&
		!fwnode_get_named_child_node(port_fwnode, "fixed-link"));
}

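/* Editor's illustration (generic ethernet-controller bindings, not taken
 * from this driver): any one of the following port-node properties is
 * enough to take the phylink path instead of the ACPI compatibility mode,
 * shown here in DT syntax:
 *
 *	phy-handle = <&phy0>;
 *	managed = "in-band-status";
 *	fixed-link { speed = <1000>; full-duplex; };
 */
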
/* Ports initialization */
static int mvpp2_port_probe(struct platform_device *pdev,
			    struct fwnode_handle *port_fwnode,
			    struct mvpp2 *priv)
{
	struct phy *comphy = NULL;
	struct mvpp2_port *port;
	struct mvpp2_port_pcpu *port_pcpu;
	struct device_node *port_node = to_of_node(port_fwnode);
	netdev_features_t features;
	struct net_device *dev;
	struct phylink *phylink;
	char *mac_from = "";
	unsigned int ntxqs, nrxqs, thread;
	unsigned long flags = 0;
	bool has_tx_irqs;
	u32 id;
	int phy_mode;
	int err, i;

	has_tx_irqs = mvpp2_port_has_irqs(priv, port_node, &flags);
	if (!has_tx_irqs && queue_mode == MVPP2_QDIST_MULTI_MODE) {
		dev_err(&pdev->dev,
			"not enough IRQs to support multi queue mode\n");
		return -EINVAL;
	}

	ntxqs = MVPP2_MAX_TXQ;
	nrxqs = mvpp2_get_nrxqs(priv);

	dev = alloc_etherdev_mqs(sizeof(*port), ntxqs, nrxqs);
	if (!dev)
		return -ENOMEM;

	phy_mode = fwnode_get_phy_mode(port_fwnode);
	if (phy_mode < 0) {
		dev_err(&pdev->dev, "incorrect phy mode\n");
		err = phy_mode;
		goto err_free_netdev;
	}

	/*
	 * Rewrite 10GBASE-KR to 10GBASE-R for compatibility with existing DT.
	 * Existing usage of 10GBASE-KR is not correct; no backplane
	 * negotiation is done, and this driver does not actually support
	 * 10GBASE-KR.
	 */
	if (phy_mode == PHY_INTERFACE_MODE_10GKR)
		phy_mode = PHY_INTERFACE_MODE_10GBASER;

	if (port_node) {
		comphy = devm_of_phy_get(&pdev->dev, port_node, NULL);
		if (IS_ERR(comphy)) {
			if (PTR_ERR(comphy) == -EPROBE_DEFER) {
				err = -EPROBE_DEFER;
				goto err_free_netdev;
			}
			comphy = NULL;
		}
	}

	if (fwnode_property_read_u32(port_fwnode, "port-id", &id)) {
		err = -EINVAL;
		dev_err(&pdev->dev, "missing port-id value\n");
		goto err_free_netdev;
	}

	dev->tx_queue_len = MVPP2_MAX_TXD_MAX;
	dev->watchdog_timeo = 5 * HZ;
	dev->netdev_ops = &mvpp2_netdev_ops;
	dev->ethtool_ops = &mvpp2_eth_tool_ops;

	port = netdev_priv(dev);
	port->dev = dev;
	port->fwnode = port_fwnode;
	port->ntxqs = ntxqs;
	port->nrxqs = nrxqs;
	port->priv = priv;
	port->has_tx_irqs = has_tx_irqs;
	port->flags = flags;

	err = mvpp2_queue_vectors_init(port, port_node);
	if (err)
		goto err_free_netdev;

	if (port_node)
		port->port_irq = of_irq_get_byname(port_node, "link");
	else
		port->port_irq = fwnode_irq_get(port_fwnode, port->nqvecs + 1);
	if (port->port_irq == -EPROBE_DEFER) {
		err = -EPROBE_DEFER;
		goto err_deinit_qvecs;
	}
	if (port->port_irq <= 0)
		/* the link irq is optional */
		port->port_irq = 0;

	if (fwnode_property_read_bool(port_fwnode, "marvell,loopback"))
		port->flags |= MVPP2_F_LOOPBACK;

	port->id = id;
	if (priv->hw_version == MVPP21)
		port->first_rxq = port->id * port->nrxqs;
	else
		port->first_rxq = port->id * priv->max_port_rxqs;

	port->of_node = port_node;
	port->phy_interface = phy_mode;
	port->comphy = comphy;

	if (priv->hw_version == MVPP21) {
		port->base = devm_platform_ioremap_resource(pdev, 2 + id);
		if (IS_ERR(port->base)) {
			err = PTR_ERR(port->base);
			goto err_deinit_qvecs;
		}

		port->stats_base = port->priv->lms_base +
				   MVPP21_MIB_COUNTERS_OFFSET +
				   port->gop_id * MVPP21_MIB_COUNTERS_PORT_SZ;
	} else {
		if (fwnode_property_read_u32(port_fwnode, "gop-port-id",
					     &port->gop_id)) {
			err = -EINVAL;
			dev_err(&pdev->dev, "missing gop-port-id value\n");
			goto err_deinit_qvecs;
		}

		port->base = priv->iface_base + MVPP22_GMAC_BASE(port->gop_id);
		port->stats_base = port->priv->iface_base +
				   MVPP22_MIB_COUNTERS_OFFSET +
				   port->gop_id * MVPP22_MIB_COUNTERS_PORT_SZ;

		/* We may want a property to describe whether we should use
		 * MAC hardware timestamping.
		 */
		if (priv->tai)
			port->hwtstamp = true;
	}

	/* Alloc per-cpu and ethtool stats */
	port->stats = netdev_alloc_pcpu_stats(struct mvpp2_pcpu_stats);
	if (!port->stats) {
		err = -ENOMEM;
		goto err_free_irq;
	}

	port->ethtool_stats = devm_kcalloc(&pdev->dev,
					   MVPP2_N_ETHTOOL_STATS(ntxqs, nrxqs),
					   sizeof(u64), GFP_KERNEL);
	if (!port->ethtool_stats) {
		err = -ENOMEM;
		goto err_free_stats;
	}

	mutex_init(&port->gather_stats_lock);
	INIT_DELAYED_WORK(&port->stats_work, mvpp2_gather_hw_statistics);

	err = mvpp2_port_copy_mac_addr(dev, priv, port_fwnode, &mac_from);
	if (err < 0)
		goto err_free_stats;

	port->tx_ring_size = MVPP2_MAX_TXD_DFLT;
	port->rx_ring_size = MVPP2_MAX_RXD_DFLT;
	SET_NETDEV_DEV(dev, &pdev->dev);

	err = mvpp2_port_init(port);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to init port %d\n", id);
		goto err_free_stats;
	}

	mvpp2_port_periodic_xon_disable(port);

	mvpp2_mac_reset_assert(port);
	mvpp22_pcs_reset_assert(port);

	port->pcpu = alloc_percpu(struct mvpp2_port_pcpu);
	if (!port->pcpu) {
		err = -ENOMEM;
		goto err_free_txq_pcpu;
	}

	if (!port->has_tx_irqs) {
		for (thread = 0; thread < priv->nthreads; thread++) {
			port_pcpu = per_cpu_ptr(port->pcpu, thread);

			hrtimer_init(&port_pcpu->tx_done_timer, CLOCK_MONOTONIC,
				     HRTIMER_MODE_REL_PINNED_SOFT);
			port_pcpu->tx_done_timer.function = mvpp2_hr_timer_cb;
			port_pcpu->timer_scheduled = false;
			port_pcpu->dev = dev;
		}
	}

	features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
		   NETIF_F_TSO;
	dev->features = features | NETIF_F_RXCSUM;
	dev->hw_features |= features | NETIF_F_RXCSUM | NETIF_F_GRO |
			    NETIF_F_HW_VLAN_CTAG_FILTER;

	if (mvpp22_rss_is_supported(port)) {
		dev->hw_features |= NETIF_F_RXHASH;
		dev->features |= NETIF_F_NTUPLE;
	}

	if (!port->priv->percpu_pools)
		mvpp2_set_hw_csum(port, port->pool_long->id);
	else if (port->ntxqs >= num_possible_cpus() * 2)
		dev->xdp_features = NETDEV_XDP_ACT_BASIC |
				    NETDEV_XDP_ACT_REDIRECT |
				    NETDEV_XDP_ACT_NDO_XMIT;

	dev->vlan_features |= features;
	netif_set_tso_max_segs(dev, MVPP2_MAX_TSO_SEGS);

	dev->priv_flags |= IFF_UNICAST_FLT;

	/* MTU range: 68 - 9704 */
	dev->min_mtu = ETH_MIN_MTU;
	/* 9704 == 9728 - 20 and rounding to 8 */
	dev->max_mtu = MVPP2_BM_JUMBO_PKT_SIZE;
	device_set_node(&dev->dev, port_fwnode);

	port->pcs_gmac.ops = &mvpp2_phylink_gmac_pcs_ops;
	port->pcs_gmac.neg_mode = true;
	port->pcs_xlg.ops = &mvpp2_phylink_xlg_pcs_ops;
	port->pcs_xlg.neg_mode = true;

	if (!mvpp2_use_acpi_compat_mode(port_fwnode)) {
		port->phylink_config.dev = &dev->dev;
		port->phylink_config.type = PHYLINK_NETDEV;
		port->phylink_config.mac_capabilities =
			MAC_2500FD | MAC_1000FD | MAC_100 | MAC_10;

		if (port->priv->global_tx_fc)
			port->phylink_config.mac_capabilities |=
				MAC_SYM_PAUSE | MAC_ASYM_PAUSE;

		if (mvpp2_port_supports_xlg(port)) {
			/* If a COMPHY is present, we can support any of
			 * the serdes modes and switch between them.
			 */
			if (comphy) {
				__set_bit(PHY_INTERFACE_MODE_5GBASER,
					  port->phylink_config.supported_interfaces);
				__set_bit(PHY_INTERFACE_MODE_10GBASER,
					  port->phylink_config.supported_interfaces);
				__set_bit(PHY_INTERFACE_MODE_XAUI,
					  port->phylink_config.supported_interfaces);
			} else if (phy_mode == PHY_INTERFACE_MODE_5GBASER) {
				__set_bit(PHY_INTERFACE_MODE_5GBASER,
					  port->phylink_config.supported_interfaces);
			} else if (phy_mode == PHY_INTERFACE_MODE_10GBASER) {
				__set_bit(PHY_INTERFACE_MODE_10GBASER,
					  port->phylink_config.supported_interfaces);
			} else if (phy_mode == PHY_INTERFACE_MODE_XAUI) {
				__set_bit(PHY_INTERFACE_MODE_XAUI,
					  port->phylink_config.supported_interfaces);
			}

			if (comphy)
				port->phylink_config.mac_capabilities |=
					MAC_10000FD | MAC_5000FD;
			else if (phy_mode == PHY_INTERFACE_MODE_5GBASER)
				port->phylink_config.mac_capabilities |=
					MAC_5000FD;
			else
				port->phylink_config.mac_capabilities |=
					MAC_10000FD;
		}

		if (mvpp2_port_supports_rgmii(port)) {
			phy_interface_set_rgmii(port->phylink_config.supported_interfaces);
			__set_bit(PHY_INTERFACE_MODE_MII,
				  port->phylink_config.supported_interfaces);
		}

		if (comphy) {
			/* If a COMPHY is present, we can support any of the
			 * serdes modes and switch between them.
			 */
			__set_bit(PHY_INTERFACE_MODE_SGMII,
				  port->phylink_config.supported_interfaces);
			__set_bit(PHY_INTERFACE_MODE_1000BASEX,
				  port->phylink_config.supported_interfaces);
			__set_bit(PHY_INTERFACE_MODE_2500BASEX,
				  port->phylink_config.supported_interfaces);
		} else if (phy_mode == PHY_INTERFACE_MODE_2500BASEX) {
			/* No COMPHY, with only 2500BASE-X mode supported */
			__set_bit(PHY_INTERFACE_MODE_2500BASEX,
				  port->phylink_config.supported_interfaces);
		} else if (phy_mode == PHY_INTERFACE_MODE_1000BASEX ||
			   phy_mode == PHY_INTERFACE_MODE_SGMII) {
			/* No COMPHY, we can switch between 1000BASE-X and
			 * SGMII
			 */
			__set_bit(PHY_INTERFACE_MODE_1000BASEX,
				  port->phylink_config.supported_interfaces);
			__set_bit(PHY_INTERFACE_MODE_SGMII,
				  port->phylink_config.supported_interfaces);
		}

		phylink = phylink_create(&port->phylink_config, port_fwnode,
					 phy_mode, &mvpp2_phylink_ops);
		if (IS_ERR(phylink)) {
			err = PTR_ERR(phylink);
			goto err_free_port_pcpu;
		}
		port->phylink = phylink;
	} else {
		dev_warn(&pdev->dev, "Use link irqs for port#%d. FW update required\n", port->id);
		port->phylink = NULL;
	}

	/* Cycle the comphy to power it down, saving 270mW per port -
	 * don't worry about an error powering it up. When the comphy
	 * driver does this, we can remove this code.
	 */
	if (port->comphy) {
		err = mvpp22_comphy_init(port, port->phy_interface);
		if (err == 0)
			phy_power_off(port->comphy);
	}

	err = register_netdev(dev);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to register netdev\n");
		goto err_phylink;
	}
	netdev_info(dev, "Using %s mac address %pM\n", mac_from, dev->dev_addr);

	priv->port_list[priv->port_count++] = port;

	return 0;

err_phylink:
	if (port->phylink)
		phylink_destroy(port->phylink);
err_free_port_pcpu:
	free_percpu(port->pcpu);
err_free_txq_pcpu:
	for (i = 0; i < port->ntxqs; i++)
		free_percpu(port->txqs[i]->pcpu);
err_free_stats:
	free_percpu(port->stats);
err_free_irq:
	if (port->port_irq)
		irq_dispose_mapping(port->port_irq);
err_deinit_qvecs:
	mvpp2_queue_vectors_deinit(port);
err_free_netdev:
	free_netdev(dev);
	return err;
}

/* Ports removal routine */
static void mvpp2_port_remove(struct mvpp2_port *port)
{
	int i;

	unregister_netdev(port->dev);
	if (port->phylink)
		phylink_destroy(port->phylink);
	free_percpu(port->pcpu);
	free_percpu(port->stats);
	for (i = 0; i < port->ntxqs; i++)
		free_percpu(port->txqs[i]->pcpu);
	mvpp2_queue_vectors_deinit(port);
	if (port->port_irq)
		irq_dispose_mapping(port->port_irq);
	free_netdev(port->dev);
}

/* Initialize decoding windows */
static void mvpp2_conf_mbus_windows(const struct mbus_dram_target_info *dram,
				    struct mvpp2 *priv)
{
	u32 win_enable;
	int i;

	for (i = 0; i < 6; i++) {
		mvpp2_write(priv, MVPP2_WIN_BASE(i), 0);
		mvpp2_write(priv, MVPP2_WIN_SIZE(i), 0);

		if (i < 4)
			mvpp2_write(priv, MVPP2_WIN_REMAP(i), 0);
	}

	win_enable = 0;

	for (i = 0; i < dram->num_cs; i++) {
		const struct mbus_dram_window *cs = dram->cs + i;

		mvpp2_write(priv, MVPP2_WIN_BASE(i),
			    (cs->base & 0xffff0000) | (cs->mbus_attr << 8) |
			    dram->mbus_dram_target_id);

		mvpp2_write(priv, MVPP2_WIN_SIZE(i),
			    (cs->size - 1) & 0xffff0000);

		win_enable |= (1 << i);
	}

	mvpp2_write(priv, MVPP2_BASE_ADDR_ENABLE, win_enable);
}

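/* Editor's illustration of the MVPP2_WIN_BASE() encoding above: for a
 * hypothetical DRAM chip-select at base 0x00000000 with mbus_attr 0x0e
 * and target id 0x0, the value written would be
 *	(0x00000000 & 0xffff0000) | (0x0e << 8) | 0x0 = 0x00000e00
 * i.e. bits [31:16] carry the window base, bits [15:8] the attribute and
 * bits [7:0] the target id. The example values are illustrative only.
 */
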
/* Initialize Rx FIFO's */
static void mvpp2_rx_fifo_init(struct mvpp2 *priv)
{
	int port;

	for (port = 0; port < MVPP2_MAX_PORTS; port++) {
		mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(port),
			    MVPP2_RX_FIFO_PORT_DATA_SIZE_4KB);
		mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(port),
			    MVPP2_RX_FIFO_PORT_ATTR_SIZE_4KB);
	}

	mvpp2_write(priv, MVPP2_RX_MIN_PKT_SIZE_REG,
		    MVPP2_RX_FIFO_PORT_MIN_PKT);
	mvpp2_write(priv, MVPP2_RX_FIFO_INIT_REG, 0x1);
}

static void mvpp22_rx_fifo_set_hw(struct mvpp2 *priv, int port, int data_size)
{
	int attr_size = MVPP2_RX_FIFO_PORT_ATTR_SIZE(data_size);

	mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(port), data_size);
	mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(port), attr_size);
}

/* Initialize Rx FIFO's: the total FIFO size is 48kB on PPv2.2 and PPv2.3.
 * 4kB fixed space must be assigned for the loopback port.
 * Redistribute the remaining available 44kB space among all active ports.
 * Guarantee a minimum of 32kB for the 10G port and 8kB for port 1, which is
 * capable of a 2.5G SGMII link.
 */
static void mvpp22_rx_fifo_init(struct mvpp2 *priv)
{
	int remaining_ports_count;
	unsigned long port_map;
	int size_remainder;
	int port, size;

	/* The loopback requires fixed 4kB of the FIFO space assignment. */
	mvpp22_rx_fifo_set_hw(priv, MVPP2_LOOPBACK_PORT_INDEX,
			      MVPP2_RX_FIFO_PORT_DATA_SIZE_4KB);
	port_map = priv->port_map & ~BIT(MVPP2_LOOPBACK_PORT_INDEX);

	/* Set RX FIFO size to 0 for inactive ports. */
	for_each_clear_bit(port, &port_map, MVPP2_LOOPBACK_PORT_INDEX)
		mvpp22_rx_fifo_set_hw(priv, port, 0);

	/* Assign remaining RX FIFO space among all active ports. */
	size_remainder = MVPP2_RX_FIFO_PORT_DATA_SIZE_44KB;
	remaining_ports_count = hweight_long(port_map);

	for_each_set_bit(port, &port_map, MVPP2_LOOPBACK_PORT_INDEX) {
		if (remaining_ports_count == 1)
			size = size_remainder;
		else if (port == 0)
			size = max(size_remainder / remaining_ports_count,
				   MVPP2_RX_FIFO_PORT_DATA_SIZE_32KB);
		else if (port == 1)
			size = max(size_remainder / remaining_ports_count,
				   MVPP2_RX_FIFO_PORT_DATA_SIZE_8KB);
		else
			size = size_remainder / remaining_ports_count;

		size_remainder -= size;
		remaining_ports_count--;

		mvpp22_rx_fifo_set_hw(priv, port, size);
	}

	mvpp2_write(priv, MVPP2_RX_MIN_PKT_SIZE_REG,
		    MVPP2_RX_FIFO_PORT_MIN_PKT);
	mvpp2_write(priv, MVPP2_RX_FIFO_INIT_REG, 0x1);
}

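/* Editor's worked example of the split above (values illustrative): with
 * ports 0, 1 and 2 all active, 44kB remain after the loopback port's
 * fixed 4kB, and the loop assigns:
 *
 *	port 0: max(44kB / 3, 32kB) = 32kB	(12kB left, 2 ports remain)
 *	port 1: max(12kB / 2,  8kB) =  8kB	( 4kB left, 1 port remains)
 *	port 2: remainder           =  4kB
 *
 * so the 10G-capable port 0 always keeps its guaranteed 32kB.
 */
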
/* Configure Rx FIFO Flow control thresholds */
static void mvpp23_rx_fifo_fc_set_tresh(struct mvpp2 *priv)
{
	int port, val;

	/* Port 0: maximum speed -10Gb/s port
	 *	   required by spec RX FIFO threshold 9KB
	 * Port 1: maximum speed -5Gb/s port
	 *	   required by spec RX FIFO threshold 4KB
	 * Port 2: maximum speed -1Gb/s port
	 *	   required by spec RX FIFO threshold 2KB
	 */

	/* Without loopback port */
	for (port = 0; port < (MVPP2_MAX_PORTS - 1); port++) {
		if (port == 0) {
			val = (MVPP23_PORT0_FIFO_TRSH / MVPP2_RX_FC_TRSH_UNIT)
				<< MVPP2_RX_FC_TRSH_OFFS;
			val &= MVPP2_RX_FC_TRSH_MASK;
			mvpp2_write(priv, MVPP2_RX_FC_REG(port), val);
		} else if (port == 1) {
			val = (MVPP23_PORT1_FIFO_TRSH / MVPP2_RX_FC_TRSH_UNIT)
				<< MVPP2_RX_FC_TRSH_OFFS;
			val &= MVPP2_RX_FC_TRSH_MASK;
			mvpp2_write(priv, MVPP2_RX_FC_REG(port), val);
		} else {
			val = (MVPP23_PORT2_FIFO_TRSH / MVPP2_RX_FC_TRSH_UNIT)
				<< MVPP2_RX_FC_TRSH_OFFS;
			val &= MVPP2_RX_FC_TRSH_MASK;
			mvpp2_write(priv, MVPP2_RX_FC_REG(port), val);
		}
	}
}

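/* Editor's note on the arithmetic above, assuming MVPP2_RX_FC_TRSH_UNIT is
 * 256 bytes and MVPP2_RX_FC_TRSH_OFFS is 16 (check mvpp2.h for the
 * authoritative values): the 9kB port-0 threshold would be encoded as
 *	(9216 / 256) << 16 = 36 << 16 = 0x00240000
 * which is then clamped by MVPP2_RX_FC_TRSH_MASK before being written.
 */
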
/* Enable/disable Rx FIFO flow control for a port */
void mvpp23_rx_fifo_fc_en(struct mvpp2 *priv, int port, bool en)
{
	int val;

	val = mvpp2_read(priv, MVPP2_RX_FC_REG(port));

	if (en)
		val |= MVPP2_RX_FC_EN;
	else
		val &= ~MVPP2_RX_FC_EN;

	mvpp2_write(priv, MVPP2_RX_FC_REG(port), val);
}

static void mvpp22_tx_fifo_set_hw(struct mvpp2 *priv, int port, int size)
{
	int threshold = MVPP2_TX_FIFO_THRESHOLD(size);

	mvpp2_write(priv, MVPP22_TX_FIFO_SIZE_REG(port), size);
	mvpp2_write(priv, MVPP22_TX_FIFO_THRESH_REG(port), threshold);
}

/* Initialize TX FIFO's: the total FIFO size is 19kB on PPv2.2 and PPv2.3.
 * 1kB fixed space must be assigned for the loopback port.
 * Redistribute the remaining available 18kB space among all active ports.
 * The 10G interface should use 10kB (which is the maximum possible size
 * per single port).
 */
static void mvpp22_tx_fifo_init(struct mvpp2 *priv)
{
	int remaining_ports_count;
	unsigned long port_map;
	int size_remainder;
	int port, size;

	/* The loopback requires fixed 1kB of the FIFO space assignment. */
	mvpp22_tx_fifo_set_hw(priv, MVPP2_LOOPBACK_PORT_INDEX,
			      MVPP22_TX_FIFO_DATA_SIZE_1KB);
	port_map = priv->port_map & ~BIT(MVPP2_LOOPBACK_PORT_INDEX);

	/* Set TX FIFO size to 0 for inactive ports. */
	for_each_clear_bit(port, &port_map, MVPP2_LOOPBACK_PORT_INDEX)
		mvpp22_tx_fifo_set_hw(priv, port, 0);

	/* Assign remaining TX FIFO space among all active ports. */
	size_remainder = MVPP22_TX_FIFO_DATA_SIZE_18KB;
	remaining_ports_count = hweight_long(port_map);

	for_each_set_bit(port, &port_map, MVPP2_LOOPBACK_PORT_INDEX) {
		if (remaining_ports_count == 1)
			size = min(size_remainder,
				   MVPP22_TX_FIFO_DATA_SIZE_10KB);
		else if (port == 0)
			size = MVPP22_TX_FIFO_DATA_SIZE_10KB;
		else
			size = size_remainder / remaining_ports_count;

		size_remainder -= size;
		remaining_ports_count--;

		mvpp22_tx_fifo_set_hw(priv, port, size);
	}
}

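/* Editor's worked example of the split above (values illustrative): with
 * ports 0, 1 and 2 all active, 18kB remain after the loopback port's
 * fixed 1kB, and the loop assigns:
 *
 *	port 0: 10kB			(8kB left, 2 ports remain)
 *	port 1: 8kB / 2 = 4kB		(4kB left, 1 port remains)
 *	port 2: min(4kB, 10kB) = 4kB
 */
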
static void mvpp2_axi_init(struct mvpp2 *priv)
{
	u32 val, rdval, wrval;

	mvpp2_write(priv, MVPP22_BM_ADDR_HIGH_RLS_REG, 0x0);

	/* AXI Bridge Configuration */

	rdval = MVPP22_AXI_CODE_CACHE_RD_CACHE
		<< MVPP22_AXI_ATTR_CACHE_OFFS;
	rdval |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
		<< MVPP22_AXI_ATTR_DOMAIN_OFFS;

	wrval = MVPP22_AXI_CODE_CACHE_WR_CACHE
		<< MVPP22_AXI_ATTR_CACHE_OFFS;
	wrval |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
		<< MVPP22_AXI_ATTR_DOMAIN_OFFS;

	/* BM */
	mvpp2_write(priv, MVPP22_AXI_BM_WR_ATTR_REG, wrval);
	mvpp2_write(priv, MVPP22_AXI_BM_RD_ATTR_REG, rdval);

	/* Descriptor */
	mvpp2_write(priv, MVPP22_AXI_AGGRQ_DESCR_RD_ATTR_REG, rdval);
	mvpp2_write(priv, MVPP22_AXI_TXQ_DESCR_WR_ATTR_REG, wrval);
	mvpp2_write(priv, MVPP22_AXI_TXQ_DESCR_RD_ATTR_REG, rdval);
	mvpp2_write(priv, MVPP22_AXI_RXQ_DESCR_WR_ATTR_REG, wrval);

	/* Buffer Data */
	mvpp2_write(priv, MVPP22_AXI_TX_DATA_RD_ATTR_REG, rdval);
	mvpp2_write(priv, MVPP22_AXI_RX_DATA_WR_ATTR_REG, wrval);

	val = MVPP22_AXI_CODE_CACHE_NON_CACHE
		<< MVPP22_AXI_CODE_CACHE_OFFS;
	val |= MVPP22_AXI_CODE_DOMAIN_SYSTEM
		<< MVPP22_AXI_CODE_DOMAIN_OFFS;
	mvpp2_write(priv, MVPP22_AXI_RD_NORMAL_CODE_REG, val);
	mvpp2_write(priv, MVPP22_AXI_WR_NORMAL_CODE_REG, val);

	val = MVPP22_AXI_CODE_CACHE_RD_CACHE
		<< MVPP22_AXI_CODE_CACHE_OFFS;
	val |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
		<< MVPP22_AXI_CODE_DOMAIN_OFFS;

	mvpp2_write(priv, MVPP22_AXI_RD_SNOOP_CODE_REG, val);

	val = MVPP22_AXI_CODE_CACHE_WR_CACHE
		<< MVPP22_AXI_CODE_CACHE_OFFS;
	val |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
		<< MVPP22_AXI_CODE_DOMAIN_OFFS;

	mvpp2_write(priv, MVPP22_AXI_WR_SNOOP_CODE_REG, val);
}

/* Initialize network controller common part HW */
static int mvpp2_init(struct platform_device *pdev, struct mvpp2 *priv)
{
	const struct mbus_dram_target_info *dram_target_info;
	int err, i;
	u32 val;

	/* MBUS windows configuration */
	dram_target_info = mv_mbus_dram_info();
	if (dram_target_info)
		mvpp2_conf_mbus_windows(dram_target_info, priv);

	if (priv->hw_version >= MVPP22)
		mvpp2_axi_init(priv);

	/* Disable HW PHY polling */
	if (priv->hw_version == MVPP21) {
		val = readl(priv->lms_base + MVPP2_PHY_AN_CFG0_REG);
		val |= MVPP2_PHY_AN_STOP_SMI0_MASK;
		writel(val, priv->lms_base + MVPP2_PHY_AN_CFG0_REG);
	} else {
		val = readl(priv->iface_base + MVPP22_SMI_MISC_CFG_REG);
		val &= ~MVPP22_SMI_POLLING_EN;
		writel(val, priv->iface_base + MVPP22_SMI_MISC_CFG_REG);
	}

	/* Allocate and initialize aggregated TXQs */
	priv->aggr_txqs = devm_kcalloc(&pdev->dev, MVPP2_MAX_THREADS,
				       sizeof(*priv->aggr_txqs),
				       GFP_KERNEL);
	if (!priv->aggr_txqs)
		return -ENOMEM;

	for (i = 0; i < MVPP2_MAX_THREADS; i++) {
		priv->aggr_txqs[i].id = i;
		priv->aggr_txqs[i].size = MVPP2_AGGR_TXQ_SIZE;
		err = mvpp2_aggr_txq_init(pdev, &priv->aggr_txqs[i], i, priv);
		if (err < 0)
			return err;
	}

	/* Fifo Init */
	if (priv->hw_version == MVPP21) {
		mvpp2_rx_fifo_init(priv);
	} else {
		mvpp22_rx_fifo_init(priv);
		mvpp22_tx_fifo_init(priv);
		if (priv->hw_version == MVPP23)
			mvpp23_rx_fifo_fc_set_tresh(priv);
	}

	if (priv->hw_version == MVPP21)
		writel(MVPP2_EXT_GLOBAL_CTRL_DEFAULT,
		       priv->lms_base + MVPP2_MNG_EXTENDED_GLOBAL_CTRL_REG);

	/* Allow cache snoop when transmitting packets */
	mvpp2_write(priv, MVPP2_TX_SNOOP_REG, 0x1);

	/* Buffer Manager initialization */
	err = mvpp2_bm_init(&pdev->dev, priv);
	if (err < 0)
		return err;

	/* Parser default initialization */
	err = mvpp2_prs_default_init(pdev, priv);
	if (err < 0)
		return err;

	/* Classifier default initialization */
	mvpp2_cls_init(priv);

	return 0;
}

static int mvpp2_get_sram(struct platform_device *pdev,
			  struct mvpp2 *priv)
{
	struct resource *res;
	void __iomem *base;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
	if (!res) {
		if (has_acpi_companion(&pdev->dev))
			dev_warn(&pdev->dev, "ACPI is too old, Flow control not supported\n");
		else
			dev_warn(&pdev->dev, "DT is too old, Flow control not supported\n");
		return 0;
	}

	base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(base))
		return PTR_ERR(base);

	priv->cm3_base = base;
	return 0;
}

static int mvpp2_probe(struct platform_device *pdev)
{
	struct fwnode_handle *fwnode = pdev->dev.fwnode;
	struct fwnode_handle *port_fwnode;
	struct mvpp2 *priv;
	struct resource *res;
	void __iomem *base;
	u32 i, shared;
	int err;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->hw_version = (unsigned long)device_get_match_data(&pdev->dev);

	/* multi queue mode isn't supported on PPV2.1, fall back to single
	 * mode
	 */
	if (priv->hw_version == MVPP21)
		queue_mode = MVPP2_QDIST_SINGLE_MODE;

	base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(base))
		return PTR_ERR(base);

	if (priv->hw_version == MVPP21) {
		priv->lms_base = devm_platform_ioremap_resource(pdev, 1);
		if (IS_ERR(priv->lms_base))
			return PTR_ERR(priv->lms_base);
	} else {
		res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
		if (!res) {
			dev_err(&pdev->dev, "Invalid resource\n");
			return -EINVAL;
		}
		if (has_acpi_companion(&pdev->dev)) {
			/* In case the MDIO memory region is declared in
			 * the ACPI, it can already appear as 'in-use'
			 * in the OS. Because it is overlapped by the
			 * second region of the network controller, make
			 * sure it is released before requesting it again.
			 * The mvpp2 driver takes care to avoid concurrent
			 * access to this memory region.
			 */
			release_resource(res);
		}
		priv->iface_base = devm_ioremap_resource(&pdev->dev, res);
		if (IS_ERR(priv->iface_base))
			return PTR_ERR(priv->iface_base);

		/* Map CM3 SRAM */
		err = mvpp2_get_sram(pdev, priv);
		if (err)
			dev_warn(&pdev->dev, "Failed to alloc CM3 SRAM\n");

		/* Enable global Flow Control only if the SRAM handle is
		 * not NULL.
		 */
		if (priv->cm3_base)
			priv->global_tx_fc = true;
	}

	if (priv->hw_version >= MVPP22 && dev_of_node(&pdev->dev)) {
		priv->sysctrl_base =
			syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
							"marvell,system-controller");
		if (IS_ERR(priv->sysctrl_base))
			/* The system controller regmap is optional for dt
			 * compatibility reasons. When not provided, the
			 * configuration of the GoP relies on the
			 * firmware/bootloader.
			 */
			priv->sysctrl_base = NULL;
	}

	if (priv->hw_version >= MVPP22 &&
	    mvpp2_get_nrxqs(priv) * 2 <= MVPP2_BM_MAX_POOLS)
		priv->percpu_pools = 1;

	mvpp2_setup_bm_pool();

	priv->nthreads = min_t(unsigned int, num_present_cpus(),
			       MVPP2_MAX_THREADS);

	shared = num_present_cpus() - priv->nthreads;
	if (shared > 0)
		bitmap_set(&priv->lock_map, 0,
			   min_t(int, shared, MVPP2_MAX_THREADS));

	for (i = 0; i < MVPP2_MAX_THREADS; i++) {
		u32 addr_space_sz;

		addr_space_sz = (priv->hw_version == MVPP21 ?
				 MVPP21_ADDR_SPACE_SZ : MVPP22_ADDR_SPACE_SZ);
		priv->swth_base[i] = base + i * addr_space_sz;
	}

	if (priv->hw_version == MVPP21)
		priv->max_port_rxqs = 8;
	else
		priv->max_port_rxqs = 32;

	if (dev_of_node(&pdev->dev)) {
		priv->pp_clk = devm_clk_get(&pdev->dev, "pp_clk");
		if (IS_ERR(priv->pp_clk))
			return PTR_ERR(priv->pp_clk);
		err = clk_prepare_enable(priv->pp_clk);
		if (err < 0)
			return err;

		priv->gop_clk = devm_clk_get(&pdev->dev, "gop_clk");
		if (IS_ERR(priv->gop_clk)) {
			err = PTR_ERR(priv->gop_clk);
			goto err_pp_clk;
		}
		err = clk_prepare_enable(priv->gop_clk);
		if (err < 0)
			goto err_pp_clk;

		if (priv->hw_version >= MVPP22) {
			priv->mg_clk = devm_clk_get(&pdev->dev, "mg_clk");
			if (IS_ERR(priv->mg_clk)) {
				err = PTR_ERR(priv->mg_clk);
				goto err_gop_clk;
			}

			err = clk_prepare_enable(priv->mg_clk);
			if (err < 0)
				goto err_gop_clk;

			priv->mg_core_clk = devm_clk_get_optional(&pdev->dev, "mg_core_clk");
			if (IS_ERR(priv->mg_core_clk)) {
				err = PTR_ERR(priv->mg_core_clk);
				goto err_mg_clk;
			}
			err = clk_prepare_enable(priv->mg_core_clk);
			if (err < 0)
				goto err_mg_clk;
		}

		priv->axi_clk = devm_clk_get_optional(&pdev->dev, "axi_clk");
		if (IS_ERR(priv->axi_clk)) {
			err = PTR_ERR(priv->axi_clk);
			goto err_mg_core_clk;
		}
		err = clk_prepare_enable(priv->axi_clk);
		if (err < 0)
			goto err_mg_core_clk;

		/* Get system's tclk rate */
		priv->tclk = clk_get_rate(priv->pp_clk);
	} else {
		err = device_property_read_u32(&pdev->dev, "clock-frequency", &priv->tclk);
		if (err) {
			dev_err(&pdev->dev, "missing clock-frequency value\n");
			return err;
		}
	}

	if (priv->hw_version >= MVPP22) {
		err = dma_set_mask(&pdev->dev, MVPP2_DESC_DMA_MASK);
		if (err)
			goto err_axi_clk;
		/* Sadly, the BM pools all share the same register to
		 * store the high 32 bits of their address. So they
		 * must all have the same high 32 bits, which forces
		 * us to restrict coherent memory to DMA_BIT_MASK(32).
		 */
		err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (err)
			goto err_axi_clk;
	}

	/* Map DTS-active ports. Should be done before FIFO mvpp2_init */
	fwnode_for_each_available_child_node(fwnode, port_fwnode) {
		if (!fwnode_property_read_u32(port_fwnode, "port-id", &i))
			priv->port_map |= BIT(i);
	}

	if (mvpp2_read(priv, MVPP2_VER_ID_REG) == MVPP2_VER_PP23)
		priv->hw_version = MVPP23;

	/* Init mss lock */
	spin_lock_init(&priv->mss_spinlock);

	/* Initialize network controller */
	err = mvpp2_init(pdev, priv);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to initialize controller\n");
		goto err_axi_clk;
	}

	err = mvpp22_tai_probe(&pdev->dev, priv);
	if (err < 0)
		goto err_axi_clk;

	/* Initialize ports */
	fwnode_for_each_available_child_node(fwnode, port_fwnode) {
		err = mvpp2_port_probe(pdev, port_fwnode, priv);
		if (err < 0)
			goto err_port_probe;
	}

	if (priv->port_count == 0) {
		dev_err(&pdev->dev, "no ports enabled\n");
		err = -ENODEV;
		goto err_axi_clk;
	}

	/* Statistics must be gathered regularly because some of them (like
	 * packets counters) are 32-bit registers and could overflow quite
	 * quickly. For instance, a 10Gb link used at full bandwidth with the
	 * smallest packets (64B) wraps a 32-bit byte counter in a few seconds
	 * and a 32-bit packet counter within minutes. Use a workqueue to
	 * periodically fold them into 64-bit counters.
	 */
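	/* Editor's arithmetic sketch for the comment above: at 10Gb/s,
	 * minimum-size frames occupy 64B + 20B of preamble and inter-frame
	 * gap = 84B = 672 bits on the wire, i.e. about 14.88M frames/s.
	 * A 32-bit octet counter then wraps in roughly
	 * 2^32 / (64 * 14.88e6) ~= 4.5 seconds, and a 32-bit packet counter
	 * in 2^32 / 14.88e6 ~= 5 minutes - both far too fast to rely on
	 * occasional userspace polling.
	 */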
	snprintf(priv->queue_name, sizeof(priv->queue_name),
		 "stats-wq-%s%s", netdev_name(priv->port_list[0]->dev),
		 priv->port_count > 1 ? "+" : "");
	priv->stats_queue = create_singlethread_workqueue(priv->queue_name);
	if (!priv->stats_queue) {
		err = -ENOMEM;
		goto err_port_probe;
	}

	if (priv->global_tx_fc && priv->hw_version >= MVPP22) {
		err = mvpp2_enable_global_fc(priv);
		if (err)
			dev_warn(&pdev->dev, "Minimum of CM3 firmware 18.09 and chip revision B0 required for flow control\n");
	}

	mvpp2_dbgfs_init(priv, pdev->name);

	platform_set_drvdata(pdev, priv);
	return 0;

err_port_probe:
	fwnode_handle_put(port_fwnode);

	i = 0;
	fwnode_for_each_available_child_node(fwnode, port_fwnode) {
		if (priv->port_list[i])
			mvpp2_port_remove(priv->port_list[i]);
		i++;
	}
err_axi_clk:
	clk_disable_unprepare(priv->axi_clk);
err_mg_core_clk:
	clk_disable_unprepare(priv->mg_core_clk);
err_mg_clk:
	clk_disable_unprepare(priv->mg_clk);
err_gop_clk:
	clk_disable_unprepare(priv->gop_clk);
err_pp_clk:
	clk_disable_unprepare(priv->pp_clk);
	return err;
}

static void mvpp2_remove(struct platform_device *pdev)
{
	struct mvpp2 *priv = platform_get_drvdata(pdev);
	struct fwnode_handle *fwnode = pdev->dev.fwnode;
	int i = 0, poolnum = MVPP2_BM_POOLS_NUM;
	struct fwnode_handle *port_fwnode;

	mvpp2_dbgfs_cleanup(priv);

	fwnode_for_each_available_child_node(fwnode, port_fwnode) {
		if (priv->port_list[i]) {
			mutex_destroy(&priv->port_list[i]->gather_stats_lock);
			mvpp2_port_remove(priv->port_list[i]);
		}
		i++;
	}

	destroy_workqueue(priv->stats_queue);

	if (priv->percpu_pools)
		poolnum = mvpp2_get_nrxqs(priv) * 2;

	for (i = 0; i < poolnum; i++) {
		struct mvpp2_bm_pool *bm_pool = &priv->bm_pools[i];

		mvpp2_bm_pool_destroy(&pdev->dev, priv, bm_pool);
	}

	for (i = 0; i < MVPP2_MAX_THREADS; i++) {
		struct mvpp2_tx_queue *aggr_txq = &priv->aggr_txqs[i];

		dma_free_coherent(&pdev->dev,
				  MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE,
				  aggr_txq->descs,
				  aggr_txq->descs_dma);
	}

	if (is_acpi_node(port_fwnode))
		return;

	clk_disable_unprepare(priv->axi_clk);
	clk_disable_unprepare(priv->mg_core_clk);
	clk_disable_unprepare(priv->mg_clk);
	clk_disable_unprepare(priv->pp_clk);
	clk_disable_unprepare(priv->gop_clk);
}

static const struct of_device_id mvpp2_match[] = {
	{
		.compatible = "marvell,armada-375-pp2",
		.data = (void *)MVPP21,
	},
	{
		.compatible = "marvell,armada-7k-pp22",
		.data = (void *)MVPP22,
	},
	{ }
};
MODULE_DEVICE_TABLE(of, mvpp2_match);

static const struct acpi_device_id mvpp2_acpi_match[] = {
	{ "MRVL0110", MVPP22 },
	{ },
};
MODULE_DEVICE_TABLE(acpi, mvpp2_acpi_match);

static struct platform_driver mvpp2_driver = {
	.probe = mvpp2_probe,
	.remove_new = mvpp2_remove,
	.driver = {
		.name = MVPP2_DRIVER_NAME,
		.of_match_table = mvpp2_match,
		.acpi_match_table = ACPI_PTR(mvpp2_acpi_match),
	},
};

static int __init mvpp2_driver_init(void)
{
	return platform_driver_register(&mvpp2_driver);
}
module_init(mvpp2_driver_init);

static void __exit mvpp2_driver_exit(void)
{
	platform_driver_unregister(&mvpp2_driver);
}
module_exit(mvpp2_driver_exit);

MODULE_DESCRIPTION("Marvell PPv2 Ethernet Driver - www.marvell.com");
MODULE_AUTHOR("Marcin Wojtas <mw@semihalf.com>");
MODULE_LICENSE("GPL v2");