/* SPDX-License-Identifier: GPL-2.0 */
/* Marvell OcteonTx2 RVU Ethernet driver
 *
 * Copyright (C) 2020 Marvell International Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifndef OTX2_COMMON_H
#define OTX2_COMMON_H
#include <linux/ethtool.h>
#include <linux/pci.h>
#include <linux/iommu.h>
#include <linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>
#include <linux/timecounter.h>
#include <linux/soc/marvell/octeontx2/asm.h>

#include "otx2_txrx.h"
#include <rvu_trace.h>
/* PCI device IDs */
#define PCI_DEVID_OCTEONTX2_RVU_PF 0xA063
#define PCI_DEVID_OCTEONTX2_RVU_VF 0xA064
#define PCI_DEVID_OCTEONTX2_RVU_AFVF 0xA0F8

#define PCI_SUBSYS_DEVID_96XX_RVU_PFVF 0xB200
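
/* PCI BARs: BAR2 maps the RVU PF/VF CSR space (reg_base below), BAR4
 * the mailbox memory shared with the AF.
 */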
#define PCI_CFG_REG_BAR_NUM 2
#define PCI_MBOX_BAR_NUM 4
enum aura_mapped_qtypes {
	AURA_NIX_RQ,
	AURA_NIX_SQ,
};
/* NIX LF interrupts range */
#define NIX_LF_QINT_VEC_START 0x00
#define NIX_LF_CINT_VEC_START 0x40
#define NIX_LF_GINT_VEC 0x80
#define NIX_LF_ERR_VEC 0x81
#define NIX_LF_POISON_VEC 0x82
/* Send skid of 2000 packets required for CQ size of 4K CQEs. */
#define SEND_CQ_SKID 2000
/* RSS configuration */
struct otx2_rss_ctx {
	u8 ind_tbl[MAX_RSS_INDIR_TBL_SIZE];
};

struct otx2_rss_info {
#define RSS_HASH_KEY_SIZE 44 /* 352 bit key */
	u8 key[RSS_HASH_KEY_SIZE];
	struct otx2_rss_ctx *rss_ctx[MAX_RSS_GROUPS];
};
/* NIX (or NPC) RX errors */
enum otx2_errlvl {
	NPC_ERRLVL_RE,
	NPC_ERRLVL_NIX = 0x0F,
};
enum otx2_errcodes_re {
	/* NPC_ERRLVL_RE errcodes */
	ERRCODE_FCS_RCV = 0x8,
	ERRCODE_UNDERSIZE = 0x10,
	ERRCODE_OVERSIZE = 0x11,
	ERRCODE_OL2_LEN_MISMATCH = 0x12,
	/* NPC_ERRLVL_NIX errcodes */
	ERRCODE_OL3_LEN = 0x10,
	ERRCODE_OL4_LEN = 0x11,
	ERRCODE_OL4_CSUM = 0x12,
	ERRCODE_IL3_LEN = 0x20,
	ERRCODE_IL4_LEN = 0x21,
	ERRCODE_IL4_CSUM = 0x22,
};
enum nix_stat_lf_tx {
	/* ... */
};

enum nix_stat_lf_rx {
	/* ... */
	RX_DRP_L3BCAST = 0xa,
	RX_DRP_L3MCAST = 0xb,
};
struct otx2_dev_stats {
	/* ... */
};

/* Driver counted stats */
struct otx2_drv_stats {
	atomic_t rx_fcs_errs;
	atomic_t rx_oversize_errs;
	atomic_t rx_undersize_errs;
	atomic_t rx_csum_errs;
	atomic_t rx_len_errs;
	atomic_t rx_other_errs;
};
struct mbox {
	struct otx2_mbox mbox;
	struct work_struct mbox_wrk;
	struct otx2_mbox mbox_up;
	struct work_struct mbox_up_wrk;
	struct otx2_nic *pfvf;
	void *bbuf_base; /* Bounce buffer for mbox memory */
	struct mutex lock; /* serialize mailbox access */
	int num_msgs; /* mbox number of messages */
	int up_num_msgs; /* mbox_up number of messages */
};
struct otx2_hw {
	struct pci_dev *pdev;
	struct otx2_rss_info rss_info;

	u32 stack_pg_ptrs; /* No. of ptrs per stack page */
	u32 stack_pg_bytes; /* Size of stack page */

	u16 txschq_list[NIX_TXSCH_LVL_CNT][MAX_TXSCHQ_PER_FUNC];

	/* HW settings, coalescing etc */
	u8 cint_cnt; /* CQ interrupt count */

	u16 npa_msixoff; /* Offset of NPA vectors */
	u16 nix_msixoff; /* Offset of NIX vectors */
	cpumask_var_t *affinity_mask;

	struct otx2_dev_stats dev_stats;
	struct otx2_drv_stats drv_stats;
	u64 cgx_rx_stats[CGX_RX_STATS_COUNT];
	u64 cgx_tx_stats[CGX_TX_STATS_COUNT];
	u64 cgx_fec_corr_blks;
	u64 cgx_fec_uncorr_blks;
	u8 cgx_links; /* No. of CGX links present in HW */
	u8 lbk_links; /* No. of LBK links present in HW */

#define HW_TSO BIT_ULL(0)
#define CN10K_MBOX BIT_ULL(1)
#define CN10K_LMTST BIT_ULL(2)
	unsigned long cap_flag;

#define LMT_LINE_SIZE 128
#define NIX_LMTID_BASE 72 /* RX + TX + XDP */
	void __iomem *lmt_base;
};
struct otx2_vf_config {
	struct delayed_work link_event_work;
	bool intf_down; /* true when the VF interface is down (not configured) */
};
struct flr_work {
	struct work_struct work;
};

struct refill_work {
	struct delayed_work pool_refill_work;
};
struct otx2_ptp {
	struct ptp_clock_info ptp_info;
	struct ptp_clock *ptp_clock;
	struct otx2_nic *nic;

	struct cyclecounter cycle_counter;
	struct timecounter time_counter;
};
#define OTX2_HW_TIMESTAMP_LEN 8
struct otx2_mac_table {
	/* ... */
};
struct otx2_flow_config {
	u16 entry[NPC_MAX_NONCONTIG_ENTRIES];
#define OTX2_MAX_NTUPLE_FLOWS 32
#define OTX2_MAX_UNICAST_FLOWS 8
#define OTX2_MAX_VLAN_FLOWS 1
#define OTX2_MCAM_COUNT (OTX2_MAX_NTUPLE_FLOWS + \
			 OTX2_MAX_UNICAST_FLOWS + \
			 OTX2_MAX_VLAN_FLOWS)
#define OTX2_PER_VF_VLAN_FLOWS 2 /* Rx + Tx per VF */
#define OTX2_VF_VLAN_RX_INDEX 0
#define OTX2_VF_VLAN_TX_INDEX 1
	u32 ntuple_max_flows;
	struct list_head flow_list;
};
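
/* Datapath hooks that differ between OcteonTx2 and CN10K silicon; the
 * probe path wires these to e.g. otx2_sq_aq_init()/cn10k_sq_aq_init()
 * and otx2_aura_freeptr()/cn10k_aura_freeptr().
 */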
struct dev_hw_ops {
	int (*sq_aq_init)(void *dev, u16 qidx, u16 sqb_aura);
	void (*sqe_flush)(void *dev, struct otx2_snd_queue *sq,
			  int size, int qidx);
	void (*refill_pool_ptrs)(void *dev, struct otx2_cq_queue *cq);
	void (*aura_freeptr)(void *dev, int aura, u64 buf);
};
struct otx2_nic {
	void __iomem *reg_base;
	struct net_device *netdev;
	struct dev_hw_ops *hw_ops;

	u16 rbsize; /* Receive buffer size */

#define OTX2_FLAG_RX_TSTAMP_ENABLED BIT_ULL(0)
#define OTX2_FLAG_TX_TSTAMP_ENABLED BIT_ULL(1)
#define OTX2_FLAG_INTF_DOWN BIT_ULL(2)
#define OTX2_FLAG_MCAM_ENTRIES_ALLOC BIT_ULL(3)
#define OTX2_FLAG_NTUPLE_SUPPORT BIT_ULL(4)
#define OTX2_FLAG_UCAST_FLTR_SUPPORT BIT_ULL(5)
#define OTX2_FLAG_RX_VLAN_SUPPORT BIT_ULL(6)
#define OTX2_FLAG_VF_VLAN_SUPPORT BIT_ULL(7)
#define OTX2_FLAG_PF_SHUTDOWN BIT_ULL(8)
#define OTX2_FLAG_RX_PAUSE_ENABLED BIT_ULL(9)
#define OTX2_FLAG_TX_PAUSE_ENABLED BIT_ULL(10)
	u64 flags;
	struct otx2_qset qset;
	struct otx2_hw hw;
	struct pci_dev *pdev;
	struct device *dev;

	struct mbox *mbox_pfvf;
	struct workqueue_struct *mbox_wq;
	struct workqueue_struct *mbox_pfvf_wq;

	u16 pcifunc; /* RVU PF_FUNC */
	u16 bpid[NIX_MAX_BPID_CHAN];
	struct otx2_vf_config *vf_configs;
	struct cgx_link_user_info linfo;

	struct work_struct reset_task;
	struct workqueue_struct *flr_wq;
	struct flr_work *flr_wrk;
	struct refill_work *refill_wrk;
	struct workqueue_struct *otx2_wq;
	struct work_struct rx_mode_work;
	struct otx2_mac_table *mac_table;

	/* Block address of NIX: either BLKADDR_NIX0 or BLKADDR_NIX1 */
	int nix_blkaddr;
	/* LMTST Lines info */

	struct otx2_ptp *ptp;
	struct hwtstamp_config tstamp;

	struct otx2_flow_config *flow_cfg;
};
static inline bool is_otx2_lbkvf(struct pci_dev *pdev)
{
	return pdev->device == PCI_DEVID_OCTEONTX2_RVU_AFVF;
}
static inline bool is_96xx_A0(struct pci_dev *pdev)
{
	return (pdev->revision == 0x00) &&
	       (pdev->subsystem_device == PCI_SUBSYS_DEVID_96XX_RVU_PFVF);
}

static inline bool is_96xx_B0(struct pci_dev *pdev)
{
	return (pdev->revision == 0x01) &&
	       (pdev->subsystem_device == PCI_SUBSYS_DEVID_96XX_RVU_PFVF);
}
/* REVID for PCIe devices.
 * Bits 0..1: minor pass, bits 3..2: major pass,
 * bits 7..4: midr id.
 */
#define PCI_REVISION_ID_96XX 0x00
#define PCI_REVISION_ID_95XX 0x10
#define PCI_REVISION_ID_LOKI 0x20
#define PCI_REVISION_ID_98XX 0x30
#define PCI_REVISION_ID_95XXMM 0x40
static inline bool is_dev_otx2(struct pci_dev *pdev)
{
	u8 midr = pdev->revision & 0xF0;

	return (midr == PCI_REVISION_ID_96XX || midr == PCI_REVISION_ID_95XX ||
		midr == PCI_REVISION_ID_LOKI || midr == PCI_REVISION_ID_98XX ||
		midr == PCI_REVISION_ID_95XXMM);
}
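
/* Per-silicon quirks and capabilities: 96xx A0/B0 lack usable HW TSO,
 * 96xx A0 additionally needs an RQ skid and a fixed 1K RQE count, and
 * post-OcteonTx2 parts (CN10K) gain the new mbox and LMTST schemes.
 */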
static inline void otx2_setup_dev_hw_settings(struct otx2_nic *pfvf)
{
	struct otx2_hw *hw = &pfvf->hw;

	pfvf->hw.cq_time_wait = CQ_TIMER_THRESH_DEFAULT;
	pfvf->hw.cq_ecount_wait = CQ_CQE_THRESH_DEFAULT;
	pfvf->hw.cq_qcount_wait = CQ_QCOUNT_DEFAULT;

	__set_bit(HW_TSO, &hw->cap_flag);

	if (is_96xx_A0(pfvf->pdev)) {
		__clear_bit(HW_TSO, &hw->cap_flag);

		/* Time based irq coalescing is not supported */
		pfvf->hw.cq_qcount_wait = 0x0;

		/* Due to a HW issue, this pass of silicon requires a
		 * minimum of 600 unused CQEs to avoid CQ overflow.
		 */
		pfvf->hw.rq_skid = 600;
		pfvf->qset.rqe_cnt = Q_COUNT(Q_SIZE_1K);
	}
	if (is_96xx_B0(pfvf->pdev))
		__clear_bit(HW_TSO, &hw->cap_flag);

	if (!is_dev_otx2(pfvf->pdev)) {
		__set_bit(CN10K_MBOX, &hw->cap_flag);
		__set_bit(CN10K_LMTST, &hw->cap_flag);
	}
}
/* Register read/write APIs */
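/* Offsets used with these helpers carry a block *type* in the
 * RVU_FUNC_BLKADDR field; otx2_get_regaddr() swaps it for the actual
 * block address (NIX may be NIX0 or NIX1 at runtime) before adding
 * the BAR2 mapping base.
 */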
static inline void __iomem *otx2_get_regaddr(struct otx2_nic *nic, u64 offset)
{
	u64 blkaddr;

	switch ((offset >> RVU_FUNC_BLKADDR_SHIFT) & RVU_FUNC_BLKADDR_MASK) {
	case BLKTYPE_NIX:
		blkaddr = nic->nix_blkaddr;
		break;
	case BLKTYPE_NPA:
		blkaddr = BLKADDR_NPA;
		break;
	default:
		blkaddr = BLKADDR_RVUM;
		break;
	}

	offset &= ~(RVU_FUNC_BLKADDR_MASK << RVU_FUNC_BLKADDR_SHIFT);
	offset |= (blkaddr << RVU_FUNC_BLKADDR_SHIFT);

	return nic->reg_base + offset;
}
static inline void otx2_write64(struct otx2_nic *nic, u64 offset, u64 val)
{
	void __iomem *addr = otx2_get_regaddr(nic, offset);

	writeq(val, addr);
}
static inline u64 otx2_read64(struct otx2_nic *nic, u64 offset)
{
	void __iomem *addr = otx2_get_regaddr(nic, offset);

	return readq(addr);
}
/* Mbox bounce buffer APIs */
static inline int otx2_mbox_bbuf_init(struct mbox *mbox, struct pci_dev *pdev)
{
	struct otx2_mbox *otx2_mbox;
	struct otx2_mbox_dev *mdev;

	mbox->bbuf_base = devm_kmalloc(&pdev->dev, MBOX_SIZE, GFP_KERNEL);
	if (!mbox->bbuf_base)
		return -ENOMEM;

	/* Overwrite mbox mbase to point to the bounce buffer, so that PF/VF
	 * prepare all mbox messages in the bounce buffer instead of directly
	 * in hw mbox memory.
	 */
	otx2_mbox = &mbox->mbox;
	mdev = &otx2_mbox->dev[0];
	mdev->mbase = mbox->bbuf_base;

	otx2_mbox = &mbox->mbox_up;
	mdev = &otx2_mbox->dev[0];
	mdev->mbase = mbox->bbuf_base;

	return 0;
}
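
/* Copy fresh messages from the hw mbox region into the bounce buffer.
 * A no-op when mbase already points at hw memory; the copy length is
 * clamped to the Rx region so a bad msg_size in the header cannot
 * overrun it.
 */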
static inline void otx2_sync_mbox_bbuf(struct otx2_mbox *mbox, int devid)
{
	u16 msgs_offset = ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN);
	void *hw_mbase = mbox->hwbase + (devid * MBOX_SIZE);
	struct otx2_mbox_dev *mdev = &mbox->dev[devid];
	struct mbox_hdr *hdr;
	u64 msg_size;

	if (mdev->mbase == hw_mbase)
		return;

	hdr = hw_mbase + mbox->rx_start;
	msg_size = hdr->msg_size;

	if (msg_size > mbox->rx_size - msgs_offset)
		msg_size = mbox->rx_size - msgs_offset;

	/* Copy mbox messages from mbox memory to bounce buffer */
	memcpy(mdev->mbase + mbox->rx_start,
	       hw_mbase + mbox->rx_start, msg_size + msgs_offset);
}
/* Since arm64 has no generic API for 128-bit I/O memory accesses,
 * implement the required operations in place.
 */
#if defined(CONFIG_ARM64)
static inline void otx2_write128(u64 lo, u64 hi, void __iomem *addr)
{
	__asm__ volatile("stp %x[x0], %x[x1], [%x[p1],#0]!"
			 ::[x0]"r"(lo), [x1]"r"(hi), [p1]"r"(addr));
}
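
/* Atomic fetch-and-add via the arm64 LSE LDADD instruction. Also used
 * on CSR addresses (see otx2_aura_allocptr()), where the device itself
 * services the atomic and the "old value" returned is the result of
 * the hardware operation.
 */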
static inline u64 otx2_atomic64_add(u64 incr, u64 *ptr)
{
	u64 result;

	__asm__ volatile(".cpu generic+lse\n"
			 "ldadd %x[i], %x[r], [%[b]]"
			 : [r]"=r"(result), "+m"(*ptr)
			 : [i]"r"(incr), [b]"r"(ptr)
			 : "memory");
	return result;
}
#else
#define otx2_write128(lo, hi, addr) writeq((hi) | (lo), addr)
#define otx2_atomic64_add(incr, ptr) ({ *(ptr) += (incr); })
#endif
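
/* Batched pointer free on CN10K: stage up to an LMT line's worth of
 * 64-bit words and kick one LMTST. ptrs[0] is a header word carrying
 * the aura id and the "count EOT" bit; ptrs[1..] carry the buffer
 * addresses. Worked example, assuming a header plus five buffers
 * (num_ptrs = 6): six 64-bit words span three 128-bit LMT words, so
 * size = 48 / 16 = 3 and tar_addr[6:4] = size - 1 = 2; num_ptrs is
 * even, so the upper half of the last 128-bit word holds a valid
 * pointer and count_eot = 1.
 */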
static inline void __cn10k_aura_freeptr(struct otx2_nic *pfvf, u64 aura,
					u64 *ptrs, u64 num_ptrs,
					u64 *lmt_addr)
{
	u64 size = 0, count_eot = 0;
	u64 tar_addr, val = 0;

	tar_addr = (__force u64)otx2_get_regaddr(pfvf, NPA_LF_AURA_BATCH_FREE0);
	/* LMTID is same as AURA Id */
	val = (aura & 0x7FF) | BIT_ULL(63);
	/* Set if [127:64] of last 128bit word has a valid pointer */
	count_eot = (num_ptrs % 2) ? 0ULL : 1ULL;
	/* Set AURA ID to free pointer */
	ptrs[0] = (count_eot << 32) | (aura & 0xFFFFF);
	/* Target address for LMTST flush tells HW how many 128bit
	 * words are valid from NPA_LF_AURA_BATCH_FREE0.
	 *
	 * tar_addr[6:4] is LMTST size-1 in units of 128b.
	 */
	if (num_ptrs > 2) {
		size = (sizeof(u64) * num_ptrs) / 16;
		if ((num_ptrs % 2) != 0)
			size++;
		tar_addr |= ((size - 1) & 0x7) << 4;
	}
	memcpy(lmt_addr, ptrs, sizeof(u64) * num_ptrs);
	/* Perform LMTST flush */
	cn10k_lmt_flush(val, tar_addr);
}
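
/* dev_hw_ops->aura_freeptr for CN10K: a header word plus the single
 * buffer pointer, flushed from the pool's own LMT line.
 */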
static inline void cn10k_aura_freeptr(void *dev, int aura, u64 buf)
{
	struct otx2_nic *pfvf = dev;
	struct otx2_pool *pool;
	u64 ptrs[2];

	pool = &pfvf->qset.pool[aura];
	ptrs[1] = buf;
	__cn10k_aura_freeptr(pfvf, aura, ptrs, 2, pool->lmt_addr);
}
/* Alloc pointer from pool/aura */
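/* Allocation is an atomic op on the NPA_LF_AURA_OP_ALLOCX(0) CSR: the
 * LDADD of (aura | BIT(63)) is serviced by hardware, which pops a
 * buffer from the aura and returns its address as the atomic result.
 */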
static inline u64 otx2_aura_allocptr(struct otx2_nic *pfvf, int aura)
{
	u64 *ptr = (u64 *)otx2_get_regaddr(pfvf, NPA_LF_AURA_OP_ALLOCX(0));
	u64 incr = (u64)aura | BIT_ULL(63);

	return otx2_atomic64_add(incr, ptr);
}
/* Free pointer to a pool/aura */
static inline void otx2_aura_freeptr(void *dev, int aura, u64 buf)
{
	struct otx2_nic *pfvf = dev;
	void __iomem *addr = otx2_get_regaddr(pfvf, NPA_LF_AURA_OP_FREE0);

	otx2_write128(buf, (u64)aura | BIT_ULL(63), addr);
}
static inline int otx2_get_pool_idx(struct otx2_nic *pfvf, int type, int idx)
{
	if (type == AURA_NIX_SQ)
		return pfvf->hw.rqpool_cnt + idx;

	/* AURA_NIX_RQ: RQ pools occupy the first rqpool_cnt slots */
	return idx;
}
static inline int otx2_sync_mbox_msg(struct mbox *mbox)
{
	int err;

	if (!otx2_mbox_nonempty(&mbox->mbox, 0))
		return 0;
	otx2_mbox_msg_send(&mbox->mbox, 0);
	err = otx2_mbox_wait_for_rsp(&mbox->mbox, 0);
	if (err)
		return err;

	return otx2_mbox_check_rsp_msgs(&mbox->mbox, 0);
}
static inline int otx2_sync_mbox_up_msg(struct mbox *mbox, int devid)
{
	int err;

	if (!otx2_mbox_nonempty(&mbox->mbox_up, devid))
		return 0;
	otx2_mbox_msg_send(&mbox->mbox_up, devid);
	err = otx2_mbox_wait_for_rsp(&mbox->mbox_up, devid);
	if (err)
		return err;

	return otx2_mbox_check_rsp_msgs(&mbox->mbox_up, devid);
}
/* Use this API to send mbox msgs in atomic context
 * where sleeping is not allowed
 */
static inline int otx2_sync_mbox_msg_busy_poll(struct mbox *mbox)
{
	int err;

	if (!otx2_mbox_nonempty(&mbox->mbox, 0))
		return 0;
	otx2_mbox_msg_send(&mbox->mbox, 0);
	err = otx2_mbox_busy_poll_for_rsp(&mbox->mbox, 0);
	if (err)
		return err;

	return otx2_mbox_check_rsp_msgs(&mbox->mbox, 0);
}
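
/* X-macro: for each mbox message M(name, id, fn_name, req_t, rsp_t)
 * listed in MBOX_MESSAGES, generate a typed allocator
 * otx2_mbox_alloc_msg_<fn_name>() that reserves request/response space
 * in the mbox and fills in the message header.
 */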
#define M(_name, _id, _fn_name, _req_type, _rsp_type) \
static struct _req_type __maybe_unused \
*otx2_mbox_alloc_msg_ ## _fn_name(struct mbox *mbox) \
{ \
	struct _req_type *req; \
	\
	req = (struct _req_type *)otx2_mbox_alloc_msg_rsp( \
		&mbox->mbox, 0, sizeof(struct _req_type), \
		sizeof(struct _rsp_type)); \
	if (!req) \
		return NULL; \
	req->hdr.sig = OTX2_MBOX_REQ_SIG; \
	req->hdr.id = _id; \
	trace_otx2_msg_alloc(mbox->mbox.pdev, _id, sizeof(*req)); \
	return req; \
}

MBOX_MESSAGES
#undef M
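
/* Typical synchronous request flow (illustrative sketch only; real
 * callers also fetch and validate the response message):
 *
 *	mutex_lock(&mbox->lock);
 *	req = otx2_mbox_alloc_msg_nix_lf_free(mbox);
 *	if (!req)
 *		goto unlock;
 *	err = otx2_sync_mbox_msg(mbox);
 * unlock:
 *	mutex_unlock(&mbox->lock);
 */
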
#define M(_name, _id, _fn_name, _req_type, _rsp_type) \
int \
otx2_mbox_up_handler_ ## _fn_name(struct otx2_nic *pfvf, \
				  struct _req_type *req, \
				  struct _rsp_type *rsp);

MBOX_UP_CGX_MESSAGES
#undef M
/* Time to wait before the Tx watchdog kicks in */
#define OTX2_TX_TIMEOUT (100 * HZ)
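
/* An RVU PF_FUNC (pcifunc) packs the PF number into bits [15:10] and
 * the function into bits [9:0]; FUNC 0 is the PF itself and FUNC n
 * (n > 0) addresses VF n - 1.
 */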
#define RVU_PFVF_PF_SHIFT 10
#define RVU_PFVF_PF_MASK 0x3F
#define RVU_PFVF_FUNC_SHIFT 0
#define RVU_PFVF_FUNC_MASK 0x3FF
static inline int rvu_get_pf(u16 pcifunc)
{
	return (pcifunc >> RVU_PFVF_PF_SHIFT) & RVU_PFVF_PF_MASK;
}
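
/* Rx/Tx buffers are mapped with DMA_ATTR_SKIP_CPU_SYNC, i.e. without
 * the implicit CPU cache sync; callers must dma_sync_*() exactly the
 * portions of a buffer that the CPU actually touches.
 */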
static inline dma_addr_t otx2_dma_map_page(struct otx2_nic *pfvf,
					   struct page *page,
					   size_t offset, size_t size,
					   enum dma_data_direction dir)
{
	dma_addr_t iova;

	iova = dma_map_page_attrs(pfvf->dev, page,
				  offset, size, dir, DMA_ATTR_SKIP_CPU_SYNC);
	if (unlikely(dma_mapping_error(pfvf->dev, iova)))
		return (dma_addr_t)NULL;
	return iova;
}
static inline void otx2_dma_unmap_page(struct otx2_nic *pfvf,
				       dma_addr_t addr, size_t size,
				       enum dma_data_direction dir)
{
	dma_unmap_page_attrs(pfvf->dev, addr, size,
			     dir, DMA_ATTR_SKIP_CPU_SYNC);
}
void otx2_free_cints(struct otx2_nic *pfvf, int n);
void otx2_set_cints_affinity(struct otx2_nic *pfvf);
int otx2_set_mac_address(struct net_device *netdev, void *p);
int otx2_hw_set_mtu(struct otx2_nic *pfvf, int mtu);
void otx2_tx_timeout(struct net_device *netdev, unsigned int txq);
void otx2_get_mac_from_af(struct net_device *netdev);
void otx2_config_irq_coalescing(struct otx2_nic *pfvf, int qidx);
int otx2_config_pause_frm(struct otx2_nic *pfvf);
void otx2_setup_segmentation(struct otx2_nic *pfvf);
/* RVU block related APIs */
int otx2_attach_npa_nix(struct otx2_nic *pfvf);
int otx2_detach_resources(struct mbox *mbox);
int otx2_config_npa(struct otx2_nic *pfvf);
int otx2_sq_aura_pool_init(struct otx2_nic *pfvf);
int otx2_rq_aura_pool_init(struct otx2_nic *pfvf);
void otx2_aura_pool_free(struct otx2_nic *pfvf);
void otx2_free_aura_ptr(struct otx2_nic *pfvf, int type);
void otx2_sq_free_sqbs(struct otx2_nic *pfvf);
int otx2_config_nix(struct otx2_nic *pfvf);
int otx2_config_nix_queues(struct otx2_nic *pfvf);
int otx2_txschq_config(struct otx2_nic *pfvf, int lvl);
int otx2_txsch_alloc(struct otx2_nic *pfvf);
int otx2_txschq_stop(struct otx2_nic *pfvf);
void otx2_sqb_flush(struct otx2_nic *pfvf);
int __otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool,
		      dma_addr_t *dma);
int otx2_rxtx_enable(struct otx2_nic *pfvf, bool enable);
void otx2_ctx_disable(struct mbox *mbox, int type, bool npa);
int otx2_nix_config_bp(struct otx2_nic *pfvf, bool enable);
void otx2_cleanup_rx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq);
void otx2_cleanup_tx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq);
int otx2_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura);
int cn10k_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura);
int otx2_alloc_buffer(struct otx2_nic *pfvf, struct otx2_cq_queue *cq,
		      dma_addr_t *dma);
/* RSS configuration APIs */
int otx2_rss_init(struct otx2_nic *pfvf);
int otx2_set_flowkey_cfg(struct otx2_nic *pfvf);
void otx2_set_rss_key(struct otx2_nic *pfvf);
int otx2_set_rss_table(struct otx2_nic *pfvf, int ctx_id);
void mbox_handler_msix_offset(struct otx2_nic *pfvf,
			      struct msix_offset_rsp *rsp);
void mbox_handler_npa_lf_alloc(struct otx2_nic *pfvf,
			       struct npa_lf_alloc_rsp *rsp);
void mbox_handler_nix_lf_alloc(struct otx2_nic *pfvf,
			       struct nix_lf_alloc_rsp *rsp);
void mbox_handler_nix_txsch_alloc(struct otx2_nic *pf,
				  struct nix_txsch_alloc_rsp *rsp);
void mbox_handler_cgx_stats(struct otx2_nic *pfvf,
			    struct cgx_stats_rsp *rsp);
void mbox_handler_cgx_fec_stats(struct otx2_nic *pfvf,
				struct cgx_fec_stats_rsp *rsp);
void otx2_set_fec_stats_count(struct otx2_nic *pfvf);
void mbox_handler_nix_bp_enable(struct otx2_nic *pfvf,
				struct nix_bp_cfg_rsp *rsp);
/* Device stats APIs */
void otx2_get_dev_stats(struct otx2_nic *pfvf);
void otx2_get_stats64(struct net_device *netdev,
		      struct rtnl_link_stats64 *stats);
void otx2_update_lmac_stats(struct otx2_nic *pfvf);
void otx2_update_lmac_fec_stats(struct otx2_nic *pfvf);
int otx2_update_rq_stats(struct otx2_nic *pfvf, int qidx);
int otx2_update_sq_stats(struct otx2_nic *pfvf, int qidx);
void otx2_set_ethtool_ops(struct net_device *netdev);
void otx2vf_set_ethtool_ops(struct net_device *netdev);

int otx2_open(struct net_device *netdev);
int otx2_stop(struct net_device *netdev);
int otx2_set_real_num_queues(struct net_device *netdev,
			     int tx_queues, int rx_queues);
/* MCAM filter related APIs */
int otx2_mcam_flow_init(struct otx2_nic *pf);
int otx2_alloc_mcam_entries(struct otx2_nic *pfvf);
void otx2_mcam_flow_del(struct otx2_nic *pf);
int otx2_destroy_ntuple_flows(struct otx2_nic *pf);
int otx2_destroy_mcam_flows(struct otx2_nic *pfvf);
int otx2_get_flow(struct otx2_nic *pfvf,
		  struct ethtool_rxnfc *nfc, u32 location);
int otx2_get_all_flows(struct otx2_nic *pfvf,
		       struct ethtool_rxnfc *nfc, u32 *rule_locs);
int otx2_add_flow(struct otx2_nic *pfvf,
		  struct ethtool_rxnfc *nfc);
int otx2_remove_flow(struct otx2_nic *pfvf, u32 location);
int otx2_prepare_flow_request(struct ethtool_rx_flow_spec *fsp,
			      struct npc_install_flow_req *req);
void otx2_rss_ctx_flow_del(struct otx2_nic *pfvf, int ctx_id);
int otx2_del_macfilter(struct net_device *netdev, const u8 *mac);
int otx2_add_macfilter(struct net_device *netdev, const u8 *mac);
int otx2_enable_rxvlan(struct otx2_nic *pf, bool enable);
int otx2_install_rxvlan_offload_flow(struct otx2_nic *pfvf);
u16 otx2_get_max_mtu(struct otx2_nic *pfvf);

#endif /* OTX2_COMMON_H */