1 // SPDX-License-Identifier: GPL-2.0
2 /* Marvell RVU Admin Function driver
4 * Copyright (C) 2018 Marvell.
8 #include <linux/module.h>
11 #include "rvu_struct.h"
17 #include "lmac_common.h"
18 #include "rvu_npc_hash.h"
20 static void nix_free_tx_vtag_entries(struct rvu *rvu, u16 pcifunc);
21 static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req,
22 int type, int chan_id);
23 static int nix_update_mce_rule(struct rvu *rvu, u16 pcifunc,
25 static int nix_setup_ipolicers(struct rvu *rvu,
26 struct nix_hw *nix_hw, int blkaddr);
27 static void nix_ipolicer_freemem(struct rvu *rvu, struct nix_hw *nix_hw);
28 static int nix_verify_bandprof(struct nix_cn10k_aq_enq_req *req,
29 struct nix_hw *nix_hw, u16 pcifunc);
30 static int nix_free_all_bandprof(struct rvu *rvu, u16 pcifunc);
31 static void nix_clear_ratelimit_aggr(struct rvu *rvu, struct nix_hw *nix_hw,
33 static const char *nix_get_ctx_name(int ctype);
59 enum nix_mark_fmt_indexes {
60 NIX_MARK_CFG_IP_DSCP_RED,
61 NIX_MARK_CFG_IP_DSCP_YELLOW,
62 NIX_MARK_CFG_IP_DSCP_YELLOW_RED,
63 NIX_MARK_CFG_IP_ECN_RED,
64 NIX_MARK_CFG_IP_ECN_YELLOW,
65 NIX_MARK_CFG_IP_ECN_YELLOW_RED,
66 NIX_MARK_CFG_VLAN_DEI_RED,
67 NIX_MARK_CFG_VLAN_DEI_YELLOW,
68 NIX_MARK_CFG_VLAN_DEI_YELLOW_RED,
72 /* For now, consider only the MC resources needed for broadcast
73 * pkt replication, i.e. 256 HWVFs + 12 PFs.
75 #define MC_TBL_SIZE MC_TBL_SZ_512
76 #define MC_BUF_CNT MC_BUF_CNT_128
79 struct hlist_node node;
83 int rvu_get_next_nix_blkaddr(struct rvu *rvu, int blkaddr)
87 /* If blkaddr is 0, return the first NIX block address */
89 return rvu->nix_blkaddr[blkaddr];
91 while (i + 1 < MAX_NIX_BLKS) {
92 if (rvu->nix_blkaddr[i] == blkaddr)
93 return rvu->nix_blkaddr[i + 1];
100 bool is_nixlf_attached(struct rvu *rvu, u16 pcifunc)
102 struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
105 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
106 if (!pfvf->nixlf || blkaddr < 0)
111 int rvu_get_nixlf_count(struct rvu *rvu)
113 int blkaddr = 0, max = 0;
114 struct rvu_block *block;
116 blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
118 block = &rvu->hw->block[blkaddr];
119 max += block->lf.max;
120 blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
125 int nix_get_nixlf(struct rvu *rvu, u16 pcifunc, int *nixlf, int *nix_blkaddr)
127 struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
128 struct rvu_hwinfo *hw = rvu->hw;
131 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
132 if (!pfvf->nixlf || blkaddr < 0)
133 return NIX_AF_ERR_AF_LF_INVALID;
135 *nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
137 return NIX_AF_ERR_AF_LF_INVALID;
140 *nix_blkaddr = blkaddr;
145 int nix_get_struct_ptrs(struct rvu *rvu, u16 pcifunc,
146 struct nix_hw **nix_hw, int *blkaddr)
148 struct rvu_pfvf *pfvf;
150 pfvf = rvu_get_pfvf(rvu, pcifunc);
151 *blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
152 if (!pfvf->nixlf || *blkaddr < 0)
153 return NIX_AF_ERR_AF_LF_INVALID;
155 *nix_hw = get_nix_hw(rvu->hw, *blkaddr);
157 return NIX_AF_ERR_INVALID_NIXBLK;
161 static void nix_mce_list_init(struct nix_mce_list *list, int max)
163 INIT_HLIST_HEAD(&list->head);
168 static u16 nix_alloc_mce_list(struct nix_mcast *mcast, int count)
175 idx = mcast->next_free_mce;
176 mcast->next_free_mce += count;
180 struct nix_hw *get_nix_hw(struct rvu_hwinfo *hw, int blkaddr)
182 int nix_blkaddr = 0, i = 0;
183 struct rvu *rvu = hw->rvu;
185 nix_blkaddr = rvu_get_next_nix_blkaddr(rvu, nix_blkaddr);
186 while (nix_blkaddr) {
187 if (blkaddr == nix_blkaddr && hw->nix)
189 nix_blkaddr = rvu_get_next_nix_blkaddr(rvu, nix_blkaddr);
195 int nix_get_dwrr_mtu_reg(struct rvu_hwinfo *hw, int smq_link_type)
197 if (hw->cap.nix_multiple_dwrr_mtu)
198 return NIX_AF_DWRR_MTUX(smq_link_type);
200 if (smq_link_type == SMQ_LINK_TYPE_SDP)
201 return NIX_AF_DWRR_SDP_MTU;
203 /* Here it's the same reg for RPM and LBK */
204 return NIX_AF_DWRR_RPM_MTU;
207 u32 convert_dwrr_mtu_to_bytes(u8 dwrr_mtu)
211 /* The MTU used for DWRR calculation is a power of 2, up to 64K bytes.
212 * Value of 4 is reserved for MTU value of 9728 bytes.
213 * Value of 5 is reserved for MTU value of 10240 bytes.
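 * Illustrative mapping (a sketch based on the encoding described above):
 * a register value of 10 selects a 2^10 = 1024 byte DWRR MTU, while the
 * reserved values 4 and 5 select 9728 and 10240 bytes respectively.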
221 return BIT_ULL(dwrr_mtu);
227 u32 convert_bytes_to_dwrr_mtu(u32 bytes)
229 /* The MTU used for DWRR calculation is a power of 2, up to 64K bytes.
230 * Value of 4 is reserved for MTU value of 9728 bytes.
231 * Value of 5 is reserved for MTU value of 10240 bytes.
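 * Illustrative inverse mapping (assuming the same encoding): 1024 bytes
 * corresponds to a DWRR MTU register value of ilog2(1024) = 10.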
233 if (bytes > BIT_ULL(16))
248 static void nix_rx_sync(struct rvu *rvu, int blkaddr)
252 /* Sync all in flight RX packets to LLC/DRAM */
253 rvu_write64(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0));
254 err = rvu_poll_reg(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0), true);
256 dev_err(rvu->dev, "SYNC1: NIX RX software sync failed\n");
258 /* SW_SYNC ensures all in-flight transactions are finished and pkts
259 * are written to LLC/DRAM; queues should be torn down only after a
260 * successful SW_SYNC. Due to a HW erratum, in some rare scenarios
261 * an existing transaction might complete after the SW_SYNC operation.
262 * To ensure the operation is fully done, do the SW_SYNC twice.
264 rvu_write64(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0));
265 err = rvu_poll_reg(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0), true);
267 dev_err(rvu->dev, "SYNC2: NIX RX software sync failed\n");
270 static bool is_valid_txschq(struct rvu *rvu, int blkaddr,
271 int lvl, u16 pcifunc, u16 schq)
273 struct rvu_hwinfo *hw = rvu->hw;
274 struct nix_txsch *txsch;
275 struct nix_hw *nix_hw;
278 nix_hw = get_nix_hw(rvu->hw, blkaddr);
282 txsch = &nix_hw->txsch[lvl];
283 /* Check out of bounds */
284 if (schq >= txsch->schq.max)
287 mutex_lock(&rvu->rsrc_lock);
288 map_func = TXSCH_MAP_FUNC(txsch->pfvf_map[schq]);
289 mutex_unlock(&rvu->rsrc_lock);
291 /* TLs aggregating traffic are shared across the PF and its VFs */
292 if (lvl >= hw->cap.nix_tx_aggr_lvl) {
293 if (rvu_get_pf(map_func) != rvu_get_pf(pcifunc))
299 if (map_func != pcifunc)
305 static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf,
306 struct nix_lf_alloc_rsp *rsp, bool loop)
308 struct rvu_pfvf *parent_pf, *pfvf = rvu_get_pfvf(rvu, pcifunc);
309 u16 req_chan_base, req_chan_end, req_chan_cnt;
310 struct rvu_hwinfo *hw = rvu->hw;
311 struct sdp_node_info *sdp_info;
312 int pkind, pf, vf, lbkid, vfid;
317 pf = rvu_get_pf(pcifunc);
318 if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK &&
319 type != NIX_INTF_TYPE_SDP)
323 case NIX_INTF_TYPE_CGX:
324 pfvf->cgx_lmac = rvu->pf2cgxlmac_map[pf];
325 rvu_get_cgx_lmac_id(pfvf->cgx_lmac, &cgx_id, &lmac_id);
327 pkind = rvu_npc_get_pkind(rvu, pf);
330 "PF_Func 0x%x: Invalid pkind\n", pcifunc);
333 pfvf->rx_chan_base = rvu_nix_chan_cgx(rvu, cgx_id, lmac_id, 0);
334 pfvf->tx_chan_base = pfvf->rx_chan_base;
335 pfvf->rx_chan_cnt = 1;
336 pfvf->tx_chan_cnt = 1;
337 rsp->tx_link = cgx_id * hw->lmac_per_cgx + lmac_id;
339 cgx_set_pkind(rvu_cgx_pdata(cgx_id, rvu), lmac_id, pkind);
340 rvu_npc_set_pkind(rvu, pkind, pfvf);
343 case NIX_INTF_TYPE_LBK:
344 vf = (pcifunc & RVU_PFVF_FUNC_MASK) - 1;
346 /* If NIX1 block is present on the silicon then NIXes are
347 * assigned alternately to lbk interfaces. NIX0 should
348 * send packets on lbk link 1 channels and NIX1 should send
349 * on lbk link 0 channels for the communication between
353 if (rvu->hw->lbk_links > 1)
354 lbkid = vf & 0x1 ? 0 : 1;
356 /* By default NIX0 is configured to send packet on lbk link 1
357 * (which corresponds to LBK1), same packet will receive on
358 * NIX1 over lbk link 0. If NIX1 sends packet on lbk link 0
359 * (which corresponds to LBK2) packet will receive on NIX0 lbk
361 * But if lbk links for NIX0 and NIX1 are negated, i.e. NIX0
362 * transmits and receives on lbk link 0, which corresponds
363 * to the LBK1 block, back-to-back connectivity between NIX and
364 * LBK can be achieved (which is similar to 96xx)
367 * NIX0 lbk link  RX 1 (LBK2)   TX 1 (LBK1)
368 * NIX0 lbk link  RX 0 (LBK0)   TX 0 (LBK0)
369 * NIX1 lbk link  RX 0 (LBK1)   TX 0 (LBK2)
370 * NIX1 lbk link  RX 1 (LBK3)   TX 1 (LBK3)
375 /* Note that AF's VFs work in pairs and talk over consecutive
376 * loopback channels. Therefore, if an odd number of AF VFs are
377 * enabled then the last VF remains with no pair.
379 pfvf->rx_chan_base = rvu_nix_chan_lbk(rvu, lbkid, vf);
380 pfvf->tx_chan_base = vf & 0x1 ?
381 rvu_nix_chan_lbk(rvu, lbkid, vf - 1) :
382 rvu_nix_chan_lbk(rvu, lbkid, vf + 1);
383 pfvf->rx_chan_cnt = 1;
384 pfvf->tx_chan_cnt = 1;
385 rsp->tx_link = hw->cgx_links + lbkid;
387 rvu_npc_set_pkind(rvu, NPC_RX_LBK_PKIND, pfvf);
388 rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf,
393 case NIX_INTF_TYPE_SDP:
394 from_vf = !!(pcifunc & RVU_PFVF_FUNC_MASK);
395 parent_pf = &rvu->pf[rvu_get_pf(pcifunc)];
396 sdp_info = parent_pf->sdp_info;
398 dev_err(rvu->dev, "Invalid sdp_info pointer\n");
402 req_chan_base = rvu_nix_chan_sdp(rvu, 0) + sdp_info->pf_srn +
403 sdp_info->num_pf_rings;
404 vf = (pcifunc & RVU_PFVF_FUNC_MASK) - 1;
405 for (vfid = 0; vfid < vf; vfid++)
406 req_chan_base += sdp_info->vf_rings[vfid];
407 req_chan_cnt = sdp_info->vf_rings[vf];
408 req_chan_end = req_chan_base + req_chan_cnt - 1;
409 if (req_chan_base < rvu_nix_chan_sdp(rvu, 0) ||
410 req_chan_end > rvu_nix_chan_sdp(rvu, 255)) {
412 "PF_Func 0x%x: Invalid channel base and count\n",
417 req_chan_base = rvu_nix_chan_sdp(rvu, 0) + sdp_info->pf_srn;
418 req_chan_cnt = sdp_info->num_pf_rings;
421 pfvf->rx_chan_base = req_chan_base;
422 pfvf->rx_chan_cnt = req_chan_cnt;
423 pfvf->tx_chan_base = pfvf->rx_chan_base;
424 pfvf->tx_chan_cnt = pfvf->rx_chan_cnt;
426 rsp->tx_link = hw->cgx_links + hw->lbk_links;
427 rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf,
433 /* Add a UCAST forwarding rule in MCAM matching the MAC address
434 * of the RVU PF/VF this NIXLF is attached to.
436 rvu_npc_install_ucast_entry(rvu, pcifunc, nixlf,
437 pfvf->rx_chan_base, pfvf->mac_addr);
439 /* Add this PF_FUNC to bcast pkt replication list */
440 err = nix_update_mce_rule(rvu, pcifunc, NIXLF_BCAST_ENTRY, true);
443 "Bcast list, failed to enable PF_FUNC 0x%x\n",
447 /* Install MCAM rule matching Ethernet broadcast mac address */
448 rvu_npc_install_bcast_match_entry(rvu, pcifunc,
449 nixlf, pfvf->rx_chan_base);
451 pfvf->maxlen = NIC_HW_MIN_FRS;
452 pfvf->minlen = NIC_HW_MIN_FRS;
457 static void nix_interface_deinit(struct rvu *rvu, u16 pcifunc, u8 nixlf)
459 struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
465 /* Remove this PF_FUNC from bcast pkt replication list */
466 err = nix_update_mce_rule(rvu, pcifunc, NIXLF_BCAST_ENTRY, false);
469 "Bcast list, failed to disable PF_FUNC 0x%x\n",
473 /* Free and disable any MCAM entries used by this NIX LF */
474 rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf);
476 /* Disable DMAC filters used */
477 rvu_cgx_disable_dmac_entries(rvu, pcifunc);
480 int rvu_mbox_handler_nix_bp_disable(struct rvu *rvu,
481 struct nix_bp_cfg_req *req,
484 u16 pcifunc = req->hdr.pcifunc;
485 struct rvu_pfvf *pfvf;
486 int blkaddr, pf, type;
490 pf = rvu_get_pf(pcifunc);
491 type = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
492 if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK)
495 pfvf = rvu_get_pfvf(rvu, pcifunc);
496 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
498 chan_base = pfvf->rx_chan_base + req->chan_base;
499 for (chan = chan_base; chan < (chan_base + req->chan_cnt); chan++) {
500 cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan));
501 rvu_write64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan),
507 static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req,
508 int type, int chan_id)
510 int bpid, blkaddr, lmac_chan_cnt, sdp_chan_cnt;
511 u16 cgx_bpid_cnt, lbk_bpid_cnt, sdp_bpid_cnt;
512 struct rvu_hwinfo *hw = rvu->hw;
513 struct rvu_pfvf *pfvf;
517 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, req->hdr.pcifunc);
518 cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST);
519 lmac_chan_cnt = cfg & 0xFF;
521 cgx_bpid_cnt = hw->cgx_links * lmac_chan_cnt;
522 lbk_bpid_cnt = hw->lbk_links * ((cfg >> 16) & 0xFF);
524 cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);
525 sdp_chan_cnt = cfg & 0xFFF;
526 sdp_bpid_cnt = hw->sdp_links * sdp_chan_cnt;
528 pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
530 /* Backpressure ID range division
531 * CGX channels are mapped to (0 - 191) BPIDs
532 * LBK channels are mapped to (192 - 255) BPIDs
533 * SDP channels are mapped to (256 - 511) BPIDs
535 * LMAC channels and bpids are mapped as follows
536 * cgx(0)_lmac(0)_chan(0 - 15) = bpid(0 - 15)
537 * cgx(0)_lmac(1)_chan(0 - 15) = bpid(16 - 31) ....
538 * cgx(1)_lmac(0)_chan(0 - 15) = bpid(64 - 79) ....
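 *
 * Worked example (illustrative, assuming 16 channels per LMAC and 4 LMACs
 * per CGX as in the mapping above): cgx(1)_lmac(2)_chan(3) maps to
 * bpid = (1 * 4 * 16) + (2 * 16) + 3 = 99.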
541 case NIX_INTF_TYPE_CGX:
542 if ((req->chan_base + req->chan_cnt) > 16)
544 rvu_get_cgx_lmac_id(pfvf->cgx_lmac, &cgx_id, &lmac_id);
545 /* Assign bpid based on cgx, lmac and chan id */
546 bpid = (cgx_id * hw->lmac_per_cgx * lmac_chan_cnt) +
547 (lmac_id * lmac_chan_cnt) + req->chan_base;
549 if (req->bpid_per_chan)
551 if (bpid > cgx_bpid_cnt)
555 case NIX_INTF_TYPE_LBK:
556 if ((req->chan_base + req->chan_cnt) > 63)
558 bpid = cgx_bpid_cnt + req->chan_base;
559 if (req->bpid_per_chan)
561 if (bpid > (cgx_bpid_cnt + lbk_bpid_cnt))
564 case NIX_INTF_TYPE_SDP:
565 if ((req->chan_base + req->chan_cnt) > 255)
568 bpid = sdp_bpid_cnt + req->chan_base;
569 if (req->bpid_per_chan)
572 if (bpid > (cgx_bpid_cnt + lbk_bpid_cnt + sdp_bpid_cnt))
581 int rvu_mbox_handler_nix_bp_enable(struct rvu *rvu,
582 struct nix_bp_cfg_req *req,
583 struct nix_bp_cfg_rsp *rsp)
585 int blkaddr, pf, type, chan_id = 0;
586 u16 pcifunc = req->hdr.pcifunc;
587 struct rvu_pfvf *pfvf;
592 pf = rvu_get_pf(pcifunc);
593 type = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
594 if (is_sdp_pfvf(pcifunc))
595 type = NIX_INTF_TYPE_SDP;
597 /* Enable backpressure only for CGX mapped PFs and LBK/SDP interface */
598 if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK &&
599 type != NIX_INTF_TYPE_SDP)
602 pfvf = rvu_get_pfvf(rvu, pcifunc);
603 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
605 bpid_base = rvu_nix_get_bpid(rvu, req, type, chan_id);
606 chan_base = pfvf->rx_chan_base + req->chan_base;
609 for (chan = chan_base; chan < (chan_base + req->chan_cnt); chan++) {
611 dev_warn(rvu->dev, "Failed to enable backpressure\n");
615 cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan));
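/* Program the new BPID into bits [8:0]; bit 16, set below, acts as the
 * per-channel backpressure enable in this enable path.
 */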
616 cfg &= ~GENMASK_ULL(8, 0);
617 rvu_write64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan),
618 cfg | (bpid & GENMASK_ULL(8, 0)) | BIT_ULL(16));
620 bpid = rvu_nix_get_bpid(rvu, req, type, chan_id);
623 for (chan = 0; chan < req->chan_cnt; chan++) {
624 /* Map channel and the bpid assigned to it */
625 rsp->chan_bpid[chan] = ((req->chan_base + chan) & 0x7F) << 10 |
627 if (req->bpid_per_chan)
630 rsp->chan_cnt = req->chan_cnt;
635 static void nix_setup_lso_tso_l3(struct rvu *rvu, int blkaddr,
636 u64 format, bool v4, u64 *fidx)
638 struct nix_lso_format field = {0};
640 /* IP's Length field */
641 field.layer = NIX_TXLAYER_OL3;
642 /* In IPv4 the length field is at byte offset 2; for IPv6 it's 4 */
643 field.offset = v4 ? 2 : 4;
644 field.sizem1 = 1; /* i.e. 2 bytes */
645 field.alg = NIX_LSOALG_ADD_PAYLEN;
646 rvu_write64(rvu, blkaddr,
647 NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
650 /* No ID field in IPv6 header */
655 field.layer = NIX_TXLAYER_OL3;
657 field.sizem1 = 1; /* i.e. 2 bytes */
658 field.alg = NIX_LSOALG_ADD_SEGNUM;
659 rvu_write64(rvu, blkaddr,
660 NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
664 static void nix_setup_lso_tso_l4(struct rvu *rvu, int blkaddr,
665 u64 format, u64 *fidx)
667 struct nix_lso_format field = {0};
669 /* TCP's sequence number field */
670 field.layer = NIX_TXLAYER_OL4;
672 field.sizem1 = 3; /* i.e. 4 bytes */
673 field.alg = NIX_LSOALG_ADD_OFFSET;
674 rvu_write64(rvu, blkaddr,
675 NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
678 /* TCP's flags field */
679 field.layer = NIX_TXLAYER_OL4;
681 field.sizem1 = 1; /* 2 bytes */
682 field.alg = NIX_LSOALG_TCP_FLAGS;
683 rvu_write64(rvu, blkaddr,
684 NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
688 static void nix_setup_lso(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
690 u64 cfg, idx, fidx = 0;
692 /* Get max HW supported format indices */
693 cfg = (rvu_read64(rvu, blkaddr, NIX_AF_CONST1) >> 48) & 0xFF;
694 nix_hw->lso.total = cfg;
697 cfg = rvu_read64(rvu, blkaddr, NIX_AF_LSO_CFG);
698 /* For TSO, set first and middle segment flags to
699 * mask out PSH, RST & FIN flags in TCP packet
701 cfg &= ~((0xFFFFULL << 32) | (0xFFFFULL << 16));
702 cfg |= (0xFFF2ULL << 32) | (0xFFF2ULL << 16);
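/* The 0xFFF2 value above keeps all TCP flag bits except bit 0 (FIN),
 * bit 2 (RST) and bit 3 (PSH), matching the intent described above.
 */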
703 rvu_write64(rvu, blkaddr, NIX_AF_LSO_CFG, cfg | BIT_ULL(63));
705 /* Setup default static LSO formats
707 * Configure format fields for TCPv4 segmentation offload
709 idx = NIX_LSO_FORMAT_IDX_TSOV4;
710 nix_setup_lso_tso_l3(rvu, blkaddr, idx, true, &fidx);
711 nix_setup_lso_tso_l4(rvu, blkaddr, idx, &fidx);
713 /* Set rest of the fields to NOP */
714 for (; fidx < 8; fidx++) {
715 rvu_write64(rvu, blkaddr,
716 NIX_AF_LSO_FORMATX_FIELDX(idx, fidx), 0x0ULL);
718 nix_hw->lso.in_use++;
720 /* Configure format fields for TCPv6 segmentation offload */
721 idx = NIX_LSO_FORMAT_IDX_TSOV6;
723 nix_setup_lso_tso_l3(rvu, blkaddr, idx, false, &fidx);
724 nix_setup_lso_tso_l4(rvu, blkaddr, idx, &fidx);
726 /* Set rest of the fields to NOP */
727 for (; fidx < 8; fidx++) {
728 rvu_write64(rvu, blkaddr,
729 NIX_AF_LSO_FORMATX_FIELDX(idx, fidx), 0x0ULL);
731 nix_hw->lso.in_use++;
734 static void nix_ctx_free(struct rvu *rvu, struct rvu_pfvf *pfvf)
736 kfree(pfvf->rq_bmap);
737 kfree(pfvf->sq_bmap);
738 kfree(pfvf->cq_bmap);
740 qmem_free(rvu->dev, pfvf->rq_ctx);
742 qmem_free(rvu->dev, pfvf->sq_ctx);
744 qmem_free(rvu->dev, pfvf->cq_ctx);
746 qmem_free(rvu->dev, pfvf->rss_ctx);
747 if (pfvf->nix_qints_ctx)
748 qmem_free(rvu->dev, pfvf->nix_qints_ctx);
749 if (pfvf->cq_ints_ctx)
750 qmem_free(rvu->dev, pfvf->cq_ints_ctx);
752 pfvf->rq_bmap = NULL;
753 pfvf->cq_bmap = NULL;
754 pfvf->sq_bmap = NULL;
758 pfvf->rss_ctx = NULL;
759 pfvf->nix_qints_ctx = NULL;
760 pfvf->cq_ints_ctx = NULL;
763 static int nixlf_rss_ctx_init(struct rvu *rvu, int blkaddr,
764 struct rvu_pfvf *pfvf, int nixlf,
765 int rss_sz, int rss_grps, int hwctx_size,
766 u64 way_mask, bool tag_lsb_as_adder)
768 int err, grp, num_indices;
771 /* RSS is not requested for this NIXLF */
774 num_indices = rss_sz * rss_grps;
776 /* Alloc NIX RSS HW context memory and config the base */
777 err = qmem_alloc(rvu->dev, &pfvf->rss_ctx, num_indices, hwctx_size);
781 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_BASE(nixlf),
782 (u64)pfvf->rss_ctx->iova);
784 /* Config full RSS table size, enable RSS and caching */
785 val = BIT_ULL(36) | BIT_ULL(4) | way_mask << 20 |
786 ilog2(num_indices / MAX_RSS_INDIR_TBL_SIZE);
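/* Field notes (cross-checked with the AQ RSS handler below): bit 4 is the
 * RSS enable that handler tests, the low nibble is the table size it
 * decodes as 256 << n indices, and BIT(36) plus way_mask << 20 mirror the
 * caching/way-mask setup used for the RQ/SQ/CQ context config.
 */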
788 if (tag_lsb_as_adder)
791 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_CFG(nixlf), val);
792 /* Config RSS group offset and sizes */
793 for (grp = 0; grp < rss_grps; grp++)
794 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_GRPX(nixlf, grp),
795 ((ilog2(rss_sz) - 1) << 16) | (rss_sz * grp));
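/* Illustrative example of the group programming above: with rss_sz = 256,
 * group 1 gets offset 256 and a size field of ilog2(256) - 1 = 7.
 */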
799 static int nix_aq_enqueue_wait(struct rvu *rvu, struct rvu_block *block,
800 struct nix_aq_inst_s *inst)
802 struct admin_queue *aq = block->aq;
803 struct nix_aq_res_s *result;
808 result = (struct nix_aq_res_s *)aq->res->base;
810 /* Get the current head pointer, where this instruction will be appended */
811 reg = rvu_read64(rvu, block->addr, NIX_AF_AQ_STATUS);
812 head = (reg >> 4) & AQ_PTR_MASK;
814 memcpy((void *)(aq->inst->base + (head * aq->inst->entry_sz)),
815 (void *)inst, aq->inst->entry_sz);
816 memset(result, 0, sizeof(*result));
817 /* sync into memory */
820 /* Ring the doorbell and wait for result */
821 rvu_write64(rvu, block->addr, NIX_AF_AQ_DOOR, 1);
822 while (result->compcode == NIX_AQ_COMP_NOTDONE) {
830 if (result->compcode != NIX_AQ_COMP_GOOD) {
831 /* TODO: Replace this with some error code */
832 if (result->compcode == NIX_AQ_COMP_CTX_FAULT ||
833 result->compcode == NIX_AQ_COMP_LOCKERR ||
834 result->compcode == NIX_AQ_COMP_CTX_POISON) {
835 ret = rvu_ndc_fix_locked_cacheline(rvu, BLKADDR_NDC_NIX0_RX);
836 ret |= rvu_ndc_fix_locked_cacheline(rvu, BLKADDR_NDC_NIX0_TX);
837 ret |= rvu_ndc_fix_locked_cacheline(rvu, BLKADDR_NDC_NIX1_RX);
838 ret |= rvu_ndc_fix_locked_cacheline(rvu, BLKADDR_NDC_NIX1_TX);
841 "%s: Not able to unlock cachelines\n", __func__);
850 static void nix_get_aq_req_smq(struct rvu *rvu, struct nix_aq_enq_req *req,
851 u16 *smq, u16 *smq_mask)
853 struct nix_cn10k_aq_enq_req *aq_req;
855 if (!is_rvu_otx2(rvu)) {
856 aq_req = (struct nix_cn10k_aq_enq_req *)req;
857 *smq = aq_req->sq.smq;
858 *smq_mask = aq_req->sq_mask.smq;
861 *smq_mask = req->sq_mask.smq;
865 static int rvu_nix_blk_aq_enq_inst(struct rvu *rvu, struct nix_hw *nix_hw,
866 struct nix_aq_enq_req *req,
867 struct nix_aq_enq_rsp *rsp)
869 struct rvu_hwinfo *hw = rvu->hw;
870 u16 pcifunc = req->hdr.pcifunc;
871 int nixlf, blkaddr, rc = 0;
872 struct nix_aq_inst_s inst;
873 struct rvu_block *block;
874 struct admin_queue *aq;
875 struct rvu_pfvf *pfvf;
881 blkaddr = nix_hw->blkaddr;
882 block = &hw->block[blkaddr];
885 dev_warn(rvu->dev, "%s: NIX AQ not initialized\n", __func__);
886 return NIX_AF_ERR_AQ_ENQUEUE;
889 pfvf = rvu_get_pfvf(rvu, pcifunc);
890 nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
892 /* Skip NIXLF check for broadcast MCE entry and bandwidth profile
893 * operations done by AF itself.
895 if (!((!rsp && req->ctype == NIX_AQ_CTYPE_MCE) ||
896 (req->ctype == NIX_AQ_CTYPE_BANDPROF && !pcifunc))) {
897 if (!pfvf->nixlf || nixlf < 0)
898 return NIX_AF_ERR_AF_LF_INVALID;
901 switch (req->ctype) {
902 case NIX_AQ_CTYPE_RQ:
903 /* Check if index exceeds max number of queues */
904 if (!pfvf->rq_ctx || req->qidx >= pfvf->rq_ctx->qsize)
905 rc = NIX_AF_ERR_AQ_ENQUEUE;
907 case NIX_AQ_CTYPE_SQ:
908 if (!pfvf->sq_ctx || req->qidx >= pfvf->sq_ctx->qsize)
909 rc = NIX_AF_ERR_AQ_ENQUEUE;
911 case NIX_AQ_CTYPE_CQ:
912 if (!pfvf->cq_ctx || req->qidx >= pfvf->cq_ctx->qsize)
913 rc = NIX_AF_ERR_AQ_ENQUEUE;
915 case NIX_AQ_CTYPE_RSS:
916 /* Check if RSS is enabled and qidx is within range */
917 cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_RSS_CFG(nixlf));
918 if (!(cfg & BIT_ULL(4)) || !pfvf->rss_ctx ||
919 (req->qidx >= (256UL << (cfg & 0xF))))
920 rc = NIX_AF_ERR_AQ_ENQUEUE;
922 case NIX_AQ_CTYPE_MCE:
923 cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_MCAST_CFG);
925 /* Check if index exceeds MCE list length */
926 if (!nix_hw->mcast.mce_ctx ||
927 (req->qidx >= (256UL << (cfg & 0xF))))
928 rc = NIX_AF_ERR_AQ_ENQUEUE;
930 /* Adding multicast lists for requests from PF/VFs is not
931 * yet supported, so ignore this.
934 rc = NIX_AF_ERR_AQ_ENQUEUE;
936 case NIX_AQ_CTYPE_BANDPROF:
937 if (nix_verify_bandprof((struct nix_cn10k_aq_enq_req *)req,
939 rc = NIX_AF_ERR_INVALID_BANDPROF;
942 rc = NIX_AF_ERR_AQ_ENQUEUE;
948 nix_get_aq_req_smq(rvu, req, &smq, &smq_mask);
949 /* Check whether the SMQ pointed to by the SQ belongs to this PF/VF */
950 if (req->ctype == NIX_AQ_CTYPE_SQ &&
951 ((req->op == NIX_AQ_INSTOP_INIT && req->sq.ena) ||
952 (req->op == NIX_AQ_INSTOP_WRITE &&
953 req->sq_mask.ena && req->sq.ena && smq_mask))) {
954 if (!is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_SMQ,
956 return NIX_AF_ERR_AQ_ENQUEUE;
959 memset(&inst, 0, sizeof(struct nix_aq_inst_s));
961 inst.cindex = req->qidx;
962 inst.ctype = req->ctype;
964 /* Enqueuing multiple instructions is currently not supported,
965 * so always use the first entry in result memory.
967 inst.res_addr = (u64)aq->res->iova;
969 /* Hardware uses the same aq->res->base for updating the result of
970 * the previous instruction, hence wait here until it is done.
972 spin_lock(&aq->lock);
974 /* Clean result + context memory */
975 memset(aq->res->base, 0, aq->res->entry_sz);
976 /* Context needs to be written at RES_ADDR + 128 */
977 ctx = aq->res->base + 128;
978 /* Mask needs to be written at RES_ADDR + 256 */
979 mask = aq->res->base + 256;
982 case NIX_AQ_INSTOP_WRITE:
983 if (req->ctype == NIX_AQ_CTYPE_RQ)
984 memcpy(mask, &req->rq_mask,
985 sizeof(struct nix_rq_ctx_s));
986 else if (req->ctype == NIX_AQ_CTYPE_SQ)
987 memcpy(mask, &req->sq_mask,
988 sizeof(struct nix_sq_ctx_s));
989 else if (req->ctype == NIX_AQ_CTYPE_CQ)
990 memcpy(mask, &req->cq_mask,
991 sizeof(struct nix_cq_ctx_s));
992 else if (req->ctype == NIX_AQ_CTYPE_RSS)
993 memcpy(mask, &req->rss_mask,
994 sizeof(struct nix_rsse_s));
995 else if (req->ctype == NIX_AQ_CTYPE_MCE)
996 memcpy(mask, &req->mce_mask,
997 sizeof(struct nix_rx_mce_s));
998 else if (req->ctype == NIX_AQ_CTYPE_BANDPROF)
999 memcpy(mask, &req->prof_mask,
1000 sizeof(struct nix_bandprof_s));
1002 case NIX_AQ_INSTOP_INIT:
1003 if (req->ctype == NIX_AQ_CTYPE_RQ)
1004 memcpy(ctx, &req->rq, sizeof(struct nix_rq_ctx_s));
1005 else if (req->ctype == NIX_AQ_CTYPE_SQ)
1006 memcpy(ctx, &req->sq, sizeof(struct nix_sq_ctx_s));
1007 else if (req->ctype == NIX_AQ_CTYPE_CQ)
1008 memcpy(ctx, &req->cq, sizeof(struct nix_cq_ctx_s));
1009 else if (req->ctype == NIX_AQ_CTYPE_RSS)
1010 memcpy(ctx, &req->rss, sizeof(struct nix_rsse_s));
1011 else if (req->ctype == NIX_AQ_CTYPE_MCE)
1012 memcpy(ctx, &req->mce, sizeof(struct nix_rx_mce_s));
1013 else if (req->ctype == NIX_AQ_CTYPE_BANDPROF)
1014 memcpy(ctx, &req->prof, sizeof(struct nix_bandprof_s));
1016 case NIX_AQ_INSTOP_NOP:
1017 case NIX_AQ_INSTOP_READ:
1018 case NIX_AQ_INSTOP_LOCK:
1019 case NIX_AQ_INSTOP_UNLOCK:
1022 rc = NIX_AF_ERR_AQ_ENQUEUE;
1023 spin_unlock(&aq->lock);
1027 /* Submit the instruction to AQ */
1028 rc = nix_aq_enqueue_wait(rvu, block, &inst);
1030 spin_unlock(&aq->lock);
1034 /* Set RQ/SQ/CQ bitmap if respective queue hw context is enabled */
1035 if (req->op == NIX_AQ_INSTOP_INIT) {
1036 if (req->ctype == NIX_AQ_CTYPE_RQ && req->rq.ena)
1037 __set_bit(req->qidx, pfvf->rq_bmap);
1038 if (req->ctype == NIX_AQ_CTYPE_SQ && req->sq.ena)
1039 __set_bit(req->qidx, pfvf->sq_bmap);
1040 if (req->ctype == NIX_AQ_CTYPE_CQ && req->cq.ena)
1041 __set_bit(req->qidx, pfvf->cq_bmap);
1044 if (req->op == NIX_AQ_INSTOP_WRITE) {
1045 if (req->ctype == NIX_AQ_CTYPE_RQ) {
1046 ena = (req->rq.ena & req->rq_mask.ena) |
1047 (test_bit(req->qidx, pfvf->rq_bmap) &
1050 __set_bit(req->qidx, pfvf->rq_bmap);
1052 __clear_bit(req->qidx, pfvf->rq_bmap);
1054 if (req->ctype == NIX_AQ_CTYPE_SQ) {
1055 ena = (req->rq.ena & req->sq_mask.ena) |
1056 (test_bit(req->qidx, pfvf->sq_bmap) &
1059 __set_bit(req->qidx, pfvf->sq_bmap);
1061 __clear_bit(req->qidx, pfvf->sq_bmap);
1063 if (req->ctype == NIX_AQ_CTYPE_CQ) {
1064 ena = (req->rq.ena & req->cq_mask.ena) |
1065 (test_bit(req->qidx, pfvf->cq_bmap) &
1068 __set_bit(req->qidx, pfvf->cq_bmap);
1070 __clear_bit(req->qidx, pfvf->cq_bmap);
1075 /* Copy read context into mailbox */
1076 if (req->op == NIX_AQ_INSTOP_READ) {
1077 if (req->ctype == NIX_AQ_CTYPE_RQ)
1078 memcpy(&rsp->rq, ctx,
1079 sizeof(struct nix_rq_ctx_s));
1080 else if (req->ctype == NIX_AQ_CTYPE_SQ)
1081 memcpy(&rsp->sq, ctx,
1082 sizeof(struct nix_sq_ctx_s));
1083 else if (req->ctype == NIX_AQ_CTYPE_CQ)
1084 memcpy(&rsp->cq, ctx,
1085 sizeof(struct nix_cq_ctx_s));
1086 else if (req->ctype == NIX_AQ_CTYPE_RSS)
1087 memcpy(&rsp->rss, ctx,
1088 sizeof(struct nix_rsse_s));
1089 else if (req->ctype == NIX_AQ_CTYPE_MCE)
1090 memcpy(&rsp->mce, ctx,
1091 sizeof(struct nix_rx_mce_s));
1092 else if (req->ctype == NIX_AQ_CTYPE_BANDPROF)
1093 memcpy(&rsp->prof, ctx,
1094 sizeof(struct nix_bandprof_s));
1098 spin_unlock(&aq->lock);
1102 static int rvu_nix_verify_aq_ctx(struct rvu *rvu, struct nix_hw *nix_hw,
1103 struct nix_aq_enq_req *req, u8 ctype)
1105 struct nix_cn10k_aq_enq_req aq_req;
1106 struct nix_cn10k_aq_enq_rsp aq_rsp;
1109 if (req->ctype != NIX_AQ_CTYPE_CQ)
1112 rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp,
1113 req->hdr.pcifunc, ctype, req->qidx);
1116 "%s: Failed to fetch %s%d context of PFFUNC 0x%x\n",
1117 __func__, nix_get_ctx_name(ctype), req->qidx,
1122 /* Make copy of original context & mask which are required
1125 memcpy(&aq_req.cq_mask, &req->cq_mask, sizeof(struct nix_cq_ctx_s));
1126 memcpy(&aq_req.cq, &req->cq, sizeof(struct nix_cq_ctx_s));
1128 /* exclude fields which HW can update */
1129 aq_req.cq_mask.cq_err = 0;
1130 aq_req.cq_mask.wrptr = 0;
1131 aq_req.cq_mask.tail = 0;
1132 aq_req.cq_mask.head = 0;
1133 aq_req.cq_mask.avg_level = 0;
1134 aq_req.cq_mask.update_time = 0;
1135 aq_req.cq_mask.substream = 0;
1137 /* The context mask (cq_mask) holds the mask value of fields which
1138 * are changed in the AQ WRITE operation.
1139 * For example: cq.drop = 0xa;
1140 *              cq_mask.drop = 0xff;
1141 * The logic below performs '&' between cq and cq_mask so that
1142 * non-updated fields are masked out in both the request and response.
1145 for (word = 0; word < sizeof(struct nix_cq_ctx_s) / sizeof(u64);
1147 *(u64 *)((u8 *)&aq_rsp.cq + word * 8) &=
1148 (*(u64 *)((u8 *)&aq_req.cq_mask + word * 8));
1149 *(u64 *)((u8 *)&aq_req.cq + word * 8) &=
1150 (*(u64 *)((u8 *)&aq_req.cq_mask + word * 8));
1153 if (memcmp(&aq_req.cq, &aq_rsp.cq, sizeof(struct nix_cq_ctx_s)))
1154 return NIX_AF_ERR_AQ_CTX_RETRY_WRITE;
1159 static int rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req,
1160 struct nix_aq_enq_rsp *rsp)
1162 struct nix_hw *nix_hw;
1163 int err, retries = 5;
1166 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, req->hdr.pcifunc);
1168 return NIX_AF_ERR_AF_LF_INVALID;
1170 nix_hw = get_nix_hw(rvu->hw, blkaddr);
1172 return NIX_AF_ERR_INVALID_NIXBLK;
1175 err = rvu_nix_blk_aq_enq_inst(rvu, nix_hw, req, rsp);
1177 /* HW erratum: 'AQ modification to CQ could be discarded on heavy traffic'.
1178 * As a workaround, perform a CQ context read after each AQ write. If the
1179 * read shows the AQ write did not take effect, perform the AQ write again.
1181 if (!err && req->op == NIX_AQ_INSTOP_WRITE) {
1182 err = rvu_nix_verify_aq_ctx(rvu, nix_hw, req, NIX_AQ_CTYPE_CQ);
1183 if (err == NIX_AF_ERR_AQ_CTX_RETRY_WRITE) {
1187 return NIX_AF_ERR_CQ_CTX_WRITE_ERR;
1194 static const char *nix_get_ctx_name(int ctype)
1197 case NIX_AQ_CTYPE_CQ:
1199 case NIX_AQ_CTYPE_SQ:
1201 case NIX_AQ_CTYPE_RQ:
1203 case NIX_AQ_CTYPE_RSS:
1209 static int nix_lf_hwctx_disable(struct rvu *rvu, struct hwctx_disable_req *req)
1211 struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
1212 struct nix_aq_enq_req aq_req;
1213 unsigned long *bmap;
1214 int qidx, q_cnt = 0;
1217 if (!pfvf->cq_ctx || !pfvf->sq_ctx || !pfvf->rq_ctx)
1218 return NIX_AF_ERR_AQ_ENQUEUE;
1220 memset(&aq_req, 0, sizeof(struct nix_aq_enq_req));
1221 aq_req.hdr.pcifunc = req->hdr.pcifunc;
1223 if (req->ctype == NIX_AQ_CTYPE_CQ) {
1225 aq_req.cq_mask.ena = 1;
1226 aq_req.cq.bp_ena = 0;
1227 aq_req.cq_mask.bp_ena = 1;
1228 q_cnt = pfvf->cq_ctx->qsize;
1229 bmap = pfvf->cq_bmap;
1231 if (req->ctype == NIX_AQ_CTYPE_SQ) {
1233 aq_req.sq_mask.ena = 1;
1234 q_cnt = pfvf->sq_ctx->qsize;
1235 bmap = pfvf->sq_bmap;
1237 if (req->ctype == NIX_AQ_CTYPE_RQ) {
1239 aq_req.rq_mask.ena = 1;
1240 q_cnt = pfvf->rq_ctx->qsize;
1241 bmap = pfvf->rq_bmap;
1244 aq_req.ctype = req->ctype;
1245 aq_req.op = NIX_AQ_INSTOP_WRITE;
1247 for (qidx = 0; qidx < q_cnt; qidx++) {
1248 if (!test_bit(qidx, bmap))
1251 rc = rvu_nix_aq_enq_inst(rvu, &aq_req, NULL);
1254 dev_err(rvu->dev, "Failed to disable %s:%d context\n",
1255 nix_get_ctx_name(req->ctype), qidx);
1262 #ifdef CONFIG_NDC_DIS_DYNAMIC_CACHING
1263 static int nix_lf_hwctx_lockdown(struct rvu *rvu, struct nix_aq_enq_req *req)
1265 struct nix_aq_enq_req lock_ctx_req;
1268 if (req->op != NIX_AQ_INSTOP_INIT)
1271 if (req->ctype == NIX_AQ_CTYPE_MCE ||
1272 req->ctype == NIX_AQ_CTYPE_DYNO)
1275 memset(&lock_ctx_req, 0, sizeof(struct nix_aq_enq_req));
1276 lock_ctx_req.hdr.pcifunc = req->hdr.pcifunc;
1277 lock_ctx_req.ctype = req->ctype;
1278 lock_ctx_req.op = NIX_AQ_INSTOP_LOCK;
1279 lock_ctx_req.qidx = req->qidx;
1280 err = rvu_nix_aq_enq_inst(rvu, &lock_ctx_req, NULL);
1283 "PFUNC 0x%x: Failed to lock NIX %s:%d context\n",
1285 nix_get_ctx_name(req->ctype), req->qidx);
1289 int rvu_mbox_handler_nix_aq_enq(struct rvu *rvu,
1290 struct nix_aq_enq_req *req,
1291 struct nix_aq_enq_rsp *rsp)
1295 err = rvu_nix_aq_enq_inst(rvu, req, rsp);
1297 err = nix_lf_hwctx_lockdown(rvu, req);
1302 int rvu_mbox_handler_nix_aq_enq(struct rvu *rvu,
1303 struct nix_aq_enq_req *req,
1304 struct nix_aq_enq_rsp *rsp)
1306 return rvu_nix_aq_enq_inst(rvu, req, rsp);
1309 /* CN10K mbox handler */
1310 int rvu_mbox_handler_nix_cn10k_aq_enq(struct rvu *rvu,
1311 struct nix_cn10k_aq_enq_req *req,
1312 struct nix_cn10k_aq_enq_rsp *rsp)
1314 return rvu_nix_aq_enq_inst(rvu, (struct nix_aq_enq_req *)req,
1315 (struct nix_aq_enq_rsp *)rsp);
1318 int rvu_mbox_handler_nix_hwctx_disable(struct rvu *rvu,
1319 struct hwctx_disable_req *req,
1320 struct msg_rsp *rsp)
1322 return nix_lf_hwctx_disable(rvu, req);
1325 int rvu_mbox_handler_nix_lf_alloc(struct rvu *rvu,
1326 struct nix_lf_alloc_req *req,
1327 struct nix_lf_alloc_rsp *rsp)
1329 int nixlf, qints, hwctx_size, intf, err, rc = 0;
1330 struct rvu_hwinfo *hw = rvu->hw;
1331 u16 pcifunc = req->hdr.pcifunc;
1332 struct rvu_block *block;
1333 struct rvu_pfvf *pfvf;
1337 if (!req->rq_cnt || !req->sq_cnt || !req->cq_cnt)
1338 return NIX_AF_ERR_PARAM;
1341 req->way_mask &= 0xFFFF;
1343 pfvf = rvu_get_pfvf(rvu, pcifunc);
1344 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
1345 if (!pfvf->nixlf || blkaddr < 0)
1346 return NIX_AF_ERR_AF_LF_INVALID;
1348 block = &hw->block[blkaddr];
1349 nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
1351 return NIX_AF_ERR_AF_LF_INVALID;
1353 /* Check if requested 'NIXLF <=> NPALF' mapping is valid */
1354 if (req->npa_func) {
1355 /* If default, use 'this' NIXLF's PFFUNC */
1356 if (req->npa_func == RVU_DEFAULT_PF_FUNC)
1357 req->npa_func = pcifunc;
1358 if (!is_pffunc_map_valid(rvu, req->npa_func, BLKTYPE_NPA))
1359 return NIX_AF_INVAL_NPA_PF_FUNC;
1362 /* Check if requested 'NIXLF <=> SSOLF' mapping is valid */
1363 if (req->sso_func) {
1364 /* If default, use 'this' NIXLF's PFFUNC */
1365 if (req->sso_func == RVU_DEFAULT_PF_FUNC)
1366 req->sso_func = pcifunc;
1367 if (!is_pffunc_map_valid(rvu, req->sso_func, BLKTYPE_SSO))
1368 return NIX_AF_INVAL_SSO_PF_FUNC;
1371 /* If RSS is being enabled, check if the requested config is valid.
1372 * RSS table size should be a power of two, otherwise
1373 * RSS_GRP::OFFSET + adder might go beyond that group or the
1374 * entire table can't be used.
1376 if (req->rss_sz && (req->rss_sz > MAX_RSS_INDIR_TBL_SIZE ||
1377 !is_power_of_2(req->rss_sz)))
1378 return NIX_AF_ERR_RSS_SIZE_INVALID;
1381 (!req->rss_grps || req->rss_grps > MAX_RSS_GROUPS))
1382 return NIX_AF_ERR_RSS_GRPS_INVALID;
1384 /* Reset this NIX LF */
1385 err = rvu_lf_reset(rvu, block, nixlf);
1387 dev_err(rvu->dev, "Failed to reset NIX%d LF%d\n",
1388 block->addr - BLKADDR_NIX0, nixlf);
1389 return NIX_AF_ERR_LF_RESET;
1392 ctx_cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST3);
1394 /* Alloc NIX RQ HW context memory and config the base */
1395 hwctx_size = 1UL << ((ctx_cfg >> 4) & 0xF);
1396 err = qmem_alloc(rvu->dev, &pfvf->rq_ctx, req->rq_cnt, hwctx_size);
1400 pfvf->rq_bmap = kcalloc(req->rq_cnt, sizeof(long), GFP_KERNEL);
1404 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RQS_BASE(nixlf),
1405 (u64)pfvf->rq_ctx->iova);
1407 /* Set caching and queue count in HW */
1408 cfg = BIT_ULL(36) | (req->rq_cnt - 1) | req->way_mask << 20;
1409 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RQS_CFG(nixlf), cfg);
1411 /* Alloc NIX SQ HW context memory and config the base */
1412 hwctx_size = 1UL << (ctx_cfg & 0xF);
1413 err = qmem_alloc(rvu->dev, &pfvf->sq_ctx, req->sq_cnt, hwctx_size);
1417 pfvf->sq_bmap = kcalloc(req->sq_cnt, sizeof(long), GFP_KERNEL);
1421 rvu_write64(rvu, blkaddr, NIX_AF_LFX_SQS_BASE(nixlf),
1422 (u64)pfvf->sq_ctx->iova);
1424 cfg = BIT_ULL(36) | (req->sq_cnt - 1) | req->way_mask << 20;
1425 rvu_write64(rvu, blkaddr, NIX_AF_LFX_SQS_CFG(nixlf), cfg);
1427 /* Alloc NIX CQ HW context memory and config the base */
1428 hwctx_size = 1UL << ((ctx_cfg >> 8) & 0xF);
1429 err = qmem_alloc(rvu->dev, &pfvf->cq_ctx, req->cq_cnt, hwctx_size);
1433 pfvf->cq_bmap = kcalloc(req->cq_cnt, sizeof(long), GFP_KERNEL);
1437 rvu_write64(rvu, blkaddr, NIX_AF_LFX_CQS_BASE(nixlf),
1438 (u64)pfvf->cq_ctx->iova);
1440 cfg = BIT_ULL(36) | (req->cq_cnt - 1) | req->way_mask << 20;
1441 rvu_write64(rvu, blkaddr, NIX_AF_LFX_CQS_CFG(nixlf), cfg);
1443 /* Initialize receive side scaling (RSS) */
1444 hwctx_size = 1UL << ((ctx_cfg >> 12) & 0xF);
1445 err = nixlf_rss_ctx_init(rvu, blkaddr, pfvf, nixlf, req->rss_sz,
1446 req->rss_grps, hwctx_size, req->way_mask,
1447 !!(req->flags & NIX_LF_RSS_TAG_LSB_AS_ADDER));
1451 /* Alloc memory for CQINT's HW contexts */
1452 cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
1453 qints = (cfg >> 24) & 0xFFF;
1454 hwctx_size = 1UL << ((ctx_cfg >> 24) & 0xF);
1455 err = qmem_alloc(rvu->dev, &pfvf->cq_ints_ctx, qints, hwctx_size);
1459 rvu_write64(rvu, blkaddr, NIX_AF_LFX_CINTS_BASE(nixlf),
1460 (u64)pfvf->cq_ints_ctx->iova);
1462 rvu_write64(rvu, blkaddr, NIX_AF_LFX_CINTS_CFG(nixlf),
1463 BIT_ULL(36) | req->way_mask << 20);
1465 /* Alloc memory for QINT's HW contexts */
1466 cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
1467 qints = (cfg >> 12) & 0xFFF;
1468 hwctx_size = 1UL << ((ctx_cfg >> 20) & 0xF);
1469 err = qmem_alloc(rvu->dev, &pfvf->nix_qints_ctx, qints, hwctx_size);
1473 rvu_write64(rvu, blkaddr, NIX_AF_LFX_QINTS_BASE(nixlf),
1474 (u64)pfvf->nix_qints_ctx->iova);
1475 rvu_write64(rvu, blkaddr, NIX_AF_LFX_QINTS_CFG(nixlf),
1476 BIT_ULL(36) | req->way_mask << 20);
1478 /* Setup VLANX TPID's.
1479 * Use VLAN1 for 802.1Q
1480 * and VLAN0 for 802.1AD.
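 * The value below packs 0x8100 (802.1Q) into the upper 16 bits and
 * 0x88A8 (802.1AD) into the lower 16 bits of NIX_AF_LFX_TX_CFG.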
1482 cfg = (0x8100ULL << 16) | 0x88A8ULL;
1483 rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG(nixlf), cfg);
1485 /* Enable LMTST for this NIX LF */
1486 rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG2(nixlf), BIT_ULL(0));
1488 /* Set CQE/WQE size, NPA_PF_FUNC for SQBs and also SSO_PF_FUNC */
1490 cfg = req->npa_func;
1492 cfg |= (u64)req->sso_func << 16;
1494 cfg |= (u64)req->xqe_sz << 33;
1495 rvu_write64(rvu, blkaddr, NIX_AF_LFX_CFG(nixlf), cfg);
1497 /* Config Rx pkt length, csum checks and apad enable / disable */
1498 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf), req->rx_cfg);
1500 /* Configure pkind for TX parse config */
1501 cfg = NPC_TX_DEF_PKIND;
1502 rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_PARSE_CFG(nixlf), cfg);
1504 intf = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
1505 if (is_sdp_pfvf(pcifunc))
1506 intf = NIX_INTF_TYPE_SDP;
1508 err = nix_interface_init(rvu, pcifunc, intf, nixlf, rsp,
1509 !!(req->flags & NIX_LF_LBK_BLK_SEL));
1513 /* Disable NPC entries as NIXLF's contexts are not initialized yet */
1514 rvu_npc_disable_default_entries(rvu, pcifunc, nixlf);
1516 /* Configure RX VTAG Type 7 (strip) for VF VLAN */
1517 rvu_write64(rvu, blkaddr,
1518 NIX_AF_LFX_RX_VTAG_TYPEX(nixlf, NIX_AF_LFX_RX_VTAG_TYPE7),
1519 VTAGSIZE_T4 | VTAG_STRIP);
1524 nix_ctx_free(rvu, pfvf);
1528 /* Set macaddr of this PF/VF */
1529 ether_addr_copy(rsp->mac_addr, pfvf->mac_addr);
1531 /* set SQB size info */
1532 cfg = rvu_read64(rvu, blkaddr, NIX_AF_SQ_CONST);
1533 rsp->sqb_size = (cfg >> 34) & 0xFFFF;
1534 rsp->rx_chan_base = pfvf->rx_chan_base;
1535 rsp->tx_chan_base = pfvf->tx_chan_base;
1536 rsp->rx_chan_cnt = pfvf->rx_chan_cnt;
1537 rsp->tx_chan_cnt = pfvf->tx_chan_cnt;
1538 rsp->lso_tsov4_idx = NIX_LSO_FORMAT_IDX_TSOV4;
1539 rsp->lso_tsov6_idx = NIX_LSO_FORMAT_IDX_TSOV6;
1540 /* Get HW supported stat count */
1541 cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);
1542 rsp->lf_rx_stats = ((cfg >> 32) & 0xFF);
1543 rsp->lf_tx_stats = ((cfg >> 24) & 0xFF);
1544 /* Get count of CQ IRQs and error IRQs supported per LF */
1545 cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
1546 rsp->qints = ((cfg >> 12) & 0xFFF);
1547 rsp->cints = ((cfg >> 24) & 0xFFF);
1548 rsp->cgx_links = hw->cgx_links;
1549 rsp->lbk_links = hw->lbk_links;
1550 rsp->sdp_links = hw->sdp_links;
1555 int rvu_mbox_handler_nix_lf_free(struct rvu *rvu, struct nix_lf_free_req *req,
1556 struct msg_rsp *rsp)
1558 struct rvu_hwinfo *hw = rvu->hw;
1559 u16 pcifunc = req->hdr.pcifunc;
1560 struct rvu_block *block;
1561 int blkaddr, nixlf, err;
1562 struct rvu_pfvf *pfvf;
1564 pfvf = rvu_get_pfvf(rvu, pcifunc);
1565 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
1566 if (!pfvf->nixlf || blkaddr < 0)
1567 return NIX_AF_ERR_AF_LF_INVALID;
1569 block = &hw->block[blkaddr];
1570 nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
1572 return NIX_AF_ERR_AF_LF_INVALID;
1574 if (req->flags & NIX_LF_DISABLE_FLOWS)
1575 rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf);
1577 rvu_npc_free_mcam_entries(rvu, pcifunc, nixlf);
1579 /* Free any tx vtag def entries used by this NIX LF */
1580 if (!(req->flags & NIX_LF_DONT_FREE_TX_VTAG))
1581 nix_free_tx_vtag_entries(rvu, pcifunc);
1583 nix_interface_deinit(rvu, pcifunc, nixlf);
1585 /* Reset this NIX LF */
1586 err = rvu_lf_reset(rvu, block, nixlf);
1588 dev_err(rvu->dev, "Failed to reset NIX%d LF%d\n",
1589 block->addr - BLKADDR_NIX0, nixlf);
1590 return NIX_AF_ERR_LF_RESET;
1593 nix_ctx_free(rvu, pfvf);
1598 int rvu_mbox_handler_nix_mark_format_cfg(struct rvu *rvu,
1599 struct nix_mark_format_cfg *req,
1600 struct nix_mark_format_cfg_rsp *rsp)
1602 u16 pcifunc = req->hdr.pcifunc;
1603 struct nix_hw *nix_hw;
1604 struct rvu_pfvf *pfvf;
1608 pfvf = rvu_get_pfvf(rvu, pcifunc);
1609 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
1610 if (!pfvf->nixlf || blkaddr < 0)
1611 return NIX_AF_ERR_AF_LF_INVALID;
1613 nix_hw = get_nix_hw(rvu->hw, blkaddr);
1615 return NIX_AF_ERR_INVALID_NIXBLK;
1617 cfg = (((u32)req->offset & 0x7) << 16) |
1618 (((u32)req->y_mask & 0xF) << 12) |
1619 (((u32)req->y_val & 0xF) << 8) |
1620 (((u32)req->r_mask & 0xF) << 4) | ((u32)req->r_val & 0xF);
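/* Illustrative packing example: offset = 1, y_mask = 0xF, y_val = 0x2,
 * r_mask = 0xF, r_val = 0x3 yields cfg = 0x1F2F3.
 */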
1622 rc = rvu_nix_reserve_mark_format(rvu, nix_hw, blkaddr, cfg);
1624 dev_err(rvu->dev, "No mark_format_ctl for (pf:%d, vf:%d)",
1625 rvu_get_pf(pcifunc), pcifunc & RVU_PFVF_FUNC_MASK);
1626 return NIX_AF_ERR_MARK_CFG_FAIL;
1629 rsp->mark_format_idx = rc;
1633 /* Handle shaper update specially for a few revisions */
1635 handle_txschq_shaper_update(struct rvu *rvu, int blkaddr, int nixlf,
1636 int lvl, u64 reg, u64 regval)
1638 u64 regbase, oldval, sw_xoff = 0;
1639 u64 dbgval, md_debug0 = 0;
1640 unsigned long poll_tmo;
1644 regbase = reg & 0xFFFF;
1645 schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);
1647 /* Check for rate register */
1649 case NIX_TXSCH_LVL_TL1:
1650 md_debug0 = NIX_AF_TL1X_MD_DEBUG0(schq);
1651 sw_xoff = NIX_AF_TL1X_SW_XOFF(schq);
1653 rate_reg = !!(regbase == NIX_AF_TL1X_CIR(0));
1655 case NIX_TXSCH_LVL_TL2:
1656 md_debug0 = NIX_AF_TL2X_MD_DEBUG0(schq);
1657 sw_xoff = NIX_AF_TL2X_SW_XOFF(schq);
1659 rate_reg = (regbase == NIX_AF_TL2X_CIR(0) ||
1660 regbase == NIX_AF_TL2X_PIR(0));
1662 case NIX_TXSCH_LVL_TL3:
1663 md_debug0 = NIX_AF_TL3X_MD_DEBUG0(schq);
1664 sw_xoff = NIX_AF_TL3X_SW_XOFF(schq);
1666 rate_reg = (regbase == NIX_AF_TL3X_CIR(0) ||
1667 regbase == NIX_AF_TL3X_PIR(0));
1669 case NIX_TXSCH_LVL_TL4:
1670 md_debug0 = NIX_AF_TL4X_MD_DEBUG0(schq);
1671 sw_xoff = NIX_AF_TL4X_SW_XOFF(schq);
1673 rate_reg = (regbase == NIX_AF_TL4X_CIR(0) ||
1674 regbase == NIX_AF_TL4X_PIR(0));
1676 case NIX_TXSCH_LVL_MDQ:
1677 sw_xoff = NIX_AF_MDQX_SW_XOFF(schq);
1678 rate_reg = (regbase == NIX_AF_MDQX_CIR(0) ||
1679 regbase == NIX_AF_MDQX_PIR(0));
1686 /* Nothing special to do when state is not toggled */
1687 oldval = rvu_read64(rvu, blkaddr, reg);
1688 if ((oldval & 0x1) == (regval & 0x1)) {
1689 rvu_write64(rvu, blkaddr, reg, regval);
1693 /* PIR/CIR disable */
1694 if (!(regval & 0x1)) {
1695 rvu_write64(rvu, blkaddr, sw_xoff, 1);
1696 rvu_write64(rvu, blkaddr, reg, 0);
1698 rvu_write64(rvu, blkaddr, sw_xoff, 0);
1702 /* PIR/CIR enable */
1703 rvu_write64(rvu, blkaddr, sw_xoff, 1);
1705 poll_tmo = jiffies + usecs_to_jiffies(10000);
1706 /* Wait until VLD(bit32) == 1 or C_CON(bit48) == 0 */
1708 if (time_after(jiffies, poll_tmo)) {
1710 "NIXLF%d: TLX%u(lvl %u) CIR/PIR enable failed\n",
1715 dbgval = rvu_read64(rvu, blkaddr, md_debug0);
1716 } while (!(dbgval & BIT_ULL(32)) && (dbgval & BIT_ULL(48)));
1718 rvu_write64(rvu, blkaddr, reg, regval);
1720 rvu_write64(rvu, blkaddr, sw_xoff, 0);
1724 static void nix_reset_tx_schedule(struct rvu *rvu, int blkaddr,
1727 u64 tlx_parent = 0, tlx_schedule = 0;
1730 case NIX_TXSCH_LVL_TL2:
1731 tlx_parent = NIX_AF_TL2X_PARENT(schq);
1732 tlx_schedule = NIX_AF_TL2X_SCHEDULE(schq);
1734 case NIX_TXSCH_LVL_TL3:
1735 tlx_parent = NIX_AF_TL3X_PARENT(schq);
1736 tlx_schedule = NIX_AF_TL3X_SCHEDULE(schq);
1738 case NIX_TXSCH_LVL_TL4:
1739 tlx_parent = NIX_AF_TL4X_PARENT(schq);
1740 tlx_schedule = NIX_AF_TL4X_SCHEDULE(schq);
1742 case NIX_TXSCH_LVL_MDQ:
1743 /* no need to reset SMQ_CFG as HW clears this CSR
1746 tlx_parent = NIX_AF_MDQX_PARENT(schq);
1747 tlx_schedule = NIX_AF_MDQX_SCHEDULE(schq);
1754 rvu_write64(rvu, blkaddr, tlx_parent, 0x0);
1757 rvu_write64(rvu, blkaddr, tlx_schedule, 0x0);
1760 /* Disable shaping of pkts by a scheduler queue
1761 * at a given scheduler level.
1763 static void nix_reset_tx_shaping(struct rvu *rvu, int blkaddr,
1764 int nixlf, int lvl, int schq)
1766 struct rvu_hwinfo *hw = rvu->hw;
1767 u64 cir_reg = 0, pir_reg = 0;
1771 case NIX_TXSCH_LVL_TL1:
1772 cir_reg = NIX_AF_TL1X_CIR(schq);
1773 pir_reg = 0; /* PIR not available at TL1 */
1775 case NIX_TXSCH_LVL_TL2:
1776 cir_reg = NIX_AF_TL2X_CIR(schq);
1777 pir_reg = NIX_AF_TL2X_PIR(schq);
1779 case NIX_TXSCH_LVL_TL3:
1780 cir_reg = NIX_AF_TL3X_CIR(schq);
1781 pir_reg = NIX_AF_TL3X_PIR(schq);
1783 case NIX_TXSCH_LVL_TL4:
1784 cir_reg = NIX_AF_TL4X_CIR(schq);
1785 pir_reg = NIX_AF_TL4X_PIR(schq);
1787 case NIX_TXSCH_LVL_MDQ:
1788 cir_reg = NIX_AF_MDQX_CIR(schq);
1789 pir_reg = NIX_AF_MDQX_PIR(schq);
1793 /* Shaper state toggle needs wait/poll */
1794 if (hw->cap.nix_shaper_toggle_wait) {
1796 handle_txschq_shaper_update(rvu, blkaddr, nixlf,
1799 handle_txschq_shaper_update(rvu, blkaddr, nixlf,
1806 cfg = rvu_read64(rvu, blkaddr, cir_reg);
1807 rvu_write64(rvu, blkaddr, cir_reg, cfg & ~BIT_ULL(0));
1811 cfg = rvu_read64(rvu, blkaddr, pir_reg);
1812 rvu_write64(rvu, blkaddr, pir_reg, cfg & ~BIT_ULL(0));
1815 static void nix_reset_tx_linkcfg(struct rvu *rvu, int blkaddr,
1818 struct rvu_hwinfo *hw = rvu->hw;
1822 if (lvl >= hw->cap.nix_tx_aggr_lvl)
1825 /* Reset TL4's SDP link config */
1826 if (lvl == NIX_TXSCH_LVL_TL4)
1827 rvu_write64(rvu, blkaddr, NIX_AF_TL4X_SDP_LINK_CFG(schq), 0x00);
1829 link_level = rvu_read64(rvu, blkaddr, NIX_AF_PSE_CHANNEL_LEVEL) & 0x01 ?
1830 NIX_TXSCH_LVL_TL3 : NIX_TXSCH_LVL_TL2;
1831 if (lvl != link_level)
1834 /* Reset TL2's CGX or LBK link config */
1835 for (link = 0; link < (hw->cgx_links + hw->lbk_links); link++)
1836 rvu_write64(rvu, blkaddr,
1837 NIX_AF_TL3_TL2X_LINKX_CFG(schq, link), 0x00);
1840 static void nix_clear_tx_xoff(struct rvu *rvu, int blkaddr,
1843 struct rvu_hwinfo *hw = rvu->hw;
1846 /* Skip this if shaping is not supported */
1847 if (!hw->cap.nix_shaping)
1850 /* Clear level specific SW_XOFF */
1852 case NIX_TXSCH_LVL_TL1:
1853 reg = NIX_AF_TL1X_SW_XOFF(schq);
1855 case NIX_TXSCH_LVL_TL2:
1856 reg = NIX_AF_TL2X_SW_XOFF(schq);
1858 case NIX_TXSCH_LVL_TL3:
1859 reg = NIX_AF_TL3X_SW_XOFF(schq);
1861 case NIX_TXSCH_LVL_TL4:
1862 reg = NIX_AF_TL4X_SW_XOFF(schq);
1864 case NIX_TXSCH_LVL_MDQ:
1865 reg = NIX_AF_MDQX_SW_XOFF(schq);
1871 rvu_write64(rvu, blkaddr, reg, 0x0);
1874 static int nix_get_tx_link(struct rvu *rvu, u16 pcifunc)
1876 struct rvu_hwinfo *hw = rvu->hw;
1877 int pf = rvu_get_pf(pcifunc);
1878 u8 cgx_id = 0, lmac_id = 0;
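/* TX link numbering assumed by this helper: CGX LMAC links first
 * (cgx_id * lmac_per_cgx + lmac_id), then LBK links, then the SDP link.
 */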
1880 if (is_afvf(pcifunc)) {/* LBK links */
1881 return hw->cgx_links;
1882 } else if (is_pf_cgxmapped(rvu, pf)) {
1883 rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
1884 return (cgx_id * hw->lmac_per_cgx) + lmac_id;
1888 return hw->cgx_links + hw->lbk_links;
1891 static void nix_get_txschq_range(struct rvu *rvu, u16 pcifunc,
1892 int link, int *start, int *end)
1894 struct rvu_hwinfo *hw = rvu->hw;
1895 int pf = rvu_get_pf(pcifunc);
1897 if (is_afvf(pcifunc)) { /* LBK links */
1898 *start = hw->cap.nix_txsch_per_cgx_lmac * link;
1899 *end = *start + hw->cap.nix_txsch_per_lbk_lmac;
1900 } else if (is_pf_cgxmapped(rvu, pf)) { /* CGX links */
1901 *start = hw->cap.nix_txsch_per_cgx_lmac * link;
1902 *end = *start + hw->cap.nix_txsch_per_cgx_lmac;
1903 } else { /* SDP link */
1904 *start = (hw->cap.nix_txsch_per_cgx_lmac * hw->cgx_links) +
1905 (hw->cap.nix_txsch_per_lbk_lmac * hw->lbk_links);
1906 *end = *start + hw->cap.nix_txsch_per_sdp_lmac;
1910 static int nix_check_txschq_alloc_req(struct rvu *rvu, int lvl, u16 pcifunc,
1911 struct nix_hw *nix_hw,
1912 struct nix_txsch_alloc_req *req)
1914 struct rvu_hwinfo *hw = rvu->hw;
1915 int schq, req_schq, free_cnt;
1916 struct nix_txsch *txsch;
1917 int link, start, end;
1919 txsch = &nix_hw->txsch[lvl];
1920 req_schq = req->schq_contig[lvl] + req->schq[lvl];
1925 link = nix_get_tx_link(rvu, pcifunc);
1927 /* For traffic aggregating scheduler level, one queue is enough */
1928 if (lvl >= hw->cap.nix_tx_aggr_lvl) {
1930 return NIX_AF_ERR_TLX_ALLOC_FAIL;
1934 /* Get free SCHQ count and check if request can be accommodated */
1935 if (hw->cap.nix_fixed_txschq_mapping) {
1936 nix_get_txschq_range(rvu, pcifunc, link, &start, &end);
1937 schq = start + (pcifunc & RVU_PFVF_FUNC_MASK);
1938 if (end <= txsch->schq.max && schq < end &&
1939 !test_bit(schq, txsch->schq.bmap))
1944 free_cnt = rvu_rsrc_free_count(&txsch->schq);
1947 if (free_cnt < req_schq || req->schq[lvl] > MAX_TXSCHQ_PER_FUNC ||
1948 req->schq_contig[lvl] > MAX_TXSCHQ_PER_FUNC)
1949 return NIX_AF_ERR_TLX_ALLOC_FAIL;
1951 /* If contiguous queues are needed, check for availability */
1952 if (!hw->cap.nix_fixed_txschq_mapping && req->schq_contig[lvl] &&
1953 !rvu_rsrc_check_contig(&txsch->schq, req->schq_contig[lvl]))
1954 return NIX_AF_ERR_TLX_ALLOC_FAIL;
1959 static void nix_txsch_alloc(struct rvu *rvu, struct nix_txsch *txsch,
1960 struct nix_txsch_alloc_rsp *rsp,
1961 int lvl, int start, int end)
1963 struct rvu_hwinfo *hw = rvu->hw;
1964 u16 pcifunc = rsp->hdr.pcifunc;
1967 /* For traffic aggregating levels, queue alloc is based
1968 * on the transmit link to which the PF_FUNC is mapped.
1970 if (lvl >= hw->cap.nix_tx_aggr_lvl) {
1971 /* A single TL queue is allocated */
1972 if (rsp->schq_contig[lvl]) {
1973 rsp->schq_contig[lvl] = 1;
1974 rsp->schq_contig_list[lvl][0] = start;
1977 /* Both contig and non-contig reqs don't make sense here */
1978 if (rsp->schq_contig[lvl])
1981 if (rsp->schq[lvl]) {
1983 rsp->schq_list[lvl][0] = start;
1988 /* Adjust the queue request count if HW supports
1989 * only a fixed one-queue-per-level configuration.
1991 if (hw->cap.nix_fixed_txschq_mapping) {
1992 idx = pcifunc & RVU_PFVF_FUNC_MASK;
1994 if (idx >= (end - start) || test_bit(schq, txsch->schq.bmap)) {
1995 rsp->schq_contig[lvl] = 0;
2000 if (rsp->schq_contig[lvl]) {
2001 rsp->schq_contig[lvl] = 1;
2002 set_bit(schq, txsch->schq.bmap);
2003 rsp->schq_contig_list[lvl][0] = schq;
2005 } else if (rsp->schq[lvl]) {
2007 set_bit(schq, txsch->schq.bmap);
2008 rsp->schq_list[lvl][0] = schq;
2013 /* Allocate the contiguous queue indices request first */
2014 if (rsp->schq_contig[lvl]) {
2015 schq = bitmap_find_next_zero_area(txsch->schq.bmap,
2016 txsch->schq.max, start,
2017 rsp->schq_contig[lvl], 0);
2019 rsp->schq_contig[lvl] = 0;
2020 for (idx = 0; idx < rsp->schq_contig[lvl]; idx++) {
2021 set_bit(schq, txsch->schq.bmap);
2022 rsp->schq_contig_list[lvl][idx] = schq;
2027 /* Allocate non-contiguous queue indices */
2028 if (rsp->schq[lvl]) {
2030 for (schq = start; schq < end; schq++) {
2031 if (!test_bit(schq, txsch->schq.bmap)) {
2032 set_bit(schq, txsch->schq.bmap);
2033 rsp->schq_list[lvl][idx++] = schq;
2035 if (idx == rsp->schq[lvl])
2038 /* Update how many were allocated */
2039 rsp->schq[lvl] = idx;
2043 int rvu_mbox_handler_nix_txsch_alloc(struct rvu *rvu,
2044 struct nix_txsch_alloc_req *req,
2045 struct nix_txsch_alloc_rsp *rsp)
2047 struct rvu_hwinfo *hw = rvu->hw;
2048 u16 pcifunc = req->hdr.pcifunc;
2049 int link, blkaddr, rc = 0;
2050 int lvl, idx, start, end;
2051 struct nix_txsch *txsch;
2052 struct nix_hw *nix_hw;
2057 rc = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
2061 nix_hw = get_nix_hw(rvu->hw, blkaddr);
2063 return NIX_AF_ERR_INVALID_NIXBLK;
2065 mutex_lock(&rvu->rsrc_lock);
2067 /* Check if request is valid as per HW capabilities
2068 * and can be accommodated.
2070 for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
2071 rc = nix_check_txschq_alloc_req(rvu, lvl, pcifunc, nix_hw, req);
2076 /* Allocate requested Tx scheduler queues */
2077 for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
2078 txsch = &nix_hw->txsch[lvl];
2079 pfvf_map = txsch->pfvf_map;
2081 if (!req->schq[lvl] && !req->schq_contig[lvl])
2084 rsp->schq[lvl] = req->schq[lvl];
2085 rsp->schq_contig[lvl] = req->schq_contig[lvl];
2087 link = nix_get_tx_link(rvu, pcifunc);
2089 if (lvl >= hw->cap.nix_tx_aggr_lvl) {
2092 } else if (hw->cap.nix_fixed_txschq_mapping) {
2093 nix_get_txschq_range(rvu, pcifunc, link, &start, &end);
2096 end = txsch->schq.max;
2099 nix_txsch_alloc(rvu, txsch, rsp, lvl, start, end);
2101 /* Reset queue config */
2102 for (idx = 0; idx < req->schq_contig[lvl]; idx++) {
2103 schq = rsp->schq_contig_list[lvl][idx];
2104 if (!(TXSCH_MAP_FLAGS(pfvf_map[schq]) &
2105 NIX_TXSCHQ_CFG_DONE))
2106 pfvf_map[schq] = TXSCH_MAP(pcifunc, 0);
2107 nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
2108 nix_reset_tx_shaping(rvu, blkaddr, nixlf, lvl, schq);
2109 nix_reset_tx_schedule(rvu, blkaddr, lvl, schq);
2112 for (idx = 0; idx < req->schq[lvl]; idx++) {
2113 schq = rsp->schq_list[lvl][idx];
2114 if (!(TXSCH_MAP_FLAGS(pfvf_map[schq]) &
2115 NIX_TXSCHQ_CFG_DONE))
2116 pfvf_map[schq] = TXSCH_MAP(pcifunc, 0);
2117 nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
2118 nix_reset_tx_shaping(rvu, blkaddr, nixlf, lvl, schq);
2119 nix_reset_tx_schedule(rvu, blkaddr, lvl, schq);
2123 rsp->aggr_level = hw->cap.nix_tx_aggr_lvl;
2124 rsp->aggr_lvl_rr_prio = TXSCH_TL1_DFLT_RR_PRIO;
2125 rsp->link_cfg_lvl = rvu_read64(rvu, blkaddr,
2126 NIX_AF_PSE_CHANNEL_LEVEL) & 0x01 ?
2127 NIX_TXSCH_LVL_TL3 : NIX_TXSCH_LVL_TL2;
2130 rc = NIX_AF_ERR_TLX_ALLOC_FAIL;
2132 mutex_unlock(&rvu->rsrc_lock);
2136 static void nix_smq_flush_fill_ctx(struct rvu *rvu, int blkaddr, int smq,
2137 struct nix_smq_flush_ctx *smq_flush_ctx)
2139 struct nix_smq_tree_ctx *smq_tree_ctx;
2140 u64 parent_off, regval;
2144 smq_flush_ctx->smq = smq;
2147 for (lvl = NIX_TXSCH_LVL_SMQ; lvl <= NIX_TXSCH_LVL_TL1; lvl++) {
2148 smq_tree_ctx = &smq_flush_ctx->smq_tree_ctx[lvl];
2149 if (lvl == NIX_TXSCH_LVL_TL1) {
2150 smq_flush_ctx->tl1_schq = schq;
2151 smq_tree_ctx->cir_off = NIX_AF_TL1X_CIR(schq);
2152 smq_tree_ctx->pir_off = 0;
2153 smq_tree_ctx->pir_val = 0;
2155 } else if (lvl == NIX_TXSCH_LVL_TL2) {
2156 smq_flush_ctx->tl2_schq = schq;
2157 smq_tree_ctx->cir_off = NIX_AF_TL2X_CIR(schq);
2158 smq_tree_ctx->pir_off = NIX_AF_TL2X_PIR(schq);
2159 parent_off = NIX_AF_TL2X_PARENT(schq);
2160 } else if (lvl == NIX_TXSCH_LVL_TL3) {
2161 smq_tree_ctx->cir_off = NIX_AF_TL3X_CIR(schq);
2162 smq_tree_ctx->pir_off = NIX_AF_TL3X_PIR(schq);
2163 parent_off = NIX_AF_TL3X_PARENT(schq);
2164 } else if (lvl == NIX_TXSCH_LVL_TL4) {
2165 smq_tree_ctx->cir_off = NIX_AF_TL4X_CIR(schq);
2166 smq_tree_ctx->pir_off = NIX_AF_TL4X_PIR(schq);
2167 parent_off = NIX_AF_TL4X_PARENT(schq);
2168 } else if (lvl == NIX_TXSCH_LVL_MDQ) {
2169 smq_tree_ctx->cir_off = NIX_AF_MDQX_CIR(schq);
2170 smq_tree_ctx->pir_off = NIX_AF_MDQX_PIR(schq);
2171 parent_off = NIX_AF_MDQX_PARENT(schq);
2173 /* save cir/pir register values */
2174 smq_tree_ctx->cir_val = rvu_read64(rvu, blkaddr, smq_tree_ctx->cir_off);
2175 if (smq_tree_ctx->pir_off)
2176 smq_tree_ctx->pir_val = rvu_read64(rvu, blkaddr, smq_tree_ctx->pir_off);
2178 /* get parent txsch node */
2180 regval = rvu_read64(rvu, blkaddr, parent_off);
2181 schq = (regval >> 16) & 0x1FF;
2186 static void nix_smq_flush_enadis_xoff(struct rvu *rvu, int blkaddr,
2187 struct nix_smq_flush_ctx *smq_flush_ctx, bool enable)
2189 struct nix_txsch *txsch;
2190 struct nix_hw *nix_hw;
2194 nix_hw = get_nix_hw(rvu->hw, blkaddr);
2198 /* loop through all TL2s with matching PF_FUNC */
2199 txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL2];
2200 for (tl2 = 0; tl2 < txsch->schq.max; tl2++) {
2201 /* skip the smq(flush) TL2 */
2202 if (tl2 == smq_flush_ctx->tl2_schq)
2204 /* skip unused TL2s */
2205 if (TXSCH_MAP_FLAGS(txsch->pfvf_map[tl2]) & NIX_TXSCHQ_FREE)
2207 /* skip if PF_FUNC doesn't match */
2208 if ((TXSCH_MAP_FUNC(txsch->pfvf_map[tl2]) & ~RVU_PFVF_FUNC_MASK) !=
2209 (TXSCH_MAP_FUNC(txsch->pfvf_map[smq_flush_ctx->tl2_schq] &
2210 ~RVU_PFVF_FUNC_MASK)))
2212 /* enable/disable XOFF */
2213 regoff = NIX_AF_TL2X_SW_XOFF(tl2);
2215 rvu_write64(rvu, blkaddr, regoff, 0x1);
2217 rvu_write64(rvu, blkaddr, regoff, 0x0);
2221 static void nix_smq_flush_enadis_rate(struct rvu *rvu, int blkaddr,
2222 struct nix_smq_flush_ctx *smq_flush_ctx, bool enable)
2224 u64 cir_off, pir_off, cir_val, pir_val;
2225 struct nix_smq_tree_ctx *smq_tree_ctx;
2228 for (lvl = NIX_TXSCH_LVL_SMQ; lvl <= NIX_TXSCH_LVL_TL1; lvl++) {
2229 smq_tree_ctx = &smq_flush_ctx->smq_tree_ctx[lvl];
2230 cir_off = smq_tree_ctx->cir_off;
2231 cir_val = smq_tree_ctx->cir_val;
2232 pir_off = smq_tree_ctx->pir_off;
2233 pir_val = smq_tree_ctx->pir_val;
2236 rvu_write64(rvu, blkaddr, cir_off, cir_val);
2237 if (lvl != NIX_TXSCH_LVL_TL1)
2238 rvu_write64(rvu, blkaddr, pir_off, pir_val);
2240 rvu_write64(rvu, blkaddr, cir_off, 0x0);
2241 if (lvl != NIX_TXSCH_LVL_TL1)
2242 rvu_write64(rvu, blkaddr, pir_off, 0x0);
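/* Flush an SMQ: pause sibling TL2s via SW_XOFF, remove rate limits on
 * the SMQ's tree, set the flush and enqueue-xoff bits in NIX_AF_SMQX_CFG,
 * disable link backpressure, poll for flush completion and then undo all
 * of the temporary state changes.
 */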
2247 static int nix_smq_flush(struct rvu *rvu, int blkaddr,
2248 int smq, u16 pcifunc, int nixlf)
2250 struct nix_smq_flush_ctx *smq_flush_ctx;
2251 int pf = rvu_get_pf(pcifunc);
2252 u8 cgx_id = 0, lmac_id = 0;
2253 int err, restore_tx_en = 0;
2256 if (!is_rvu_otx2(rvu)) {
2257 /* Skip SMQ flush if pkt count is zero */
2258 cfg = rvu_read64(rvu, blkaddr, NIX_AF_MDQX_IN_MD_COUNT(smq));
2263 /* enable cgx tx if disabled */
2264 if (is_pf_cgxmapped(rvu, pf)) {
2265 rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
2266 restore_tx_en = !rvu_cgx_config_tx(rvu_cgx_pdata(cgx_id, rvu),
2270 /* XOFF all TL2s whose parent TL1 matches SMQ tree TL1 */
2271 smq_flush_ctx = kzalloc(sizeof(*smq_flush_ctx), GFP_KERNEL);
2274 nix_smq_flush_fill_ctx(rvu, blkaddr, smq, smq_flush_ctx);
2275 nix_smq_flush_enadis_xoff(rvu, blkaddr, smq_flush_ctx, true);
2276 nix_smq_flush_enadis_rate(rvu, blkaddr, smq_flush_ctx, false);
2278 cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq));
2279 /* Do SMQ flush and set enqueue xoff */
2280 cfg |= BIT_ULL(50) | BIT_ULL(49);
2281 rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq), cfg);
2283 /* Disable backpressure from physical link,
2284 * otherwise SMQ flush may stall.
2286 rvu_cgx_enadis_rx_bp(rvu, pf, false);
2288 /* Wait for flush to complete */
2289 err = rvu_poll_reg(rvu, blkaddr,
2290 NIX_AF_SMQX_CFG(smq), BIT_ULL(49), true);
2293 "NIXLF%d: SMQ%d flush failed, txlink might be busy\n",
2296 /* clear XOFF on TL2s */
2297 nix_smq_flush_enadis_rate(rvu, blkaddr, smq_flush_ctx, true);
2298 nix_smq_flush_enadis_xoff(rvu, blkaddr, smq_flush_ctx, false);
2299 kfree(smq_flush_ctx);
2301 rvu_cgx_enadis_rx_bp(rvu, pf, true);
2302 /* restore cgx tx state */
2304 rvu_cgx_config_tx(rvu_cgx_pdata(cgx_id, rvu), lmac_id, false);
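/* Free all transmit scheduler queues owned by 'pcifunc': clear link
 * config, XOFF and shaping on each queue, flush every SMQ, return the
 * queues to the free pool and finally sync the LF's NDC-TX cached state.
 */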
2308 static int nix_txschq_free(struct rvu *rvu, u16 pcifunc)
2310 int blkaddr, nixlf, lvl, schq, err;
2311 struct rvu_hwinfo *hw = rvu->hw;
2312 struct nix_txsch *txsch;
2313 struct nix_hw *nix_hw;
2316 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
2318 return NIX_AF_ERR_AF_LF_INVALID;
2320 nix_hw = get_nix_hw(rvu->hw, blkaddr);
2322 return NIX_AF_ERR_INVALID_NIXBLK;
2324 nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
2326 return NIX_AF_ERR_AF_LF_INVALID;
2328 /* Disable TL2/3 queue links and all XOFF's before SMQ flush */
2329 mutex_lock(&rvu->rsrc_lock);
2330 for (lvl = NIX_TXSCH_LVL_MDQ; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
2331 txsch = &nix_hw->txsch[lvl];
2333 if (lvl >= hw->cap.nix_tx_aggr_lvl)
2336 for (schq = 0; schq < txsch->schq.max; schq++) {
2337 if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
2339 nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
2340 nix_clear_tx_xoff(rvu, blkaddr, lvl, schq);
2341 nix_reset_tx_shaping(rvu, blkaddr, nixlf, lvl, schq);
2344 nix_clear_tx_xoff(rvu, blkaddr, NIX_TXSCH_LVL_TL1,
2345 nix_get_tx_link(rvu, pcifunc));
2347 /* On PF cleanup, clear cfg done flag as
2348 * PF would have changed default config.
2350 if (!(pcifunc & RVU_PFVF_FUNC_MASK)) {
2351 txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL1];
2352 schq = nix_get_tx_link(rvu, pcifunc);
2353 /* Do not clear pcifunc in txsch->pfvf_map[schq] because
2354 * VF might be using this TL1 queue
2356 map_func = TXSCH_MAP_FUNC(txsch->pfvf_map[schq]);
2357 txsch->pfvf_map[schq] = TXSCH_SET_FLAG(map_func, 0x0);
2361 txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ];
2362 for (schq = 0; schq < txsch->schq.max; schq++) {
2363 if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
2365 nix_smq_flush(rvu, blkaddr, schq, pcifunc, nixlf);
2368 /* Now free scheduler queues to free pool */
2369 for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
2370 /* TLs above aggregation level are shared across a PF
2371 * and its VFs, hence skip freeing them.
2373 if (lvl >= hw->cap.nix_tx_aggr_lvl)
2376 txsch = &nix_hw->txsch[lvl];
2377 for (schq = 0; schq < txsch->schq.max; schq++) {
2378 if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
2380 nix_reset_tx_schedule(rvu, blkaddr, lvl, schq);
2381 rvu_free_rsrc(&txsch->schq, schq);
2382 txsch->pfvf_map[schq] = TXSCH_MAP(0, NIX_TXSCHQ_FREE);
2385 mutex_unlock(&rvu->rsrc_lock);
2387 /* Sync cached info for this LF in NDC-TX to LLC/DRAM */
2388 rvu_write64(rvu, blkaddr, NIX_AF_NDC_TX_SYNC, BIT_ULL(12) | nixlf);
2389 err = rvu_poll_reg(rvu, blkaddr, NIX_AF_NDC_TX_SYNC, BIT_ULL(12), true);
2391 dev_err(rvu->dev, "NDC-TX sync failed for NIXLF %d\n", nixlf);
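/* Free a single scheduler queue on request from a PF/VF: clear its
 * SW_XOFF, link config and shaping, flush it first if it is an SMQ, and
 * return it to the free pool.
 */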
2396 static int nix_txschq_free_one(struct rvu *rvu,
2397 struct nix_txsch_free_req *req)
2399 struct rvu_hwinfo *hw = rvu->hw;
2400 u16 pcifunc = req->hdr.pcifunc;
2401 int lvl, schq, nixlf, blkaddr;
2402 struct nix_txsch *txsch;
2403 struct nix_hw *nix_hw;
2407 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
2409 return NIX_AF_ERR_AF_LF_INVALID;
2411 nix_hw = get_nix_hw(rvu->hw, blkaddr);
2413 return NIX_AF_ERR_INVALID_NIXBLK;
2415 nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
2417 return NIX_AF_ERR_AF_LF_INVALID;
2419 lvl = req->schq_lvl;
2421 txsch = &nix_hw->txsch[lvl];
2423 if (lvl >= hw->cap.nix_tx_aggr_lvl || schq >= txsch->schq.max)
2426 pfvf_map = txsch->pfvf_map;
2427 mutex_lock(&rvu->rsrc_lock);
2429 if (TXSCH_MAP_FUNC(pfvf_map[schq]) != pcifunc) {
2430 rc = NIX_AF_ERR_TLX_INVALID;
2434 /* Clear SW_XOFF of this resource only.
2435 * For SMQ level, all path XOFF's
2436 * need to be cleared by the user
2438 nix_clear_tx_xoff(rvu, blkaddr, lvl, schq);
2440 nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
2441 nix_reset_tx_shaping(rvu, blkaddr, nixlf, lvl, schq);
2443 /* Flush if it is an SMQ. The onus of disabling
2444 * TL2/3 queue links before SMQ flush is on the user
2446 if (lvl == NIX_TXSCH_LVL_SMQ &&
2447 nix_smq_flush(rvu, blkaddr, schq, pcifunc, nixlf)) {
2448 rc = NIX_AF_SMQ_FLUSH_FAILED;
2452 nix_reset_tx_schedule(rvu, blkaddr, lvl, schq);
2454 /* Free the resource */
2455 rvu_free_rsrc(&txsch->schq, schq);
2456 txsch->pfvf_map[schq] = TXSCH_MAP(0, NIX_TXSCHQ_FREE);
2457 mutex_unlock(&rvu->rsrc_lock);
2460 mutex_unlock(&rvu->rsrc_lock);
2464 int rvu_mbox_handler_nix_txsch_free(struct rvu *rvu,
2465 struct nix_txsch_free_req *req,
2466 struct msg_rsp *rsp)
2468 if (req->flags & TXSCHQ_FREE_ALL)
2469 return nix_txschq_free(rvu, req->hdr.pcifunc);
2471 return nix_txschq_free_one(rvu, req);
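/* Validate a TXSCHQ register write request: the register must be valid
 * for the given level, the target schq must belong to the requesting
 * PF/VF, and for *_PARENT registers the parent schq encoded in the value
 * must also belong to the requester.
 */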
2474 static bool is_txschq_hierarchy_valid(struct rvu *rvu, u16 pcifunc, int blkaddr,
2475 int lvl, u64 reg, u64 regval)
2477 u64 regbase = reg & 0xFFFF;
2480 if (!rvu_check_valid_reg(TXSCHQ_HWREGMAP, lvl, reg))
2483 schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);
2484 /* Check if this schq belongs to this PF/VF or not */
2485 if (!is_valid_txschq(rvu, blkaddr, lvl, pcifunc, schq))
2488 parent = (regval >> 16) & 0x1FF;
2489 /* Validate MDQ's TL4 parent */
2490 if (regbase == NIX_AF_MDQX_PARENT(0) &&
2491 !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL4, pcifunc, parent))
2494 /* Validate TL4's TL3 parent */
2495 if (regbase == NIX_AF_TL4X_PARENT(0) &&
2496 !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL3, pcifunc, parent))
2499 /* Validate TL3's TL2 parent */
2500 if (regbase == NIX_AF_TL3X_PARENT(0) &&
2501 !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL2, pcifunc, parent))
2504 /* Validate TL2's TL1 parent */
2505 if (regbase == NIX_AF_TL2X_PARENT(0) &&
2506 !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL1, pcifunc, parent))
2512 static bool is_txschq_shaping_valid(struct rvu_hwinfo *hw, int lvl, u64 reg)
2516 if (hw->cap.nix_shaping)
2519 /* If shaping and coloring are not supported, then
2520 * *_CIR and *_PIR registers should not be configured.
2522 regbase = reg & 0xFFFF;
2525 case NIX_TXSCH_LVL_TL1:
2526 if (regbase == NIX_AF_TL1X_CIR(0))
2529 case NIX_TXSCH_LVL_TL2:
2530 if (regbase == NIX_AF_TL2X_CIR(0) ||
2531 regbase == NIX_AF_TL2X_PIR(0))
2534 case NIX_TXSCH_LVL_TL3:
2535 if (regbase == NIX_AF_TL3X_CIR(0) ||
2536 regbase == NIX_AF_TL3X_PIR(0))
2539 case NIX_TXSCH_LVL_TL4:
2540 if (regbase == NIX_AF_TL4X_CIR(0) ||
2541 regbase == NIX_AF_TL4X_PIR(0))
2544 case NIX_TXSCH_LVL_MDQ:
2545 if (regbase == NIX_AF_MDQX_CIR(0) ||
2546 regbase == NIX_AF_MDQX_PIR(0))
2553 static void nix_tl1_default_cfg(struct rvu *rvu, struct nix_hw *nix_hw,
2554 u16 pcifunc, int blkaddr)
2559 schq = nix_get_tx_link(rvu, pcifunc);
2560 pfvf_map = nix_hw->txsch[NIX_TXSCH_LVL_TL1].pfvf_map;
2561 /* Skip if PF has already done the config */
2562 if (TXSCH_MAP_FLAGS(pfvf_map[schq]) & NIX_TXSCHQ_CFG_DONE)
2564 rvu_write64(rvu, blkaddr, NIX_AF_TL1X_TOPOLOGY(schq),
2565 (TXSCH_TL1_DFLT_RR_PRIO << 1));
2567 /* On OcteonTx2 the config was in bytes; on newer silicons
2568 * it's changed to weight.
2570 if (!rvu->hw->cap.nix_common_dwrr_mtu)
2571 rvu_write64(rvu, blkaddr, NIX_AF_TL1X_SCHEDULE(schq),
2572 TXSCH_TL1_DFLT_RR_QTM);
2574 rvu_write64(rvu, blkaddr, NIX_AF_TL1X_SCHEDULE(schq),
2575 CN10K_MAX_DWRR_WEIGHT);
2577 rvu_write64(rvu, blkaddr, NIX_AF_TL1X_CIR(schq), 0x00);
2578 pfvf_map[schq] = TXSCH_SET_FLAG(pfvf_map[schq], NIX_TXSCHQ_CFG_DONE);
2581 /* Register offset - [15:0]
2582 * Scheduler Queue number - [25:16]
2584 #define NIX_TX_SCHQ_MASK GENMASK_ULL(25, 0)
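/* Read-back path of the TXSCHQ config mbox: validate each requested
 * register against the level and the requester's ownership of the schq,
 * then return the current register values to the PF/VF.
 */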
2586 static int nix_txschq_cfg_read(struct rvu *rvu, struct nix_hw *nix_hw,
2587 int blkaddr, struct nix_txschq_config *req,
2588 struct nix_txschq_config *rsp)
2590 u16 pcifunc = req->hdr.pcifunc;
2594 for (idx = 0; idx < req->num_regs; idx++) {
2595 reg = req->reg[idx];
2596 reg &= NIX_TX_SCHQ_MASK;
2597 schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);
2598 if (!rvu_check_valid_reg(TXSCHQ_HWREGMAP, req->lvl, reg) ||
2599 !is_valid_txschq(rvu, blkaddr, req->lvl, pcifunc, schq))
2600 return NIX_AF_INVAL_TXSCHQ_CFG;
2601 rsp->regval[idx] = rvu_read64(rvu, blkaddr, reg);
2603 rsp->lvl = req->lvl;
2604 rsp->num_regs = req->num_regs;
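/* For a CGX mapped PF, enable or disable the LBK links on every TL2
 * queue owned by 'pcifunc' so that packets can be steered to LBK by
 * NPC TX MCAM rules.
 */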
2608 void rvu_nix_tx_tl2_cfg(struct rvu *rvu, int blkaddr, u16 pcifunc,
2609 struct nix_txsch *txsch, bool enable)
2611 struct rvu_hwinfo *hw = rvu->hw;
2612 int lbk_link_start, lbk_links;
2613 u8 pf = rvu_get_pf(pcifunc);
2617 if (!is_pf_cgxmapped(rvu, pf))
2620 cfg = enable ? (BIT_ULL(12) | RVU_SWITCH_LBK_CHAN) : 0;
2621 lbk_link_start = hw->cgx_links;
2623 for (schq = 0; schq < txsch->schq.max; schq++) {
2624 if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
2626 /* Enable all LBK links with channel 63 by default so that
2627 * packets can be sent to LBK with an NPC TX MCAM rule
2629 lbk_links = hw->lbk_links;
2631 rvu_write64(rvu, blkaddr,
2632 NIX_AF_TL3_TL2X_LINKX_CFG(schq,
2638 int rvu_mbox_handler_nix_txschq_cfg(struct rvu *rvu,
2639 struct nix_txschq_config *req,
2640 struct nix_txschq_config *rsp)
2642 u64 reg, val, regval, schq_regbase, val_mask;
2643 struct rvu_hwinfo *hw = rvu->hw;
2644 u16 pcifunc = req->hdr.pcifunc;
2645 struct nix_txsch *txsch;
2646 struct nix_hw *nix_hw;
2647 int blkaddr, idx, err;
2651 if (req->lvl >= NIX_TXSCH_LVL_CNT ||
2652 req->num_regs > MAX_REGS_PER_MBOX_MSG)
2653 return NIX_AF_INVAL_TXSCHQ_CFG;
2655 err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
2659 nix_hw = get_nix_hw(rvu->hw, blkaddr);
2661 return NIX_AF_ERR_INVALID_NIXBLK;
2664 return nix_txschq_cfg_read(rvu, nix_hw, blkaddr, req, rsp);
2666 txsch = &nix_hw->txsch[req->lvl];
2667 pfvf_map = txsch->pfvf_map;
2669 if (req->lvl >= hw->cap.nix_tx_aggr_lvl &&
2670 pcifunc & RVU_PFVF_FUNC_MASK) {
2671 mutex_lock(&rvu->rsrc_lock);
2672 if (req->lvl == NIX_TXSCH_LVL_TL1)
2673 nix_tl1_default_cfg(rvu, nix_hw, pcifunc, blkaddr);
2674 mutex_unlock(&rvu->rsrc_lock);
2678 for (idx = 0; idx < req->num_regs; idx++) {
2679 reg = req->reg[idx];
2680 reg &= NIX_TX_SCHQ_MASK;
2681 regval = req->regval[idx];
2682 schq_regbase = reg & 0xFFFF;
2683 val_mask = req->regval_mask[idx];
2685 if (!is_txschq_hierarchy_valid(rvu, pcifunc, blkaddr,
2686 txsch->lvl, reg, regval))
2687 return NIX_AF_INVAL_TXSCHQ_CFG;
2689 /* Check if shaping and coloring is supported */
2690 if (!is_txschq_shaping_valid(hw, req->lvl, reg))
2693 val = rvu_read64(rvu, blkaddr, reg);
2694 regval = (val & val_mask) | (regval & ~val_mask);
2696 /* Handle shaping state toggle specially */
2697 if (hw->cap.nix_shaper_toggle_wait &&
2698 handle_txschq_shaper_update(rvu, blkaddr, nixlf,
2699 req->lvl, reg, regval))
2702 /* Replace PF/VF visible NIXLF slot with HW NIXLF id */
2703 if (schq_regbase == NIX_AF_SMQX_CFG(0)) {
2704 nixlf = rvu_get_lf(rvu, &hw->block[blkaddr],
2706 regval &= ~(0x7FULL << 24);
2707 regval |= ((u64)nixlf << 24);
2710 /* Clear 'BP_ENA' config, if it's not allowed */
2711 if (!hw->cap.nix_tx_link_bp) {
2712 if (schq_regbase == NIX_AF_TL4X_SDP_LINK_CFG(0) ||
2713 (schq_regbase & 0xFF00) ==
2714 NIX_AF_TL3_TL2X_LINKX_CFG(0, 0))
2715 regval &= ~BIT_ULL(13);
2718 /* Mark config as done for TL1 by PF */
2719 if (schq_regbase >= NIX_AF_TL1X_SCHEDULE(0) &&
2720 schq_regbase <= NIX_AF_TL1X_GREEN_BYTES(0)) {
2721 schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);
2722 mutex_lock(&rvu->rsrc_lock);
2723 pfvf_map[schq] = TXSCH_SET_FLAG(pfvf_map[schq],
2724 NIX_TXSCHQ_CFG_DONE);
2725 mutex_unlock(&rvu->rsrc_lock);
2728 /* SMQ flush is special, hence split the register write such
2729 * that the flush is done first and the rest of the bits are written later.
2731 if (schq_regbase == NIX_AF_SMQX_CFG(0) &&
2732 (regval & BIT_ULL(49))) {
2733 schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);
2734 nix_smq_flush(rvu, blkaddr, schq, pcifunc, nixlf);
2735 regval &= ~BIT_ULL(49);
2737 rvu_write64(rvu, blkaddr, reg, regval);
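/* Program one NIX_AF_LFX_RX_VTAG_TYPEX entry for the LF: vtag size plus
 * optional capture/strip bits. Type 7 is rejected since it is reserved
 * for VF VLAN offload.
 */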
2743 static int nix_rx_vtag_cfg(struct rvu *rvu, int nixlf, int blkaddr,
2744 struct nix_vtag_config *req)
2746 u64 regval = req->vtag_size;
2748 if (req->rx.vtag_type > NIX_AF_LFX_RX_VTAG_TYPE7 ||
2749 req->vtag_size > VTAGSIZE_T8)
2752 /* RX VTAG Type 7 reserved for vf vlan */
2753 if (req->rx.vtag_type == NIX_AF_LFX_RX_VTAG_TYPE7)
2754 return NIX_AF_ERR_RX_VTAG_INUSE;
2756 if (req->rx.capture_vtag)
2757 regval |= BIT_ULL(5);
2758 if (req->rx.strip_vtag)
2759 regval |= BIT_ULL(4);
2761 rvu_write64(rvu, blkaddr,
2762 NIX_AF_LFX_RX_VTAG_TYPEX(nixlf, req->rx.vtag_type), regval);
2766 static int nix_tx_vtag_free(struct rvu *rvu, int blkaddr,
2767 u16 pcifunc, int index)
2769 struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr);
2770 struct nix_txvlan *vlan;
2773 return NIX_AF_ERR_INVALID_NIXBLK;
2775 vlan = &nix_hw->txvlan;
2776 if (vlan->entry2pfvf_map[index] != pcifunc)
2777 return NIX_AF_ERR_PARAM;
2779 rvu_write64(rvu, blkaddr,
2780 NIX_AF_TX_VTAG_DEFX_DATA(index), 0x0ull);
2781 rvu_write64(rvu, blkaddr,
2782 NIX_AF_TX_VTAG_DEFX_CTL(index), 0x0ull);
2784 vlan->entry2pfvf_map[index] = 0;
2785 rvu_free_rsrc(&vlan->rsrc, index);
2790 static void nix_free_tx_vtag_entries(struct rvu *rvu, u16 pcifunc)
2792 struct nix_txvlan *vlan;
2793 struct nix_hw *nix_hw;
2796 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
2800 nix_hw = get_nix_hw(rvu->hw, blkaddr);
2804 vlan = &nix_hw->txvlan;
2806 mutex_lock(&vlan->rsrc_lock);
2807 /* Scan all the entries and free the ones mapped to 'pcifunc' */
2808 for (index = 0; index < vlan->rsrc.max; index++) {
2809 if (vlan->entry2pfvf_map[index] == pcifunc)
2810 nix_tx_vtag_free(rvu, blkaddr, pcifunc, index);
2812 mutex_unlock(&vlan->rsrc_lock);
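/* Allocate a free TX VTAG definition entry and program its DATA/CTL
 * registers with the requested vtag value and size.
 */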
2815 static int nix_tx_vtag_alloc(struct rvu *rvu, int blkaddr,
2818 struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr);
2819 struct nix_txvlan *vlan;
2824 return NIX_AF_ERR_INVALID_NIXBLK;
2826 vlan = &nix_hw->txvlan;
2828 mutex_lock(&vlan->rsrc_lock);
2830 index = rvu_alloc_rsrc(&vlan->rsrc);
2832 mutex_unlock(&vlan->rsrc_lock);
2836 mutex_unlock(&vlan->rsrc_lock);
2838 regval = size ? vtag : vtag << 32;
2840 rvu_write64(rvu, blkaddr,
2841 NIX_AF_TX_VTAG_DEFX_DATA(index), regval);
2842 rvu_write64(rvu, blkaddr,
2843 NIX_AF_TX_VTAG_DEFX_CTL(index), size);
2848 static int nix_tx_vtag_decfg(struct rvu *rvu, int blkaddr,
2849 struct nix_vtag_config *req)
2851 struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr);
2852 u16 pcifunc = req->hdr.pcifunc;
2853 int idx0 = req->tx.vtag0_idx;
2854 int idx1 = req->tx.vtag1_idx;
2855 struct nix_txvlan *vlan;
2859 return NIX_AF_ERR_INVALID_NIXBLK;
2861 vlan = &nix_hw->txvlan;
2862 if (req->tx.free_vtag0 && req->tx.free_vtag1)
2863 if (vlan->entry2pfvf_map[idx0] != pcifunc ||
2864 vlan->entry2pfvf_map[idx1] != pcifunc)
2865 return NIX_AF_ERR_PARAM;
2867 mutex_lock(&vlan->rsrc_lock);
2869 if (req->tx.free_vtag0) {
2870 err = nix_tx_vtag_free(rvu, blkaddr, pcifunc, idx0);
2875 if (req->tx.free_vtag1)
2876 err = nix_tx_vtag_free(rvu, blkaddr, pcifunc, idx1);
2879 mutex_unlock(&vlan->rsrc_lock);
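/* Allocate TX VTAG entries for vtag0 and/or vtag1 as requested, record
 * their owner in entry2pfvf_map and return the allocated indices; if the
 * vtag1 allocation fails, the already allocated vtag0 entry is freed.
 */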
2883 static int nix_tx_vtag_cfg(struct rvu *rvu, int blkaddr,
2884 struct nix_vtag_config *req,
2885 struct nix_vtag_config_rsp *rsp)
2887 struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr);
2888 struct nix_txvlan *vlan;
2889 u16 pcifunc = req->hdr.pcifunc;
2892 return NIX_AF_ERR_INVALID_NIXBLK;
2894 vlan = &nix_hw->txvlan;
2895 if (req->tx.cfg_vtag0) {
2897 nix_tx_vtag_alloc(rvu, blkaddr,
2898 req->tx.vtag0, req->vtag_size);
2900 if (rsp->vtag0_idx < 0)
2901 return NIX_AF_ERR_TX_VTAG_NOSPC;
2903 vlan->entry2pfvf_map[rsp->vtag0_idx] = pcifunc;
2906 if (req->tx.cfg_vtag1) {
2908 nix_tx_vtag_alloc(rvu, blkaddr,
2909 req->tx.vtag1, req->vtag_size);
2911 if (rsp->vtag1_idx < 0)
2914 vlan->entry2pfvf_map[rsp->vtag1_idx] = pcifunc;
2920 if (req->tx.cfg_vtag0)
2921 nix_tx_vtag_free(rvu, blkaddr, pcifunc, rsp->vtag0_idx);
2923 return NIX_AF_ERR_TX_VTAG_NOSPC;
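/* Mbox handler dispatching RX vtag type config or TX vtag entry
 * alloc/free based on cfg_type; mixing alloc and free of TX vtags in the
 * same request is rejected.
 */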
2926 int rvu_mbox_handler_nix_vtag_cfg(struct rvu *rvu,
2927 struct nix_vtag_config *req,
2928 struct nix_vtag_config_rsp *rsp)
2930 u16 pcifunc = req->hdr.pcifunc;
2931 int blkaddr, nixlf, err;
2933 err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
2937 if (req->cfg_type) {
2938 /* rx vtag configuration */
2939 err = nix_rx_vtag_cfg(rvu, nixlf, blkaddr, req);
2941 return NIX_AF_ERR_PARAM;
2943 /* tx vtag configuration */
2944 if ((req->tx.cfg_vtag0 || req->tx.cfg_vtag1) &&
2945 (req->tx.free_vtag0 || req->tx.free_vtag1))
2946 return NIX_AF_ERR_PARAM;
2948 if (req->tx.cfg_vtag0 || req->tx.cfg_vtag1)
2949 return nix_tx_vtag_cfg(rvu, blkaddr, req, rsp);
2951 if (req->tx.free_vtag0 || req->tx.free_vtag1)
2952 return nix_tx_vtag_decfg(rvu, blkaddr, req);
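/* Issue an AQ instruction to init/write one multicast entry (MCE):
 * point it at 'pcifunc', link it to 'next' and mark end-of-list via
 * 'eol'.
 */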
2958 static int nix_blk_setup_mce(struct rvu *rvu, struct nix_hw *nix_hw,
2959 int mce, u8 op, u16 pcifunc, int next, bool eol)
2961 struct nix_aq_enq_req aq_req;
2964 aq_req.hdr.pcifunc = 0;
2965 aq_req.ctype = NIX_AQ_CTYPE_MCE;
2969 /* Use RSS with RSS index 0 */
2971 aq_req.mce.index = 0;
2972 aq_req.mce.eol = eol;
2973 aq_req.mce.pf_func = pcifunc;
2974 aq_req.mce.next = next;
2976 /* All fields valid */
2977 *(u64 *)(&aq_req.mce_mask) = ~0ULL;
2979 err = rvu_nix_blk_aq_enq_inst(rvu, nix_hw, &aq_req, NULL);
2981 dev_err(rvu->dev, "Failed to setup Bcast MCE for PF%d:VF%d\n",
2982 rvu_get_pf(pcifunc), pcifunc & RVU_PFVF_FUNC_MASK);
2988 static int nix_update_mce_list_entry(struct nix_mce_list *mce_list,
2989 u16 pcifunc, bool add)
2991 struct mce *mce, *tail = NULL;
2992 bool delete = false;
2994 /* Scan through the current list */
2995 hlist_for_each_entry(mce, &mce_list->head, node) {
2996 /* If already exists, then delete */
2997 if (mce->pcifunc == pcifunc && !add) {
3000 } else if (mce->pcifunc == pcifunc && add) {
3001 /* entry already exists */
3008 hlist_del(&mce->node);
3017 /* Add a new one to the list, at the tail */
3018 mce = kzalloc(sizeof(*mce), GFP_KERNEL);
3021 mce->pcifunc = pcifunc;
3023 hlist_add_head(&mce->node, &mce_list->head);
3025 hlist_add_behind(&mce->node, &tail->node);
3030 int nix_update_mce_list(struct rvu *rvu, u16 pcifunc,
3031 struct nix_mce_list *mce_list,
3032 int mce_idx, int mcam_index, bool add)
3034 int err = 0, idx, next_idx, last_idx, blkaddr, npc_blkaddr;
3035 struct npc_mcam *mcam = &rvu->hw->mcam;
3036 struct nix_mcast *mcast;
3037 struct nix_hw *nix_hw;
3043 /* Get this PF/VF func's MCE index */
3044 idx = mce_idx + (pcifunc & RVU_PFVF_FUNC_MASK);
3046 if (idx > (mce_idx + mce_list->max)) {
3048 "%s: Idx %d > max MCE idx %d, for PF%d bcast list\n",
3049 __func__, idx, mce_list->max,
3050 pcifunc >> RVU_PFVF_PF_SHIFT);
3054 err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
3058 mcast = &nix_hw->mcast;
3059 mutex_lock(&mcast->mce_lock);
3061 err = nix_update_mce_list_entry(mce_list, pcifunc, add);
3065 /* Disable MCAM entry in NPC */
3066 if (!mce_list->count) {
3067 npc_blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
3068 npc_enable_mcam_entry(rvu, mcam, npc_blkaddr, mcam_index, false);
3072 /* Dump the updated list to HW */
3074 last_idx = idx + mce_list->count - 1;
3075 hlist_for_each_entry(mce, &mce_list->head, node) {
3080 /* EOL should be set in last MCE */
3081 err = nix_blk_setup_mce(rvu, nix_hw, idx, NIX_AQ_INSTOP_WRITE,
3082 mce->pcifunc, next_idx,
3083 (next_idx > last_idx) ? true : false);
3090 mutex_unlock(&mcast->mce_lock);
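/* Look up the bcast/allmulti/promisc MCE list and its base index for the
 * PF owning 'pcifunc', based on the requested entry type.
 */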
3094 void nix_get_mce_list(struct rvu *rvu, u16 pcifunc, int type,
3095 struct nix_mce_list **mce_list, int *mce_idx)
3097 struct rvu_hwinfo *hw = rvu->hw;
3098 struct rvu_pfvf *pfvf;
3100 if (!hw->cap.nix_rx_multicast ||
3101 !is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc & ~RVU_PFVF_FUNC_MASK))) {
3107 /* Get this PF/VF func's MCE index */
3108 pfvf = rvu_get_pfvf(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK);
3110 if (type == NIXLF_BCAST_ENTRY) {
3111 *mce_list = &pfvf->bcast_mce_list;
3112 *mce_idx = pfvf->bcast_mce_idx;
3113 } else if (type == NIXLF_ALLMULTI_ENTRY) {
3114 *mce_list = &pfvf->mcast_mce_list;
3115 *mce_idx = pfvf->mcast_mce_idx;
3116 } else if (type == NIXLF_PROMISC_ENTRY) {
3117 *mce_list = &pfvf->promisc_mce_list;
3118 *mce_idx = pfvf->promisc_mce_idx;
3125 static int nix_update_mce_rule(struct rvu *rvu, u16 pcifunc,
3128 int err = 0, nixlf, blkaddr, mcam_index, mce_idx;
3129 struct npc_mcam *mcam = &rvu->hw->mcam;
3130 struct rvu_hwinfo *hw = rvu->hw;
3131 struct nix_mce_list *mce_list;
3134 /* skip multicast pkt replication for AF's VFs & SDP links */
3135 if (is_afvf(pcifunc) || is_sdp_pfvf(pcifunc))
3138 if (!hw->cap.nix_rx_multicast)
3141 pf = rvu_get_pf(pcifunc);
3142 if (!is_pf_cgxmapped(rvu, pf))
3145 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
3149 nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
3153 nix_get_mce_list(rvu, pcifunc, type, &mce_list, &mce_idx);
3155 mcam_index = npc_get_nixlf_mcam_index(mcam,
3156 pcifunc & ~RVU_PFVF_FUNC_MASK,
3158 err = nix_update_mce_list(rvu, pcifunc, mce_list,
3159 mce_idx, mcam_index, add);
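/* For every CGX mapped PF attached to this NIX block, reserve bcast,
 * allmulti and promisc MCE lists sized for the PF and all of its VFs,
 * and pre-populate them with dummy entries.
 */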
3163 static int nix_setup_mce_tables(struct rvu *rvu, struct nix_hw *nix_hw)
3165 struct nix_mcast *mcast = &nix_hw->mcast;
3166 int err, pf, numvfs, idx;
3167 struct rvu_pfvf *pfvf;
3171 /* Skip PF0 (i.e AF) */
3172 for (pf = 1; pf < (rvu->cgx_mapped_pfs + 1); pf++) {
3173 cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
3174 /* If PF is not enabled, nothing to do */
3175 if (!((cfg >> 20) & 0x01))
3177 /* Get numVFs attached to this PF */
3178 numvfs = (cfg >> 12) & 0xFF;
3180 pfvf = &rvu->pf[pf];
3182 /* This NIX0/1 block mapped to PF ? */
3183 if (pfvf->nix_blkaddr != nix_hw->blkaddr)
3186 /* save start idx of broadcast mce list */
3187 pfvf->bcast_mce_idx = nix_alloc_mce_list(mcast, numvfs + 1);
3188 nix_mce_list_init(&pfvf->bcast_mce_list, numvfs + 1);
3190 /* save start idx of multicast mce list */
3191 pfvf->mcast_mce_idx = nix_alloc_mce_list(mcast, numvfs + 1);
3192 nix_mce_list_init(&pfvf->mcast_mce_list, numvfs + 1);
3194 /* save the start idx of promisc mce list */
3195 pfvf->promisc_mce_idx = nix_alloc_mce_list(mcast, numvfs + 1);
3196 nix_mce_list_init(&pfvf->promisc_mce_list, numvfs + 1);
3198 for (idx = 0; idx < (numvfs + 1); idx++) {
3199 /* idx-0 is for PF, followed by VFs */
3200 pcifunc = (pf << RVU_PFVF_PF_SHIFT);
3202 /* Add dummy entries now, so that we don't have to check
3203 * for whether AQ_OP should be INIT/WRITE later on.
3204 * Will be updated when a NIXLF is attached/detached to
3207 err = nix_blk_setup_mce(rvu, nix_hw,
3208 pfvf->bcast_mce_idx + idx,
3214 /* add dummy entries to multicast mce list */
3215 err = nix_blk_setup_mce(rvu, nix_hw,
3216 pfvf->mcast_mce_idx + idx,
3222 /* add dummy entries to promisc mce list */
3223 err = nix_blk_setup_mce(rvu, nix_hw,
3224 pfvf->promisc_mce_idx + idx,
3234 static int nix_setup_mcast(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
3236 struct nix_mcast *mcast = &nix_hw->mcast;
3237 struct rvu_hwinfo *hw = rvu->hw;
3240 size = (rvu_read64(rvu, blkaddr, NIX_AF_CONST3) >> 16) & 0x0F;
3241 size = (1ULL << size);
3243 /* Alloc memory for multicast/mirror replication entries */
3244 err = qmem_alloc(rvu->dev, &mcast->mce_ctx,
3245 (256UL << MC_TBL_SIZE), size);
3249 rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BASE,
3250 (u64)mcast->mce_ctx->iova);
3252 /* Set max list length equal to max no of VFs per PF + PF itself */
3253 rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_CFG,
3254 BIT_ULL(36) | (hw->max_vfs_per_pf << 4) | MC_TBL_SIZE);
3256 /* Alloc memory for multicast replication buffers */
3257 size = rvu_read64(rvu, blkaddr, NIX_AF_MC_MIRROR_CONST) & 0xFFFF;
3258 err = qmem_alloc(rvu->dev, &mcast->mcast_buf,
3259 (8UL << MC_BUF_CNT), size);
3263 rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BUF_BASE,
3264 (u64)mcast->mcast_buf->iova);
3266 /* Alloc pkind for NIX internal RX multicast/mirror replay */
3267 mcast->replay_pkind = rvu_alloc_rsrc(&hw->pkind.rsrc);
3269 rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BUF_CFG,
3270 BIT_ULL(63) | (mcast->replay_pkind << 24) |
3271 BIT_ULL(20) | MC_BUF_CNT);
3273 mutex_init(&mcast->mce_lock);
3275 return nix_setup_mce_tables(rvu, nix_hw);
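/* Set up the TX VTAG definition table: a bitmap of free entries plus a
 * per-entry owner map, protected by its own lock.
 */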
3278 static int nix_setup_txvlan(struct rvu *rvu, struct nix_hw *nix_hw)
3280 struct nix_txvlan *vlan = &nix_hw->txvlan;
3283 /* Allocate resource bitmap for tx vtag def registers */
3284 vlan->rsrc.max = NIX_TX_VTAG_DEF_MAX;
3285 err = rvu_alloc_bitmap(&vlan->rsrc);
3289 /* Alloc memory for saving entry to RVU PFFUNC allocation mapping */
3290 vlan->entry2pfvf_map = devm_kcalloc(rvu->dev, vlan->rsrc.max,
3291 sizeof(u16), GFP_KERNEL);
3292 if (!vlan->entry2pfvf_map)
3295 mutex_init(&vlan->rsrc_lock);
3299 kfree(vlan->rsrc.bmap);
3303 static int nix_setup_txschq(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
3305 struct nix_txsch *txsch;
3309 /* Get scheduler queue count of each type and alloc
3310 * bitmap for each for alloc/free/attach operations.
3312 for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
3313 txsch = &nix_hw->txsch[lvl];
3316 case NIX_TXSCH_LVL_SMQ:
3317 reg = NIX_AF_MDQ_CONST;
3319 case NIX_TXSCH_LVL_TL4:
3320 reg = NIX_AF_TL4_CONST;
3322 case NIX_TXSCH_LVL_TL3:
3323 reg = NIX_AF_TL3_CONST;
3325 case NIX_TXSCH_LVL_TL2:
3326 reg = NIX_AF_TL2_CONST;
3328 case NIX_TXSCH_LVL_TL1:
3329 reg = NIX_AF_TL1_CONST;
3332 cfg = rvu_read64(rvu, blkaddr, reg);
3333 txsch->schq.max = cfg & 0xFFFF;
3334 err = rvu_alloc_bitmap(&txsch->schq);
3338 /* Allocate memory for scheduler queues to
3339 * PF/VF pcifunc mapping info.
3341 txsch->pfvf_map = devm_kcalloc(rvu->dev, txsch->schq.max,
3342 sizeof(u32), GFP_KERNEL);
3343 if (!txsch->pfvf_map)
3345 for (schq = 0; schq < txsch->schq.max; schq++)
3346 txsch->pfvf_map[schq] = TXSCH_MAP(0, NIX_TXSCHQ_FREE);
3349 /* Setup a default value of 8192 as DWRR MTU */
3350 if (rvu->hw->cap.nix_common_dwrr_mtu ||
3351 rvu->hw->cap.nix_multiple_dwrr_mtu) {
3352 rvu_write64(rvu, blkaddr,
3353 nix_get_dwrr_mtu_reg(rvu->hw, SMQ_LINK_TYPE_RPM),
3354 convert_bytes_to_dwrr_mtu(8192));
3355 rvu_write64(rvu, blkaddr,
3356 nix_get_dwrr_mtu_reg(rvu->hw, SMQ_LINK_TYPE_LBK),
3357 convert_bytes_to_dwrr_mtu(8192));
3358 rvu_write64(rvu, blkaddr,
3359 nix_get_dwrr_mtu_reg(rvu->hw, SMQ_LINK_TYPE_SDP),
3360 convert_bytes_to_dwrr_mtu(8192));
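/* Reserve a NIX_AF_MARK_FORMATX_CTL entry for 'cfg', reusing an existing
 * entry if the same config was already programmed, and return the format
 * index (or an error when the table is full).
 */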
3366 int rvu_nix_reserve_mark_format(struct rvu *rvu, struct nix_hw *nix_hw,
3367 int blkaddr, u32 cfg)
3371 for (fmt_idx = 0; fmt_idx < nix_hw->mark_format.in_use; fmt_idx++) {
3372 if (nix_hw->mark_format.cfg[fmt_idx] == cfg)
3375 if (fmt_idx >= nix_hw->mark_format.total)
3378 rvu_write64(rvu, blkaddr, NIX_AF_MARK_FORMATX_CTL(fmt_idx), cfg);
3379 nix_hw->mark_format.cfg[fmt_idx] = cfg;
3380 nix_hw->mark_format.in_use++;
3384 static int nix_af_mark_format_setup(struct rvu *rvu, struct nix_hw *nix_hw,
3388 [NIX_MARK_CFG_IP_DSCP_RED] = 0x10003,
3389 [NIX_MARK_CFG_IP_DSCP_YELLOW] = 0x11200,
3390 [NIX_MARK_CFG_IP_DSCP_YELLOW_RED] = 0x11203,
3391 [NIX_MARK_CFG_IP_ECN_RED] = 0x6000c,
3392 [NIX_MARK_CFG_IP_ECN_YELLOW] = 0x60c00,
3393 [NIX_MARK_CFG_IP_ECN_YELLOW_RED] = 0x60c0c,
3394 [NIX_MARK_CFG_VLAN_DEI_RED] = 0x30008,
3395 [NIX_MARK_CFG_VLAN_DEI_YELLOW] = 0x30800,
3396 [NIX_MARK_CFG_VLAN_DEI_YELLOW_RED] = 0x30808,
3401 total = (rvu_read64(rvu, blkaddr, NIX_AF_PSE_CONST) & 0xFF00) >> 8;
3402 nix_hw->mark_format.total = (u8)total;
3403 nix_hw->mark_format.cfg = devm_kcalloc(rvu->dev, total, sizeof(u32),
3405 if (!nix_hw->mark_format.cfg)
3407 for (i = 0; i < NIX_MARK_CFG_MAX; i++) {
3408 rc = rvu_nix_reserve_mark_format(rvu, nix_hw, blkaddr, cfgs[i]);
3410 dev_err(rvu->dev, "Err %d in setup mark format %d\n",
3417 static void rvu_get_lbk_link_max_frs(struct rvu *rvu, u16 *max_mtu)
3419 /* CN10K supports LBK FIFO size 72 KB */
3420 if (rvu->hw->lbk_bufsize == 0x12000)
3421 *max_mtu = CN10K_LBK_LINK_MAX_FRS;
3423 *max_mtu = NIC_HW_MAX_FRS;
3426 static void rvu_get_lmac_link_max_frs(struct rvu *rvu, u16 *max_mtu)
3428 int fifo_size = rvu_cgx_get_fifolen(rvu);
3430 /* RPM supports FIFO len 128 KB and RPM2 supports double the
3431 * FIFO len to accommodate 8 LMACs
3433 if (fifo_size == 0x20000 || fifo_size == 0x40000)
3434 *max_mtu = CN10K_LMAC_LINK_MAX_FRS;
3436 *max_mtu = NIC_HW_MAX_FRS;
3439 int rvu_mbox_handler_nix_get_hw_info(struct rvu *rvu, struct msg_req *req,
3440 struct nix_hw_info *rsp)
3442 u16 pcifunc = req->hdr.pcifunc;
3446 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
3448 return NIX_AF_ERR_AF_LF_INVALID;
3450 if (is_afvf(pcifunc))
3451 rvu_get_lbk_link_max_frs(rvu, &rsp->max_mtu);
3453 rvu_get_lmac_link_max_frs(rvu, &rsp->max_mtu);
3455 rsp->min_mtu = NIC_HW_MIN_FRS;
3457 if (!rvu->hw->cap.nix_common_dwrr_mtu &&
3458 !rvu->hw->cap.nix_multiple_dwrr_mtu) {
3459 /* Return '1' on OTx2 */
3460 rsp->rpm_dwrr_mtu = 1;
3461 rsp->sdp_dwrr_mtu = 1;
3462 rsp->lbk_dwrr_mtu = 1;
3466 /* Return DWRR_MTU for TLx_SCHEDULE[RR_WEIGHT] config */
3467 dwrr_mtu = rvu_read64(rvu, blkaddr,
3468 nix_get_dwrr_mtu_reg(rvu->hw, SMQ_LINK_TYPE_RPM));
3469 rsp->rpm_dwrr_mtu = convert_dwrr_mtu_to_bytes(dwrr_mtu);
3471 dwrr_mtu = rvu_read64(rvu, blkaddr,
3472 nix_get_dwrr_mtu_reg(rvu->hw, SMQ_LINK_TYPE_SDP));
3473 rsp->sdp_dwrr_mtu = convert_dwrr_mtu_to_bytes(dwrr_mtu);
3475 dwrr_mtu = rvu_read64(rvu, blkaddr,
3476 nix_get_dwrr_mtu_reg(rvu->hw, SMQ_LINK_TYPE_LBK));
3477 rsp->lbk_dwrr_mtu = convert_dwrr_mtu_to_bytes(dwrr_mtu);
3482 int rvu_mbox_handler_nix_stats_rst(struct rvu *rvu, struct msg_req *req,
3483 struct msg_rsp *rsp)
3485 u16 pcifunc = req->hdr.pcifunc;
3486 int i, nixlf, blkaddr, err;
3489 err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
3493 /* Get stats count supported by HW */
3494 stats = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);
3496 /* Reset tx stats */
3497 for (i = 0; i < ((stats >> 24) & 0xFF); i++)
3498 rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_STATX(nixlf, i), 0);
3500 /* Reset rx stats */
3501 for (i = 0; i < ((stats >> 32) & 0xFF); i++)
3502 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_STATX(nixlf, i), 0);
3507 /* Returns the ALG index to be set into NPC_RX_ACTION */
3508 static int get_flowkey_alg_idx(struct nix_hw *nix_hw, u32 flow_cfg)
3512 /* Scan over existing algo entries to find a match */
3513 for (i = 0; i < nix_hw->flowkey.in_use; i++)
3514 if (nix_hw->flowkey.flowkey[i] == flow_cfg)
3520 static int set_flowkey_fields(struct nix_rx_flowkey_alg *alg, u32 flow_cfg)
3522 int idx, nr_field, key_off, field_marker, keyoff_marker;
3523 int max_key_off, max_bit_pos, group_member;
3524 struct nix_rx_flowkey_alg *field;
3525 struct nix_rx_flowkey_alg tmp;
3526 u32 key_type, valid_key;
3528 int l4_key_offset = 0;
3533 #define FIELDS_PER_ALG 5
3534 #define MAX_KEY_OFF 40
3535 /* Clear all fields */
3536 memset(alg, 0, sizeof(uint64_t) * FIELDS_PER_ALG);
3538 /* Each of the 32 possible flow key algorithm definitions should
3539 * fall into above incremental config (except ALG0). Otherwise a
3540 * single NPC MCAM entry is not sufficient for supporting RSS.
3542 * If a different definition or combination is needed then NPC MCAM
3543 * has to be programmed to filter such pkts and its action should
3544 * point to this definition to calculate flowtag or hash.
3546 * The `for loop` goes over _all_ protocol fields and the following
3547 * variables depict the state machine forward progress logic.
3549 * keyoff_marker - Enabled when hash byte length needs to be accounted
3550 * in field->key_offset update.
3551 * field_marker - Enabled when a new field needs to be selected.
3552 * group_member - Enabled when protocol is part of a group.
3555 /* Last 4 bits (31:28) are reserved to specify SRC, DST
3556 * selection for L3, L4 i.e IPV[4,6]_SRC, IPV[4,6]_DST,
3557 * [TCP,UDP,SCTP]_SRC, [TCP,UDP,SCTP]_DST
3558 * 31 => L3_SRC, 30 => L3_DST, 29 => L4_SRC, 28 => L4_DST
3560 l3_l4_src_dst = flow_cfg;
3561 /* Reset these 4 bits, so that these won't be part of key */
3562 flow_cfg &= NIX_FLOW_KEY_TYPE_L3_L4_MASK;
3564 keyoff_marker = 0; max_key_off = 0; group_member = 0;
3565 nr_field = 0; key_off = 0; field_marker = 1;
3566 field = &tmp; max_bit_pos = fls(flow_cfg);
3568 idx < max_bit_pos && nr_field < FIELDS_PER_ALG &&
3569 key_off < MAX_KEY_OFF; idx++) {
3570 key_type = BIT(idx);
3571 valid_key = flow_cfg & key_type;
3572 /* Found a field marker, reset the field values */
3574 memset(&tmp, 0, sizeof(tmp));
3576 field_marker = true;
3577 keyoff_marker = true;
3579 case NIX_FLOW_KEY_TYPE_PORT:
3580 field->sel_chan = true;
3581 /* This should be set to 1, when SEL_CHAN is set */
3584 case NIX_FLOW_KEY_TYPE_IPV4_PROTO:
3585 field->lid = NPC_LID_LC;
3586 field->hdr_offset = 9; /* offset */
3587 field->bytesm1 = 0; /* 1 byte */
3588 field->ltype_match = NPC_LT_LC_IP;
3589 field->ltype_mask = 0xF;
3591 case NIX_FLOW_KEY_TYPE_IPV4:
3592 case NIX_FLOW_KEY_TYPE_INNR_IPV4:
3593 field->lid = NPC_LID_LC;
3594 field->ltype_match = NPC_LT_LC_IP;
3595 if (key_type == NIX_FLOW_KEY_TYPE_INNR_IPV4) {
3596 field->lid = NPC_LID_LG;
3597 field->ltype_match = NPC_LT_LG_TU_IP;
3599 field->hdr_offset = 12; /* SIP offset */
3600 field->bytesm1 = 7; /* SIP + DIP, 8 bytes */
3603 if (l3_l4_src_dst & NIX_FLOW_KEY_TYPE_L3_SRC_ONLY)
3604 field->bytesm1 = 3; /* SIP, 4 bytes */
3606 if (l3_l4_src_dst & NIX_FLOW_KEY_TYPE_L3_DST_ONLY) {
3607 /* Both SIP + DIP */
3608 if (field->bytesm1 == 3) {
3609 field->bytesm1 = 7; /* SIP + DIP, 8B */
3612 field->hdr_offset = 16; /* DIP off */
3613 field->bytesm1 = 3; /* DIP, 4 bytes */
3617 field->ltype_mask = 0xF; /* Match only IPv4 */
3618 keyoff_marker = false;
3620 case NIX_FLOW_KEY_TYPE_IPV6:
3621 case NIX_FLOW_KEY_TYPE_INNR_IPV6:
3622 field->lid = NPC_LID_LC;
3623 field->ltype_match = NPC_LT_LC_IP6;
3624 if (key_type == NIX_FLOW_KEY_TYPE_INNR_IPV6) {
3625 field->lid = NPC_LID_LG;
3626 field->ltype_match = NPC_LT_LG_TU_IP6;
3628 field->hdr_offset = 8; /* SIP offset */
3629 field->bytesm1 = 31; /* SIP + DIP, 32 bytes */
3632 if (l3_l4_src_dst & NIX_FLOW_KEY_TYPE_L3_SRC_ONLY)
3633 field->bytesm1 = 15; /* SIP, 16 bytes */
3635 if (l3_l4_src_dst & NIX_FLOW_KEY_TYPE_L3_DST_ONLY) {
3636 /* Both SIP + DIP */
3637 if (field->bytesm1 == 15) {
3638 /* SIP + DIP, 32 bytes */
3639 field->bytesm1 = 31;
3642 field->hdr_offset = 24; /* DIP off */
3643 field->bytesm1 = 15; /* DIP,16 bytes */
3646 field->ltype_mask = 0xF; /* Match only IPv6 */
3648 case NIX_FLOW_KEY_TYPE_TCP:
3649 case NIX_FLOW_KEY_TYPE_UDP:
3650 case NIX_FLOW_KEY_TYPE_SCTP:
3651 case NIX_FLOW_KEY_TYPE_INNR_TCP:
3652 case NIX_FLOW_KEY_TYPE_INNR_UDP:
3653 case NIX_FLOW_KEY_TYPE_INNR_SCTP:
3654 field->lid = NPC_LID_LD;
3655 if (key_type == NIX_FLOW_KEY_TYPE_INNR_TCP ||
3656 key_type == NIX_FLOW_KEY_TYPE_INNR_UDP ||
3657 key_type == NIX_FLOW_KEY_TYPE_INNR_SCTP)
3658 field->lid = NPC_LID_LH;
3659 field->bytesm1 = 3; /* Sport + Dport, 4 bytes */
3661 if (l3_l4_src_dst & NIX_FLOW_KEY_TYPE_L4_SRC_ONLY)
3662 field->bytesm1 = 1; /* SRC, 2 bytes */
3664 if (l3_l4_src_dst & NIX_FLOW_KEY_TYPE_L4_DST_ONLY) {
3665 /* Both SRC + DST */
3666 if (field->bytesm1 == 1) {
3667 /* SRC + DST, 4 bytes */
3671 field->hdr_offset = 2; /* DST off */
3672 field->bytesm1 = 1; /* DST, 2 bytes */
3676 /* The ltype enum values for NPC_LID_LD and NPC_LID_LH are the same,
3677 * so no need to change the ltype_match, just change
3678 * the lid for inner protocols
3680 BUILD_BUG_ON((int)NPC_LT_LD_TCP !=
3681 (int)NPC_LT_LH_TU_TCP);
3682 BUILD_BUG_ON((int)NPC_LT_LD_UDP !=
3683 (int)NPC_LT_LH_TU_UDP);
3684 BUILD_BUG_ON((int)NPC_LT_LD_SCTP !=
3685 (int)NPC_LT_LH_TU_SCTP);
3687 if ((key_type == NIX_FLOW_KEY_TYPE_TCP ||
3688 key_type == NIX_FLOW_KEY_TYPE_INNR_TCP) &&
3690 field->ltype_match |= NPC_LT_LD_TCP;
3691 group_member = true;
3692 } else if ((key_type == NIX_FLOW_KEY_TYPE_UDP ||
3693 key_type == NIX_FLOW_KEY_TYPE_INNR_UDP) &&
3695 field->ltype_match |= NPC_LT_LD_UDP;
3696 group_member = true;
3697 } else if ((key_type == NIX_FLOW_KEY_TYPE_SCTP ||
3698 key_type == NIX_FLOW_KEY_TYPE_INNR_SCTP) &&
3700 field->ltype_match |= NPC_LT_LD_SCTP;
3701 group_member = true;
3703 field->ltype_mask = ~field->ltype_match;
3704 if (key_type == NIX_FLOW_KEY_TYPE_SCTP ||
3705 key_type == NIX_FLOW_KEY_TYPE_INNR_SCTP) {
3706 /* Handle the case where any of the group items
3707 * is enabled in the group but not the final one
3711 group_member = false;
3714 field_marker = false;
3715 keyoff_marker = false;
3718 /* TCP/UDP/SCTP and ESP/AH fall at the same offset, so
3719 * remember the TCP key offset in the 40 byte hash key.
3721 if (key_type == NIX_FLOW_KEY_TYPE_TCP)
3722 l4_key_offset = key_off;
3724 case NIX_FLOW_KEY_TYPE_NVGRE:
3725 field->lid = NPC_LID_LD;
3726 field->hdr_offset = 4; /* VSID offset */
3728 field->ltype_match = NPC_LT_LD_NVGRE;
3729 field->ltype_mask = 0xF;
3731 case NIX_FLOW_KEY_TYPE_VXLAN:
3732 case NIX_FLOW_KEY_TYPE_GENEVE:
3733 field->lid = NPC_LID_LE;
3735 field->hdr_offset = 4;
3736 field->ltype_mask = 0xF;
3737 field_marker = false;
3738 keyoff_marker = false;
3740 if (key_type == NIX_FLOW_KEY_TYPE_VXLAN && valid_key) {
3741 field->ltype_match |= NPC_LT_LE_VXLAN;
3742 group_member = true;
3745 if (key_type == NIX_FLOW_KEY_TYPE_GENEVE && valid_key) {
3746 field->ltype_match |= NPC_LT_LE_GENEVE;
3747 group_member = true;
3750 if (key_type == NIX_FLOW_KEY_TYPE_GENEVE) {
3752 field->ltype_mask = ~field->ltype_match;
3753 field_marker = true;
3754 keyoff_marker = true;
3756 group_member = false;
3760 case NIX_FLOW_KEY_TYPE_ETH_DMAC:
3761 case NIX_FLOW_KEY_TYPE_INNR_ETH_DMAC:
3762 field->lid = NPC_LID_LA;
3763 field->ltype_match = NPC_LT_LA_ETHER;
3764 if (key_type == NIX_FLOW_KEY_TYPE_INNR_ETH_DMAC) {
3765 field->lid = NPC_LID_LF;
3766 field->ltype_match = NPC_LT_LF_TU_ETHER;
3768 field->hdr_offset = 0;
3769 field->bytesm1 = 5; /* DMAC 6 Byte */
3770 field->ltype_mask = 0xF;
3772 case NIX_FLOW_KEY_TYPE_IPV6_EXT:
3773 field->lid = NPC_LID_LC;
3774 field->hdr_offset = 40; /* IPV6 hdr */
3775 field->bytesm1 = 0; /* 1 Byte ext hdr */
3776 field->ltype_match = NPC_LT_LC_IP6_EXT;
3777 field->ltype_mask = 0xF;
3779 case NIX_FLOW_KEY_TYPE_GTPU:
3780 field->lid = NPC_LID_LE;
3781 field->hdr_offset = 4;
3782 field->bytesm1 = 3; /* 4 bytes TID */
3783 field->ltype_match = NPC_LT_LE_GTPU;
3784 field->ltype_mask = 0xF;
3786 case NIX_FLOW_KEY_TYPE_VLAN:
3787 field->lid = NPC_LID_LB;
3788 field->hdr_offset = 2; /* Skip TPID (2-bytes) */
3789 field->bytesm1 = 1; /* 2 Bytes (Actually 12 bits) */
3790 field->ltype_match = NPC_LT_LB_CTAG;
3791 field->ltype_mask = 0xF;
3792 field->fn_mask = 1; /* Mask out the first nibble */
3794 case NIX_FLOW_KEY_TYPE_AH:
3795 case NIX_FLOW_KEY_TYPE_ESP:
3796 field->hdr_offset = 0;
3797 field->bytesm1 = 7; /* SPI + sequence number */
3798 field->ltype_mask = 0xF;
3799 field->lid = NPC_LID_LE;
3800 field->ltype_match = NPC_LT_LE_ESP;
3801 if (key_type == NIX_FLOW_KEY_TYPE_AH) {
3802 field->lid = NPC_LID_LD;
3803 field->ltype_match = NPC_LT_LD_AH;
3804 field->hdr_offset = 4;
3805 keyoff_marker = false;
3811 /* Found a valid flow key type */
3813 /* Use the key offset of TCP/UDP/SCTP fields
3814 * for ESP/AH fields.
3816 if (key_type == NIX_FLOW_KEY_TYPE_ESP ||
3817 key_type == NIX_FLOW_KEY_TYPE_AH)
3818 key_off = l4_key_offset;
3819 field->key_offset = key_off;
3820 memcpy(&alg[nr_field], field, sizeof(*field));
3821 max_key_off = max(max_key_off, field->bytesm1 + 1);
3823 /* Found a field marker, get the next field */
3828 /* Found a keyoff marker, update the new key_off */
3829 if (keyoff_marker) {
3830 key_off += max_key_off;
3834 /* Processed all the flow key types */
3835 if (idx == max_bit_pos && key_off <= MAX_KEY_OFF)
3838 return NIX_AF_ERR_RSS_NOSPC_FIELD;
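/* Program a new RSS flow key algorithm: generate the field extract
 * config for 'flow_cfg', write it to the ALGX_FIELDX registers and
 * remember the flow_cfg so later requests can reuse the same index.
 */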
3841 static int reserve_flowkey_alg_idx(struct rvu *rvu, int blkaddr, u32 flow_cfg)
3843 u64 field[FIELDS_PER_ALG];
3847 hw = get_nix_hw(rvu->hw, blkaddr);
3849 return NIX_AF_ERR_INVALID_NIXBLK;
3851 /* No room to add a new flow hash algorithm */
3852 if (hw->flowkey.in_use >= NIX_FLOW_KEY_ALG_MAX)
3853 return NIX_AF_ERR_RSS_NOSPC_ALGO;
3855 /* Generate algo fields for the given flow_cfg */
3856 rc = set_flowkey_fields((struct nix_rx_flowkey_alg *)field, flow_cfg);
3860 /* Update ALGX_FIELDX register with generated fields */
3861 for (fid = 0; fid < FIELDS_PER_ALG; fid++)
3862 rvu_write64(rvu, blkaddr,
3863 NIX_AF_RX_FLOW_KEY_ALGX_FIELDX(hw->flowkey.in_use,
3866 /* Store the flow_cfg for further lookup */
3867 rc = hw->flowkey.in_use;
3868 hw->flowkey.flowkey[rc] = flow_cfg;
3869 hw->flowkey.in_use++;
3874 int rvu_mbox_handler_nix_rss_flowkey_cfg(struct rvu *rvu,
3875 struct nix_rss_flowkey_cfg *req,
3876 struct nix_rss_flowkey_cfg_rsp *rsp)
3878 u16 pcifunc = req->hdr.pcifunc;
3879 int alg_idx, nixlf, blkaddr;
3880 struct nix_hw *nix_hw;
3883 err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
3887 nix_hw = get_nix_hw(rvu->hw, blkaddr);
3889 return NIX_AF_ERR_INVALID_NIXBLK;
3891 alg_idx = get_flowkey_alg_idx(nix_hw, req->flowkey_cfg);
3892 /* Failed to get algo index from the existing list, reserve a new one */
3894 alg_idx = reserve_flowkey_alg_idx(rvu, blkaddr,
3899 rsp->alg_idx = alg_idx;
3900 rvu_npc_update_flowkey_alg_idx(rvu, pcifunc, nixlf, req->group,
3901 alg_idx, req->mcam_index);
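/* Clear all flow key algorithm entries and pre-reserve the commonly used
 * combinations: IPv4/IPv6 2-tuple and the TCP/UDP/SCTP 4-tuple variants.
 */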
3905 static int nix_rx_flowkey_alg_cfg(struct rvu *rvu, int blkaddr)
3907 u32 flowkey_cfg, minkey_cfg;
3910 /* Disable all flow key algx fieldx */
3911 for (alg = 0; alg < NIX_FLOW_KEY_ALG_MAX; alg++) {
3912 for (fid = 0; fid < FIELDS_PER_ALG; fid++)
3913 rvu_write64(rvu, blkaddr,
3914 NIX_AF_RX_FLOW_KEY_ALGX_FIELDX(alg, fid),
3918 /* IPv4/IPv6 SIP/DIPs */
3919 flowkey_cfg = NIX_FLOW_KEY_TYPE_IPV4 | NIX_FLOW_KEY_TYPE_IPV6;
3920 rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
3924 /* TCPv4/v6 4-tuple, SIP, DIP, Sport, Dport */
3925 minkey_cfg = flowkey_cfg;
3926 flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP;
3927 rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
3931 /* UDPv4/v6 4-tuple, SIP, DIP, Sport, Dport */
3932 flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_UDP;
3933 rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
3937 /* SCTPv4/v6 4-tuple, SIP, DIP, Sport, Dport */
3938 flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_SCTP;
3939 rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
3943 /* TCP/UDP v4/v6 4-tuple, rest IP pkts 2-tuple */
3944 flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP |
3945 NIX_FLOW_KEY_TYPE_UDP;
3946 rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
3950 /* TCP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */
3951 flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP |
3952 NIX_FLOW_KEY_TYPE_SCTP;
3953 rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
3957 /* UDP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */
3958 flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_UDP |
3959 NIX_FLOW_KEY_TYPE_SCTP;
3960 rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
3964 /* TCP/UDP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */
3965 flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP |
3966 NIX_FLOW_KEY_TYPE_UDP | NIX_FLOW_KEY_TYPE_SCTP;
3967 rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
3974 int rvu_mbox_handler_nix_set_mac_addr(struct rvu *rvu,
3975 struct nix_set_mac_addr *req,
3976 struct msg_rsp *rsp)
3978 bool from_vf = req->hdr.pcifunc & RVU_PFVF_FUNC_MASK;
3979 u16 pcifunc = req->hdr.pcifunc;
3980 int blkaddr, nixlf, err;
3981 struct rvu_pfvf *pfvf;
3983 err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
3987 pfvf = rvu_get_pfvf(rvu, pcifunc);
3989 /* untrusted VF can't overwrite admin(PF) changes */
3990 if (!test_bit(PF_SET_VF_TRUSTED, &pfvf->flags) &&
3991 (from_vf && test_bit(PF_SET_VF_MAC, &pfvf->flags))) {
3993 "MAC address set by admin(PF) cannot be overwritten by untrusted VF");
3997 ether_addr_copy(pfvf->mac_addr, req->mac_addr);
3999 rvu_npc_install_ucast_entry(rvu, pcifunc, nixlf,
4000 pfvf->rx_chan_base, req->mac_addr);
4002 if (test_bit(PF_SET_VF_TRUSTED, &pfvf->flags) && from_vf)
4003 ether_addr_copy(pfvf->default_mac, req->mac_addr);
4005 rvu_switch_update_rules(rvu, pcifunc);
4010 int rvu_mbox_handler_nix_get_mac_addr(struct rvu *rvu,
4011 struct msg_req *req,
4012 struct nix_get_mac_addr_rsp *rsp)
4014 u16 pcifunc = req->hdr.pcifunc;
4015 struct rvu_pfvf *pfvf;
4017 if (!is_nixlf_attached(rvu, pcifunc))
4018 return NIX_AF_ERR_AF_LF_INVALID;
4020 pfvf = rvu_get_pfvf(rvu, pcifunc);
4022 ether_addr_copy(rsp->mac_addr, pfvf->mac_addr);
4027 int rvu_mbox_handler_nix_set_rx_mode(struct rvu *rvu, struct nix_rx_mode *req,
4028 struct msg_rsp *rsp)
4030 bool allmulti, promisc, nix_rx_multicast;
4031 u16 pcifunc = req->hdr.pcifunc;
4032 struct rvu_pfvf *pfvf;
4035 pfvf = rvu_get_pfvf(rvu, pcifunc);
4036 promisc = req->mode & NIX_RX_MODE_PROMISC ? true : false;
4037 allmulti = req->mode & NIX_RX_MODE_ALLMULTI ? true : false;
4038 pfvf->use_mce_list = req->mode & NIX_RX_MODE_USE_MCE ? true : false;
4040 nix_rx_multicast = rvu->hw->cap.nix_rx_multicast & pfvf->use_mce_list;
4042 if (is_vf(pcifunc) && !nix_rx_multicast &&
4043 (promisc || allmulti)) {
4044 dev_warn_ratelimited(rvu->dev,
4045 "VF promisc/multicast not supported\n");
4049 /* untrusted VF can't configure promisc/allmulti */
4050 if (is_vf(pcifunc) && !test_bit(PF_SET_VF_TRUSTED, &pfvf->flags) &&
4051 (promisc || allmulti))
4054 err = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL);
4058 if (nix_rx_multicast) {
4059 /* add/del this PF_FUNC to/from mcast pkt replication list */
4060 err = nix_update_mce_rule(rvu, pcifunc, NIXLF_ALLMULTI_ENTRY,
4064 "Failed to update pcifunc 0x%x to multicast list\n",
4069 /* add/del this PF_FUNC to/from promisc pkt replication list */
4070 err = nix_update_mce_rule(rvu, pcifunc, NIXLF_PROMISC_ENTRY,
4074 "Failed to update pcifunc 0x%x to promisc list\n",
4080 /* install/uninstall allmulti entry */
4082 rvu_npc_install_allmulti_entry(rvu, pcifunc, nixlf,
4083 pfvf->rx_chan_base);
4085 if (!nix_rx_multicast)
4086 rvu_npc_enable_allmulti_entry(rvu, pcifunc, nixlf, false);
4089 /* install/uninstall promisc entry */
4091 rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf,
4095 if (!nix_rx_multicast)
4096 rvu_npc_enable_promisc_entry(rvu, pcifunc, nixlf, false);
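/* Recompute the frame size limits for the requester's RX link: take the
 * largest maxlen and smallest non-zero minlen across the PF and all of
 * its VFs, and fold the result back into the request.
 */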
4101 static void nix_find_link_frs(struct rvu *rvu,
4102 struct nix_frs_cfg *req, u16 pcifunc)
4104 int pf = rvu_get_pf(pcifunc);
4105 struct rvu_pfvf *pfvf;
4110 /* Update with requester's min/max lengths */
4111 pfvf = rvu_get_pfvf(rvu, pcifunc);
4112 pfvf->maxlen = req->maxlen;
4113 if (req->update_minlen)
4114 pfvf->minlen = req->minlen;
4116 maxlen = req->maxlen;
4117 minlen = req->update_minlen ? req->minlen : 0;
4119 /* Get this PF's numVFs and starting hwvf */
4120 rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvf);
4122 /* For each VF, compare requested max/minlen */
4123 for (vf = 0; vf < numvfs; vf++) {
4124 pfvf = &rvu->hwvf[hwvf + vf];
4125 if (pfvf->maxlen > maxlen)
4126 maxlen = pfvf->maxlen;
4127 if (req->update_minlen &&
4128 pfvf->minlen && pfvf->minlen < minlen)
4129 minlen = pfvf->minlen;
4132 /* Compare requested max/minlen with PF's max/minlen */
4133 pfvf = &rvu->pf[pf];
4134 if (pfvf->maxlen > maxlen)
4135 maxlen = pfvf->maxlen;
4136 if (req->update_minlen &&
4137 pfvf->minlen && pfvf->minlen < minlen)
4138 minlen = pfvf->minlen;
4140 /* Update the request with the max/min across the PF and its VFs */
4141 req->maxlen = maxlen;
4142 if (req->update_minlen)
4143 req->minlen = minlen;
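/* Update the NORM credit count for a TX link. New traffic is paused via
 * TL1 SW_XOFF (and CGX Tx is temporarily enabled if needed) so that all
 * in-flight credits return before the new value is programmed.
 */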
4147 nix_config_link_credits(struct rvu *rvu, int blkaddr, int link,
4148 u16 pcifunc, u64 tx_credits)
4150 struct rvu_hwinfo *hw = rvu->hw;
4151 int pf = rvu_get_pf(pcifunc);
4152 u8 cgx_id = 0, lmac_id = 0;
4153 unsigned long poll_tmo;
4154 bool restore_tx_en = 0;
4155 struct nix_hw *nix_hw;
4156 u64 cfg, sw_xoff = 0;
4161 nix_hw = get_nix_hw(rvu->hw, blkaddr);
4163 return NIX_AF_ERR_INVALID_NIXBLK;
4165 if (tx_credits == nix_hw->tx_credits[link])
4168 /* Enable cgx tx, if it is disabled, so that credits can come back */
4169 if (is_pf_cgxmapped(rvu, pf)) {
4170 rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
4171 restore_tx_en = !rvu_cgx_config_tx(rvu_cgx_pdata(cgx_id, rvu),
4175 mutex_lock(&rvu->rsrc_lock);
4176 /* Disable new traffic to link */
4177 if (hw->cap.nix_shaping) {
4178 schq = nix_get_tx_link(rvu, pcifunc);
4179 sw_xoff = rvu_read64(rvu, blkaddr, NIX_AF_TL1X_SW_XOFF(schq));
4180 rvu_write64(rvu, blkaddr,
4181 NIX_AF_TL1X_SW_XOFF(schq), BIT_ULL(0));
4184 rc = NIX_AF_ERR_LINK_CREDITS;
4185 poll_tmo = jiffies + usecs_to_jiffies(200000);
4186 /* Wait for credits to return */
4188 if (time_after(jiffies, poll_tmo))
4190 usleep_range(100, 200);
4192 cfg = rvu_read64(rvu, blkaddr,
4193 NIX_AF_TX_LINKX_NORM_CREDIT(link));
4194 credits = (cfg >> 12) & 0xFFFFFULL;
4195 } while (credits != nix_hw->tx_credits[link]);
4197 cfg &= ~(0xFFFFFULL << 12);
4198 cfg |= (tx_credits << 12);
4199 rvu_write64(rvu, blkaddr, NIX_AF_TX_LINKX_NORM_CREDIT(link), cfg);
4202 nix_hw->tx_credits[link] = tx_credits;
4205 /* Enable traffic back */
4206 if (hw->cap.nix_shaping && !sw_xoff)
4207 rvu_write64(rvu, blkaddr, NIX_AF_TL1X_SW_XOFF(schq), 0);
4209 /* Restore state of cgx tx */
4211 rvu_cgx_config_tx(rvu_cgx_pdata(cgx_id, rvu), lmac_id, false);
4213 mutex_unlock(&rvu->rsrc_lock);
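/* Mbox handler: update the max (and optionally min) frame size for the
 * requester's RX link or SMQs, and adjust TX link credits for CGX links
 * so they match the remaining LMAC FIFO space.
 */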
4217 int rvu_mbox_handler_nix_set_hw_frs(struct rvu *rvu, struct nix_frs_cfg *req,
4218 struct msg_rsp *rsp)
4220 struct rvu_hwinfo *hw = rvu->hw;
4221 u16 pcifunc = req->hdr.pcifunc;
4222 int pf = rvu_get_pf(pcifunc);
4223 int blkaddr, schq, link = -1;
4224 struct nix_txsch *txsch;
4225 u64 cfg, lmac_fifo_len;
4226 struct nix_hw *nix_hw;
4227 struct rvu_pfvf *pfvf;
4228 u8 cgx = 0, lmac = 0;
4231 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
4233 return NIX_AF_ERR_AF_LF_INVALID;
4235 nix_hw = get_nix_hw(rvu->hw, blkaddr);
4237 return NIX_AF_ERR_INVALID_NIXBLK;
4239 if (is_afvf(pcifunc))
4240 rvu_get_lbk_link_max_frs(rvu, &max_mtu);
4242 rvu_get_lmac_link_max_frs(rvu, &max_mtu);
4244 if (!req->sdp_link && req->maxlen > max_mtu)
4245 return NIX_AF_ERR_FRS_INVALID;
4247 if (req->update_minlen && req->minlen < NIC_HW_MIN_FRS)
4248 return NIX_AF_ERR_FRS_INVALID;
4250 /* Check if requester wants to update SMQ's */
4251 if (!req->update_smq)
4254 /* Update min/maxlen in each of the SMQs attached to this PF/VF */
4255 txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ];
4256 mutex_lock(&rvu->rsrc_lock);
4257 for (schq = 0; schq < txsch->schq.max; schq++) {
4258 if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
4260 cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq));
4261 cfg = (cfg & ~(0xFFFFULL << 8)) | ((u64)req->maxlen << 8);
4262 if (req->update_minlen)
4263 cfg = (cfg & ~0x7FULL) | ((u64)req->minlen & 0x7F);
4264 rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq), cfg);
4266 mutex_unlock(&rvu->rsrc_lock);
4269 /* Check if config is for SDP link */
4270 if (req->sdp_link) {
4272 return NIX_AF_ERR_RX_LINK_INVALID;
4273 link = hw->cgx_links + hw->lbk_links;
4277 /* Check if the request is from CGX mapped RVU PF */
4278 if (is_pf_cgxmapped(rvu, pf)) {
4279 /* Get CGX and LMAC to which this PF is mapped and find link */
4280 rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx, &lmac);
4281 link = (cgx * hw->lmac_per_cgx) + lmac;
4282 } else if (pf == 0) {
4283 /* For VFs of PF0 ingress is LBK port, so config LBK link */
4284 pfvf = rvu_get_pfvf(rvu, pcifunc);
4285 link = hw->cgx_links + pfvf->lbkid;
4289 return NIX_AF_ERR_RX_LINK_INVALID;
4293 nix_find_link_frs(rvu, req, pcifunc);
4295 cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link));
4296 cfg = (cfg & ~(0xFFFFULL << 16)) | ((u64)req->maxlen << 16);
4297 if (req->update_minlen)
4298 cfg = (cfg & ~0xFFFFULL) | req->minlen;
4299 rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link), cfg);
4301 if (req->sdp_link || pf == 0)
4304 /* Update transmit credits for CGX links */
4305 lmac_fifo_len = rvu_cgx_get_lmac_fifolen(rvu, cgx, lmac);
4306 if (!lmac_fifo_len) {
4308 "%s: Failed to get CGX/RPM%d:LMAC%d FIFO size\n",
4309 __func__, cgx, lmac);
4312 return nix_config_link_credits(rvu, blkaddr, link, pcifunc,
4313 (lmac_fifo_len - req->maxlen) / 16);
4316 int rvu_mbox_handler_nix_set_rx_cfg(struct rvu *rvu, struct nix_rx_cfg *req,
4317 struct msg_rsp *rsp)
4319 int nixlf, blkaddr, err;
4322 err = nix_get_nixlf(rvu, req->hdr.pcifunc, &nixlf, &blkaddr);
4326 cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf));
4327 /* Set the interface configuration */
4328 if (req->len_verify & BIT(0))
4331 cfg &= ~BIT_ULL(41);
4333 if (req->len_verify & BIT(1))
4336 cfg &= ~BIT_ULL(40);
4338 if (req->len_verify & NIX_RX_DROP_RE)
4341 cfg &= ~BIT_ULL(32);
4343 if (req->csum_verify & BIT(0))
4346 cfg &= ~BIT_ULL(37);
4348 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf), cfg);
4353 static u64 rvu_get_lbk_link_credits(struct rvu *rvu, u16 lbk_max_frs)
4355 return 1600; /* 16 * max LBK datarate = 16 * 100Gbps */
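/* One-time link setup for a NIX block: program default min/max frame
 * sizes on all RX links and the initial TX credit counts for CGX/RPM and
 * LBK links based on the LMAC FIFO sizes.
 */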
4358 static void nix_link_config(struct rvu *rvu, int blkaddr,
4359 struct nix_hw *nix_hw)
4361 struct rvu_hwinfo *hw = rvu->hw;
4362 int cgx, lmac_cnt, slink, link;
4363 u16 lbk_max_frs, lmac_max_frs;
4364 unsigned long lmac_bmap;
4365 u64 tx_credits, cfg;
4369 rvu_get_lbk_link_max_frs(rvu, &lbk_max_frs);
4370 rvu_get_lmac_link_max_frs(rvu, &lmac_max_frs);
4372 /* Set default min/max packet lengths allowed on NIX Rx links.
4374 * With the HW reset minlen value of 60 bytes, HW will treat ARP pkts
4375 * as undersized and report them to SW as error pkts, hence
4376 * setting it to 40 bytes.
4378 for (link = 0; link < hw->cgx_links; link++) {
4379 rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link),
4380 ((u64)lmac_max_frs << 16) | NIC_HW_MIN_FRS);
4383 for (link = hw->cgx_links; link < hw->lbk_links; link++) {
4384 rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link),
4385 ((u64)lbk_max_frs << 16) | NIC_HW_MIN_FRS);
4387 if (hw->sdp_links) {
4388 link = hw->cgx_links + hw->lbk_links;
4389 rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link),
4390 SDP_HW_MAX_FRS << 16 | NIC_HW_MIN_FRS);
4393 /* Get MCS external bypass status for CN10K-B */
4394 if (mcs_get_blkcnt() == 1) {
4395 /* Adjust for 2 credits when external bypass is disabled */
4396 nix_hw->cc_mcs_cnt = is_mcs_bypass(0) ? 0 : 2;
4399 /* Set credits for Tx links assuming max packet length allowed.
4400 * This will be reconfigured based on MTU set for PF/VF.
4402 for (cgx = 0; cgx < hw->cgx; cgx++) {
4403 lmac_cnt = cgx_get_lmac_cnt(rvu_cgx_pdata(cgx, rvu));
4404 /* Skip when cgx is not available or lmac cnt is zero */
4407 slink = cgx * hw->lmac_per_cgx;
4409 /* Get LMAC id's from bitmap */
4410 lmac_bmap = cgx_get_lmac_bmap(rvu_cgx_pdata(cgx, rvu));
4411 for_each_set_bit(iter, &lmac_bmap, rvu->hw->lmac_per_cgx) {
4412 lmac_fifo_len = rvu_cgx_get_lmac_fifolen(rvu, cgx, iter);
4413 if (!lmac_fifo_len) {
4415 "%s: Failed to get CGX/RPM%d:LMAC%d FIFO size\n",
4416 __func__, cgx, iter);
4419 tx_credits = (lmac_fifo_len - lmac_max_frs) / 16;
4420 /* Enable credits and set credit pkt count to max allowed */
4421 cfg = (tx_credits << 12) | (0x1FF << 2) | BIT_ULL(1);
4422 cfg |= FIELD_PREP(NIX_AF_LINKX_MCS_CNT_MASK, nix_hw->cc_mcs_cnt);
4424 link = iter + slink;
4425 nix_hw->tx_credits[link] = tx_credits;
4426 rvu_write64(rvu, blkaddr,
4427 NIX_AF_TX_LINKX_NORM_CREDIT(link), cfg);
4431 /* Set Tx credits for LBK link */
4432 slink = hw->cgx_links;
4433 for (link = slink; link < (slink + hw->lbk_links); link++) {
4434 tx_credits = rvu_get_lbk_link_credits(rvu, lbk_max_frs);
4435 nix_hw->tx_credits[link] = tx_credits;
4436 /* Enable credits and set credit pkt count to max allowed */
4437 tx_credits = (tx_credits << 12) | (0x1FF << 2) | BIT_ULL(1);
4438 rvu_write64(rvu, blkaddr,
4439 NIX_AF_TX_LINKX_NORM_CREDIT(link), tx_credits);
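/* Trigger X2P bus calibration and verify that all present CGX ports and
 * the LBK block responded before clearing the calibrate bit again.
 */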
4443 static int nix_calibrate_x2p(struct rvu *rvu, int blkaddr)
4448 /* Start X2P bus calibration */
4449 rvu_write64(rvu, blkaddr, NIX_AF_CFG,
4450 rvu_read64(rvu, blkaddr, NIX_AF_CFG) | BIT_ULL(9));
4451 /* Wait for calibration to complete */
4452 err = rvu_poll_reg(rvu, blkaddr,
4453 NIX_AF_STATUS, BIT_ULL(10), false);
4455 dev_err(rvu->dev, "NIX X2P bus calibration failed\n");
4459 status = rvu_read64(rvu, blkaddr, NIX_AF_STATUS);
4460 /* Check if CGX devices are ready */
4461 for (idx = 0; idx < rvu->cgx_cnt_max; idx++) {
4462 /* Skip when cgx port is not available */
4463 if (!rvu_cgx_pdata(idx, rvu) ||
4464 (status & (BIT_ULL(16 + idx))))
4467 "CGX%d didn't respond to NIX X2P calibration\n", idx);
4471 /* Check if LBK is ready */
4472 if (!(status & BIT_ULL(19))) {
4474 "LBK didn't respond to NIX X2P calibration\n");
4478 /* Clear 'calibrate_x2p' bit */
4479 rvu_write64(rvu, blkaddr, NIX_AF_CFG,
4480 rvu_read64(rvu, blkaddr, NIX_AF_CFG) & ~BIT_ULL(9));
4481 if (err || (status & 0x3FFULL))
4483 "NIX X2P calibration failed, status 0x%llx\n", status);
4489 static int nix_aq_init(struct rvu *rvu, struct rvu_block *block)
4494 /* Set admin queue endianness */
4495 cfg = rvu_read64(rvu, block->addr, NIX_AF_CFG);
4498 rvu_write64(rvu, block->addr, NIX_AF_CFG, cfg);
4501 rvu_write64(rvu, block->addr, NIX_AF_CFG, cfg);
4504 /* Do not bypass NDC cache */
4505 cfg = rvu_read64(rvu, block->addr, NIX_AF_NDC_CFG);
4507 #ifdef CONFIG_NDC_DIS_DYNAMIC_CACHING
4508 /* Disable caching of SQB aka SQEs */
4511 rvu_write64(rvu, block->addr, NIX_AF_NDC_CFG, cfg);
4513 /* Result structure can be followed by RQ/SQ/CQ context at
4514 * RES + 128 bytes and a write mask at RES + 256 bytes, depending on
4515 * operation type. Alloc sufficient result memory for all operations.
4517 err = rvu_aq_alloc(rvu, &block->aq,
4518 Q_COUNT(AQ_SIZE), sizeof(struct nix_aq_inst_s),
4519 ALIGN(sizeof(struct nix_aq_res_s), 128) + 256);
4523 rvu_write64(rvu, block->addr, NIX_AF_AQ_CFG, AQ_SIZE);
4524 rvu_write64(rvu, block->addr,
4525 NIX_AF_AQ_BASE, (u64)block->aq->inst->iova);
4529 static void rvu_nix_setup_capabilities(struct rvu *rvu, int blkaddr)
4531 struct rvu_hwinfo *hw = rvu->hw;
4534 hw_const = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);
4536 /* On OcteonTx2 DWRR quantum is directly configured into each of
4537 * the transmit scheduler queues, and PF/VF drivers were free to
4538 * configure any value up to 2^24.
4539 * On CN10K, HW is modified, the quantum configuration at scheduler
4540 * queues is in terms of weight. And SW needs to setup a base DWRR MTU
4541 * at NIX_AF_DWRR_RPM_MTU / NIX_AF_DWRR_SDP_MTU. HW will do
4542 * 'DWRR MTU * weight' to get the quantum.
4544 * Check if HW uses a common MTU for all DWRR quantum configs.
4545 * On OcteonTx2 this register field is '0'.
4547 if ((((hw_const >> 56) & 0x10) == 0x10) && !(hw_const & BIT_ULL(61)))
4548 hw->cap.nix_common_dwrr_mtu = true;
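/* Bit 61 of NIX_AF_CONST1 indicates that the silicon provides multiple
 * DWRR MTU registers beyond the RPM/SDP pair mentioned above (assumption
 * based on the capability name used below).
 */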
4550 if (hw_const & BIT_ULL(61))
4551 hw->cap.nix_multiple_dwrr_mtu = true;
4554 static int rvu_nix_block_init(struct rvu *rvu, struct nix_hw *nix_hw)
4556 const struct npc_lt_def_cfg *ltdefs;
4557 struct rvu_hwinfo *hw = rvu->hw;
4558 int blkaddr = nix_hw->blkaddr;
4559 struct rvu_block *block;
4563 block = &hw->block[blkaddr];
4565 if (is_rvu_96xx_B0(rvu)) {
4566 /* As per a HW erratum in 96xx A0/B0 silicon, NIX may corrupt
4567 * internal state when conditional clocks are turned off.
4568 * Hence enable them.
4570 rvu_write64(rvu, blkaddr, NIX_AF_CFG,
4571 rvu_read64(rvu, blkaddr, NIX_AF_CFG) | 0x40ULL);
4573 /* Set chan/link to backpressure TL3 instead of TL2 */
4574 rvu_write64(rvu, blkaddr, NIX_AF_PSE_CHANNEL_LEVEL, 0x01);
4576 /* Disable SQ manager's sticky mode operation (set TM6 = 0).
4577 * This sticky mode is known to cause SQ stalls when multiple
4578 * SQs are mapped to the same SMQ and are transmitting pkts at the same time.
4580 cfg = rvu_read64(rvu, blkaddr, NIX_AF_SQM_DBG_CTL_STATUS);
4581 cfg &= ~BIT_ULL(15);
4582 rvu_write64(rvu, blkaddr, NIX_AF_SQM_DBG_CTL_STATUS, cfg);
4585 ltdefs = rvu->kpu.lt_def;
4586 /* Calibrate X2P bus to check if CGX/LBK links are fine */
4587 err = nix_calibrate_x2p(rvu, blkaddr);
4591 /* Setup capabilities of the NIX block */
4592 rvu_nix_setup_capabilities(rvu, blkaddr);
4594 /* Initialize admin queue */
4595 err = nix_aq_init(rvu, block);
4599 /* Restore CINT timer delay to HW reset values */
4600 rvu_write64(rvu, blkaddr, NIX_AF_CINT_DELAY, 0x0ULL);
4602 cfg = rvu_read64(rvu, blkaddr, NIX_AF_SEB_CFG);
4604 /* For better performance use NDC TX instead of NDC RX for SQ's SQEs */
4606 if (!is_rvu_otx2(rvu))
4607 cfg |= NIX_PTP_1STEP_EN;
4609 rvu_write64(rvu, blkaddr, NIX_AF_SEB_CFG, cfg);
4611 if (!is_rvu_otx2(rvu))
4612 rvu_nix_block_cn10k_init(rvu, nix_hw);
4614 if (is_block_implemented(hw, blkaddr)) {
4615 err = nix_setup_txschq(rvu, nix_hw, blkaddr);
4619 err = nix_setup_ipolicers(rvu, nix_hw, blkaddr);
4623 err = nix_af_mark_format_setup(rvu, nix_hw, blkaddr);
4627 err = nix_setup_mcast(rvu, nix_hw, blkaddr);
4631 err = nix_setup_txvlan(rvu, nix_hw);
4635 /* Configure segmentation offload formats */
4636 nix_setup_lso(rvu, nix_hw, blkaddr);
4638 /* Config Outer/Inner L2, IP, TCP, UDP and SCTP NPC layer info.
4639 * This helps the HW protocol checker identify headers
4640 * and validate lengths and checksums.
4642 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OL2,
4643 (ltdefs->rx_ol2.lid << 8) | (ltdefs->rx_ol2.ltype_match << 4) |
4644 ltdefs->rx_ol2.ltype_mask);
4645 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP4,
4646 (ltdefs->rx_oip4.lid << 8) | (ltdefs->rx_oip4.ltype_match << 4) |
4647 ltdefs->rx_oip4.ltype_mask);
4648 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP4,
4649 (ltdefs->rx_iip4.lid << 8) | (ltdefs->rx_iip4.ltype_match << 4) |
4650 ltdefs->rx_iip4.ltype_mask);
4651 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP6,
4652 (ltdefs->rx_oip6.lid << 8) | (ltdefs->rx_oip6.ltype_match << 4) |
4653 ltdefs->rx_oip6.ltype_mask);
4654 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP6,
4655 (ltdefs->rx_iip6.lid << 8) | (ltdefs->rx_iip6.ltype_match << 4) |
4656 ltdefs->rx_iip6.ltype_mask);
4657 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OTCP,
4658 (ltdefs->rx_otcp.lid << 8) | (ltdefs->rx_otcp.ltype_match << 4) |
4659 ltdefs->rx_otcp.ltype_mask);
4660 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ITCP,
4661 (ltdefs->rx_itcp.lid << 8) | (ltdefs->rx_itcp.ltype_match << 4) |
4662 ltdefs->rx_itcp.ltype_mask);
4663 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OUDP,
4664 (ltdefs->rx_oudp.lid << 8) | (ltdefs->rx_oudp.ltype_match << 4) |
4665 ltdefs->rx_oudp.ltype_mask);
4666 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IUDP,
4667 (ltdefs->rx_iudp.lid << 8) | (ltdefs->rx_iudp.ltype_match << 4) |
4668 ltdefs->rx_iudp.ltype_mask);
4669 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OSCTP,
4670 (ltdefs->rx_osctp.lid << 8) | (ltdefs->rx_osctp.ltype_match << 4) |
4671 ltdefs->rx_osctp.ltype_mask);
4672 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ISCTP,
4673 (ltdefs->rx_isctp.lid << 8) | (ltdefs->rx_isctp.ltype_match << 4) |
4674 ltdefs->rx_isctp.ltype_mask);
4676 if (!is_rvu_otx2(rvu)) {
4677 /* Enable APAD calculation for other protocols
4678 * matching APAD0 and APAD1 lt def registers.
4680 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_CST_APAD0,
4681 (ltdefs->rx_apad0.valid << 11) |
4682 (ltdefs->rx_apad0.lid << 8) |
4683 (ltdefs->rx_apad0.ltype_match << 4) |
4684 ltdefs->rx_apad0.ltype_mask);
4685 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_CST_APAD1,
4686 (ltdefs->rx_apad1.valid << 11) |
4687 (ltdefs->rx_apad1.lid << 8) |
4688 (ltdefs->rx_apad1.ltype_match << 4) |
4689 ltdefs->rx_apad1.ltype_mask);
4691 /* The receive ethertype definition register defines layer
4692 * information in NPC_RESULT_S to identify the Ethertype
4693 * location in the L2 header. Used for Ethertype overwriting
4694 * in the inline IPsec flow.
4696 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ET(0),
4697 (ltdefs->rx_et[0].offset << 12) |
4698 (ltdefs->rx_et[0].valid << 11) |
4699 (ltdefs->rx_et[0].lid << 8) |
4700 (ltdefs->rx_et[0].ltype_match << 4) |
4701 ltdefs->rx_et[0].ltype_mask);
4702 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ET(1),
4703 (ltdefs->rx_et[1].offset << 12) |
4704 (ltdefs->rx_et[1].valid << 11) |
4705 (ltdefs->rx_et[1].lid << 8) |
4706 (ltdefs->rx_et[1].ltype_match << 4) |
4707 ltdefs->rx_et[1].ltype_mask);
4710 err = nix_rx_flowkey_alg_cfg(rvu, blkaddr);
4714 nix_hw->tx_credits = kcalloc(hw->cgx_links + hw->lbk_links,
4715 sizeof(u64), GFP_KERNEL);
4716 if (!nix_hw->tx_credits)
4719 /* Initialize CGX/LBK/SDP link credits, min/max pkt lengths */
4720 nix_link_config(rvu, blkaddr, nix_hw);
4722 /* Enable Channel backpressure */
4723 rvu_write64(rvu, blkaddr, NIX_AF_RX_CFG, BIT_ULL(0));
4728 int rvu_nix_init(struct rvu *rvu)
4730 struct rvu_hwinfo *hw = rvu->hw;
4731 struct nix_hw *nix_hw;
4732 int blkaddr = 0, err;
4735 hw->nix = devm_kcalloc(rvu->dev, MAX_NIX_BLKS, sizeof(struct nix_hw),
4740 blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
4742 nix_hw = &hw->nix[i];
4744 nix_hw->blkaddr = blkaddr;
4745 err = rvu_nix_block_init(rvu, nix_hw);
4748 blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
4755 static void rvu_nix_block_freemem(struct rvu *rvu, int blkaddr,
4756 struct rvu_block *block)
4758 struct nix_txsch *txsch;
4759 struct nix_mcast *mcast;
4760 struct nix_txvlan *vlan;
4761 struct nix_hw *nix_hw;
4764 rvu_aq_free(rvu, block->aq);
4766 if (is_block_implemented(rvu->hw, blkaddr)) {
4767 nix_hw = get_nix_hw(rvu->hw, blkaddr);
4771 for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
4772 txsch = &nix_hw->txsch[lvl];
4773 kfree(txsch->schq.bmap);
4776 kfree(nix_hw->tx_credits);
4778 nix_ipolicer_freemem(rvu, nix_hw);
4780 vlan = &nix_hw->txvlan;
4781 kfree(vlan->rsrc.bmap);
4782 mutex_destroy(&vlan->rsrc_lock);
4784 mcast = &nix_hw->mcast;
4785 qmem_free(rvu->dev, mcast->mce_ctx);
4786 qmem_free(rvu->dev, mcast->mcast_buf);
4787 mutex_destroy(&mcast->mce_lock);
4791 void rvu_nix_freemem(struct rvu *rvu)
4793 struct rvu_hwinfo *hw = rvu->hw;
4794 struct rvu_block *block;
4797 blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
4799 block = &hw->block[blkaddr];
4800 rvu_nix_block_freemem(rvu, blkaddr, block);
4801 blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
4805 int rvu_mbox_handler_nix_lf_start_rx(struct rvu *rvu, struct msg_req *req,
4806 struct msg_rsp *rsp)
4808 u16 pcifunc = req->hdr.pcifunc;
4809 struct rvu_pfvf *pfvf;
4812 err = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL);
4816 rvu_npc_enable_default_entries(rvu, pcifunc, nixlf);
4818 npc_mcam_enable_flows(rvu, pcifunc);
4820 pfvf = rvu_get_pfvf(rvu, pcifunc);
4821 set_bit(NIXLF_INITIALIZED, &pfvf->flags);
4823 rvu_switch_update_rules(rvu, pcifunc);
4825 return rvu_cgx_start_stop_io(rvu, pcifunc, true);
4828 int rvu_mbox_handler_nix_lf_stop_rx(struct rvu *rvu, struct msg_req *req,
4829 struct msg_rsp *rsp)
4831 u16 pcifunc = req->hdr.pcifunc;
4832 struct rvu_pfvf *pfvf;
4835 err = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL);
4839 rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf);
4841 pfvf = rvu_get_pfvf(rvu, pcifunc);
4842 clear_bit(NIXLF_INITIALIZED, &pfvf->flags);
4844 return rvu_cgx_start_stop_io(rvu, pcifunc, false);
4847 #define RX_SA_BASE GENMASK_ULL(52, 7)
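/* SA base IOVA field within NIX_AF_LFX_RX_IPSEC_SA_BASE; a non-zero value
 * read back during teardown below means inline IPsec was configured for
 * this LF, in which case the CPT contexts are flushed.
 */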
4849 void rvu_nix_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int nixlf)
4851 struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
4852 struct hwctx_disable_req ctx_req;
4853 int pf = rvu_get_pf(pcifunc);
4854 struct mac_ops *mac_ops;
4860 ctx_req.hdr.pcifunc = pcifunc;
4862 /* Cleanup NPC MCAM entries, free Tx scheduler queues being used */
4863 rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf);
4864 rvu_npc_free_mcam_entries(rvu, pcifunc, nixlf);
4865 nix_interface_deinit(rvu, pcifunc, nixlf);
4866 nix_rx_sync(rvu, blkaddr);
4867 nix_txschq_free(rvu, pcifunc);
4869 clear_bit(NIXLF_INITIALIZED, &pfvf->flags);
4871 rvu_cgx_start_stop_io(rvu, pcifunc, false);
4874 ctx_req.ctype = NIX_AQ_CTYPE_SQ;
4875 err = nix_lf_hwctx_disable(rvu, &ctx_req);
4877 dev_err(rvu->dev, "SQ ctx disable failed\n");
4881 ctx_req.ctype = NIX_AQ_CTYPE_RQ;
4882 err = nix_lf_hwctx_disable(rvu, &ctx_req);
4884 dev_err(rvu->dev, "RQ ctx disable failed\n");
4888 ctx_req.ctype = NIX_AQ_CTYPE_CQ;
4889 err = nix_lf_hwctx_disable(rvu, &ctx_req);
4891 dev_err(rvu->dev, "CQ ctx disable failed\n");
4894 /* reset HW config done for Switch headers */
4895 rvu_npc_set_parse_mode(rvu, pcifunc, OTX2_PRIV_FLAGS_DEFAULT,
4896 (PKIND_TX | PKIND_RX), 0, 0, 0, 0);
4898 /* Disable the CGX and NPC config done for PTP */
4899 if (pfvf->hw_rx_tstamp_en) {
4900 rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
4901 cgxd = rvu_cgx_pdata(cgx_id, rvu);
4902 mac_ops = get_mac_ops(cgxd);
4903 mac_ops->mac_enadis_ptp_config(cgxd, lmac_id, false);
4904 /* Undo NPC config done for PTP */
4905 if (npc_config_ts_kpuaction(rvu, pf, pcifunc, false))
4906 dev_err(rvu->dev, "NPC config for PTP failed\n");
4907 pfvf->hw_rx_tstamp_en = false;
4910 /* reset priority flow control config */
4911 rvu_cgx_prio_flow_ctrl_cfg(rvu, pcifunc, 0, 0, 0);
4913 /* reset 802.3x flow control config */
4914 rvu_cgx_cfg_pause_frm(rvu, pcifunc, 0, 0);
4916 nix_ctx_free(rvu, pfvf);
4918 nix_free_all_bandprof(rvu, pcifunc);
4920 sa_base = rvu_read64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_SA_BASE(nixlf));
4921 if (FIELD_GET(RX_SA_BASE, sa_base)) {
4922 err = rvu_cpt_ctx_flush(rvu, pcifunc);
4925 "CPT ctx flush failed with error: %d\n", err);
4929 #define NIX_AF_LFX_TX_CFG_PTP_EN BIT_ULL(32)
4931 static int rvu_nix_lf_ptp_tx_cfg(struct rvu *rvu, u16 pcifunc, bool enable)
4933 struct rvu_hwinfo *hw = rvu->hw;
4934 struct rvu_block *block;
4939 pf = rvu_get_pf(pcifunc);
4940 if (!is_mac_feature_supported(rvu, pf, RVU_LMAC_FEAT_PTP))
4943 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
4945 return NIX_AF_ERR_AF_LF_INVALID;
4947 block = &hw->block[blkaddr];
4948 nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
4950 return NIX_AF_ERR_AF_LF_INVALID;
4952 cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_TX_CFG(nixlf));
4955 cfg |= NIX_AF_LFX_TX_CFG_PTP_EN;
4957 cfg &= ~NIX_AF_LFX_TX_CFG_PTP_EN;
4959 rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG(nixlf), cfg);
4964 int rvu_mbox_handler_nix_lf_ptp_tx_enable(struct rvu *rvu, struct msg_req *req,
4965 struct msg_rsp *rsp)
4967 return rvu_nix_lf_ptp_tx_cfg(rvu, req->hdr.pcifunc, true);
4970 int rvu_mbox_handler_nix_lf_ptp_tx_disable(struct rvu *rvu, struct msg_req *req,
4971 struct msg_rsp *rsp)
4973 return rvu_nix_lf_ptp_tx_cfg(rvu, req->hdr.pcifunc, false);
4976 int rvu_mbox_handler_nix_lso_format_cfg(struct rvu *rvu,
4977 struct nix_lso_format_cfg *req,
4978 struct nix_lso_format_cfg_rsp *rsp)
4980 u16 pcifunc = req->hdr.pcifunc;
4981 struct nix_hw *nix_hw;
4982 struct rvu_pfvf *pfvf;
4983 int blkaddr, idx, f;
4986 pfvf = rvu_get_pfvf(rvu, pcifunc);
4987 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
4988 if (!pfvf->nixlf || blkaddr < 0)
4989 return NIX_AF_ERR_AF_LF_INVALID;
4991 nix_hw = get_nix_hw(rvu->hw, blkaddr);
4993 return NIX_AF_ERR_INVALID_NIXBLK;
4995 /* Find existing matching LSO format, if any */
4996 for (idx = 0; idx < nix_hw->lso.in_use; idx++) {
4997 for (f = 0; f < NIX_LSO_FIELD_MAX; f++) {
4998 reg = rvu_read64(rvu, blkaddr,
4999 NIX_AF_LSO_FORMATX_FIELDX(idx, f));
5000 if (req->fields[f] != (reg & req->field_mask))
5004 if (f == NIX_LSO_FIELD_MAX)
5008 if (idx < nix_hw->lso.in_use) {
5010 rsp->lso_format_idx = idx;
5014 if (nix_hw->lso.in_use == nix_hw->lso.total)
5015 return NIX_AF_ERR_LSO_CFG_FAIL;
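/* No existing format matched; claim the next free LSO format index and
 * program its fields below.
 */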
5017 rsp->lso_format_idx = nix_hw->lso.in_use++;
5019 for (f = 0; f < NIX_LSO_FIELD_MAX; f++)
5020 rvu_write64(rvu, blkaddr,
5021 NIX_AF_LSO_FORMATX_FIELDX(rsp->lso_format_idx, f),
5027 #define IPSEC_GEN_CFG_EGRP GENMASK_ULL(50, 48)
5028 #define IPSEC_GEN_CFG_OPCODE GENMASK_ULL(47, 32)
5029 #define IPSEC_GEN_CFG_PARAM1 GENMASK_ULL(31, 16)
5030 #define IPSEC_GEN_CFG_PARAM2 GENMASK_ULL(15, 0)
5032 #define CPT_INST_QSEL_BLOCK GENMASK_ULL(28, 24)
5033 #define CPT_INST_QSEL_PF_FUNC GENMASK_ULL(23, 8)
5034 #define CPT_INST_QSEL_SLOT GENMASK_ULL(7, 0)
5036 #define CPT_INST_CREDIT_TH GENMASK_ULL(53, 32)
5037 #define CPT_INST_CREDIT_BPID GENMASK_ULL(30, 22)
5038 #define CPT_INST_CREDIT_CNT GENMASK_ULL(21, 0)
5040 static void nix_inline_ipsec_cfg(struct rvu *rvu, struct nix_inline_ipsec_cfg *req,
5043 u8 cpt_idx, cpt_blkaddr;
5046 cpt_idx = (blkaddr == BLKADDR_NIX0) ? 0 : 1;
5049 /* Enable context prefetching */
5050 if (!is_rvu_otx2(rvu))
5053 /* Set OPCODE and EGRP */
5054 val |= FIELD_PREP(IPSEC_GEN_CFG_EGRP, req->gen_cfg.egrp);
5055 val |= FIELD_PREP(IPSEC_GEN_CFG_OPCODE, req->gen_cfg.opcode);
5056 val |= FIELD_PREP(IPSEC_GEN_CFG_PARAM1, req->gen_cfg.param1);
5057 val |= FIELD_PREP(IPSEC_GEN_CFG_PARAM2, req->gen_cfg.param2);
5059 rvu_write64(rvu, blkaddr, NIX_AF_RX_IPSEC_GEN_CFG, val);
5061 /* Set CPT queue for inline IPSec */
5062 val = FIELD_PREP(CPT_INST_QSEL_SLOT, req->inst_qsel.cpt_slot);
5063 val |= FIELD_PREP(CPT_INST_QSEL_PF_FUNC,
5064 req->inst_qsel.cpt_pf_func);
5066 if (!is_rvu_otx2(rvu)) {
5067 cpt_blkaddr = (cpt_idx == 0) ? BLKADDR_CPT0 :
5069 val |= FIELD_PREP(CPT_INST_QSEL_BLOCK, cpt_blkaddr);
5072 rvu_write64(rvu, blkaddr, NIX_AF_RX_CPTX_INST_QSEL(cpt_idx),
5075 /* Set CPT credit */
5076 val = rvu_read64(rvu, blkaddr, NIX_AF_RX_CPTX_CREDIT(cpt_idx));
5077 if ((val & 0x3FFFFF) != 0x3FFFFF)
5078 rvu_write64(rvu, blkaddr, NIX_AF_RX_CPTX_CREDIT(cpt_idx),
5081 val = FIELD_PREP(CPT_INST_CREDIT_CNT, req->cpt_credit);
5082 val |= FIELD_PREP(CPT_INST_CREDIT_BPID, req->bpid);
5083 val |= FIELD_PREP(CPT_INST_CREDIT_TH, req->credit_th);
5084 rvu_write64(rvu, blkaddr, NIX_AF_RX_CPTX_CREDIT(cpt_idx), val);
5086 rvu_write64(rvu, blkaddr, NIX_AF_RX_IPSEC_GEN_CFG, 0x0);
5087 rvu_write64(rvu, blkaddr, NIX_AF_RX_CPTX_INST_QSEL(cpt_idx),
5089 val = rvu_read64(rvu, blkaddr, NIX_AF_RX_CPTX_CREDIT(cpt_idx));
5090 if ((val & 0x3FFFFF) != 0x3FFFFF)
5091 rvu_write64(rvu, blkaddr, NIX_AF_RX_CPTX_CREDIT(cpt_idx),
5096 int rvu_mbox_handler_nix_inline_ipsec_cfg(struct rvu *rvu,
5097 struct nix_inline_ipsec_cfg *req,
5098 struct msg_rsp *rsp)
5100 if (!is_block_implemented(rvu->hw, BLKADDR_CPT0))
5103 nix_inline_ipsec_cfg(rvu, req, BLKADDR_NIX0);
5104 if (is_block_implemented(rvu->hw, BLKADDR_CPT1))
5105 nix_inline_ipsec_cfg(rvu, req, BLKADDR_NIX1);
5110 int rvu_mbox_handler_nix_read_inline_ipsec_cfg(struct rvu *rvu,
5111 struct msg_req *req,
5112 struct nix_inline_ipsec_cfg *rsp)
5117 if (!is_block_implemented(rvu->hw, BLKADDR_CPT0))
5120 val = rvu_read64(rvu, BLKADDR_NIX0, NIX_AF_RX_IPSEC_GEN_CFG);
5121 rsp->gen_cfg.egrp = FIELD_GET(IPSEC_GEN_CFG_EGRP, val);
5122 rsp->gen_cfg.opcode = FIELD_GET(IPSEC_GEN_CFG_OPCODE, val);
5123 rsp->gen_cfg.param1 = FIELD_GET(IPSEC_GEN_CFG_PARAM1, val);
5124 rsp->gen_cfg.param2 = FIELD_GET(IPSEC_GEN_CFG_PARAM2, val);
5126 val = rvu_read64(rvu, BLKADDR_NIX0, NIX_AF_RX_CPTX_CREDIT(0));
5127 rsp->cpt_credit = FIELD_GET(CPT_INST_CREDIT_CNT, val);
5128 rsp->credit_th = FIELD_GET(CPT_INST_CREDIT_TH, val);
5129 rsp->bpid = FIELD_GET(CPT_INST_CREDIT_BPID, val);
5134 int rvu_mbox_handler_nix_inline_ipsec_lf_cfg(struct rvu *rvu,
5135 struct nix_inline_ipsec_lf_cfg *req,
5136 struct msg_rsp *rsp)
5138 int lf, blkaddr, err;
5141 if (!is_block_implemented(rvu->hw, BLKADDR_CPT0))
5144 err = nix_get_nixlf(rvu, req->hdr.pcifunc, &lf, &blkaddr);
5149 /* Set TT, TAG_CONST, SA_POW2_SIZE and LENM1_MAX */
5150 val = (u64)req->ipsec_cfg0.tt << 44 |
5151 (u64)req->ipsec_cfg0.tag_const << 20 |
5152 (u64)req->ipsec_cfg0.sa_pow2_size << 16 |
5153 req->ipsec_cfg0.lenm1_max;
5155 if (blkaddr == BLKADDR_NIX1)
5158 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_CFG0(lf), val);
5160 /* Set SA_IDX_W and SA_IDX_MAX */
5161 val = (u64)req->ipsec_cfg1.sa_idx_w << 32 |
5162 req->ipsec_cfg1.sa_idx_max;
5163 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_CFG1(lf), val);
5165 /* Set SA base address */
5166 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_SA_BASE(lf),
5169 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_CFG0(lf), 0x0);
5170 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_CFG1(lf), 0x0);
5171 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_SA_BASE(lf),
5178 void rvu_nix_reset_mac(struct rvu_pfvf *pfvf, int pcifunc)
5180 bool from_vf = !!(pcifunc & RVU_PFVF_FUNC_MASK);
5182 /* Overwrite VF MAC address with default_mac */
5184 ether_addr_copy(pfvf->mac_addr, pfvf->default_mac);
5187 /* NIX ingress policers or bandwidth profiles APIs */
5188 static void nix_config_rx_pkt_policer_precolor(struct rvu *rvu, int blkaddr)
5190 struct npc_lt_def_cfg defs, *ltdefs;
5192 ltdefs = &defs;
5193 memcpy(ltdefs, rvu->kpu.lt_def, sizeof(struct npc_lt_def_cfg));
5195 /* Extract PCP and DEI fields from outer VLAN from byte offset
5196 * 2 from the start of LB_PTR (i.e. TAG).
5197 * VLAN0 is Outer VLAN and VLAN1 is Inner VLAN. Inner VLAN
5198 * fields are considered when 'Tunnel enable' is set in profile.
5200 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_VLAN0_PCP_DEI,
5201 (2UL << 12) | (ltdefs->ovlan.lid << 8) |
5202 (ltdefs->ovlan.ltype_match << 4) |
5203 ltdefs->ovlan.ltype_mask);
5204 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_VLAN1_PCP_DEI,
5205 (2UL << 12) | (ltdefs->ivlan.lid << 8) |
5206 (ltdefs->ivlan.ltype_match << 4) |
5207 ltdefs->ivlan.ltype_mask);
5209 /* DSCP field in outer and tunneled IPv4 packets */
5210 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP4_DSCP,
5211 (1UL << 12) | (ltdefs->rx_oip4.lid << 8) |
5212 (ltdefs->rx_oip4.ltype_match << 4) |
5213 ltdefs->rx_oip4.ltype_mask);
5214 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP4_DSCP,
5215 (1UL << 12) | (ltdefs->rx_iip4.lid << 8) |
5216 (ltdefs->rx_iip4.ltype_match << 4) |
5217 ltdefs->rx_iip4.ltype_mask);
5219 /* DSCP field (traffic class) in outer and tunneled IPv6 packets */
5220 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP6_DSCP,
5221 (1UL << 11) | (ltdefs->rx_oip6.lid << 8) |
5222 (ltdefs->rx_oip6.ltype_match << 4) |
5223 ltdefs->rx_oip6.ltype_mask);
5224 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP6_DSCP,
5225 (1UL << 11) | (ltdefs->rx_iip6.lid << 8) |
5226 (ltdefs->rx_iip6.ltype_match << 4) |
5227 ltdefs->rx_iip6.ltype_mask);
5230 static int nix_init_policer_context(struct rvu *rvu, struct nix_hw *nix_hw,
5231 int layer, int prof_idx)
5233 struct nix_cn10k_aq_enq_req aq_req;
5236 memset(&aq_req, 0, sizeof(struct nix_cn10k_aq_enq_req));
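/* qidx encodes both the profile index (bits [13:0]) and the
 * bandwidth profile layer (bits [15:14]).
 */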
5238 aq_req.qidx = (prof_idx & 0x3FFF) | (layer << 14);
5239 aq_req.ctype = NIX_AQ_CTYPE_BANDPROF;
5240 aq_req.op = NIX_AQ_INSTOP_INIT;
5242 /* Context is all zeros, submit to AQ */
5243 rc = rvu_nix_blk_aq_enq_inst(rvu, nix_hw,
5244 (struct nix_aq_enq_req *)&aq_req, NULL);
5246 dev_err(rvu->dev, "Failed to INIT bandwidth profile layer %d profile %d\n",
5251 static int nix_setup_ipolicers(struct rvu *rvu,
5252 struct nix_hw *nix_hw, int blkaddr)
5254 struct rvu_hwinfo *hw = rvu->hw;
5255 struct nix_ipolicer *ipolicer;
5256 int err, layer, prof_idx;
5259 cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST);
5260 if (!(cfg & BIT_ULL(61))) {
5261 hw->cap.ipolicer = false;
5265 hw->cap.ipolicer = true;
5266 nix_hw->ipolicer = devm_kcalloc(rvu->dev, BAND_PROF_NUM_LAYERS,
5267 sizeof(*ipolicer), GFP_KERNEL);
5268 if (!nix_hw->ipolicer)
5271 cfg = rvu_read64(rvu, blkaddr, NIX_AF_PL_CONST);
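/* NIX_AF_PL_CONST reports the number of profiles per layer:
 * leaf count in bits [15:0], mid in [31:16], top in [47:32],
 * as decoded in the switch below.
 */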
5273 for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
5274 ipolicer = &nix_hw->ipolicer[layer];
5276 case BAND_PROF_LEAF_LAYER:
5277 ipolicer->band_prof.max = cfg & 0XFFFF;
5279 case BAND_PROF_MID_LAYER:
5280 ipolicer->band_prof.max = (cfg >> 16) & 0XFFFF;
5282 case BAND_PROF_TOP_LAYER:
5283 ipolicer->band_prof.max = (cfg >> 32) & 0XFFFF;
5287 if (!ipolicer->band_prof.max)
5290 err = rvu_alloc_bitmap(&ipolicer->band_prof);
5294 ipolicer->pfvf_map = devm_kcalloc(rvu->dev,
5295 ipolicer->band_prof.max,
5296 sizeof(u16), GFP_KERNEL);
5297 if (!ipolicer->pfvf_map)
5300 ipolicer->match_id = devm_kcalloc(rvu->dev,
5301 ipolicer->band_prof.max,
5302 sizeof(u16), GFP_KERNEL);
5303 if (!ipolicer->match_id)
5307 prof_idx < ipolicer->band_prof.max; prof_idx++) {
5308 /* Set AF as current owner for INIT ops to succeed */
5309 ipolicer->pfvf_map[prof_idx] = 0x00;
5311 /* There is no enable bit in the profile context,
5312 * hence no context disable either. So INIT the contexts here
5313 * so that PF/VF later on only have to do a WRITE to
5314 * set up policer rates and config.
5316 err = nix_init_policer_context(rvu, nix_hw,
5322 /* Allocate memory for maintaining ref_counts for MID level
5323 * profiles; this will be needed for the leaf layer profiles'
5326 if (layer != BAND_PROF_MID_LAYER)
5329 ipolicer->ref_count = devm_kcalloc(rvu->dev,
5330 ipolicer->band_prof.max,
5331 sizeof(u16), GFP_KERNEL);
5332 if (!ipolicer->ref_count)
5336 /* Set policer timeunit to 2us, i.e. (19 + 1) * 100 nsec = 2us */
5337 rvu_write64(rvu, blkaddr, NIX_AF_PL_TS, 19);
5339 nix_config_rx_pkt_policer_precolor(rvu, blkaddr);
5344 static void nix_ipolicer_freemem(struct rvu *rvu, struct nix_hw *nix_hw)
5346 struct nix_ipolicer *ipolicer;
5349 if (!rvu->hw->cap.ipolicer)
5352 for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
5353 ipolicer = &nix_hw->ipolicer[layer];
5355 if (!ipolicer->band_prof.max)
5358 kfree(ipolicer->band_prof.bmap);
5362 static int nix_verify_bandprof(struct nix_cn10k_aq_enq_req *req,
5363 struct nix_hw *nix_hw, u16 pcifunc)
5365 struct nix_ipolicer *ipolicer;
5366 int layer, hi_layer, prof_idx;
5368 /* Bits [15:14] in profile index represent layer */
5369 layer = (req->qidx >> 14) & 0x03;
5370 prof_idx = req->qidx & 0x3FFF;
5372 ipolicer = &nix_hw->ipolicer[layer];
5373 if (prof_idx >= ipolicer->band_prof.max)
5376 /* Check whether the profile is allocated to the requesting PCIFUNC,
5377 * with the exception of AF. AF is allowed to read and update any context.
5379 if (pcifunc && ipolicer->pfvf_map[prof_idx] != pcifunc)
5382 /* If this profile is linked to a higher layer profile then check
5383 * whether that profile is also allocated to the requesting PCIFUNC
5386 if (!req->prof.hl_en)
5389 /* Leaf layer profile can link only to mid layer and
5390 * mid layer to top layer.
5392 if (layer == BAND_PROF_LEAF_LAYER)
5393 hi_layer = BAND_PROF_MID_LAYER;
5394 else if (layer == BAND_PROF_MID_LAYER)
5395 hi_layer = BAND_PROF_TOP_LAYER;
5399 ipolicer = &nix_hw->ipolicer[hi_layer];
5400 prof_idx = req->prof.band_prof_id;
5401 if (prof_idx >= ipolicer->band_prof.max ||
5402 ipolicer->pfvf_map[prof_idx] != pcifunc)
5408 int rvu_mbox_handler_nix_bandprof_alloc(struct rvu *rvu,
5409 struct nix_bandprof_alloc_req *req,
5410 struct nix_bandprof_alloc_rsp *rsp)
5412 int blkaddr, layer, prof, idx, err;
5413 u16 pcifunc = req->hdr.pcifunc;
5414 struct nix_ipolicer *ipolicer;
5415 struct nix_hw *nix_hw;
5417 if (!rvu->hw->cap.ipolicer)
5418 return NIX_AF_ERR_IPOLICER_NOTSUPP;
5420 err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
5424 mutex_lock(&rvu->rsrc_lock);
5425 for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
5426 if (layer == BAND_PROF_INVAL_LAYER)
5428 if (!req->prof_count[layer])
5431 ipolicer = &nix_hw->ipolicer[layer];
5432 for (idx = 0; idx < req->prof_count[layer]; idx++) {
5433 /* Allocate a max of 'MAX_BANDPROF_PER_PFFUNC' profiles */
5434 if (idx == MAX_BANDPROF_PER_PFFUNC)
5437 prof = rvu_alloc_rsrc(&ipolicer->band_prof);
5440 rsp->prof_count[layer]++;
5441 rsp->prof_idx[layer][idx] = prof;
5442 ipolicer->pfvf_map[prof] = pcifunc;
5445 mutex_unlock(&rvu->rsrc_lock);
5449 static int nix_free_all_bandprof(struct rvu *rvu, u16 pcifunc)
5451 int blkaddr, layer, prof_idx, err;
5452 struct nix_ipolicer *ipolicer;
5453 struct nix_hw *nix_hw;
5455 if (!rvu->hw->cap.ipolicer)
5456 return NIX_AF_ERR_IPOLICER_NOTSUPP;
5458 err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
5462 mutex_lock(&rvu->rsrc_lock);
5463 /* Free all the profiles allocated to the PCIFUNC */
5464 for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
5465 if (layer == BAND_PROF_INVAL_LAYER)
5467 ipolicer = &nix_hw->ipolicer[layer];
5469 for (prof_idx = 0; prof_idx < ipolicer->band_prof.max; prof_idx++) {
5470 if (ipolicer->pfvf_map[prof_idx] != pcifunc)
5473 /* Clear ratelimit aggregation, if any */
5474 if (layer == BAND_PROF_LEAF_LAYER &&
5475 ipolicer->match_id[prof_idx])
5476 nix_clear_ratelimit_aggr(rvu, nix_hw, prof_idx);
5478 ipolicer->pfvf_map[prof_idx] = 0x00;
5479 ipolicer->match_id[prof_idx] = 0;
5480 rvu_free_rsrc(&ipolicer->band_prof, prof_idx);
5483 mutex_unlock(&rvu->rsrc_lock);
5487 int rvu_mbox_handler_nix_bandprof_free(struct rvu *rvu,
5488 struct nix_bandprof_free_req *req,
5489 struct msg_rsp *rsp)
5491 int blkaddr, layer, prof_idx, idx, err;
5492 u16 pcifunc = req->hdr.pcifunc;
5493 struct nix_ipolicer *ipolicer;
5494 struct nix_hw *nix_hw;
5497 return nix_free_all_bandprof(rvu, pcifunc);
5499 if (!rvu->hw->cap.ipolicer)
5500 return NIX_AF_ERR_IPOLICER_NOTSUPP;
5502 err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
5506 mutex_lock(&rvu->rsrc_lock);
5507 /* Free the requested profile indices */
5508 for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
5509 if (layer == BAND_PROF_INVAL_LAYER)
5511 if (!req->prof_count[layer])
5514 ipolicer = &nix_hw->ipolicer[layer];
5515 for (idx = 0; idx < req->prof_count[layer]; idx++) {
5516 if (idx == MAX_BANDPROF_PER_PFFUNC)
5518 prof_idx = req->prof_idx[layer][idx];
5519 if (prof_idx >= ipolicer->band_prof.max ||
5520 ipolicer->pfvf_map[prof_idx] != pcifunc)
5523 /* Clear ratelimit aggregation, if any */
5524 if (layer == BAND_PROF_LEAF_LAYER &&
5525 ipolicer->match_id[prof_idx])
5526 nix_clear_ratelimit_aggr(rvu, nix_hw, prof_idx);
5528 ipolicer->pfvf_map[prof_idx] = 0x00;
5529 ipolicer->match_id[prof_idx] = 0;
5530 rvu_free_rsrc(&ipolicer->band_prof, prof_idx);
5533 mutex_unlock(&rvu->rsrc_lock);
5537 int nix_aq_context_read(struct rvu *rvu, struct nix_hw *nix_hw,
5538 struct nix_cn10k_aq_enq_req *aq_req,
5539 struct nix_cn10k_aq_enq_rsp *aq_rsp,
5540 u16 pcifunc, u8 ctype, u32 qidx)
5542 memset(aq_req, 0, sizeof(struct nix_cn10k_aq_enq_req));
5543 aq_req->hdr.pcifunc = pcifunc;
5544 aq_req->ctype = ctype;
5545 aq_req->op = NIX_AQ_INSTOP_READ;
5546 aq_req->qidx = qidx;
5548 return rvu_nix_blk_aq_enq_inst(rvu, nix_hw,
5549 (struct nix_aq_enq_req *)aq_req,
5550 (struct nix_aq_enq_rsp *)aq_rsp);
5553 static int nix_ipolicer_map_leaf_midprofs(struct rvu *rvu,
5554 struct nix_hw *nix_hw,
5555 struct nix_cn10k_aq_enq_req *aq_req,
5556 struct nix_cn10k_aq_enq_rsp *aq_rsp,
5557 u32 leaf_prof, u16 mid_prof)
5559 memset(aq_req, 0, sizeof(struct nix_cn10k_aq_enq_req));
5560 aq_req->hdr.pcifunc = 0x00;
5561 aq_req->ctype = NIX_AQ_CTYPE_BANDPROF;
5562 aq_req->op = NIX_AQ_INSTOP_WRITE;
5563 aq_req->qidx = leaf_prof;
5565 aq_req->prof.band_prof_id = mid_prof;
5566 aq_req->prof_mask.band_prof_id = GENMASK(6, 0);
5567 aq_req->prof.hl_en = 1;
5568 aq_req->prof_mask.hl_en = 1;
5570 return rvu_nix_blk_aq_enq_inst(rvu, nix_hw,
5571 (struct nix_aq_enq_req *)aq_req,
5572 (struct nix_aq_enq_rsp *)aq_rsp);
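/* Rate limit aggregation: when multiple leaf profiles (i.e. flows steered
 * to different RQs) are marked with the same match_id, link them to a
 * common mid layer profile so their traffic is policed as one aggregate.
 */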
5575 int rvu_nix_setup_ratelimit_aggr(struct rvu *rvu, u16 pcifunc,
5576 u16 rq_idx, u16 match_id)
5578 int leaf_prof, mid_prof, leaf_match;
5579 struct nix_cn10k_aq_enq_req aq_req;
5580 struct nix_cn10k_aq_enq_rsp aq_rsp;
5581 struct nix_ipolicer *ipolicer;
5582 struct nix_hw *nix_hw;
5583 int blkaddr, idx, rc;
5585 if (!rvu->hw->cap.ipolicer)
5588 rc = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
5592 /* Fetch the RQ's context to see if policing is enabled */
5593 rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp, pcifunc,
5594 NIX_AQ_CTYPE_RQ, rq_idx);
5597 "%s: Failed to fetch RQ%d context of PFFUNC 0x%x\n",
5598 __func__, rq_idx, pcifunc);
5602 if (!aq_rsp.rq.policer_ena)
5605 /* Get the bandwidth profile ID mapped to this RQ */
5606 leaf_prof = aq_rsp.rq.band_prof_id;
5608 ipolicer = &nix_hw->ipolicer[BAND_PROF_LEAF_LAYER];
5609 ipolicer->match_id[leaf_prof] = match_id;
5611 /* Check if any other leaf profile is marked with same match_id */
5612 for (idx = 0; idx < ipolicer->band_prof.max; idx++) {
5613 if (idx == leaf_prof)
5615 if (ipolicer->match_id[idx] != match_id)
5622 if (idx == ipolicer->band_prof.max)
5625 /* Fetch the matching profile's context to check if it's already
5626 * mapped to a mid level profile.
5628 rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp, 0x00,
5629 NIX_AQ_CTYPE_BANDPROF, leaf_match);
5632 "%s: Failed to fetch context of leaf profile %d\n",
5633 __func__, leaf_match);
5637 ipolicer = &nix_hw->ipolicer[BAND_PROF_MID_LAYER];
5638 if (aq_rsp.prof.hl_en) {
5639 /* Get the mid layer profile index and map the leaf_prof index
5640 * to it as well, so that flows that are being steered
5641 * to different RQs but marked with the same match_id
5642 * are rate limited in an aggregate fashion.
5644 mid_prof = aq_rsp.prof.band_prof_id;
5645 rc = nix_ipolicer_map_leaf_midprofs(rvu, nix_hw,
5647 leaf_prof, mid_prof);
5650 "%s: Failed to map leaf(%d) and mid(%d) profiles\n",
5651 __func__, leaf_prof, mid_prof);
5655 mutex_lock(&rvu->rsrc_lock);
5656 ipolicer->ref_count[mid_prof]++;
5657 mutex_unlock(&rvu->rsrc_lock);
5661 /* Allocate a mid layer profile and
5662 * map both 'leaf_prof' and 'leaf_match' profiles to it.
5664 mutex_lock(&rvu->rsrc_lock);
5665 mid_prof = rvu_alloc_rsrc(&ipolicer->band_prof);
5668 "%s: Unable to allocate mid layer profile\n", __func__);
5669 mutex_unlock(&rvu->rsrc_lock);
5672 mutex_unlock(&rvu->rsrc_lock);
5673 ipolicer->pfvf_map[mid_prof] = 0x00;
5674 ipolicer->ref_count[mid_prof] = 0;
5676 /* Initialize the mid layer profile the same as 'leaf_prof' */
5677 rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp, 0x00,
5678 NIX_AQ_CTYPE_BANDPROF, leaf_prof);
5681 "%s: Failed to fetch context of leaf profile %d\n",
5682 __func__, leaf_prof);
5686 memset(&aq_req, 0, sizeof(struct nix_cn10k_aq_enq_req));
5687 aq_req.hdr.pcifunc = 0x00;
5688 aq_req.qidx = (mid_prof & 0x3FFF) | (BAND_PROF_MID_LAYER << 14);
5689 aq_req.ctype = NIX_AQ_CTYPE_BANDPROF;
5690 aq_req.op = NIX_AQ_INSTOP_WRITE;
5691 memcpy(&aq_req.prof, &aq_rsp.prof, sizeof(struct nix_bandprof_s));
5692 memset((char *)&aq_req.prof_mask, 0xff, sizeof(struct nix_bandprof_s));
5693 /* Clear higher layer enable bit in the mid profile, just in case */
5694 aq_req.prof.hl_en = 0;
5695 aq_req.prof_mask.hl_en = 1;
5697 rc = rvu_nix_blk_aq_enq_inst(rvu, nix_hw,
5698 (struct nix_aq_enq_req *)&aq_req, NULL);
5701 "%s: Failed to INIT context of mid layer profile %d\n",
5702 __func__, mid_prof);
5706 /* Map both leaf profiles to this mid layer profile */
5707 rc = nix_ipolicer_map_leaf_midprofs(rvu, nix_hw,
5709 leaf_prof, mid_prof);
5712 "%s: Failed to map leaf(%d) and mid(%d) profiles\n",
5713 __func__, leaf_prof, mid_prof);
5717 mutex_lock(&rvu->rsrc_lock);
5718 ipolicer->ref_count[mid_prof]++;
5719 mutex_unlock(&rvu->rsrc_lock);
5721 rc = nix_ipolicer_map_leaf_midprofs(rvu, nix_hw,
5723 leaf_match, mid_prof);
5726 "%s: Failed to map leaf(%d) and mid(%d) profiles\n",
5727 __func__, leaf_match, mid_prof);
5728 ipolicer->ref_count[mid_prof]--;
5732 mutex_lock(&rvu->rsrc_lock);
5733 ipolicer->ref_count[mid_prof]++;
5734 mutex_unlock(&rvu->rsrc_lock);
5740 /* Called with the rsrc_lock mutex held */
5741 static void nix_clear_ratelimit_aggr(struct rvu *rvu, struct nix_hw *nix_hw,
5744 struct nix_cn10k_aq_enq_req aq_req;
5745 struct nix_cn10k_aq_enq_rsp aq_rsp;
5746 struct nix_ipolicer *ipolicer;
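/* The caller holds rsrc_lock (see the comment above); drop it across the
 * AQ context read below and re-acquire it afterwards.
 */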
5750 mutex_unlock(&rvu->rsrc_lock);
5752 rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp, 0x00,
5753 NIX_AQ_CTYPE_BANDPROF, leaf_prof);
5755 mutex_lock(&rvu->rsrc_lock);
5758 "%s: Failed to fetch context of leaf profile %d\n",
5759 __func__, leaf_prof);
5763 if (!aq_rsp.prof.hl_en)
5766 mid_prof = aq_rsp.prof.band_prof_id;
5767 ipolicer = &nix_hw->ipolicer[BAND_PROF_MID_LAYER];
5768 ipolicer->ref_count[mid_prof]--;
5769 /* If ref_count is zero, free mid layer profile */
5770 if (!ipolicer->ref_count[mid_prof]) {
5771 ipolicer->pfvf_map[mid_prof] = 0x00;
5772 rvu_free_rsrc(&ipolicer->band_prof, mid_prof);
5776 int rvu_mbox_handler_nix_bandprof_get_hwinfo(struct rvu *rvu, struct msg_req *req,
5777 struct nix_bandprof_get_hwinfo_rsp *rsp)
5779 struct nix_ipolicer *ipolicer;
5780 int blkaddr, layer, err;
5781 struct nix_hw *nix_hw;
5784 if (!rvu->hw->cap.ipolicer)
5785 return NIX_AF_ERR_IPOLICER_NOTSUPP;
5787 err = nix_get_struct_ptrs(rvu, req->hdr.pcifunc, &nix_hw, &blkaddr);
5791 /* Return number of bandwidth profiles free at each layer */
5792 mutex_lock(&rvu->rsrc_lock);
5793 for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
5794 if (layer == BAND_PROF_INVAL_LAYER)
5797 ipolicer = &nix_hw->ipolicer[layer];
5798 rsp->prof_count[layer] = rvu_rsrc_free_count(&ipolicer->band_prof);
5800 mutex_unlock(&rvu->rsrc_lock);
5802 /* Report the policer timeunit in nanoseconds */
5803 tu = rvu_read64(rvu, blkaddr, NIX_AF_PL_TS) & GENMASK_ULL(9, 0);
5804 rsp->policer_timeunit = (tu + 1) * 100;