// SPDX-License-Identifier: GPL-2.0
/* Marvell RVU Admin Function driver
 *
 * Copyright (C) 2018 Marvell.
 */

#include <linux/types.h>
#include <linux/module.h>
#include <linux/pci.h>

#include "rvu.h"
#include "cgx.h"
#include "lmac_common.h"
#include "rvu_reg.h"
#include "rvu_trace.h"
#include "rvu_npc_hash.h"

struct cgx_evq_entry {
	struct list_head evq_node;
	struct cgx_link_event link_event;
};

#define M(_name, _id, _fn_name, _req_type, _rsp_type)			\
static struct _req_type __maybe_unused					\
*otx2_mbox_alloc_msg_ ## _fn_name(struct rvu *rvu, int devid)		\
{									\
	struct _req_type *req;						\
									\
	req = (struct _req_type *)otx2_mbox_alloc_msg_rsp(		\
		&rvu->afpf_wq_info.mbox_up, devid, sizeof(struct _req_type), \
		sizeof(struct _rsp_type));				\
	if (!req)							\
		return NULL;						\
	req->hdr.sig = OTX2_MBOX_REQ_SIG;				\
	req->hdr.id = _id;						\
	trace_otx2_msg_alloc(rvu->pdev, _id, sizeof(*req));		\
	return req;							\
}

MBOX_UP_CGX_MESSAGES
#undef M

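/* Illustrative expansion (assuming CGX_LINK_EVENT is one of the
 * MBOX_UP_CGX_MESSAGES entries with request type cgx_link_info_msg):
 * the M() macro above generates an allocator roughly equivalent to
 *
 *	static struct cgx_link_info_msg __maybe_unused *
 *	otx2_mbox_alloc_msg_cgx_link_event(struct rvu *rvu, int devid);
 *
 * cgx_notify_pfs() below uses this generated helper to build the
 * link-status notification sent up to a mapped PF.
 */
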
bool is_mac_feature_supported(struct rvu *rvu, int pf, int feature)
{
	u8 cgx_id, lmac_id;
	void *cgxd;

	if (!is_pf_cgxmapped(rvu, pf))
		return 0;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
	cgxd = rvu_cgx_pdata(cgx_id, rvu);

	return (cgx_features_get(cgxd) & feature);
}

#define CGX_OFFSET(x)	((x) * rvu->hw->lmac_per_cgx)

/* Returns bitmap of mapped PFs */
static u64 cgxlmac_to_pfmap(struct rvu *rvu, u8 cgx_id, u8 lmac_id)
{
	return rvu->cgxlmac2pf_map[CGX_OFFSET(cgx_id) + lmac_id];
}

int cgxlmac_to_pf(struct rvu *rvu, int cgx_id, int lmac_id)
{
	unsigned long pfmap;

	pfmap = cgxlmac_to_pfmap(rvu, cgx_id, lmac_id);

	/* Assumes only one PF is mapped to a CGX LMAC port */
	if (!pfmap)
		return -ENODEV;

	return find_first_bit(&pfmap,
			      rvu->cgx_cnt_max * rvu->hw->lmac_per_cgx);
}

static u8 cgxlmac_id_to_bmap(u8 cgx_id, u8 lmac_id)
{
	return ((cgx_id & 0xF) << 4) | (lmac_id & 0xF);
}

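/* Example of the cgxlmac_id_to_bmap() encoding above: cgx_id 1 and lmac_id 2
 * are packed as 0x12, i.e. the CGX id in the upper nibble and the LMAC id in
 * the lower nibble. rvu_get_cgx_lmac_id() performs the reverse decode
 * wherever rvu->pf2cgxlmac_map[] is consumed.
 */
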
void *rvu_cgx_pdata(u8 cgx_id, struct rvu *rvu)
{
	if (cgx_id >= rvu->cgx_cnt_max)
		return NULL;

	return rvu->cgx_idmap[cgx_id];
}

/* Return the first enabled CGX instance; if none are enabled, return NULL */
void *rvu_first_cgx_pdata(struct rvu *rvu)
{
	int first_enabled_cgx = 0;
	void *cgxd = NULL;

	for (; first_enabled_cgx < rvu->cgx_cnt_max; first_enabled_cgx++) {
		cgxd = rvu_cgx_pdata(first_enabled_cgx, rvu);
		if (cgxd)
			break;
	}

	return cgxd;
}

/* Based on P2X connectivity, find the mapped NIX block for a PF */
static void rvu_map_cgx_nix_block(struct rvu *rvu, int pf,
				  int cgx_id, int lmac_id)
{
	struct rvu_pfvf *pfvf = &rvu->pf[pf];
	u8 p2x;

	p2x = cgx_lmac_get_p2x(cgx_id, lmac_id);
	/* Firmware sets P2X_SELECT as either NIX0 or NIX1 */
	pfvf->nix_blkaddr = BLKADDR_NIX0;
	if (is_rvu_supports_nix1(rvu) && p2x == CMR_P2X_SEL_NIX1)
		pfvf->nix_blkaddr = BLKADDR_NIX1;
}

static int rvu_map_cgx_lmac_pf(struct rvu *rvu)
{
	struct npc_pkind *pkind = &rvu->hw->pkind;
	int cgx_cnt_max = rvu->cgx_cnt_max;
	int pf = PF_CGXMAP_BASE;
	unsigned long lmac_bmap;
	int size, free_pkind;
	int cgx, lmac, iter;
	int numvfs, hwvfs;

	if (!cgx_cnt_max)
		return 0;

	if (cgx_cnt_max > 0xF || rvu->hw->lmac_per_cgx > 0xF)
		return -EINVAL;

	/* Alloc map table.
	 * An additional entry is required since PF id starts from 1 and
	 * hence entry at offset 0 is invalid.
	 */
	size = (cgx_cnt_max * rvu->hw->lmac_per_cgx + 1) * sizeof(u8);
	rvu->pf2cgxlmac_map = devm_kmalloc(rvu->dev, size, GFP_KERNEL);
	if (!rvu->pf2cgxlmac_map)
		return -ENOMEM;

	/* Initialize all entries with an invalid cgx and lmac id */
	memset(rvu->pf2cgxlmac_map, 0xFF, size);

	/* Reverse map table */
	rvu->cgxlmac2pf_map =
		devm_kzalloc(rvu->dev,
			     cgx_cnt_max * rvu->hw->lmac_per_cgx * sizeof(u64),
			     GFP_KERNEL);
	if (!rvu->cgxlmac2pf_map)
		return -ENOMEM;

	rvu->cgx_mapped_pfs = 0;
	for (cgx = 0; cgx < cgx_cnt_max; cgx++) {
		if (!rvu_cgx_pdata(cgx, rvu))
			continue;
		lmac_bmap = cgx_get_lmac_bmap(rvu_cgx_pdata(cgx, rvu));
		for_each_set_bit(iter, &lmac_bmap, rvu->hw->lmac_per_cgx) {
			lmac = cgx_get_lmacid(rvu_cgx_pdata(cgx, rvu),
					      iter);
			rvu->pf2cgxlmac_map[pf] = cgxlmac_id_to_bmap(cgx, lmac);
			rvu->cgxlmac2pf_map[CGX_OFFSET(cgx) + lmac] = 1 << pf;
			free_pkind = rvu_alloc_rsrc(&pkind->rsrc);
			pkind->pfchan_map[free_pkind] = ((pf) & 0x3F) << 16;
			rvu_map_cgx_nix_block(rvu, pf, cgx, lmac);
			rvu->cgx_mapped_pfs++;
			rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvfs);
			rvu->cgx_mapped_vfs += numvfs;
			pf++;
		}
	}

	return 0;
}

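/* Sketch of the maps built above (illustrative, assuming one CGX with two
 * enabled LMACs and PF_CGXMAP_BASE == 1, i.e. PF0 is the AF itself):
 *
 *	pf2cgxlmac_map[1] = 0x00	(CGX0/LMAC0)
 *	pf2cgxlmac_map[2] = 0x01	(CGX0/LMAC1)
 *	cgxlmac2pf_map[0] = BIT(1), cgxlmac2pf_map[1] = BIT(2)
 *
 * Unmapped pf2cgxlmac_map[] entries keep the 0xFF "invalid" marker set by
 * the memset() above.
 */
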
static int rvu_cgx_send_link_info(int cgx_id, int lmac_id, struct rvu *rvu)
{
	struct cgx_evq_entry *qentry;
	unsigned long flags;
	int err;

	qentry = kmalloc(sizeof(*qentry), GFP_KERNEL);
	if (!qentry)
		return -ENOMEM;

	/* Lock the event queue before we read the local link status */
	spin_lock_irqsave(&rvu->cgx_evq_lock, flags);
	err = cgx_get_link_info(rvu_cgx_pdata(cgx_id, rvu), lmac_id,
				&qentry->link_event.link_uinfo);
	qentry->link_event.cgx_id = cgx_id;
	qentry->link_event.lmac_id = lmac_id;
	if (err) {
		kfree(qentry);
		goto skip_add;
	}
	list_add_tail(&qentry->evq_node, &rvu->cgx_evq_head);
skip_add:
	spin_unlock_irqrestore(&rvu->cgx_evq_lock, flags);

	/* Start worker to process the events */
	queue_work(rvu->cgx_evh_wq, &rvu->cgx_evh_work);

	return 0;
}

/* This is called from interrupt context and is expected to be atomic */
static int cgx_lmac_postevent(struct cgx_link_event *event, void *data)
{
	struct cgx_evq_entry *qentry;
	struct rvu *rvu = data;

	/* Post the event to the event queue */
	qentry = kmalloc(sizeof(*qentry), GFP_ATOMIC);
	if (!qentry)
		return -ENOMEM;
	qentry->link_event = *event;
	spin_lock(&rvu->cgx_evq_lock);
	list_add_tail(&qentry->evq_node, &rvu->cgx_evq_head);
	spin_unlock(&rvu->cgx_evq_lock);

	/* Start worker to process the events */
	queue_work(rvu->cgx_evh_wq, &rvu->cgx_evh_work);

	return 0;
}

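/* Link events therefore flow in two stages: cgx_lmac_postevent() runs in
 * atomic context, copies the event onto cgx_evq_head and kicks cgx_evh_wq;
 * cgx_evhandler_task() later drains the queue in process context and calls
 * cgx_notify_pfs() below to forward each event to the mapped PF.
 */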
static void cgx_notify_pfs(struct cgx_link_event *event, struct rvu *rvu)
{
	struct cgx_link_user_info *linfo;
	struct cgx_link_info_msg *msg;
	unsigned long pfmap;
	int err, pfid;

	linfo = &event->link_uinfo;
	pfmap = cgxlmac_to_pfmap(rvu, event->cgx_id, event->lmac_id);

	do {
		pfid = find_first_bit(&pfmap,
				      rvu->cgx_cnt_max * rvu->hw->lmac_per_cgx);
		clear_bit(pfid, &pfmap);

		/* Check if notification is enabled */
		if (!test_bit(pfid, &rvu->pf_notify_bmap)) {
			dev_info(rvu->dev, "cgx %d: lmac %d Link status %s\n",
				 event->cgx_id, event->lmac_id,
				 linfo->link_up ? "UP" : "DOWN");
			continue;
		}

		/* Send mbox message to PF */
		msg = otx2_mbox_alloc_msg_cgx_link_event(rvu, pfid);
		if (!msg)
			continue;
		msg->link_info = *linfo;
		otx2_mbox_msg_send(&rvu->afpf_wq_info.mbox_up, pfid);
		err = otx2_mbox_wait_for_rsp(&rvu->afpf_wq_info.mbox_up, pfid);
		if (err)
			dev_warn(rvu->dev, "notification to pf %d failed\n",
				 pfid);
	} while (pfmap);
}

static void cgx_evhandler_task(struct work_struct *work)
{
	struct rvu *rvu = container_of(work, struct rvu, cgx_evh_work);
	struct cgx_evq_entry *qentry;
	struct cgx_link_event *event;
	unsigned long flags;

	do {
		/* Dequeue an event */
		spin_lock_irqsave(&rvu->cgx_evq_lock, flags);
		qentry = list_first_entry_or_null(&rvu->cgx_evq_head,
						  struct cgx_evq_entry,
						  evq_node);
		if (qentry)
			list_del(&qentry->evq_node);
		spin_unlock_irqrestore(&rvu->cgx_evq_lock, flags);
		if (!qentry)
			break; /* nothing more to process */

		event = &qentry->link_event;

		/* Process the event and notify the mapped PFs */
		cgx_notify_pfs(event, rvu);
		kfree(qentry);
	} while (1);
}

static int cgx_lmac_event_handler_init(struct rvu *rvu)
{
	unsigned long lmac_bmap;
	struct cgx_event_cb cb;
	int cgx, lmac, err;
	void *cgxd;

	spin_lock_init(&rvu->cgx_evq_lock);
	INIT_LIST_HEAD(&rvu->cgx_evq_head);
	INIT_WORK(&rvu->cgx_evh_work, cgx_evhandler_task);
	rvu->cgx_evh_wq = alloc_workqueue("rvu_evh_wq", 0, 0);
	if (!rvu->cgx_evh_wq) {
		dev_err(rvu->dev, "alloc workqueue failed");
		return -ENOMEM;
	}

	cb.notify_link_chg = cgx_lmac_postevent; /* link change callback */
	cb.data = rvu;

	for (cgx = 0; cgx <= rvu->cgx_cnt_max; cgx++) {
		cgxd = rvu_cgx_pdata(cgx, rvu);
		if (!cgxd)
			continue;
		lmac_bmap = cgx_get_lmac_bmap(cgxd);
		for_each_set_bit(lmac, &lmac_bmap, rvu->hw->lmac_per_cgx) {
			err = cgx_lmac_evh_register(&cb, cgxd, lmac);
			if (err)
				dev_err(rvu->dev,
					"%d:%d handler register failed\n",
					cgx, lmac);
		}
	}

	return 0;
}

static void rvu_cgx_wq_destroy(struct rvu *rvu)
{
	if (rvu->cgx_evh_wq) {
		destroy_workqueue(rvu->cgx_evh_wq);
		rvu->cgx_evh_wq = NULL;
	}
}

int rvu_cgx_init(struct rvu *rvu)
{
	int cgx, err;
	void *cgxd;

	/* CGX port ids start from 0 and are not necessarily contiguous.
	 * Hence we allocate resources based on the maximum port id value.
	 */
	rvu->cgx_cnt_max = cgx_get_cgxcnt_max();
	if (!rvu->cgx_cnt_max) {
		dev_info(rvu->dev, "No CGX devices found!\n");
		return 0;
	}

	rvu->cgx_idmap = devm_kzalloc(rvu->dev, rvu->cgx_cnt_max *
				      sizeof(void *), GFP_KERNEL);
	if (!rvu->cgx_idmap)
		return -ENOMEM;

	/* Initialize the cgxdata table */
	for (cgx = 0; cgx < rvu->cgx_cnt_max; cgx++)
		rvu->cgx_idmap[cgx] = cgx_get_pdata(cgx);

	/* Map CGX LMAC interfaces to RVU PFs */
	err = rvu_map_cgx_lmac_pf(rvu);
	if (err)
		return err;

	/* Register for CGX events */
	err = cgx_lmac_event_handler_init(rvu);
	if (err)
		return err;

	mutex_init(&rvu->cgx_cfg_lock);

	/* Ensure event handler registration is completed before
	 * we turn on the links.
	 */
	mb();

	/* Do link up for all CGX ports */
	for (cgx = 0; cgx <= rvu->cgx_cnt_max; cgx++) {
		cgxd = rvu_cgx_pdata(cgx, rvu);
		if (!cgxd)
			continue;
		err = cgx_lmac_linkup_start(cgxd);
		if (err)
			dev_err(rvu->dev,
				"Link up process failed to start on cgx %d\n",
				cgx);
	}

	return 0;
}

int rvu_cgx_exit(struct rvu *rvu)
{
	unsigned long lmac_bmap;
	int cgx, lmac;
	void *cgxd;

	for (cgx = 0; cgx <= rvu->cgx_cnt_max; cgx++) {
		cgxd = rvu_cgx_pdata(cgx, rvu);
		if (!cgxd)
			continue;
		lmac_bmap = cgx_get_lmac_bmap(cgxd);
		for_each_set_bit(lmac, &lmac_bmap, rvu->hw->lmac_per_cgx)
			cgx_lmac_evh_unregister(cgxd, lmac);
	}

	/* Ensure event handler unregister is completed */
	mb();

	rvu_cgx_wq_destroy(rvu);
	return 0;
}

/* Most of the CGX configuration is restricted to the mapped PF only;
 * VFs of the mapped PF and other PFs are not allowed. This function
 * checks whether a PFFUNC is permitted to do the configuration.
 */
inline bool is_cgx_config_permitted(struct rvu *rvu, u16 pcifunc)
{
	if ((pcifunc & RVU_PFVF_FUNC_MASK) ||
	    !is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc)))
		return false;
	return true;
}

void rvu_cgx_enadis_rx_bp(struct rvu *rvu, int pf, bool enable)
{
	struct mac_ops *mac_ops;
	u8 cgx_id, lmac_id;
	void *cgxd;

	if (!is_pf_cgxmapped(rvu, pf))
		return;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
	cgxd = rvu_cgx_pdata(cgx_id, rvu);

	mac_ops = get_mac_ops(cgxd);
	/* Set / clear CTL_BCK to control pause frame forwarding to NIX */
	if (enable)
		mac_ops->mac_enadis_rx_pause_fwding(cgxd, lmac_id, true);
	else
		mac_ops->mac_enadis_rx_pause_fwding(cgxd, lmac_id, false);
}

int rvu_cgx_config_rxtx(struct rvu *rvu, u16 pcifunc, bool start)
{
	int pf = rvu_get_pf(pcifunc);
	struct mac_ops *mac_ops;
	u8 cgx_id, lmac_id;
	void *cgxd;

	if (!is_cgx_config_permitted(rvu, pcifunc))
		return LMAC_AF_ERR_PERM_DENIED;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
	cgxd = rvu_cgx_pdata(cgx_id, rvu);
	mac_ops = get_mac_ops(cgxd);

	return mac_ops->mac_rx_tx_enable(cgxd, lmac_id, start);
}

int rvu_cgx_config_tx(void *cgxd, int lmac_id, bool enable)
{
	struct mac_ops *mac_ops;

	mac_ops = get_mac_ops(cgxd);
	return mac_ops->mac_tx_enable(cgxd, lmac_id, enable);
}

void rvu_cgx_disable_dmac_entries(struct rvu *rvu, u16 pcifunc)
{
	int pf = rvu_get_pf(pcifunc);
	int i = 0, lmac_count = 0;
	struct mac_ops *mac_ops;
	u8 max_dmac_filters;
	u8 cgx_id, lmac_id;
	void *cgx_dev;

	if (!is_cgx_config_permitted(rvu, pcifunc))
		return;

	if (rvu_npc_exact_has_match_table(rvu)) {
		rvu_npc_exact_reset(rvu, pcifunc);
		return;
	}

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
	cgx_dev = cgx_get_pdata(cgx_id);
	lmac_count = cgx_get_lmac_cnt(cgx_dev);

	mac_ops = get_mac_ops(cgx_dev);
	if (!mac_ops)
		return;

	max_dmac_filters = mac_ops->dmac_filter_count / lmac_count;

	for (i = 0; i < max_dmac_filters; i++)
		cgx_lmac_addr_del(cgx_id, lmac_id, i);

	/* cgx_lmac_addr_del() does not clear the entry at index 0,
	 * so it needs to be cleared explicitly.
	 */
	cgx_lmac_addr_reset(cgx_id, lmac_id);
}

int rvu_mbox_handler_cgx_start_rxtx(struct rvu *rvu, struct msg_req *req,
				    struct msg_rsp *rsp)
{
	rvu_cgx_config_rxtx(rvu, req->hdr.pcifunc, true);
	return 0;
}

int rvu_mbox_handler_cgx_stop_rxtx(struct rvu *rvu, struct msg_req *req,
				   struct msg_rsp *rsp)
{
	rvu_cgx_config_rxtx(rvu, req->hdr.pcifunc, false);
	return 0;
}

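/* Common helper for the CGX and RPM stats mbox handlers below: the caller
 * passes its response struct as a void pointer and the MAC type is inferred
 * from mac_ops->rx_stats_cnt / tx_stats_cnt, so the same loop can fill
 * either struct cgx_stats_rsp or struct rpm_stats_rsp.
 */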
static int rvu_lmac_get_stats(struct rvu *rvu, struct msg_req *req,
			      void *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	struct mac_ops *mac_ops;
	int stat = 0, err = 0;
	u64 tx_stat, rx_stat;
	u8 cgx_idx, lmac;
	void *cgxd;

	if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
		return LMAC_AF_ERR_PERM_DENIED;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_idx, &lmac);
	cgxd = rvu_cgx_pdata(cgx_idx, rvu);
	mac_ops = get_mac_ops(cgxd);

	/* Rx stats */
	while (stat < mac_ops->rx_stats_cnt) {
		err = mac_ops->mac_get_rx_stats(cgxd, lmac, stat, &rx_stat);
		if (err)
			return err;
		if (mac_ops->rx_stats_cnt == RPM_RX_STATS_COUNT)
			((struct rpm_stats_rsp *)rsp)->rx_stats[stat] = rx_stat;
		else
			((struct cgx_stats_rsp *)rsp)->rx_stats[stat] = rx_stat;
		stat++;
	}

	/* Tx stats */
	stat = 0;
	while (stat < mac_ops->tx_stats_cnt) {
		err = mac_ops->mac_get_tx_stats(cgxd, lmac, stat, &tx_stat);
		if (err)
			return err;
		if (mac_ops->tx_stats_cnt == RPM_TX_STATS_COUNT)
			((struct rpm_stats_rsp *)rsp)->tx_stats[stat] = tx_stat;
		else
			((struct cgx_stats_rsp *)rsp)->tx_stats[stat] = tx_stat;
		stat++;
	}
	return 0;
}

int rvu_mbox_handler_cgx_stats(struct rvu *rvu, struct msg_req *req,
			       struct cgx_stats_rsp *rsp)
{
	return rvu_lmac_get_stats(rvu, req, (void *)rsp);
}

int rvu_mbox_handler_rpm_stats(struct rvu *rvu, struct msg_req *req,
			       struct rpm_stats_rsp *rsp)
{
	return rvu_lmac_get_stats(rvu, req, (void *)rsp);
}

int rvu_mbox_handler_cgx_fec_stats(struct rvu *rvu,
				   struct msg_req *req,
				   struct cgx_fec_stats_rsp *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	struct mac_ops *mac_ops;
	u8 cgx_idx, lmac;
	void *cgxd;

	if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
		return LMAC_AF_ERR_PERM_DENIED;
	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_idx, &lmac);

	cgxd = rvu_cgx_pdata(cgx_idx, rvu);
	mac_ops = get_mac_ops(cgxd);
	return mac_ops->get_fec_stats(cgxd, lmac, rsp);
}

int rvu_mbox_handler_cgx_mac_addr_set(struct rvu *rvu,
				      struct cgx_mac_addr_set_or_get *req,
				      struct cgx_mac_addr_set_or_get *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	u8 cgx_id, lmac_id;

	if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
		return -EPERM;

	if (rvu_npc_exact_has_match_table(rvu))
		return rvu_npc_exact_mac_addr_set(rvu, req, rsp);

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);

	cgx_lmac_addr_set(cgx_id, lmac_id, req->mac_addr);

	return 0;
}

int rvu_mbox_handler_cgx_mac_addr_add(struct rvu *rvu,
				      struct cgx_mac_addr_add_req *req,
				      struct cgx_mac_addr_add_rsp *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	u8 cgx_id, lmac_id;
	int rc = 0;

	if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
		return -EPERM;

	if (rvu_npc_exact_has_match_table(rvu))
		return rvu_npc_exact_mac_addr_add(rvu, req, rsp);

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
	rc = cgx_lmac_addr_add(cgx_id, lmac_id, req->mac_addr);
	if (rc >= 0) {
		rsp->index = rc;
		return 0;
	}

	return rc;
}

int rvu_mbox_handler_cgx_mac_addr_del(struct rvu *rvu,
				      struct cgx_mac_addr_del_req *req,
				      struct msg_rsp *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	u8 cgx_id, lmac_id;

	if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
		return -EPERM;

	if (rvu_npc_exact_has_match_table(rvu))
		return rvu_npc_exact_mac_addr_del(rvu, req, rsp);

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
	return cgx_lmac_addr_del(cgx_id, lmac_id, req->index);
}

int rvu_mbox_handler_cgx_mac_max_entries_get(struct rvu *rvu,
					     struct msg_req *req,
					     struct cgx_max_dmac_entries_get_rsp
					     *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	u8 cgx_id, lmac_id;

	/* If the msg is received from PFs that are not mapped to CGX LMACs,
	 * or from a VF, then no entries are allocated for DMAC filters at
	 * CGX level. So return zero.
	 */
	if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc)) {
		rsp->max_dmac_filters = 0;
		return 0;
	}

	if (rvu_npc_exact_has_match_table(rvu)) {
		rsp->max_dmac_filters = rvu_npc_exact_get_max_entries(rvu);
		return 0;
	}

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
	rsp->max_dmac_filters = cgx_lmac_addr_max_entries_get(cgx_id, lmac_id);
	return 0;
}

int rvu_mbox_handler_cgx_mac_addr_get(struct rvu *rvu,
				      struct cgx_mac_addr_set_or_get *req,
				      struct cgx_mac_addr_set_or_get *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	u8 cgx_id, lmac_id;
	u64 cfg;

	if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
		return -EPERM;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);

	cfg = cgx_lmac_addr_get(cgx_id, lmac_id);
	/* Copy the 48 bit MAC address to rsp->mac_addr */
	u64_to_ether_addr(cfg, rsp->mac_addr);
	return 0;
}

int rvu_mbox_handler_cgx_promisc_enable(struct rvu *rvu, struct msg_req *req,
					struct msg_rsp *rsp)
{
	u16 pcifunc = req->hdr.pcifunc;
	int pf = rvu_get_pf(pcifunc);
	u8 cgx_id, lmac_id;

	if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
		return -EPERM;

	/* Disable drop on non-hit rule */
	if (rvu_npc_exact_has_match_table(rvu))
		return rvu_npc_exact_promisc_enable(rvu, req->hdr.pcifunc);

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);

	cgx_lmac_promisc_config(cgx_id, lmac_id, true);
	return 0;
}

int rvu_mbox_handler_cgx_promisc_disable(struct rvu *rvu, struct msg_req *req,
					 struct msg_rsp *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	u8 cgx_id, lmac_id;

	if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
		return -EPERM;

	/* Disable drop on non-hit rule */
	if (rvu_npc_exact_has_match_table(rvu))
		return rvu_npc_exact_promisc_disable(rvu, req->hdr.pcifunc);

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);

	cgx_lmac_promisc_config(cgx_id, lmac_id, false);
	return 0;
}

static int rvu_cgx_ptp_rx_cfg(struct rvu *rvu, u16 pcifunc, bool enable)
{
	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
	int pf = rvu_get_pf(pcifunc);
	struct mac_ops *mac_ops;
	u8 cgx_id, lmac_id;
	void *cgxd;

	if (!is_mac_feature_supported(rvu, pf, RVU_LMAC_FEAT_PTP))
		return 0;

	/* This msg is expected only from PFs that are mapped to CGX LMACs;
	 * if received from another PF/VF, simply ACK, nothing to do.
	 */
	if ((pcifunc & RVU_PFVF_FUNC_MASK) ||
	    !is_pf_cgxmapped(rvu, pf))
		return -EPERM;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
	cgxd = rvu_cgx_pdata(cgx_id, rvu);

	mac_ops = get_mac_ops(cgxd);
	mac_ops->mac_enadis_ptp_config(cgxd, lmac_id, enable);
	/* If PTP is enabled, inform NPC that packets to be parsed by this PF
	 * will have their data shifted by 8 bytes; if PTP is disabled, no
	 * shift is required.
	 */
	if (npc_config_ts_kpuaction(rvu, pf, pcifunc, enable))
		return -EINVAL;
	/* This flag is required to clean up the CGX configuration if the
	 * application gets killed.
	 */
	pfvf->hw_rx_tstamp_en = enable;

	/* Inform MCS about the 8B RX header */
	rvu_mcs_ptp_cfg(rvu, cgx_id, lmac_id, enable);
	return 0;
}

int rvu_mbox_handler_cgx_ptp_rx_enable(struct rvu *rvu, struct msg_req *req,
				       struct msg_rsp *rsp)
{
	if (!is_pf_cgxmapped(rvu, rvu_get_pf(req->hdr.pcifunc)))
		return -EPERM;

	return rvu_cgx_ptp_rx_cfg(rvu, req->hdr.pcifunc, true);
}

int rvu_mbox_handler_cgx_ptp_rx_disable(struct rvu *rvu, struct msg_req *req,
					struct msg_rsp *rsp)
{
	return rvu_cgx_ptp_rx_cfg(rvu, req->hdr.pcifunc, false);
}

static int rvu_cgx_config_linkevents(struct rvu *rvu, u16 pcifunc, bool en)
{
	int pf = rvu_get_pf(pcifunc);
	u8 cgx_id, lmac_id;

	if (!is_cgx_config_permitted(rvu, pcifunc))
		return -EPERM;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);

	if (en) {
		set_bit(pf, &rvu->pf_notify_bmap);
		/* Send the current link status to PF */
		rvu_cgx_send_link_info(cgx_id, lmac_id, rvu);
	} else {
		clear_bit(pf, &rvu->pf_notify_bmap);
	}

	return 0;
}

int rvu_mbox_handler_cgx_start_linkevents(struct rvu *rvu, struct msg_req *req,
					  struct msg_rsp *rsp)
{
	rvu_cgx_config_linkevents(rvu, req->hdr.pcifunc, true);
	return 0;
}

int rvu_mbox_handler_cgx_stop_linkevents(struct rvu *rvu, struct msg_req *req,
					 struct msg_rsp *rsp)
{
	rvu_cgx_config_linkevents(rvu, req->hdr.pcifunc, false);
	return 0;
}

int rvu_mbox_handler_cgx_get_linkinfo(struct rvu *rvu, struct msg_req *req,
				      struct cgx_link_info_msg *rsp)
{
	u8 cgx_id, lmac_id;
	int pf, err;

	pf = rvu_get_pf(req->hdr.pcifunc);

	if (!is_pf_cgxmapped(rvu, pf))
		return -ENODEV;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);

	err = cgx_get_link_info(rvu_cgx_pdata(cgx_id, rvu), lmac_id,
				&rsp->link_info);
	return err;
}

int rvu_mbox_handler_cgx_features_get(struct rvu *rvu,
				      struct msg_req *req,
				      struct cgx_features_info_msg *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	u8 cgx_idx, lmac;
	void *cgxd;

	if (!is_pf_cgxmapped(rvu, pf))
		return 0;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_idx, &lmac);
	cgxd = rvu_cgx_pdata(cgx_idx, rvu);
	rsp->lmac_features = cgx_features_get(cgxd);

	return 0;
}

u32 rvu_cgx_get_fifolen(struct rvu *rvu)
{
	struct mac_ops *mac_ops;
	u32 fifo_len;

	mac_ops = get_mac_ops(rvu_first_cgx_pdata(rvu));
	fifo_len = mac_ops ? mac_ops->fifo_len : 0;

	return fifo_len;
}

u32 rvu_cgx_get_lmac_fifolen(struct rvu *rvu, int cgx, int lmac)
{
	struct mac_ops *mac_ops;
	void *cgxd;

	cgxd = rvu_cgx_pdata(cgx, rvu);
	if (!cgxd)
		return 0;

	mac_ops = get_mac_ops(cgxd);
	if (!mac_ops->lmac_fifo_len)
		return 0;

	return mac_ops->lmac_fifo_len(cgxd, lmac);
}

static int rvu_cgx_config_intlbk(struct rvu *rvu, u16 pcifunc, bool en)
{
	int pf = rvu_get_pf(pcifunc);
	struct mac_ops *mac_ops;
	u8 cgx_id, lmac_id;

	if (!is_cgx_config_permitted(rvu, pcifunc))
		return -EPERM;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
	mac_ops = get_mac_ops(rvu_cgx_pdata(cgx_id, rvu));

	return mac_ops->mac_lmac_intl_lbk(rvu_cgx_pdata(cgx_id, rvu),
					  lmac_id, en);
}

int rvu_mbox_handler_cgx_intlbk_enable(struct rvu *rvu, struct msg_req *req,
				       struct msg_rsp *rsp)
{
	rvu_cgx_config_intlbk(rvu, req->hdr.pcifunc, true);
	return 0;
}

int rvu_mbox_handler_cgx_intlbk_disable(struct rvu *rvu, struct msg_req *req,
					struct msg_rsp *rsp)
{
	rvu_cgx_config_intlbk(rvu, req->hdr.pcifunc, false);
	return 0;
}

int rvu_cgx_cfg_pause_frm(struct rvu *rvu, u16 pcifunc, u8 tx_pause, u8 rx_pause)
{
	int pf = rvu_get_pf(pcifunc);
	u8 rx_pfc = 0, tx_pfc = 0;
	struct mac_ops *mac_ops;
	u8 cgx_id, lmac_id;
	void *cgxd;

	if (!is_mac_feature_supported(rvu, pf, RVU_LMAC_FEAT_FC))
		return 0;

	/* This msg is expected only from PF/VFs that are mapped to CGX LMACs;
	 * if received from another PF/VF, simply ACK, nothing to do.
	 */
	if (!is_pf_cgxmapped(rvu, pf))
		return LMAC_AF_ERR_PF_NOT_MAPPED;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
	cgxd = rvu_cgx_pdata(cgx_id, rvu);
	mac_ops = get_mac_ops(cgxd);

	mac_ops->mac_get_pfc_frm_cfg(cgxd, lmac_id, &tx_pfc, &rx_pfc);
	if (tx_pfc || rx_pfc) {
		dev_warn(rvu->dev,
			 "Can not configure 802.3X flow control as PFC frames are enabled");
		return LMAC_AF_ERR_8023PAUSE_ENADIS_PERM_DENIED;
	}

	mutex_lock(&rvu->rsrc_lock);
	if (verify_lmac_fc_cfg(cgxd, lmac_id, tx_pause, rx_pause,
			       pcifunc & RVU_PFVF_FUNC_MASK)) {
		mutex_unlock(&rvu->rsrc_lock);
		return LMAC_AF_ERR_PERM_DENIED;
	}
	mutex_unlock(&rvu->rsrc_lock);

	return mac_ops->mac_enadis_pause_frm(cgxd, lmac_id, tx_pause, rx_pause);
}

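/* Note: 802.3X pause and PFC are mutually exclusive here. The helper above
 * refuses to touch 802.3X pause while PFC frames are enabled, and
 * rvu_cgx_prio_flow_ctrl_cfg() below likewise refuses PFC configuration
 * while 802.3X pause frames are enabled.
 */
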
int rvu_mbox_handler_cgx_cfg_pause_frm(struct rvu *rvu,
				       struct cgx_pause_frm_cfg *req,
				       struct cgx_pause_frm_cfg *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	struct mac_ops *mac_ops;
	u8 cgx_id, lmac_id;
	int err = 0;
	void *cgxd;

	/* This msg is expected only from PF/VFs that are mapped to CGX LMACs;
	 * if received from another PF/VF, simply ACK, nothing to do.
	 */
	if (!is_pf_cgxmapped(rvu, pf))
		return -ENODEV;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
	cgxd = rvu_cgx_pdata(cgx_id, rvu);
	mac_ops = get_mac_ops(cgxd);

	if (req->set)
		err = rvu_cgx_cfg_pause_frm(rvu, req->hdr.pcifunc, req->tx_pause, req->rx_pause);
	else
		mac_ops->mac_get_pause_frm_status(cgxd, lmac_id, &rsp->tx_pause, &rsp->rx_pause);

	return err;
}

int rvu_mbox_handler_cgx_get_phy_fec_stats(struct rvu *rvu, struct msg_req *req,
					   struct msg_rsp *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	u8 cgx_id, lmac_id;

	if (!is_pf_cgxmapped(rvu, pf))
		return LMAC_AF_ERR_PF_NOT_MAPPED;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
	return cgx_get_phy_fec_stats(rvu_cgx_pdata(cgx_id, rvu), lmac_id);
}

/* Find the cumulative status of NIX rx/tx counters from the LFs of a PF and
 * those of its VFs as well, i.e. the NIX rx/tx counters at the CGX port level.
 */
int rvu_cgx_nix_cuml_stats(struct rvu *rvu, void *cgxd, int lmac_id,
			   int index, int rxtxflag, u64 *stat)
{
	struct rvu_block *block;
	int blkaddr;
	u16 pcifunc;
	int pf, lf;

	*stat = 0;

	if (!cgxd || !rvu)
		return -EINVAL;

	pf = cgxlmac_to_pf(rvu, cgx_get_cgxid(cgxd), lmac_id);
	if (pf < 0)
		return pf;

	/* Assumes the LFs of a PF and all of its VFs belong to the same
	 * NIX block.
	 */
	pcifunc = pf << RVU_PFVF_PF_SHIFT;
	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
	if (blkaddr < 0)
		return 0;
	block = &rvu->hw->block[blkaddr];

	for (lf = 0; lf < block->lf.max; lf++) {
		/* Check if an LF is attached to this PF or one of its VFs */
		if (!((block->fn_map[lf] & ~RVU_PFVF_FUNC_MASK) == (pcifunc &
								    ~RVU_PFVF_FUNC_MASK)))
			continue;
		if (rxtxflag == NIX_STATS_RX)
			*stat += rvu_read64(rvu, blkaddr,
					    NIX_AF_LFX_RX_STATX(lf, index));
		else
			*stat += rvu_read64(rvu, blkaddr,
					    NIX_AF_LFX_TX_STATX(lf, index));
	}

	return 0;
}

int rvu_cgx_start_stop_io(struct rvu *rvu, u16 pcifunc, bool start)
{
	struct rvu_pfvf *parent_pf, *pfvf;
	int cgx_users, err = 0;

	if (!is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc)))
		return 0;

	parent_pf = &rvu->pf[rvu_get_pf(pcifunc)];
	pfvf = rvu_get_pfvf(rvu, pcifunc);

	mutex_lock(&rvu->cgx_cfg_lock);

	if (start && pfvf->cgx_in_use)
		goto exit; /* CGX is already started hence nothing to do */
	if (!start && !pfvf->cgx_in_use)
		goto exit; /* CGX is already stopped hence nothing to do */

	if (start) {
		cgx_users = parent_pf->cgx_users;
		parent_pf->cgx_users++;
	} else {
		parent_pf->cgx_users--;
		cgx_users = parent_pf->cgx_users;
	}

	/* Start CGX when the first of all NIXLFs is started.
	 * Stop CGX when the last of all NIXLFs is stopped.
	 */
	if (cgx_users == 0) {
		err = rvu_cgx_config_rxtx(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK,
					  start);
		if (err) {
			dev_err(rvu->dev, "Unable to %s CGX\n",
				start ? "start" : "stop");
			/* Revert the usage count in case of error */
			parent_pf->cgx_users = start ? parent_pf->cgx_users - 1
					       : parent_pf->cgx_users + 1;
			goto exit;
		}
	}
	pfvf->cgx_in_use = start;
exit:
	mutex_unlock(&rvu->cgx_cfg_lock);
	return err;
}

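/* Example of the cgx_users refcounting above (illustrative sequence): if a
 * PF and two of its VFs each call rvu_cgx_start_stop_io(..., true), only the
 * first call (cgx_users 0 -> 1) enables LMAC rx/tx via rvu_cgx_config_rxtx();
 * the matching stop calls only disable rx/tx once the count drops back to 0.
 */
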
int rvu_mbox_handler_cgx_set_fec_param(struct rvu *rvu,
				       struct fec_mode *req,
				       struct fec_mode *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	u8 cgx_id, lmac_id;

	if (!is_pf_cgxmapped(rvu, pf))
		return -EPERM;

	if (req->fec == OTX2_FEC_OFF)
		req->fec = OTX2_FEC_NONE;
	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
	rsp->fec = cgx_set_fec(req->fec, cgx_id, lmac_id);
	return 0;
}

int rvu_mbox_handler_cgx_get_aux_link_info(struct rvu *rvu, struct msg_req *req,
					   struct cgx_fw_data *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	u8 cgx_id, lmac_id;

	if (!rvu->fwdata)
		return LMAC_AF_ERR_FIRMWARE_DATA_NOT_MAPPED;

	if (!is_pf_cgxmapped(rvu, pf))
		return -EPERM;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);

	if (rvu->hw->lmac_per_cgx == CGX_LMACS_USX)
		memcpy(&rsp->fwdata,
		       &rvu->fwdata->cgx_fw_data_usx[cgx_id][lmac_id],
		       sizeof(struct cgx_lmac_fwdata_s));
	else
		memcpy(&rsp->fwdata,
		       &rvu->fwdata->cgx_fw_data[cgx_id][lmac_id],
		       sizeof(struct cgx_lmac_fwdata_s));

	return 0;
}

int rvu_mbox_handler_cgx_set_link_mode(struct rvu *rvu,
				       struct cgx_set_link_mode_req *req,
				       struct cgx_set_link_mode_rsp *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	u8 cgx_idx, lmac;
	void *cgxd;

	if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
		return -EPERM;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_idx, &lmac);
	cgxd = rvu_cgx_pdata(cgx_idx, rvu);
	rsp->status = cgx_set_link_mode(cgxd, req->args, cgx_idx, lmac);
	return 0;
}

int rvu_mbox_handler_cgx_mac_addr_reset(struct rvu *rvu, struct cgx_mac_addr_reset_req *req,
					struct msg_rsp *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	u8 cgx_id, lmac_id;

	if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
		return LMAC_AF_ERR_PERM_DENIED;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);

	if (rvu_npc_exact_has_match_table(rvu))
		return rvu_npc_exact_mac_addr_reset(rvu, req, rsp);

	return cgx_lmac_addr_reset(cgx_id, lmac_id);
}

int rvu_mbox_handler_cgx_mac_addr_update(struct rvu *rvu,
					 struct cgx_mac_addr_update_req *req,
					 struct cgx_mac_addr_update_rsp *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	u8 cgx_id, lmac_id;

	if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
		return LMAC_AF_ERR_PERM_DENIED;

	if (rvu_npc_exact_has_match_table(rvu))
		return rvu_npc_exact_mac_addr_update(rvu, req, rsp);

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
	return cgx_lmac_addr_update(cgx_id, lmac_id, req->mac_addr, req->index);
}

int rvu_cgx_prio_flow_ctrl_cfg(struct rvu *rvu, u16 pcifunc, u8 tx_pause,
			       u8 rx_pause, u16 pfc_en)
{
	int pf = rvu_get_pf(pcifunc);
	u8 rx_8023 = 0, tx_8023 = 0;
	struct mac_ops *mac_ops;
	u8 cgx_id, lmac_id;
	void *cgxd;

	/* This msg is expected only from PF/VFs that are mapped to CGX LMACs;
	 * if received from another PF/VF, simply ACK, nothing to do.
	 */
	if (!is_pf_cgxmapped(rvu, pf))
		return -ENODEV;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
	cgxd = rvu_cgx_pdata(cgx_id, rvu);
	mac_ops = get_mac_ops(cgxd);

	mac_ops->mac_get_pause_frm_status(cgxd, lmac_id, &tx_8023, &rx_8023);
	if (tx_8023 || rx_8023) {
		dev_warn(rvu->dev,
			 "Can not configure PFC as 802.3X pause frames are enabled");
		return LMAC_AF_ERR_PFC_ENADIS_PERM_DENIED;
	}

	mutex_lock(&rvu->rsrc_lock);
	if (verify_lmac_fc_cfg(cgxd, lmac_id, tx_pause, rx_pause,
			       pcifunc & RVU_PFVF_FUNC_MASK)) {
		mutex_unlock(&rvu->rsrc_lock);
		return LMAC_AF_ERR_PERM_DENIED;
	}
	mutex_unlock(&rvu->rsrc_lock);

	return mac_ops->pfc_config(cgxd, lmac_id, tx_pause, rx_pause, pfc_en);
}

int rvu_mbox_handler_cgx_prio_flow_ctrl_cfg(struct rvu *rvu,
					    struct cgx_pfc_cfg *req,
					    struct cgx_pfc_rsp *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	struct mac_ops *mac_ops;
	u8 cgx_id, lmac_id;
	void *cgxd;
	int err;

	/* This msg is expected only from PF/VFs that are mapped to CGX LMACs;
	 * if received from another PF/VF, simply ACK, nothing to do.
	 */
	if (!is_pf_cgxmapped(rvu, pf))
		return -ENODEV;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
	cgxd = rvu_cgx_pdata(cgx_id, rvu);
	mac_ops = get_mac_ops(cgxd);

	err = rvu_cgx_prio_flow_ctrl_cfg(rvu, req->hdr.pcifunc, req->tx_pause,
					 req->rx_pause, req->pfc_en);

	mac_ops->mac_get_pfc_frm_cfg(cgxd, lmac_id, &rsp->tx_pause, &rsp->rx_pause);
	return err;
}

void rvu_mac_reset(struct rvu *rvu, u16 pcifunc)
{
	int pf = rvu_get_pf(pcifunc);
	struct mac_ops *mac_ops;
	u8 cgx, lmac;
	void *cgxd;

	if (!is_pf_cgxmapped(rvu, pf))
		return;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx, &lmac);
	cgxd = rvu_cgx_pdata(cgx, rvu);
	mac_ops = get_mac_ops(cgxd);

	if (mac_ops->mac_reset(cgxd, lmac, !is_vf(pcifunc)))
		dev_err(rvu->dev, "Failed to reset MAC\n");
}