octeontx2-af: Use u64_to_ether_addr() to convert ethernet address
drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
// SPDX-License-Identifier: GPL-2.0
/* Marvell RVU Admin Function driver
 *
 * Copyright (C) 2018 Marvell.
 *
 */

#include <linux/types.h>
#include <linux/module.h>
#include <linux/pci.h>

#include "rvu.h"
#include "cgx.h"
#include "lmac_common.h"
#include "rvu_reg.h"
#include "rvu_trace.h"
#include "rvu_npc_hash.h"

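/* One entry on rvu->cgx_evq_head: carries a single CGX link change event
 * from the notifier callback to the event handler worker.
 */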
struct cgx_evq_entry {
        struct list_head evq_node;
        struct cgx_link_event link_event;
};

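/* Expands MBOX_UP_CGX_MESSAGES to generate an otx2_mbox_alloc_msg_<name>()
 * helper for every AF-to-PF "up" notification message, wrapping
 * otx2_mbox_alloc_msg_rsp() and filling in the common message header.
 */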
#define M(_name, _id, _fn_name, _req_type, _rsp_type)                   \
static struct _req_type __maybe_unused                                  \
*otx2_mbox_alloc_msg_ ## _fn_name(struct rvu *rvu, int devid)           \
{                                                                       \
        struct _req_type *req;                                          \
                                                                        \
        req = (struct _req_type *)otx2_mbox_alloc_msg_rsp(              \
                &rvu->afpf_wq_info.mbox_up, devid, sizeof(struct _req_type), \
                sizeof(struct _rsp_type));                              \
        if (!req)                                                       \
                return NULL;                                            \
        req->hdr.sig = OTX2_MBOX_REQ_SIG;                               \
        req->hdr.id = _id;                                              \
        trace_otx2_msg_alloc(rvu->pdev, _id, sizeof(*req));             \
        return req;                                                     \
}

MBOX_UP_CGX_MESSAGES
#undef M

bool is_mac_feature_supported(struct rvu *rvu, int pf, int feature)
{
        u8 cgx_id, lmac_id;
        void *cgxd;

        if (!is_pf_cgxmapped(rvu, pf))
                return false;

        rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
        cgxd = rvu_cgx_pdata(cgx_id, rvu);

        return (cgx_features_get(cgxd) & feature);
}

#define CGX_OFFSET(x)                   ((x) * rvu->hw->lmac_per_cgx)
/* Returns bitmap of mapped PFs */
static u64 cgxlmac_to_pfmap(struct rvu *rvu, u8 cgx_id, u8 lmac_id)
{
        return rvu->cgxlmac2pf_map[CGX_OFFSET(cgx_id) + lmac_id];
}

int cgxlmac_to_pf(struct rvu *rvu, int cgx_id, int lmac_id)
{
        unsigned long pfmap;

        pfmap = cgxlmac_to_pfmap(rvu, cgx_id, lmac_id);

        /* Assumes only one pf mapped to a cgx lmac port */
        if (!pfmap)
                return -ENODEV;
        else
                return find_first_bit(&pfmap,
                                      rvu->cgx_cnt_max * rvu->hw->lmac_per_cgx);
}

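/* Pack cgx_id into the upper nibble and lmac_id into the lower nibble of
 * a u8; both ids are limited to 0..15, which rvu_map_cgx_lmac_pf() checks.
 */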
static u8 cgxlmac_id_to_bmap(u8 cgx_id, u8 lmac_id)
{
        return ((cgx_id & 0xF) << 4) | (lmac_id & 0xF);
}

void *rvu_cgx_pdata(u8 cgx_id, struct rvu *rvu)
{
        if (cgx_id >= rvu->cgx_cnt_max)
                return NULL;

        return rvu->cgx_idmap[cgx_id];
}

/* Return the first enabled CGX instance; if none are enabled, return NULL */
void *rvu_first_cgx_pdata(struct rvu *rvu)
{
        int first_enabled_cgx = 0;
        void *cgxd = NULL;

        for (; first_enabled_cgx < rvu->cgx_cnt_max; first_enabled_cgx++) {
                cgxd = rvu_cgx_pdata(first_enabled_cgx, rvu);
                if (cgxd)
                        break;
        }

        return cgxd;
}

/* Based on P2X connectivity find mapped NIX block for a PF */
static void rvu_map_cgx_nix_block(struct rvu *rvu, int pf,
                                  int cgx_id, int lmac_id)
{
        struct rvu_pfvf *pfvf = &rvu->pf[pf];
        u8 p2x;

        p2x = cgx_lmac_get_p2x(cgx_id, lmac_id);
        /* Firmware sets P2X_SELECT as either NIX0 or NIX1 */
        pfvf->nix_blkaddr = BLKADDR_NIX0;
        if (is_rvu_supports_nix1(rvu) && p2x == CMR_P2X_SEL_NIX1)
                pfvf->nix_blkaddr = BLKADDR_NIX1;
}

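/* Walk every CGX device and each of its enabled LMACs, assigning the next
 * free PF (starting at PF_CGXMAP_BASE) to each CGX:LMAC pair, and build
 * both the forward (PF -> CGX,LMAC) and reverse (CGX,LMAC -> PF bitmap)
 * maps. A packet kind (pkind) is also reserved per mapped PF.
 */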
static int rvu_map_cgx_lmac_pf(struct rvu *rvu)
{
        struct npc_pkind *pkind = &rvu->hw->pkind;
        int cgx_cnt_max = rvu->cgx_cnt_max;
        int pf = PF_CGXMAP_BASE;
        unsigned long lmac_bmap;
        int size, free_pkind;
        int cgx, lmac, iter;
        int numvfs, hwvfs;

        if (!cgx_cnt_max)
                return 0;

        if (cgx_cnt_max > 0xF || rvu->hw->lmac_per_cgx > 0xF)
                return -EINVAL;

        /* Alloc map table
         * An additional entry is required since PF id starts from 1 and
         * hence entry at offset 0 is invalid.
         */
        size = (cgx_cnt_max * rvu->hw->lmac_per_cgx + 1) * sizeof(u8);
        rvu->pf2cgxlmac_map = devm_kmalloc(rvu->dev, size, GFP_KERNEL);
        if (!rvu->pf2cgxlmac_map)
                return -ENOMEM;

        /* Initialize all entries with an invalid cgx and lmac id */
        memset(rvu->pf2cgxlmac_map, 0xFF, size);

        /* Reverse map table */
        rvu->cgxlmac2pf_map =
                devm_kzalloc(rvu->dev,
                             cgx_cnt_max * rvu->hw->lmac_per_cgx * sizeof(u64),
                             GFP_KERNEL);
        if (!rvu->cgxlmac2pf_map)
                return -ENOMEM;

        rvu->cgx_mapped_pfs = 0;
        for (cgx = 0; cgx < cgx_cnt_max; cgx++) {
                if (!rvu_cgx_pdata(cgx, rvu))
                        continue;
                lmac_bmap = cgx_get_lmac_bmap(rvu_cgx_pdata(cgx, rvu));
                for_each_set_bit(iter, &lmac_bmap, rvu->hw->lmac_per_cgx) {
                        lmac = cgx_get_lmacid(rvu_cgx_pdata(cgx, rvu),
                                              iter);
                        rvu->pf2cgxlmac_map[pf] = cgxlmac_id_to_bmap(cgx, lmac);
                        rvu->cgxlmac2pf_map[CGX_OFFSET(cgx) + lmac] = 1 << pf;
                        free_pkind = rvu_alloc_rsrc(&pkind->rsrc);
                        pkind->pfchan_map[free_pkind] = ((pf) & 0x3F) << 16;
                        rvu_map_cgx_nix_block(rvu, pf, cgx, lmac);
                        rvu->cgx_mapped_pfs++;
                        rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvfs);
                        rvu->cgx_mapped_vfs += numvfs;
                        pf++;
                }
        }
        return 0;
}

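/* Read the current link state of the given CGX LMAC, queue it on the event
 * list and kick the worker, so that the mapped PF receives an up-to-date
 * link status notification.
 */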
static int rvu_cgx_send_link_info(int cgx_id, int lmac_id, struct rvu *rvu)
{
        struct cgx_evq_entry *qentry;
        unsigned long flags;
        int err;

        qentry = kmalloc(sizeof(*qentry), GFP_KERNEL);
        if (!qentry)
                return -ENOMEM;

        /* Lock the event queue before we read the local link status */
        spin_lock_irqsave(&rvu->cgx_evq_lock, flags);
        err = cgx_get_link_info(rvu_cgx_pdata(cgx_id, rvu), lmac_id,
                                &qentry->link_event.link_uinfo);
        qentry->link_event.cgx_id = cgx_id;
        qentry->link_event.lmac_id = lmac_id;
        if (err) {
                kfree(qentry);
                goto skip_add;
        }
        list_add_tail(&qentry->evq_node, &rvu->cgx_evq_head);
skip_add:
        spin_unlock_irqrestore(&rvu->cgx_evq_lock, flags);

        /* start worker to process the events */
        queue_work(rvu->cgx_evh_wq, &rvu->cgx_evh_work);

        return 0;
}

/* This is called from interrupt context and is expected to be atomic */
static int cgx_lmac_postevent(struct cgx_link_event *event, void *data)
{
        struct cgx_evq_entry *qentry;
        struct rvu *rvu = data;

        /* post event to the event queue */
        qentry = kmalloc(sizeof(*qentry), GFP_ATOMIC);
        if (!qentry)
                return -ENOMEM;
        qentry->link_event = *event;
        spin_lock(&rvu->cgx_evq_lock);
        list_add_tail(&qentry->evq_node, &rvu->cgx_evq_head);
        spin_unlock(&rvu->cgx_evq_lock);

        /* start worker to process the events */
        queue_work(rvu->cgx_evh_wq, &rvu->cgx_evh_work);

        return 0;
}

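/* Notify every PF mapped to this CGX LMAC of a link change: PFs that have
 * enabled notifications (rvu->pf_notify_bmap) get a cgx_link_event mbox
 * message, the rest only get the status logged.
 */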
static void cgx_notify_pfs(struct cgx_link_event *event, struct rvu *rvu)
{
        struct cgx_link_user_info *linfo;
        struct cgx_link_info_msg *msg;
        unsigned long pfmap;
        int err, pfid;

        linfo = &event->link_uinfo;
        pfmap = cgxlmac_to_pfmap(rvu, event->cgx_id, event->lmac_id);

        do {
                pfid = find_first_bit(&pfmap,
                                      rvu->cgx_cnt_max * rvu->hw->lmac_per_cgx);
                clear_bit(pfid, &pfmap);

                /* check if notification is enabled */
                if (!test_bit(pfid, &rvu->pf_notify_bmap)) {
                        dev_info(rvu->dev, "cgx %d: lmac %d Link status %s\n",
                                 event->cgx_id, event->lmac_id,
                                 linfo->link_up ? "UP" : "DOWN");
                        continue;
                }

                /* Send mbox message to PF */
                msg = otx2_mbox_alloc_msg_cgx_link_event(rvu, pfid);
                if (!msg)
                        continue;
                msg->link_info = *linfo;
                otx2_mbox_msg_send(&rvu->afpf_wq_info.mbox_up, pfid);
                err = otx2_mbox_wait_for_rsp(&rvu->afpf_wq_info.mbox_up, pfid);
                if (err)
                        dev_warn(rvu->dev, "notification to pf %d failed\n",
                                 pfid);
        } while (pfmap);
}

static void cgx_evhandler_task(struct work_struct *work)
{
        struct rvu *rvu = container_of(work, struct rvu, cgx_evh_work);
        struct cgx_evq_entry *qentry;
        struct cgx_link_event *event;
        unsigned long flags;

        do {
                /* Dequeue an event */
                spin_lock_irqsave(&rvu->cgx_evq_lock, flags);
                qentry = list_first_entry_or_null(&rvu->cgx_evq_head,
                                                  struct cgx_evq_entry,
                                                  evq_node);
                if (qentry)
                        list_del(&qentry->evq_node);
                spin_unlock_irqrestore(&rvu->cgx_evq_lock, flags);
                if (!qentry)
                        break; /* nothing more to process */

                event = &qentry->link_event;

                /* process event */
                cgx_notify_pfs(event, rvu);
                kfree(qentry);
        } while (1);
}

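/* Set up the event queue and the worker that drains it, then register the
 * link change callback with every enabled LMAC of every CGX device.
 */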
static int cgx_lmac_event_handler_init(struct rvu *rvu)
{
        unsigned long lmac_bmap;
        struct cgx_event_cb cb;
        int cgx, lmac, err;
        void *cgxd;

        spin_lock_init(&rvu->cgx_evq_lock);
        INIT_LIST_HEAD(&rvu->cgx_evq_head);
        INIT_WORK(&rvu->cgx_evh_work, cgx_evhandler_task);
        rvu->cgx_evh_wq = alloc_workqueue("rvu_evh_wq", 0, 0);
        if (!rvu->cgx_evh_wq) {
                dev_err(rvu->dev, "alloc workqueue failed\n");
                return -ENOMEM;
        }

        cb.notify_link_chg = cgx_lmac_postevent; /* link change call back */
        cb.data = rvu;

        for (cgx = 0; cgx < rvu->cgx_cnt_max; cgx++) {
                cgxd = rvu_cgx_pdata(cgx, rvu);
                if (!cgxd)
                        continue;
                lmac_bmap = cgx_get_lmac_bmap(cgxd);
                for_each_set_bit(lmac, &lmac_bmap, rvu->hw->lmac_per_cgx) {
                        err = cgx_lmac_evh_register(&cb, cgxd, lmac);
                        if (err)
                                dev_err(rvu->dev,
                                        "%d:%d handler register failed\n",
                                        cgx, lmac);
                }
        }

        return 0;
}

static void rvu_cgx_wq_destroy(struct rvu *rvu)
{
        if (rvu->cgx_evh_wq) {
                destroy_workqueue(rvu->cgx_evh_wq);
                rvu->cgx_evh_wq = NULL;
        }
}

int rvu_cgx_init(struct rvu *rvu)
{
        int cgx, err;
        void *cgxd;

        /* CGX port ids start from 0 and are not necessarily contiguous.
         * Hence we allocate resources based on the maximum port id value.
         */
        rvu->cgx_cnt_max = cgx_get_cgxcnt_max();
        if (!rvu->cgx_cnt_max) {
                dev_info(rvu->dev, "No CGX devices found!\n");
                return -ENODEV;
        }

        rvu->cgx_idmap = devm_kzalloc(rvu->dev, rvu->cgx_cnt_max *
                                      sizeof(void *), GFP_KERNEL);
        if (!rvu->cgx_idmap)
                return -ENOMEM;

        /* Initialize the cgxdata table */
        for (cgx = 0; cgx < rvu->cgx_cnt_max; cgx++)
                rvu->cgx_idmap[cgx] = cgx_get_pdata(cgx);

        /* Map CGX LMAC interfaces to RVU PFs */
        err = rvu_map_cgx_lmac_pf(rvu);
        if (err)
                return err;

        /* Register for CGX events */
        err = cgx_lmac_event_handler_init(rvu);
        if (err)
                return err;

        mutex_init(&rvu->cgx_cfg_lock);

        /* Ensure event handler registration is completed, before
         * we turn on the links
         */
        mb();

        /* Do link up for all CGX ports */
        for (cgx = 0; cgx < rvu->cgx_cnt_max; cgx++) {
                cgxd = rvu_cgx_pdata(cgx, rvu);
                if (!cgxd)
                        continue;
                err = cgx_lmac_linkup_start(cgxd);
                if (err)
                        dev_err(rvu->dev,
                                "Link up process failed to start on cgx %d\n",
                                cgx);
        }

        return 0;
}

int rvu_cgx_exit(struct rvu *rvu)
{
        unsigned long lmac_bmap;
        int cgx, lmac;
        void *cgxd;

        for (cgx = 0; cgx < rvu->cgx_cnt_max; cgx++) {
                cgxd = rvu_cgx_pdata(cgx, rvu);
                if (!cgxd)
                        continue;
                lmac_bmap = cgx_get_lmac_bmap(cgxd);
                for_each_set_bit(lmac, &lmac_bmap, rvu->hw->lmac_per_cgx)
                        cgx_lmac_evh_unregister(cgxd, lmac);
        }

        /* Ensure event handler unregister is completed */
        mb();

        rvu_cgx_wq_destroy(rvu);
        return 0;
}

/* Most of the CGX configuration is restricted to the mapped PF only;
 * VFs of the mapped PF and other PFs are not allowed. This fn() checks
 * whether a PFFUNC is permitted to do the config or not.
 */
inline bool is_cgx_config_permitted(struct rvu *rvu, u16 pcifunc)
{
        if ((pcifunc & RVU_PFVF_FUNC_MASK) ||
            !is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc)))
                return false;
        return true;
}

void rvu_cgx_enadis_rx_bp(struct rvu *rvu, int pf, bool enable)
{
        struct mac_ops *mac_ops;
        u8 cgx_id, lmac_id;
        void *cgxd;

        if (!is_pf_cgxmapped(rvu, pf))
                return;

        rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
        cgxd = rvu_cgx_pdata(cgx_id, rvu);

        mac_ops = get_mac_ops(cgxd);
        /* Set / clear CTL_BCK to control pause frame forwarding to NIX */
        if (enable)
                mac_ops->mac_enadis_rx_pause_fwding(cgxd, lmac_id, true);
        else
                mac_ops->mac_enadis_rx_pause_fwding(cgxd, lmac_id, false);
}

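/* Enable or disable packet reception and transmission on the LMAC mapped
 * to the given PF; only the mapped PF itself may do this.
 */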
int rvu_cgx_config_rxtx(struct rvu *rvu, u16 pcifunc, bool start)
{
        int pf = rvu_get_pf(pcifunc);
        struct mac_ops *mac_ops;
        u8 cgx_id, lmac_id;
        void *cgxd;

        if (!is_cgx_config_permitted(rvu, pcifunc))
                return LMAC_AF_ERR_PERM_DENIED;

        rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
        cgxd = rvu_cgx_pdata(cgx_id, rvu);
        mac_ops = get_mac_ops(cgxd);

        return mac_ops->mac_rx_tx_enable(cgxd, lmac_id, start);
}

int rvu_cgx_config_tx(void *cgxd, int lmac_id, bool enable)
{
        struct mac_ops *mac_ops;

        mac_ops = get_mac_ops(cgxd);
        return mac_ops->mac_tx_enable(cgxd, lmac_id, enable);
}

void rvu_cgx_disable_dmac_entries(struct rvu *rvu, u16 pcifunc)
{
        int pf = rvu_get_pf(pcifunc);
        int i = 0, lmac_count = 0;
        struct mac_ops *mac_ops;
        u8 max_dmac_filters;
        u8 cgx_id, lmac_id;
        void *cgx_dev;

        if (!is_cgx_config_permitted(rvu, pcifunc))
                return;

        if (rvu_npc_exact_has_match_table(rvu)) {
                rvu_npc_exact_reset(rvu, pcifunc);
                return;
        }

        rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
        cgx_dev = cgx_get_pdata(cgx_id);
        lmac_count = cgx_get_lmac_cnt(cgx_dev);

        mac_ops = get_mac_ops(cgx_dev);
        if (!mac_ops || !lmac_count)
                return;

        max_dmac_filters = mac_ops->dmac_filter_count / lmac_count;

        for (i = 0; i < max_dmac_filters; i++)
                cgx_lmac_addr_del(cgx_id, lmac_id, i);

        /* cgx_lmac_addr_del() does not clear the entry at index 0,
         * so that needs to be done explicitly.
         */
        cgx_lmac_addr_reset(cgx_id, lmac_id);
}

int rvu_mbox_handler_cgx_start_rxtx(struct rvu *rvu, struct msg_req *req,
                                    struct msg_rsp *rsp)
{
        rvu_cgx_config_rxtx(rvu, req->hdr.pcifunc, true);
        return 0;
}

int rvu_mbox_handler_cgx_stop_rxtx(struct rvu *rvu, struct msg_req *req,
                                   struct msg_rsp *rsp)
{
        rvu_cgx_config_rxtx(rvu, req->hdr.pcifunc, false);
        return 0;
}

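/* Common stats handler for both CGX and RPM MACs; rsp points to either a
 * struct cgx_stats_rsp or a struct rpm_stats_rsp, distinguished here by
 * the per-MAC rx/tx stats counts.
 */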
static int rvu_lmac_get_stats(struct rvu *rvu, struct msg_req *req,
                              void *rsp)
{
        int pf = rvu_get_pf(req->hdr.pcifunc);
        struct mac_ops *mac_ops;
        int stat = 0, err = 0;
        u64 tx_stat, rx_stat;
        u8 cgx_idx, lmac;
        void *cgxd;

        if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
                return LMAC_AF_ERR_PERM_DENIED;

        rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_idx, &lmac);
        cgxd = rvu_cgx_pdata(cgx_idx, rvu);
        mac_ops = get_mac_ops(cgxd);

        /* Rx stats */
        while (stat < mac_ops->rx_stats_cnt) {
                err = mac_ops->mac_get_rx_stats(cgxd, lmac, stat, &rx_stat);
                if (err)
                        return err;
                if (mac_ops->rx_stats_cnt == RPM_RX_STATS_COUNT)
                        ((struct rpm_stats_rsp *)rsp)->rx_stats[stat] = rx_stat;
                else
                        ((struct cgx_stats_rsp *)rsp)->rx_stats[stat] = rx_stat;
                stat++;
        }

        /* Tx stats */
        stat = 0;
        while (stat < mac_ops->tx_stats_cnt) {
                err = mac_ops->mac_get_tx_stats(cgxd, lmac, stat, &tx_stat);
                if (err)
                        return err;
                if (mac_ops->tx_stats_cnt == RPM_TX_STATS_COUNT)
                        ((struct rpm_stats_rsp *)rsp)->tx_stats[stat] = tx_stat;
                else
                        ((struct cgx_stats_rsp *)rsp)->tx_stats[stat] = tx_stat;
                stat++;
        }
        return 0;
}

int rvu_mbox_handler_cgx_stats(struct rvu *rvu, struct msg_req *req,
                               struct cgx_stats_rsp *rsp)
{
        return rvu_lmac_get_stats(rvu, req, (void *)rsp);
}

int rvu_mbox_handler_rpm_stats(struct rvu *rvu, struct msg_req *req,
                               struct rpm_stats_rsp *rsp)
{
        return rvu_lmac_get_stats(rvu, req, (void *)rsp);
}

int rvu_mbox_handler_cgx_fec_stats(struct rvu *rvu,
                                   struct msg_req *req,
                                   struct cgx_fec_stats_rsp *rsp)
{
        int pf = rvu_get_pf(req->hdr.pcifunc);
        struct mac_ops *mac_ops;
        u8 cgx_idx, lmac;
        void *cgxd;

        if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
                return LMAC_AF_ERR_PERM_DENIED;
        rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_idx, &lmac);

        cgxd = rvu_cgx_pdata(cgx_idx, rvu);
        mac_ops = get_mac_ops(cgxd);
        return mac_ops->get_fec_stats(cgxd, lmac, rsp);
}

int rvu_mbox_handler_cgx_mac_addr_set(struct rvu *rvu,
                                      struct cgx_mac_addr_set_or_get *req,
                                      struct cgx_mac_addr_set_or_get *rsp)
{
        int pf = rvu_get_pf(req->hdr.pcifunc);
        u8 cgx_id, lmac_id;

        if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
                return -EPERM;

        if (rvu_npc_exact_has_match_table(rvu))
                return rvu_npc_exact_mac_addr_set(rvu, req, rsp);

        rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);

        cgx_lmac_addr_set(cgx_id, lmac_id, req->mac_addr);

        return 0;
}

int rvu_mbox_handler_cgx_mac_addr_add(struct rvu *rvu,
                                      struct cgx_mac_addr_add_req *req,
                                      struct cgx_mac_addr_add_rsp *rsp)
{
        int pf = rvu_get_pf(req->hdr.pcifunc);
        u8 cgx_id, lmac_id;
        int rc = 0;

        if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
                return -EPERM;

        if (rvu_npc_exact_has_match_table(rvu))
                return rvu_npc_exact_mac_addr_add(rvu, req, rsp);

        rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
        rc = cgx_lmac_addr_add(cgx_id, lmac_id, req->mac_addr);
        if (rc >= 0) {
                rsp->index = rc;
                return 0;
        }

        return rc;
}

int rvu_mbox_handler_cgx_mac_addr_del(struct rvu *rvu,
                                      struct cgx_mac_addr_del_req *req,
                                      struct msg_rsp *rsp)
{
        int pf = rvu_get_pf(req->hdr.pcifunc);
        u8 cgx_id, lmac_id;

        if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
                return -EPERM;

        if (rvu_npc_exact_has_match_table(rvu))
                return rvu_npc_exact_mac_addr_del(rvu, req, rsp);

        rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
        return cgx_lmac_addr_del(cgx_id, lmac_id, req->index);
}

int rvu_mbox_handler_cgx_mac_max_entries_get(struct rvu *rvu,
                                             struct msg_req *req,
                                             struct cgx_max_dmac_entries_get_rsp
                                             *rsp)
{
        int pf = rvu_get_pf(req->hdr.pcifunc);
        u8 cgx_id, lmac_id;

        /* If the msg is received from a PF that is not mapped to a CGX LMAC,
         * or from a VF, then no entries are allocated for DMAC filters at
         * CGX level. So returning zero.
         */
        if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc)) {
                rsp->max_dmac_filters = 0;
                return 0;
        }

        if (rvu_npc_exact_has_match_table(rvu)) {
                rsp->max_dmac_filters = rvu_npc_exact_get_max_entries(rvu);
                return 0;
        }

        rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
        rsp->max_dmac_filters = cgx_lmac_addr_max_entries_get(cgx_id, lmac_id);
        return 0;
}

int rvu_mbox_handler_cgx_mac_addr_get(struct rvu *rvu,
                                      struct cgx_mac_addr_set_or_get *req,
                                      struct cgx_mac_addr_set_or_get *rsp)
{
        int pf = rvu_get_pf(req->hdr.pcifunc);
        u8 cgx_id, lmac_id;
        int rc = 0;
        u64 cfg;

        if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
                return -EPERM;

        rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);

        rsp->hdr.rc = rc;
        cfg = cgx_lmac_addr_get(cgx_id, lmac_id);
        /* copy 48 bit mac address to rsp->mac_addr */
        u64_to_ether_addr(cfg, rsp->mac_addr);
        return 0;
}

int rvu_mbox_handler_cgx_promisc_enable(struct rvu *rvu, struct msg_req *req,
                                        struct msg_rsp *rsp)
{
        u16 pcifunc = req->hdr.pcifunc;
        int pf = rvu_get_pf(pcifunc);
        u8 cgx_id, lmac_id;

        if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
                return -EPERM;

        /* Disable drop on non hit rule */
        if (rvu_npc_exact_has_match_table(rvu))
                return rvu_npc_exact_promisc_enable(rvu, req->hdr.pcifunc);

        rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);

        cgx_lmac_promisc_config(cgx_id, lmac_id, true);
        return 0;
}

int rvu_mbox_handler_cgx_promisc_disable(struct rvu *rvu, struct msg_req *req,
                                         struct msg_rsp *rsp)
{
        int pf = rvu_get_pf(req->hdr.pcifunc);
        u8 cgx_id, lmac_id;

        if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
                return -EPERM;

        /* Re-enable drop on non hit rule */
        if (rvu_npc_exact_has_match_table(rvu))
                return rvu_npc_exact_promisc_disable(rvu, req->hdr.pcifunc);

        rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);

        cgx_lmac_promisc_config(cgx_id, lmac_id, false);
        return 0;
}

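/* Enable or disable PTP hardware timestamping on RX for the LMAC mapped to
 * this PF, keeping NPC (the 8-byte timestamp header shift) and MCS in sync
 * with that setting.
 */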
static int rvu_cgx_ptp_rx_cfg(struct rvu *rvu, u16 pcifunc, bool enable)
{
        struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
        int pf = rvu_get_pf(pcifunc);
        struct mac_ops *mac_ops;
        u8 cgx_id, lmac_id;
        void *cgxd;

        if (!is_mac_feature_supported(rvu, pf, RVU_LMAC_FEAT_PTP))
                return 0;

        /* This msg is expected only from PFs that are mapped to CGX LMACs;
         * if received from any other PF/VF, simply ACK, nothing to do.
         */
        if ((pcifunc & RVU_PFVF_FUNC_MASK) ||
            !is_pf_cgxmapped(rvu, pf))
                return -ENODEV;

        rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
        cgxd = rvu_cgx_pdata(cgx_id, rvu);

        mac_ops = get_mac_ops(cgxd);
        mac_ops->mac_enadis_ptp_config(cgxd, lmac_id, enable);
        /* If PTP is enabled then inform NPC that packets to be
         * parsed by this PF will have their data shifted by 8 bytes,
         * and if PTP is disabled then no shift is required.
         */
        if (npc_config_ts_kpuaction(rvu, pf, pcifunc, enable))
                return -EINVAL;
        /* This flag is required to clean up CGX conf if app gets killed */
        pfvf->hw_rx_tstamp_en = enable;

        /* Inform MCS about 8B RX header */
        rvu_mcs_ptp_cfg(rvu, cgx_id, lmac_id, enable);
        return 0;
}

int rvu_mbox_handler_cgx_ptp_rx_enable(struct rvu *rvu, struct msg_req *req,
                                       struct msg_rsp *rsp)
{
        if (!is_pf_cgxmapped(rvu, rvu_get_pf(req->hdr.pcifunc)))
                return -EPERM;

        return rvu_cgx_ptp_rx_cfg(rvu, req->hdr.pcifunc, true);
}

int rvu_mbox_handler_cgx_ptp_rx_disable(struct rvu *rvu, struct msg_req *req,
                                        struct msg_rsp *rsp)
{
        return rvu_cgx_ptp_rx_cfg(rvu, req->hdr.pcifunc, false);
}

static int rvu_cgx_config_linkevents(struct rvu *rvu, u16 pcifunc, bool en)
{
        int pf = rvu_get_pf(pcifunc);
        u8 cgx_id, lmac_id;

        if (!is_cgx_config_permitted(rvu, pcifunc))
                return -EPERM;

        rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);

        if (en) {
                set_bit(pf, &rvu->pf_notify_bmap);
                /* Send the current link status to PF */
                rvu_cgx_send_link_info(cgx_id, lmac_id, rvu);
        } else {
                clear_bit(pf, &rvu->pf_notify_bmap);
        }

        return 0;
}

int rvu_mbox_handler_cgx_start_linkevents(struct rvu *rvu, struct msg_req *req,
                                          struct msg_rsp *rsp)
{
        rvu_cgx_config_linkevents(rvu, req->hdr.pcifunc, true);
        return 0;
}

int rvu_mbox_handler_cgx_stop_linkevents(struct rvu *rvu, struct msg_req *req,
                                         struct msg_rsp *rsp)
{
        rvu_cgx_config_linkevents(rvu, req->hdr.pcifunc, false);
        return 0;
}

int rvu_mbox_handler_cgx_get_linkinfo(struct rvu *rvu, struct msg_req *req,
                                      struct cgx_link_info_msg *rsp)
{
        u8 cgx_id, lmac_id;
        int pf, err;

        pf = rvu_get_pf(req->hdr.pcifunc);

        if (!is_pf_cgxmapped(rvu, pf))
                return -ENODEV;

        rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);

        err = cgx_get_link_info(rvu_cgx_pdata(cgx_id, rvu), lmac_id,
                                &rsp->link_info);
        return err;
}

int rvu_mbox_handler_cgx_features_get(struct rvu *rvu,
                                      struct msg_req *req,
                                      struct cgx_features_info_msg *rsp)
{
        int pf = rvu_get_pf(req->hdr.pcifunc);
        u8 cgx_idx, lmac;
        void *cgxd;

        if (!is_pf_cgxmapped(rvu, pf))
                return 0;

        rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_idx, &lmac);
        cgxd = rvu_cgx_pdata(cgx_idx, rvu);
        rsp->lmac_features = cgx_features_get(cgxd);

        return 0;
}

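/* Return the per-MAC FIFO length of the first enabled CGX/RPM device, or 0
 * if none is available.
 */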
u32 rvu_cgx_get_fifolen(struct rvu *rvu)
{
        struct mac_ops *mac_ops;
        u32 fifo_len;

        mac_ops = get_mac_ops(rvu_first_cgx_pdata(rvu));
        fifo_len = mac_ops ? mac_ops->fifo_len : 0;

        return fifo_len;
}

u32 rvu_cgx_get_lmac_fifolen(struct rvu *rvu, int cgx, int lmac)
{
        struct mac_ops *mac_ops;
        void *cgxd;

        cgxd = rvu_cgx_pdata(cgx, rvu);
        if (!cgxd)
                return 0;

        mac_ops = get_mac_ops(cgxd);
        if (!mac_ops->lmac_fifo_len)
                return 0;

        return mac_ops->lmac_fifo_len(cgxd, lmac);
}

static int rvu_cgx_config_intlbk(struct rvu *rvu, u16 pcifunc, bool en)
{
        int pf = rvu_get_pf(pcifunc);
        struct mac_ops *mac_ops;
        u8 cgx_id, lmac_id;

        if (!is_cgx_config_permitted(rvu, pcifunc))
                return -EPERM;

        rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
        mac_ops = get_mac_ops(rvu_cgx_pdata(cgx_id, rvu));

        return mac_ops->mac_lmac_intl_lbk(rvu_cgx_pdata(cgx_id, rvu),
                                          lmac_id, en);
}

int rvu_mbox_handler_cgx_intlbk_enable(struct rvu *rvu, struct msg_req *req,
                                       struct msg_rsp *rsp)
{
        rvu_cgx_config_intlbk(rvu, req->hdr.pcifunc, true);
        return 0;
}

int rvu_mbox_handler_cgx_intlbk_disable(struct rvu *rvu, struct msg_req *req,
                                        struct msg_rsp *rsp)
{
        rvu_cgx_config_intlbk(rvu, req->hdr.pcifunc, false);
        return 0;
}

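/* Configure 802.3X pause frames on the LMAC mapped to this PF. Refused if
 * PFC (802.1Qbb) frames are currently enabled, since the two flow control
 * modes are mutually exclusive here.
 */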
int rvu_cgx_cfg_pause_frm(struct rvu *rvu, u16 pcifunc, u8 tx_pause, u8 rx_pause)
{
        int pf = rvu_get_pf(pcifunc);
        u8 rx_pfc = 0, tx_pfc = 0;
        struct mac_ops *mac_ops;
        u8 cgx_id, lmac_id;
        void *cgxd;

        if (!is_mac_feature_supported(rvu, pf, RVU_LMAC_FEAT_FC))
                return 0;

        /* This msg is expected only from PF/VFs that are mapped to CGX LMACs;
         * if received from any other PF/VF, simply ACK, nothing to do.
         */
        if (!is_pf_cgxmapped(rvu, pf))
                return LMAC_AF_ERR_PF_NOT_MAPPED;

        rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
        cgxd = rvu_cgx_pdata(cgx_id, rvu);
        mac_ops = get_mac_ops(cgxd);

        mac_ops->mac_get_pfc_frm_cfg(cgxd, lmac_id, &tx_pfc, &rx_pfc);
        if (tx_pfc || rx_pfc) {
                dev_warn(rvu->dev,
                         "Cannot configure 802.3X flow control as PFC frames are enabled\n");
                return LMAC_AF_ERR_8023PAUSE_ENADIS_PERM_DENIED;
        }

        mutex_lock(&rvu->rsrc_lock);
        if (verify_lmac_fc_cfg(cgxd, lmac_id, tx_pause, rx_pause,
                               pcifunc & RVU_PFVF_FUNC_MASK)) {
                mutex_unlock(&rvu->rsrc_lock);
                return LMAC_AF_ERR_PERM_DENIED;
        }
        mutex_unlock(&rvu->rsrc_lock);

        return mac_ops->mac_enadis_pause_frm(cgxd, lmac_id, tx_pause, rx_pause);
}

int rvu_mbox_handler_cgx_cfg_pause_frm(struct rvu *rvu,
                                       struct cgx_pause_frm_cfg *req,
                                       struct cgx_pause_frm_cfg *rsp)
{
        int pf = rvu_get_pf(req->hdr.pcifunc);
        struct mac_ops *mac_ops;
        u8 cgx_id, lmac_id;
        int err = 0;
        void *cgxd;

        /* This msg is expected only from PF/VFs that are mapped to CGX LMACs;
         * if received from any other PF/VF, simply ACK, nothing to do.
         */
        if (!is_pf_cgxmapped(rvu, pf))
                return -ENODEV;

        rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
        cgxd = rvu_cgx_pdata(cgx_id, rvu);
        mac_ops = get_mac_ops(cgxd);

        if (req->set)
                err = rvu_cgx_cfg_pause_frm(rvu, req->hdr.pcifunc, req->tx_pause, req->rx_pause);
        else
                mac_ops->mac_get_pause_frm_status(cgxd, lmac_id, &rsp->tx_pause, &rsp->rx_pause);

        return err;
}

int rvu_mbox_handler_cgx_get_phy_fec_stats(struct rvu *rvu, struct msg_req *req,
                                           struct msg_rsp *rsp)
{
        int pf = rvu_get_pf(req->hdr.pcifunc);
        u8 cgx_id, lmac_id;

        if (!is_pf_cgxmapped(rvu, pf))
                return LMAC_AF_ERR_PF_NOT_MAPPED;

        rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
        return cgx_get_phy_fec_stats(rvu_cgx_pdata(cgx_id, rvu), lmac_id);
}

/* Finds the cumulative status of NIX rx/tx counters from the LF of a PF and
 * those of its VFs as well, i.e. NIX rx/tx counters at the CGX port level.
 */
int rvu_cgx_nix_cuml_stats(struct rvu *rvu, void *cgxd, int lmac_id,
                           int index, int rxtxflag, u64 *stat)
{
        struct rvu_block *block;
        int blkaddr;
        u16 pcifunc;
        int pf, lf;

        *stat = 0;

        if (!cgxd || !rvu)
                return -EINVAL;

        pf = cgxlmac_to_pf(rvu, cgx_get_cgxid(cgxd), lmac_id);
        if (pf < 0)
                return pf;

        /* Assumes LF of a PF and all of its VF belongs to the same
         * NIX block
         */
        pcifunc = pf << RVU_PFVF_PF_SHIFT;
        blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
        if (blkaddr < 0)
                return 0;
        block = &rvu->hw->block[blkaddr];

        for (lf = 0; lf < block->lf.max; lf++) {
                /* Check if a lf is attached to this PF or one of its VFs */
                if ((block->fn_map[lf] & ~RVU_PFVF_FUNC_MASK) !=
                    (pcifunc & ~RVU_PFVF_FUNC_MASK))
                        continue;
                if (rxtxflag == NIX_STATS_RX)
                        *stat += rvu_read64(rvu, blkaddr,
                                            NIX_AF_LFX_RX_STATX(lf, index));
                else
                        *stat += rvu_read64(rvu, blkaddr,
                                            NIX_AF_LFX_TX_STATX(lf, index));
        }

        return 0;
}

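/* Refcount NIXLF start/stop requests per parent PF: the LMAC is started
 * when the first NIXLF of the PF (or of any of its VFs) comes up and
 * stopped again only when the last one goes down.
 */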
int rvu_cgx_start_stop_io(struct rvu *rvu, u16 pcifunc, bool start)
{
        struct rvu_pfvf *parent_pf, *pfvf;
        int cgx_users, err = 0;

        if (!is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc)))
                return 0;

        parent_pf = &rvu->pf[rvu_get_pf(pcifunc)];
        pfvf = rvu_get_pfvf(rvu, pcifunc);

        mutex_lock(&rvu->cgx_cfg_lock);

        if (start && pfvf->cgx_in_use)
                goto exit; /* CGX is already started hence nothing to do */
        if (!start && !pfvf->cgx_in_use)
                goto exit; /* CGX is already stopped hence nothing to do */

        if (start) {
                cgx_users = parent_pf->cgx_users;
                parent_pf->cgx_users++;
        } else {
                parent_pf->cgx_users--;
                cgx_users = parent_pf->cgx_users;
        }

        /* Start CGX when the first of all the NIXLFs is started.
         * Stop CGX when the last of all the NIXLFs is stopped.
         */
        if (!cgx_users) {
                err = rvu_cgx_config_rxtx(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK,
                                          start);
                if (err) {
                        dev_err(rvu->dev, "Unable to %s CGX\n",
                                start ? "start" : "stop");
                        /* Revert the usage count in case of error */
                        parent_pf->cgx_users = start ? parent_pf->cgx_users - 1
                                               : parent_pf->cgx_users + 1;
                        goto exit;
                }
        }
        pfvf->cgx_in_use = start;
exit:
        mutex_unlock(&rvu->cgx_cfg_lock);
        return err;
}

int rvu_mbox_handler_cgx_set_fec_param(struct rvu *rvu,
                                       struct fec_mode *req,
                                       struct fec_mode *rsp)
{
        int pf = rvu_get_pf(req->hdr.pcifunc);
        u8 cgx_id, lmac_id;

        if (!is_pf_cgxmapped(rvu, pf))
                return -EPERM;

        if (req->fec == OTX2_FEC_OFF)
                req->fec = OTX2_FEC_NONE;
        rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
        rsp->fec = cgx_set_fec(req->fec, cgx_id, lmac_id);
        return 0;
}

int rvu_mbox_handler_cgx_get_aux_link_info(struct rvu *rvu, struct msg_req *req,
                                           struct cgx_fw_data *rsp)
{
        int pf = rvu_get_pf(req->hdr.pcifunc);
        u8 cgx_id, lmac_id;

        if (!rvu->fwdata)
                return LMAC_AF_ERR_FIRMWARE_DATA_NOT_MAPPED;

        if (!is_pf_cgxmapped(rvu, pf))
                return -EPERM;

        rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);

        if (rvu->hw->lmac_per_cgx == CGX_LMACS_USX)
                memcpy(&rsp->fwdata,
                       &rvu->fwdata->cgx_fw_data_usx[cgx_id][lmac_id],
                       sizeof(struct cgx_lmac_fwdata_s));
        else
                memcpy(&rsp->fwdata,
                       &rvu->fwdata->cgx_fw_data[cgx_id][lmac_id],
                       sizeof(struct cgx_lmac_fwdata_s));

        return 0;
}

int rvu_mbox_handler_cgx_set_link_mode(struct rvu *rvu,
                                       struct cgx_set_link_mode_req *req,
                                       struct cgx_set_link_mode_rsp *rsp)
{
        int pf = rvu_get_pf(req->hdr.pcifunc);
        u8 cgx_idx, lmac;
        void *cgxd;

        if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
                return -EPERM;

        rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_idx, &lmac);
        cgxd = rvu_cgx_pdata(cgx_idx, rvu);
        rsp->status = cgx_set_link_mode(cgxd, req->args, cgx_idx, lmac);
        return 0;
}

int rvu_mbox_handler_cgx_mac_addr_reset(struct rvu *rvu, struct cgx_mac_addr_reset_req *req,
                                        struct msg_rsp *rsp)
{
        int pf = rvu_get_pf(req->hdr.pcifunc);
        u8 cgx_id, lmac_id;

        if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
                return LMAC_AF_ERR_PERM_DENIED;

        rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);

        if (rvu_npc_exact_has_match_table(rvu))
                return rvu_npc_exact_mac_addr_reset(rvu, req, rsp);

        return cgx_lmac_addr_reset(cgx_id, lmac_id);
}

int rvu_mbox_handler_cgx_mac_addr_update(struct rvu *rvu,
                                         struct cgx_mac_addr_update_req *req,
                                         struct cgx_mac_addr_update_rsp *rsp)
{
        int pf = rvu_get_pf(req->hdr.pcifunc);
        u8 cgx_id, lmac_id;

        if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
                return LMAC_AF_ERR_PERM_DENIED;

        if (rvu_npc_exact_has_match_table(rvu))
                return rvu_npc_exact_mac_addr_update(rvu, req, rsp);

        rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
        return cgx_lmac_addr_update(cgx_id, lmac_id, req->mac_addr, req->index);
}

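/* Configure PFC (802.1Qbb) on the LMAC mapped to this PF. Refused if
 * 802.3X pause frames are currently enabled, the mirror image of the
 * check in rvu_cgx_cfg_pause_frm().
 */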
int rvu_cgx_prio_flow_ctrl_cfg(struct rvu *rvu, u16 pcifunc, u8 tx_pause,
                               u8 rx_pause, u16 pfc_en)
{
        int pf = rvu_get_pf(pcifunc);
        u8 rx_8023 = 0, tx_8023 = 0;
        struct mac_ops *mac_ops;
        u8 cgx_id, lmac_id;
        void *cgxd;

        /* This msg is expected only from PF/VFs that are mapped to CGX LMACs;
         * if received from any other PF/VF, simply ACK, nothing to do.
         */
        if (!is_pf_cgxmapped(rvu, pf))
                return -ENODEV;

        rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
        cgxd = rvu_cgx_pdata(cgx_id, rvu);
        mac_ops = get_mac_ops(cgxd);

        mac_ops->mac_get_pause_frm_status(cgxd, lmac_id, &tx_8023, &rx_8023);
        if (tx_8023 || rx_8023) {
                dev_warn(rvu->dev,
                         "Cannot configure PFC as 802.3X pause frames are enabled\n");
                return LMAC_AF_ERR_PFC_ENADIS_PERM_DENIED;
        }

        mutex_lock(&rvu->rsrc_lock);
        if (verify_lmac_fc_cfg(cgxd, lmac_id, tx_pause, rx_pause,
                               pcifunc & RVU_PFVF_FUNC_MASK)) {
                mutex_unlock(&rvu->rsrc_lock);
                return LMAC_AF_ERR_PERM_DENIED;
        }
        mutex_unlock(&rvu->rsrc_lock);

        return mac_ops->pfc_config(cgxd, lmac_id, tx_pause, rx_pause, pfc_en);
}

int rvu_mbox_handler_cgx_prio_flow_ctrl_cfg(struct rvu *rvu,
                                            struct cgx_pfc_cfg *req,
                                            struct cgx_pfc_rsp *rsp)
{
        int pf = rvu_get_pf(req->hdr.pcifunc);
        struct mac_ops *mac_ops;
        u8 cgx_id, lmac_id;
        void *cgxd;
        int err;

        /* This msg is expected only from PF/VFs that are mapped to CGX LMACs;
         * if received from any other PF/VF, simply ACK, nothing to do.
         */
        if (!is_pf_cgxmapped(rvu, pf))
                return -ENODEV;

        rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
        cgxd = rvu_cgx_pdata(cgx_id, rvu);
        mac_ops = get_mac_ops(cgxd);

        err = rvu_cgx_prio_flow_ctrl_cfg(rvu, req->hdr.pcifunc, req->tx_pause,
                                         req->rx_pause, req->pfc_en);

        mac_ops->mac_get_pfc_frm_cfg(cgxd, lmac_id, &rsp->tx_pause, &rsp->rx_pause);
        return err;
}

void rvu_mac_reset(struct rvu *rvu, u16 pcifunc)
{
        int pf = rvu_get_pf(pcifunc);
        struct mac_ops *mac_ops;
        struct cgx *cgxd;
        u8 cgx, lmac;

        if (!is_pf_cgxmapped(rvu, pf))
                return;

        rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx, &lmac);
        cgxd = rvu_cgx_pdata(cgx, rvu);
        mac_ops = get_mac_ops(cgxd);

        if (mac_ops->mac_reset(cgxd, lmac, !is_vf(pcifunc)))
                dev_err(rvu->dev, "Failed to reset MAC\n");
}