// SPDX-License-Identifier: GPL-2.0
/* Marvell RVU Physical Function ethernet driver
 * Copyright (C) 2020 Marvell.

#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/iommu.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/bitfield.h>
#include <net/page_pool/types.h>

#include "otx2_common.h"
#include "otx2_txrx.h"
#include "otx2_struct.h"
#include <rvu_trace.h>

#define DRV_NAME	"rvu_nicpf"
#define DRV_STRING	"Marvell RVU NIC Physical Function Driver"

/* Supported devices */
static const struct pci_device_id otx2_pf_id_table[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_RVU_PF) },
	{ 0, } /* end of table */

MODULE_AUTHOR("Sunil Goutham <sgoutham@marvell.com>");
MODULE_DESCRIPTION(DRV_STRING);
MODULE_LICENSE("GPL v2");
MODULE_DEVICE_TABLE(pci, otx2_pf_id_table);

static void otx2_vf_link_event_task(struct work_struct *work);

static int otx2_config_hw_tx_tstamp(struct otx2_nic *pfvf, bool enable);
static int otx2_config_hw_rx_tstamp(struct otx2_nic *pfvf, bool enable);

static int otx2_change_mtu(struct net_device *netdev, int new_mtu)
	struct otx2_nic *pf = netdev_priv(netdev);
	bool if_up = netif_running(netdev);

	if (pf->xdp_prog && new_mtu > MAX_XDP_MTU) {
		netdev_warn(netdev, "Jumbo frames not yet supported with XDP, current MTU %d.\n",

	netdev_info(netdev, "Changing MTU from %d to %d\n",
		    netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;

		err = otx2_open(netdev);

static void otx2_disable_flr_me_intr(struct otx2_nic *pf)
	int irq, vfs = pf->total_vfs;

	/* Disable VFs ME interrupts */
	otx2_write64(pf, RVU_PF_VFME_INT_ENA_W1CX(0), INTR_MASK(vfs));
	irq = pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFME0);

	/* Disable VFs FLR interrupts */
	otx2_write64(pf, RVU_PF_VFFLR_INT_ENA_W1CX(0), INTR_MASK(vfs));
	irq = pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFFLR0);

	otx2_write64(pf, RVU_PF_VFME_INT_ENA_W1CX(1), INTR_MASK(vfs - 64));
	irq = pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFME1);

	otx2_write64(pf, RVU_PF_VFFLR_INT_ENA_W1CX(1), INTR_MASK(vfs - 64));
	irq = pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFFLR1);
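
/* Illustrative sketch, not part of the original driver: with more than 64 VFs
 * the FLR/ME interrupt state spills into a second 64-bit register, so a VF
 * index maps onto a (register, bit) pair as below. Helper name is hypothetical.
 */
static inline void otx2_vf_to_intr_bit(int vf, int *reg, u64 *bit)
{
	*reg = vf / 64;			/* 0 for VF0..VF63, 1 for VF64..VF127 */
	*bit = BIT_ULL(vf % 64);	/* bit position within that register */
}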
static void otx2_flr_wq_destroy(struct otx2_nic *pf)
	destroy_workqueue(pf->flr_wq);
	devm_kfree(pf->dev, pf->flr_wrk);

static void otx2_flr_handler(struct work_struct *work)
	struct flr_work *flrwork = container_of(work, struct flr_work, work);
	struct otx2_nic *pf = flrwork->pf;
	struct mbox *mbox = &pf->mbox;

	vf = flrwork - pf->flr_wrk;

	mutex_lock(&mbox->lock);
	req = otx2_mbox_alloc_msg_vf_flr(mbox);
		mutex_unlock(&mbox->lock);

	req->hdr.pcifunc &= RVU_PFVF_FUNC_MASK;
	req->hdr.pcifunc |= (vf + 1) & RVU_PFVF_FUNC_MASK;

	if (!otx2_sync_mbox_msg(&pf->mbox)) {
		/* clear transaction pending bit */
		otx2_write64(pf, RVU_PF_VFTRPENDX(reg), BIT_ULL(vf));
		otx2_write64(pf, RVU_PF_VFFLR_INT_ENA_W1SX(reg), BIT_ULL(vf));

	mutex_unlock(&mbox->lock);
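
/* Sketch (hypothetical helper, not in the driver): the low RVU_PFVF_FUNC_MASK
 * bits of pcifunc select the function, where 0 means the PF itself and VF 'vf'
 * is addressed as function number (vf + 1). A way to compose such a pcifunc:
 */
static inline u16 otx2_vf_pcifunc(u16 pf_pcifunc, int vf)
{
	return (pf_pcifunc & ~RVU_PFVF_FUNC_MASK) |
	       ((vf + 1) & RVU_PFVF_FUNC_MASK);
}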
static irqreturn_t otx2_pf_flr_intr_handler(int irq, void *pf_irq)
	struct otx2_nic *pf = (struct otx2_nic *)pf_irq;
	int reg, dev, vf, start_vf, num_reg = 1;

	if (pf->total_vfs > 64)

	for (reg = 0; reg < num_reg; reg++) {
		intr = otx2_read64(pf, RVU_PF_VFFLR_INTX(reg));

		for (vf = 0; vf < 64; vf++) {
			if (!(intr & BIT_ULL(vf)))
			queue_work(pf->flr_wq, &pf->flr_wrk[dev].work);
			/* Clear interrupt */
			otx2_write64(pf, RVU_PF_VFFLR_INTX(reg), BIT_ULL(vf));
			/* Disable the interrupt */
			otx2_write64(pf, RVU_PF_VFFLR_INT_ENA_W1CX(reg),

static irqreturn_t otx2_pf_me_intr_handler(int irq, void *pf_irq)
	struct otx2_nic *pf = (struct otx2_nic *)pf_irq;
	int vf, reg, num_reg = 1;

	if (pf->total_vfs > 64)

	for (reg = 0; reg < num_reg; reg++) {
		intr = otx2_read64(pf, RVU_PF_VFME_INTX(reg));

		for (vf = 0; vf < 64; vf++) {
			if (!(intr & BIT_ULL(vf)))
			/* clear trpend bit */
			otx2_write64(pf, RVU_PF_VFTRPENDX(reg), BIT_ULL(vf));
			/* clear interrupt */
			otx2_write64(pf, RVU_PF_VFME_INTX(reg), BIT_ULL(vf));

static int otx2_register_flr_me_intr(struct otx2_nic *pf, int numvfs)
	struct otx2_hw *hw = &pf->hw;

	/* Register ME interrupt handler */
	irq_name = &hw->irq_name[RVU_PF_INT_VEC_VFME0 * NAME_SIZE];
	snprintf(irq_name, NAME_SIZE, "RVUPF%d_ME0", rvu_get_pf(pf->pcifunc));
	ret = request_irq(pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFME0),
			  otx2_pf_me_intr_handler, 0, irq_name, pf);
			"RVUPF: IRQ registration failed for ME0\n");

	/* Register FLR interrupt handler */
	irq_name = &hw->irq_name[RVU_PF_INT_VEC_VFFLR0 * NAME_SIZE];
	snprintf(irq_name, NAME_SIZE, "RVUPF%d_FLR0", rvu_get_pf(pf->pcifunc));
	ret = request_irq(pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFFLR0),
			  otx2_pf_flr_intr_handler, 0, irq_name, pf);
			"RVUPF: IRQ registration failed for FLR0\n");

		irq_name = &hw->irq_name[RVU_PF_INT_VEC_VFME1 * NAME_SIZE];
		snprintf(irq_name, NAME_SIZE, "RVUPF%d_ME1",
			 rvu_get_pf(pf->pcifunc));
		ret = request_irq(pci_irq_vector
				  (pf->pdev, RVU_PF_INT_VEC_VFME1),
				  otx2_pf_me_intr_handler, 0, irq_name, pf);
				"RVUPF: IRQ registration failed for ME1\n");

		irq_name = &hw->irq_name[RVU_PF_INT_VEC_VFFLR1 * NAME_SIZE];
		snprintf(irq_name, NAME_SIZE, "RVUPF%d_FLR1",
			 rvu_get_pf(pf->pcifunc));
		ret = request_irq(pci_irq_vector
				  (pf->pdev, RVU_PF_INT_VEC_VFFLR1),
				  otx2_pf_flr_intr_handler, 0, irq_name, pf);
				"RVUPF: IRQ registration failed for FLR1\n");

	/* Enable ME interrupt for all VFs */
	otx2_write64(pf, RVU_PF_VFME_INTX(0), INTR_MASK(numvfs));
	otx2_write64(pf, RVU_PF_VFME_INT_ENA_W1SX(0), INTR_MASK(numvfs));

	/* Enable FLR interrupt for all VFs */
	otx2_write64(pf, RVU_PF_VFFLR_INTX(0), INTR_MASK(numvfs));
	otx2_write64(pf, RVU_PF_VFFLR_INT_ENA_W1SX(0), INTR_MASK(numvfs));

		otx2_write64(pf, RVU_PF_VFME_INTX(1), INTR_MASK(numvfs));
		otx2_write64(pf, RVU_PF_VFME_INT_ENA_W1SX(1),

		otx2_write64(pf, RVU_PF_VFFLR_INTX(1), INTR_MASK(numvfs));
		otx2_write64(pf, RVU_PF_VFFLR_INT_ENA_W1SX(1),

static int otx2_pf_flr_init(struct otx2_nic *pf, int num_vfs)
	pf->flr_wq = alloc_ordered_workqueue("otx2_pf_flr_wq", WQ_HIGHPRI);

	pf->flr_wrk = devm_kcalloc(pf->dev, num_vfs,
				   sizeof(struct flr_work), GFP_KERNEL);
		destroy_workqueue(pf->flr_wq);

	for (vf = 0; vf < num_vfs; vf++) {
		pf->flr_wrk[vf].pf = pf;
		INIT_WORK(&pf->flr_wrk[vf].work, otx2_flr_handler);
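
/* Note (sketch, not in the driver): otx2_flr_handler() recovers its VF index
 * purely by pointer arithmetic, vf = flrwork - pf->flr_wrk, which is valid
 * only because flr_wrk is allocated above as one contiguous array with one
 * entry per VF.
 */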
static void otx2_queue_work(struct mbox *mw, struct workqueue_struct *mbox_wq,
			    int first, int mdevs, u64 intr, int type)
	struct otx2_mbox_dev *mdev;
	struct otx2_mbox *mbox;
	struct mbox_hdr *hdr;

	for (i = first; i < mdevs; i++) {
		if (!(intr & BIT_ULL(i - first)))

		mdev = &mbox->dev[i];
		if (type == TYPE_PFAF)
			otx2_sync_mbox_bbuf(mbox, i);
		hdr = mdev->mbase + mbox->rx_start;
		/* The hdr->num_msgs is set to zero immediately in the interrupt
		 * handler to ensure that it holds a correct value next time
		 * when the interrupt handler is called.
		 * pf->mbox.num_msgs holds the data for use in pfaf_mbox_handler
		 * and pf->mbox.up_num_msgs holds the data for use in
		 * pfaf_mbox_up_handler.
			mw[i].num_msgs = hdr->num_msgs;

			if (type == TYPE_PFAF)
				memset(mbox->hwbase + mbox->rx_start, 0,
				       ALIGN(sizeof(struct mbox_hdr),

			queue_work(mbox_wq, &mw[i].mbox_wrk);

		mdev = &mbox->dev[i];
		if (type == TYPE_PFAF)
			otx2_sync_mbox_bbuf(mbox, i);
		hdr = mdev->mbase + mbox->rx_start;
			mw[i].up_num_msgs = hdr->num_msgs;

			if (type == TYPE_PFAF)
				memset(mbox->hwbase + mbox->rx_start, 0,
				       ALIGN(sizeof(struct mbox_hdr),

			queue_work(mbox_wq, &mw[i].mbox_up_wrk);

static void otx2_forward_msg_pfvf(struct otx2_mbox_dev *mdev,
				  struct otx2_mbox *pfvf_mbox, void *bbuf_base,
	struct otx2_mbox_dev *src_mdev = mdev;

	/* Msgs are already copied, trigger VF's mbox irq */
	offset = pfvf_mbox->trigger | (devid << pfvf_mbox->tr_shift);
	writeq(1, (void __iomem *)pfvf_mbox->reg_base + offset);

	/* Restore VF's mbox bounce buffer region address */
	src_mdev->mbase = bbuf_base;

static int otx2_forward_vf_mbox_msgs(struct otx2_nic *pf,
				     struct otx2_mbox *src_mbox,
				     int dir, int vf, int num_msgs)
	struct otx2_mbox_dev *src_mdev, *dst_mdev;
	struct mbox_hdr *mbox_hdr;
	struct mbox_hdr *req_hdr;
	struct mbox *dst_mbox;

	if (dir == MBOX_DIR_PFAF) {
		/* Set VF's mailbox memory as PF's bounce buffer memory, so
		 * that explicit copying of VF's msgs to PF=>AF mbox region
		 * and AF=>PF responses to VF's mbox region can be avoided.
		src_mdev = &src_mbox->dev[vf];
		mbox_hdr = src_mbox->hwbase +
			   src_mbox->rx_start + (vf * MBOX_SIZE);

		dst_mbox = &pf->mbox;
		dst_size = dst_mbox->mbox.tx_size -
			   ALIGN(sizeof(*mbox_hdr), MBOX_MSG_ALIGN);
		/* Check if msgs fit into destination area and have a valid size */
		if (mbox_hdr->msg_size > dst_size || !mbox_hdr->msg_size)

		dst_mdev = &dst_mbox->mbox.dev[0];

		mutex_lock(&pf->mbox.lock);
		dst_mdev->mbase = src_mdev->mbase;
		dst_mdev->msg_size = mbox_hdr->msg_size;
		dst_mdev->num_msgs = num_msgs;
		err = otx2_sync_mbox_msg(dst_mbox);
		/* Error code -EIO indicates a communication failure with the
		 * AF. All other error codes mean that the AF processed the
		 * VF's messages and set the error codes in the response
		 * messages (if any), so simply forward the responses to the VF.
406 "AF not responding to VF%d messages\n", vf);
407 /* restore PF mbase and exit */
408 dst_mdev->mbase = pf->mbox.bbuf_base;
409 mutex_unlock(&pf->mbox.lock);
412 /* At this point, all the VF messages sent to AF are acked
413 * with proper responses and responses are copied to VF
414 * mailbox hence raise interrupt to VF.
416 req_hdr = (struct mbox_hdr *)(dst_mdev->mbase +
417 dst_mbox->mbox.rx_start);
418 req_hdr->num_msgs = num_msgs;
420 otx2_forward_msg_pfvf(dst_mdev, &pf->mbox_pfvf[0].mbox,
421 pf->mbox.bbuf_base, vf);
422 mutex_unlock(&pf->mbox.lock);
423 } else if (dir == MBOX_DIR_PFVF_UP) {
424 src_mdev = &src_mbox->dev[0];
425 mbox_hdr = src_mbox->hwbase + src_mbox->rx_start;
426 req_hdr = (struct mbox_hdr *)(src_mdev->mbase +
428 req_hdr->num_msgs = num_msgs;
430 dst_mbox = &pf->mbox_pfvf[0];
431 dst_size = dst_mbox->mbox_up.tx_size -
432 ALIGN(sizeof(*mbox_hdr), MBOX_MSG_ALIGN);
433 /* Check if msgs fit into destination area */
434 if (mbox_hdr->msg_size > dst_size)
437 dst_mdev = &dst_mbox->mbox_up.dev[vf];
438 dst_mdev->mbase = src_mdev->mbase;
439 dst_mdev->msg_size = mbox_hdr->msg_size;
440 dst_mdev->num_msgs = mbox_hdr->num_msgs;
441 err = otx2_sync_mbox_up_msg(dst_mbox, vf);
444 "VF%d is not responding to mailbox\n", vf);
447 } else if (dir == MBOX_DIR_VFPF_UP) {
448 req_hdr = (struct mbox_hdr *)(src_mbox->dev[0].mbase +
450 req_hdr->num_msgs = num_msgs;
451 otx2_forward_msg_pfvf(&pf->mbox_pfvf->mbox_up.dev[vf],
453 pf->mbox_pfvf[vf].bbuf_base,
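
/* Sketch of the bounce-buffer trick used above for MBOX_DIR_PFAF: the PF
 * temporarily aims its AF-mbox device at the VF's mailbox memory,
 *
 *	dst_mdev->mbase = src_mdev->mbase;	// VF region, not the bounce buffer
 *
 * so VF requests reach the AF and AF responses land directly in the VF's
 * region without an intermediate memcpy; the PF's own bounce buffer
 * (bbuf_base) is restored afterwards in otx2_forward_msg_pfvf().
 */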
static void otx2_pfvf_mbox_handler(struct work_struct *work)
	struct mbox_msghdr *msg = NULL;
	int offset, vf_idx, id, err;
	struct otx2_mbox_dev *mdev;
	struct mbox_hdr *req_hdr;
	struct otx2_mbox *mbox;
	struct mbox *vf_mbox;

	vf_mbox = container_of(work, struct mbox, mbox_wrk);
	vf_idx = vf_mbox - pf->mbox_pfvf;

	mbox = &pf->mbox_pfvf[0].mbox;
	mdev = &mbox->dev[vf_idx];
	req_hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);

	offset = ALIGN(sizeof(*req_hdr), MBOX_MSG_ALIGN);

	for (id = 0; id < vf_mbox->num_msgs; id++) {
		msg = (struct mbox_msghdr *)(mdev->mbase + mbox->rx_start +

		if (msg->sig != OTX2_MBOX_REQ_SIG)
		/* Set the VF's number in each of the msgs */
		msg->pcifunc &= RVU_PFVF_FUNC_MASK;
		msg->pcifunc |= (vf_idx + 1) & RVU_PFVF_FUNC_MASK;
		offset = msg->next_msgoff;

	err = otx2_forward_vf_mbox_msgs(pf, mbox, MBOX_DIR_PFAF, vf_idx,

	otx2_reply_invalid_msg(mbox, vf_idx, 0, msg->id);
	otx2_mbox_msg_send(mbox, vf_idx);

static void otx2_pfvf_mbox_up_handler(struct work_struct *work)
	struct mbox *vf_mbox = container_of(work, struct mbox, mbox_up_wrk);
	struct otx2_nic *pf = vf_mbox->pfvf;
	struct otx2_mbox_dev *mdev;
	int offset, id, vf_idx = 0;
	struct mbox_hdr *rsp_hdr;
	struct mbox_msghdr *msg;
	struct otx2_mbox *mbox;

	vf_idx = vf_mbox - pf->mbox_pfvf;
	mbox = &pf->mbox_pfvf[0].mbox_up;
	mdev = &mbox->dev[vf_idx];

	rsp_hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
	offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN);

	for (id = 0; id < vf_mbox->up_num_msgs; id++) {
		msg = mdev->mbase + offset;

		if (msg->id >= MBOX_MSG_MAX) {
				"Mbox msg with unknown ID 0x%x\n", msg->id);

		if (msg->sig != OTX2_MBOX_RSP_SIG) {
				"Mbox msg with wrong signature %x, ID 0x%x\n",

		case MBOX_MSG_CGX_LINK_EVENT:
				"Mbox msg response has err %d, ID 0x%x\n",

		offset = mbox->rx_start + msg->next_msgoff;
		if (mdev->msgs_acked == (vf_mbox->up_num_msgs - 1))
			__otx2_mbox_reset(mbox, 0);

static irqreturn_t otx2_pfvf_mbox_intr_handler(int irq, void *pf_irq)
	struct otx2_nic *pf = (struct otx2_nic *)(pf_irq);
	int vfs = pf->total_vfs;

	mbox = pf->mbox_pfvf;
	/* Handle VF interrupts */
		intr = otx2_read64(pf, RVU_PF_VFPF_MBOX_INTX(1));
		otx2_write64(pf, RVU_PF_VFPF_MBOX_INTX(1), intr);
		otx2_queue_work(mbox, pf->mbox_pfvf_wq, 64, vfs, intr,
			trace_otx2_msg_interrupt(mbox->mbox.pdev, "VF(s) to PF", intr);

	intr = otx2_read64(pf, RVU_PF_VFPF_MBOX_INTX(0));
	otx2_write64(pf, RVU_PF_VFPF_MBOX_INTX(0), intr);

	otx2_queue_work(mbox, pf->mbox_pfvf_wq, 0, vfs, intr, TYPE_PFVF);

		trace_otx2_msg_interrupt(mbox->mbox.pdev, "VF(s) to PF", intr);

static int otx2_pfvf_mbox_init(struct otx2_nic *pf, int numvfs)
	void __iomem *hwbase;

	pf->mbox_pfvf = devm_kcalloc(&pf->pdev->dev, numvfs,
				     sizeof(struct mbox), GFP_KERNEL);

	pf->mbox_pfvf_wq = alloc_ordered_workqueue("otx2_pfvf_mailbox",
						   WQ_HIGHPRI | WQ_MEM_RECLAIM);
	if (!pf->mbox_pfvf_wq)

	/* On CN10K platform, PF <-> VF mailbox region follows after
	 * PF <-> AF mailbox region.
	if (test_bit(CN10K_MBOX, &pf->hw.cap_flag))
		base = pci_resource_start(pf->pdev, PCI_MBOX_BAR_NUM) +
		base = readq((void __iomem *)((u64)pf->reg_base +
			     RVU_PF_VF_BAR4_ADDR));

	hwbase = ioremap_wc(base, MBOX_SIZE * pf->total_vfs);

	mbox = &pf->mbox_pfvf[0];
	err = otx2_mbox_init(&mbox->mbox, hwbase, pf->pdev, pf->reg_base,
			     MBOX_DIR_PFVF, numvfs);

	err = otx2_mbox_init(&mbox->mbox_up, hwbase, pf->pdev, pf->reg_base,
			     MBOX_DIR_PFVF_UP, numvfs);

	for (vf = 0; vf < numvfs; vf++) {
		INIT_WORK(&mbox->mbox_wrk, otx2_pfvf_mbox_handler);
		INIT_WORK(&mbox->mbox_up_wrk, otx2_pfvf_mbox_up_handler);

	destroy_workqueue(pf->mbox_pfvf_wq);

static void otx2_pfvf_mbox_destroy(struct otx2_nic *pf)
	struct mbox *mbox = &pf->mbox_pfvf[0];

	if (pf->mbox_pfvf_wq) {
		destroy_workqueue(pf->mbox_pfvf_wq);
		pf->mbox_pfvf_wq = NULL;

	if (mbox->mbox.hwbase)
		iounmap(mbox->mbox.hwbase);

	otx2_mbox_destroy(&mbox->mbox);

static void otx2_enable_pfvf_mbox_intr(struct otx2_nic *pf, int numvfs)
	/* Clear PF <=> VF mailbox IRQ */
	otx2_write64(pf, RVU_PF_VFPF_MBOX_INTX(0), ~0ull);
	otx2_write64(pf, RVU_PF_VFPF_MBOX_INTX(1), ~0ull);

	/* Enable PF <=> VF mailbox IRQ */
	otx2_write64(pf, RVU_PF_VFPF_MBOX_INT_ENA_W1SX(0), INTR_MASK(numvfs));
		otx2_write64(pf, RVU_PF_VFPF_MBOX_INT_ENA_W1SX(1),

static void otx2_disable_pfvf_mbox_intr(struct otx2_nic *pf, int numvfs)
	/* Disable PF <=> VF mailbox IRQ */
	otx2_write64(pf, RVU_PF_VFPF_MBOX_INT_ENA_W1CX(0), ~0ull);
	otx2_write64(pf, RVU_PF_VFPF_MBOX_INT_ENA_W1CX(1), ~0ull);

	otx2_write64(pf, RVU_PF_VFPF_MBOX_INTX(0), ~0ull);
	vector = pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFPF_MBOX0);
	free_irq(vector, pf);

		otx2_write64(pf, RVU_PF_VFPF_MBOX_INTX(1), ~0ull);
		vector = pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFPF_MBOX1);
		free_irq(vector, pf);

static int otx2_register_pfvf_mbox_intr(struct otx2_nic *pf, int numvfs)
	struct otx2_hw *hw = &pf->hw;

	/* Register MBOX0 interrupt handler */
	irq_name = &hw->irq_name[RVU_PF_INT_VEC_VFPF_MBOX0 * NAME_SIZE];
		snprintf(irq_name, NAME_SIZE,
			 "RVUPF%d_VF Mbox0", rvu_get_pf(pf->pcifunc));
		snprintf(irq_name, NAME_SIZE, "RVUPF_VF Mbox0");
	err = request_irq(pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFPF_MBOX0),
			  otx2_pfvf_mbox_intr_handler, 0, irq_name, pf);
			"RVUPF: IRQ registration failed for PFVF mbox0 irq\n");

	/* Register MBOX1 interrupt handler */
	irq_name = &hw->irq_name[RVU_PF_INT_VEC_VFPF_MBOX1 * NAME_SIZE];
		snprintf(irq_name, NAME_SIZE,
			 "RVUPF%d_VF Mbox1", rvu_get_pf(pf->pcifunc));
		snprintf(irq_name, NAME_SIZE, "RVUPF_VF Mbox1");
	err = request_irq(pci_irq_vector(pf->pdev,
					 RVU_PF_INT_VEC_VFPF_MBOX1),
			  otx2_pfvf_mbox_intr_handler,
			"RVUPF: IRQ registration failed for PFVF mbox1 irq\n");

	otx2_enable_pfvf_mbox_intr(pf, numvfs);

static void otx2_process_pfaf_mbox_msg(struct otx2_nic *pf,
				       struct mbox_msghdr *msg)
	if (msg->id >= MBOX_MSG_MAX) {
			"Mbox msg with unknown ID 0x%x\n", msg->id);

	if (msg->sig != OTX2_MBOX_RSP_SIG) {
			"Mbox msg with wrong signature %x, ID 0x%x\n",
	/* message response headed to a VF */
	devid = msg->pcifunc & RVU_PFVF_FUNC_MASK;
		struct otx2_vf_config *config = &pf->vf_configs[devid - 1];
		struct delayed_work *dwork;

		case MBOX_MSG_NIX_LF_START_RX:
			config->intf_down = false;
			dwork = &config->link_event_work;
			schedule_delayed_work(dwork, msecs_to_jiffies(100));
		case MBOX_MSG_NIX_LF_STOP_RX:
			config->intf_down = true;

		pf->pcifunc = msg->pcifunc;
	case MBOX_MSG_MSIX_OFFSET:
		mbox_handler_msix_offset(pf, (struct msix_offset_rsp *)msg);
	case MBOX_MSG_NPA_LF_ALLOC:
		mbox_handler_npa_lf_alloc(pf, (struct npa_lf_alloc_rsp *)msg);
	case MBOX_MSG_NIX_LF_ALLOC:
		mbox_handler_nix_lf_alloc(pf, (struct nix_lf_alloc_rsp *)msg);
	case MBOX_MSG_NIX_BP_ENABLE:
		mbox_handler_nix_bp_enable(pf, (struct nix_bp_cfg_rsp *)msg);
	case MBOX_MSG_CGX_STATS:
		mbox_handler_cgx_stats(pf, (struct cgx_stats_rsp *)msg);
	case MBOX_MSG_CGX_FEC_STATS:
		mbox_handler_cgx_fec_stats(pf, (struct cgx_fec_stats_rsp *)msg);
			"Mbox msg response has err %d, ID 0x%x\n",

static void otx2_pfaf_mbox_handler(struct work_struct *work)
	struct otx2_mbox_dev *mdev;
	struct mbox_hdr *rsp_hdr;
	struct mbox_msghdr *msg;
	struct otx2_mbox *mbox;
	struct mbox *af_mbox;

	af_mbox = container_of(work, struct mbox, mbox_wrk);
	mbox = &af_mbox->mbox;
	mdev = &mbox->dev[0];
	rsp_hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);

	offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN);

	for (id = 0; id < af_mbox->num_msgs; id++) {
		msg = (struct mbox_msghdr *)(mdev->mbase + offset);
		otx2_process_pfaf_mbox_msg(pf, msg);
		offset = mbox->rx_start + msg->next_msgoff;
		if (mdev->msgs_acked == (af_mbox->num_msgs - 1))
			__otx2_mbox_reset(mbox, 0);

static void otx2_handle_link_event(struct otx2_nic *pf)
	struct cgx_link_user_info *linfo = &pf->linfo;
	struct net_device *netdev = pf->netdev;

	pr_info("%s NIC Link is %s %d Mbps %s duplex\n", netdev->name,
		linfo->link_up ? "UP" : "DOWN", linfo->speed,
		linfo->full_duplex ? "Full" : "Half");
	if (linfo->link_up) {
		netif_carrier_on(netdev);
		netif_tx_start_all_queues(netdev);
		netif_tx_stop_all_queues(netdev);
		netif_carrier_off(netdev);

int otx2_mbox_up_handler_mcs_intr_notify(struct otx2_nic *pf,
					 struct mcs_intr_info *event,
	cn10k_handle_mcs_event(pf, event);

int otx2_mbox_up_handler_cgx_link_event(struct otx2_nic *pf,
					struct cgx_link_info_msg *msg,
	/* Copy the link info sent by AF */
	pf->linfo = msg->link_info;

	/* notify VFs about link event */
	for (i = 0; i < pci_num_vf(pf->pdev); i++) {
		struct otx2_vf_config *config = &pf->vf_configs[i];
		struct delayed_work *dwork = &config->link_event_work;

		if (config->intf_down)

		schedule_delayed_work(dwork, msecs_to_jiffies(100));

	/* interface has not been fully configured yet */
	if (pf->flags & OTX2_FLAG_INTF_DOWN)

	otx2_handle_link_event(pf);

static int otx2_process_mbox_msg_up(struct otx2_nic *pf,
				    struct mbox_msghdr *req)
	/* Check if valid, if not reply with an invalid msg */
	if (req->sig != OTX2_MBOX_REQ_SIG) {
		otx2_reply_invalid_msg(&pf->mbox.mbox_up, 0, 0, req->id);

#define M(_name, _id, _fn_name, _req_type, _rsp_type)			\
		struct _rsp_type *rsp;					\
									\
		rsp = (struct _rsp_type *)otx2_mbox_alloc_msg(		\
			&pf->mbox.mbox_up, 0,				\
			sizeof(struct _rsp_type));			\
									\
		rsp->hdr.sig = OTX2_MBOX_RSP_SIG;			\
		rsp->hdr.pcifunc = 0;					\
									\
		err = otx2_mbox_up_handler_ ## _fn_name(		\
			pf, (struct _req_type *)req, rsp);		\

	otx2_reply_invalid_msg(&pf->mbox.mbox_up, 0, 0, req->id);
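
/* For reference, a rough sketch of what one M() expansion above produces,
 * assuming the usual mbox.h naming for MBOX_MSG_CGX_LINK_EVENT (the exact
 * req/rsp type names here are assumptions, not taken from this file):
 *
 *	case MBOX_MSG_CGX_LINK_EVENT: {
 *		struct msg_rsp *rsp;
 *		int err;
 *
 *		rsp = (struct msg_rsp *)otx2_mbox_alloc_msg(
 *			&pf->mbox.mbox_up, 0, sizeof(struct msg_rsp));
 *		...
 *		rsp->hdr.sig = OTX2_MBOX_RSP_SIG;
 *		err = otx2_mbox_up_handler_cgx_link_event(
 *			pf, (struct cgx_link_info_msg *)req, rsp);
 *		...
 *	}
 */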
static void otx2_pfaf_mbox_up_handler(struct work_struct *work)
	struct mbox *af_mbox = container_of(work, struct mbox, mbox_up_wrk);
	struct otx2_mbox *mbox = &af_mbox->mbox_up;
	struct otx2_mbox_dev *mdev = &mbox->dev[0];
	struct otx2_nic *pf = af_mbox->pfvf;
	int offset, id, devid = 0;
	struct mbox_hdr *rsp_hdr;
	struct mbox_msghdr *msg;

	rsp_hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);

	offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN);

	for (id = 0; id < af_mbox->up_num_msgs; id++) {
		msg = (struct mbox_msghdr *)(mdev->mbase + offset);

		devid = msg->pcifunc & RVU_PFVF_FUNC_MASK;
		/* Skip processing VF's messages */
			otx2_process_mbox_msg_up(pf, msg);
		offset = mbox->rx_start + msg->next_msgoff;

		otx2_forward_vf_mbox_msgs(pf, &pf->mbox.mbox_up,
					  MBOX_DIR_PFVF_UP, devid - 1,
					  af_mbox->up_num_msgs);

	otx2_mbox_msg_send(mbox, 0);

static irqreturn_t otx2_pfaf_mbox_intr_handler(int irq, void *pf_irq)
	struct otx2_nic *pf = (struct otx2_nic *)pf_irq;

	otx2_write64(pf, RVU_PF_INT, BIT_ULL(0));

	trace_otx2_msg_interrupt(mbox->mbox.pdev, "AF to PF", BIT_ULL(0));

	otx2_queue_work(mbox, pf->mbox_wq, 0, 1, 1, TYPE_PFAF);

static void otx2_disable_mbox_intr(struct otx2_nic *pf)
	int vector = pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_AFPF_MBOX);

	/* Disable AF => PF mailbox IRQ */
	otx2_write64(pf, RVU_PF_INT_ENA_W1C, BIT_ULL(0));
	free_irq(vector, pf);

static int otx2_register_mbox_intr(struct otx2_nic *pf, bool probe_af)
	struct otx2_hw *hw = &pf->hw;
	struct msg_req *req;

	/* Register mailbox interrupt handler */
	irq_name = &hw->irq_name[RVU_PF_INT_VEC_AFPF_MBOX * NAME_SIZE];
	snprintf(irq_name, NAME_SIZE, "RVUPFAF Mbox");
	err = request_irq(pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_AFPF_MBOX),
			  otx2_pfaf_mbox_intr_handler, 0, irq_name, pf);
			"RVUPF: IRQ registration failed for PFAF mbox irq\n");

	/* Enable mailbox interrupt for msgs coming from AF.
	 * First clear to avoid spurious interrupts, if any.
	otx2_write64(pf, RVU_PF_INT, BIT_ULL(0));
	otx2_write64(pf, RVU_PF_INT_ENA_W1S, BIT_ULL(0));

	/* Check mailbox communication with AF */
	req = otx2_mbox_alloc_msg_ready(&pf->mbox);
		otx2_disable_mbox_intr(pf);
	err = otx2_sync_mbox_msg(&pf->mbox);
			"AF not responding to mailbox, deferring probe\n");
		otx2_disable_mbox_intr(pf);
		return -EPROBE_DEFER;

static void otx2_pfaf_mbox_destroy(struct otx2_nic *pf)
	struct mbox *mbox = &pf->mbox;

		destroy_workqueue(pf->mbox_wq);

	if (mbox->mbox.hwbase)
		iounmap((void __iomem *)mbox->mbox.hwbase);

	otx2_mbox_destroy(&mbox->mbox);
	otx2_mbox_destroy(&mbox->mbox_up);

static int otx2_pfaf_mbox_init(struct otx2_nic *pf)
	struct mbox *mbox = &pf->mbox;
	void __iomem *hwbase;

	pf->mbox_wq = alloc_ordered_workqueue("otx2_pfaf_mailbox",
					      WQ_HIGHPRI | WQ_MEM_RECLAIM);
	/* Mailbox is a reserved memory (in RAM) region shared between
	 * the admin function (i.e. AF) and this PF; it shouldn't be mapped
	 * as device memory, to allow unaligned accesses.
	hwbase = ioremap_wc(pci_resource_start(pf->pdev, PCI_MBOX_BAR_NUM),
		dev_err(pf->dev, "Unable to map PFAF mailbox region\n");

	err = otx2_mbox_init(&mbox->mbox, hwbase, pf->pdev, pf->reg_base,

	err = otx2_mbox_init(&mbox->mbox_up, hwbase, pf->pdev, pf->reg_base,
			     MBOX_DIR_PFAF_UP, 1);

	err = otx2_mbox_bbuf_init(mbox, pf->pdev);

	INIT_WORK(&mbox->mbox_wrk, otx2_pfaf_mbox_handler);
	INIT_WORK(&mbox->mbox_up_wrk, otx2_pfaf_mbox_up_handler);
	mutex_init(&mbox->lock);

	otx2_pfaf_mbox_destroy(pf);

static int otx2_cgx_config_linkevents(struct otx2_nic *pf, bool enable)
	struct msg_req *msg;

	mutex_lock(&pf->mbox.lock);
		msg = otx2_mbox_alloc_msg_cgx_start_linkevents(&pf->mbox);
		msg = otx2_mbox_alloc_msg_cgx_stop_linkevents(&pf->mbox);
		mutex_unlock(&pf->mbox.lock);

	err = otx2_sync_mbox_msg(&pf->mbox);
	mutex_unlock(&pf->mbox.lock);

static int otx2_cgx_config_loopback(struct otx2_nic *pf, bool enable)
	struct msg_req *msg;

	if (enable && !bitmap_empty(pf->flow_cfg->dmacflt_bmap,
				    pf->flow_cfg->dmacflt_max_flows))
		netdev_warn(pf->netdev,
			    "CGX/RPM internal loopback might not work as DMAC filters are active\n");

	mutex_lock(&pf->mbox.lock);
		msg = otx2_mbox_alloc_msg_cgx_intlbk_enable(&pf->mbox);
		msg = otx2_mbox_alloc_msg_cgx_intlbk_disable(&pf->mbox);
		mutex_unlock(&pf->mbox.lock);

	err = otx2_sync_mbox_msg(&pf->mbox);
	mutex_unlock(&pf->mbox.lock);

int otx2_set_real_num_queues(struct net_device *netdev,
			     int tx_queues, int rx_queues)
	err = netif_set_real_num_tx_queues(netdev, tx_queues);
			   "Failed to set no of Tx queues: %d\n", tx_queues);

	err = netif_set_real_num_rx_queues(netdev, rx_queues);
			   "Failed to set no of Rx queues: %d\n", rx_queues);
EXPORT_SYMBOL(otx2_set_real_num_queues);

static char *nix_sqoperr_e_str[NIX_SQOPERR_MAX] = {
	"NIX_SQOPERR_CTX_FAULT",
	"NIX_SQOPERR_CTX_POISON",
	"NIX_SQOPERR_DISABLED",
	"NIX_SQOPERR_SIZE_ERR",
	"NIX_SQOPERR_OFLOW",
	"NIX_SQOPERR_SQB_NULL",
	"NIX_SQOPERR_SQB_FAULT",
	"NIX_SQOPERR_SQE_SZ_ZERO",

static char *nix_mnqerr_e_str[NIX_MNQERR_MAX] = {
	"NIX_MNQERR_SQ_CTX_FAULT",
	"NIX_MNQERR_SQ_CTX_POISON",
	"NIX_MNQERR_SQB_FAULT",
	"NIX_MNQERR_SQB_POISON",
	"NIX_MNQERR_TOTAL_ERR",
	"NIX_MNQERR_LSO_ERR",
	"NIX_MNQERR_CQ_QUERY_ERR",
	"NIX_MNQERR_MAX_SQE_SIZE_ERR",
	"NIX_MNQERR_MAXLEN_ERR",
	"NIX_MNQERR_SQE_SIZEM1_ZERO",

static char *nix_snd_status_e_str[NIX_SND_STATUS_MAX] = {
	[NIX_SND_STATUS_GOOD] = "NIX_SND_STATUS_GOOD",
	[NIX_SND_STATUS_SQ_CTX_FAULT] = "NIX_SND_STATUS_SQ_CTX_FAULT",
	[NIX_SND_STATUS_SQ_CTX_POISON] = "NIX_SND_STATUS_SQ_CTX_POISON",
	[NIX_SND_STATUS_SQB_FAULT] = "NIX_SND_STATUS_SQB_FAULT",
	[NIX_SND_STATUS_SQB_POISON] = "NIX_SND_STATUS_SQB_POISON",
	[NIX_SND_STATUS_HDR_ERR] = "NIX_SND_STATUS_HDR_ERR",
	[NIX_SND_STATUS_EXT_ERR] = "NIX_SND_STATUS_EXT_ERR",
	[NIX_SND_STATUS_JUMP_FAULT] = "NIX_SND_STATUS_JUMP_FAULT",
	[NIX_SND_STATUS_JUMP_POISON] = "NIX_SND_STATUS_JUMP_POISON",
	[NIX_SND_STATUS_CRC_ERR] = "NIX_SND_STATUS_CRC_ERR",
	[NIX_SND_STATUS_IMM_ERR] = "NIX_SND_STATUS_IMM_ERR",
	[NIX_SND_STATUS_SG_ERR] = "NIX_SND_STATUS_SG_ERR",
	[NIX_SND_STATUS_MEM_ERR] = "NIX_SND_STATUS_MEM_ERR",
	[NIX_SND_STATUS_INVALID_SUBDC] = "NIX_SND_STATUS_INVALID_SUBDC",
	[NIX_SND_STATUS_SUBDC_ORDER_ERR] = "NIX_SND_STATUS_SUBDC_ORDER_ERR",
	[NIX_SND_STATUS_DATA_FAULT] = "NIX_SND_STATUS_DATA_FAULT",
	[NIX_SND_STATUS_DATA_POISON] = "NIX_SND_STATUS_DATA_POISON",
	[NIX_SND_STATUS_NPC_DROP_ACTION] = "NIX_SND_STATUS_NPC_DROP_ACTION",
	[NIX_SND_STATUS_LOCK_VIOL] = "NIX_SND_STATUS_LOCK_VIOL",
	[NIX_SND_STATUS_NPC_UCAST_CHAN_ERR] = "NIX_SND_STAT_NPC_UCAST_CHAN_ERR",
	[NIX_SND_STATUS_NPC_MCAST_CHAN_ERR] = "NIX_SND_STAT_NPC_MCAST_CHAN_ERR",
	[NIX_SND_STATUS_NPC_MCAST_ABORT] = "NIX_SND_STATUS_NPC_MCAST_ABORT",
	[NIX_SND_STATUS_NPC_VTAG_PTR_ERR] = "NIX_SND_STATUS_NPC_VTAG_PTR_ERR",
	[NIX_SND_STATUS_NPC_VTAG_SIZE_ERR] = "NIX_SND_STATUS_NPC_VTAG_SIZE_ERR",
	[NIX_SND_STATUS_SEND_MEM_FAULT] = "NIX_SND_STATUS_SEND_MEM_FAULT",
	[NIX_SND_STATUS_SEND_STATS_ERR] = "NIX_SND_STATUS_SEND_STATS_ERR",

static irqreturn_t otx2_q_intr_handler(int irq, void *data)
	struct otx2_nic *pf = data;
	struct otx2_snd_queue *sq;

	for (qidx = 0; qidx < pf->qset.cq_cnt; qidx++) {
		ptr = otx2_get_regaddr(pf, NIX_LF_CQ_OP_INT);
		val = otx2_atomic64_add((qidx << 44), ptr);

		otx2_write64(pf, NIX_LF_CQ_OP_INT, (qidx << 44) |
			     (val & NIX_CQERRINT_BITS));
		if (!(val & (NIX_CQERRINT_BITS | BIT_ULL(42))))

		if (val & BIT_ULL(42)) {
			netdev_err(pf->netdev,
				   "CQ%lld: error reading NIX_LF_CQ_OP_INT, NIX_LF_ERR_INT 0x%llx\n",
				   qidx, otx2_read64(pf, NIX_LF_ERR_INT));
			if (val & BIT_ULL(NIX_CQERRINT_DOOR_ERR))
				netdev_err(pf->netdev, "CQ%lld: Doorbell error",
			if (val & BIT_ULL(NIX_CQERRINT_CQE_FAULT))
				netdev_err(pf->netdev,
					   "CQ%lld: Memory fault on CQE write to LLC/DRAM",

		schedule_work(&pf->reset_task);

	for (qidx = 0; qidx < otx2_get_total_tx_queues(pf); qidx++) {
		u64 sq_op_err_dbg, mnq_err_dbg, snd_err_dbg;
		u8 sq_op_err_code, mnq_err_code, snd_err_code;

		sq = &pf->qset.sq[qidx];
		/* The debug registers below capture the first error seen in
		 * each of them. We don't have to check against the SQ qid as
		 * these are fatal errors.
		ptr = otx2_get_regaddr(pf, NIX_LF_SQ_OP_INT);
		val = otx2_atomic64_add((qidx << 44), ptr);
		otx2_write64(pf, NIX_LF_SQ_OP_INT, (qidx << 44) |
			     (val & NIX_SQINT_BITS));

		if (val & BIT_ULL(42)) {
			netdev_err(pf->netdev,
				   "SQ%lld: error reading NIX_LF_SQ_OP_INT, NIX_LF_ERR_INT 0x%llx\n",
				   qidx, otx2_read64(pf, NIX_LF_ERR_INT));

		sq_op_err_dbg = otx2_read64(pf, NIX_LF_SQ_OP_ERR_DBG);
		if (!(sq_op_err_dbg & BIT(44)))
			goto chk_mnq_err_dbg;

		sq_op_err_code = FIELD_GET(GENMASK(7, 0), sq_op_err_dbg);
		netdev_err(pf->netdev,
			   "SQ%lld: NIX_LF_SQ_OP_ERR_DBG(0x%llx) err=%s(%#x)\n",
			   qidx, sq_op_err_dbg,
			   nix_sqoperr_e_str[sq_op_err_code],

		otx2_write64(pf, NIX_LF_SQ_OP_ERR_DBG, BIT_ULL(44));

		if (sq_op_err_code == NIX_SQOPERR_SQB_NULL)
			goto chk_mnq_err_dbg;
		/* Err is not NIX_SQOPERR_SQB_NULL; call the AQ function to read the SQ structure.
		 * TODO: But we are in irq context. How to call mbox functions which may sleep?
		mnq_err_dbg = otx2_read64(pf, NIX_LF_MNQ_ERR_DBG);
		if (!(mnq_err_dbg & BIT(44)))
			goto chk_snd_err_dbg;

		mnq_err_code = FIELD_GET(GENMASK(7, 0), mnq_err_dbg);
		netdev_err(pf->netdev,
			   "SQ%lld: NIX_LF_MNQ_ERR_DBG(0x%llx) err=%s(%#x)\n",
			   qidx, mnq_err_dbg, nix_mnqerr_e_str[mnq_err_code],
		otx2_write64(pf, NIX_LF_MNQ_ERR_DBG, BIT_ULL(44));

		snd_err_dbg = otx2_read64(pf, NIX_LF_SEND_ERR_DBG);
		if (snd_err_dbg & BIT(44)) {
			snd_err_code = FIELD_GET(GENMASK(7, 0), snd_err_dbg);
			netdev_err(pf->netdev,
				   "SQ%lld: NIX_LF_SND_ERR_DBG:0x%llx err=%s(%#x)\n",
				   nix_snd_status_e_str[snd_err_code],
			otx2_write64(pf, NIX_LF_SEND_ERR_DBG, BIT_ULL(44));

		/* Print values and reset */
		if (val & BIT_ULL(NIX_SQINT_SQB_ALLOC_FAIL))
			netdev_err(pf->netdev, "SQ%lld: SQB allocation failed",

		schedule_work(&pf->reset_task);
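
/* Note (sketch): the NIX_LF_*_OP_INT accesses above use the LF's atomic-op
 * window: otx2_atomic64_add((qidx << 44), ptr) issues an atomic add whose
 * upper bits select queue 'qidx', and the returned value is that queue's
 * interrupt status word, so a single access both addresses and reads the
 * queue.
 */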
static irqreturn_t otx2_cq_intr_handler(int irq, void *cq_irq)
	struct otx2_cq_poll *cq_poll = (struct otx2_cq_poll *)cq_irq;
	struct otx2_nic *pf = (struct otx2_nic *)cq_poll->dev;
	int qidx = cq_poll->cint_idx;
	/* Disable interrupts.
	 *
	 * Completion interrupts behave in a level-triggered fashion, and
	 * hence have to be cleared only after they are serviced.
	otx2_write64(pf, NIX_LF_CINTX_ENA_W1C(qidx), BIT_ULL(0));

	napi_schedule_irqoff(&cq_poll->napi);

static void otx2_disable_napi(struct otx2_nic *pf)
	struct otx2_qset *qset = &pf->qset;
	struct otx2_cq_poll *cq_poll;

	for (qidx = 0; qidx < pf->hw.cint_cnt; qidx++) {
		cq_poll = &qset->napi[qidx];
		cancel_work_sync(&cq_poll->dim.work);
		napi_disable(&cq_poll->napi);
		netif_napi_del(&cq_poll->napi);

static void otx2_free_cq_res(struct otx2_nic *pf)
	struct otx2_qset *qset = &pf->qset;
	struct otx2_cq_queue *cq;

	otx2_ctx_disable(&pf->mbox, NIX_AQ_CTYPE_CQ, false);
	for (qidx = 0; qidx < qset->cq_cnt; qidx++) {
		cq = &qset->cq[qidx];
		qmem_free(pf->dev, cq->cqe);

static void otx2_free_sq_res(struct otx2_nic *pf)
	struct otx2_qset *qset = &pf->qset;
	struct otx2_snd_queue *sq;

	otx2_ctx_disable(&pf->mbox, NIX_AQ_CTYPE_SQ, false);
	/* Free SQB pointers */
	otx2_sq_free_sqbs(pf);
	for (qidx = 0; qidx < otx2_get_total_tx_queues(pf); qidx++) {
		sq = &qset->sq[qidx];
		/* Skip freeing Qos queues if they are not initialized */
		qmem_free(pf->dev, sq->sqe);
		qmem_free(pf->dev, sq->tso_hdrs);
		kfree(sq->sqb_ptrs);

static int otx2_get_rbuf_size(struct otx2_nic *pf, int mtu)
	if (pf->hw.rbuf_len)
		return ALIGN(pf->hw.rbuf_len, OTX2_ALIGN) + OTX2_HEAD_ROOM;
	/* The data transferred by NIX to memory consists of the actual packet
	 * plus additional data which has a timestamp and/or EDSA/HIGIG2
	 * headers if the interface is configured in the corresponding modes.
	 * NIX transfers the entire data using 6 segments/buffers and writes
	 * a CQE_RX descriptor with those segment addresses. The first segment
	 * has the additional data prepended to the packet. Software also
	 * reserves a headroom of 128 bytes in each segment. Hence the total
	 * size of memory needed to receive a packet with 'mtu' is:
	 * frame size = mtu + additional data;
	 * memory = frame_size + headroom * 6;
	 * each receive buffer size = memory / 6;
	frame_size = mtu + OTX2_ETH_HLEN + OTX2_HW_TIMESTAMP_LEN;
	total_size = frame_size + OTX2_HEAD_ROOM * 6;
	rbuf_size = total_size / 6;

	return ALIGN(rbuf_size, 2048);
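
/* Worked example (sketch, with no user-supplied rbuf_len): for mtu = 1500,
 *	frame_size = 1500 + OTX2_ETH_HLEN + OTX2_HW_TIMESTAMP_LEN;
 *	total_size = frame_size + OTX2_HEAD_ROOM * 6;
 *	rbuf_size  = total_size / 6;
 * and ALIGN(rbuf_size, 2048) then rounds the per-segment buffer up to a
 * 2048-byte multiple, since NIX spreads one frame across up to six such
 * buffers.
 */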
static int otx2_init_hw_resources(struct otx2_nic *pf)
	struct nix_lf_free_req *free_req;
	struct mbox *mbox = &pf->mbox;
	struct otx2_hw *hw = &pf->hw;
	struct msg_req *req;

	/* Set required NPA LF's pool counts.
	 * Auras and Pools are used in a 1:1 mapping,
	 * so, aura count = pool count.
	hw->rqpool_cnt = hw->rx_queues;
	hw->sqpool_cnt = otx2_get_total_tx_queues(pf);
	hw->pool_cnt = hw->rqpool_cnt + hw->sqpool_cnt;

	/* Maximum hardware supported transmit length */
	pf->tx_max_pktlen = pf->netdev->max_mtu + OTX2_ETH_HLEN;

	pf->rbsize = otx2_get_rbuf_size(pf, pf->netdev->mtu);

	mutex_lock(&mbox->lock);

	err = otx2_config_npa(pf);

	err = otx2_config_nix(pf);
		goto err_free_npa_lf;

	/* Enable backpressure for CGX mapped PF/VFs */
	if (!is_otx2_lbkvf(pf->pdev))
		otx2_nix_config_bp(pf, true);

	/* Init Auras and pools used by NIX RQ, for free buffer ptrs */
	err = otx2_rq_aura_pool_init(pf);
		mutex_unlock(&mbox->lock);
		goto err_free_nix_lf;

	/* Init Auras and pools used by NIX SQ, for queueing SQEs */
	err = otx2_sq_aura_pool_init(pf);
		mutex_unlock(&mbox->lock);
		goto err_free_rq_ptrs;

	err = otx2_txsch_alloc(pf);
		mutex_unlock(&mbox->lock);
		goto err_free_sq_ptrs;

		err = otx2_pfc_txschq_alloc(pf);
			mutex_unlock(&mbox->lock);
			goto err_free_sq_ptrs;

	err = otx2_config_nix_queues(pf);
		mutex_unlock(&mbox->lock);
		goto err_free_txsch;

	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
		err = otx2_txschq_config(pf, lvl, 0, false);
			mutex_unlock(&mbox->lock);
			goto err_free_nix_queues;

		err = otx2_pfc_txschq_config(pf);
			mutex_unlock(&mbox->lock);
			goto err_free_nix_queues;

	mutex_unlock(&mbox->lock);

err_free_nix_queues:
	otx2_free_sq_res(pf);
	otx2_free_cq_res(pf);
	otx2_ctx_disable(mbox, NIX_AQ_CTYPE_RQ, false);
	otx2_txschq_stop(pf);
	otx2_sq_free_sqbs(pf);
	otx2_free_aura_ptr(pf, AURA_NIX_RQ);
	otx2_ctx_disable(mbox, NPA_AQ_CTYPE_POOL, true);
	otx2_ctx_disable(mbox, NPA_AQ_CTYPE_AURA, true);
	otx2_aura_pool_free(pf);
	mutex_lock(&mbox->lock);
	free_req = otx2_mbox_alloc_msg_nix_lf_free(mbox);
		free_req->flags = NIX_LF_DISABLE_FLOWS;
		if (otx2_sync_mbox_msg(mbox))
			dev_err(pf->dev, "%s failed to free nixlf\n", __func__);
	req = otx2_mbox_alloc_msg_npa_lf_free(mbox);
		if (otx2_sync_mbox_msg(mbox))
			dev_err(pf->dev, "%s failed to free npalf\n", __func__);
	mutex_unlock(&mbox->lock);

static void otx2_free_hw_resources(struct otx2_nic *pf)
	struct otx2_qset *qset = &pf->qset;
	struct nix_lf_free_req *free_req;
	struct mbox *mbox = &pf->mbox;
	struct otx2_cq_queue *cq;
	struct otx2_pool *pool;
	struct msg_req *req;
	/* Ensure all SQEs are processed */
	/* Stop transmission */
	otx2_txschq_stop(pf);

		otx2_pfc_txschq_stop(pf);

	otx2_clean_qos_queues(pf);

	mutex_lock(&mbox->lock);
	/* Disable backpressure */
	if (!(pf->pcifunc & RVU_PFVF_FUNC_MASK))
		otx2_nix_config_bp(pf, false);
	mutex_unlock(&mbox->lock);

	otx2_ctx_disable(mbox, NIX_AQ_CTYPE_RQ, false);

	/* Dequeue all CQEs */
	for (qidx = 0; qidx < qset->cq_cnt; qidx++) {
		cq = &qset->cq[qidx];
		if (cq->cq_type == CQ_RX)
			otx2_cleanup_rx_cqes(pf, cq, qidx);
			otx2_cleanup_tx_cqes(pf, cq);
	otx2_free_pending_sqe(pf);

	otx2_free_sq_res(pf);

	/* Free RQ buffer pointers */
	otx2_free_aura_ptr(pf, AURA_NIX_RQ);

	for (qidx = 0; qidx < pf->hw.rx_queues; qidx++) {
		pool_id = otx2_get_pool_idx(pf, AURA_NIX_RQ, qidx);
		pool = &pf->qset.pool[pool_id];
		page_pool_destroy(pool->page_pool);
		pool->page_pool = NULL;

	otx2_free_cq_res(pf);

	/* Free all ingress bandwidth profiles allocated */
	cn10k_free_all_ipolicers(pf);

	mutex_lock(&mbox->lock);
	free_req = otx2_mbox_alloc_msg_nix_lf_free(mbox);
		free_req->flags = NIX_LF_DISABLE_FLOWS;
		if (!(pf->flags & OTX2_FLAG_PF_SHUTDOWN))
			free_req->flags |= NIX_LF_DONT_FREE_TX_VTAG;
		if (otx2_sync_mbox_msg(mbox))
			dev_err(pf->dev, "%s failed to free nixlf\n", __func__);
	mutex_unlock(&mbox->lock);

	/* Disable NPA Pool and Aura hw context */
	otx2_ctx_disable(mbox, NPA_AQ_CTYPE_POOL, true);
	otx2_ctx_disable(mbox, NPA_AQ_CTYPE_AURA, true);
	otx2_aura_pool_free(pf);

	mutex_lock(&mbox->lock);
	req = otx2_mbox_alloc_msg_npa_lf_free(mbox);
		if (otx2_sync_mbox_msg(mbox))
			dev_err(pf->dev, "%s failed to free npalf\n", __func__);
	mutex_unlock(&mbox->lock);

static bool otx2_promisc_use_mce_list(struct otx2_nic *pfvf)
	/* The AF driver will determine whether to allow the VF netdev or not */
	if (is_otx2_vf(pfvf->pcifunc))

	/* check if there are any trusted VFs associated with the PF netdev */
	for (vf = 0; vf < pci_num_vf(pfvf->pdev); vf++)
		if (pfvf->vf_configs[vf].trusted)

static void otx2_do_set_rx_mode(struct otx2_nic *pf)
	struct net_device *netdev = pf->netdev;
	struct nix_rx_mode *req;
	bool promisc = false;

	if (!(netdev->flags & IFF_UP))

	if ((netdev->flags & IFF_PROMISC) ||
	    (netdev_uc_count(netdev) > OTX2_MAX_UNICAST_FLOWS)) {

	/* Write unicast address to mcam entries or del from mcam */
	if (!promisc && netdev->priv_flags & IFF_UNICAST_FLT)
		__dev_uc_sync(netdev, otx2_add_macfilter, otx2_del_macfilter);

	mutex_lock(&pf->mbox.lock);
	req = otx2_mbox_alloc_msg_nix_set_rx_mode(&pf->mbox);
		mutex_unlock(&pf->mbox.lock);

	req->mode = NIX_RX_MODE_UCAST;
		req->mode |= NIX_RX_MODE_PROMISC;
	if (netdev->flags & (IFF_ALLMULTI | IFF_MULTICAST))
		req->mode |= NIX_RX_MODE_ALLMULTI;

	if (otx2_promisc_use_mce_list(pf))
		req->mode |= NIX_RX_MODE_USE_MCE;

	otx2_sync_mbox_msg(&pf->mbox);
	mutex_unlock(&pf->mbox.lock);

static void otx2_set_irq_coalesce(struct otx2_nic *pfvf)
	for (cint = 0; cint < pfvf->hw.cint_cnt; cint++)
		otx2_config_irq_coalescing(pfvf, cint);

static void otx2_dim_work(struct work_struct *w)
	struct dim_cq_moder cur_moder;
	struct otx2_cq_poll *cq_poll;
	struct otx2_nic *pfvf;

	dim = container_of(w, struct dim, work);
	cur_moder = net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
	cq_poll = container_of(dim, struct otx2_cq_poll, dim);
	pfvf = (struct otx2_nic *)cq_poll->dev;
	pfvf->hw.cq_time_wait = (cur_moder.usec > CQ_TIMER_THRESH_MAX) ?
		CQ_TIMER_THRESH_MAX : cur_moder.usec;
	pfvf->hw.cq_ecount_wait = (cur_moder.pkts > NAPI_POLL_WEIGHT) ?
		NAPI_POLL_WEIGHT : cur_moder.pkts;
	otx2_set_irq_coalesce(pfvf);
	dim->state = DIM_START_MEASURE;
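
/* Equivalent clamping sketch using min_t() (behaviorally the same as the
 * ternaries above; the exact field widths are assumptions here):
 *
 *	pfvf->hw.cq_time_wait = min_t(u64, cur_moder.usec, CQ_TIMER_THRESH_MAX);
 *	pfvf->hw.cq_ecount_wait = min_t(u64, cur_moder.pkts, NAPI_POLL_WEIGHT);
 */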
int otx2_open(struct net_device *netdev)
	struct otx2_nic *pf = netdev_priv(netdev);
	struct otx2_cq_poll *cq_poll = NULL;
	struct otx2_qset *qset = &pf->qset;
	int err = 0, qidx, vec;

	netif_carrier_off(netdev);
	/* RQs and SQs are mapped to different CQs,
	 * so find out the max CQ IRQs (i.e. CINTs) needed.
	pf->hw.cint_cnt = max3(pf->hw.rx_queues, pf->hw.tx_queues,
			       pf->hw.tc_tx_queues);

	pf->qset.cq_cnt = pf->hw.rx_queues + otx2_get_total_tx_queues(pf);

	qset->napi = kcalloc(pf->hw.cint_cnt, sizeof(*cq_poll), GFP_KERNEL);

	qset->rqe_cnt = qset->rqe_cnt ? qset->rqe_cnt : Q_COUNT(Q_SIZE_256);
	qset->sqe_cnt = qset->sqe_cnt ? qset->sqe_cnt : Q_COUNT(Q_SIZE_4K);

	qset->cq = kcalloc(pf->qset.cq_cnt,
			   sizeof(struct otx2_cq_queue), GFP_KERNEL);

	qset->sq = kcalloc(otx2_get_total_tx_queues(pf),
			   sizeof(struct otx2_snd_queue), GFP_KERNEL);

	qset->rq = kcalloc(pf->hw.rx_queues,
			   sizeof(struct otx2_rcv_queue), GFP_KERNEL);

	err = otx2_init_hw_resources(pf);

	/* Register NAPI handler */
	for (qidx = 0; qidx < pf->hw.cint_cnt; qidx++) {
		cq_poll = &qset->napi[qidx];
		cq_poll->cint_idx = qidx;
		/* RQ0 & SQ0 are mapped to CINT0 and so on..
		 * 'cq_ids[0]' points to RQ's CQ and
		 * 'cq_ids[1]' points to SQ's CQ and
		 * 'cq_ids[2]' points to XDP's CQ and
		cq_poll->cq_ids[CQ_RX] =
			(qidx < pf->hw.rx_queues) ? qidx : CINT_INVALID_CQ;
		cq_poll->cq_ids[CQ_TX] = (qidx < pf->hw.tx_queues) ?
			qidx + pf->hw.rx_queues : CINT_INVALID_CQ;
			cq_poll->cq_ids[CQ_XDP] = (qidx < pf->hw.xdp_queues) ?
				(qidx + pf->hw.rx_queues +
			cq_poll->cq_ids[CQ_XDP] = CINT_INVALID_CQ;

		cq_poll->cq_ids[CQ_QOS] = (qidx < pf->hw.tc_tx_queues) ?
			(qidx + pf->hw.rx_queues +
			 pf->hw.non_qos_queues) :

		cq_poll->dev = (void *)pf;
		cq_poll->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_CQE;
		INIT_WORK(&cq_poll->dim.work, otx2_dim_work);
		netif_napi_add(netdev, &cq_poll->napi, otx2_napi_handler);
		napi_enable(&cq_poll->napi);

	/* Set maximum frame size allowed in HW */
	err = otx2_hw_set_mtu(pf, netdev->mtu);
		goto err_disable_napi;
	/* Set up segmentation algorithms; if this fails, the offload capability is cleared */
	otx2_setup_segmentation(pf);

	/* Initialize RSS */
	err = otx2_rss_init(pf);
		goto err_disable_napi;

	/* Register Queue IRQ handlers */
	vec = pf->hw.nix_msixoff + NIX_LF_QINT_VEC_START;
	irq_name = &pf->hw.irq_name[vec * NAME_SIZE];

	snprintf(irq_name, NAME_SIZE, "%s-qerr", pf->netdev->name);

	err = request_irq(pci_irq_vector(pf->pdev, vec),
			  otx2_q_intr_handler, 0, irq_name, pf);
			"RVUPF%d: IRQ registration failed for QERR\n",
			rvu_get_pf(pf->pcifunc));
		goto err_disable_napi;

	/* Enable QINT IRQ */
	otx2_write64(pf, NIX_LF_QINTX_ENA_W1S(0), BIT_ULL(0));

	/* Register CQ IRQ handlers */
	vec = pf->hw.nix_msixoff + NIX_LF_CINT_VEC_START;
	for (qidx = 0; qidx < pf->hw.cint_cnt; qidx++) {
		irq_name = &pf->hw.irq_name[vec * NAME_SIZE];

		snprintf(irq_name, NAME_SIZE, "%s-rxtx-%d", pf->netdev->name,

		err = request_irq(pci_irq_vector(pf->pdev, vec),
				  otx2_cq_intr_handler, 0, irq_name,
				"RVUPF%d: IRQ registration failed for CQ%d\n",
				rvu_get_pf(pf->pcifunc), qidx);
			goto err_free_cints;

		otx2_config_irq_coalescing(pf, qidx);

		otx2_write64(pf, NIX_LF_CINTX_INT(qidx), BIT_ULL(0));
		otx2_write64(pf, NIX_LF_CINTX_ENA_W1S(qidx), BIT_ULL(0));

	otx2_set_cints_affinity(pf);

	if (pf->flags & OTX2_FLAG_RX_VLAN_SUPPORT)
		otx2_enable_rxvlan(pf, true);
	/* When reinitializing, re-enable timestamping if it was enabled before */
	if (pf->flags & OTX2_FLAG_TX_TSTAMP_ENABLED) {
		pf->flags &= ~OTX2_FLAG_TX_TSTAMP_ENABLED;
		otx2_config_hw_tx_tstamp(pf, true);
	if (pf->flags & OTX2_FLAG_RX_TSTAMP_ENABLED) {
		pf->flags &= ~OTX2_FLAG_RX_TSTAMP_ENABLED;
		otx2_config_hw_rx_tstamp(pf, true);

	pf->flags &= ~OTX2_FLAG_INTF_DOWN;
	/* 'intf_down' may be checked on any cpu */

	/* Enable QoS configuration before starting tx queues */
	otx2_qos_config_txschq(pf);

	/* we have already received link status notification */
	if (pf->linfo.link_up && !(pf->pcifunc & RVU_PFVF_FUNC_MASK))
		otx2_handle_link_event(pf);

	/* Install DMAC Filters */
	if (pf->flags & OTX2_FLAG_DMACFLTR_SUPPORT)
		otx2_dmacflt_reinstall_flows(pf);

	otx2_tc_apply_ingress_police_rules(pf);

	err = otx2_rxtx_enable(pf, true);
	/* If an mbox communication error happens at this point then the
	 * interface will end up in a state where it is down but the hardware
	 * mcam entries are still enabled to receive packets. Hence disable the
		goto err_disable_rxtx;
		goto err_tx_stop_queues;

	otx2_do_set_rx_mode(pf);

	otx2_rxtx_enable(pf, false);
	netif_tx_stop_all_queues(netdev);
	netif_carrier_off(netdev);
	pf->flags |= OTX2_FLAG_INTF_DOWN;
	otx2_free_cints(pf, qidx);
	vec = pci_irq_vector(pf->pdev,
			     pf->hw.nix_msixoff + NIX_LF_QINT_VEC_START);
	otx2_write64(pf, NIX_LF_QINTX_ENA_W1C(0), BIT_ULL(0));
	otx2_disable_napi(pf);
	otx2_free_hw_resources(pf);
EXPORT_SYMBOL(otx2_open);

int otx2_stop(struct net_device *netdev)
	struct otx2_nic *pf = netdev_priv(netdev);
	struct otx2_cq_poll *cq_poll = NULL;
	struct otx2_qset *qset = &pf->qset;
	struct otx2_rss_info *rss;

	/* If the DOWN flag is set, resources are already freed */
	if (pf->flags & OTX2_FLAG_INTF_DOWN)

	netif_carrier_off(netdev);
	netif_tx_stop_all_queues(netdev);

	pf->flags |= OTX2_FLAG_INTF_DOWN;
	/* 'intf_down' may be checked on any cpu */

	/* First stop packet Rx/Tx */
	otx2_rxtx_enable(pf, false);

	/* Clear RSS enable flag */
	rss = &pf->hw.rss_info;
	rss->enable = false;
	if (!netif_is_rxfh_configured(netdev))
		kfree(rss->rss_ctx[DEFAULT_RSS_CONTEXT_GROUP]);

	/* Cleanup Queue IRQ */
	vec = pci_irq_vector(pf->pdev,
			     pf->hw.nix_msixoff + NIX_LF_QINT_VEC_START);
	otx2_write64(pf, NIX_LF_QINTX_ENA_W1C(0), BIT_ULL(0));

	/* Cleanup CQ NAPI and IRQ */
	vec = pf->hw.nix_msixoff + NIX_LF_CINT_VEC_START;
	for (qidx = 0; qidx < pf->hw.cint_cnt; qidx++) {
		/* Disable interrupt */
		otx2_write64(pf, NIX_LF_CINTX_ENA_W1C(qidx), BIT_ULL(0));

		synchronize_irq(pci_irq_vector(pf->pdev, vec));

		cq_poll = &qset->napi[qidx];
		napi_synchronize(&cq_poll->napi);

	netif_tx_disable(netdev);

	for (wrk = 0; wrk < pf->qset.cq_cnt; wrk++)
		cancel_delayed_work_sync(&pf->refill_wrk[wrk].pool_refill_work);
	devm_kfree(pf->dev, pf->refill_wrk);

	otx2_free_hw_resources(pf);
	otx2_free_cints(pf, pf->hw.cint_cnt);
	otx2_disable_napi(pf);

	for (qidx = 0; qidx < netdev->num_tx_queues; qidx++)
		netdev_tx_reset_queue(netdev_get_tx_queue(netdev, qidx));

	/* Do not clear RQ/SQ ringsize settings */
	memset_startat(qset, 0, sqe_cnt);
EXPORT_SYMBOL(otx2_stop);

static netdev_tx_t otx2_xmit(struct sk_buff *skb, struct net_device *netdev)
	struct otx2_nic *pf = netdev_priv(netdev);
	int qidx = skb_get_queue_mapping(skb);
	struct otx2_snd_queue *sq;
	struct netdev_queue *txq;
	/* XDP SQs are not mapped with TXQs;
	 * advance the qid to derive the correct SQ when QoS queues are present.
	sq_idx = (qidx >= pf->hw.tx_queues) ? (qidx + pf->hw.xdp_queues) : qidx;

	/* Check for minimum and maximum packet length */
	if (skb->len <= ETH_HLEN ||
	    (!skb_shinfo(skb)->gso_size && skb->len > pf->tx_max_pktlen)) {
		return NETDEV_TX_OK;

	sq = &pf->qset.sq[sq_idx];
	txq = netdev_get_tx_queue(netdev, qidx);

	if (!otx2_sq_append_skb(netdev, sq, skb, qidx)) {
		netif_tx_stop_queue(txq);
		/* Check again, in case SQBs got freed up */
		if (((sq->num_sqbs - *sq->aura_fc_addr) * sq->sqe_per_sqb)
			netif_tx_wake_queue(txq);

		return NETDEV_TX_BUSY;

	return NETDEV_TX_OK;
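
/* Sketch of the wake check used above: *sq->aura_fc_addr is the hardware-
 * maintained count of in-use SQBs for this SQ's aura, so the free SQE
 * estimate is
 *	free_sqe = (sq->num_sqbs - *sq->aura_fc_addr) * sq->sqe_per_sqb;
 * and the queue is only woken once this exceeds the worst-case SQE count
 * needed for a single skb.
 */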
static int otx2_qos_select_htb_queue(struct otx2_nic *pf, struct sk_buff *skb,
	if ((TC_H_MAJ(skb->priority) >> 16) == htb_maj_id)
		classid = TC_H_MIN(skb->priority);
		classid = READ_ONCE(pf->qos.defcls);

	return otx2_get_txq_by_classid(pf, classid);

u16 otx2_select_queue(struct net_device *netdev, struct sk_buff *skb,
		      struct net_device *sb_dev)
	struct otx2_nic *pf = netdev_priv(netdev);

	qos_enabled = netdev->real_num_tx_queues > pf->hw.tx_queues;
	if (unlikely(qos_enabled)) {
		/* This smp_load_acquire() pairs with smp_store_release() in
		 * otx2_qos_root_add() called from htb offload root creation
		u16 htb_maj_id = smp_load_acquire(&pf->qos.maj_id);

		if (unlikely(htb_maj_id)) {
			txq = otx2_qos_select_htb_queue(pf, skb, htb_maj_id);

	if (!skb_vlan_tag_present(skb))

	vlan_prio = skb->vlan_tci >> 13;
	if ((vlan_prio > pf->hw.tx_queues - 1) ||
	    !pf->pfc_alloc_status[vlan_prio])

	txq = netdev_pick_tx(netdev, skb, NULL);
	if (unlikely(qos_enabled))
		return txq % pf->hw.tx_queues;

EXPORT_SYMBOL(otx2_select_queue);
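
/* Note (sketch): 'skb->vlan_tci >> 13' above extracts the 3-bit PCP field of
 * the VLAN TCI (laid out as PCP[15:13] | DEI[12] | VID[11:0]); e.g. a TCI of
 * 0x6005 is VLAN 5 with priority 3.
 */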
static netdev_features_t otx2_fix_features(struct net_device *dev,
					   netdev_features_t features)
	if (features & NETIF_F_HW_VLAN_CTAG_RX)
		features |= NETIF_F_HW_VLAN_STAG_RX;
		features &= ~NETIF_F_HW_VLAN_STAG_RX;

static void otx2_set_rx_mode(struct net_device *netdev)
	struct otx2_nic *pf = netdev_priv(netdev);

	queue_work(pf->otx2_wq, &pf->rx_mode_work);

static void otx2_rx_mode_wrk_handler(struct work_struct *work)
	struct otx2_nic *pf = container_of(work, struct otx2_nic, rx_mode_work);

	otx2_do_set_rx_mode(pf);

static int otx2_set_features(struct net_device *netdev,
			     netdev_features_t features)
	netdev_features_t changed = features ^ netdev->features;
	struct otx2_nic *pf = netdev_priv(netdev);

	if ((changed & NETIF_F_LOOPBACK) && netif_running(netdev))
		return otx2_cgx_config_loopback(pf,
						features & NETIF_F_LOOPBACK);

	if ((changed & NETIF_F_HW_VLAN_CTAG_RX) && netif_running(netdev))
		return otx2_enable_rxvlan(pf,
					  features & NETIF_F_HW_VLAN_CTAG_RX);

	return otx2_handle_ntuple_tc_features(netdev, features);

static void otx2_reset_task(struct work_struct *work)
	struct otx2_nic *pf = container_of(work, struct otx2_nic, reset_task);

	if (!netif_running(pf->netdev))

	otx2_stop(pf->netdev);
	otx2_open(pf->netdev);
	netif_trans_update(pf->netdev);

static int otx2_config_hw_rx_tstamp(struct otx2_nic *pfvf, bool enable)
	struct msg_req *req;

	if (pfvf->flags & OTX2_FLAG_RX_TSTAMP_ENABLED && enable)

	mutex_lock(&pfvf->mbox.lock);
		req = otx2_mbox_alloc_msg_cgx_ptp_rx_enable(&pfvf->mbox);
		req = otx2_mbox_alloc_msg_cgx_ptp_rx_disable(&pfvf->mbox);
		mutex_unlock(&pfvf->mbox.lock);

	err = otx2_sync_mbox_msg(&pfvf->mbox);
		mutex_unlock(&pfvf->mbox.lock);
	mutex_unlock(&pfvf->mbox.lock);

		pfvf->flags |= OTX2_FLAG_RX_TSTAMP_ENABLED;
		pfvf->flags &= ~OTX2_FLAG_RX_TSTAMP_ENABLED;

static int otx2_config_hw_tx_tstamp(struct otx2_nic *pfvf, bool enable)
	struct msg_req *req;

	if (pfvf->flags & OTX2_FLAG_TX_TSTAMP_ENABLED && enable)

	mutex_lock(&pfvf->mbox.lock);
		req = otx2_mbox_alloc_msg_nix_lf_ptp_tx_enable(&pfvf->mbox);
		req = otx2_mbox_alloc_msg_nix_lf_ptp_tx_disable(&pfvf->mbox);
		mutex_unlock(&pfvf->mbox.lock);

	err = otx2_sync_mbox_msg(&pfvf->mbox);
		mutex_unlock(&pfvf->mbox.lock);
	mutex_unlock(&pfvf->mbox.lock);

		pfvf->flags |= OTX2_FLAG_TX_TSTAMP_ENABLED;
		pfvf->flags &= ~OTX2_FLAG_TX_TSTAMP_ENABLED;

int otx2_config_hwtstamp(struct net_device *netdev, struct ifreq *ifr)
	struct otx2_nic *pfvf = netdev_priv(netdev);
	struct hwtstamp_config config;

	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))

	switch (config.tx_type) {
	case HWTSTAMP_TX_OFF:
		if (pfvf->flags & OTX2_FLAG_PTP_ONESTEP_SYNC)
			pfvf->flags &= ~OTX2_FLAG_PTP_ONESTEP_SYNC;
		cancel_delayed_work(&pfvf->ptp->synctstamp_work);
		otx2_config_hw_tx_tstamp(pfvf, false);
	case HWTSTAMP_TX_ONESTEP_SYNC:
		if (!test_bit(CN10K_PTP_ONESTEP, &pfvf->hw.cap_flag))
		pfvf->flags |= OTX2_FLAG_PTP_ONESTEP_SYNC;
		schedule_delayed_work(&pfvf->ptp->synctstamp_work,
				      msecs_to_jiffies(500));
	case HWTSTAMP_TX_ON:
		otx2_config_hw_tx_tstamp(pfvf, true);

	switch (config.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		otx2_config_hw_rx_tstamp(pfvf, false);
	case HWTSTAMP_FILTER_ALL:
	case HWTSTAMP_FILTER_SOME:
	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
		otx2_config_hw_rx_tstamp(pfvf, true);
		config.rx_filter = HWTSTAMP_FILTER_ALL;

	memcpy(&pfvf->tstamp, &config, sizeof(config));

	return copy_to_user(ifr->ifr_data, &config,
			    sizeof(config)) ? -EFAULT : 0;
EXPORT_SYMBOL(otx2_config_hwtstamp);

int otx2_ioctl(struct net_device *netdev, struct ifreq *req, int cmd)
	struct otx2_nic *pfvf = netdev_priv(netdev);
	struct hwtstamp_config *cfg = &pfvf->tstamp;

		return otx2_config_hwtstamp(netdev, req);
		return copy_to_user(req->ifr_data, cfg,
				    sizeof(*cfg)) ? -EFAULT : 0;
EXPORT_SYMBOL(otx2_ioctl);

static int otx2_do_set_vf_mac(struct otx2_nic *pf, int vf, const u8 *mac)
	struct npc_install_flow_req *req;

	mutex_lock(&pf->mbox.lock);
	req = otx2_mbox_alloc_msg_npc_install_flow(&pf->mbox);

	ether_addr_copy(req->packet.dmac, mac);
	eth_broadcast_addr((u8 *)&req->mask.dmac);
	req->features = BIT_ULL(NPC_DMAC);
	req->channel = pf->hw.rx_chan_base;
	req->intf = NIX_INTF_RX;
	req->default_rule = 1;
	req->op = NIX_RX_ACTION_DEFAULT;

	err = otx2_sync_mbox_msg(&pf->mbox);
	mutex_unlock(&pf->mbox.lock);

static int otx2_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
	struct otx2_nic *pf = netdev_priv(netdev);
	struct pci_dev *pdev = pf->pdev;
	struct otx2_vf_config *config;

	if (!netif_running(netdev))

	if (vf >= pf->total_vfs)

	if (!is_valid_ether_addr(mac))

	config = &pf->vf_configs[vf];
	ether_addr_copy(config->mac, mac);

	ret = otx2_do_set_vf_mac(pf, vf, mac);
		dev_info(&pdev->dev,
			 "Load/Reload VF driver\n");

static int otx2_do_set_vf_vlan(struct otx2_nic *pf, int vf, u16 vlan, u8 qos,
	struct otx2_flow_config *flow_cfg = pf->flow_cfg;
	struct nix_vtag_config_rsp *vtag_rsp;
	struct npc_delete_flow_req *del_req;
	struct nix_vtag_config *vtag_req;
	struct npc_install_flow_req *req;
	struct otx2_vf_config *config;

	config = &pf->vf_configs[vf];

	if (!vlan && !config->vlan)

	mutex_lock(&pf->mbox.lock);

	/* free old tx vtag entry */
		vtag_req = otx2_mbox_alloc_msg_nix_vtag_cfg(&pf->mbox);
		vtag_req->cfg_type = 0;
		vtag_req->tx.free_vtag0 = 1;
		vtag_req->tx.vtag0_idx = config->tx_vtag_idx;

		err = otx2_sync_mbox_msg(&pf->mbox);

	if (!vlan && config->vlan) {
		del_req = otx2_mbox_alloc_msg_npc_delete_flow(&pf->mbox);
		idx = ((vf * OTX2_PER_VF_VLAN_FLOWS) + OTX2_VF_VLAN_RX_INDEX);
			flow_cfg->def_ent[flow_cfg->vf_vlan_offset + idx];
		err = otx2_sync_mbox_msg(&pf->mbox);

		del_req = otx2_mbox_alloc_msg_npc_delete_flow(&pf->mbox);
		idx = ((vf * OTX2_PER_VF_VLAN_FLOWS) + OTX2_VF_VLAN_TX_INDEX);
			flow_cfg->def_ent[flow_cfg->vf_vlan_offset + idx];
		err = otx2_sync_mbox_msg(&pf->mbox);

	req = otx2_mbox_alloc_msg_npc_install_flow(&pf->mbox);

	idx = ((vf * OTX2_PER_VF_VLAN_FLOWS) + OTX2_VF_VLAN_RX_INDEX);
	req->entry = flow_cfg->def_ent[flow_cfg->vf_vlan_offset + idx];
	req->packet.vlan_tci = htons(vlan);
	req->mask.vlan_tci = htons(VLAN_VID_MASK);
2446 /* af fills the destination mac addr */
2447 eth_broadcast_addr((u8 *)&req->mask.dmac);
2448 req->features = BIT_ULL(NPC_OUTER_VID) | BIT_ULL(NPC_DMAC);
2449 req->channel = pf->hw.rx_chan_base;
2450 req->intf = NIX_INTF_RX;
2452 req->op = NIX_RX_ACTION_DEFAULT;
2453 req->vtag0_valid = true;
2454 req->vtag0_type = NIX_AF_LFX_RX_VTAG_TYPE7;
2457 err = otx2_sync_mbox_msg(&pf->mbox);
2462 vtag_req = otx2_mbox_alloc_msg_nix_vtag_cfg(&pf->mbox);
2468 /* configure tx vtag params */
2469 vtag_req->vtag_size = VTAGSIZE_T4;
2470 vtag_req->cfg_type = 0; /* tx vlan cfg */
2471 vtag_req->tx.cfg_vtag0 = 1;
2472 vtag_req->tx.vtag0 = ((u64)ntohs(proto) << 16) | vlan;
2474 err = otx2_sync_mbox_msg(&pf->mbox);
2478 vtag_rsp = (struct nix_vtag_config_rsp *)otx2_mbox_get_rsp
2479 (&pf->mbox.mbox, 0, &vtag_req->hdr);
2480 if (IS_ERR(vtag_rsp)) {
2481 err = PTR_ERR(vtag_rsp);
2484 config->tx_vtag_idx = vtag_rsp->vtag0_idx;
2486 req = otx2_mbox_alloc_msg_npc_install_flow(&pf->mbox);
2492 eth_zero_addr((u8 *)&req->mask.dmac);
2493 idx = ((vf * OTX2_PER_VF_VLAN_FLOWS) + OTX2_VF_VLAN_TX_INDEX);
2494 req->entry = flow_cfg->def_ent[flow_cfg->vf_vlan_offset + idx];
2495 req->features = BIT_ULL(NPC_DMAC);
2496 req->channel = pf->hw.tx_chan_base;
2497 req->intf = NIX_INTF_TX;
2499 req->op = NIX_TX_ACTIONOP_UCAST_DEFAULT;
2500 req->vtag0_def = vtag_rsp->vtag0_idx;
2501 req->vtag0_op = VTAG_INSERT;
2504 err = otx2_sync_mbox_msg(&pf->mbox);
2506 config->vlan = vlan;
2507 mutex_unlock(&pf->mbox.lock);
static int otx2_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos,
			    __be16 proto)
{
	struct otx2_nic *pf = netdev_priv(netdev);
	struct pci_dev *pdev = pf->pdev;

	if (!netif_running(netdev))
		return -EAGAIN;

	if (vf >= pci_num_vf(pdev))
		return -EINVAL;

	/* qos is currently unsupported */
	if (vlan >= VLAN_N_VID || qos)
		return -EINVAL;

	if (proto != htons(ETH_P_8021Q))
		return -EPROTONOSUPPORT;

	if (!(pf->flags & OTX2_FLAG_VF_VLAN_SUPPORT))
		return -EOPNOTSUPP;

	return otx2_do_set_vf_vlan(pf, vf, vlan, qos, proto);
}

static int otx2_get_vf_config(struct net_device *netdev, int vf,
			      struct ifla_vf_info *ivi)
{
	struct otx2_nic *pf = netdev_priv(netdev);
	struct pci_dev *pdev = pf->pdev;
	struct otx2_vf_config *config;

	if (!netif_running(netdev))
		return -EAGAIN;

	if (vf >= pci_num_vf(pdev))
		return -EINVAL;

	config = &pf->vf_configs[vf];
	ivi->vf = vf;
	ether_addr_copy(ivi->mac, config->mac);
	ivi->vlan = config->vlan;
	ivi->trusted = config->trusted;

	return 0;
}

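/* DMA-map a single XDP frame and append it to the XDP send queue.
 * If the queue is full the mapping is undone and the backing page
 * released, so the caller only has to count the drop.
 */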
static int otx2_xdp_xmit_tx(struct otx2_nic *pf, struct xdp_frame *xdpf,
			    int qidx)
{
	struct page *page;
	u64 dma_addr;
	int err;

	dma_addr = otx2_dma_map_page(pf, virt_to_page(xdpf->data),
				     offset_in_page(xdpf->data), xdpf->len,
				     DMA_TO_DEVICE);
	if (dma_mapping_error(pf->dev, dma_addr))
		return -ENOMEM;

	err = otx2_xdp_sq_append_pkt(pf, dma_addr, xdpf->len, qidx);
	if (!err) {
		otx2_dma_unmap_page(pf, dma_addr, xdpf->len, DMA_TO_DEVICE);
		page = virt_to_page(xdpf->data);
		put_page(page);
		return -ENOMEM;
	}
	return 0;
}

static int otx2_xdp_xmit(struct net_device *netdev, int n,
			 struct xdp_frame **frames, u32 flags)
{
	struct otx2_nic *pf = netdev_priv(netdev);
	int qidx = smp_processor_id();
	struct otx2_snd_queue *sq;
	int drops = 0, i;

	if (!netif_running(netdev))
		return -ENETDOWN;

	qidx += pf->hw.tx_queues;
	sq = pf->xdp_prog ? &pf->qset.sq[qidx] : NULL;

	/* Abort xmit if xdp queue is not set up */
	if (unlikely(!sq))
		return -ENXIO;

	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
		return -EINVAL;

	for (i = 0; i < n; i++) {
		struct xdp_frame *xdpf = frames[i];
		int err;

		err = otx2_xdp_xmit_tx(pf, xdpf, qidx);
		if (err)
			drops++;
	}
	return n - drops;
}

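/* Attach or detach an XDP program. The interface is briefly brought
 * down so queue counts can be changed: each RX queue gets a dedicated
 * XDP TX queue while a program is attached.
 */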
static int otx2_xdp_setup(struct otx2_nic *pf, struct bpf_prog *prog)
{
	struct net_device *dev = pf->netdev;
	bool if_up = netif_running(pf->netdev);
	struct bpf_prog *old_prog;

	if (prog && dev->mtu > MAX_XDP_MTU) {
		netdev_warn(dev, "Jumbo frames not yet supported with XDP\n");
		return -EOPNOTSUPP;
	}

	if (if_up)
		otx2_stop(pf->netdev);

	old_prog = xchg(&pf->xdp_prog, prog);

	if (old_prog)
		bpf_prog_put(old_prog);

	if (pf->xdp_prog)
		bpf_prog_add(pf->xdp_prog, pf->hw.rx_queues - 1);

	/* Network stack and XDP share the same rx queues.
	 * Use separate tx queues for XDP and network stack.
	 */
	if (pf->xdp_prog) {
		pf->hw.xdp_queues = pf->hw.rx_queues;
		xdp_features_set_redirect_target(dev, false);
	} else {
		pf->hw.xdp_queues = 0;
		xdp_features_clear_redirect_target(dev);
	}

	pf->hw.non_qos_queues += pf->hw.xdp_queues;

	if (if_up)
		otx2_open(pf->netdev);

	return 0;
}

static int otx2_xdp(struct net_device *netdev, struct netdev_bpf *xdp)
{
	struct otx2_nic *pf = netdev_priv(netdev);

	switch (xdp->command) {
	case XDP_SETUP_PROG:
		return otx2_xdp_setup(pf, xdp->prog);
	default:
		return -EINVAL;
	}
}

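/* Ask the AF to update a VF's permission flags. Currently only the
 * "trusted" flag and a full reset of permissions are supported.
 */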
static int otx2_set_vf_permissions(struct otx2_nic *pf, int vf,
				   int req_perm)
{
	struct set_vf_perm *req;
	int rc;

	mutex_lock(&pf->mbox.lock);
	req = otx2_mbox_alloc_msg_set_vf_perm(&pf->mbox);
	if (!req) {
		rc = -ENOMEM;
		goto out;
	}

	/* Let AF reset VF permissions as sriov is disabled */
	if (req_perm == OTX2_RESET_VF_PERM) {
		req->flags |= RESET_VF_PERM;
	} else if (req_perm == OTX2_TRUSTED_VF) {
		if (pf->vf_configs[vf].trusted)
			req->flags |= VF_TRUSTED;
	}

	req->vf = vf;
	rc = otx2_sync_mbox_msg(&pf->mbox);
out:
	mutex_unlock(&pf->mbox.lock);
	return rc;
}

static int otx2_ndo_set_vf_trust(struct net_device *netdev, int vf,
				 bool enable)
{
	struct otx2_nic *pf = netdev_priv(netdev);
	struct pci_dev *pdev = pf->pdev;
	int rc;

	if (vf >= pci_num_vf(pdev))
		return -EINVAL;

	if (pf->vf_configs[vf].trusted == enable)
		return 0;

	pf->vf_configs[vf].trusted = enable;
	rc = otx2_set_vf_permissions(pf, vf, OTX2_TRUSTED_VF);

	if (rc) {
		pf->vf_configs[vf].trusted = !enable;
	} else {
		netdev_info(pf->netdev, "VF %d is %strusted\n",
			    vf, enable ? "" : "not ");
		otx2_set_rx_mode(netdev);
	}

	return rc;
}

static const struct net_device_ops otx2_netdev_ops = {
	.ndo_open = otx2_open,
	.ndo_stop = otx2_stop,
	.ndo_start_xmit = otx2_xmit,
	.ndo_select_queue = otx2_select_queue,
	.ndo_fix_features = otx2_fix_features,
	.ndo_set_mac_address = otx2_set_mac_address,
	.ndo_change_mtu = otx2_change_mtu,
	.ndo_set_rx_mode = otx2_set_rx_mode,
	.ndo_set_features = otx2_set_features,
	.ndo_tx_timeout = otx2_tx_timeout,
	.ndo_get_stats64 = otx2_get_stats64,
	.ndo_eth_ioctl = otx2_ioctl,
	.ndo_set_vf_mac = otx2_set_vf_mac,
	.ndo_set_vf_vlan = otx2_set_vf_vlan,
	.ndo_get_vf_config = otx2_get_vf_config,
	.ndo_bpf = otx2_xdp,
	.ndo_xdp_xmit = otx2_xdp_xmit,
	.ndo_setup_tc = otx2_setup_tc,
	.ndo_set_vf_trust = otx2_ndo_set_vf_trust,
};

static int otx2_wq_init(struct otx2_nic *pf)
{
	pf->otx2_wq = create_singlethread_workqueue("otx2_wq");
	if (!pf->otx2_wq)
		return -ENOMEM;

	INIT_WORK(&pf->rx_mode_work, otx2_rx_mode_wrk_handler);
	INIT_WORK(&pf->reset_task, otx2_reset_task);
	return 0;
}

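/* The AF writes a non-zero revision into the RVUM block discovery
 * register once it is up; a zero there means the AF driver has not
 * probed yet and this PF's probe must be deferred.
 */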
static int otx2_check_pf_usable(struct otx2_nic *nic)
{
	u64 rev;

	rev = otx2_read64(nic, RVU_PF_BLOCK_ADDRX_DISC(BLKADDR_RVUM));
	rev = (rev >> 12) & 0xFF;
	/* Check if AF has setup revision for RVUM block,
	 * otherwise this driver probe should be deferred
	 * until AF driver comes up.
	 */
	if (!rev) {
		dev_warn(nic->dev,
			 "AF is not initialized, deferring probe\n");
		return -EPROBE_DEFER;
	}
	return 0;
}

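/* Once NIX/NPA LFs are attached the real MSI-X requirement is known,
 * so the provisional vectors are freed and re-allocated, and the AF
 * mailbox interrupt is re-registered on the new vector layout.
 */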
static int otx2_realloc_msix_vectors(struct otx2_nic *pf)
{
	struct otx2_hw *hw = &pf->hw;
	int num_vec, err;

	/* NPA interrupts are not registered, so alloc only
	 * up to NIX vector offset.
	 */
	num_vec = hw->nix_msixoff;
	num_vec += NIX_LF_CINT_VEC_START + hw->max_queues;

	otx2_disable_mbox_intr(pf);
	pci_free_irq_vectors(hw->pdev);
	err = pci_alloc_irq_vectors(hw->pdev, num_vec, num_vec, PCI_IRQ_MSIX);
	if (err < 0) {
		dev_err(pf->dev, "%s: Failed to realloc %d IRQ vectors\n",
			__func__, num_vec);
		return err;
	}

	return otx2_register_mbox_intr(pf, false);
}

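/* Allocate per-VF config state (MAC, VLAN, trust flag and the
 * link-event work item) for the maximum number of VFs this PF
 * can expose.
 */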
static int otx2_sriov_vfcfg_init(struct otx2_nic *pf)
{
	int i;

	pf->vf_configs = devm_kcalloc(pf->dev, pf->total_vfs,
				      sizeof(struct otx2_vf_config),
				      GFP_KERNEL);
	if (!pf->vf_configs)
		return -ENOMEM;

	for (i = 0; i < pf->total_vfs; i++) {
		pf->vf_configs[i].pf = pf;
		pf->vf_configs[i].intf_down = true;
		pf->vf_configs[i].trusted = false;
		INIT_DELAYED_WORK(&pf->vf_configs[i].link_event_work,
				  otx2_vf_link_event_task);
	}

	return 0;
}

static void otx2_sriov_vfcfg_cleanup(struct otx2_nic *pf)
{
	int i;

	if (!pf->vf_configs)
		return;

	for (i = 0; i < pf->total_vfs; i++) {
		cancel_delayed_work_sync(&pf->vf_configs[i].link_event_work);
		otx2_set_vf_permissions(pf, i, OTX2_RESET_VF_PERM);
	}
}

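/* PCI probe: map CSRs, set up the AF mailbox, attach NIX/NPA LFs,
 * then register the netdev and its offload features.
 */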
static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct device *dev = &pdev->dev;
	int err, qcount, qos_txqs;
	struct net_device *netdev;
	struct otx2_nic *pf;
	struct otx2_hw *hw;
	int num_vec;

	err = pcim_enable_device(pdev);
	if (err) {
		dev_err(dev, "Failed to enable PCI device\n");
		return err;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(dev, "PCI request regions failed 0x%x\n", err);
		return err;
	}

	err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
	if (err) {
		dev_err(dev, "DMA mask config failed, abort\n");
		goto err_release_regions;
	}

	pci_set_master(pdev);

	/* Set number of queues */
	qcount = min_t(int, num_online_cpus(), OTX2_MAX_CQ_CNT);
	qos_txqs = min_t(int, qcount, OTX2_QOS_MAX_LEAF_NODES);

	netdev = alloc_etherdev_mqs(sizeof(*pf), qcount + qos_txqs, qcount);
	if (!netdev) {
		err = -ENOMEM;
		goto err_release_regions;
	}

	pci_set_drvdata(pdev, netdev);
	SET_NETDEV_DEV(netdev, &pdev->dev);
	pf = netdev_priv(netdev);
	pf->netdev = netdev;
	pf->pdev = pdev;
	pf->dev = dev;
	pf->total_vfs = pci_sriov_get_totalvfs(pdev);
	pf->flags |= OTX2_FLAG_INTF_DOWN;

	hw = &pf->hw;
	hw->pdev = pdev;
	hw->rx_queues = qcount;
	hw->tx_queues = qcount;
	hw->non_qos_queues = qcount;
	hw->max_queues = qcount;
	hw->rbuf_len = OTX2_DEFAULT_RBUF_LEN;
	/* Use CQE of 128 byte descriptor size by default */
	hw->xqe_size = 128;

	num_vec = pci_msix_vec_count(pdev);
	hw->irq_name = devm_kmalloc_array(&hw->pdev->dev, num_vec, NAME_SIZE,
					  GFP_KERNEL);
	if (!hw->irq_name) {
		err = -ENOMEM;
		goto err_free_netdev;
	}

	hw->affinity_mask = devm_kcalloc(&hw->pdev->dev, num_vec,
					 sizeof(cpumask_var_t), GFP_KERNEL);
	if (!hw->affinity_mask) {
		err = -ENOMEM;
		goto err_free_netdev;
	}

	/* Map CSRs */
	pf->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0);
	if (!pf->reg_base) {
		dev_err(dev, "Unable to map physical function CSRs, aborting\n");
		err = -ENOMEM;
		goto err_free_netdev;
	}

	err = otx2_check_pf_usable(pf);
	if (err)
		goto err_free_netdev;

	err = pci_alloc_irq_vectors(hw->pdev, RVU_PF_INT_VEC_CNT,
				    RVU_PF_INT_VEC_CNT, PCI_IRQ_MSIX);
	if (err < 0) {
		dev_err(dev, "%s: Failed to alloc %d IRQ vectors\n",
			__func__, num_vec);
		goto err_free_netdev;
	}

	otx2_setup_dev_hw_settings(pf);

	/* Init PF <=> AF mailbox stuff */
	err = otx2_pfaf_mbox_init(pf);
	if (err)
		goto err_free_irq_vectors;

	/* Register mailbox interrupt */
	err = otx2_register_mbox_intr(pf, true);
	if (err)
		goto err_mbox_destroy;

	/* Request AF to attach NPA and NIX LFs to this PF.
	 * NIX and NPA LFs are needed for this PF to function as a NIC.
	 */
	err = otx2_attach_npa_nix(pf);
	if (err)
		goto err_disable_mbox_intr;

	err = otx2_realloc_msix_vectors(pf);
	if (err)
		goto err_detach_rsrc;

	err = otx2_set_real_num_queues(netdev, hw->tx_queues, hw->rx_queues);
	if (err)
		goto err_detach_rsrc;

	err = cn10k_lmtst_init(pf);
	if (err)
		goto err_detach_rsrc;

	/* Assign default mac address */
	otx2_get_mac_from_af(netdev);

	/* Don't check for error. Proceed without ptp */
	otx2_ptp_init(pf);

	/* NPA's pool is a stack to which SW frees buffer pointers via Aura.
	 * HW allocates buffer pointer from stack and uses it for DMA'ing
	 * ingress packet. In some scenarios HW can free back allocated buffer
	 * pointers to pool. This makes it impossible for SW to maintain a
	 * parallel list where physical addresses of buffer pointers (IOVAs)
	 * given to HW can be saved for later reference.
	 *
	 * So the only way to convert Rx packet's buffer address is to use
	 * IOMMU's iova_to_phys() handler which translates the address by
	 * walking through the translation tables.
	 */
	pf->iommu_domain = iommu_get_domain_for_dev(dev);

	netdev->hw_features = (NETIF_F_RXCSUM | NETIF_F_IP_CSUM |
			       NETIF_F_IPV6_CSUM | NETIF_F_RXHASH |
			       NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
			       NETIF_F_GSO_UDP_L4);
	netdev->features |= netdev->hw_features;

	err = otx2_mcam_flow_init(pf);
	if (err)
		goto err_ptp_destroy;

	err = cn10k_mcs_init(pf);
	if (err)
		goto err_del_mcam_entries;

	if (pf->flags & OTX2_FLAG_NTUPLE_SUPPORT)
		netdev->hw_features |= NETIF_F_NTUPLE;

	if (pf->flags & OTX2_FLAG_UCAST_FLTR_SUPPORT)
		netdev->priv_flags |= IFF_UNICAST_FLT;

	/* Support TSO on tag interface */
	netdev->vlan_features |= netdev->features;
	netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX |
			       NETIF_F_HW_VLAN_STAG_TX;
	if (pf->flags & OTX2_FLAG_RX_VLAN_SUPPORT)
		netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX |
				       NETIF_F_HW_VLAN_STAG_RX;
	netdev->features |= netdev->hw_features;

	/* HW supports tc offload but mutually exclusive with n-tuple filters */
	if (pf->flags & OTX2_FLAG_TC_FLOWER_SUPPORT)
		netdev->hw_features |= NETIF_F_HW_TC;

	netdev->hw_features |= NETIF_F_LOOPBACK | NETIF_F_RXALL;

	netif_set_tso_max_segs(netdev, OTX2_MAX_GSO_SEGS);
	netdev->watchdog_timeo = OTX2_TX_TIMEOUT;

	netdev->netdev_ops = &otx2_netdev_ops;
	netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT;

	netdev->min_mtu = OTX2_MIN_MTU;
	netdev->max_mtu = otx2_get_max_mtu(pf);

	err = register_netdev(netdev);
	if (err) {
		dev_err(dev, "Failed to register netdevice\n");
		goto err_mcs_free;
	}

	err = otx2_wq_init(pf);
	if (err)
		goto err_unreg_netdev;

	otx2_set_ethtool_ops(netdev);

	err = otx2_init_tc(pf);
	if (err)
		goto err_mcam_flow_del;

	err = otx2_register_dl(pf);
	if (err)
		goto err_mcam_flow_del;

	/* Initialize SR-IOV resources */
	err = otx2_sriov_vfcfg_init(pf);
	if (err)
		goto err_pf_sriov_init;

	/* Enable link notifications */
	otx2_cgx_config_linkevents(pf, true);

#ifdef CONFIG_DCB
	err = otx2_dcbnl_set_ops(netdev);
	if (err)
		goto err_pf_sriov_init;
#endif

	otx2_qos_init(pf, qos_txqs);

	return 0;

err_pf_sriov_init:
	otx2_shutdown_tc(pf);
err_mcam_flow_del:
	otx2_mcam_flow_del(pf);
err_unreg_netdev:
	unregister_netdev(netdev);
err_mcs_free:
	cn10k_mcs_free(pf);
err_del_mcam_entries:
	otx2_mcam_flow_del(pf);
err_ptp_destroy:
	otx2_ptp_destroy(pf);
err_detach_rsrc:
	if (pf->hw.lmt_info)
		free_percpu(pf->hw.lmt_info);
	if (test_bit(CN10K_LMTST, &pf->hw.cap_flag))
		qmem_free(pf->dev, pf->dync_lmt);
	otx2_detach_resources(&pf->mbox);
err_disable_mbox_intr:
	otx2_disable_mbox_intr(pf);
err_mbox_destroy:
	otx2_pfaf_mbox_destroy(pf);
err_free_irq_vectors:
	pci_free_irq_vectors(hw->pdev);
err_free_netdev:
	pci_set_drvdata(pdev, NULL);
	free_netdev(netdev);
err_release_regions:
	pci_release_regions(pdev);
	return err;
}

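/* Deferred work that forwards the PF's current link state to one VF
 * as a CGX_LINK_EVENT message on the PF->VF "up" mailbox.
 */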
static void otx2_vf_link_event_task(struct work_struct *work)
{
	struct otx2_vf_config *config;
	struct cgx_link_info_msg *req;
	struct mbox_msghdr *msghdr;
	struct otx2_nic *pf;
	int vf_idx;

	config = container_of(work, struct otx2_vf_config,
			      link_event_work.work);
	vf_idx = config - config->pf->vf_configs;
	pf = config->pf;

	msghdr = otx2_mbox_alloc_msg_rsp(&pf->mbox_pfvf[0].mbox_up, vf_idx,
					 sizeof(*req), sizeof(struct msg_rsp));
	if (!msghdr) {
		dev_err(pf->dev, "Failed to create VF%d link event\n", vf_idx);
		return;
	}

	req = (struct cgx_link_info_msg *)msghdr;
	req->hdr.id = MBOX_MSG_CGX_LINK_EVENT;
	req->hdr.sig = OTX2_MBOX_REQ_SIG;
	memcpy(&req->link_info, &pf->linfo, sizeof(req->link_info));

	otx2_sync_mbox_up_msg(&pf->mbox_pfvf[0], vf_idx);
}

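/* Bring up the PF<->VF mailboxes and FLR handling before enabling
 * SR-IOV, unwinding in reverse order on any failure.
 */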
static int otx2_sriov_enable(struct pci_dev *pdev, int numvfs)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct otx2_nic *pf = netdev_priv(netdev);
	int ret;

	/* Init PF <=> VF mailbox stuff */
	ret = otx2_pfvf_mbox_init(pf, numvfs);
	if (ret)
		return ret;

	ret = otx2_register_pfvf_mbox_intr(pf, numvfs);
	if (ret)
		goto free_mbox;

	ret = otx2_pf_flr_init(pf, numvfs);
	if (ret)
		goto free_intr;

	ret = otx2_register_flr_me_intr(pf, numvfs);
	if (ret)
		goto free_flr;

	ret = pci_enable_sriov(pdev, numvfs);
	if (ret)
		goto free_flr_intr;

	return numvfs;
free_flr_intr:
	otx2_disable_flr_me_intr(pf);
free_flr:
	otx2_flr_wq_destroy(pf);
free_intr:
	otx2_disable_pfvf_mbox_intr(pf, numvfs);
free_mbox:
	otx2_pfvf_mbox_destroy(pf);
	return ret;
}

static int otx2_sriov_disable(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct otx2_nic *pf = netdev_priv(netdev);
	int numvfs = pci_num_vf(pdev);

	if (!numvfs)
		return 0;

	pci_disable_sriov(pdev);

	otx2_disable_flr_me_intr(pf);
	otx2_flr_wq_destroy(pf);
	otx2_disable_pfvf_mbox_intr(pf, numvfs);
	otx2_pfvf_mbox_destroy(pf);

	return 0;
}

static int otx2_sriov_configure(struct pci_dev *pdev, int numvfs)
{
	if (numvfs == 0)
		return otx2_sriov_disable(pdev);
	else
		return otx2_sriov_enable(pdev, numvfs);
}

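/* Device teardown, also used as the PCI shutdown handler: disable
 * timestamping, pause frames and link events, then release all
 * resources in reverse order of otx2_probe().
 */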
static void otx2_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct otx2_nic *pf;

	if (!netdev)
		return;

	pf = netdev_priv(netdev);

	pf->flags |= OTX2_FLAG_PF_SHUTDOWN;

	if (pf->flags & OTX2_FLAG_TX_TSTAMP_ENABLED)
		otx2_config_hw_tx_tstamp(pf, false);
	if (pf->flags & OTX2_FLAG_RX_TSTAMP_ENABLED)
		otx2_config_hw_rx_tstamp(pf, false);

	/* Disable 802.3x pause frames */
	if (pf->flags & OTX2_FLAG_RX_PAUSE_ENABLED ||
	    (pf->flags & OTX2_FLAG_TX_PAUSE_ENABLED)) {
		pf->flags &= ~OTX2_FLAG_RX_PAUSE_ENABLED;
		pf->flags &= ~OTX2_FLAG_TX_PAUSE_ENABLED;
		otx2_config_pause_frm(pf);
	}

#ifdef CONFIG_DCB
	/* Disable PFC config */
	if (pf->pfc_en) {
		pf->pfc_en = 0;
		otx2_config_priority_flow_ctrl(pf);
	}
#endif
	cancel_work_sync(&pf->reset_task);
	/* Disable link notifications */
	otx2_cgx_config_linkevents(pf, false);

	otx2_unregister_dl(pf);
	unregister_netdev(netdev);
	cn10k_mcs_free(pf);
	otx2_sriov_disable(pf->pdev);
	otx2_sriov_vfcfg_cleanup(pf);
	if (pf->otx2_wq)
		destroy_workqueue(pf->otx2_wq);

	otx2_ptp_destroy(pf);
	otx2_mcam_flow_del(pf);
	otx2_shutdown_tc(pf);
	otx2_shutdown_qos(pf);
	otx2_detach_resources(&pf->mbox);
	if (pf->hw.lmt_info)
		free_percpu(pf->hw.lmt_info);
	if (test_bit(CN10K_LMTST, &pf->hw.cap_flag))
		qmem_free(pf->dev, pf->dync_lmt);
	otx2_disable_mbox_intr(pf);
	otx2_pfaf_mbox_destroy(pf);
	pci_free_irq_vectors(pf->pdev);
	pci_set_drvdata(pdev, NULL);
	free_netdev(netdev);

	pci_release_regions(pdev);
}

static struct pci_driver otx2_pf_driver = {
	.name = DRV_NAME,
	.id_table = otx2_pf_id_table,
	.probe = otx2_probe,
	.shutdown = otx2_remove,
	.remove = otx2_remove,
	.sriov_configure = otx2_sriov_configure
};

static int __init otx2_rvupf_init_module(void)
{
	pr_info("%s: %s\n", DRV_NAME, DRV_STRING);

	return pci_register_driver(&otx2_pf_driver);
}

static void __exit otx2_rvupf_cleanup_module(void)
{
	pci_unregister_driver(&otx2_pf_driver);
}

module_init(otx2_rvupf_init_module);
module_exit(otx2_rvupf_cleanup_module);