// SPDX-License-Identifier: GPL-2.0
/* Marvell RVU Virtual Function ethernet driver
 *
 * Copyright (C) 2020 Marvell.
 */

#include <linux/etherdevice.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/net_tstamp.h>

#include "otx2_common.h"
#include "otx2_reg.h"
#include "otx2_ptp.h"
#include "cn10k.h"

#define DRV_NAME	"rvu_nicvf"
#define DRV_STRING	"Marvell RVU NIC Virtual Function Driver"

static const struct pci_device_id otx2_vf_id_table[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_RVU_AFVF) },
	{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_RVU_VF) },
	{ }
};

MODULE_AUTHOR("Sunil Goutham <sgoutham@marvell.com>");
MODULE_DESCRIPTION(DRV_STRING);
MODULE_LICENSE("GPL v2");
MODULE_DEVICE_TABLE(pci, otx2_vf_id_table);

/* RVU VF Interrupt Vector Enumeration */
enum {
	RVU_VF_INT_VEC_MBOX = 0x0,
};

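/* Validate and dispatch a single PF/AF response received on the
 * request ("down") mailbox. MBOX_MSG_READY carries this VF's pcifunc;
 * the remaining handlers cache resource parameters returned by the AF.
 */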
static void otx2vf_process_vfaf_mbox_msg(struct otx2_nic *vf,
					 struct mbox_msghdr *msg)
{
	if (msg->id >= MBOX_MSG_MAX) {
		dev_err(vf->dev,
			"Mbox msg with unknown ID %d\n", msg->id);
		return;
	}

	if (msg->sig != OTX2_MBOX_RSP_SIG) {
		dev_err(vf->dev,
			"Mbox msg with wrong signature %x, ID %d\n",
			msg->sig, msg->id);
		return;
	}

	if (msg->rc == MBOX_MSG_INVALID) {
		dev_err(vf->dev,
			"PF/AF says the sent msg(s) %d were invalid\n",
			msg->id);
		return;
	}

	switch (msg->id) {
	case MBOX_MSG_READY:
		vf->pcifunc = msg->pcifunc;
		break;
	case MBOX_MSG_MSIX_OFFSET:
		mbox_handler_msix_offset(vf, (struct msix_offset_rsp *)msg);
		break;
	case MBOX_MSG_NPA_LF_ALLOC:
		mbox_handler_npa_lf_alloc(vf, (struct npa_lf_alloc_rsp *)msg);
		break;
	case MBOX_MSG_NIX_LF_ALLOC:
		mbox_handler_nix_lf_alloc(vf, (struct nix_lf_alloc_rsp *)msg);
		break;
	case MBOX_MSG_NIX_TXSCH_ALLOC:
		mbox_handler_nix_txsch_alloc(vf,
					     (struct nix_txsch_alloc_rsp *)msg);
		break;
	case MBOX_MSG_NIX_BP_ENABLE:
		mbox_handler_nix_bp_enable(vf, (struct nix_bp_cfg_rsp *)msg);
		break;
	default:
		if (msg->rc)
			dev_err(vf->dev,
				"Mbox msg response has err %d, ID %d\n",
				msg->rc, msg->id);
		break;
	}
}

static void otx2vf_vfaf_mbox_handler(struct work_struct *work)
{
	struct otx2_mbox_dev *mdev;
	struct mbox_hdr *rsp_hdr;
	struct mbox_msghdr *msg;
	struct otx2_mbox *mbox;
	struct mbox *af_mbox;
	int offset, id;

	af_mbox = container_of(work, struct mbox, mbox_wrk);
	mbox = &af_mbox->mbox;
	mdev = &mbox->dev[0];
	rsp_hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
	if (af_mbox->num_msgs == 0)
		return;
	offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN);

	for (id = 0; id < af_mbox->num_msgs; id++) {
		msg = (struct mbox_msghdr *)(mdev->mbase + offset);
		otx2vf_process_vfaf_mbox_msg(af_mbox->pfvf, msg);
		offset = mbox->rx_start + msg->next_msgoff;
		if (mdev->msgs_acked == (af_mbox->num_msgs - 1))
			__otx2_mbox_reset(mbox, 0);
		mdev->msgs_acked++;
	}
}

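/* Handle a single PF => VF notification ("up" message) and queue a
 * response acknowledging it.
 */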
static int otx2vf_process_mbox_msg_up(struct otx2_nic *vf,
				      struct mbox_msghdr *req)
{
	struct msg_rsp *rsp;
	int err;

	/* Check if valid, if not reply with an invalid msg */
	if (req->sig != OTX2_MBOX_REQ_SIG) {
		otx2_reply_invalid_msg(&vf->mbox.mbox_up, 0, 0, req->id);
		return -ENODEV;
	}

	switch (req->id) {
	case MBOX_MSG_CGX_LINK_EVENT:
		rsp = (struct msg_rsp *)otx2_mbox_alloc_msg(
						&vf->mbox.mbox_up, 0,
						sizeof(struct msg_rsp));
		if (!rsp)
			return -ENOMEM;

		rsp->hdr.id = MBOX_MSG_CGX_LINK_EVENT;
		rsp->hdr.sig = OTX2_MBOX_RSP_SIG;
		rsp->hdr.pcifunc = 0;
		rsp->hdr.rc = 0;
		err = otx2_mbox_up_handler_cgx_link_event(
				vf, (struct cgx_link_info_msg *)req, rsp);
		return err;
	default:
		otx2_reply_invalid_msg(&vf->mbox.mbox_up, 0, 0, req->id);
		return -ENODEV;
	}
	return 0;
}

static void otx2vf_vfaf_mbox_up_handler(struct work_struct *work)
{
	struct otx2_mbox_dev *mdev;
	struct mbox_hdr *rsp_hdr;
	struct mbox_msghdr *msg;
	struct otx2_mbox *mbox;
	struct mbox *vf_mbox;
	struct otx2_nic *vf;
	int offset, id;

	vf_mbox = container_of(work, struct mbox, mbox_up_wrk);
	vf = vf_mbox->pfvf;
	mbox = &vf_mbox->mbox_up;
	mdev = &mbox->dev[0];

	rsp_hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
	if (vf_mbox->up_num_msgs == 0)
		return;

	offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN);

	for (id = 0; id < vf_mbox->up_num_msgs; id++) {
		msg = (struct mbox_msghdr *)(mdev->mbase + offset);
		otx2vf_process_mbox_msg_up(vf, msg);
		offset = mbox->rx_start + msg->next_msgoff;
	}

	otx2_mbox_msg_send(mbox, 0);
}

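/* Mailbox IRQ handler: acknowledge the interrupt, then hand both the
 * response and the notification queues off to workqueue context, as
 * message processing may sleep.
 */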
static irqreturn_t otx2vf_vfaf_mbox_intr_handler(int irq, void *vf_irq)
{
	struct otx2_nic *vf = (struct otx2_nic *)vf_irq;
	struct otx2_mbox_dev *mdev;
	struct otx2_mbox *mbox;
	struct mbox_hdr *hdr;

	/* Clear the IRQ */
	otx2_write64(vf, RVU_VF_INT, BIT_ULL(0));

	/* Read latest mbox data */
	smp_rmb();

	/* Check for PF => VF response messages */
	mbox = &vf->mbox.mbox;
	mdev = &mbox->dev[0];
	otx2_sync_mbox_bbuf(mbox, 0);

	trace_otx2_msg_interrupt(mbox->pdev, "PF to VF", BIT_ULL(0));

	hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
	if (hdr->num_msgs) {
		vf->mbox.num_msgs = hdr->num_msgs;
		hdr->num_msgs = 0;
		memset(mbox->hwbase + mbox->rx_start, 0,
		       ALIGN(sizeof(struct mbox_hdr), sizeof(u64)));
		queue_work(vf->mbox_wq, &vf->mbox.mbox_wrk);
	}

	/* Check for PF => VF notification messages */
	mbox = &vf->mbox.mbox_up;
	mdev = &mbox->dev[0];
	otx2_sync_mbox_bbuf(mbox, 0);

	hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
	if (hdr->num_msgs) {
		vf->mbox.up_num_msgs = hdr->num_msgs;
		hdr->num_msgs = 0;
		memset(mbox->hwbase + mbox->rx_start, 0,
		       ALIGN(sizeof(struct mbox_hdr), sizeof(u64)));
		queue_work(vf->mbox_wq, &vf->mbox.mbox_up_wrk);
	}

	return IRQ_HANDLED;
}

static void otx2vf_disable_mbox_intr(struct otx2_nic *vf)
{
	int vector = pci_irq_vector(vf->pdev, RVU_VF_INT_VEC_MBOX);

	/* Disable VF => PF mailbox IRQ */
	otx2_write64(vf, RVU_VF_INT_ENA_W1C, BIT_ULL(0));
	free_irq(vector, vf);
}

static int otx2vf_register_mbox_intr(struct otx2_nic *vf, bool probe_pf)
{
	struct otx2_hw *hw = &vf->hw;
	struct msg_req *req;
	char *irq_name;
	int err;

	/* Register mailbox interrupt handler */
	irq_name = &hw->irq_name[RVU_VF_INT_VEC_MBOX * NAME_SIZE];
	snprintf(irq_name, NAME_SIZE, "RVUVFAF Mbox");
	err = request_irq(pci_irq_vector(vf->pdev, RVU_VF_INT_VEC_MBOX),
			  otx2vf_vfaf_mbox_intr_handler, 0, irq_name, vf);
	if (err) {
		dev_err(vf->dev,
			"RVUPF: IRQ registration failed for VFAF mbox irq\n");
		return err;
	}

	/* Enable mailbox interrupt for msgs coming from PF.
	 * First clear to avoid spurious interrupts, if any.
	 */
	otx2_write64(vf, RVU_VF_INT, BIT_ULL(0));
	otx2_write64(vf, RVU_VF_INT_ENA_W1S, BIT_ULL(0));

	if (!probe_pf)
		return 0;

	/* Check mailbox communication with PF */
	req = otx2_mbox_alloc_msg_ready(&vf->mbox);
	if (!req) {
		otx2vf_disable_mbox_intr(vf);
		return -ENOMEM;
	}

	err = otx2_sync_mbox_msg(&vf->mbox);
	if (err) {
		dev_warn(vf->dev,
			 "AF not responding to mailbox, deferring probe\n");
		otx2vf_disable_mbox_intr(vf);
		return -EPROBE_DEFER;
	}

	return 0;
}

static void otx2vf_vfaf_mbox_destroy(struct otx2_nic *vf)
{
	struct mbox *mbox = &vf->mbox;

	if (vf->mbox_wq) {
		destroy_workqueue(vf->mbox_wq);
		vf->mbox_wq = NULL;
	}

	if (mbox->mbox.hwbase && !test_bit(CN10K_MBOX, &vf->hw.cap_flag))
		iounmap((void __iomem *)mbox->mbox.hwbase);

	otx2_mbox_destroy(&mbox->mbox);
	otx2_mbox_destroy(&mbox->mbox_up);
}

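/* Set up the VF <=> PF mailbox: map the shared region (BAR2 register
 * space on CN10K, a reserved RAM region otherwise) and initialize the
 * request and notification channels on top of it.
 */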
static int otx2vf_vfaf_mbox_init(struct otx2_nic *vf)
{
	struct mbox *mbox = &vf->mbox;
	void __iomem *hwbase;
	int err;

	mbox->pfvf = vf;
	vf->mbox_wq = alloc_workqueue("otx2_vfaf_mailbox",
				      WQ_UNBOUND | WQ_HIGHPRI |
				      WQ_MEM_RECLAIM, 1);
	if (!vf->mbox_wq)
		return -ENOMEM;

	if (test_bit(CN10K_MBOX, &vf->hw.cap_flag)) {
		/* For cn10k platform, VF mailbox region is in its BAR2
		 * register space
		 */
		hwbase = vf->reg_base + RVU_VF_MBOX_REGION;
	} else {
		/* Mailbox is a reserved memory (in RAM) region shared between
		 * admin function (i.e PF0) and this VF, shouldn't be mapped as
		 * device memory to allow unaligned accesses.
		 */
		hwbase = ioremap_wc(pci_resource_start(vf->pdev,
						       PCI_MBOX_BAR_NUM),
				    pci_resource_len(vf->pdev,
						     PCI_MBOX_BAR_NUM));
		if (!hwbase) {
			dev_err(vf->dev, "Unable to map VFAF mailbox region\n");
			err = -ENOMEM;
			goto exit;
		}
	}

	err = otx2_mbox_init(&mbox->mbox, hwbase, vf->pdev, vf->reg_base,
			     MBOX_DIR_VFPF, 1);
	if (err)
		goto exit;

	err = otx2_mbox_init(&mbox->mbox_up, hwbase, vf->pdev, vf->reg_base,
			     MBOX_DIR_VFPF_UP, 1);
	if (err)
		goto exit;

	err = otx2_mbox_bbuf_init(mbox, vf->pdev);
	if (err)
		goto exit;

	INIT_WORK(&mbox->mbox_wrk, otx2vf_vfaf_mbox_handler);
	INIT_WORK(&mbox->mbox_up_wrk, otx2vf_vfaf_mbox_up_handler);
	mutex_init(&mbox->lock);

	return 0;
exit:
	if (hwbase && !test_bit(CN10K_MBOX, &vf->hw.cap_flag))
		iounmap(hwbase);
	destroy_workqueue(vf->mbox_wq);
	return err;
}

static int otx2vf_open(struct net_device *netdev)
{
	struct otx2_nic *vf;
	int err;

	err = otx2_open(netdev);
	if (err)
		return err;

	/* LBKs do not receive link events so tell everyone we are up here */
	vf = netdev_priv(netdev);
	if (is_otx2_lbkvf(vf->pdev)) {
		pr_info("%s NIC Link is UP\n", netdev->name);
		netif_carrier_on(netdev);
		netif_tx_start_all_queues(netdev);
	}

	return 0;
}

static int otx2vf_stop(struct net_device *netdev)
{
	return otx2_stop(netdev);
}

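/* Transmit path: if the hardware send queue is out of SQBs, stop the
 * queue and re-check under a memory barrier before reporting busy.
 */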
static netdev_tx_t otx2vf_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct otx2_nic *vf = netdev_priv(netdev);
	int qidx = skb_get_queue_mapping(skb);
	struct otx2_snd_queue *sq;
	struct netdev_queue *txq;

	sq = &vf->qset.sq[qidx];
	txq = netdev_get_tx_queue(netdev, qidx);

	if (!otx2_sq_append_skb(netdev, sq, skb, qidx)) {
		netif_tx_stop_queue(txq);

		/* Check again, in case SQBs got freed up */
		smp_mb();
		if (((sq->num_sqbs - *sq->aura_fc_addr) * sq->sqe_per_sqb)
							> sq->sqe_thresh)
			netif_tx_wake_queue(txq);

		return NETDEV_TX_BUSY;
	}

	return NETDEV_TX_OK;
}

static void otx2vf_set_rx_mode(struct net_device *netdev)
{
	struct otx2_nic *vf = netdev_priv(netdev);

	queue_work(vf->otx2_wq, &vf->rx_mode_work);
}

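/* otx2vf_set_rx_mode() is called in atomic context while mailbox
 * messaging can sleep, so the NIX_RX_MODE update is deferred to this
 * work handler.
 */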
static void otx2vf_do_set_rx_mode(struct work_struct *work)
{
	struct otx2_nic *vf = container_of(work, struct otx2_nic, rx_mode_work);
	struct net_device *netdev = vf->netdev;
	unsigned int flags = netdev->flags;
	struct nix_rx_mode *req;

	mutex_lock(&vf->mbox.lock);

	req = otx2_mbox_alloc_msg_nix_set_rx_mode(&vf->mbox);
	if (!req) {
		mutex_unlock(&vf->mbox.lock);
		return;
	}

	req->mode = NIX_RX_MODE_UCAST;

	if (flags & IFF_PROMISC)
		req->mode |= NIX_RX_MODE_PROMISC;
	if (flags & (IFF_ALLMULTI | IFF_MULTICAST))
		req->mode |= NIX_RX_MODE_ALLMULTI;

	req->mode |= NIX_RX_MODE_USE_MCE;

	otx2_sync_mbox_msg(&vf->mbox);

	mutex_unlock(&vf->mbox.lock);
}

static int otx2vf_change_mtu(struct net_device *netdev, int new_mtu)
{
	bool if_up = netif_running(netdev);
	int err = 0;

	if (if_up)
		otx2vf_stop(netdev);

	netdev_info(netdev, "Changing MTU from %d to %d\n",
		    netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;

	if (if_up)
		err = otx2vf_open(netdev);

	return err;
}

static void otx2vf_reset_task(struct work_struct *work)
{
	struct otx2_nic *vf = container_of(work, struct otx2_nic, reset_task);

	rtnl_lock();

	if (netif_running(vf->netdev)) {
		otx2vf_stop(vf->netdev);
		vf->reset_count++;
		otx2vf_open(vf->netdev);
	}

	rtnl_unlock();
}

static int otx2vf_set_features(struct net_device *netdev,
			       netdev_features_t features)
{
	netdev_features_t changed = features ^ netdev->features;
	bool ntuple_enabled = !!(features & NETIF_F_NTUPLE);
	struct otx2_nic *vf = netdev_priv(netdev);

	if (changed & NETIF_F_NTUPLE) {
		if (!ntuple_enabled) {
			otx2_mcam_flow_del(vf);
			return 0;
		}

		if (!otx2_get_maxflows(vf->flow_cfg)) {
			netdev_err(netdev,
				   "Can't enable NTUPLE, MCAM entries not allocated\n");
			return -EINVAL;
		}
	}

	return 0;
}

static const struct net_device_ops otx2vf_netdev_ops = {
	.ndo_open = otx2vf_open,
	.ndo_stop = otx2vf_stop,
	.ndo_start_xmit = otx2vf_xmit,
	.ndo_set_rx_mode = otx2vf_set_rx_mode,
	.ndo_set_mac_address = otx2_set_mac_address,
	.ndo_change_mtu = otx2vf_change_mtu,
	.ndo_set_features = otx2vf_set_features,
	.ndo_get_stats64 = otx2_get_stats64,
	.ndo_tx_timeout = otx2_tx_timeout,
	.ndo_do_ioctl = otx2_ioctl,
};

static int otx2_wq_init(struct otx2_nic *vf)
{
	vf->otx2_wq = create_singlethread_workqueue("otx2vf_wq");
	if (!vf->otx2_wq)
		return -ENOMEM;

	INIT_WORK(&vf->rx_mode_work, otx2vf_do_set_rx_mode);
	INIT_WORK(&vf->reset_task, otx2vf_reset_task);
	return 0;
}

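/* The NIX vector layout is known only after the AF attaches LFs to
 * this VF, so the MSI-X vectors allocated at probe time are freed and
 * reallocated here with the final count.
 */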
static int otx2vf_realloc_msix_vectors(struct otx2_nic *vf)
{
	struct otx2_hw *hw = &vf->hw;
	int num_vec, err;

	num_vec = hw->nix_msixoff;
	num_vec += NIX_LF_CINT_VEC_START + hw->max_queues;

	otx2vf_disable_mbox_intr(vf);
	pci_free_irq_vectors(hw->pdev);
	err = pci_alloc_irq_vectors(hw->pdev, num_vec, num_vec, PCI_IRQ_MSIX);
	if (err < 0) {
		dev_err(vf->dev, "%s: Failed to realloc %d IRQ vectors\n",
			__func__, num_vec);
		return err;
	}

	return otx2vf_register_mbox_intr(vf, false);
}

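/* Probe: enable the device, set up the VF <=> PF mailbox, have the AF
 * attach NPA/NIX LFs, then register the netdev once the data path
 * resources are in place.
 */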
static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int num_vec = pci_msix_vec_count(pdev);
	struct device *dev = &pdev->dev;
	struct net_device *netdev;
	struct otx2_nic *vf;
	struct otx2_hw *hw;
	int err, qcount;

	err = pcim_enable_device(pdev);
	if (err) {
		dev_err(dev, "Failed to enable PCI device\n");
		return err;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(dev, "PCI request regions failed 0x%x\n", err);
		return err;
	}

	err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
	if (err) {
		dev_err(dev, "DMA mask config failed, abort\n");
		goto err_release_regions;
	}

	pci_set_master(pdev);

	qcount = num_online_cpus();
	netdev = alloc_etherdev_mqs(sizeof(*vf), qcount, qcount);
	if (!netdev) {
		err = -ENOMEM;
		goto err_release_regions;
	}

	pci_set_drvdata(pdev, netdev);
	SET_NETDEV_DEV(netdev, &pdev->dev);
	vf = netdev_priv(netdev);
	vf->netdev = netdev;
	vf->pdev = pdev;
	vf->dev = dev;
	vf->iommu_domain = iommu_get_domain_for_dev(dev);

	vf->flags |= OTX2_FLAG_INTF_DOWN;
	hw = &vf->hw;
	hw->pdev = vf->pdev;
	hw->rx_queues = qcount;
	hw->tx_queues = qcount;
	hw->max_queues = qcount;
	hw->tot_tx_queues = qcount;

	hw->irq_name = devm_kmalloc_array(&hw->pdev->dev, num_vec, NAME_SIZE,
					  GFP_KERNEL);
	if (!hw->irq_name) {
		err = -ENOMEM;
		goto err_free_netdev;
	}

	hw->affinity_mask = devm_kcalloc(&hw->pdev->dev, num_vec,
					 sizeof(cpumask_var_t), GFP_KERNEL);
	if (!hw->affinity_mask) {
		err = -ENOMEM;
		goto err_free_netdev;
	}

	err = pci_alloc_irq_vectors(hw->pdev, num_vec, num_vec, PCI_IRQ_MSIX);
	if (err < 0) {
		dev_err(dev, "%s: Failed to alloc %d IRQ vectors\n",
			__func__, num_vec);
		goto err_free_netdev;
	}

	vf->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0);
	if (!vf->reg_base) {
		dev_err(dev, "Unable to map physical function CSRs, aborting\n");
		err = -ENOMEM;
		goto err_free_irq_vectors;
	}

	otx2_setup_dev_hw_settings(vf);

	/* Init VF <=> PF mailbox */
	err = otx2vf_vfaf_mbox_init(vf);
	if (err)
		goto err_free_irq_vectors;

	/* Register mailbox interrupt */
	err = otx2vf_register_mbox_intr(vf, true);
	if (err)
		goto err_mbox_destroy;

	/* Request AF to attach NPA and NIX LFs to this VF */
	err = otx2_attach_npa_nix(vf);
	if (err)
		goto err_disable_mbox_intr;

	err = otx2vf_realloc_msix_vectors(vf);
	if (err)
		goto err_mbox_destroy;

	err = otx2_set_real_num_queues(netdev, qcount, qcount);
	if (err)
		goto err_detach_rsrc;

	err = cn10k_lmtst_init(vf);
	if (err)
		goto err_detach_rsrc;

	/* Don't check for error. Proceed without ptp */
	otx2_ptp_init(vf);

	/* Assign default mac address */
	otx2_get_mac_from_af(netdev);

	netdev->hw_features = NETIF_F_RXCSUM | NETIF_F_IP_CSUM |
			      NETIF_F_IPV6_CSUM | NETIF_F_RXHASH |
			      NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
			      NETIF_F_GSO_UDP_L4;
	netdev->features = netdev->hw_features;
	/* Support TSO on tag interface */
	netdev->vlan_features |= netdev->features;
	netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX |
			       NETIF_F_HW_VLAN_STAG_TX;
	netdev->features |= netdev->hw_features;

	netdev->hw_features |= NETIF_F_NTUPLE;
	netdev->hw_features |= NETIF_F_RXALL;

	netdev->gso_max_segs = OTX2_MAX_GSO_SEGS;
	netdev->watchdog_timeo = OTX2_TX_TIMEOUT;

	netdev->netdev_ops = &otx2vf_netdev_ops;

	netdev->min_mtu = OTX2_MIN_MTU;
	netdev->max_mtu = otx2_get_max_mtu(vf);

	/* To distinguish, for LBK VFs set netdev name explicitly */
	if (is_otx2_lbkvf(vf->pdev)) {
		int n;

		n = (vf->pcifunc >> RVU_PFVF_FUNC_SHIFT) & RVU_PFVF_FUNC_MASK;
		/* Need to subtract 1 to get proper VF number */
		n -= 1;
		snprintf(netdev->name, sizeof(netdev->name), "lbk%d", n);
	}

	err = register_netdev(netdev);
	if (err) {
		dev_err(dev, "Failed to register netdevice\n");
		goto err_detach_rsrc;
	}

	err = otx2_wq_init(vf);
	if (err)
		goto err_unreg_netdev;

	otx2vf_set_ethtool_ops(netdev);

	err = otx2vf_mcam_flow_init(vf);
	if (err)
		goto err_unreg_netdev;

	err = otx2_register_dl(vf);
	if (err)
		goto err_unreg_netdev;

	/* Enable pause frames by default */
	vf->flags |= OTX2_FLAG_RX_PAUSE_ENABLED;
	vf->flags |= OTX2_FLAG_TX_PAUSE_ENABLED;

	return 0;

err_unreg_netdev:
	unregister_netdev(netdev);
err_detach_rsrc:
	if (test_bit(CN10K_LMTST, &vf->hw.cap_flag))
		qmem_free(vf->dev, vf->dync_lmt);
	otx2_detach_resources(&vf->mbox);
err_disable_mbox_intr:
	otx2vf_disable_mbox_intr(vf);
err_mbox_destroy:
	otx2vf_vfaf_mbox_destroy(vf);
err_free_irq_vectors:
	pci_free_irq_vectors(hw->pdev);
err_free_netdev:
	pci_set_drvdata(pdev, NULL);
	free_netdev(netdev);
err_release_regions:
	pci_release_regions(pdev);
	return err;
}

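/* Teardown mirrors probe in reverse order; also used as the shutdown hook */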
static void otx2vf_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct otx2_nic *vf;

	if (!netdev)
		return;

	vf = netdev_priv(netdev);

	cancel_work_sync(&vf->reset_task);
	otx2_unregister_dl(vf);
	unregister_netdev(netdev);
	if (vf->otx2_wq)
		destroy_workqueue(vf->otx2_wq);
	otx2_ptp_destroy(vf);
	otx2vf_disable_mbox_intr(vf);
	otx2_detach_resources(&vf->mbox);
	if (test_bit(CN10K_LMTST, &vf->hw.cap_flag))
		qmem_free(vf->dev, vf->dync_lmt);
	otx2vf_vfaf_mbox_destroy(vf);
	pci_free_irq_vectors(vf->pdev);
	pci_set_drvdata(pdev, NULL);
	free_netdev(netdev);

	pci_release_regions(pdev);
}

static struct pci_driver otx2vf_driver = {
	.name = DRV_NAME,
	.id_table = otx2_vf_id_table,
	.probe = otx2vf_probe,
	.remove = otx2vf_remove,
	.shutdown = otx2vf_remove,
};

static int __init otx2vf_init_module(void)
{
	pr_info("%s: %s\n", DRV_NAME, DRV_STRING);

	return pci_register_driver(&otx2vf_driver);
}

static void __exit otx2vf_cleanup_module(void)
{
	pci_unregister_driver(&otx2vf_driver);
}

module_init(otx2vf_init_module);
module_exit(otx2vf_cleanup_module);