1 /**********************************************************************
4 * Contact: support@cavium.com
5 * Please include "LiquidIO" in the subject.
7 * Copyright (c) 2003-2016 Cavium, Inc.
9 * This file is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License, Version 2, as
11 * published by the Free Software Foundation.
13 * This file is distributed in the hope that it will be useful, but
14 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
15 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
16 * NONINFRINGEMENT. See the GNU General Public License for more details.
17 ***********************************************************************/
18 #include <linux/module.h>
19 #include <linux/interrupt.h>
20 #include <linux/pci.h>
21 #include <net/vxlan.h>
22 #include "liquidio_common.h"
23 #include "octeon_droq.h"
24 #include "octeon_iq.h"
25 #include "response_manager.h"
26 #include "octeon_device.h"
27 #include "octeon_nic.h"
28 #include "octeon_main.h"
29 #include "octeon_network.h"
30 #include "cn23xx_vf_device.h"
32 MODULE_AUTHOR("Cavium Networks, <support@cavium.com>");
33 MODULE_DESCRIPTION("Cavium LiquidIO Intelligent Server Adapter Virtual Function Driver");
34 MODULE_LICENSE("GPL");
36 static int debug = -1;
37 module_param(debug, int, 0644);
38 MODULE_PARM_DESC(debug, "NETIF_MSG debug bits");
40 #define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)
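/* A negative "debug" value (the default -1) makes netif_msg_init() fall back
 * to DEFAULT_MSG_ENABLE; see setup_nic_devices().
 */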
42 struct oct_timestamp_resp {
51 #ifdef __BIG_ENDIAN_BITFIELD
63 #define OCTNIC_GSO_MAX_HEADER_SIZE 128
64 #define OCTNIC_GSO_MAX_SIZE \
65 (CN23XX_DEFAULT_INPUT_JABBER - OCTNIC_GSO_MAX_HEADER_SIZE)
68 liquidio_vf_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
69 static void liquidio_vf_remove(struct pci_dev *pdev);
70 static int octeon_device_init(struct octeon_device *oct);
71 static int liquidio_stop(struct net_device *netdev);
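/**
 * \brief Wait for pending output-queue packets to be processed
 * @param oct Pointer to Octeon device
 *
 * Polls each active DROQ for packets still pending in hardware and keeps
 * rescheduling the DROQ tasklet until the count drains or the retry budget
 * (MAX_IO_PENDING_PKT_COUNT) is exhausted. The caller treats a nonzero
 * return as packets still pending (see octeon_destroy_resources()).
 */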
73 static int lio_wait_for_oq_pkts(struct octeon_device *oct)
75 struct octeon_device_priv *oct_priv =
76 (struct octeon_device_priv *)oct->priv;
77 int retry = MAX_IO_PENDING_PKT_COUNT;
78 int pkt_cnt = 0, pending_pkts;
84 for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) {
85 if (!(oct->io_qmask.oq & BIT_ULL(i)))
87 pkt_cnt += octeon_droq_check_hw_for_pkts(oct->droq[i]);
90 pending_pkts += pkt_cnt;
91 tasklet_schedule(&oct_priv->droq_tasklet);
94 schedule_timeout_uninterruptible(1);
96 } while (retry-- && pending_pkts);
102 * \brief Cause device to go quiet so it can be safely removed/reset/etc
103 * @param oct Pointer to Octeon device
105 static void pcierror_quiesce_device(struct octeon_device *oct)
109 /* Disable the input and output queues now. No more packets will
110 * arrive from Octeon, but we should wait for all packet processing to finish. */
114 /* To allow for in-flight requests */
115 schedule_timeout_uninterruptible(100);
117 if (wait_for_pending_requests(oct))
118 dev_err(&oct->pci_dev->dev, "There were pending requests\n");
120 /* Force all requests waiting to be fetched by OCTEON to complete. */
121 for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
122 struct octeon_instr_queue *iq;
124 if (!(oct->io_qmask.iq & BIT_ULL(i)))
126 iq = oct->instr_queue[i];
128 if (atomic_read(&iq->instr_pending)) {
129 spin_lock_bh(&iq->lock);
131 iq->octeon_read_index = iq->host_write_index;
132 iq->stats.instr_processed +=
133 atomic_read(&iq->instr_pending);
134 lio_process_iq_request_list(oct, iq, 0);
135 spin_unlock_bh(&iq->lock);
139 /* Force all pending ordered list requests to time out. */
140 lio_process_ordered_list(oct, 1);
142 /* We do not need to wait for output queue packets to be processed. */
146 * \brief Cleanup PCI AER uncorrectable error status
147 * @param dev Pointer to PCI device
149 static void cleanup_aer_uncorrect_error_status(struct pci_dev *dev)
154 pr_info("%s :\n", __func__);
156 pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status);
157 pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, &mask);
158 if (dev->error_state == pci_channel_io_normal)
159 status &= ~mask; /* Clear corresponding nonfatal bits */
161 status &= mask; /* Clear corresponding fatal bits */
162 pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, status);
166 * \brief Stop all PCI IO to a given device
167 * @param dev Pointer to Octeon device
169 static void stop_pci_io(struct octeon_device *oct)
171 struct msix_entry *msix_entries;
174 /* No more instructions will be forwarded. */
175 atomic_set(&oct->status, OCT_DEV_IN_RESET);
177 for (i = 0; i < oct->ifcount; i++)
178 netif_device_detach(oct->props[i].netdev);
180 /* Disable interrupts */
181 oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR);
183 pcierror_quiesce_device(oct);
185 msix_entries = (struct msix_entry *)oct->msix_entries;
186 for (i = 0; i < oct->num_msix_irqs; i++) {
187 /* clear the affinity_cpumask */
188 irq_set_affinity_hint(msix_entries[i].vector,
190 free_irq(msix_entries[i].vector,
191 &oct->ioq_vector[i]);
193 pci_disable_msix(oct->pci_dev);
194 kfree(oct->msix_entries);
195 oct->msix_entries = NULL;
196 octeon_free_ioq_vector(oct);
198 dev_dbg(&oct->pci_dev->dev, "Device state is now %s\n",
199 lio_get_state_string(&oct->status));
201 /* making it a common function for all OCTEON models */
202 cleanup_aer_uncorrect_error_status(oct->pci_dev);
204 pci_disable_device(oct->pci_dev);
208 * \brief called when PCI error is detected
209 * @param pdev Pointer to PCI device
210 * @param state The current pci connection state
212 * This function is called after a PCI bus error affecting
213 * this device has been detected.
215 static pci_ers_result_t liquidio_pcie_error_detected(struct pci_dev *pdev,
216 pci_channel_state_t state)
218 struct octeon_device *oct = pci_get_drvdata(pdev);
220 /* Non-correctable Non-fatal errors */
221 if (state == pci_channel_io_normal) {
222 dev_err(&oct->pci_dev->dev, "Non-correctable non-fatal error reported:\n");
223 cleanup_aer_uncorrect_error_status(oct->pci_dev);
224 return PCI_ERS_RESULT_CAN_RECOVER;
227 /* Non-correctable Fatal errors */
228 dev_err(&oct->pci_dev->dev, "Non-correctable FATAL reported by PCI AER driver\n");
231 return PCI_ERS_RESULT_DISCONNECT;
234 /* For PCI-E Advanced Error Recovery (AER) Interface */
235 static const struct pci_error_handlers liquidio_vf_err_handler = {
236 .error_detected = liquidio_pcie_error_detected,
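	/* Only .error_detected is implemented; there are no slot_reset or
	 * resume callbacks, so a fatal AER event results in DISCONNECT.
	 */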
239 static const struct pci_device_id liquidio_vf_pci_tbl[] = {
241 PCI_VENDOR_ID_CAVIUM, OCTEON_CN23XX_VF_VID,
242 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0
248 MODULE_DEVICE_TABLE(pci, liquidio_vf_pci_tbl);
250 static struct pci_driver liquidio_vf_pci_driver = {
251 .name = "LiquidIO_VF",
252 .id_table = liquidio_vf_pci_tbl,
253 .probe = liquidio_vf_probe,
254 .remove = liquidio_vf_remove,
255 .err_handler = &liquidio_vf_err_handler, /* For AER */
259 * \brief Print link information
260 * @param netdev network device
262 static void print_link_info(struct net_device *netdev)
264 struct lio *lio = GET_LIO(netdev);
266 if (!ifstate_check(lio, LIO_IFSTATE_RESETTING) &&
267 ifstate_check(lio, LIO_IFSTATE_REGISTERED)) {
268 struct oct_link_info *linfo = &lio->linfo;
270 if (linfo->link.s.link_up) {
271 netif_info(lio, link, lio->netdev, "%d Mbps %s Duplex UP\n",
273 (linfo->link.s.duplex) ? "Full" : "Half");
275 netif_info(lio, link, lio->netdev, "Link Down\n");
281 * \brief Routine to notify MTU change
282 * @param work work_struct data structure
284 static void octnet_link_status_change(struct work_struct *work)
286 struct cavium_wk *wk = (struct cavium_wk *)work;
287 struct lio *lio = (struct lio *)wk->ctxptr;
289 /* lio->linfo.link.s.mtu always contains the max MTU of the lio interface.
290 * This routine is invoked only when the new max MTU of the interface is
291 * less than the current MTU.
294 dev_set_mtu(lio->netdev, lio->linfo.link.s.mtu);
299 * \brief Sets up the mtu status change work
300 * @param netdev network device
302 static int setup_link_status_change_wq(struct net_device *netdev)
304 struct lio *lio = GET_LIO(netdev);
305 struct octeon_device *oct = lio->oct_dev;
307 lio->link_status_wq.wq = alloc_workqueue("link-status",
309 if (!lio->link_status_wq.wq) {
310 dev_err(&oct->pci_dev->dev, "unable to create cavium link status wq\n");
313 INIT_DELAYED_WORK(&lio->link_status_wq.wk.work,
314 octnet_link_status_change);
315 lio->link_status_wq.wk.ctxptr = lio;
320 static void cleanup_link_status_change_wq(struct net_device *netdev)
322 struct lio *lio = GET_LIO(netdev);
324 if (lio->link_status_wq.wq) {
325 cancel_delayed_work_sync(&lio->link_status_wq.wk.work);
326 destroy_workqueue(lio->link_status_wq.wq);
331 * \brief Update link status
332 * @param netdev network device
333 * @param ls link status structure
335 * Called on receipt of a link status response from the core application to
336 * update each interface's link status.
338 static void update_link_status(struct net_device *netdev,
339 union oct_link_status *ls)
341 struct lio *lio = GET_LIO(netdev);
342 int current_max_mtu = lio->linfo.link.s.mtu;
343 struct octeon_device *oct = lio->oct_dev;
345 if ((lio->intf_open) && (lio->linfo.link.u64 != ls->u64)) {
346 lio->linfo.link.u64 = ls->u64;
348 print_link_info(netdev);
351 if (lio->linfo.link.s.link_up) {
352 netif_carrier_on(netdev);
355 netif_carrier_off(netdev);
359 if (lio->linfo.link.s.mtu != current_max_mtu) {
360 dev_info(&oct->pci_dev->dev,
361 "Max MTU Changed from %d to %d\n",
362 current_max_mtu, lio->linfo.link.s.mtu);
363 netdev->max_mtu = lio->linfo.link.s.mtu;
366 if (lio->linfo.link.s.mtu < netdev->mtu) {
367 dev_warn(&oct->pci_dev->dev,
368 "Current MTU is higher than new max MTU; Reducing the current mtu from %d to %d\n",
369 netdev->mtu, lio->linfo.link.s.mtu);
370 queue_delayed_work(lio->link_status_wq.wq,
371 &lio->link_status_wq.wk.work, 0);
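		/* The MTU update is handed off to the link-status workqueue so
		 * that dev_set_mtu() runs from process context rather than
		 * directly from the path that delivers link-status messages
		 * (see octnet_link_status_change()).
		 */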
377 * \brief PCI probe handler
378 * @param pdev PCI device structure
382 liquidio_vf_probe(struct pci_dev *pdev,
383 const struct pci_device_id *ent __attribute__((unused)))
385 struct octeon_device *oct_dev = NULL;
387 oct_dev = octeon_allocate_device(pdev->device,
388 sizeof(struct octeon_device_priv));
391 dev_err(&pdev->dev, "Unable to allocate device\n");
394 oct_dev->msix_on = LIO_FLAG_MSIX_ENABLED;
396 dev_info(&pdev->dev, "Initializing device %x:%x.\n",
397 (u32)pdev->vendor, (u32)pdev->device);
399 /* Assign octeon_device for this device to the private data area. */
400 pci_set_drvdata(pdev, oct_dev);
402 /* set linux specific device pointer */
403 oct_dev->pci_dev = pdev;
405 oct_dev->subsystem_id = pdev->subsystem_vendor |
406 (pdev->subsystem_device << 16);
408 if (octeon_device_init(oct_dev)) {
409 liquidio_vf_remove(pdev);
413 dev_dbg(&oct_dev->pci_dev->dev, "Device is ready\n");
419 * \brief PCI FLR for each Octeon device.
420 * @param oct octeon device
422 static void octeon_pci_flr(struct octeon_device *oct)
424 pci_save_state(oct->pci_dev);
426 pci_cfg_access_lock(oct->pci_dev);
428 /* Quiesce the device completely */
429 pci_write_config_word(oct->pci_dev, PCI_COMMAND,
430 PCI_COMMAND_INTX_DISABLE);
432 pcie_flr(oct->pci_dev);
434 pci_cfg_access_unlock(oct->pci_dev);
436 pci_restore_state(oct->pci_dev);
440 * \brief Destroy resources associated with octeon device
441 * @param pdev PCI device structure
444 static void octeon_destroy_resources(struct octeon_device *oct)
446 struct octeon_device_priv *oct_priv =
447 (struct octeon_device_priv *)oct->priv;
448 struct msix_entry *msix_entries;
451 switch (atomic_read(&oct->status)) {
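	/* Teardown starts at the current init state and deliberately falls
	 * through each earlier state's case label, undoing initialization in
	 * reverse order.
	 */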
452 case OCT_DEV_RUNNING:
453 case OCT_DEV_CORE_OK:
454 /* No more instructions will be forwarded. */
455 atomic_set(&oct->status, OCT_DEV_IN_RESET);
457 oct->app_mode = CVM_DRV_INVALID_APP;
458 dev_dbg(&oct->pci_dev->dev, "Device state is now %s\n",
459 lio_get_state_string(&oct->status));
461 schedule_timeout_uninterruptible(HZ / 10);
464 case OCT_DEV_HOST_OK:
466 case OCT_DEV_IO_QUEUES_DONE:
467 if (lio_wait_for_instr_fetch(oct))
468 dev_err(&oct->pci_dev->dev, "IQ had pending instructions\n");
470 if (wait_for_pending_requests(oct))
471 dev_err(&oct->pci_dev->dev, "There were pending requests\n");
473 /* Disable the input and output queues now. No more packets will
474 * arrive from Octeon, but we should wait for all packet
475 * processing to finish.
477 oct->fn_list.disable_io_queues(oct);
479 if (lio_wait_for_oq_pkts(oct))
480 dev_err(&oct->pci_dev->dev, "OQ had pending packets\n");
482 /* Force all requests waiting to be fetched by OCTEON to complete. */
485 for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
486 struct octeon_instr_queue *iq;
488 if (!(oct->io_qmask.iq & BIT_ULL(i)))
490 iq = oct->instr_queue[i];
492 if (atomic_read(&iq->instr_pending)) {
493 spin_lock_bh(&iq->lock);
495 iq->octeon_read_index = iq->host_write_index;
496 iq->stats.instr_processed +=
497 atomic_read(&iq->instr_pending);
498 lio_process_iq_request_list(oct, iq, 0);
499 spin_unlock_bh(&iq->lock);
503 lio_process_ordered_list(oct, 1);
504 octeon_free_sc_done_list(oct);
505 octeon_free_sc_zombie_list(oct);
508 case OCT_DEV_INTR_SET_DONE:
509 /* Disable interrupts */
510 oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR);
513 msix_entries = (struct msix_entry *)oct->msix_entries;
514 for (i = 0; i < oct->num_msix_irqs; i++) {
515 if (oct->ioq_vector[i].vector) {
516 irq_set_affinity_hint(
517 msix_entries[i].vector,
519 free_irq(msix_entries[i].vector,
520 &oct->ioq_vector[i]);
521 oct->ioq_vector[i].vector = 0;
524 pci_disable_msix(oct->pci_dev);
525 kfree(oct->msix_entries);
526 oct->msix_entries = NULL;
527 kfree(oct->irq_name_storage);
528 oct->irq_name_storage = NULL;
530 /* Soft reset the octeon device before exiting */
531 if (oct->pci_dev->reset_fn)
534 cn23xx_vf_ask_pf_to_do_flr(oct);
537 case OCT_DEV_MSIX_ALLOC_VECTOR_DONE:
538 octeon_free_ioq_vector(oct);
541 case OCT_DEV_MBOX_SETUP_DONE:
542 oct->fn_list.free_mbox(oct);
545 case OCT_DEV_IN_RESET:
546 case OCT_DEV_DROQ_INIT_DONE:
548 for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) {
549 if (!(oct->io_qmask.oq & BIT_ULL(i)))
551 octeon_delete_droq(oct, i);
555 case OCT_DEV_RESP_LIST_INIT_DONE:
556 octeon_delete_response_list(oct);
559 case OCT_DEV_INSTR_QUEUE_INIT_DONE:
560 for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
561 if (!(oct->io_qmask.iq & BIT_ULL(i)))
563 octeon_delete_instr_queue(oct, i);
567 case OCT_DEV_SC_BUFF_POOL_INIT_DONE:
568 octeon_free_sc_buffer_pool(oct);
571 case OCT_DEV_DISPATCH_INIT_DONE:
572 octeon_delete_dispatch_list(oct);
573 cancel_delayed_work_sync(&oct->nic_poll_work.work);
576 case OCT_DEV_PCI_MAP_DONE:
577 octeon_unmap_pci_barx(oct, 0);
578 octeon_unmap_pci_barx(oct, 1);
581 case OCT_DEV_PCI_ENABLE_DONE:
582 pci_clear_master(oct->pci_dev);
583 /* Disable the device, releasing the PCI INT */
584 pci_disable_device(oct->pci_dev);
587 case OCT_DEV_BEGIN_STATE:
588 /* Nothing to be done here either */
592 tasklet_kill(&oct_priv->droq_tasklet);
596 * \brief Send Rx control command
597 * @param lio per-network private data
598 * @param start_stop whether to start or stop
600 static void send_rx_ctrl_cmd(struct lio *lio, int start_stop)
602 struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
603 struct octeon_soft_command *sc;
604 union octnet_cmd *ncmd;
607 if (oct->props[lio->ifidx].rx_on == start_stop)
610 sc = (struct octeon_soft_command *)
611 octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE,
614 ncmd = (union octnet_cmd *)sc->virtdptr;
617 ncmd->s.cmd = OCTNET_CMD_RX_CTL;
618 ncmd->s.param1 = start_stop;
620 octeon_swap_8B_data((u64 *)ncmd, (OCTNET_CMD_SIZE >> 3));
622 sc->iq_no = lio->linfo.txpciq[0].s.q_no;
624 octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
625 OPCODE_NIC_CMD, 0, 0, 0);
627 init_completion(&sc->complete);
628 sc->sc_status = OCTEON_REQUEST_PENDING;
630 retval = octeon_send_soft_command(oct, sc);
631 if (retval == IQ_SEND_FAILED) {
632 netif_info(lio, rx_err, lio->netdev, "Failed to send RX Control message\n");
633 octeon_free_soft_command(oct, sc);
635 /* Sleep on a wait queue till the cond flag indicates that the
636 * response arrived or timed-out.
638 retval = wait_for_sc_completion_timeout(oct, sc, 0);
642 oct->props[lio->ifidx].rx_on = start_stop;
643 WRITE_ONCE(sc->caller_is_done, true);
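	/* Marking caller_is_done lets the soft-command machinery free this
	 * request once its response has been handled (see
	 * octeon_free_sc_done_list()).
	 */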
648 * \brief Destroy NIC device interface
649 * @param oct octeon device
650 * @param ifidx which interface to destroy
652 * Cleanup associated with each interface for an Octeon device when NIC
653 * module is being unloaded or if initialization fails during load.
655 static void liquidio_destroy_nic_device(struct octeon_device *oct, int ifidx)
657 struct net_device *netdev = oct->props[ifidx].netdev;
658 struct octeon_device_priv *oct_priv =
659 (struct octeon_device_priv *)oct->priv;
660 struct napi_struct *napi, *n;
664 dev_err(&oct->pci_dev->dev, "%s No netdevice ptr for index %d\n",
669 lio = GET_LIO(netdev);
671 dev_dbg(&oct->pci_dev->dev, "NIC device cleanup\n");
673 if (atomic_read(&lio->ifstate) & LIO_IFSTATE_RUNNING)
674 liquidio_stop(netdev);
676 if (oct->props[lio->ifidx].napi_enabled == 1) {
677 list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
680 oct->props[lio->ifidx].napi_enabled = 0;
682 oct->droq[0]->ops.poll_mode = 0;
686 list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
687 netif_napi_del(napi);
689 tasklet_enable(&oct_priv->droq_tasklet);
691 if (atomic_read(&lio->ifstate) & LIO_IFSTATE_REGISTERED)
692 unregister_netdev(netdev);
694 cleanup_rx_oom_poll_fn(netdev);
696 cleanup_link_status_change_wq(netdev);
698 lio_delete_glists(lio);
702 oct->props[ifidx].gmxport = -1;
704 oct->props[ifidx].netdev = NULL;
708 * \brief Stop complete NIC functionality
709 * @param oct octeon device
711 static int liquidio_stop_nic_module(struct octeon_device *oct)
716 dev_dbg(&oct->pci_dev->dev, "Stopping network interfaces\n");
718 dev_err(&oct->pci_dev->dev, "Init for Octeon was not completed\n");
722 spin_lock_bh(&oct->cmd_resp_wqlock);
723 oct->cmd_resp_state = OCT_DRV_OFFLINE;
724 spin_unlock_bh(&oct->cmd_resp_wqlock);
726 for (i = 0; i < oct->ifcount; i++) {
727 lio = GET_LIO(oct->props[i].netdev);
728 for (j = 0; j < oct->num_oqs; j++)
729 octeon_unregister_droq_ops(oct,
730 lio->linfo.rxpciq[j].s.q_no);
733 for (i = 0; i < oct->ifcount; i++)
734 liquidio_destroy_nic_device(oct, i);
736 dev_dbg(&oct->pci_dev->dev, "Network interfaces stopped\n");
741 * \brief Cleans up resources at unload time
742 * @param pdev PCI device structure
744 static void liquidio_vf_remove(struct pci_dev *pdev)
746 struct octeon_device *oct_dev = pci_get_drvdata(pdev);
748 dev_dbg(&oct_dev->pci_dev->dev, "Stopping device\n");
750 if (oct_dev->app_mode == CVM_DRV_NIC_APP)
751 liquidio_stop_nic_module(oct_dev);
753 /* Reset the octeon device and cleanup all memory allocated for
754 * the octeon device by driver.
756 octeon_destroy_resources(oct_dev);
758 dev_info(&oct_dev->pci_dev->dev, "Device removed\n");
760 /* This octeon device has been removed. Update the global
761 * data structure to reflect this. Free the device structure.
763 octeon_free_device_mem(oct_dev);
767 * \brief PCI initialization for each Octeon device.
768 * @param oct octeon device
770 static int octeon_pci_os_setup(struct octeon_device *oct)
772 #ifdef CONFIG_PCI_IOV
773 /* setup PCI stuff first */
774 if (!oct->pci_dev->physfn)
778 if (pci_enable_device(oct->pci_dev)) {
779 dev_err(&oct->pci_dev->dev, "pci_enable_device failed\n");
783 if (dma_set_mask_and_coherent(&oct->pci_dev->dev, DMA_BIT_MASK(64))) {
784 dev_err(&oct->pci_dev->dev, "Unexpected DMA device capability\n");
785 pci_disable_device(oct->pci_dev);
789 /* Enable PCI DMA Master. */
790 pci_set_master(oct->pci_dev);
796 * \brief Unmap and free network buffer
799 static void free_netbuf(void *buf)
801 struct octnet_buf_free_info *finfo;
805 finfo = (struct octnet_buf_free_info *)buf;
809 dma_unmap_single(&lio->oct_dev->pci_dev->dev, finfo->dptr, skb->len,
816 * \brief Unmap and free gather buffer
819 static void free_netsgbuf(void *buf)
821 struct octnet_buf_free_info *finfo;
822 struct octnic_gather *g;
827 finfo = (struct octnet_buf_free_info *)buf;
831 frags = skb_shinfo(skb)->nr_frags;
833 dma_unmap_single(&lio->oct_dev->pci_dev->dev,
834 g->sg[0].ptr[0], (skb->len - skb->data_len),
839 skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1];
841 pci_unmap_page((lio->oct_dev)->pci_dev,
842 g->sg[(i >> 2)].ptr[(i & 3)],
843 skb_frag_size(frag), DMA_TO_DEVICE);
847 iq = skb_iq(lio->oct_dev, skb);
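	/* Return the gather descriptor to this IQ's free list so it can be
	 * reused by a later transmit.
	 */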
849 spin_lock(&lio->glist_lock[iq]);
850 list_add_tail(&g->list, &lio->glist[iq]);
851 spin_unlock(&lio->glist_lock[iq]);
857 * \brief Unmap and free gather buffer with response
860 static void free_netsgbuf_with_resp(void *buf)
862 struct octnet_buf_free_info *finfo;
863 struct octeon_soft_command *sc;
864 struct octnic_gather *g;
869 sc = (struct octeon_soft_command *)buf;
870 skb = (struct sk_buff *)sc->callback_arg;
871 finfo = (struct octnet_buf_free_info *)&skb->cb;
875 frags = skb_shinfo(skb)->nr_frags;
877 dma_unmap_single(&lio->oct_dev->pci_dev->dev,
878 g->sg[0].ptr[0], (skb->len - skb->data_len),
883 skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1];
885 pci_unmap_page((lio->oct_dev)->pci_dev,
886 g->sg[(i >> 2)].ptr[(i & 3)],
887 skb_frag_size(frag), DMA_TO_DEVICE);
891 iq = skb_iq(lio->oct_dev, skb);
893 spin_lock(&lio->glist_lock[iq]);
894 list_add_tail(&g->list, &lio->glist[iq]);
895 spin_unlock(&lio->glist_lock[iq]);
897 /* Don't free the skb yet */
901 * \brief Net device open for LiquidIO
902 * @param netdev network device
904 static int liquidio_open(struct net_device *netdev)
906 struct lio *lio = GET_LIO(netdev);
907 struct octeon_device *oct = lio->oct_dev;
908 struct octeon_device_priv *oct_priv =
909 (struct octeon_device_priv *)oct->priv;
910 struct napi_struct *napi, *n;
912 if (!oct->props[lio->ifidx].napi_enabled) {
913 tasklet_disable(&oct_priv->droq_tasklet);
915 list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
918 oct->props[lio->ifidx].napi_enabled = 1;
920 oct->droq[0]->ops.poll_mode = 1;
923 ifstate_set(lio, LIO_IFSTATE_RUNNING);
925 /* Ready for link status updates */
928 netif_info(lio, ifup, lio->netdev, "Interface Open, ready for traffic\n");
931 INIT_DELAYED_WORK(&lio->stats_wk.work, lio_fetch_stats);
932 lio->stats_wk.ctxptr = lio;
933 schedule_delayed_work(&lio->stats_wk.work, msecs_to_jiffies
934 (LIQUIDIO_NDEV_STATS_POLL_TIME_MS));
936 /* tell Octeon to start forwarding packets to host */
937 send_rx_ctrl_cmd(lio, 1);
939 dev_info(&oct->pci_dev->dev, "%s interface is opened\n", netdev->name);
945 * \brief Net device stop for LiquidIO
946 * @param netdev network device
948 static int liquidio_stop(struct net_device *netdev)
950 struct lio *lio = GET_LIO(netdev);
951 struct octeon_device *oct = lio->oct_dev;
952 struct octeon_device_priv *oct_priv =
953 (struct octeon_device_priv *)oct->priv;
954 struct napi_struct *napi, *n;
956 /* tell Octeon to stop forwarding packets to host */
957 send_rx_ctrl_cmd(lio, 0);
959 netif_info(lio, ifdown, lio->netdev, "Stopping interface!\n");
960 /* Inform that netif carrier is down */
962 lio->linfo.link.s.link_up = 0;
964 netif_carrier_off(netdev);
967 ifstate_reset(lio, LIO_IFSTATE_RUNNING);
971 /* Wait for any pending Rx descriptors */
972 if (lio_wait_for_clean_oq(oct))
973 netif_info(lio, rx_err, lio->netdev,
974 "Proceeding with stop interface after partial RX desc processing\n");
976 if (oct->props[lio->ifidx].napi_enabled == 1) {
977 list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
980 oct->props[lio->ifidx].napi_enabled = 0;
982 oct->droq[0]->ops.poll_mode = 0;
984 tasklet_enable(&oct_priv->droq_tasklet);
987 cancel_delayed_work_sync(&lio->stats_wk.work);
989 dev_info(&oct->pci_dev->dev, "%s interface is stopped\n", netdev->name);
995 * \brief Convert net device flags into an octnet_ifflags mask
996 * @param netdev network device
998 * This routine generates an octnet_ifflags mask from the net device flags
999 * received from the OS.
1001 static enum octnet_ifflags get_new_flags(struct net_device *netdev)
1003 enum octnet_ifflags f = OCTNET_IFFLAG_UNICAST;
1005 if (netdev->flags & IFF_PROMISC)
1006 f |= OCTNET_IFFLAG_PROMISC;
1008 if (netdev->flags & IFF_ALLMULTI)
1009 f |= OCTNET_IFFLAG_ALLMULTI;
1011 if (netdev->flags & IFF_MULTICAST) {
1012 f |= OCTNET_IFFLAG_MULTICAST;
1014 /* Accept all multicast addresses if there are more than we can handle. */
1017 if (netdev_mc_count(netdev) > MAX_OCTEON_MULTICAST_ADDR)
1018 f |= OCTNET_IFFLAG_ALLMULTI;
1021 if (netdev->flags & IFF_BROADCAST)
1022 f |= OCTNET_IFFLAG_BROADCAST;
1027 static void liquidio_set_uc_list(struct net_device *netdev)
1029 struct lio *lio = GET_LIO(netdev);
1030 struct octeon_device *oct = lio->oct_dev;
1031 struct octnic_ctrl_pkt nctrl;
1032 struct netdev_hw_addr *ha;
1035 if (lio->netdev_uc_count == netdev_uc_count(netdev))
1038 if (netdev_uc_count(netdev) > MAX_NCTRL_UDD) {
1039 dev_err(&oct->pci_dev->dev, "too many MAC addresses in netdev uc list\n");
1043 lio->netdev_uc_count = netdev_uc_count(netdev);
1045 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
1046 nctrl.ncmd.s.cmd = OCTNET_CMD_SET_UC_LIST;
1047 nctrl.ncmd.s.more = lio->netdev_uc_count;
1048 nctrl.ncmd.s.param1 = oct->vf_num;
1049 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
1050 nctrl.netpndev = (u64)netdev;
1051 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
1053 /* copy all the addresses into the udd */
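	/* Each address occupies one 8-byte UDD word; the 6 MAC bytes are
	 * copied at a 2-byte offset within that word.
	 */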
1054 mac = &nctrl.udd[0];
1055 netdev_for_each_uc_addr(ha, netdev) {
1056 ether_addr_copy(((u8 *)mac) + 2, ha->addr);
1060 octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
1064 * \brief Net device set_multicast_list
1065 * @param netdev network device
1067 static void liquidio_set_mcast_list(struct net_device *netdev)
1069 int mc_count = min(netdev_mc_count(netdev), MAX_OCTEON_MULTICAST_ADDR);
1070 struct lio *lio = GET_LIO(netdev);
1071 struct octeon_device *oct = lio->oct_dev;
1072 struct octnic_ctrl_pkt nctrl;
1073 struct netdev_hw_addr *ha;
1077 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
1079 /* Create a ctrl pkt command to be sent to core app. */
1081 nctrl.ncmd.s.cmd = OCTNET_CMD_SET_MULTI_LIST;
1082 nctrl.ncmd.s.param1 = get_new_flags(netdev);
1083 nctrl.ncmd.s.param2 = mc_count;
1084 nctrl.ncmd.s.more = mc_count;
1085 nctrl.netpndev = (u64)netdev;
1086 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
1088 /* copy all the addresses into the udd */
1090 netdev_for_each_mc_addr(ha, netdev) {
1092 ether_addr_copy(((u8 *)mc) + 2, ha->addr);
1093 /* no need to swap bytes */
1094 if (++mc > &nctrl.udd[mc_count])
1098 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
1100 /* Apparently, any activity in this call from the kernel has to
1101 * be atomic. So we won't wait for response.
1104 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
1106 dev_err(&oct->pci_dev->dev, "DEVFLAGS change failed in core (ret: 0x%x)\n",
1110 liquidio_set_uc_list(netdev);
1114 * \brief Net device set_mac_address
1115 * @param netdev network device
1117 static int liquidio_set_mac(struct net_device *netdev, void *p)
1119 struct sockaddr *addr = (struct sockaddr *)p;
1120 struct lio *lio = GET_LIO(netdev);
1121 struct octeon_device *oct = lio->oct_dev;
1122 struct octnic_ctrl_pkt nctrl;
1125 if (!is_valid_ether_addr(addr->sa_data))
1126 return -EADDRNOTAVAIL;
1128 if (ether_addr_equal(addr->sa_data, netdev->dev_addr))
1131 if (lio->linfo.macaddr_is_admin_asgnd)
1134 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
1137 nctrl.ncmd.s.cmd = OCTNET_CMD_CHANGE_MACADDR;
1138 nctrl.ncmd.s.param1 = 0;
1139 nctrl.ncmd.s.more = 1;
1140 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
1141 nctrl.netpndev = (u64)netdev;
1144 /* The MAC Address is presented in network byte order. */
1145 ether_addr_copy((u8 *)&nctrl.udd[0] + 2, addr->sa_data);
1147 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
1149 dev_err(&oct->pci_dev->dev, "MAC Address change failed\n");
1153 if (nctrl.sc_status ==
1154 FIRMWARE_STATUS_CODE(OCTEON_REQUEST_NO_PERMISSION)) {
1155 dev_err(&oct->pci_dev->dev, "MAC Address change failed: no permission\n");
1159 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
1160 ether_addr_copy(((u8 *)&lio->linfo.hw_addr) + 2, addr->sa_data);
1166 liquidio_get_stats64(struct net_device *netdev,
1167 struct rtnl_link_stats64 *lstats)
1169 struct lio *lio = GET_LIO(netdev);
1170 struct octeon_device *oct;
1171 u64 pkts = 0, drop = 0, bytes = 0;
1172 struct oct_droq_stats *oq_stats;
1173 struct oct_iq_stats *iq_stats;
1174 int i, iq_no, oq_no;
1178 if (ifstate_check(lio, LIO_IFSTATE_RESETTING))
1181 for (i = 0; i < oct->num_iqs; i++) {
1182 iq_no = lio->linfo.txpciq[i].s.q_no;
1183 iq_stats = &oct->instr_queue[iq_no]->stats;
1184 pkts += iq_stats->tx_done;
1185 drop += iq_stats->tx_dropped;
1186 bytes += iq_stats->tx_tot_bytes;
1189 lstats->tx_packets = pkts;
1190 lstats->tx_bytes = bytes;
1191 lstats->tx_dropped = drop;
1197 for (i = 0; i < oct->num_oqs; i++) {
1198 oq_no = lio->linfo.rxpciq[i].s.q_no;
1199 oq_stats = &oct->droq[oq_no]->stats;
1200 pkts += oq_stats->rx_pkts_received;
1201 drop += (oq_stats->rx_dropped +
1202 oq_stats->dropped_nodispatch +
1203 oq_stats->dropped_toomany +
1204 oq_stats->dropped_nomem);
1205 bytes += oq_stats->rx_bytes_received;
1208 lstats->rx_bytes = bytes;
1209 lstats->rx_packets = pkts;
1210 lstats->rx_dropped = drop;
1212 lstats->multicast = oct->link_stats.fromwire.fw_total_mcast;
1214 /* detailed rx_errors: */
1215 lstats->rx_length_errors = oct->link_stats.fromwire.l2_err;
1216 /* received pkt with CRC error */
1217 lstats->rx_crc_errors = oct->link_stats.fromwire.fcs_err;
1218 /* received frame alignment error */
1219 lstats->rx_frame_errors = oct->link_stats.fromwire.frame_err;
1221 lstats->rx_errors = lstats->rx_length_errors + lstats->rx_crc_errors +
1222 lstats->rx_frame_errors;
1224 /* detailed tx_errors */
1225 lstats->tx_aborted_errors = oct->link_stats.fromhost.fw_err_pko;
1226 lstats->tx_carrier_errors = oct->link_stats.fromhost.fw_err_link;
1228 lstats->tx_errors = lstats->tx_aborted_errors +
1229 lstats->tx_carrier_errors;
1233 * \brief Handler for SIOCSHWTSTAMP ioctl
1234 * @param netdev network device
1235 * @param ifr interface request
1236 * @param cmd command
1238 static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr)
1240 struct lio *lio = GET_LIO(netdev);
1241 struct hwtstamp_config conf;
1243 if (copy_from_user(&conf, ifr->ifr_data, sizeof(conf)))
1249 switch (conf.tx_type) {
1250 case HWTSTAMP_TX_ON:
1251 case HWTSTAMP_TX_OFF:
1257 switch (conf.rx_filter) {
1258 case HWTSTAMP_FILTER_NONE:
1260 case HWTSTAMP_FILTER_ALL:
1261 case HWTSTAMP_FILTER_SOME:
1262 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
1263 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
1264 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
1265 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
1266 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
1267 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
1268 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
1269 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
1270 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
1271 case HWTSTAMP_FILTER_PTP_V2_EVENT:
1272 case HWTSTAMP_FILTER_PTP_V2_SYNC:
1273 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
1274 case HWTSTAMP_FILTER_NTP_ALL:
1275 conf.rx_filter = HWTSTAMP_FILTER_ALL;
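		/* Rx timestamping is all-or-nothing here: any supported filter
		 * request is coerced to HWTSTAMP_FILTER_ALL.
		 */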
1281 if (conf.rx_filter == HWTSTAMP_FILTER_ALL)
1282 ifstate_set(lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED);
1285 ifstate_reset(lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED);
1287 return copy_to_user(ifr->ifr_data, &conf, sizeof(conf)) ? -EFAULT : 0;
1291 * \brief ioctl handler
1292 * @param netdev network device
1293 * @param ifr interface request
1294 * @param cmd command
1296 static int liquidio_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
1300 return hwtstamp_ioctl(netdev, ifr);
1306 static void handle_timestamp(struct octeon_device *oct, u32 status, void *buf)
1308 struct sk_buff *skb = (struct sk_buff *)buf;
1309 struct octnet_buf_free_info *finfo;
1310 struct oct_timestamp_resp *resp;
1311 struct octeon_soft_command *sc;
1314 finfo = (struct octnet_buf_free_info *)skb->cb;
1318 resp = (struct oct_timestamp_resp *)sc->virtrptr;
1320 if (status != OCTEON_REQUEST_DONE) {
1321 dev_err(&oct->pci_dev->dev, "Tx timestamp instruction failed. Status: %llx\n",
1322 CVM_CAST64(status));
1323 resp->timestamp = 0;
1326 octeon_swap_8B_data(&resp->timestamp, 1);
1328 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
1329 struct skb_shared_hwtstamps ts;
1330 u64 ns = resp->timestamp;
1332 netif_info(lio, tx_done, lio->netdev,
1333 "Got resulting SKBTX_HW_TSTAMP skb=%p ns=%016llu\n",
1334 skb, (unsigned long long)ns);
1335 ts.hwtstamp = ns_to_ktime(ns + lio->ptp_adjust);
1336 skb_tstamp_tx(skb, &ts);
1339 octeon_free_soft_command(oct, sc);
1340 tx_buffer_free(skb);
1343 /* \brief Send a data packet that will be timestamped
1344 * @param oct octeon device
1345 * @param ndata pointer to network data
1346 * @param finfo pointer to private network data
1348 static int send_nic_timestamp_pkt(struct octeon_device *oct,
1349 struct octnic_data_pkt *ndata,
1350 struct octnet_buf_free_info *finfo,
1353 struct octeon_soft_command *sc;
1361 sc = octeon_alloc_soft_command_resp(oct, &ndata->cmd,
1362 sizeof(struct oct_timestamp_resp));
1366 dev_err(&oct->pci_dev->dev, "No memory for timestamped data packet\n");
1367 return IQ_SEND_FAILED;
1370 if (ndata->reqtype == REQTYPE_NORESP_NET)
1371 ndata->reqtype = REQTYPE_RESP_NET;
1372 else if (ndata->reqtype == REQTYPE_NORESP_NET_SG)
1373 ndata->reqtype = REQTYPE_RESP_NET_SG;
1375 sc->callback = handle_timestamp;
1376 sc->callback_arg = finfo->skb;
1377 sc->iq_no = ndata->q_no;
1379 len = (u32)((struct octeon_instr_ih3 *)(&sc->cmd.cmd3.ih3))->dlengsz;
1381 ring_doorbell = !xmit_more;
1383 retval = octeon_send_command(oct, sc->iq_no, ring_doorbell, &sc->cmd,
1384 sc, len, ndata->reqtype);
1386 if (retval == IQ_SEND_FAILED) {
1387 dev_err(&oct->pci_dev->dev, "timestamp data packet failed status: %x\n",
1389 octeon_free_soft_command(oct, sc);
1391 netif_info(lio, tx_queued, lio->netdev, "Queued timestamp packet\n");
1397 /** \brief Transmit network packets to the Octeon interface
1398 * @param skb skbuff struct to be transmitted
1399 * @param netdev pointer to network device
1400 * @returns whether the packet was transmitted to the device okay or not
1401 * (NETDEV_TX_OK or NETDEV_TX_BUSY)
1403 static netdev_tx_t liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
1405 struct octnet_buf_free_info *finfo;
1406 union octnic_cmd_setup cmdsetup;
1407 struct octnic_data_pkt ndata;
1408 struct octeon_instr_irh *irh;
1409 struct oct_iq_stats *stats;
1410 struct octeon_device *oct;
1411 int q_idx = 0, iq_no = 0;
1412 union tx_info *tx_info;
1420 lio = GET_LIO(netdev);
1423 q_idx = skb_iq(lio->oct_dev, skb);
1425 iq_no = lio->linfo.txpciq[q_idx].s.q_no;
1427 stats = &oct->instr_queue[iq_no]->stats;
1429 /* Check for all conditions in which the current packet cannot be transmitted. */
1432 if (!(atomic_read(&lio->ifstate) & LIO_IFSTATE_RUNNING) ||
1433 (!lio->linfo.link.s.link_up) || (skb->len <= 0)) {
1434 netif_info(lio, tx_err, lio->netdev, "Transmit failed link_status : %d\n",
1435 lio->linfo.link.s.link_up);
1436 goto lio_xmit_failed;
1439 /* Use space in skb->cb to store info used to unmap and free the buffers. */
1442 finfo = (struct octnet_buf_free_info *)skb->cb;
1447 /* Prepare the attributes for the data to be passed to OSI. */
1448 memset(&ndata, 0, sizeof(struct octnic_data_pkt));
1454 if (octnet_iq_is_full(oct, ndata.q_no)) {
1455 /* defer sending if queue is full */
1456 netif_info(lio, tx_err, lio->netdev, "Transmit failed iq:%d full\n",
1458 stats->tx_iq_busy++;
1459 return NETDEV_TX_BUSY;
1462 ndata.datasize = skb->len;
1465 cmdsetup.s.iq_no = iq_no;
1467 if (skb->ip_summed == CHECKSUM_PARTIAL) {
1468 if (skb->encapsulation) {
1469 cmdsetup.s.tnl_csum = 1;
1472 cmdsetup.s.transport_csum = 1;
1475 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
1476 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
1477 cmdsetup.s.timestamp = 1;
1480 if (!skb_shinfo(skb)->nr_frags) {
1481 cmdsetup.s.u.datasize = skb->len;
1482 octnet_prepare_pci_cmd(oct, &ndata.cmd, &cmdsetup, tag);
1483 /* Offload checksum calculation for TCP/UDP packets */
1484 dptr = dma_map_single(&oct->pci_dev->dev,
1488 if (dma_mapping_error(&oct->pci_dev->dev, dptr)) {
1489 dev_err(&oct->pci_dev->dev, "%s DMA mapping error 1\n",
1491 return NETDEV_TX_BUSY;
1494 ndata.cmd.cmd3.dptr = dptr;
1496 ndata.reqtype = REQTYPE_NORESP_NET;
1500 struct octnic_gather *g;
1503 spin_lock(&lio->glist_lock[q_idx]);
1504 g = (struct octnic_gather *)
1505 lio_list_delete_head(&lio->glist[q_idx]);
1506 spin_unlock(&lio->glist_lock[q_idx]);
1509 netif_info(lio, tx_err, lio->netdev,
1510 "Transmit scatter gather: glist null!\n");
1511 goto lio_xmit_failed;
1514 cmdsetup.s.gather = 1;
1515 cmdsetup.s.u.gatherptrs = (skb_shinfo(skb)->nr_frags + 1);
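		/* One gather pointer for the linear skb data plus one per page
		 * fragment.
		 */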
1516 octnet_prepare_pci_cmd(oct, &ndata.cmd, &cmdsetup, tag);
1518 memset(g->sg, 0, g->sg_size);
1520 g->sg[0].ptr[0] = dma_map_single(&oct->pci_dev->dev,
1522 (skb->len - skb->data_len),
1524 if (dma_mapping_error(&oct->pci_dev->dev, g->sg[0].ptr[0])) {
1525 dev_err(&oct->pci_dev->dev, "%s DMA mapping error 2\n",
1527 return NETDEV_TX_BUSY;
1529 add_sg_size(&g->sg[0], (skb->len - skb->data_len), 0);
1531 frags = skb_shinfo(skb)->nr_frags;
1534 frag = &skb_shinfo(skb)->frags[i - 1];
1536 g->sg[(i >> 2)].ptr[(i & 3)] =
1537 skb_frag_dma_map(&oct->pci_dev->dev,
1538 frag, 0, skb_frag_size(frag),
1540 if (dma_mapping_error(&oct->pci_dev->dev,
1541 g->sg[i >> 2].ptr[i & 3])) {
1542 dma_unmap_single(&oct->pci_dev->dev,
1544 skb->len - skb->data_len,
1546 for (j = 1; j < i; j++) {
1547 frag = &skb_shinfo(skb)->frags[j - 1];
1548 dma_unmap_page(&oct->pci_dev->dev,
1549 g->sg[j >> 2].ptr[j & 3],
1550 skb_frag_size(frag),
1553 dev_err(&oct->pci_dev->dev, "%s DMA mapping error 3\n",
1555 return NETDEV_TX_BUSY;
1558 add_sg_size(&g->sg[(i >> 2)], skb_frag_size(frag),
1563 dptr = g->sg_dma_ptr;
1565 ndata.cmd.cmd3.dptr = dptr;
1569 ndata.reqtype = REQTYPE_NORESP_NET_SG;
1572 irh = (struct octeon_instr_irh *)&ndata.cmd.cmd3.irh;
1573 tx_info = (union tx_info *)&ndata.cmd.cmd3.ossp[0];
1575 if (skb_shinfo(skb)->gso_size) {
1576 tx_info->s.gso_size = skb_shinfo(skb)->gso_size;
1577 tx_info->s.gso_segs = skb_shinfo(skb)->gso_segs;
1580 /* HW insert VLAN tag */
1581 if (skb_vlan_tag_present(skb)) {
1582 irh->priority = skb_vlan_tag_get(skb) >> VLAN_PRIO_SHIFT;
1583 irh->vlan = skb_vlan_tag_get(skb) & VLAN_VID_MASK;
1586 xmit_more = netdev_xmit_more();
1588 if (unlikely(cmdsetup.s.timestamp))
1589 status = send_nic_timestamp_pkt(oct, &ndata, finfo, xmit_more);
1591 status = octnet_send_nic_data_pkt(oct, &ndata, xmit_more);
1592 if (status == IQ_SEND_FAILED)
1593 goto lio_xmit_failed;
1595 netif_info(lio, tx_queued, lio->netdev, "Transmit queued successfully\n");
1597 if (status == IQ_SEND_STOP) {
1598 dev_err(&oct->pci_dev->dev, "Rcvd IQ_SEND_STOP signal; stopping IQ-%d\n",
1600 netif_stop_subqueue(netdev, q_idx);
1603 netif_trans_update(netdev);
1605 if (tx_info->s.gso_segs)
1606 stats->tx_done += tx_info->s.gso_segs;
1609 stats->tx_tot_bytes += ndata.datasize;
1611 return NETDEV_TX_OK;
1614 stats->tx_dropped++;
1615 netif_info(lio, tx_err, lio->netdev, "IQ%d Transmit dropped:%llu\n",
1616 iq_no, stats->tx_dropped);
1618 dma_unmap_single(&oct->pci_dev->dev, dptr,
1619 ndata.datasize, DMA_TO_DEVICE);
1621 octeon_ring_doorbell_locked(oct, iq_no);
1623 tx_buffer_free(skb);
1624 return NETDEV_TX_OK;
1627 /** \brief Network device Tx timeout
1628 * @param netdev pointer to network device
1630 static void liquidio_tx_timeout(struct net_device *netdev, unsigned int txqueue)
1634 lio = GET_LIO(netdev);
1636 netif_info(lio, tx_err, lio->netdev,
1637 "Transmit timeout tx_dropped:%ld, waking up queues now!!\n",
1638 netdev->stats.tx_dropped);
1639 netif_trans_update(netdev);
1644 liquidio_vlan_rx_add_vid(struct net_device *netdev,
1645 __be16 proto __attribute__((unused)), u16 vid)
1647 struct lio *lio = GET_LIO(netdev);
1648 struct octeon_device *oct = lio->oct_dev;
1649 struct octnic_ctrl_pkt nctrl;
1652 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
1655 nctrl.ncmd.s.cmd = OCTNET_CMD_ADD_VLAN_FILTER;
1656 nctrl.ncmd.s.param1 = vid;
1657 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
1658 nctrl.netpndev = (u64)netdev;
1659 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
1661 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
1663 dev_err(&oct->pci_dev->dev, "Add VLAN filter failed in core (ret: 0x%x)\n",
1672 liquidio_vlan_rx_kill_vid(struct net_device *netdev,
1673 __be16 proto __attribute__((unused)), u16 vid)
1675 struct lio *lio = GET_LIO(netdev);
1676 struct octeon_device *oct = lio->oct_dev;
1677 struct octnic_ctrl_pkt nctrl;
1680 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
1683 nctrl.ncmd.s.cmd = OCTNET_CMD_DEL_VLAN_FILTER;
1684 nctrl.ncmd.s.param1 = vid;
1685 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
1686 nctrl.netpndev = (u64)netdev;
1687 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
1689 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
1691 dev_err(&oct->pci_dev->dev, "Del VLAN filter failed in core (ret: 0x%x)\n",
1699 /** Sending command to enable/disable RX checksum offload
1700 * @param netdev pointer to network device
1701 * @param command OCTNET_CMD_TNL_RX_CSUM_CTL
1702 * @param rx_cmd_bit OCTNET_CMD_RXCSUM_ENABLE/
1703 * OCTNET_CMD_RXCSUM_DISABLE
1704 * @returns SUCCESS or FAILURE
1706 static int liquidio_set_rxcsum_command(struct net_device *netdev, int command,
1709 struct lio *lio = GET_LIO(netdev);
1710 struct octeon_device *oct = lio->oct_dev;
1711 struct octnic_ctrl_pkt nctrl;
1714 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
1717 nctrl.ncmd.s.cmd = command;
1718 nctrl.ncmd.s.param1 = rx_cmd;
1719 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
1720 nctrl.netpndev = (u64)netdev;
1721 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
1723 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
1725 dev_err(&oct->pci_dev->dev, "DEVFLAGS RXCSUM change failed in core (ret:0x%x)\n",
1733 /** Sending command to add/delete VxLAN UDP port to firmware
1734 * @param netdev pointer to network device
1735 * @param command OCTNET_CMD_VXLAN_PORT_CONFIG
1736 * @param vxlan_port VxLAN port to be added or deleted
1737 * @param vxlan_cmd_bit OCTNET_CMD_VXLAN_PORT_ADD,
1738 * OCTNET_CMD_VXLAN_PORT_DEL
1739 * @returns SUCCESS or FAILURE
1741 static int liquidio_vxlan_port_command(struct net_device *netdev, int command,
1742 u16 vxlan_port, u8 vxlan_cmd_bit)
1744 struct lio *lio = GET_LIO(netdev);
1745 struct octeon_device *oct = lio->oct_dev;
1746 struct octnic_ctrl_pkt nctrl;
1749 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
1752 nctrl.ncmd.s.cmd = command;
1753 nctrl.ncmd.s.more = vxlan_cmd_bit;
1754 nctrl.ncmd.s.param1 = vxlan_port;
1755 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
1756 nctrl.netpndev = (u64)netdev;
1757 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
1759 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
1761 dev_err(&oct->pci_dev->dev,
1762 "DEVFLAGS VxLAN port add/delete failed in core (ret : 0x%x)\n",
1770 static int liquidio_udp_tunnel_set_port(struct net_device *netdev,
1771 unsigned int table, unsigned int entry,
1772 struct udp_tunnel_info *ti)
1774 return liquidio_vxlan_port_command(netdev,
1775 OCTNET_CMD_VXLAN_PORT_CONFIG,
1777 OCTNET_CMD_VXLAN_PORT_ADD);
1780 static int liquidio_udp_tunnel_unset_port(struct net_device *netdev,
1783 struct udp_tunnel_info *ti)
1785 return liquidio_vxlan_port_command(netdev,
1786 OCTNET_CMD_VXLAN_PORT_CONFIG,
1788 OCTNET_CMD_VXLAN_PORT_DEL);
1791 static const struct udp_tunnel_nic_info liquidio_udp_tunnels = {
1792 .set_port = liquidio_udp_tunnel_set_port,
1793 .unset_port = liquidio_udp_tunnel_unset_port,
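	/* One table of 1024 VXLAN entries; the udp_tunnel_nic core invokes
	 * set_port/unset_port as VXLAN ports are added and removed.
	 */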
1795 { .n_entries = 1024, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
1799 /** \brief Net device fix features
1800 * @param netdev pointer to network device
1801 * @param request features requested
1802 * @returns updated features list
1804 static netdev_features_t liquidio_fix_features(struct net_device *netdev,
1805 netdev_features_t request)
1807 struct lio *lio = netdev_priv(netdev);
1809 if ((request & NETIF_F_RXCSUM) &&
1810 !(lio->dev_capability & NETIF_F_RXCSUM))
1811 request &= ~NETIF_F_RXCSUM;
1813 if ((request & NETIF_F_HW_CSUM) &&
1814 !(lio->dev_capability & NETIF_F_HW_CSUM))
1815 request &= ~NETIF_F_HW_CSUM;
1817 if ((request & NETIF_F_TSO) && !(lio->dev_capability & NETIF_F_TSO))
1818 request &= ~NETIF_F_TSO;
1820 if ((request & NETIF_F_TSO6) && !(lio->dev_capability & NETIF_F_TSO6))
1821 request &= ~NETIF_F_TSO6;
1823 if ((request & NETIF_F_LRO) && !(lio->dev_capability & NETIF_F_LRO))
1824 request &= ~NETIF_F_LRO;
1826 /* Disable LRO if RXCSUM is off */
1827 if (!(request & NETIF_F_RXCSUM) && (netdev->features & NETIF_F_LRO) &&
1828 (lio->dev_capability & NETIF_F_LRO))
1829 request &= ~NETIF_F_LRO;
1834 /** \brief Net device set features
1835 * @param netdev pointer to network device
1836 * @param features features to enable/disable
1838 static int liquidio_set_features(struct net_device *netdev,
1839 netdev_features_t features)
1841 struct lio *lio = netdev_priv(netdev);
1843 if (!((netdev->features ^ features) & NETIF_F_LRO))
1846 if ((features & NETIF_F_LRO) && (lio->dev_capability & NETIF_F_LRO))
1847 liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE,
1848 OCTNIC_LROIPV4 | OCTNIC_LROIPV6);
1849 else if (!(features & NETIF_F_LRO) &&
1850 (lio->dev_capability & NETIF_F_LRO))
1851 liquidio_set_feature(netdev, OCTNET_CMD_LRO_DISABLE,
1852 OCTNIC_LROIPV4 | OCTNIC_LROIPV6);
1853 if (!(netdev->features & NETIF_F_RXCSUM) &&
1854 (lio->enc_dev_capability & NETIF_F_RXCSUM) &&
1855 (features & NETIF_F_RXCSUM))
1856 liquidio_set_rxcsum_command(netdev, OCTNET_CMD_TNL_RX_CSUM_CTL,
1857 OCTNET_CMD_RXCSUM_ENABLE);
1858 else if ((netdev->features & NETIF_F_RXCSUM) &&
1859 (lio->enc_dev_capability & NETIF_F_RXCSUM) &&
1860 !(features & NETIF_F_RXCSUM))
1861 liquidio_set_rxcsum_command(netdev, OCTNET_CMD_TNL_RX_CSUM_CTL,
1862 OCTNET_CMD_RXCSUM_DISABLE);
1867 static const struct net_device_ops lionetdevops = {
1868 .ndo_open = liquidio_open,
1869 .ndo_stop = liquidio_stop,
1870 .ndo_start_xmit = liquidio_xmit,
1871 .ndo_get_stats64 = liquidio_get_stats64,
1872 .ndo_set_mac_address = liquidio_set_mac,
1873 .ndo_set_rx_mode = liquidio_set_mcast_list,
1874 .ndo_tx_timeout = liquidio_tx_timeout,
1875 .ndo_vlan_rx_add_vid = liquidio_vlan_rx_add_vid,
1876 .ndo_vlan_rx_kill_vid = liquidio_vlan_rx_kill_vid,
1877 .ndo_change_mtu = liquidio_change_mtu,
1878 .ndo_do_ioctl = liquidio_ioctl,
1879 .ndo_fix_features = liquidio_fix_features,
1880 .ndo_set_features = liquidio_set_features,
1881 .ndo_udp_tunnel_add = udp_tunnel_nic_add_port,
1882 .ndo_udp_tunnel_del = udp_tunnel_nic_del_port,
1885 static int lio_nic_info(struct octeon_recv_info *recv_info, void *buf)
1887 struct octeon_device *oct = (struct octeon_device *)buf;
1888 struct octeon_recv_pkt *recv_pkt = recv_info->recv_pkt;
1889 union oct_link_status *ls;
1893 if (recv_pkt->buffer_size[0] != (sizeof(*ls) + OCT_DROQ_INFO_SIZE)) {
1894 dev_err(&oct->pci_dev->dev, "Malformed NIC_INFO, len=%d, ifidx=%d\n",
1895 recv_pkt->buffer_size[0],
1896 recv_pkt->rh.r_nic_info.gmxport);
1900 gmxport = recv_pkt->rh.r_nic_info.gmxport;
1901 ls = (union oct_link_status *)(get_rbd(recv_pkt->buffer_ptr[0]) +
1902 OCT_DROQ_INFO_SIZE);
1904 octeon_swap_8B_data((u64 *)ls, (sizeof(union oct_link_status)) >> 3);
1906 for (i = 0; i < oct->ifcount; i++) {
1907 if (oct->props[i].gmxport == gmxport) {
1908 update_link_status(oct->props[i].netdev, ls);
1914 for (i = 0; i < recv_pkt->buffer_count; i++)
1915 recv_buffer_free(recv_pkt->buffer_ptr[i]);
1916 octeon_free_recv_info(recv_info);
1921 * \brief Setup network interfaces
1922 * @param octeon_dev octeon device
1924 * Called during init time for each device. It assumes the NIC
1925 * is already up and running. The link information for each
1926 * interface is passed in link_info.
1928 static int setup_nic_devices(struct octeon_device *octeon_dev)
1930 int retval, num_iqueues, num_oqueues;
1931 u32 resp_size, data_size;
1932 struct liquidio_if_cfg_resp *resp;
1933 struct octeon_soft_command *sc;
1934 union oct_nic_if_cfg if_cfg;
1935 struct octdev_props *props;
1936 struct net_device *netdev;
1937 struct lio_version *vdata;
1938 struct lio *lio = NULL;
1939 u8 mac[ETH_ALEN], i, j;
1942 ifidx_or_pfnum = octeon_dev->pf_num;
1944 /* This is to handle link status changes */
1945 octeon_register_dispatch_fn(octeon_dev, OPCODE_NIC, OPCODE_NIC_INFO,
1946 lio_nic_info, octeon_dev);
1948 /* REQTYPE_RESP_NET and REQTYPE_SOFT_COMMAND do not have free functions.
1949 * They are handled directly.
1951 octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_NORESP_NET,
1954 octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_NORESP_NET_SG,
1957 octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_RESP_NET_SG,
1958 free_netsgbuf_with_resp);
1960 for (i = 0; i < octeon_dev->ifcount; i++) {
1961 resp_size = sizeof(struct liquidio_if_cfg_resp);
1962 data_size = sizeof(struct lio_version);
1963 sc = (struct octeon_soft_command *)
1964 octeon_alloc_soft_command(octeon_dev, data_size,
1966 resp = (struct liquidio_if_cfg_resp *)sc->virtrptr;
1967 vdata = (struct lio_version *)sc->virtdptr;
1969 *((u64 *)vdata) = 0;
1970 vdata->major = cpu_to_be16(LIQUIDIO_BASE_MAJOR_VERSION);
1971 vdata->minor = cpu_to_be16(LIQUIDIO_BASE_MINOR_VERSION);
1972 vdata->micro = cpu_to_be16(LIQUIDIO_BASE_MICRO_VERSION);
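		/* The lio_version structure is sent as the data payload
		 * (virtdptr) of the IF_CFG soft command.
		 */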
1976 if_cfg.s.num_iqueues = octeon_dev->sriov_info.rings_per_vf;
1977 if_cfg.s.num_oqueues = octeon_dev->sriov_info.rings_per_vf;
1978 if_cfg.s.base_queue = 0;
1982 octeon_prepare_soft_command(octeon_dev, sc, OPCODE_NIC,
1983 OPCODE_NIC_IF_CFG, 0, if_cfg.u64,
1986 init_completion(&sc->complete);
1987 sc->sc_status = OCTEON_REQUEST_PENDING;
1989 retval = octeon_send_soft_command(octeon_dev, sc);
1990 if (retval == IQ_SEND_FAILED) {
1991 dev_err(&octeon_dev->pci_dev->dev,
1992 "iq/oq config failed status: %x\n", retval);
1993 /* Soft instr is freed by driver in case of failure. */
1994 octeon_free_soft_command(octeon_dev, sc);
1998 /* Sleep on a wait queue till the cond flag indicates that the
1999 * response arrived or timed-out.
2001 retval = wait_for_sc_completion_timeout(octeon_dev, sc, 0);
2005 retval = resp->status;
2007 dev_err(&octeon_dev->pci_dev->dev,
2008 "iq/oq config failed, retval = %d\n", retval);
2009 WRITE_ONCE(sc->caller_is_done, true);
2013 snprintf(octeon_dev->fw_info.liquidio_firmware_version,
2015 resp->cfg_info.liquidio_firmware_version);
2017 octeon_swap_8B_data((u64 *)(&resp->cfg_info),
2018 (sizeof(struct liquidio_if_cfg_info)) >> 3);
2020 num_iqueues = hweight64(resp->cfg_info.iqmask);
2021 num_oqueues = hweight64(resp->cfg_info.oqmask);
2023 if (!(num_iqueues) || !(num_oqueues)) {
2024 dev_err(&octeon_dev->pci_dev->dev,
2025 "Got bad iqueues (%016llx) or oqueues (%016llx) from firmware.\n",
2026 resp->cfg_info.iqmask, resp->cfg_info.oqmask);
2027 WRITE_ONCE(sc->caller_is_done, true);
2028 goto setup_nic_dev_done;
2030 dev_dbg(&octeon_dev->pci_dev->dev,
2031 "interface %d, iqmask %016llx, oqmask %016llx, numiqueues %d, numoqueues %d\n",
2032 i, resp->cfg_info.iqmask, resp->cfg_info.oqmask,
2033 num_iqueues, num_oqueues);
2035 netdev = alloc_etherdev_mq(LIO_SIZE, num_iqueues);
2038 dev_err(&octeon_dev->pci_dev->dev, "Device allocation failed\n");
2039 WRITE_ONCE(sc->caller_is_done, true);
2040 goto setup_nic_dev_done;
2043 SET_NETDEV_DEV(netdev, &octeon_dev->pci_dev->dev);
2045 /* Associate the routines that will handle different netdev operations. */
2048 netdev->netdev_ops = &lionetdevops;
2050 lio = GET_LIO(netdev);
2052 memset(lio, 0, sizeof(struct lio));
2054 lio->ifidx = ifidx_or_pfnum;
2056 props = &octeon_dev->props[i];
2057 props->gmxport = resp->cfg_info.linfo.gmxport;
2058 props->netdev = netdev;
2060 lio->linfo.num_rxpciq = num_oqueues;
2061 lio->linfo.num_txpciq = num_iqueues;
2063 for (j = 0; j < num_oqueues; j++) {
2064 lio->linfo.rxpciq[j].u64 =
2065 resp->cfg_info.linfo.rxpciq[j].u64;
2067 for (j = 0; j < num_iqueues; j++) {
2068 lio->linfo.txpciq[j].u64 =
2069 resp->cfg_info.linfo.txpciq[j].u64;
2072 lio->linfo.hw_addr = resp->cfg_info.linfo.hw_addr;
2073 lio->linfo.gmxport = resp->cfg_info.linfo.gmxport;
2074 lio->linfo.link.u64 = resp->cfg_info.linfo.link.u64;
2075 lio->linfo.macaddr_is_admin_asgnd =
2076 resp->cfg_info.linfo.macaddr_is_admin_asgnd;
2077 lio->linfo.macaddr_spoofchk =
2078 resp->cfg_info.linfo.macaddr_spoofchk;
2080 lio->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
2082 lio->dev_capability = NETIF_F_HIGHDMA
2083 | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM
2084 | NETIF_F_SG | NETIF_F_RXCSUM
2085 | NETIF_F_TSO | NETIF_F_TSO6
2088 netif_set_gso_max_size(netdev, OCTNIC_GSO_MAX_SIZE);
2090 /* Copy of transmit encapsulation capabilities:
2091 * TSO, TSO6, Checksums for this device
2093 lio->enc_dev_capability = NETIF_F_IP_CSUM
2095 | NETIF_F_GSO_UDP_TUNNEL
2096 | NETIF_F_HW_CSUM | NETIF_F_SG
2098 | NETIF_F_TSO | NETIF_F_TSO6
2101 netdev->hw_enc_features =
2102 (lio->enc_dev_capability & ~NETIF_F_LRO);
2103 netdev->udp_tunnel_nic_info = &liquidio_udp_tunnels;
2105 netdev->vlan_features = lio->dev_capability;
2106 /* Add any unchangeable hw features */
2107 lio->dev_capability |= NETIF_F_HW_VLAN_CTAG_FILTER |
2108 NETIF_F_HW_VLAN_CTAG_RX |
2109 NETIF_F_HW_VLAN_CTAG_TX;
2111 netdev->features = (lio->dev_capability & ~NETIF_F_LRO);
2113 netdev->hw_features = lio->dev_capability;
2114 netdev->hw_features &= ~NETIF_F_HW_VLAN_CTAG_RX;
2116 /* MTU range: 68 - 16000 */
2117 netdev->min_mtu = LIO_MIN_MTU_SIZE;
2118 netdev->max_mtu = LIO_MAX_MTU_SIZE;
2120 WRITE_ONCE(sc->caller_is_done, true);
2122 /* Point to the properties for octeon device to which this
2123 * interface belongs.
2125 lio->oct_dev = octeon_dev;
2126 lio->octprops = props;
2127 lio->netdev = netdev;
2129 dev_dbg(&octeon_dev->pci_dev->dev,
2130 "if%d gmx: %d hw_addr: 0x%llx\n", i,
2131 lio->linfo.gmxport, CVM_CAST64(lio->linfo.hw_addr));
2133 /* 64-bit swap required on LE machines */
2134 octeon_swap_8B_data(&lio->linfo.hw_addr, 1);
2135 for (j = 0; j < ETH_ALEN; j++)
2136 mac[j] = *((u8 *)(((u8 *)&lio->linfo.hw_addr) + 2 + j));
2138 /* Copy MAC Address to OS network device structure */
2139 ether_addr_copy(netdev->dev_addr, mac);
2141 if (liquidio_setup_io_queues(octeon_dev, i,
2142 lio->linfo.num_txpciq,
2143 lio->linfo.num_rxpciq)) {
2144 dev_err(&octeon_dev->pci_dev->dev, "I/O queues creation failed\n");
2145 goto setup_nic_dev_free;
2148 ifstate_set(lio, LIO_IFSTATE_DROQ_OPS);
2150 /* For VFs, enable Octeon device interrupts here,
2151 * as this is contingent upon IO queue setup
2153 octeon_dev->fn_list.enable_interrupt(octeon_dev,
2156 /* By default, all interfaces on a single Octeon use the same tx and rx queues. */
2159 lio->txq = lio->linfo.txpciq[0].s.q_no;
2160 lio->rxq = lio->linfo.rxpciq[0].s.q_no;
2162 lio->tx_qsize = octeon_get_tx_qsize(octeon_dev, lio->txq);
2163 lio->rx_qsize = octeon_get_rx_qsize(octeon_dev, lio->rxq);
2165 if (lio_setup_glists(octeon_dev, lio, num_iqueues)) {
2166 dev_err(&octeon_dev->pci_dev->dev,
2167 "Gather list allocation failed\n");
2168 goto setup_nic_dev_free;
2171 /* Register ethtool support */
2172 liquidio_set_ethtool_ops(netdev);
2173 if (lio->oct_dev->chip_id == OCTEON_CN23XX_VF_VID)
2174 octeon_dev->priv_flags = OCT_PRIV_FLAG_DEFAULT;
2176 octeon_dev->priv_flags = 0x0;
2178 if (netdev->features & NETIF_F_LRO)
2179 liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE,
2180 OCTNIC_LROIPV4 | OCTNIC_LROIPV6);
2182 if (setup_link_status_change_wq(netdev))
2183 goto setup_nic_dev_free;
2185 if (setup_rx_oom_poll_fn(netdev))
2186 goto setup_nic_dev_free;
2188 /* Register the network device with the OS */
2189 if (register_netdev(netdev)) {
2190 dev_err(&octeon_dev->pci_dev->dev, "Device registration failed\n");
2191 goto setup_nic_dev_free;
2194 dev_dbg(&octeon_dev->pci_dev->dev,
2195 "Setup NIC ifidx:%d mac:%02x%02x%02x%02x%02x%02x\n",
2196 i, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
2197 netif_carrier_off(netdev);
2198 lio->link_changes++;
2200 ifstate_set(lio, LIO_IFSTATE_REGISTERED);
2202 /* Send a command to firmware to enable Rx checksum offload by default
2203 * at the time of LiquidIO driver setup for this device. */
2206 liquidio_set_rxcsum_command(netdev, OCTNET_CMD_TNL_RX_CSUM_CTL,
2207 OCTNET_CMD_RXCSUM_ENABLE);
2208 liquidio_set_feature(netdev, OCTNET_CMD_TNL_TX_CSUM_CTL,
2209 OCTNET_CMD_TXCSUM_ENABLE);
2211 dev_dbg(&octeon_dev->pci_dev->dev,
2212 "NIC ifidx:%d Setup successful\n", i);
2214 octeon_dev->no_speed_setting = 1;
2222 dev_err(&octeon_dev->pci_dev->dev,
2223 "NIC ifidx:%d Setup failed\n", i);
2224 liquidio_destroy_nic_device(octeon_dev, i);
2233 * \brief initialize the NIC
2234 * @param oct octeon device
2236 * This initialization routine is called once the Octeon device application is up and running. */
2239 static int liquidio_init_nic_module(struct octeon_device *oct)
2241 int num_nic_ports = 1;
2244 dev_dbg(&oct->pci_dev->dev, "Initializing network interfaces\n");
2246 /* Only the default IQ and OQ were initialized;
2247 * initialize the rest as well, and run the port_config command for each port.
2249 oct->ifcount = num_nic_ports;
2250 memset(oct->props, 0,
2251 sizeof(struct octdev_props) * num_nic_ports);
2253 for (i = 0; i < MAX_OCTEON_LINKS; i++)
2254 oct->props[i].gmxport = -1;
2256 retval = setup_nic_devices(oct);
2258 dev_err(&oct->pci_dev->dev, "Setup NIC devices failed\n");
2259 goto octnet_init_failure;
2262 dev_dbg(&oct->pci_dev->dev, "Network interfaces ready\n");
2266 octnet_init_failure:
2274 * \brief Device initialization for each Octeon device that is probed
2275 * @param octeon_dev octeon device
2277 static int octeon_device_init(struct octeon_device *oct)
2282 atomic_set(&oct->status, OCT_DEV_BEGIN_STATE);
2284 /* Enable access to the octeon device and make its DMA capability
2287 if (octeon_pci_os_setup(oct))
2289 atomic_set(&oct->status, OCT_DEV_PCI_ENABLE_DONE);
2291 oct->chip_id = OCTEON_CN23XX_VF_VID;
2292 pci_read_config_dword(oct->pci_dev, 8, &rev_id);
2293 oct->rev_id = rev_id & 0xff;
2295 if (cn23xx_setup_octeon_vf_device(oct))
2298 atomic_set(&oct->status, OCT_DEV_PCI_MAP_DONE);
2300 oct->app_mode = CVM_DRV_NIC_APP;
2302 /* Initialize the dispatch mechanism used to push packets arriving on
2303 * Octeon Output queues.
2305 if (octeon_init_dispatch_list(oct))
2308 atomic_set(&oct->status, OCT_DEV_DISPATCH_INIT_DONE);
2310 if (octeon_set_io_queues_off(oct)) {
2311 dev_err(&oct->pci_dev->dev, "setting io queues off failed\n");
2315 if (oct->fn_list.setup_device_regs(oct)) {
2316 dev_err(&oct->pci_dev->dev, "device registers configuration failed\n");
2320 /* Initialize soft command buffer pool */
2321 if (octeon_setup_sc_buffer_pool(oct)) {
2322 dev_err(&oct->pci_dev->dev, "sc buffer pool allocation failed\n");
2325 atomic_set(&oct->status, OCT_DEV_SC_BUFF_POOL_INIT_DONE);
2327 /* Setup the data structures that manage this Octeon's Input queues. */
2328 if (octeon_setup_instr_queues(oct)) {
2329 dev_err(&oct->pci_dev->dev, "instruction queue initialization failed\n");
2332 atomic_set(&oct->status, OCT_DEV_INSTR_QUEUE_INIT_DONE);
2334 /* Initialize lists to manage the requests of different types that
2335 * arrive from user & kernel applications for this octeon device.
2337 if (octeon_setup_response_list(oct)) {
2338 dev_err(&oct->pci_dev->dev, "Response list allocation failed\n");
2341 atomic_set(&oct->status, OCT_DEV_RESP_LIST_INIT_DONE);
2343 if (octeon_setup_output_queues(oct)) {
2344 dev_err(&oct->pci_dev->dev, "Output queue initialization failed\n");
2347 atomic_set(&oct->status, OCT_DEV_DROQ_INIT_DONE);
2349 if (oct->fn_list.setup_mbox(oct)) {
2350 dev_err(&oct->pci_dev->dev, "Mailbox setup failed\n");
2353 atomic_set(&oct->status, OCT_DEV_MBOX_SETUP_DONE);
2355 if (octeon_allocate_ioq_vector(oct, oct->sriov_info.rings_per_vf)) {
2356 dev_err(&oct->pci_dev->dev, "ioq vector allocation failed\n");
2359 atomic_set(&oct->status, OCT_DEV_MSIX_ALLOC_VECTOR_DONE);
2361 dev_info(&oct->pci_dev->dev, "OCTEON_CN23XX VF: %d ioqs\n",
2362 oct->sriov_info.rings_per_vf);
2364 /* Setup the interrupt handler and record the INT SUM register address*/
2365 if (octeon_setup_interrupt(oct, oct->sriov_info.rings_per_vf))
2368 atomic_set(&oct->status, OCT_DEV_INTR_SET_DONE);
2370 /* ***************************************************************
2371 * The interrupts need to be enabled for the PF<-->VF handshake.
2372 * They are [re]-enabled after the PF<-->VF handshake so that the
2373 * correct OQ tick value is used (i.e. the value retrieved from
2374 * the PF as part of the handshake).
2377 /* Enable Octeon device interrupts */
2378 oct->fn_list.enable_interrupt(oct, OCTEON_ALL_INTR);
2380 if (cn23xx_octeon_pfvf_handshake(oct))
2383 /* Here we [re]-enable the interrupts so that the correct OQ tick value
2384 * is used (i.e. the value that was retrieved during the handshake)
2387 /* Enable Octeon device interrupts */
2388 oct->fn_list.enable_interrupt(oct, OCTEON_ALL_INTR);
2389 /* *************************************************************** */
2391 /* Enable the input and output queues for this Octeon device */
2392 if (oct->fn_list.enable_io_queues(oct)) {
2393 dev_err(&oct->pci_dev->dev, "enabling io queues failed\n");
2397 atomic_set(&oct->status, OCT_DEV_IO_QUEUES_DONE);
2399 atomic_set(&oct->status, OCT_DEV_HOST_OK);
2401 /* Send Credit for Octeon Output queues. Credits are always sent after
2402 * the output queue is enabled.
2404 for (j = 0; j < oct->num_oqs; j++)
2405 writel(oct->droq[j]->max_count, oct->droq[j]->pkts_credit_reg);
2407 /* Packets can start arriving on the output queues from this point. */
2409 atomic_set(&oct->status, OCT_DEV_CORE_OK);
2411 atomic_set(&oct->status, OCT_DEV_RUNNING);
2413 if (liquidio_init_nic_module(oct))
2419 static int __init liquidio_vf_init(void)
2421 octeon_init_device_list(0);
2422 return pci_register_driver(&liquidio_vf_pci_driver);
2425 static void __exit liquidio_vf_exit(void)
2427 pci_unregister_driver(&liquidio_vf_pci_driver);
2429 pr_info("LiquidIO_VF network module is now unloaded\n");
2432 module_init(liquidio_vf_init);
2433 module_exit(liquidio_vf_exit);