/**********************************************************************
 * Author: Cavium, Inc.
 *
 * Contact: support@cavium.com
 *          Please include "LiquidIO" in the subject.
 *
 * Copyright (c) 2003-2016 Cavium, Inc.
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, Version 2, as
 * published by the Free Software Foundation.
 *
 * This file is distributed in the hope that it will be useful, but
 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
 * NONINFRINGEMENT.  See the GNU General Public License for more details.
 ***********************************************************************/
#include <linux/pci.h>
#include <linux/if_vlan.h>
#include "liquidio_common.h"
#include "octeon_droq.h"
#include "octeon_iq.h"
#include "response_manager.h"
#include "octeon_device.h"
#include "octeon_nic.h"
#include "octeon_main.h"
#include "octeon_network.h"

/* OOM task polling interval */
#define LIO_OOM_POLL_INTERVAL_MS 250

#define OCTNIC_MAX_SG  MAX_SKB_FRAGS

/**
 * \brief Delete gather lists
 * @param lio per-network private data
 */
void lio_delete_glists(struct lio *lio)
{
        struct octnic_gather *g;
        int i;

        kfree(lio->glist_lock);
        lio->glist_lock = NULL;

        if (!lio->glist)
                return;

        for (i = 0; i < lio->oct_dev->num_iqs; i++) {
                do {
                        g = (struct octnic_gather *)
                            lio_list_delete_head(&lio->glist[i]);
                        kfree(g);
                } while (g);

                if (lio->glists_virt_base && lio->glists_virt_base[i] &&
                    lio->glists_dma_base && lio->glists_dma_base[i]) {
                        lio_dma_free(lio->oct_dev,
                                     lio->glist_entry_size * lio->tx_qsize,
                                     lio->glists_virt_base[i],
                                     lio->glists_dma_base[i]);
                }
        }

        kfree(lio->glists_virt_base);
        lio->glists_virt_base = NULL;

        kfree(lio->glists_dma_base);
        lio->glists_dma_base = NULL;

        kfree(lio->glist);
        lio->glist = NULL;
}

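/* Note on the layout used by lio_setup_glists() below: each IQ owns one
 * DMA-coherent region (glists_virt_base[i]) that is carved into tx_qsize
 * fixed-size scatter/gather blocks.  The octnic_gather nodes on
 * lio->glist[i] only point into that region, which is why the teardown
 * above frees the list nodes first and then releases each region whole.
 */
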
/**
 * \brief Setup gather lists
 * @param lio per-network private data
 */
int lio_setup_glists(struct octeon_device *oct, struct lio *lio, int num_iqs)
{
        struct octnic_gather *g;
        int i, j;

        lio->glist_lock =
            kcalloc(num_iqs, sizeof(*lio->glist_lock), GFP_KERNEL);
        if (!lio->glist_lock)
                return -ENOMEM;

        lio->glist =
            kcalloc(num_iqs, sizeof(*lio->glist), GFP_KERNEL);
        if (!lio->glist) {
                kfree(lio->glist_lock);
                lio->glist_lock = NULL;
                return -ENOMEM;
        }

        lio->glist_entry_size =
                ROUNDUP8((ROUNDUP4(OCTNIC_MAX_SG) >> 2) * OCT_SG_ENTRY_SIZE);

        /* allocate memory to store virtual and dma base address of
         * per glist consistent memory
         */
        lio->glists_virt_base = kcalloc(num_iqs, sizeof(*lio->glists_virt_base),
                                        GFP_KERNEL);
        lio->glists_dma_base = kcalloc(num_iqs, sizeof(*lio->glists_dma_base),
                                       GFP_KERNEL);
        if (!lio->glists_virt_base || !lio->glists_dma_base) {
                lio_delete_glists(lio);
                return -ENOMEM;
        }

        for (i = 0; i < num_iqs; i++) {
                int numa_node = dev_to_node(&oct->pci_dev->dev);

                spin_lock_init(&lio->glist_lock[i]);

                INIT_LIST_HEAD(&lio->glist[i]);

                lio->glists_virt_base[i] =
                        lio_dma_alloc(oct,
                                      lio->glist_entry_size * lio->tx_qsize,
                                      &lio->glists_dma_base[i]);
                if (!lio->glists_virt_base[i]) {
                        lio_delete_glists(lio);
                        return -ENOMEM;
                }

                for (j = 0; j < lio->tx_qsize; j++) {
                        g = kzalloc_node(sizeof(*g), GFP_KERNEL,
                                         numa_node);
                        if (!g)
                                g = kzalloc(sizeof(*g), GFP_KERNEL);
                        if (!g)
                                break;

                        g->sg = lio->glists_virt_base[i] +
                                (j * lio->glist_entry_size);

                        g->sg_dma_ptr = lio->glists_dma_base[i] +
                                        (j * lio->glist_entry_size);

                        list_add_tail(&g->list, &lio->glist[i]);
                }

                if (j != lio->tx_qsize) {
                        lio_delete_glists(lio);
                        return -ENOMEM;
                }
        }

        return 0;
}

int liquidio_set_feature(struct net_device *netdev, int cmd, u16 param1)
{
        struct lio *lio = GET_LIO(netdev);
        struct octeon_device *oct = lio->oct_dev;
        struct octnic_ctrl_pkt nctrl;
        int ret = 0;

        memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));

        nctrl.ncmd.u64 = 0;
        nctrl.ncmd.s.cmd = cmd;
        nctrl.ncmd.s.param1 = param1;
        nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
        nctrl.netpndev = (u64)netdev;
        nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;

        ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
        if (ret) {
                dev_err(&oct->pci_dev->dev, "Feature change failed in core (ret: 0x%x)\n",
                        ret);
                if (ret > 0)
                        ret = -EIO;
        }
        return ret;
}

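/* The two helpers below hook the driver into Byte Queue Limits (BQL).
 * The kernel expects netdev_tx_sent_queue() at enqueue time to be paired
 * with netdev_tx_completed_queue() at completion time on the same txq;
 * octeon_report_sent_bytes_to_bql() and
 * octeon_report_tx_completion_to_bql() provide those two halves for the
 * LiquidIO instruction queues.
 */
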
void octeon_report_tx_completion_to_bql(void *txq, unsigned int pkts_compl,
                                        unsigned int bytes_compl)
{
        struct netdev_queue *netdev_queue = txq;

        netdev_tx_completed_queue(netdev_queue, pkts_compl, bytes_compl);
}

void octeon_update_tx_completion_counters(void *buf, int reqtype,
                                          unsigned int *pkts_compl,
                                          unsigned int *bytes_compl)
{
        struct octnet_buf_free_info *finfo;
        struct sk_buff *skb = NULL;
        struct octeon_soft_command *sc;

        switch (reqtype) {
        case REQTYPE_NORESP_NET:
        case REQTYPE_NORESP_NET_SG:
                finfo = buf;
                skb = finfo->skb;
                break;

        case REQTYPE_RESP_NET_SG:
        case REQTYPE_RESP_NET:
                sc = buf;
                skb = sc->callback_arg;
                break;

        default:
                return;
        }

        (*pkts_compl)++;
        *bytes_compl += skb->len;
}

int octeon_report_sent_bytes_to_bql(void *buf, int reqtype)
{
        struct octnet_buf_free_info *finfo;
        struct sk_buff *skb;
        struct octeon_soft_command *sc;
        struct netdev_queue *txq;

        switch (reqtype) {
        case REQTYPE_NORESP_NET:
        case REQTYPE_NORESP_NET_SG:
                finfo = buf;
                skb = finfo->skb;
                break;

        case REQTYPE_RESP_NET_SG:
        case REQTYPE_RESP_NET:
                sc = buf;
                skb = sc->callback_arg;
                break;

        default:
                return 0;
        }

        txq = netdev_get_tx_queue(skb->dev, skb_get_queue_mapping(skb));
        netdev_tx_sent_queue(txq, skb->len);

        return netif_xmit_stopped(txq);
}

void liquidio_link_ctrl_cmd_completion(void *nctrl_ptr)
{
        struct octnic_ctrl_pkt *nctrl = (struct octnic_ctrl_pkt *)nctrl_ptr;
        struct net_device *netdev = (struct net_device *)nctrl->netpndev;
        struct lio *lio = GET_LIO(netdev);
        struct octeon_device *oct = lio->oct_dev;
        u8 *mac;

        if (nctrl->sc_status)
                return;

        switch (nctrl->ncmd.s.cmd) {
        case OCTNET_CMD_CHANGE_DEVFLAGS:
        case OCTNET_CMD_SET_MULTI_LIST:
        case OCTNET_CMD_SET_UC_LIST:
                break;

        case OCTNET_CMD_CHANGE_MACADDR:
                mac = ((u8 *)&nctrl->udd[0]) + 2;
                if (nctrl->ncmd.s.param1) {
                        /* vfidx is 0 based, but vf_num (param1) is 1 based */
                        int vfidx = nctrl->ncmd.s.param1 - 1;
                        bool mac_is_admin_assigned = nctrl->ncmd.s.param2;

                        if (mac_is_admin_assigned)
                                netif_info(lio, probe, lio->netdev,
                                           "MAC Address %pM is configured for VF %d\n",
                                           mac, vfidx);
                } else {
                        netif_info(lio, probe, lio->netdev,
                                   " MACAddr changed to %pM\n",
                                   mac);
                }
                break;

        case OCTNET_CMD_GPIO_ACCESS:
                netif_info(lio, probe, lio->netdev, "LED Flashing visual identification\n");
                break;

        case OCTNET_CMD_ID_ACTIVE:
                netif_info(lio, probe, lio->netdev, "LED Flashing visual identification\n");
                break;

        case OCTNET_CMD_LRO_ENABLE:
                dev_info(&oct->pci_dev->dev, "%s LRO Enabled\n", netdev->name);
                break;

        case OCTNET_CMD_LRO_DISABLE:
                dev_info(&oct->pci_dev->dev, "%s LRO Disabled\n",
                         netdev->name);
                break;

        case OCTNET_CMD_VERBOSE_ENABLE:
                dev_info(&oct->pci_dev->dev, "%s Firmware debug enabled\n",
                         netdev->name);
                break;

        case OCTNET_CMD_VERBOSE_DISABLE:
                dev_info(&oct->pci_dev->dev, "%s Firmware debug disabled\n",
                         netdev->name);
                break;

        case OCTNET_CMD_VLAN_FILTER_CTL:
                if (nctrl->ncmd.s.param1)
                        dev_info(&oct->pci_dev->dev,
                                 "%s VLAN filter enabled\n", netdev->name);
                else
                        dev_info(&oct->pci_dev->dev,
                                 "%s VLAN filter disabled\n", netdev->name);
                break;

        case OCTNET_CMD_ADD_VLAN_FILTER:
                dev_info(&oct->pci_dev->dev, "%s VLAN filter %d added\n",
                         netdev->name, nctrl->ncmd.s.param1);
                break;

        case OCTNET_CMD_DEL_VLAN_FILTER:
                dev_info(&oct->pci_dev->dev, "%s VLAN filter %d removed\n",
                         netdev->name, nctrl->ncmd.s.param1);
                break;

        case OCTNET_CMD_SET_SETTINGS:
                dev_info(&oct->pci_dev->dev, "%s settings changed\n",
                         netdev->name);
                break;

        /* Case to handle "OCTNET_CMD_TNL_RX_CSUM_CTL"
         * Command passed by NIC driver
         */
        case OCTNET_CMD_TNL_RX_CSUM_CTL:
                if (nctrl->ncmd.s.param1 == OCTNET_CMD_RXCSUM_ENABLE) {
                        netif_info(lio, probe, lio->netdev,
                                   "RX Checksum Offload Enabled\n");
                } else if (nctrl->ncmd.s.param1 ==
                           OCTNET_CMD_RXCSUM_DISABLE) {
                        netif_info(lio, probe, lio->netdev,
                                   "RX Checksum Offload Disabled\n");
                }
                break;

        /* Case to handle "OCTNET_CMD_TNL_TX_CSUM_CTL"
         * Command passed by NIC driver
         */
        case OCTNET_CMD_TNL_TX_CSUM_CTL:
                if (nctrl->ncmd.s.param1 == OCTNET_CMD_TXCSUM_ENABLE) {
                        netif_info(lio, probe, lio->netdev,
                                   "TX Checksum Offload Enabled\n");
                } else if (nctrl->ncmd.s.param1 ==
                           OCTNET_CMD_TXCSUM_DISABLE) {
                        netif_info(lio, probe, lio->netdev,
                                   "TX Checksum Offload Disabled\n");
                }
                break;

        /* Case to handle "OCTNET_CMD_VXLAN_PORT_CONFIG"
         * Command passed by NIC driver
         */
        case OCTNET_CMD_VXLAN_PORT_CONFIG:
                if (nctrl->ncmd.s.more == OCTNET_CMD_VXLAN_PORT_ADD) {
                        netif_info(lio, probe, lio->netdev,
                                   "VxLAN Destination UDP PORT:%d ADDED\n",
                                   nctrl->ncmd.s.param1);
                } else if (nctrl->ncmd.s.more ==
                           OCTNET_CMD_VXLAN_PORT_DEL) {
                        netif_info(lio, probe, lio->netdev,
                                   "VxLAN Destination UDP PORT:%d DELETED\n",
                                   nctrl->ncmd.s.param1);
                }
                break;

        case OCTNET_CMD_SET_FLOW_CTL:
                netif_info(lio, probe, lio->netdev, "Set RX/TX flow control parameters\n");
                break;

        case OCTNET_CMD_QUEUE_COUNT_CTL:
                netif_info(lio, probe, lio->netdev, "Queue count updated to %d\n",
                           nctrl->ncmd.s.param1);
                break;

        default:
                dev_err(&oct->pci_dev->dev, "%s Unknown cmd %d\n", __func__,
                        nctrl->ncmd.s.cmd);
        }
}

void octeon_pf_changed_vf_macaddr(struct octeon_device *oct, u8 *mac)
{
        bool macaddr_changed = false;
        struct net_device *netdev;
        struct lio *lio;

        rtnl_lock();

        netdev = oct->props[0].netdev;
        lio = GET_LIO(netdev);

        lio->linfo.macaddr_is_admin_asgnd = true;

        if (!ether_addr_equal(netdev->dev_addr, mac)) {
                macaddr_changed = true;
                ether_addr_copy(netdev->dev_addr, mac);
                ether_addr_copy(((u8 *)&lio->linfo.hw_addr) + 2, mac);
                call_netdevice_notifiers(NETDEV_CHANGEADDR, netdev);
        }

        rtnl_unlock();

        if (macaddr_changed)
                dev_info(&oct->pci_dev->dev,
                         "PF changed VF's MAC address to %pM\n", mac);

        /* no need to notify the firmware of the macaddr change because
         * the PF did that already
         */
}

static void octnet_poll_check_rxq_oom_status(struct work_struct *work)
{
        struct cavium_wk *wk = (struct cavium_wk *)work;
        struct lio *lio = (struct lio *)wk->ctxptr;
        struct octeon_device *oct = lio->oct_dev;
        struct octeon_droq *droq;
        int q, q_no;

        if (ifstate_check(lio, LIO_IFSTATE_RUNNING)) {
                for (q = 0; q < lio->linfo.num_rxpciq; q++) {
                        q_no = lio->linfo.rxpciq[q].s.q_no;
                        droq = oct->droq[q_no];
                        if (!droq)
                                continue;
                        octeon_droq_check_oom(droq);
                }
        }
        queue_delayed_work(lio->rxq_status_wq.wq,
                           &lio->rxq_status_wq.wk.work,
                           msecs_to_jiffies(LIO_OOM_POLL_INTERVAL_MS));
}

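/* The OOM poll task above re-arms itself unconditionally every
 * LIO_OOM_POLL_INTERVAL_MS; it is stopped only by the
 * cancel_delayed_work_sync() call in cleanup_rx_oom_poll_fn() below.
 */
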
int setup_rx_oom_poll_fn(struct net_device *netdev)
{
        struct lio *lio = GET_LIO(netdev);
        struct octeon_device *oct = lio->oct_dev;

        lio->rxq_status_wq.wq = alloc_workqueue("rxq-oom-status",
                                                WQ_MEM_RECLAIM, 0);
        if (!lio->rxq_status_wq.wq) {
                dev_err(&oct->pci_dev->dev, "unable to create cavium rxq oom status wq\n");
                return -ENOMEM;
        }
        INIT_DELAYED_WORK(&lio->rxq_status_wq.wk.work,
                          octnet_poll_check_rxq_oom_status);
        lio->rxq_status_wq.wk.ctxptr = lio;
        queue_delayed_work(lio->rxq_status_wq.wq,
                           &lio->rxq_status_wq.wk.work,
                           msecs_to_jiffies(LIO_OOM_POLL_INTERVAL_MS));
        return 0;
}

void cleanup_rx_oom_poll_fn(struct net_device *netdev)
{
        struct lio *lio = GET_LIO(netdev);

        if (lio->rxq_status_wq.wq) {
                cancel_delayed_work_sync(&lio->rxq_status_wq.wk.work);
                flush_workqueue(lio->rxq_status_wq.wq);
                destroy_workqueue(lio->rxq_status_wq.wq);
        }
}

/* Runs in interrupt context. */
static void lio_update_txq_status(struct octeon_device *oct, int iq_num)
{
        struct octeon_instr_queue *iq = oct->instr_queue[iq_num];
        struct net_device *netdev;
        struct lio *lio;

        netdev = oct->props[iq->ifidx].netdev;

        /* This is needed because the first IQ does not have
         * a netdev associated with it.
         */
        if (!netdev)
                return;

        lio = GET_LIO(netdev);
        if (__netif_subqueue_stopped(netdev, iq->q_index) &&
            lio->linfo.link.s.link_up &&
            (!octnet_iq_is_full(oct, iq_num))) {
                netif_wake_subqueue(netdev, iq->q_index);
                INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, iq_num,
                                          tx_restart, 1);
        }
}

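/* A wake that races with the xmit path is harmless here:
 * netif_wake_subqueue() is a no-op on an already-awake subqueue, and a
 * spurious wake should at worst cost one xmit attempt that re-stops the
 * queue when it finds the IQ still full.
 */
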
/**
 * \brief Setup output queue
 * @param oct octeon device
 * @param q_no which queue
 * @param num_descs how many descriptors
 * @param desc_size size of each descriptor
 * @param app_ctx application context
 */
static int octeon_setup_droq(struct octeon_device *oct, int q_no, int num_descs,
                             int desc_size, void *app_ctx)
{
        int ret_val;

        dev_dbg(&oct->pci_dev->dev, "Creating Droq: %d\n", q_no);
        /* droq creation and local register settings. */
        ret_val = octeon_create_droq(oct, q_no, num_descs, desc_size, app_ctx);
        if (ret_val < 0)
                return ret_val;

        if (ret_val == 1) {
                dev_dbg(&oct->pci_dev->dev, "Using default droq %d\n", q_no);
                return 0;
        }

        /* Enable the droq queues */
        octeon_set_droq_pkt_op(oct, q_no, 1);

        /* Send Credit for Octeon Output queues. Credits are always
         * sent after the output queue is enabled.
         */
        writel(oct->droq[q_no]->max_count, oct->droq[q_no]->pkts_credit_reg);

        return ret_val;
}

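/* Writing max_count to pkts_credit_reg hands every descriptor in the ring
 * to the hardware as receive credit; after this point the refill path is
 * expected to return credit in smaller increments as buffers are
 * replenished.
 */
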
/** Routine to push packets arriving on Octeon interface up to network layer.
 * @param oct_id   - octeon device id.
 * @param skbuff   - skbuff struct to be passed to network layer.
 * @param len      - size of total data received.
 * @param rh       - Control header associated with the packet
 * @param param    - additional control data with the packet
 * @param arg      - farg registered in droq_ops
 */
static void
liquidio_push_packet(u32 octeon_id __attribute__((unused)),
                     void *skbuff,
                     u32 len,
                     union octeon_rh *rh,
                     void *param,
                     void *arg)
{
        struct net_device *netdev = (struct net_device *)arg;
        struct octeon_droq *droq =
            container_of(param, struct octeon_droq, napi);
        struct sk_buff *skb = (struct sk_buff *)skbuff;
        struct skb_shared_hwtstamps *shhwtstamps;
        struct napi_struct *napi = param;
        u16 vtag = 0;
        u32 r_dh_off;
        u64 ns;

        if (netdev) {
                struct lio *lio = GET_LIO(netdev);
                struct octeon_device *oct = lio->oct_dev;

                /* Do not proceed if the interface is not in RUNNING state. */
                if (!ifstate_check(lio, LIO_IFSTATE_RUNNING)) {
                        recv_buffer_free(skb);
                        droq->stats.rx_dropped++;
                        return;
                }

                skb->dev = netdev;

                skb_record_rx_queue(skb, droq->q_no);
                if (likely(len > MIN_SKB_SIZE)) {
                        struct octeon_skb_page_info *pg_info;
                        unsigned char *va;

                        pg_info = ((struct octeon_skb_page_info *)(skb->cb));
                        if (pg_info->page) {
                                /* For Paged allocation use the frags */
                                va = page_address(pg_info->page) +
                                        pg_info->page_offset;
                                memcpy(skb->data, va, MIN_SKB_SIZE);
                                skb_put(skb, MIN_SKB_SIZE);
                                skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
                                                pg_info->page,
                                                pg_info->page_offset +
                                                        MIN_SKB_SIZE,
                                                len - MIN_SKB_SIZE,
                                                LIO_RXBUFFER_SZ);
                        }
                } else {
                        struct octeon_skb_page_info *pg_info =
                                ((struct octeon_skb_page_info *)(skb->cb));
                        skb_copy_to_linear_data(skb, page_address(pg_info->page)
                                                + pg_info->page_offset, len);
                        skb_put(skb, len);
                        put_page(pg_info->page);
                }

                r_dh_off = (rh->r_dh.len - 1) * BYTES_PER_DHLEN_UNIT;

                if (oct->ptp_enable) {
                        if (rh->r_dh.has_hwtstamp) {
                                /* timestamp is included from the hardware at
                                 * the beginning of the packet.
                                 */
                                if (ifstate_check
                                    (lio,
                                     LIO_IFSTATE_RX_TIMESTAMP_ENABLED)) {
                                        /* Nanoseconds are in the first 64-bits
                                         * of the packet.
                                         */
                                        memcpy(&ns, (skb->data + r_dh_off),
                                               sizeof(ns));
                                        r_dh_off -= BYTES_PER_DHLEN_UNIT;
                                        shhwtstamps = skb_hwtstamps(skb);
                                        shhwtstamps->hwtstamp =
                                                ns_to_ktime(ns +
                                                            lio->ptp_adjust);
                                }
                        }
                }

                if (rh->r_dh.has_hash) {
                        __be32 *hash_be = (__be32 *)(skb->data + r_dh_off);
                        u32 hash = be32_to_cpu(*hash_be);

                        skb_set_hash(skb, hash, PKT_HASH_TYPE_L4);
                        r_dh_off -= BYTES_PER_DHLEN_UNIT;
                }

                skb_pull(skb, rh->r_dh.len * BYTES_PER_DHLEN_UNIT);
                skb->protocol = eth_type_trans(skb, skb->dev);

                if ((netdev->features & NETIF_F_RXCSUM) &&
                    (((rh->r_dh.encap_on) &&
                      (rh->r_dh.csum_verified & CNNIC_TUN_CSUM_VERIFIED)) ||
                     (!(rh->r_dh.encap_on) &&
                      (rh->r_dh.csum_verified & CNNIC_CSUM_VERIFIED))))
                        /* checksum has already been verified */
                        skb->ip_summed = CHECKSUM_UNNECESSARY;
                else
                        skb->ip_summed = CHECKSUM_NONE;

                /* Setting Encapsulation field on basis of status received
                 * from the firmware
                 */
                if (rh->r_dh.encap_on) {
                        skb->encapsulation = 1;
                        skb->csum_level = 1;
                        droq->stats.rx_vxlan++;
                }

                /* inbound VLAN tag */
                if ((netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
                    rh->r_dh.vlan) {
                        u16 priority = rh->r_dh.priority;
                        u16 vid = rh->r_dh.vlan;

                        vtag = (priority << VLAN_PRIO_SHIFT) | vid;
                        __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vtag);
                }

                napi_gro_receive(napi, skb);

                droq->stats.rx_bytes_received += len -
                        rh->r_dh.len * BYTES_PER_DHLEN_UNIT;
                droq->stats.rx_pkts_received++;
        } else {
                recv_buffer_free(skb);
        }
}

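/* Receive copy-break in liquidio_push_packet() above: frames larger than
 * MIN_SKB_SIZE keep their payload in the receive page and attach it as a
 * frag (only the first MIN_SKB_SIZE bytes are copied so the protocol
 * headers land in the skb's linear area); smaller frames are copied out
 * entirely and the page is released immediately.
 */
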
/**
 * \brief wrapper for calling napi_schedule
 * @param param parameters to pass to napi_schedule
 *
 * Used when scheduling on different CPUs
 */
static void napi_schedule_wrapper(void *param)
{
        struct napi_struct *napi = param;

        napi_schedule(napi);
}

/**
 * \brief callback when receive interrupt occurs and we are in NAPI mode
 * @param arg pointer to octeon output queue
 */
static void liquidio_napi_drv_callback(void *arg)
{
        struct octeon_device *oct;
        struct octeon_droq *droq = arg;
        int this_cpu = smp_processor_id();

        oct = droq->oct_dev;

        if (OCTEON_CN23XX_PF(oct) || OCTEON_CN23XX_VF(oct) ||
            droq->cpu_id == this_cpu) {
                napi_schedule_irqoff(&droq->napi);
        } else {
                call_single_data_t *csd = &droq->csd;

                csd->func = napi_schedule_wrapper;
                csd->info = &droq->napi;
                csd->flags = 0;

                smp_call_function_single_async(droq->cpu_id, csd);
        }
}

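/* On parts other than the 23XX PF/VF, the interrupt may land on a CPU
 * other than the one designated for this DROQ; in that case
 * smp_call_function_single_async() IPIs droq->cpu_id so that NAPI always
 * polls on the designated CPU.
 */
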
/**
 * \brief Entry point for NAPI polling
 * @param napi NAPI structure
 * @param budget maximum number of items to process
 */
static int liquidio_napi_poll(struct napi_struct *napi, int budget)
{
        struct octeon_instr_queue *iq;
        struct octeon_device *oct;
        struct octeon_droq *droq;
        int tx_done = 0, iq_no;
        int work_done;

        droq = container_of(napi, struct octeon_droq, napi);
        oct = droq->oct_dev;
        iq_no = droq->q_no;

        /* Handle Droq descriptors */
        work_done = octeon_droq_process_poll_pkts(oct, droq, budget);

        /* Flush the instruction queue */
        iq = oct->instr_queue[iq_no];
        if (iq) {
                /* TODO: move this check to inside octeon_flush_iq,
                 * once check_db_timeout is removed
                 */
                if (atomic_read(&iq->instr_pending))
                        /* Process iq buffers within the budget limits */
                        tx_done = octeon_flush_iq(oct, iq, budget);
                else
                        tx_done = 1;
                /* Update iq read-index rather than waiting for next interrupt.
                 * Return back if tx_done is false.
                 */
                /* sub-queue status update */
                lio_update_txq_status(oct, iq_no);
        } else {
                dev_err(&oct->pci_dev->dev, "%s: iq (%d) num invalid\n",
                        __func__, iq_no);
        }

#define MAX_REG_CNT 2000000U
        /* force enable interrupt if reg cnts are high to avoid wraparound */
        if ((work_done < budget && tx_done) ||
            (iq && iq->pkt_in_done >= MAX_REG_CNT) ||
            (droq->pkt_count >= MAX_REG_CNT)) {
                napi_complete_done(napi, work_done);

                octeon_enable_irq(droq->oct_dev, droq->q_no);
                return 0;
        }

        return (!tx_done) ? (budget) : (work_done);
}

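/* NAPI contract: after napi_complete_done() the poll function must return
 * a value less than budget (0 here) with interrupts re-armed via
 * octeon_enable_irq(); returning the full budget keeps the poll loop
 * running, which is why an unfinished IQ flush (!tx_done) returns budget.
 */
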
/**
 * \brief Setup input and output queues
 * @param octeon_dev octeon device
 * @param ifidx Interface index
 *
 * Note: Queues are with respect to the octeon device. Thus
 * an input queue is for egress packets, and output queues
 * are for ingress packets.
 */
int liquidio_setup_io_queues(struct octeon_device *octeon_dev, int ifidx,
                             u32 num_iqs, u32 num_oqs)
{
        struct octeon_droq_ops droq_ops;
        struct net_device *netdev;
        struct octeon_droq *droq;
        struct napi_struct *napi;
        int cpu_id_modulus;
        int num_tx_descs;
        struct lio *lio;
        int retval = 0;
        int q, q_no;
        int cpu_id;

        netdev = octeon_dev->props[ifidx].netdev;

        lio = GET_LIO(netdev);

        memset(&droq_ops, 0, sizeof(struct octeon_droq_ops));

        droq_ops.fptr = liquidio_push_packet;
        droq_ops.farg = netdev;

        droq_ops.poll_mode = 1;
        droq_ops.napi_fn = liquidio_napi_drv_callback;
        cpu_id = 0;
        cpu_id_modulus = num_present_cpus();

        /* set up DROQs. */
        for (q = 0; q < num_oqs; q++) {
                q_no = lio->linfo.rxpciq[q].s.q_no;
                dev_dbg(&octeon_dev->pci_dev->dev,
                        "%s index:%d linfo.rxpciq.s.q_no:%d\n",
                        __func__, q, q_no);
                retval = octeon_setup_droq(
                    octeon_dev, q_no,
                    CFG_GET_NUM_RX_DESCS_NIC_IF(octeon_get_conf(octeon_dev),
                                                lio->ifidx),
                    CFG_GET_NUM_RX_BUF_SIZE_NIC_IF(octeon_get_conf(octeon_dev),
                                                   lio->ifidx),
                    NULL);
                if (retval) {
                        dev_err(&octeon_dev->pci_dev->dev,
                                "%s : Runtime DROQ(RxQ) creation failed.\n",
                                __func__);
                        return 1;
                }

                droq = octeon_dev->droq[q_no];
                napi = &droq->napi;
                dev_dbg(&octeon_dev->pci_dev->dev, "netif_napi_add netdev:%llx oct:%llx\n",
                        (u64)netdev, (u64)octeon_dev);
                netif_napi_add(netdev, napi, liquidio_napi_poll, 64);

                /* designate a CPU for this droq */
                droq->cpu_id = cpu_id;
                cpu_id++;
                if (cpu_id >= cpu_id_modulus)
                        cpu_id = 0;

                octeon_register_droq_ops(octeon_dev, q_no, &droq_ops);
        }

        if (OCTEON_CN23XX_PF(octeon_dev) || OCTEON_CN23XX_VF(octeon_dev)) {
                /* 23XX PF/VF can send/recv control messages (via the first
                 * PF/VF-owned droq) from the firmware even if the ethX
                 * interface is down, so that's why poll_mode must be off
                 * for the first droq.
                 */
                octeon_dev->droq[0]->ops.poll_mode = 0;
        }

        /* set up IQs. */
        for (q = 0; q < num_iqs; q++) {
                num_tx_descs = CFG_GET_NUM_TX_DESCS_NIC_IF(
                    octeon_get_conf(octeon_dev), lio->ifidx);
                retval = octeon_setup_iq(octeon_dev, ifidx, q,
                                         lio->linfo.txpciq[q], num_tx_descs,
                                         netdev_get_tx_queue(netdev, q));
                if (retval) {
                        dev_err(&octeon_dev->pci_dev->dev,
                                " %s : Runtime IQ(TxQ) creation failed.\n",
                                __func__);
                        return 1;
                }

                /* XPS */
                if (!OCTEON_CN23XX_VF(octeon_dev) && octeon_dev->msix_on &&
                    octeon_dev->ioq_vector) {
                        struct octeon_ioq_vector *ioq_vector;

                        ioq_vector = &octeon_dev->ioq_vector[q];
                        netif_set_xps_queue(netdev,
                                            &ioq_vector->affinity_mask,
                                            ioq_vector->iq_index);
                }
        }

        return 0;
}

static
int liquidio_schedule_msix_droq_pkt_handler(struct octeon_droq *droq, u64 ret)
{
        struct octeon_device *oct = droq->oct_dev;
        struct octeon_device_priv *oct_priv =
            (struct octeon_device_priv *)oct->priv;

        if (droq->ops.poll_mode) {
                droq->ops.napi_fn(droq);
        } else {
                if (ret & MSIX_PO_INT) {
                        if (OCTEON_CN23XX_VF(oct))
                                dev_err(&oct->pci_dev->dev,
                                        "should not come here should not get rx when poll mode = 0 for vf\n");
                        tasklet_schedule(&oct_priv->droq_tasklet);
                        return 1;
                }
                /* this will be flushed periodically by check iq db */
                if (ret & MSIX_PI_INT)
                        return 0;
        }

        return 0;
}

static irqreturn_t
liquidio_msix_intr_handler(int irq __attribute__((unused)), void *dev)
{
        struct octeon_ioq_vector *ioq_vector = (struct octeon_ioq_vector *)dev;
        struct octeon_device *oct = ioq_vector->oct_dev;
        struct octeon_droq *droq = oct->droq[ioq_vector->droq_index];
        u64 ret;

        ret = oct->fn_list.msix_interrupt_handler(ioq_vector);

        if (ret & MSIX_PO_INT || ret & MSIX_PI_INT)
                liquidio_schedule_msix_droq_pkt_handler(droq, ret);

        return IRQ_HANDLED;
}

/**
 * \brief Droq packet processor scheduler
 * @param oct octeon device
 */
static void liquidio_schedule_droq_pkt_handlers(struct octeon_device *oct)
{
        struct octeon_device_priv *oct_priv =
                (struct octeon_device_priv *)oct->priv;
        struct octeon_droq *droq;
        u64 oq_no;

        if (oct->int_status & OCT_DEV_INTR_PKT_DATA) {
                for (oq_no = 0; oq_no < MAX_OCTEON_OUTPUT_QUEUES(oct);
                     oq_no++) {
                        if (!(oct->droq_intr & BIT_ULL(oq_no)))
                                continue;

                        droq = oct->droq[oq_no];

                        if (droq->ops.poll_mode) {
                                droq->ops.napi_fn(droq);
                                oct_priv->napi_mask |= (1 << oq_no);
                        } else {
                                tasklet_schedule(&oct_priv->droq_tasklet);
                        }
                }
        }
}

/**
 * \brief Interrupt handler for octeon
 * @param irq unused
 * @param dev octeon device
 */
static
irqreturn_t liquidio_legacy_intr_handler(int irq __attribute__((unused)),
                                         void *dev)
{
        struct octeon_device *oct = (struct octeon_device *)dev;
        irqreturn_t ret;

        /* Disable our interrupts for the duration of ISR */
        oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR);

        ret = oct->fn_list.process_interrupt_regs(oct);

        if (ret == IRQ_HANDLED)
                liquidio_schedule_droq_pkt_handlers(oct);

        /* Re-enable our interrupts */
        if (!(atomic_read(&oct->status) == OCT_DEV_IN_RESET))
                oct->fn_list.enable_interrupt(oct, OCTEON_ALL_INTR);

        return ret;
}

/**
 * \brief Setup interrupt for octeon device
 * @param oct octeon device
 *
 * Enable interrupt in Octeon device as given in the PCI interrupt mask.
 */
int octeon_setup_interrupt(struct octeon_device *oct, u32 num_ioqs)
{
        struct msix_entry *msix_entries;
        char *queue_irq_names = NULL;
        int i, num_interrupts = 0;
        int num_alloc_ioq_vectors;
        char *aux_irq_name = NULL;
        int num_ioq_vectors;
        int irqret, err;

        if (oct->msix_on) {
                oct->num_msix_irqs = num_ioqs;
                if (OCTEON_CN23XX_PF(oct)) {
                        num_interrupts = MAX_IOQ_INTERRUPTS_PER_PF + 1;

                        /* one non ioq interrupt for handling
                         * sli_mac_pf_int_sum
                         */
                        oct->num_msix_irqs += 1;
                } else if (OCTEON_CN23XX_VF(oct)) {
                        num_interrupts = MAX_IOQ_INTERRUPTS_PER_VF;
                }

                /* allocate storage for the names assigned to each irq */
                oct->irq_name_storage =
                        kcalloc(num_interrupts, INTRNAMSIZ, GFP_KERNEL);
                if (!oct->irq_name_storage) {
                        dev_err(&oct->pci_dev->dev, "Irq name storage alloc failed...\n");
                        return -ENOMEM;
                }

                queue_irq_names = oct->irq_name_storage;

                if (OCTEON_CN23XX_PF(oct))
                        aux_irq_name = &queue_irq_names
                                [IRQ_NAME_OFF(MAX_IOQ_INTERRUPTS_PER_PF)];

                oct->msix_entries = kcalloc(oct->num_msix_irqs,
                                            sizeof(struct msix_entry),
                                            GFP_KERNEL);
                if (!oct->msix_entries) {
                        dev_err(&oct->pci_dev->dev, "Memory Alloc failed...\n");
                        kfree(oct->irq_name_storage);
                        oct->irq_name_storage = NULL;
                        return -ENOMEM;
                }

                msix_entries = (struct msix_entry *)oct->msix_entries;

                /*Assumption is that pf msix vectors start from pf srn to pf to
                 * trs and not from 0. if not change this code
                 */
                if (OCTEON_CN23XX_PF(oct)) {
                        for (i = 0; i < oct->num_msix_irqs - 1; i++)
                                msix_entries[i].entry =
                                        oct->sriov_info.pf_srn + i;

                        msix_entries[oct->num_msix_irqs - 1].entry =
                                oct->sriov_info.trs;
                } else if (OCTEON_CN23XX_VF(oct)) {
                        for (i = 0; i < oct->num_msix_irqs; i++)
                                msix_entries[i].entry = i;
                }
                num_alloc_ioq_vectors = pci_enable_msix_range(
                                                oct->pci_dev, msix_entries,
                                                oct->num_msix_irqs,
                                                oct->num_msix_irqs);
                if (num_alloc_ioq_vectors < 0) {
                        dev_err(&oct->pci_dev->dev, "unable to Allocate MSI-X interrupts\n");
                        kfree(oct->msix_entries);
                        oct->msix_entries = NULL;
                        kfree(oct->irq_name_storage);
                        oct->irq_name_storage = NULL;
                        return num_alloc_ioq_vectors;
                }

                dev_dbg(&oct->pci_dev->dev, "OCTEON: Enough MSI-X interrupts are allocated...\n");

                num_ioq_vectors = oct->num_msix_irqs;
                /** For PF, there is one non-ioq interrupt handler */
                if (OCTEON_CN23XX_PF(oct)) {
                        num_ioq_vectors -= 1;

                        snprintf(aux_irq_name, INTRNAMSIZ,
                                 "LiquidIO%u-pf%u-aux", oct->octeon_id,
                                 oct->pf_num);
                        irqret = request_irq(
                                        msix_entries[num_ioq_vectors].vector,
                                        liquidio_legacy_intr_handler, 0,
                                        aux_irq_name, oct);
                        if (irqret) {
                                dev_err(&oct->pci_dev->dev,
                                        "Request_irq failed for MSIX interrupt Error: %d\n",
                                        irqret);
                                pci_disable_msix(oct->pci_dev);
                                kfree(oct->msix_entries);
                                kfree(oct->irq_name_storage);
                                oct->irq_name_storage = NULL;
                                oct->msix_entries = NULL;
                                return irqret;
                        }
                }
                for (i = 0 ; i < num_ioq_vectors ; i++) {
                        if (OCTEON_CN23XX_PF(oct))
                                snprintf(&queue_irq_names[IRQ_NAME_OFF(i)],
                                         INTRNAMSIZ, "LiquidIO%u-pf%u-rxtx-%u",
                                         oct->octeon_id, oct->pf_num, i);

                        if (OCTEON_CN23XX_VF(oct))
                                snprintf(&queue_irq_names[IRQ_NAME_OFF(i)],
                                         INTRNAMSIZ, "LiquidIO%u-vf%u-rxtx-%u",
                                         oct->octeon_id, oct->vf_num, i);

                        irqret = request_irq(msix_entries[i].vector,
                                             liquidio_msix_intr_handler, 0,
                                             &queue_irq_names[IRQ_NAME_OFF(i)],
                                             &oct->ioq_vector[i]);

                        if (irqret) {
                                dev_err(&oct->pci_dev->dev,
                                        "Request_irq failed for MSIX interrupt Error: %d\n",
                                        irqret);
                                /** Freeing the non-ioq irq vector here . */
                                free_irq(msix_entries[num_ioq_vectors].vector,
                                         oct);

                                while (i) {
                                        i--;
                                        /** clearing affinity mask. */
                                        irq_set_affinity_hint(
                                                msix_entries[i].vector,
                                                NULL);
                                        free_irq(msix_entries[i].vector,
                                                 &oct->ioq_vector[i]);
                                }
                                pci_disable_msix(oct->pci_dev);
                                kfree(oct->msix_entries);
                                kfree(oct->irq_name_storage);
                                oct->irq_name_storage = NULL;
                                oct->msix_entries = NULL;
                                return irqret;
                        }
                        oct->ioq_vector[i].vector = msix_entries[i].vector;
                        /* assign the cpu mask for this msix interrupt vector */
                        irq_set_affinity_hint(msix_entries[i].vector,
                                              &oct->ioq_vector[i].affinity_mask
                                              );
                }
                dev_dbg(&oct->pci_dev->dev, "OCTEON[%d]: MSI-X enabled\n",
                        oct->octeon_id);
        } else {
                err = pci_enable_msi(oct->pci_dev);
                if (err)
                        dev_warn(&oct->pci_dev->dev, "Reverting to legacy interrupts. Error: %d\n",
                                 err);
                else
                        oct->flags |= LIO_FLAG_MSI_ENABLED;

                /* allocate storage for the names assigned to the irq */
                oct->irq_name_storage = kcalloc(1, INTRNAMSIZ, GFP_KERNEL);
                if (!oct->irq_name_storage)
                        return -ENOMEM;

                queue_irq_names = oct->irq_name_storage;

                if (OCTEON_CN23XX_PF(oct))
                        snprintf(&queue_irq_names[IRQ_NAME_OFF(0)], INTRNAMSIZ,
                                 "LiquidIO%u-pf%u-rxtx-%u",
                                 oct->octeon_id, oct->pf_num, 0);

                if (OCTEON_CN23XX_VF(oct))
                        snprintf(&queue_irq_names[IRQ_NAME_OFF(0)], INTRNAMSIZ,
                                 "LiquidIO%u-vf%u-rxtx-%u",
                                 oct->octeon_id, oct->vf_num, 0);

                irqret = request_irq(oct->pci_dev->irq,
                                     liquidio_legacy_intr_handler,
                                     IRQF_SHARED,
                                     &queue_irq_names[IRQ_NAME_OFF(0)], oct);
                if (irqret) {
                        if (oct->flags & LIO_FLAG_MSI_ENABLED)
                                pci_disable_msi(oct->pci_dev);
                        dev_err(&oct->pci_dev->dev, "Request IRQ failed with code: %d\n",
                                irqret);
                        kfree(oct->irq_name_storage);
                        oct->irq_name_storage = NULL;
                        return irqret;
                }
        }
        return 0;
}

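/* Interrupt setup above degrades in order: MSI-X (one vector per IOQ, plus
 * one auxiliary vector on the 23XX PF) -> MSI -> shared legacy INTx, with
 * the last two sharing liquidio_legacy_intr_handler().
 */
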
/**
 * \brief Net device change_mtu
 * @param netdev network device
 */
int liquidio_change_mtu(struct net_device *netdev, int new_mtu)
{
        struct lio *lio = GET_LIO(netdev);
        struct octeon_device *oct = lio->oct_dev;
        struct octeon_soft_command *sc;
        union octnet_cmd *ncmd;
        int ret = 0;

        sc = (struct octeon_soft_command *)
                octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE, 16, 0);
        if (!sc) {
                netif_info(lio, rx_err, lio->netdev,
                           "Failed to allocate soft command\n");
                return -ENOMEM;
        }

        ncmd = (union octnet_cmd *)sc->virtdptr;

        init_completion(&sc->complete);
        sc->sc_status = OCTEON_REQUEST_PENDING;

        ncmd->u64 = 0;
        ncmd->s.cmd = OCTNET_CMD_CHANGE_MTU;
        ncmd->s.param1 = new_mtu;

        octeon_swap_8B_data((u64 *)ncmd, (OCTNET_CMD_SIZE >> 3));

        sc->iq_no = lio->linfo.txpciq[0].s.q_no;

        octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
                                    OPCODE_NIC_CMD, 0, 0, 0);

        ret = octeon_send_soft_command(oct, sc);
        if (ret == IQ_SEND_FAILED) {
                netif_info(lio, rx_err, lio->netdev, "Failed to change MTU\n");
                octeon_free_soft_command(oct, sc);
                return -EINVAL;
        }
        /* Sleep on a wait queue till the cond flag indicates that the
         * response arrived or timed-out.
         */
        ret = wait_for_sc_completion_timeout(oct, sc, 0);
        if (ret)
                return ret;

        if (sc->sc_status) {
                WRITE_ONCE(sc->caller_is_done, true);
                return -EINVAL;
        }

        netdev->mtu = new_mtu;
        lio->mtu = new_mtu;

        WRITE_ONCE(sc->caller_is_done, true);
        return 0;
}

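/* liquidio_change_mtu() above shows the soft-command pattern used for the
 * remaining control operations in this file: init_completion() plus
 * OCTEON_REQUEST_PENDING before the send, wait_for_sc_completion_timeout()
 * to block for the firmware response, and WRITE_ONCE(sc->caller_is_done)
 * once the caller has consumed the response so the command can be freed.
 */
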
int lio_wait_for_clean_oq(struct octeon_device *oct)
{
        int retry = 100, pending_pkts = 0;
        int idx;

        do {
                pending_pkts = 0;

                for (idx = 0; idx < MAX_OCTEON_OUTPUT_QUEUES(oct); idx++) {
                        if (!(oct->io_qmask.oq & BIT_ULL(idx)))
                                continue;
                        pending_pkts +=
                                atomic_read(&oct->droq[idx]->pkts_pending);
                }
                if (pending_pkts > 0)
                        schedule_timeout_uninterruptible(1);

        } while (retry-- && pending_pkts);

        return pending_pkts;
}

static void
octnet_nic_stats_callback(struct octeon_device *oct_dev,
                          u32 status, void *ptr)
{
        struct octeon_soft_command *sc = (struct octeon_soft_command *)ptr;
        struct oct_nic_stats_resp *resp =
            (struct oct_nic_stats_resp *)sc->virtrptr;
        struct nic_rx_stats *rsp_rstats = &resp->stats.fromwire;
        struct nic_tx_stats *rsp_tstats = &resp->stats.fromhost;
        struct nic_rx_stats *rstats = &oct_dev->link_stats.fromwire;
        struct nic_tx_stats *tstats = &oct_dev->link_stats.fromhost;

        if (status != OCTEON_REQUEST_TIMEOUT && !resp->status) {
                octeon_swap_8B_data((u64 *)&resp->stats,
                                    (sizeof(struct oct_link_stats)) >> 3);
                /* RX link-level stats */
                rstats->total_rcvd = rsp_rstats->total_rcvd;
                rstats->bytes_rcvd = rsp_rstats->bytes_rcvd;
                rstats->total_bcst = rsp_rstats->total_bcst;
                rstats->total_mcst = rsp_rstats->total_mcst;
                rstats->runts = rsp_rstats->runts;
                rstats->ctl_rcvd = rsp_rstats->ctl_rcvd;
                /* Accounts for over/under-run of buffers */
                rstats->fifo_err = rsp_rstats->fifo_err;
                rstats->dmac_drop = rsp_rstats->dmac_drop;
                rstats->fcs_err = rsp_rstats->fcs_err;
                rstats->jabber_err = rsp_rstats->jabber_err;
                rstats->l2_err = rsp_rstats->l2_err;
                rstats->frame_err = rsp_rstats->frame_err;
                rstats->red_drops = rsp_rstats->red_drops;

                /* RX firmware stats */
                rstats->fw_total_rcvd = rsp_rstats->fw_total_rcvd;
                rstats->fw_total_fwd = rsp_rstats->fw_total_fwd;
                rstats->fw_total_mcast = rsp_rstats->fw_total_mcast;
                rstats->fw_total_bcast = rsp_rstats->fw_total_bcast;
                rstats->fw_err_pko = rsp_rstats->fw_err_pko;
                rstats->fw_err_link = rsp_rstats->fw_err_link;
                rstats->fw_err_drop = rsp_rstats->fw_err_drop;
                rstats->fw_rx_vxlan = rsp_rstats->fw_rx_vxlan;
                rstats->fw_rx_vxlan_err = rsp_rstats->fw_rx_vxlan_err;

                /* Number of packets that are LROed */
                rstats->fw_lro_pkts = rsp_rstats->fw_lro_pkts;
                /* Number of octets that are LROed */
                rstats->fw_lro_octs = rsp_rstats->fw_lro_octs;
                /* Number of LRO packets formed */
                rstats->fw_total_lro = rsp_rstats->fw_total_lro;
                /* Number of times LRO of packet aborted */
                rstats->fw_lro_aborts = rsp_rstats->fw_lro_aborts;
                rstats->fw_lro_aborts_port = rsp_rstats->fw_lro_aborts_port;
                rstats->fw_lro_aborts_seq = rsp_rstats->fw_lro_aborts_seq;
                rstats->fw_lro_aborts_tsval = rsp_rstats->fw_lro_aborts_tsval;
                rstats->fw_lro_aborts_timer = rsp_rstats->fw_lro_aborts_timer;
                /* intrmod: packet forward rate */
                rstats->fwd_rate = rsp_rstats->fwd_rate;

                /* TX link-level stats */
                tstats->total_pkts_sent = rsp_tstats->total_pkts_sent;
                tstats->total_bytes_sent = rsp_tstats->total_bytes_sent;
                tstats->mcast_pkts_sent = rsp_tstats->mcast_pkts_sent;
                tstats->bcast_pkts_sent = rsp_tstats->bcast_pkts_sent;
                tstats->ctl_sent = rsp_tstats->ctl_sent;
                /* Packets sent after one collision */
                tstats->one_collision_sent = rsp_tstats->one_collision_sent;
                /* Packets sent after multiple collisions */
                tstats->multi_collision_sent = rsp_tstats->multi_collision_sent;
                /* Packets not sent due to max collisions */
                tstats->max_collision_fail = rsp_tstats->max_collision_fail;
                /* Packets not sent due to max deferrals */
                tstats->max_deferral_fail = rsp_tstats->max_deferral_fail;
                /* Accounts for over/under-run of buffers */
                tstats->fifo_err = rsp_tstats->fifo_err;
                tstats->runts = rsp_tstats->runts;
                /* Total number of collisions detected */
                tstats->total_collisions = rsp_tstats->total_collisions;

                /* firmware stats */
                tstats->fw_total_sent = rsp_tstats->fw_total_sent;
                tstats->fw_total_fwd = rsp_tstats->fw_total_fwd;
                tstats->fw_total_mcast_sent = rsp_tstats->fw_total_mcast_sent;
                tstats->fw_total_bcast_sent = rsp_tstats->fw_total_bcast_sent;
                tstats->fw_err_pko = rsp_tstats->fw_err_pko;
                tstats->fw_err_pki = rsp_tstats->fw_err_pki;
                tstats->fw_err_link = rsp_tstats->fw_err_link;
                tstats->fw_err_drop = rsp_tstats->fw_err_drop;
                tstats->fw_tso = rsp_tstats->fw_tso;
                tstats->fw_tso_fwd = rsp_tstats->fw_tso_fwd;
                tstats->fw_err_tso = rsp_tstats->fw_err_tso;
                tstats->fw_tx_vxlan = rsp_tstats->fw_tx_vxlan;
                resp->status = 1;
        } else {
                dev_err(&oct_dev->pci_dev->dev, "sc OPCODE_NIC_PORT_STATS command failed\n");
                resp->status = -1;
        }
}

int lio_fetch_vf_stats(struct lio *lio)
{
        struct octeon_device *oct_dev = lio->oct_dev;
        struct octeon_soft_command *sc;
        struct oct_nic_vf_stats_resp *resp;
        int retval;

        /* Alloc soft command */
        sc = (struct octeon_soft_command *)
                octeon_alloc_soft_command(oct_dev,
                                          0,
                                          sizeof(struct oct_nic_vf_stats_resp),
                                          0);
        if (!sc) {
                dev_err(&oct_dev->pci_dev->dev, "Soft command allocation failed\n");
                retval = -ENOMEM;
                goto lio_fetch_vf_stats_exit;
        }

        resp = (struct oct_nic_vf_stats_resp *)sc->virtrptr;
        memset(resp, 0, sizeof(struct oct_nic_vf_stats_resp));

        init_completion(&sc->complete);
        sc->sc_status = OCTEON_REQUEST_PENDING;

        sc->iq_no = lio->linfo.txpciq[0].s.q_no;

        octeon_prepare_soft_command(oct_dev, sc, OPCODE_NIC,
                                    OPCODE_NIC_VF_PORT_STATS, 0, 0, 0);

        retval = octeon_send_soft_command(oct_dev, sc);
        if (retval == IQ_SEND_FAILED) {
                octeon_free_soft_command(oct_dev, sc);
                goto lio_fetch_vf_stats_exit;
        }

        retval =
                wait_for_sc_completion_timeout(oct_dev, sc,
                                               (2 * LIO_SC_MAX_TMO_MS));
        if (retval) {
                dev_err(&oct_dev->pci_dev->dev,
                        "sc OPCODE_NIC_VF_PORT_STATS command failed\n");
                goto lio_fetch_vf_stats_exit;
        }

        if (sc->sc_status != OCTEON_REQUEST_TIMEOUT && !resp->status) {
                octeon_swap_8B_data((u64 *)&resp->spoofmac_cnt,
                                    (sizeof(u64)) >> 3);

                if (resp->spoofmac_cnt != 0) {
                        dev_warn(&oct_dev->pci_dev->dev,
                                 "%llu Spoofed packets detected\n",
                                 resp->spoofmac_cnt);
                }
        }
        WRITE_ONCE(sc->caller_is_done, 1);

lio_fetch_vf_stats_exit:
        return retval;
}

void lio_fetch_stats(struct work_struct *work)
{
        struct cavium_wk *wk = (struct cavium_wk *)work;
        struct lio *lio = wk->ctxptr;
        struct octeon_device *oct_dev = lio->oct_dev;
        struct octeon_soft_command *sc;
        struct oct_nic_stats_resp *resp;
        unsigned long time_in_jiffies;
        int retval;

        if (OCTEON_CN23XX_PF(oct_dev)) {
                /* report spoofchk every 2 seconds */
                if (!(oct_dev->vfstats_poll % LIO_VFSTATS_POLL) &&
                    (oct_dev->fw_info.app_cap_flags & LIQUIDIO_SPOOFCHK_CAP) &&
                    oct_dev->sriov_info.num_vfs_alloced) {
                        lio_fetch_vf_stats(lio);
                }

                oct_dev->vfstats_poll++;
        }

        /* Alloc soft command */
        sc = (struct octeon_soft_command *)
                octeon_alloc_soft_command(oct_dev,
                                          0,
                                          sizeof(struct oct_nic_stats_resp),
                                          0);
        if (!sc) {
                dev_err(&oct_dev->pci_dev->dev, "Soft command allocation failed\n");
                goto lio_fetch_stats_exit;
        }

        resp = (struct oct_nic_stats_resp *)sc->virtrptr;
        memset(resp, 0, sizeof(struct oct_nic_stats_resp));

        init_completion(&sc->complete);
        sc->sc_status = OCTEON_REQUEST_PENDING;

        sc->iq_no = lio->linfo.txpciq[0].s.q_no;

        octeon_prepare_soft_command(oct_dev, sc, OPCODE_NIC,
                                    OPCODE_NIC_PORT_STATS, 0, 0, 0);

        retval = octeon_send_soft_command(oct_dev, sc);
        if (retval == IQ_SEND_FAILED) {
                octeon_free_soft_command(oct_dev, sc);
                goto lio_fetch_stats_exit;
        }

        retval = wait_for_sc_completion_timeout(oct_dev, sc,
                                                (2 * LIO_SC_MAX_TMO_MS));
        if (retval) {
                dev_err(&oct_dev->pci_dev->dev, "sc OPCODE_NIC_PORT_STATS command failed\n");
                goto lio_fetch_stats_exit;
        }

        octnet_nic_stats_callback(oct_dev, sc->sc_status, sc);
        WRITE_ONCE(sc->caller_is_done, true);

lio_fetch_stats_exit:
        time_in_jiffies = msecs_to_jiffies(LIQUIDIO_NDEV_STATS_POLL_TIME_MS);
        if (ifstate_check(lio, LIO_IFSTATE_RUNNING))
                schedule_delayed_work(&lio->stats_wk.work, time_in_jiffies);

        return;
}

int liquidio_set_speed(struct lio *lio, int speed)
{
        struct octeon_device *oct = lio->oct_dev;
        struct oct_nic_seapi_resp *resp;
        struct octeon_soft_command *sc;
        union octnet_cmd *ncmd;
        int retval;
        u32 var;

        if (oct->speed_setting == speed)
                return 0;

        if (!OCTEON_CN23XX_PF(oct)) {
                dev_err(&oct->pci_dev->dev, "%s: SET SPEED only for PF\n",
                        __func__);
                return -EOPNOTSUPP;
        }

        sc = octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE,
                                       sizeof(struct oct_nic_seapi_resp),
                                       0);
        if (!sc)
                return -ENOMEM;

        ncmd = sc->virtdptr;
        resp = sc->virtrptr;
        memset(resp, 0, sizeof(struct oct_nic_seapi_resp));

        init_completion(&sc->complete);
        sc->sc_status = OCTEON_REQUEST_PENDING;

        ncmd->u64 = 0;
        ncmd->s.cmd = SEAPI_CMD_SPEED_SET;
        ncmd->s.param1 = speed;

        octeon_swap_8B_data((u64 *)ncmd, (OCTNET_CMD_SIZE >> 3));

        sc->iq_no = lio->linfo.txpciq[0].s.q_no;

        octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
                                    OPCODE_NIC_UBOOT_CTL, 0, 0, 0);

        retval = octeon_send_soft_command(oct, sc);
        if (retval == IQ_SEND_FAILED) {
                dev_info(&oct->pci_dev->dev, "Failed to send soft command\n");
                octeon_free_soft_command(oct, sc);
                retval = -EBUSY;
        } else {
                /* Wait for response or timeout */
                retval = wait_for_sc_completion_timeout(oct, sc, 0);
                if (retval)
                        return retval;

                retval = resp->status;
                if (retval) {
                        dev_err(&oct->pci_dev->dev, "%s failed, retval=%d\n",
                                __func__, retval);
                        WRITE_ONCE(sc->caller_is_done, true);
                        return -EIO;
                }

                var = be32_to_cpu((__force __be32)resp->speed);
                if (var != speed) {
                        dev_err(&oct->pci_dev->dev,
                                "%s: setting failed speed= %x, expect %x\n",
                                __func__, var, speed);
                        retval = -EIO;
                }

                oct->speed_setting = var;
                WRITE_ONCE(sc->caller_is_done, true);
        }

        return retval;
}

int liquidio_get_speed(struct lio *lio)
{
        struct octeon_device *oct = lio->oct_dev;
        struct oct_nic_seapi_resp *resp;
        struct octeon_soft_command *sc;
        union octnet_cmd *ncmd;
        int retval;
        u32 var;

        sc = octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE,
                                       sizeof(struct oct_nic_seapi_resp),
                                       0);
        if (!sc)
                return -ENOMEM;

        ncmd = sc->virtdptr;
        resp = sc->virtrptr;
        memset(resp, 0, sizeof(struct oct_nic_seapi_resp));

        init_completion(&sc->complete);
        sc->sc_status = OCTEON_REQUEST_PENDING;

        ncmd->u64 = 0;
        ncmd->s.cmd = SEAPI_CMD_SPEED_GET;

        octeon_swap_8B_data((u64 *)ncmd, (OCTNET_CMD_SIZE >> 3));

        sc->iq_no = lio->linfo.txpciq[0].s.q_no;

        octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
                                    OPCODE_NIC_UBOOT_CTL, 0, 0, 0);

        retval = octeon_send_soft_command(oct, sc);
        if (retval == IQ_SEND_FAILED) {
                dev_info(&oct->pci_dev->dev, "Failed to send soft command\n");
                octeon_free_soft_command(oct, sc);
                retval = -EIO;
        } else {
                retval = wait_for_sc_completion_timeout(oct, sc, 0);
                if (retval)
                        return retval;

                retval = resp->status;
                if (retval) {
                        dev_err(&oct->pci_dev->dev,
                                "%s failed retval=%d\n", __func__, retval);
                        retval = -EIO;
                } else {
                        var = be32_to_cpu((__force __be32)resp->speed);
                        oct->speed_setting = var;
                        if (var == 0xffff) {
                                /* unable to access boot variables
                                 * get the default value based on the NIC type
                                 */
                                if (oct->subsystem_id ==
                                                OCTEON_CN2350_25GB_SUBSYS_ID ||
                                    oct->subsystem_id ==
                                                OCTEON_CN2360_25GB_SUBSYS_ID) {
                                        oct->no_speed_setting = 1;
                                        oct->speed_setting = 25;
                                } else {
                                        oct->speed_setting = 10;
                                }
                        }
                }

                WRITE_ONCE(sc->caller_is_done, true);
        }

        return retval;
}
1635 WRITE_ONCE(sc->caller_is_done, true);