liquidio: link and control commands
[linux-2.6-block.git] drivers/net/ethernet/cavium/liquidio/lio_main.c
1 /**********************************************************************
2 * Author: Cavium, Inc.
3 *
4 * Contact: support@cavium.com
5 *          Please include "LiquidIO" in the subject.
6 *
7 * Copyright (c) 2003-2015 Cavium, Inc.
8 *
9 * This file is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License, Version 2, as
11 * published by the Free Software Foundation.
12 *
13 * This file is distributed in the hope that it will be useful, but
14 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
15 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
16 * NONINFRINGEMENT.  See the GNU General Public License for more
17 * details.
18 *
19 * This file may also be available under a different license from Cavium.
20 * Contact Cavium, Inc. for more information
21 **********************************************************************/
22 #include <linux/version.h>
23 #include <linux/pci.h>
24 #include <linux/firmware.h>
25 #include <linux/ptp_clock_kernel.h>
26 #include <net/vxlan.h>
27 #include "liquidio_common.h"
28 #include "octeon_droq.h"
29 #include "octeon_iq.h"
30 #include "response_manager.h"
31 #include "octeon_device.h"
32 #include "octeon_nic.h"
33 #include "octeon_main.h"
34 #include "octeon_network.h"
35 #include "cn66xx_regs.h"
36 #include "cn66xx_device.h"
37 #include "cn68xx_device.h"
38 #include "cn23xx_pf_device.h"
39 #include "liquidio_image.h"
40
41 MODULE_AUTHOR("Cavium Networks, <support@cavium.com>");
42 MODULE_DESCRIPTION("Cavium LiquidIO Intelligent Server Adapter Driver");
43 MODULE_LICENSE("GPL");
44 MODULE_VERSION(LIQUIDIO_VERSION);
45 MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_210SV_NAME LIO_FW_NAME_SUFFIX);
46 MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_210NV_NAME LIO_FW_NAME_SUFFIX);
47 MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_410NV_NAME LIO_FW_NAME_SUFFIX);
48
49 static int ddr_timeout = 10000;
50 module_param(ddr_timeout, int, 0644);
51 MODULE_PARM_DESC(ddr_timeout,
52                  "Number of milliseconds to wait for DDR initialization. 0 waits for ddr_timeout to be set to a non-zero value before starting to check");
53
54 #define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)
55
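/* Bump a per-input-queue statistics counter (e.g. the tx_restart count used
 * when a stopped Tx queue is woken back up).
 */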
56 #define INCR_INSTRQUEUE_PKT_COUNT(octeon_dev_ptr, iq_no, field, count)  \
57         (octeon_dev_ptr->instr_queue[iq_no]->stats.field += count)
58
59 static int debug = -1;
60 module_param(debug, int, 0644);
61 MODULE_PARM_DESC(debug, "NETIF_MSG debug bits");
62
63 static char fw_type[LIO_MAX_FW_TYPE_LEN];
64 module_param_string(fw_type, fw_type, sizeof(fw_type), 0000);
65 MODULE_PARM_DESC(fw_type, "Type of firmware to be loaded. Default \"nic\"");
66
67 static int conf_type;
68 module_param(conf_type, int, 0);
69 MODULE_PARM_DESC(conf_type, "Select octeon configuration: 0 = default, 1 = ovs");
70
71 static int ptp_enable = 1;
72
73 /* Bit mask values for lio->ifstate */
74 #define   LIO_IFSTATE_DROQ_OPS             0x01
75 #define   LIO_IFSTATE_REGISTERED           0x02
76 #define   LIO_IFSTATE_RUNNING              0x04
77 #define   LIO_IFSTATE_RX_TIMESTAMP_ENABLED 0x08
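/* These bits are set, cleared and tested on lio->ifstate through the
 * ifstate_set(), ifstate_reset() and ifstate_check() helpers below.
 */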
78
79 /* Polling interval for determining when NIC application is alive */
80 #define LIQUIDIO_STARTER_POLL_INTERVAL_MS 100
81
82 /* runtime link query interval */
83 #define LIQUIDIO_LINK_QUERY_INTERVAL_MS         1000
84
85 struct liquidio_if_cfg_context {
86         int octeon_id;
87
88         wait_queue_head_t wc;
89
90         int cond;
91 };
92
93 struct liquidio_if_cfg_resp {
94         u64 rh;
95         struct liquidio_if_cfg_info cfg_info;
96         u64 status;
97 };
98
99 struct oct_link_status_resp {
100         u64 rh;
101         struct oct_link_info link_info;
102         u64 status;
103 };
104
105 struct oct_timestamp_resp {
106         u64 rh;
107         u64 timestamp;
108         u64 status;
109 };
110
111 #define OCT_TIMESTAMP_RESP_SIZE (sizeof(struct oct_timestamp_resp))
112
113 union tx_info {
114         u64 u64;
115         struct {
116 #ifdef __BIG_ENDIAN_BITFIELD
117                 u16 gso_size;
118                 u16 gso_segs;
119                 u32 reserved;
120 #else
121                 u32 reserved;
122                 u16 gso_segs;
123                 u16 gso_size;
124 #endif
125         } s;
126 };
127
128 /** Octeon device properties to be used by the NIC module.
129  * Each octeon device in the system will be represented
130  * by this structure in the NIC module.
131  */
132
133 #define OCTNIC_MAX_SG  (MAX_SKB_FRAGS)
134
135 #define OCTNIC_GSO_MAX_HEADER_SIZE 128
136 #define OCTNIC_GSO_MAX_SIZE                                                    \
137         (CN23XX_DEFAULT_INPUT_JABBER - OCTNIC_GSO_MAX_HEADER_SIZE)
138
139 /** Structure of a node in list of gather components maintained by
140  * NIC driver for each network device.
141  */
142 struct octnic_gather {
143         /** List manipulation. Next and prev pointers. */
144         struct list_head list;
145
146         /** Size of the gather component at sg in bytes. */
147         int sg_size;
148
149         /** Number of bytes that sg was adjusted to make it 8B-aligned. */
150         int adjust;
151
152         /** Gather component that can accommodate max sized fragment list
153          *  received from the IP layer.
154          */
155         struct octeon_sg_entry *sg;
156
157         u64 sg_dma_ptr;
158 };
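/* Note: 'sg' above is the 8-byte-aligned pointer handed to the DMA API, and
 * 'adjust' remembers how far it was advanced from the original allocation so
 * that the buffer can be freed correctly; see setup_glists()/delete_glists().
 */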
159
160 struct handshake {
161         struct completion init;
162         struct completion started;
163         struct pci_dev *pci_dev;
164         int init_ok;
165         int started_ok;
166 };
167
168 struct octeon_device_priv {
169         /** Tasklet structures for this device. */
170         struct tasklet_struct droq_tasklet;
171         unsigned long napi_mask;
172 };
173
174 static int octeon_device_init(struct octeon_device *);
175 static int liquidio_stop(struct net_device *netdev);
176 static void liquidio_remove(struct pci_dev *pdev);
177 static int liquidio_probe(struct pci_dev *pdev,
178                           const struct pci_device_id *ent);
179
180 static struct handshake handshake[MAX_OCTEON_DEVICES];
181 static struct completion first_stage;
182
183 static void octeon_droq_bh(unsigned long pdev)
184 {
185         int q_no;
186         int reschedule = 0;
187         struct octeon_device *oct = (struct octeon_device *)pdev;
188         struct octeon_device_priv *oct_priv =
189                 (struct octeon_device_priv *)oct->priv;
190
191         /* for (q_no = 0; q_no < oct->num_oqs; q_no++) { */
192         for (q_no = 0; q_no < MAX_OCTEON_OUTPUT_QUEUES(oct); q_no++) {
193                 if (!(oct->io_qmask.oq & (1ULL << q_no)))
194                         continue;
195                 reschedule |= octeon_droq_process_packets(oct, oct->droq[q_no],
196                                                           MAX_PACKET_BUDGET);
197                 lio_enable_irq(oct->droq[q_no], NULL);
198
199                 if (OCTEON_CN23XX_PF(oct) && oct->msix_on) {
200                         /* set time and cnt interrupt thresholds for this DROQ
201                          * for NAPI
202                          */
203                         int adjusted_q_no = q_no + oct->sriov_info.pf_srn;
204
205                         octeon_write_csr64(
206                             oct, CN23XX_SLI_OQ_PKT_INT_LEVELS(adjusted_q_no),
207                             0x5700000040ULL);
208                         octeon_write_csr64(
209                             oct, CN23XX_SLI_OQ_PKTS_SENT(adjusted_q_no), 0);
210                 }
211         }
212
213         if (reschedule)
214                 tasklet_schedule(&oct_priv->droq_tasklet);
215 }
216
217 static int lio_wait_for_oq_pkts(struct octeon_device *oct)
218 {
219         struct octeon_device_priv *oct_priv =
220                 (struct octeon_device_priv *)oct->priv;
221         int retry = 100, pkt_cnt = 0, pending_pkts = 0;
222         int i;
223
224         do {
225                 pending_pkts = 0;
226
227                 for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) {
228                         if (!(oct->io_qmask.oq & (1ULL << i)))
229                                 continue;
230                         pkt_cnt += octeon_droq_check_hw_for_pkts(oct->droq[i]);
231                 }
232                 if (pkt_cnt > 0) {
233                         pending_pkts += pkt_cnt;
234                         tasklet_schedule(&oct_priv->droq_tasklet);
235                 }
236                 pkt_cnt = 0;
237                 schedule_timeout_uninterruptible(1);
238
239         } while (retry-- && pending_pkts);
240
241         return pkt_cnt;
242 }
243
244 /**
245  * \brief Forces all IO queues off on a given device
246  * @param oct Pointer to Octeon device
247  */
248 static void force_io_queues_off(struct octeon_device *oct)
249 {
250         if ((oct->chip_id == OCTEON_CN66XX) ||
251             (oct->chip_id == OCTEON_CN68XX)) {
252                 /* Reset the Enable bits for Input Queues. */
253                 octeon_write_csr(oct, CN6XXX_SLI_PKT_INSTR_ENB, 0);
254
255                 /* Reset the Enable bits for Output Queues. */
256                 octeon_write_csr(oct, CN6XXX_SLI_PKT_OUT_ENB, 0);
257         }
258 }
259
260 /**
261  * \brief wait for all pending requests to complete
262  * @param oct Pointer to Octeon device
263  *
264  * Called during shutdown sequence
265  */
266 static int wait_for_pending_requests(struct octeon_device *oct)
267 {
268         int i, pcount = 0;
269
270         for (i = 0; i < 100; i++) {
271                 pcount =
272                         atomic_read(&oct->response_list
273                                 [OCTEON_ORDERED_SC_LIST].pending_req_count);
274                 if (pcount)
275                         schedule_timeout_uninterruptible(HZ / 10);
276                 else
277                         break;
278         }
279
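        /* Still pending after roughly 10 seconds (100 polls of HZ/10 jiffies
         * each): give up and let the caller report the failure.
         */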
280         if (pcount)
281                 return 1;
282
283         return 0;
284 }
285
286 /**
287  * \brief Cause device to go quiet so it can be safely removed/reset/etc
288  * @param oct Pointer to Octeon device
289  */
290 static inline void pcierror_quiesce_device(struct octeon_device *oct)
291 {
292         int i;
293
294         /* Disable the input and output queues now. No more packets will
295          * arrive from Octeon, but we should wait for all packet processing
296          * to finish.
297          */
298         force_io_queues_off(oct);
299
300         /* To allow for in-flight requests */
301         schedule_timeout_uninterruptible(100);
302
303         if (wait_for_pending_requests(oct))
304                 dev_err(&oct->pci_dev->dev, "There were pending requests\n");
305
306         /* Force all requests waiting to be fetched by OCTEON to complete. */
307         for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
308                 struct octeon_instr_queue *iq;
309
310                 if (!(oct->io_qmask.iq & (1ULL << i)))
311                         continue;
312                 iq = oct->instr_queue[i];
313
314                 if (atomic_read(&iq->instr_pending)) {
315                         spin_lock_bh(&iq->lock);
316                         iq->fill_cnt = 0;
317                         iq->octeon_read_index = iq->host_write_index;
318                         iq->stats.instr_processed +=
319                                 atomic_read(&iq->instr_pending);
320                         lio_process_iq_request_list(oct, iq, 0);
321                         spin_unlock_bh(&iq->lock);
322                 }
323         }
324
325         /* Force all pending ordered list requests to time out. */
326         lio_process_ordered_list(oct, 1);
327
328         /* We do not need to wait for output queue packets to be processed. */
329 }
330
331 /**
332  * \brief Cleanup PCI AER uncorrectable error status
333  * @param dev Pointer to PCI device
334  */
335 static void cleanup_aer_uncorrect_error_status(struct pci_dev *dev)
336 {
337         int pos = 0x100;
338         u32 status, mask;
339
340         pr_info("%s :\n", __func__);
341
342         pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status);
343         pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, &mask);
344         if (dev->error_state == pci_channel_io_normal)
345                 status &= ~mask;        /* Clear corresponding nonfatal bits */
346         else
347                 status &= mask;         /* Clear corresponding fatal bits */
348         pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, status);
349 }
350
351 /**
352  * \brief Stop all PCI IO to a given device
353  * @param dev Pointer to Octeon device
354  */
355 static void stop_pci_io(struct octeon_device *oct)
356 {
357         /* No more instructions will be forwarded. */
358         atomic_set(&oct->status, OCT_DEV_IN_RESET);
359
360         pci_disable_device(oct->pci_dev);
361
362         /* Disable interrupts  */
363         oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR);
364
365         pcierror_quiesce_device(oct);
366
367         /* Release the interrupt line */
368         free_irq(oct->pci_dev->irq, oct);
369
370         if (oct->flags & LIO_FLAG_MSI_ENABLED)
371                 pci_disable_msi(oct->pci_dev);
372
373         dev_dbg(&oct->pci_dev->dev, "Device state is now %s\n",
374                 lio_get_state_string(&oct->status));
375
376         /* cn63xx_cleanup_aer_uncorrect_error_status(oct->pci_dev); */
377         /* making it a common function for all OCTEON models */
378         cleanup_aer_uncorrect_error_status(oct->pci_dev);
379 }
380
381 /**
382  * \brief called when PCI error is detected
383  * @param pdev Pointer to PCI device
384  * @param state The current pci connection state
385  *
386  * This function is called after a PCI bus error affecting
387  * this device has been detected.
388  */
389 static pci_ers_result_t liquidio_pcie_error_detected(struct pci_dev *pdev,
390                                                      pci_channel_state_t state)
391 {
392         struct octeon_device *oct = pci_get_drvdata(pdev);
393
394         /* Non-correctable Non-fatal errors */
395         if (state == pci_channel_io_normal) {
396                 dev_err(&oct->pci_dev->dev, "Non-correctable non-fatal error reported:\n");
397                 cleanup_aer_uncorrect_error_status(oct->pci_dev);
398                 return PCI_ERS_RESULT_CAN_RECOVER;
399         }
400
401         /* Non-correctable Fatal errors */
402         dev_err(&oct->pci_dev->dev, "Non-correctable FATAL error reported by PCI AER driver\n");
403         stop_pci_io(oct);
404
405         /* Always return a DISCONNECT. There is no support for recovery,
406          * only for a clean shutdown.
407          */
408         return PCI_ERS_RESULT_DISCONNECT;
409 }
410
411 /**
412  * \brief mmio handler
413  * @param pdev Pointer to PCI device
414  */
415 static pci_ers_result_t liquidio_pcie_mmio_enabled(
416                                 struct pci_dev *pdev __attribute__((unused)))
417 {
418         /* We should never hit this since we never ask for a reset for a Fatal
419          * Error. We always return DISCONNECT in io_error above.
420          * But play safe and return RECOVERED for now.
421          */
422         return PCI_ERS_RESULT_RECOVERED;
423 }
424
425 /**
426  * \brief called after the pci bus has been reset.
427  * @param pdev Pointer to PCI device
428  *
429  * Restart the card from scratch, as if from a cold-boot. Implementation
430  * resembles the first-half of the octeon_resume routine.
431  */
432 static pci_ers_result_t liquidio_pcie_slot_reset(
433                                 struct pci_dev *pdev __attribute__((unused)))
434 {
435         /* We should never hit this since we never ask for a reset for a Fatal
436          * Error. We always return DISCONNECT in io_error above.
437          * But play safe and return RECOVERED for now.
438          */
439         return PCI_ERS_RESULT_RECOVERED;
440 }
441
442 /**
443  * \brief called when traffic can start flowing again.
444  * @param pdev Pointer to PCI device
445  *
446  * This callback is called when the error recovery driver tells us that
447  * its OK to resume normal operation. Implementation resembles the
448  * second-half of the octeon_resume routine.
449  */
450 static void liquidio_pcie_resume(struct pci_dev *pdev __attribute__((unused)))
451 {
452         /* Nothing to be done here. */
453 }
454
455 #ifdef CONFIG_PM
456 /**
457  * \brief called when suspending
458  * @param pdev Pointer to PCI device
459  * @param state state to suspend to
460  */
461 static int liquidio_suspend(struct pci_dev *pdev __attribute__((unused)),
462                             pm_message_t state __attribute__((unused)))
463 {
464         return 0;
465 }
466
467 /**
468  * \brief called when resuming
469  * @param pdev Pointer to PCI device
470  */
471 static int liquidio_resume(struct pci_dev *pdev __attribute__((unused)))
472 {
473         return 0;
474 }
475 #endif
476
477 /* For PCI-E Advanced Error Recovery (AER) Interface */
478 static const struct pci_error_handlers liquidio_err_handler = {
479         .error_detected = liquidio_pcie_error_detected,
480         .mmio_enabled   = liquidio_pcie_mmio_enabled,
481         .slot_reset     = liquidio_pcie_slot_reset,
482         .resume         = liquidio_pcie_resume,
483 };
484
485 static const struct pci_device_id liquidio_pci_tbl[] = {
486         {       /* 68xx */
487                 PCI_VENDOR_ID_CAVIUM, 0x91, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0
488         },
489         {       /* 66xx */
490                 PCI_VENDOR_ID_CAVIUM, 0x92, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0
491         },
492         {       /* 23xx pf */
493                 PCI_VENDOR_ID_CAVIUM, 0x9702, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0
494         },
495         {
496                 0, 0, 0, 0, 0, 0, 0
497         }
498 };
499 MODULE_DEVICE_TABLE(pci, liquidio_pci_tbl);
500
501 static struct pci_driver liquidio_pci_driver = {
502         .name           = "LiquidIO",
503         .id_table       = liquidio_pci_tbl,
504         .probe          = liquidio_probe,
505         .remove         = liquidio_remove,
506         .err_handler    = &liquidio_err_handler,    /* For AER */
507
508 #ifdef CONFIG_PM
509         .suspend        = liquidio_suspend,
510         .resume         = liquidio_resume,
511 #endif
512 };
513
514 /**
515  * \brief register PCI driver
516  */
517 static int liquidio_init_pci(void)
518 {
519         return pci_register_driver(&liquidio_pci_driver);
520 }
521
522 /**
523  * \brief unregister PCI driver
524  */
525 static void liquidio_deinit_pci(void)
526 {
527         pci_unregister_driver(&liquidio_pci_driver);
528 }
529
530 /**
531  * \brief check interface state
532  * @param lio per-network private data
533  * @param state_flag flag state to check
534  */
535 static inline int ifstate_check(struct lio *lio, int state_flag)
536 {
537         return atomic_read(&lio->ifstate) & state_flag;
538 }
539
540 /**
541  * \brief set interface state
542  * @param lio per-network private data
543  * @param state_flag flag state to set
544  */
545 static inline void ifstate_set(struct lio *lio, int state_flag)
546 {
547         atomic_set(&lio->ifstate, (atomic_read(&lio->ifstate) | state_flag));
548 }
549
550 /**
551  * \brief clear interface state
552  * @param lio per-network private data
553  * @param state_flag flag state to clear
554  */
555 static inline void ifstate_reset(struct lio *lio, int state_flag)
556 {
557         atomic_set(&lio->ifstate, (atomic_read(&lio->ifstate) & ~(state_flag)));
558 }
559
560 /**
561  * \brief Stop Tx queues
562  * @param netdev network device
563  */
564 static inline void txqs_stop(struct net_device *netdev)
565 {
566         if (netif_is_multiqueue(netdev)) {
567                 int i;
568
569                 for (i = 0; i < netdev->num_tx_queues; i++)
570                         netif_stop_subqueue(netdev, i);
571         } else {
572                 netif_stop_queue(netdev);
573         }
574 }
575
576 /**
577  * \brief Start Tx queues
578  * @param netdev network device
579  */
580 static inline void txqs_start(struct net_device *netdev)
581 {
582         if (netif_is_multiqueue(netdev)) {
583                 int i;
584
585                 for (i = 0; i < netdev->num_tx_queues; i++)
586                         netif_start_subqueue(netdev, i);
587         } else {
588                 netif_start_queue(netdev);
589         }
590 }
591
592 /**
593  * \brief Wake Tx queues
594  * @param netdev network device
595  */
596 static inline void txqs_wake(struct net_device *netdev)
597 {
598         struct lio *lio = GET_LIO(netdev);
599
600         if (netif_is_multiqueue(netdev)) {
601                 int i;
602
603                 for (i = 0; i < netdev->num_tx_queues; i++) {
604                         int qno = lio->linfo.txpciq[i %
605                                 (lio->linfo.num_txpciq)].s.q_no;
606
607                         if (__netif_subqueue_stopped(netdev, i)) {
608                                 INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, qno,
609                                                           tx_restart, 1);
610                                 netif_wake_subqueue(netdev, i);
611                         }
612                 }
613         } else {
614                 INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, lio->txq,
615                                           tx_restart, 1);
616                 netif_wake_queue(netdev);
617         }
618 }
619
620 /**
621  * \brief Stop Tx queue
622  * @param netdev network device
623  */
624 static void stop_txq(struct net_device *netdev)
625 {
626         txqs_stop(netdev);
627 }
628
629 /**
630  * \brief Start Tx queue
631  * @param netdev network device
632  */
633 static void start_txq(struct net_device *netdev)
634 {
635         struct lio *lio = GET_LIO(netdev);
636
637         if (lio->linfo.link.s.link_up) {
638                 txqs_start(netdev);
639                 return;
640         }
641 }
642
643 /**
644  * \brief Wake a queue
645  * @param netdev network device
646  * @param q which queue to wake
647  */
648 static inline void wake_q(struct net_device *netdev, int q)
649 {
650         if (netif_is_multiqueue(netdev))
651                 netif_wake_subqueue(netdev, q);
652         else
653                 netif_wake_queue(netdev);
654 }
655
656 /**
657  * \brief Stop a queue
658  * @param netdev network device
659  * @param q which queue to stop
660  */
661 static inline void stop_q(struct net_device *netdev, int q)
662 {
663         if (netif_is_multiqueue(netdev))
664                 netif_stop_subqueue(netdev, q);
665         else
666                 netif_stop_queue(netdev);
667 }
668
669 /**
670  * \brief Check Tx queue status, and take appropriate action
671  * @param lio per-network private data
672  * @returns 0 if full, number of queues woken up otherwise
673  */
674 static inline int check_txq_status(struct lio *lio)
675 {
676         int ret_val = 0;
677
678         if (netif_is_multiqueue(lio->netdev)) {
679                 int numqs = lio->netdev->num_tx_queues;
680                 int q, iq = 0;
681
682                 /* check each sub-queue state */
683                 for (q = 0; q < numqs; q++) {
684                         iq = lio->linfo.txpciq[q %
685                                 (lio->linfo.num_txpciq)].s.q_no;
686                         if (octnet_iq_is_full(lio->oct_dev, iq))
687                                 continue;
688                         if (__netif_subqueue_stopped(lio->netdev, q)) {
689                                 wake_q(lio->netdev, q);
690                                 INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, iq,
691                                                           tx_restart, 1);
692                                 ret_val++;
693                         }
694                 }
695         } else {
696                 if (octnet_iq_is_full(lio->oct_dev, lio->txq))
697                         return 0;
698                 wake_q(lio->netdev, lio->txq);
699                 INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, lio->txq,
700                                           tx_restart, 1);
701                 ret_val = 1;
702         }
703         return ret_val;
704 }
705
706 /**
707  * Remove the node at the head of the list. The list would be empty at
708  * the end of this call if there are no more nodes in the list.
709  */
710 static inline struct list_head *list_delete_head(struct list_head *root)
711 {
712         struct list_head *node;
713
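        /* An empty list head points back to itself, so this check is
         * equivalent to list_empty(root).
         */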
714         if ((root->prev == root) && (root->next == root))
715                 node = NULL;
716         else
717                 node = root->next;
718
719         if (node)
720                 list_del(node);
721
722         return node;
723 }
724
725 /**
726  * \brief Delete gather lists
727  * @param lio per-network private data
728  */
729 static void delete_glists(struct lio *lio)
730 {
731         struct octnic_gather *g;
732         int i;
733
734         if (!lio->glist)
735                 return;
736
737         for (i = 0; i < lio->linfo.num_txpciq; i++) {
738                 do {
739                         g = (struct octnic_gather *)
740                                 list_delete_head(&lio->glist[i]);
741                         if (g) {
742                                 if (g->sg) {
743                                         dma_unmap_single(&lio->oct_dev->
744                                                          pci_dev->dev,
745                                                          g->sg_dma_ptr,
746                                                          g->sg_size,
747                                                          DMA_TO_DEVICE);
748                                         kfree((void *)((unsigned long)g->sg -
749                                                        g->adjust));
750                                 }
751                                 kfree(g);
752                         }
753                 } while (g);
754         }
755
756         kfree((void *)lio->glist);
757 }
758
759 /**
760  * \brief Setup gather lists
761  * @param lio per-network private data
762  */
763 static int setup_glists(struct octeon_device *oct, struct lio *lio, int num_iqs)
764 {
765         int i, j;
766         struct octnic_gather *g;
767
768         lio->glist_lock = kcalloc(num_iqs, sizeof(*lio->glist_lock),
769                                   GFP_KERNEL);
770         if (!lio->glist_lock)
771                 return 1;
772
773         lio->glist = kcalloc(num_iqs, sizeof(*lio->glist),
774                              GFP_KERNEL);
775         if (!lio->glist) {
776                 kfree((void *)lio->glist_lock);
777                 return 1;
778         }
779
780         for (i = 0; i < num_iqs; i++) {
781                 int numa_node = cpu_to_node(i % num_online_cpus());
782
783                 spin_lock_init(&lio->glist_lock[i]);
784
785                 INIT_LIST_HEAD(&lio->glist[i]);
786
787                 for (j = 0; j < lio->tx_qsize; j++) {
788                         g = kzalloc_node(sizeof(*g), GFP_KERNEL,
789                                          numa_node);
790                         if (!g)
791                                 g = kzalloc(sizeof(*g), GFP_KERNEL);
792                         if (!g)
793                                 break;
794
795                         g->sg_size = ((ROUNDUP4(OCTNIC_MAX_SG) >> 2) *
796                                       OCT_SG_ENTRY_SIZE);
797
798                         g->sg = kmalloc_node(g->sg_size + 8,
799                                              GFP_KERNEL, numa_node);
800                         if (!g->sg)
801                                 g->sg = kmalloc(g->sg_size + 8, GFP_KERNEL);
802                         if (!g->sg) {
803                                 kfree(g);
804                                 break;
805                         }
806
807                         /* The gather component should be aligned on 64-bit
808                          * boundary
809                          */
810                         if (((unsigned long)g->sg) & 7) {
811                                 g->adjust = 8 - (((unsigned long)g->sg) & 7);
812                                 g->sg = (struct octeon_sg_entry *)
813                                         ((unsigned long)g->sg + g->adjust);
814                         }
815                         g->sg_dma_ptr = dma_map_single(&oct->pci_dev->dev,
816                                                        g->sg, g->sg_size,
817                                                        DMA_TO_DEVICE);
818                         if (dma_mapping_error(&oct->pci_dev->dev,
819                                               g->sg_dma_ptr)) {
820                                 kfree((void *)((unsigned long)g->sg -
821                                                g->adjust));
822                                 kfree(g);
823                                 break;
824                         }
825
826                         list_add_tail(&g->list, &lio->glist[i]);
827                 }
828
829                 if (j != lio->tx_qsize) {
830                         delete_glists(lio);
831                         return 1;
832                 }
833         }
834
835         return 0;
836 }
837
838 /**
839  * \brief Print link information
840  * @param netdev network device
841  */
842 static void print_link_info(struct net_device *netdev)
843 {
844         struct lio *lio = GET_LIO(netdev);
845
846         if (atomic_read(&lio->ifstate) & LIO_IFSTATE_REGISTERED) {
847                 struct oct_link_info *linfo = &lio->linfo;
848
849                 if (linfo->link.s.link_up) {
850                         netif_info(lio, link, lio->netdev, "%d Mbps %s Duplex UP\n",
851                                    linfo->link.s.speed,
852                                    (linfo->link.s.duplex) ? "Full" : "Half");
853                 } else {
854                         netif_info(lio, link, lio->netdev, "Link Down\n");
855                 }
856         }
857 }
858
859 /**
860  * \brief Routine to notify MTU change
861  * @param work work_struct data structure
862  */
863 static void octnet_link_status_change(struct work_struct *work)
864 {
865         struct cavium_wk *wk = (struct cavium_wk *)work;
866         struct lio *lio = (struct lio *)wk->ctxptr;
867
868         rtnl_lock();
869         call_netdevice_notifiers(NETDEV_CHANGEMTU, lio->netdev);
870         rtnl_unlock();
871 }
872
873 /**
874  * \brief Sets up the mtu status change work
875  * @param netdev network device
876  */
877 static inline int setup_link_status_change_wq(struct net_device *netdev)
878 {
879         struct lio *lio = GET_LIO(netdev);
880         struct octeon_device *oct = lio->oct_dev;
881
882         lio->link_status_wq.wq = alloc_workqueue("link-status",
883                                                  WQ_MEM_RECLAIM, 0);
884         if (!lio->link_status_wq.wq) {
885                 dev_err(&oct->pci_dev->dev, "unable to create cavium link status wq\n");
886                 return -1;
887         }
888         INIT_DELAYED_WORK(&lio->link_status_wq.wk.work,
889                           octnet_link_status_change);
890         lio->link_status_wq.wk.ctxptr = lio;
891
892         return 0;
893 }
894
895 static inline void cleanup_link_status_change_wq(struct net_device *netdev)
896 {
897         struct lio *lio = GET_LIO(netdev);
898
899         if (lio->link_status_wq.wq) {
900                 cancel_delayed_work_sync(&lio->link_status_wq.wk.work);
901                 destroy_workqueue(lio->link_status_wq.wq);
902         }
903 }
904
905 /**
906  * \brief Update link status
907  * @param netdev network device
908  * @param ls link status structure
909  *
910  * Called on receipt of a link status response from the core application to
911  * update each interface's link status.
912  */
913 static inline void update_link_status(struct net_device *netdev,
914                                       union oct_link_status *ls)
915 {
916         struct lio *lio = GET_LIO(netdev);
917         int changed = (lio->linfo.link.u64 != ls->u64);
918
919         lio->linfo.link.u64 = ls->u64;
920
921         if ((lio->intf_open) && (changed)) {
922                 print_link_info(netdev);
923                 lio->link_changes++;
924
925                 if (lio->linfo.link.s.link_up) {
926                         netif_carrier_on(netdev);
927                         /* start_txq(netdev); */
928                         txqs_wake(netdev);
929                 } else {
930                         netif_carrier_off(netdev);
931                         stop_txq(netdev);
932                 }
933         }
934 }
935
936 /* Runs in interrupt context. */
937 static void update_txq_status(struct octeon_device *oct, int iq_num)
938 {
939         struct net_device *netdev;
940         struct lio *lio;
941         struct octeon_instr_queue *iq = oct->instr_queue[iq_num];
942
943         /*octeon_update_iq_read_idx(oct, iq);*/
944
945         netdev = oct->props[iq->ifidx].netdev;
946
947         /* This is needed because the first IQ does not have
948          * a netdev associated with it.
949          */
950         if (!netdev)
951                 return;
952
953         lio = GET_LIO(netdev);
954         if (netif_is_multiqueue(netdev)) {
955                 if (__netif_subqueue_stopped(netdev, iq->q_index) &&
956                     lio->linfo.link.s.link_up &&
957                     (!octnet_iq_is_full(oct, iq_num))) {
958                         INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, iq_num,
959                                                   tx_restart, 1);
960                         netif_wake_subqueue(netdev, iq->q_index);
961                 } else {
962                         if (!octnet_iq_is_full(oct, lio->txq)) {
963                                 INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev,
964                                                           lio->txq,
965                                                           tx_restart, 1);
966                                 wake_q(netdev, lio->txq);
967                         }
968                 }
969         }
970 }
971
972 static
973 int liquidio_schedule_msix_droq_pkt_handler(struct octeon_droq *droq, u64 ret)
974 {
975         struct octeon_device *oct = droq->oct_dev;
976         struct octeon_device_priv *oct_priv =
977             (struct octeon_device_priv *)oct->priv;
978
979         if (droq->ops.poll_mode) {
980                 droq->ops.napi_fn(droq);
981         } else {
982                 if (ret & MSIX_PO_INT) {
983                         tasklet_schedule(&oct_priv->droq_tasklet);
984                         return 1;
985                 }
986                 /* this will be flushed periodically by the IQ doorbell check */
987                 if (ret & MSIX_PI_INT)
988                         return 0;
989         }
990         return 0;
991 }
992
993 /**
994  * \brief Droq packet processor scheduler
995  * @param oct octeon device
996  */
997 static
998 void liquidio_schedule_droq_pkt_handlers(struct octeon_device *oct)
999 {
1000         struct octeon_device_priv *oct_priv =
1001                 (struct octeon_device_priv *)oct->priv;
1002         u64 oq_no;
1003         struct octeon_droq *droq;
1004
1005         if (oct->int_status & OCT_DEV_INTR_PKT_DATA) {
1006                 for (oq_no = 0; oq_no < MAX_OCTEON_OUTPUT_QUEUES(oct);
1007                      oq_no++) {
1008                         if (!(oct->droq_intr & (1ULL << oq_no)))
1009                                 continue;
1010
1011                         droq = oct->droq[oq_no];
1012
1013                         if (droq->ops.poll_mode) {
1014                                 droq->ops.napi_fn(droq);
1015                                 oct_priv->napi_mask |= (1 << oq_no);
1016                         } else {
1017                                 tasklet_schedule(&oct_priv->droq_tasklet);
1018                         }
1019                 }
1020         }
1021 }
1022
1023 static irqreturn_t
1024 liquidio_msix_intr_handler(int irq __attribute__((unused)), void *dev)
1025 {
1026         u64 ret;
1027         struct octeon_ioq_vector *ioq_vector = (struct octeon_ioq_vector *)dev;
1028         struct octeon_device *oct = ioq_vector->oct_dev;
1029         struct octeon_droq *droq = oct->droq[ioq_vector->droq_index];
1030
1031         ret = oct->fn_list.msix_interrupt_handler(ioq_vector);
1032
1033         if ((ret & MSIX_PO_INT) || (ret & MSIX_PI_INT))
1034                 liquidio_schedule_msix_droq_pkt_handler(droq, ret);
1035
1036         return IRQ_HANDLED;
1037 }
1038
1039 /**
1040  * \brief Interrupt handler for octeon
1041  * @param irq unused
1042  * @param dev octeon device
1043  */
1044 static
1045 irqreturn_t liquidio_legacy_intr_handler(int irq __attribute__((unused)),
1046                                          void *dev)
1047 {
1048         struct octeon_device *oct = (struct octeon_device *)dev;
1049         irqreturn_t ret;
1050
1051         /* Disable our interrupts for the duration of ISR */
1052         oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR);
1053
1054         ret = oct->fn_list.process_interrupt_regs(oct);
1055
1056         if (ret == IRQ_HANDLED)
1057                 liquidio_schedule_droq_pkt_handlers(oct);
1058
1059         /* Re-enable our interrupts  */
1060         if (!(atomic_read(&oct->status) == OCT_DEV_IN_RESET))
1061                 oct->fn_list.enable_interrupt(oct, OCTEON_ALL_INTR);
1062
1063         return ret;
1064 }
1065
1066 /**
1067  * \brief Setup interrupt for octeon device
1068  * @param oct octeon device
1069  *
1070  *  Enable interrupt in Octeon device as given in the PCI interrupt mask.
1071  */
1072 static int octeon_setup_interrupt(struct octeon_device *oct)
1073 {
1074         int irqret, err;
1075         struct msix_entry *msix_entries;
1076         int i;
1077         int num_ioq_vectors;
1078         int num_alloc_ioq_vectors;
1079
1080         if (OCTEON_CN23XX_PF(oct) && oct->msix_on) {
1081                 oct->num_msix_irqs = oct->sriov_info.num_pf_rings;
1082                 /* one non-ioq interrupt for handling sli_mac_pf_int_sum */
1083                 oct->num_msix_irqs += 1;
1084
1085                 oct->msix_entries = kcalloc(
1086                     oct->num_msix_irqs, sizeof(struct msix_entry), GFP_KERNEL);
1087                 if (!oct->msix_entries)
1088                         return 1;
1089
1090                 msix_entries = (struct msix_entry *)oct->msix_entries;
1091                 /* Assumption is that PF MSI-X vectors start at pf_srn and run up
1092                  * to trs, not from 0. If that changes, update this code.
1093                  */
1094                 for (i = 0; i < oct->num_msix_irqs - 1; i++)
1095                         msix_entries[i].entry = oct->sriov_info.pf_srn + i;
1096                 msix_entries[oct->num_msix_irqs - 1].entry =
1097                     oct->sriov_info.trs;
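                /* The last entry is the non-ioq vector (sli_mac_pf_int_sum);
                 * it is wired to the legacy handler further below.
                 */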
1098                 num_alloc_ioq_vectors = pci_enable_msix_range(
1099                                                 oct->pci_dev, msix_entries,
1100                                                 oct->num_msix_irqs,
1101                                                 oct->num_msix_irqs);
1102                 if (num_alloc_ioq_vectors < 0) {
1103                         dev_err(&oct->pci_dev->dev, "unable to allocate MSI-X interrupts\n");
1104                         kfree(oct->msix_entries);
1105                         oct->msix_entries = NULL;
1106                         return 1;
1107                 }
1108                 dev_dbg(&oct->pci_dev->dev, "OCTEON: Enough MSI-X interrupts are allocated...\n");
1109
1110                 num_ioq_vectors = oct->num_msix_irqs;
1111
1112                 /** For PF, there is one non-ioq interrupt handler */
1113                 num_ioq_vectors -= 1;
1114                 irqret = request_irq(msix_entries[num_ioq_vectors].vector,
1115                                      liquidio_legacy_intr_handler, 0, "octeon",
1116                                      oct);
1117                 if (irqret) {
1118                         dev_err(&oct->pci_dev->dev,
1119                                 "OCTEON: Request_irq failed for MSIX interrupt Error: %d\n",
1120                                 irqret);
1121                         pci_disable_msix(oct->pci_dev);
1122                         kfree(oct->msix_entries);
1123                         oct->msix_entries = NULL;
1124                         return 1;
1125                 }
1126
1127                 for (i = 0; i < num_ioq_vectors; i++) {
1128                         irqret = request_irq(msix_entries[i].vector,
1129                                              liquidio_msix_intr_handler, 0,
1130                                              "octeon", &oct->ioq_vector[i]);
1131                         if (irqret) {
1132                                 dev_err(&oct->pci_dev->dev,
1133                                         "OCTEON: Request_irq failed for MSIX interrupt Error: %d\n",
1134                                         irqret);
1135                                 /* Free the non-ioq irq vector here. */
1136                                 free_irq(msix_entries[num_ioq_vectors].vector,
1137                                          oct);
1138
1139                                 while (i) {
1140                                         i--;
1141                                         /* clear the affinity mask. */
1142                                         irq_set_affinity_hint(
1143                                                 msix_entries[i].vector, NULL);
1144                                         free_irq(msix_entries[i].vector,
1145                                                  &oct->ioq_vector[i]);
1146                                 }
1147                                 pci_disable_msix(oct->pci_dev);
1148                                 kfree(oct->msix_entries);
1149                                 oct->msix_entries = NULL;
1150                                 return 1;
1151                         }
1152                         oct->ioq_vector[i].vector = msix_entries[i].vector;
1153                         /* assign the cpu mask for this msix interrupt vector */
1154                         irq_set_affinity_hint(
1155                                         msix_entries[i].vector,
1156                                         (&oct->ioq_vector[i].affinity_mask));
1157                 }
1158                 dev_dbg(&oct->pci_dev->dev, "OCTEON[%d]: MSI-X enabled\n",
1159                         oct->octeon_id);
1160         } else {
1161                 err = pci_enable_msi(oct->pci_dev);
1162                 if (err)
1163                         dev_warn(&oct->pci_dev->dev, "Reverting to legacy interrupts. Error: %d\n",
1164                                  err);
1165                 else
1166                         oct->flags |= LIO_FLAG_MSI_ENABLED;
1167
1168                 irqret = request_irq(oct->pci_dev->irq,
1169                                      liquidio_legacy_intr_handler, IRQF_SHARED,
1170                                      "octeon", oct);
1171                 if (irqret) {
1172                         if (oct->flags & LIO_FLAG_MSI_ENABLED)
1173                                 pci_disable_msi(oct->pci_dev);
1174                         dev_err(&oct->pci_dev->dev, "Request IRQ failed with code: %d\n",
1175                                 irqret);
1176                         return 1;
1177                 }
1178         }
1179         return 0;
1180 }
1181
1182 /**
1183  * \brief PCI probe handler
1184  * @param pdev PCI device structure
1185  * @param ent unused
1186  */
1187 static int
1188 liquidio_probe(struct pci_dev *pdev,
1189                const struct pci_device_id *ent __attribute__((unused)))
1190 {
1191         struct octeon_device *oct_dev = NULL;
1192         struct handshake *hs;
1193
1194         oct_dev = octeon_allocate_device(pdev->device,
1195                                          sizeof(struct octeon_device_priv));
1196         if (!oct_dev) {
1197                 dev_err(&pdev->dev, "Unable to allocate device\n");
1198                 return -ENOMEM;
1199         }
1200
1201         if (pdev->device == OCTEON_CN23XX_PF_VID)
1202                 oct_dev->msix_on = LIO_FLAG_MSIX_ENABLED;
1203
1204         dev_info(&pdev->dev, "Initializing device %x:%x.\n",
1205                  (u32)pdev->vendor, (u32)pdev->device);
1206
1207         /* Assign octeon_device for this device to the private data area. */
1208         pci_set_drvdata(pdev, oct_dev);
1209
1210         /* set linux specific device pointer */
1211         oct_dev->pci_dev = (void *)pdev;
1212
1213         hs = &handshake[oct_dev->octeon_id];
1214         init_completion(&hs->init);
1215         init_completion(&hs->started);
1216         hs->pci_dev = pdev;
1217
1218         if (oct_dev->octeon_id == 0)
1219                 /* first LiquidIO NIC is detected */
1220                 complete(&first_stage);
1221
1222         if (octeon_device_init(oct_dev)) {
1223                 liquidio_remove(pdev);
1224                 return -ENOMEM;
1225         }
1226
1227         oct_dev->rx_pause = 1;
1228         oct_dev->tx_pause = 1;
1229
1230         dev_dbg(&oct_dev->pci_dev->dev, "Device is ready\n");
1231
1232         return 0;
1233 }
1234
1235 /**
1236  * \brief Destroy resources associated with octeon device
1237  * @param oct octeon device
1239  */
1240 static void octeon_destroy_resources(struct octeon_device *oct)
1241 {
1242         int i;
1243         struct msix_entry *msix_entries;
1244         struct octeon_device_priv *oct_priv =
1245                 (struct octeon_device_priv *)oct->priv;
1246
1247         struct handshake *hs;
1248
1249         switch (atomic_read(&oct->status)) {
1250         case OCT_DEV_RUNNING:
1251         case OCT_DEV_CORE_OK:
1252
1253                 /* No more instructions will be forwarded. */
1254                 atomic_set(&oct->status, OCT_DEV_IN_RESET);
1255
1256                 oct->app_mode = CVM_DRV_INVALID_APP;
1257                 dev_dbg(&oct->pci_dev->dev, "Device state is now %s\n",
1258                         lio_get_state_string(&oct->status));
1259
1260                 schedule_timeout_uninterruptible(HZ / 10);
1261
1262                 /* fallthrough */
1263         case OCT_DEV_HOST_OK:
1264
1265                 /* fallthrough */
1266         case OCT_DEV_CONSOLE_INIT_DONE:
1267                 /* Remove any consoles */
1268                 octeon_remove_consoles(oct);
1269
1270                 /* fallthrough */
1271         case OCT_DEV_IO_QUEUES_DONE:
1272                 if (wait_for_pending_requests(oct))
1273                         dev_err(&oct->pci_dev->dev, "There were pending requests\n");
1274
1275                 if (lio_wait_for_instr_fetch(oct))
1276                         dev_err(&oct->pci_dev->dev, "IQ had pending instructions\n");
1277
1278                 /* Disable the input and output queues now. No more packets will
1279                  * arrive from Octeon, but we should wait for all packet
1280                  * processing to finish.
1281                  */
1282                 oct->fn_list.disable_io_queues(oct);
1283
1284                 if (lio_wait_for_oq_pkts(oct))
1285                         dev_err(&oct->pci_dev->dev, "OQ had pending packets\n");
1286
1287                 /* Disable interrupts  */
1288                 oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR);
1289
1290                 if (oct->msix_on) {
1291                         msix_entries = (struct msix_entry *)oct->msix_entries;
1292                         for (i = 0; i < oct->num_msix_irqs - 1; i++) {
1293                                 /* clear the affinity_cpumask */
1294                                 irq_set_affinity_hint(msix_entries[i].vector,
1295                                                       NULL);
1296                                 free_irq(msix_entries[i].vector,
1297                                          &oct->ioq_vector[i]);
1298                         }
1299                         /* the non-ioq vector's argument is the oct struct */
1300                         free_irq(msix_entries[i].vector, oct);
1301
1302                         pci_disable_msix(oct->pci_dev);
1303                         kfree(oct->msix_entries);
1304                         oct->msix_entries = NULL;
1305                 } else {
1306                         /* Release the interrupt line */
1307                         free_irq(oct->pci_dev->irq, oct);
1308
1309                         if (oct->flags & LIO_FLAG_MSI_ENABLED)
1310                                 pci_disable_msi(oct->pci_dev);
1311                 }
1312
1313                 if (OCTEON_CN23XX_PF(oct))
1314                         octeon_free_ioq_vector(oct);
1315         /* fallthrough */
1316         case OCT_DEV_IN_RESET:
1317         case OCT_DEV_DROQ_INIT_DONE:
1318                 /*atomic_set(&oct->status, OCT_DEV_DROQ_INIT_DONE);*/
1319                 mdelay(100);
1320                 for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) {
1321                         if (!(oct->io_qmask.oq & BIT_ULL(i)))
1322                                 continue;
1323                         octeon_delete_droq(oct, i);
1324                 }
1325
1326                 /* Force any pending handshakes to complete */
1327                 for (i = 0; i < MAX_OCTEON_DEVICES; i++) {
1328                         hs = &handshake[i];
1329
1330                         if (hs->pci_dev) {
1331                                 handshake[oct->octeon_id].init_ok = 0;
1332                                 complete(&handshake[oct->octeon_id].init);
1333                                 handshake[oct->octeon_id].started_ok = 0;
1334                                 complete(&handshake[oct->octeon_id].started);
1335                         }
1336                 }
1337
1338                 /* fallthrough */
1339         case OCT_DEV_RESP_LIST_INIT_DONE:
1340                 octeon_delete_response_list(oct);
1341
1342                 /* fallthrough */
1343         case OCT_DEV_SC_BUFF_POOL_INIT_DONE:
1344                 octeon_free_sc_buffer_pool(oct);
1345
1346                 /* fallthrough */
1347         case OCT_DEV_INSTR_QUEUE_INIT_DONE:
1348                 for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
1349                         if (!(oct->io_qmask.iq & (1ULL << i)))
1350                                 continue;
1351                         octeon_delete_instr_queue(oct, i);
1352                 }
1353
1354                 /* fallthrough */
1355         case OCT_DEV_DISPATCH_INIT_DONE:
1356                 octeon_delete_dispatch_list(oct);
1357                 cancel_delayed_work_sync(&oct->nic_poll_work.work);
1358
1359                 /* fallthrough */
1360         case OCT_DEV_PCI_MAP_DONE:
1361                 /* Soft reset the octeon device before exiting */
1362                 if ((!OCTEON_CN23XX_PF(oct)) || !oct->octeon_id)
1363                         oct->fn_list.soft_reset(oct);
1364
1365                 octeon_unmap_pci_barx(oct, 0);
1366                 octeon_unmap_pci_barx(oct, 1);
1367
1368                 /* fallthrough */
1369         case OCT_DEV_BEGIN_STATE:
1370                 /* Disable the device, releasing the PCI INT */
1371                 pci_disable_device(oct->pci_dev);
1372
1373                 /* Nothing to be done here either */
1374                 break;
1375         }                       /* end switch (oct->status) */
1376
1377         tasklet_kill(&oct_priv->droq_tasklet);
1378 }
1379
1380 /**
1381  * \brief Send Rx control command
1382  * @param lio per-network private data
1383  * @param start_stop whether to start or stop
1384  */
1385 static void send_rx_ctrl_cmd(struct lio *lio, int start_stop)
1386 {
1387         struct octnic_ctrl_pkt nctrl;
1388
1389         memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
1390
1391         nctrl.ncmd.s.cmd = OCTNET_CMD_RX_CTL;
1392         nctrl.ncmd.s.param1 = start_stop;
1393         nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
1394         nctrl.netpndev = (u64)lio->netdev;
1395
1396         if (octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl) < 0)
1397                 netif_info(lio, rx_err, lio->netdev, "Failed to send RX Control message\n");
1398 }
1399
1400 /**
1401  * \brief Destroy NIC device interface
1402  * @param oct octeon device
1403  * @param ifidx which interface to destroy
1404  *
1405  * Cleanup associated with each interface for an Octeon device  when NIC
1406  * module is being unloaded or if initialization fails during load.
1407  */
1408 static void liquidio_destroy_nic_device(struct octeon_device *oct, int ifidx)
1409 {
1410         struct net_device *netdev = oct->props[ifidx].netdev;
1411         struct lio *lio;
1412         struct napi_struct *napi, *n;
1413
1414         if (!netdev) {
1415                 dev_err(&oct->pci_dev->dev, "%s No netdevice ptr for index %d\n",
1416                         __func__, ifidx);
1417                 return;
1418         }
1419
1420         lio = GET_LIO(netdev);
1421
1422         dev_dbg(&oct->pci_dev->dev, "NIC device cleanup\n");
1423
1424         send_rx_ctrl_cmd(lio, 0);
1425
1426         if (atomic_read(&lio->ifstate) & LIO_IFSTATE_RUNNING)
1427                 txqs_stop(netdev);
1428
1429         if (oct->props[lio->ifidx].napi_enabled == 1) {
1430                 list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
1431                         napi_disable(napi);
1432
1433                 oct->props[lio->ifidx].napi_enabled = 0;
1434
1435                 if (OCTEON_CN23XX_PF(oct))
1436                         oct->droq[0]->ops.poll_mode = 0;
1437         }
1438
1439         if (atomic_read(&lio->ifstate) & LIO_IFSTATE_REGISTERED)
1440                 unregister_netdev(netdev);
1441
1442         cleanup_link_status_change_wq(netdev);
1443
1444         delete_glists(lio);
1445
1446         free_netdev(netdev);
1447
1448         oct->props[ifidx].gmxport = -1;
1449
1450         oct->props[ifidx].netdev = NULL;
1451 }
1452
1453 /**
1454  * \brief Stop complete NIC functionality
1455  * @param oct octeon device
1456  */
1457 static int liquidio_stop_nic_module(struct octeon_device *oct)
1458 {
1459         int i, j;
1460         struct lio *lio;
1461
1462         dev_dbg(&oct->pci_dev->dev, "Stopping network interfaces\n");
1463         if (!oct->ifcount) {
1464                 dev_err(&oct->pci_dev->dev, "Init for Octeon was not completed\n");
1465                 return 1;
1466         }
1467
1468         spin_lock_bh(&oct->cmd_resp_wqlock);
1469         oct->cmd_resp_state = OCT_DRV_OFFLINE;
1470         spin_unlock_bh(&oct->cmd_resp_wqlock);
1471
1472         for (i = 0; i < oct->ifcount; i++) {
1473                 lio = GET_LIO(oct->props[i].netdev);
1474                 for (j = 0; j < lio->linfo.num_rxpciq; j++)
1475                         octeon_unregister_droq_ops(oct,
1476                                                    lio->linfo.rxpciq[j].s.q_no);
1477         }
1478
1479         for (i = 0; i < oct->ifcount; i++)
1480                 liquidio_destroy_nic_device(oct, i);
1481
1482         dev_dbg(&oct->pci_dev->dev, "Network interfaces stopped\n");
1483         return 0;
1484 }
1485
1486 /**
1487  * \brief Cleans up resources at unload time
1488  * @param pdev PCI device structure
1489  */
1490 static void liquidio_remove(struct pci_dev *pdev)
1491 {
1492         struct octeon_device *oct_dev = pci_get_drvdata(pdev);
1493
1494         dev_dbg(&oct_dev->pci_dev->dev, "Stopping device\n");
1495
1496         if (oct_dev->app_mode && (oct_dev->app_mode == CVM_DRV_NIC_APP))
1497                 liquidio_stop_nic_module(oct_dev);
1498
1499         /* Reset the octeon device and cleanup all memory allocated for
1500          * the octeon device by driver.
1501          */
1502         octeon_destroy_resources(oct_dev);
1503
1504         dev_info(&oct_dev->pci_dev->dev, "Device removed\n");
1505
1506         /* This octeon device has been removed. Update the global
1507          * data structure to reflect this. Free the device structure.
1508          */
1509         octeon_free_device_mem(oct_dev);
1510 }
1511
1512 /**
1513  * \brief Identify the Octeon device and map the BAR address space
1514  * @param oct octeon device
1515  */
1516 static int octeon_chip_specific_setup(struct octeon_device *oct)
1517 {
1518         u32 dev_id, rev_id;
1519         int ret = 1;
1520         char *s;
1521
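        /* Config-space dword 0 holds the vendor/device ID pair; the dword at
         * offset 8 holds class code/revision, with the chip revision in its
         * low byte (recorded below).
         */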
1522         pci_read_config_dword(oct->pci_dev, 0, &dev_id);
1523         pci_read_config_dword(oct->pci_dev, 8, &rev_id);
1524         oct->rev_id = rev_id & 0xff;
1525
1526         switch (dev_id) {
1527         case OCTEON_CN68XX_PCIID:
1528                 oct->chip_id = OCTEON_CN68XX;
1529                 ret = lio_setup_cn68xx_octeon_device(oct);
1530                 s = "CN68XX";
1531                 break;
1532
1533         case OCTEON_CN66XX_PCIID:
1534                 oct->chip_id = OCTEON_CN66XX;
1535                 ret = lio_setup_cn66xx_octeon_device(oct);
1536                 s = "CN66XX";
1537                 break;
1538
1539         case OCTEON_CN23XX_PCIID_PF:
1540                 oct->chip_id = OCTEON_CN23XX_PF_VID;
1541                 ret = setup_cn23xx_octeon_pf_device(oct);
1542                 s = "CN23XX";
1543                 break;
1544
1545         default:
1546                 s = "?";
1547                 dev_err(&oct->pci_dev->dev, "Unknown device found (dev_id: %x)\n",
1548                         dev_id);
1549         }
1550
1551         if (!ret)
1552                 dev_info(&oct->pci_dev->dev, "%s PASS%d.%d %s Version: %s\n", s,
1553                          OCTEON_MAJOR_REV(oct),
1554                          OCTEON_MINOR_REV(oct),
1555                          octeon_get_conf(oct)->card_name,
1556                          LIQUIDIO_VERSION);
1557
1558         return ret;
1559 }
1560
1561 /**
1562  * \brief PCI initialization for each Octeon device.
1563  * @param oct octeon device
1564  */
1565 static int octeon_pci_os_setup(struct octeon_device *oct)
1566 {
1567         /* setup PCI stuff first */
1568         if (pci_enable_device(oct->pci_dev)) {
1569                 dev_err(&oct->pci_dev->dev, "pci_enable_device failed\n");
1570                 return 1;
1571         }
1572
1573         if (dma_set_mask_and_coherent(&oct->pci_dev->dev, DMA_BIT_MASK(64))) {
1574                 dev_err(&oct->pci_dev->dev, "Unexpected DMA device capability\n");
1575                 return 1;
1576         }
1577
1578         /* Enable PCI DMA Master. */
1579         pci_set_master(oct->pci_dev);
1580
1581         return 0;
1582 }
1583
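/* skb_iq() maps an skb to one of this interface's input (Tx) queues: for a
 * multiqueue netdev the stack-chosen queue_mapping is folded modulo
 * num_txpciq (e.g. queue_mapping 5 with 4 Tx queues lands on IQ index 1);
 * single-queue devices always use index 0.
 */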
1584 static inline int skb_iq(struct lio *lio, struct sk_buff *skb)
1585 {
1586         int q = 0;
1587
1588         if (netif_is_multiqueue(lio->netdev))
1589                 q = skb->queue_mapping % lio->linfo.num_txpciq;
1590
1591         return q;
1592 }
1593
1594 /**
1595  * \brief Check Tx queue state for a given network buffer
1596  * @param lio per-network private data
1597  * @param skb network buffer
1598  */
1599 static inline int check_txq_state(struct lio *lio, struct sk_buff *skb)
1600 {
1601         int q = 0, iq = 0;
1602
1603         if (netif_is_multiqueue(lio->netdev)) {
1604                 q = skb->queue_mapping;
1605                 iq = lio->linfo.txpciq[(q % (lio->linfo.num_txpciq))].s.q_no;
1606         } else {
1607                 iq = lio->txq;
1608                 q = iq;
1609         }
1610
1611         if (octnet_iq_is_full(lio->oct_dev, iq))
1612                 return 0;
1613
1614         if (__netif_subqueue_stopped(lio->netdev, q)) {
1615                 INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, iq, tx_restart, 1);
1616                 wake_q(lio->netdev, q);
1617         }
1618         return 1;
1619 }
1620
1621 /**
1622  * \brief Unmap and free network buffer
1623  * @param buf buffer
1624  */
1625 static void free_netbuf(void *buf)
1626 {
1627         struct sk_buff *skb;
1628         struct octnet_buf_free_info *finfo;
1629         struct lio *lio;
1630
1631         finfo = (struct octnet_buf_free_info *)buf;
1632         skb = finfo->skb;
1633         lio = finfo->lio;
1634
1635         dma_unmap_single(&lio->oct_dev->pci_dev->dev, finfo->dptr, skb->len,
1636                          DMA_TO_DEVICE);
1637
1638         check_txq_state(lio, skb);
1639
1640         tx_buffer_free(skb);
1641 }
1642
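/* The gather-list handling below assumes the layout implied by its indexing:
 * each sg entry carries four pointer slots, so fragment i (with the linear
 * skb data occupying slot 0) is found at g->sg[i >> 2].ptr[i & 3].
 */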
1643 /**
1644  * \brief Unmap and free gather buffer
1645  * @param buf buffer
1646  */
1647 static void free_netsgbuf(void *buf)
1648 {
1649         struct octnet_buf_free_info *finfo;
1650         struct sk_buff *skb;
1651         struct lio *lio;
1652         struct octnic_gather *g;
1653         int i, frags, iq;
1654
1655         finfo = (struct octnet_buf_free_info *)buf;
1656         skb = finfo->skb;
1657         lio = finfo->lio;
1658         g = finfo->g;
1659         frags = skb_shinfo(skb)->nr_frags;
1660
1661         dma_unmap_single(&lio->oct_dev->pci_dev->dev,
1662                          g->sg[0].ptr[0], (skb->len - skb->data_len),
1663                          DMA_TO_DEVICE);
1664
1665         i = 1;
1666         while (frags--) {
1667                 struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1];
1668
1669                 pci_unmap_page((lio->oct_dev)->pci_dev,
1670                                g->sg[(i >> 2)].ptr[(i & 3)],
1671                                frag->size, DMA_TO_DEVICE);
1672                 i++;
1673         }
1674
1675         dma_sync_single_for_cpu(&lio->oct_dev->pci_dev->dev,
1676                                 g->sg_dma_ptr, g->sg_size, DMA_TO_DEVICE);
1677
1678         iq = skb_iq(lio, skb);
1679         spin_lock(&lio->glist_lock[iq]);
1680         list_add_tail(&g->list, &lio->glist[iq]);
1681         spin_unlock(&lio->glist_lock[iq]);
1682
1683         check_txq_state(lio, skb);     /* mq support: sub-queue state check */
1684
1685         tx_buffer_free(skb);
1686 }
1687
1688 /**
1689  * \brief Unmap and free gather buffer with response
1690  * @param buf buffer
1691  */
1692 static void free_netsgbuf_with_resp(void *buf)
1693 {
1694         struct octeon_soft_command *sc;
1695         struct octnet_buf_free_info *finfo;
1696         struct sk_buff *skb;
1697         struct lio *lio;
1698         struct octnic_gather *g;
1699         int i, frags, iq;
1700
1701         sc = (struct octeon_soft_command *)buf;
1702         skb = (struct sk_buff *)sc->callback_arg;
1703         finfo = (struct octnet_buf_free_info *)&skb->cb;
1704
1705         lio = finfo->lio;
1706         g = finfo->g;
1707         frags = skb_shinfo(skb)->nr_frags;
1708
1709         dma_unmap_single(&lio->oct_dev->pci_dev->dev,
1710                          g->sg[0].ptr[0], (skb->len - skb->data_len),
1711                          DMA_TO_DEVICE);
1712
1713         i = 1;
1714         while (frags--) {
1715                 struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1];
1716
1717                 pci_unmap_page((lio->oct_dev)->pci_dev,
1718                                g->sg[(i >> 2)].ptr[(i & 3)],
1719                                frag->size, DMA_TO_DEVICE);
1720                 i++;
1721         }
1722
1723         dma_sync_single_for_cpu(&lio->oct_dev->pci_dev->dev,
1724                                 g->sg_dma_ptr, g->sg_size, DMA_TO_DEVICE);
1725
1726         iq = skb_iq(lio, skb);
1727
1728         spin_lock(&lio->glist_lock[iq]);
1729         list_add_tail(&g->list, &lio->glist[iq]);
1730         spin_unlock(&lio->glist_lock[iq]);
1731
1732         /* Don't free the skb yet */
1733
1734         check_txq_state(lio, skb);
1735 }
1736
1737 /**
1738  * \brief Adjust ptp frequency
1739  * @param ptp PTP clock info
1740  * @param ppb how much to adjust by, in parts-per-billion
1741  */
1742 static int liquidio_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
1743 {
1744         struct lio *lio = container_of(ptp, struct lio, ptp_info);
1745         struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
1746         u64 comp, delta;
1747         unsigned long flags;
1748         bool neg_adj = false;
1749
1750         if (ppb < 0) {
1751                 neg_adj = true;
1752                 ppb = -ppb;
1753         }
1754
1755         /* The hardware adds the clock compensation value to the
1756          * PTP clock on every coprocessor clock cycle, so we
1757          * compute the delta in terms of coprocessor clocks.
1758          */
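        /* In other words, delta = ppb * 2^32 / coproc_clock_rate, a 32.32
         * fixed-point correction per coprocessor tick.  Illustrative example
         * (assuming a 1 GHz coprocessor clock): ppb = 1000 gives
         * delta = (1000 << 32) / 1e9 ~= 4295, i.e. ~1e-6 in fixed point.
         */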
1759         delta = (u64)ppb << 32;
1760         do_div(delta, oct->coproc_clock_rate);
1761
1762         spin_lock_irqsave(&lio->ptp_lock, flags);
1763         comp = lio_pci_readq(oct, CN6XXX_MIO_PTP_CLOCK_COMP);
1764         if (neg_adj)
1765                 comp -= delta;
1766         else
1767                 comp += delta;
1768         lio_pci_writeq(oct, comp, CN6XXX_MIO_PTP_CLOCK_COMP);
1769         spin_unlock_irqrestore(&lio->ptp_lock, flags);
1770
1771         return 0;
1772 }
1773
1774 /**
1775  * \brief Adjust ptp time
1776  * @param ptp PTP clock info
1777  * @param delta how much to adjust by, in nanosecs
1778  */
1779 static int liquidio_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
1780 {
1781         unsigned long flags;
1782         struct lio *lio = container_of(ptp, struct lio, ptp_info);
1783
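        /* Time steps are not written to the hardware counter; they accumulate
         * in ptp_adjust, which gettime adds to the raw counter value and
         * which is also folded into received hardware timestamps.
         */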
1784         spin_lock_irqsave(&lio->ptp_lock, flags);
1785         lio->ptp_adjust += delta;
1786         spin_unlock_irqrestore(&lio->ptp_lock, flags);
1787
1788         return 0;
1789 }
1790
1791 /**
1792  * \brief Get hardware clock time, including any adjustment
1793  * @param ptp PTP clock info
1794  * @param ts timespec
1795  */
1796 static int liquidio_ptp_gettime(struct ptp_clock_info *ptp,
1797                                 struct timespec64 *ts)
1798 {
1799         u64 ns;
1800         unsigned long flags;
1801         struct lio *lio = container_of(ptp, struct lio, ptp_info);
1802         struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
1803
1804         spin_lock_irqsave(&lio->ptp_lock, flags);
1805         ns = lio_pci_readq(oct, CN6XXX_MIO_PTP_CLOCK_HI);
1806         ns += lio->ptp_adjust;
1807         spin_unlock_irqrestore(&lio->ptp_lock, flags);
1808
1809         *ts = ns_to_timespec64(ns);
1810
1811         return 0;
1812 }
1813
1814 /**
1815  * \brief Set hardware clock time. Reset adjustment
1816  * @param ptp PTP clock info
1817  * @param ts timespec
1818  */
1819 static int liquidio_ptp_settime(struct ptp_clock_info *ptp,
1820                                 const struct timespec64 *ts)
1821 {
1822         u64 ns;
1823         unsigned long flags;
1824         struct lio *lio = container_of(ptp, struct lio, ptp_info);
1825         struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
1826
1827         ns = timespec_to_ns(ts);
1828
1829         spin_lock_irqsave(&lio->ptp_lock, flags);
1830         lio_pci_writeq(oct, ns, CN6XXX_MIO_PTP_CLOCK_HI);
1831         lio->ptp_adjust = 0;
1832         spin_unlock_irqrestore(&lio->ptp_lock, flags);
1833
1834         return 0;
1835 }
1836
1837 /**
1838  * \brief Enable PTP ancillary features (not supported, always returns -EOPNOTSUPP)
1839  * @param ptp PTP clock info
1840  * @param rq request
1841  * @param on whether to enable or disable the feature
1842  */
1843 static int
1844 liquidio_ptp_enable(struct ptp_clock_info *ptp __attribute__((unused)),
1845                     struct ptp_clock_request *rq __attribute__((unused)),
1846                     int on __attribute__((unused)))
1847 {
1848         return -EOPNOTSUPP;
1849 }
1850
1851 /**
1852  * \brief Open PTP clock source
1853  * @param netdev network device
1854  */
1855 static void oct_ptp_open(struct net_device *netdev)
1856 {
1857         struct lio *lio = GET_LIO(netdev);
1858         struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
1859
1860         spin_lock_init(&lio->ptp_lock);
1861
1862         snprintf(lio->ptp_info.name, 16, "%s", netdev->name);
1863         lio->ptp_info.owner = THIS_MODULE;
1864         lio->ptp_info.max_adj = 250000000;
1865         lio->ptp_info.n_alarm = 0;
1866         lio->ptp_info.n_ext_ts = 0;
1867         lio->ptp_info.n_per_out = 0;
1868         lio->ptp_info.pps = 0;
1869         lio->ptp_info.adjfreq = liquidio_ptp_adjfreq;
1870         lio->ptp_info.adjtime = liquidio_ptp_adjtime;
1871         lio->ptp_info.gettime64 = liquidio_ptp_gettime;
1872         lio->ptp_info.settime64 = liquidio_ptp_settime;
1873         lio->ptp_info.enable = liquidio_ptp_enable;
1874
1875         lio->ptp_adjust = 0;
1876
1877         lio->ptp_clock = ptp_clock_register(&lio->ptp_info,
1878                                              &oct->pci_dev->dev);
1879
1880         if (IS_ERR(lio->ptp_clock))
1881                 lio->ptp_clock = NULL;
1882 }
1883
1884 /**
1885  * \brief Init PTP clock
1886  * @param oct octeon device
1887  */
1888 static void liquidio_ptp_init(struct octeon_device *oct)
1889 {
1890         u64 clock_comp, cfg;
1891
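        /* Program the nominal compensation: NSEC_PER_SEC / coproc_clock_rate
         * nanoseconds per coprocessor tick, expressed in 32.32 fixed point
         * (the same format adjusted by liquidio_ptp_adjfreq above).
         */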
1892         clock_comp = (u64)NSEC_PER_SEC << 32;
1893         do_div(clock_comp, oct->coproc_clock_rate);
1894         lio_pci_writeq(oct, clock_comp, CN6XXX_MIO_PTP_CLOCK_COMP);
1895
1896         /* Enable the PTP clock (bit 0 of the clock config register) */
1897         cfg = lio_pci_readq(oct, CN6XXX_MIO_PTP_CLOCK_CFG);
1898         lio_pci_writeq(oct, cfg | 0x01, CN6XXX_MIO_PTP_CLOCK_CFG);
1899 }
1900
1901 /**
1902  * \brief Load firmware to device
1903  * @param oct octeon device
1904  *
1905  * Maps device to firmware filename, requests firmware, and downloads it
1906  */
1907 static int load_firmware(struct octeon_device *oct)
1908 {
1909         int ret = 0;
1910         const struct firmware *fw;
1911         char fw_name[LIO_MAX_FW_FILENAME_LEN];
1912         char *tmp_fw_type;
1913
1914         if (strncmp(fw_type, LIO_FW_NAME_TYPE_NONE,
1915                     sizeof(LIO_FW_NAME_TYPE_NONE)) == 0) {
1916                 dev_info(&oct->pci_dev->dev, "Skipping firmware load\n");
1917                 return ret;
1918         }
1919
1920         if (fw_type[0] == '\0')
1921                 tmp_fw_type = LIO_FW_NAME_TYPE_NIC;
1922         else
1923                 tmp_fw_type = fw_type;
1924
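        /* The firmware file name is assembled from the image macros; e.g. a
         * 210sv card with the default "nic" type would request something like
         * "liquidio/lio_210sv_nic.bin" (exact values come from
         * liquidio_image.h; the example name is illustrative).
         */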
1925         sprintf(fw_name, "%s%s%s_%s%s", LIO_FW_DIR, LIO_FW_BASE_NAME,
1926                 octeon_get_conf(oct)->card_name, tmp_fw_type,
1927                 LIO_FW_NAME_SUFFIX);
1928
1929         ret = request_firmware(&fw, fw_name, &oct->pci_dev->dev);
1930         if (ret) {
1931                 dev_err(&oct->pci_dev->dev, "Request firmware failed. Could not find file %s.\n",
1932                         fw_name);
1933                 release_firmware(fw);
1934                 return ret;
1935         }
1936
1937         ret = octeon_download_firmware(oct, fw->data, fw->size);
1938
1939         release_firmware(fw);
1940
1941         return ret;
1942 }
1943
1944 /**
1945  * \brief Setup output queue
1946  * @param oct octeon device
1947  * @param q_no which queue
1948  * @param num_descs how many descriptors
1949  * @param desc_size size of each descriptor
1950  * @param app_ctx application context
1951  */
1952 static int octeon_setup_droq(struct octeon_device *oct, int q_no, int num_descs,
1953                              int desc_size, void *app_ctx)
1954 {
1955         int ret_val = 0;
1956
1957         dev_dbg(&oct->pci_dev->dev, "Creating Droq: %d\n", q_no);
1958         /* droq creation and local register settings. */
1959         ret_val = octeon_create_droq(oct, q_no, num_descs, desc_size, app_ctx);
1960         if (ret_val < 0)
1961                 return ret_val;
1962
1963         if (ret_val == 1) {
1964                 dev_dbg(&oct->pci_dev->dev, "Using default droq %d\n", q_no);
1965                 return 0;
1966         }
1967         /* tasklet creation for the droq */
1968
1969         /* Enable the droq queues */
1970         octeon_set_droq_pkt_op(oct, q_no, 1);
1971
1972         /* Send Credit for Octeon Output queues. Credits are always
1973          * sent after the output queue is enabled.
1974          */
1975         writel(oct->droq[q_no]->max_count,
1976                oct->droq[q_no]->pkts_credit_reg);
1977
1978         return ret_val;
1979 }
1980
1981 /**
1982  * \brief Callback for getting interface configuration
1983  * @param status status of request
1984  * @param buf pointer to resp structure
1985  */
1986 static void if_cfg_callback(struct octeon_device *oct,
1987                             u32 status __attribute__((unused)),
1988                             void *buf)
1989 {
1990         struct octeon_soft_command *sc = (struct octeon_soft_command *)buf;
1991         struct liquidio_if_cfg_resp *resp;
1992         struct liquidio_if_cfg_context *ctx;
1993
1994         resp = (struct liquidio_if_cfg_resp *)sc->virtrptr;
1995         ctx  = (struct liquidio_if_cfg_context *)sc->ctxptr;
1996
1997         oct = lio_get_device(ctx->octeon_id);
1998         if (resp->status)
1999                 dev_err(&oct->pci_dev->dev, "nic if cfg instruction failed. Status: %llx\n",
2000                         CVM_CAST64(resp->status));
2001         WRITE_ONCE(ctx->cond, 1);
2002
2003         snprintf(oct->fw_info.liquidio_firmware_version, 32, "%s",
2004                  resp->cfg_info.liquidio_firmware_version);
2005
2006         /* This barrier is required to be sure that the response has been
2007          * written fully before waking up the handler
2008          */
2009         wmb();
2010
2011         wake_up_interruptible(&ctx->wc);
2012 }
2013
2014 /**
2015  * \brief Select queue based on hash
2016  * @param dev Net device
2017  * @param skb sk_buff structure
2018  * @returns selected queue number
2019  */
2020 static u16 select_q(struct net_device *dev, struct sk_buff *skb,
2021                     void *accel_priv __attribute__((unused)),
2022                     select_queue_fallback_t fallback __attribute__((unused)))
2023 {
2024         u32 qindex = 0;
2025         struct lio *lio;
2026
2027         lio = GET_LIO(dev);
2028         qindex = skb_tx_hash(dev, skb);
2029
2030         return (u16)(qindex % (lio->linfo.num_txpciq));
2031 }
2032
2033 /** Routine to push packets arriving on the Octeon interface up to the network layer.
2034  * @param oct_id   - octeon device id.
2035  * @param skbuff   - skbuff struct to be passed to network layer.
2036  * @param len      - size of total data received.
2037  * @param rh       - Control header associated with the packet
2038  * @param param    - additional control data with the packet
2039  * @param arg      - farg registered in droq_ops
2040  */
2041 static void
2042 liquidio_push_packet(u32 octeon_id __attribute__((unused)),
2043                      void *skbuff,
2044                      u32 len,
2045                      union octeon_rh *rh,
2046                      void *param,
2047                      void *arg)
2048 {
2049         struct napi_struct *napi = param;
2050         struct sk_buff *skb = (struct sk_buff *)skbuff;
2051         struct skb_shared_hwtstamps *shhwtstamps;
2052         u64 ns;
2053         u16 vtag = 0;
2054         struct net_device *netdev = (struct net_device *)arg;
2055         struct octeon_droq *droq = container_of(param, struct octeon_droq,
2056                                                 napi);
2057         if (netdev) {
2058                 int packet_was_received;
2059                 struct lio *lio = GET_LIO(netdev);
2060                 struct octeon_device *oct = lio->oct_dev;
2061
2062                 /* Do not proceed if the interface is not in RUNNING state. */
2063                 if (!ifstate_check(lio, LIO_IFSTATE_RUNNING)) {
2064                         recv_buffer_free(skb);
2065                         droq->stats.rx_dropped++;
2066                         return;
2067                 }
2068
2069                 skb->dev = netdev;
2070
2071                 skb_record_rx_queue(skb, droq->q_no);
2072                 if (likely(len > MIN_SKB_SIZE)) {
2073                         struct octeon_skb_page_info *pg_info;
2074                         unsigned char *va;
2075
2076                         pg_info = ((struct octeon_skb_page_info *)(skb->cb));
2077                         if (pg_info->page) {
2078                                 /* For Paged allocation use the frags */
2079                                 va = page_address(pg_info->page) +
2080                                         pg_info->page_offset;
2081                                 memcpy(skb->data, va, MIN_SKB_SIZE);
2082                                 skb_put(skb, MIN_SKB_SIZE);
2083                                 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
2084                                                 pg_info->page,
2085                                                 pg_info->page_offset +
2086                                                 MIN_SKB_SIZE,
2087                                                 len - MIN_SKB_SIZE,
2088                                                 LIO_RXBUFFER_SZ);
2089                         }
2090                 } else {
2091                         struct octeon_skb_page_info *pg_info =
2092                                 ((struct octeon_skb_page_info *)(skb->cb));
2093                         skb_copy_to_linear_data(skb, page_address(pg_info->page)
2094                                                 + pg_info->page_offset, len);
2095                         skb_put(skb, len);
2096                         put_page(pg_info->page);
2097                 }
2098
2099                 if (((oct->chip_id == OCTEON_CN66XX) ||
2100                      (oct->chip_id == OCTEON_CN68XX)) &&
2101                     ptp_enable) {
2102                         if (rh->r_dh.has_hwtstamp) {
2103                                 /* timestamp is included from the hardware at
2104                                  * the beginning of the packet.
2105                                  */
2106                                 if (ifstate_check
2107                                     (lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED)) {
2108                                         /* Nanoseconds are in the first 64-bits
2109                                          * of the packet.
2110                                          */
2111                                         memcpy(&ns, (skb->data), sizeof(ns));
2112                                         shhwtstamps = skb_hwtstamps(skb);
2113                                         shhwtstamps->hwtstamp =
2114                                                 ns_to_ktime(ns +
2115                                                             lio->ptp_adjust);
2116                                 }
2117                                 skb_pull(skb, sizeof(ns));
2118                         }
2119                 }
2120
2121                 skb->protocol = eth_type_trans(skb, skb->dev);
2122                 if ((netdev->features & NETIF_F_RXCSUM) &&
2123                     (((rh->r_dh.encap_on) &&
2124                       (rh->r_dh.csum_verified & CNNIC_TUN_CSUM_VERIFIED)) ||
2125                      (!(rh->r_dh.encap_on) &&
2126                       (rh->r_dh.csum_verified & CNNIC_CSUM_VERIFIED))))
2127                         /* checksum has already been verified */
2128                         skb->ip_summed = CHECKSUM_UNNECESSARY;
2129                 else
2130                         skb->ip_summed = CHECKSUM_NONE;
2131
2132                 /* Set the encapsulation fields based on the status received
2133                  * from the firmware.
2134                  */
2135                 if (rh->r_dh.encap_on) {
2136                         skb->encapsulation = 1;
2137                         skb->csum_level = 1;
2138                         droq->stats.rx_vxlan++;
2139                 }
2140
2141                 /* inbound VLAN tag */
2142                 if ((netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
2143                     (rh->r_dh.vlan != 0)) {
2144                         u16 vid = rh->r_dh.vlan;
2145                         u16 priority = rh->r_dh.priority;
2146
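                        /* Build the 802.1Q TCI: the 3-bit priority (PCP)
                         * occupies bits 15:13 and the 12-bit VID bits 11:0;
                         * the DEI bit is left clear.
                         */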
2147                         vtag = priority << 13 | vid;
2148                         __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vtag);
2149                 }
2150
2151                 packet_was_received = napi_gro_receive(napi, skb) != GRO_DROP;
2152
2153                 if (packet_was_received) {
2154                         droq->stats.rx_bytes_received += len;
2155                         droq->stats.rx_pkts_received++;
2156                         netdev->last_rx = jiffies;
2157                 } else {
2158                         droq->stats.rx_dropped++;
2159                         netif_info(lio, rx_err, lio->netdev,
2160                                    "droq:%d  error rx_dropped:%llu\n",
2161                                    droq->q_no, droq->stats.rx_dropped);
2162                 }
2163
2164         } else {
2165                 recv_buffer_free(skb);
2166         }
2167 }
2168
2169 /**
2170  * \brief wrapper for calling napi_schedule
2171  * @param param parameters to pass to napi_schedule
2172  *
2173  * Used when scheduling on different CPUs
2174  */
2175 static void napi_schedule_wrapper(void *param)
2176 {
2177         struct napi_struct *napi = param;
2178
2179         napi_schedule(napi);
2180 }
2181
2182 /**
2183  * \brief callback when receive interrupt occurs and we are in NAPI mode
2184  * @param arg pointer to octeon output queue
2185  */
2186 static void liquidio_napi_drv_callback(void *arg)
2187 {
2188         struct octeon_droq *droq = arg;
2189         int this_cpu = smp_processor_id();
2190
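        /* If the interrupt landed on the CPU assigned to this DROQ, schedule
         * NAPI directly; otherwise fire an async IPI so the poll runs on the
         * DROQ's designated CPU (see the cpu_id assignment in setup_io_queues).
         */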
2191         if (droq->cpu_id == this_cpu) {
2192                 napi_schedule(&droq->napi);
2193         } else {
2194                 struct call_single_data *csd = &droq->csd;
2195
2196                 csd->func = napi_schedule_wrapper;
2197                 csd->info = &droq->napi;
2198                 csd->flags = 0;
2199
2200                 smp_call_function_single_async(droq->cpu_id, csd);
2201         }
2202 }
2203
2204 /**
2205  * \brief Entry point for NAPI polling
2206  * @param napi NAPI structure
2207  * @param budget maximum number of items to process
2208  */
2209 static int liquidio_napi_poll(struct napi_struct *napi, int budget)
2210 {
2211         struct octeon_droq *droq;
2212         int work_done;
2213         int tx_done = 0, iq_no;
2214         struct octeon_instr_queue *iq;
2215         struct octeon_device *oct;
2216
2217         droq = container_of(napi, struct octeon_droq, napi);
2218         oct = droq->oct_dev;
2219         iq_no = droq->q_no;
2220         /* Handle Droq descriptors */
2221         work_done = octeon_process_droq_poll_cmd(oct, droq->q_no,
2222                                                  POLL_EVENT_PROCESS_PKTS,
2223                                                  budget);
2224
2225         /* Flush the instruction queue */
2226         iq = oct->instr_queue[iq_no];
2227         if (iq) {
2228                 /* Process IQ buffers within the budget limit */
2229                 tx_done = octeon_flush_iq(oct, iq, 1, budget);
2230                 /* Update iq read-index rather than waiting for next interrupt.
2231                  * Return if tx_done is false.
2232                  */
2233                 update_txq_status(oct, iq_no);
2234                 /*tx_done = (iq->flush_index == iq->octeon_read_index);*/
2235         } else {
2236                 dev_err(&oct->pci_dev->dev, "%s:  iq (%d) num invalid\n",
2237                         __func__, iq_no);
2238         }
2239
2240         if ((work_done < budget) && (tx_done)) {
2241                 napi_complete(napi);
2242                 octeon_process_droq_poll_cmd(droq->oct_dev, droq->q_no,
2243                                              POLL_EVENT_ENABLE_INTR, 0);
2244                 return 0;
2245         }
2246
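        /* Returning the full budget keeps NAPI polling while Tx completions
         * are still pending on the instruction queue; otherwise report the Rx
         * work actually done (interrupts were re-armed above when work_done
         * fell below the budget).
         */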
2247         return (!tx_done) ? (budget) : (work_done);
2248 }
2249
2250 /**
2251  * \brief Setup input and output queues
2252  * @param octeon_dev octeon device
2253  * @param ifidx  Interface Index
2254  *
2255  * Note: Queues are with respect to the octeon device. Thus
2256  * an input queue is for egress packets, and output queues
2257  * are for ingress packets.
2258  */
2259 static inline int setup_io_queues(struct octeon_device *octeon_dev,
2260                                   int ifidx)
2261 {
2262         struct octeon_droq_ops droq_ops;
2263         struct net_device *netdev;
2264         static int cpu_id;
2265         static int cpu_id_modulus;
2266         struct octeon_droq *droq;
2267         struct napi_struct *napi;
2268         int q, q_no, retval = 0;
2269         struct lio *lio;
2270         int num_tx_descs;
2271
2272         netdev = octeon_dev->props[ifidx].netdev;
2273
2274         lio = GET_LIO(netdev);
2275
2276         memset(&droq_ops, 0, sizeof(struct octeon_droq_ops));
2277
2278         droq_ops.fptr = liquidio_push_packet;
2279         droq_ops.farg = (void *)netdev;
2280
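        /* poll_mode hands received packets to liquidio_push_packet from the
         * NAPI poll loop; napi_fn is called from interrupt context only to
         * schedule that poll.
         */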
2281         droq_ops.poll_mode = 1;
2282         droq_ops.napi_fn = liquidio_napi_drv_callback;
2283         cpu_id = 0;
2284         cpu_id_modulus = num_present_cpus();
2285
2286         /* set up DROQs. */
2287         for (q = 0; q < lio->linfo.num_rxpciq; q++) {
2288                 q_no = lio->linfo.rxpciq[q].s.q_no;
2289                 dev_dbg(&octeon_dev->pci_dev->dev,
2290                         "setup_io_queues index:%d linfo.rxpciq.s.q_no:%d\n",
2291                         q, q_no);
2292                 retval = octeon_setup_droq(octeon_dev, q_no,
2293                                            CFG_GET_NUM_RX_DESCS_NIC_IF
2294                                                    (octeon_get_conf(octeon_dev),
2295                                                    lio->ifidx),
2296                                            CFG_GET_NUM_RX_BUF_SIZE_NIC_IF
2297                                                    (octeon_get_conf(octeon_dev),
2298                                                    lio->ifidx), NULL);
2299                 if (retval) {
2300                         dev_err(&octeon_dev->pci_dev->dev,
2301                                 "%s : Runtime DROQ(RxQ) creation failed.\n",
2302                                 __func__);
2303                         return 1;
2304                 }
2305
2306                 droq = octeon_dev->droq[q_no];
2307                 napi = &droq->napi;
2308                 dev_dbg(&octeon_dev->pci_dev->dev, "netif_napi_add netdev:%llx oct:%llx pf_num:%d\n",
2309                         (u64)netdev, (u64)octeon_dev, octeon_dev->pf_num);
2310                 netif_napi_add(netdev, napi, liquidio_napi_poll, 64);
2311
2312                 /* designate a CPU for this droq */
2313                 droq->cpu_id = cpu_id;
2314                 cpu_id++;
2315                 if (cpu_id >= cpu_id_modulus)
2316                         cpu_id = 0;
2317
2318                 octeon_register_droq_ops(octeon_dev, q_no, &droq_ops);
2319         }
2320
2321         if (OCTEON_CN23XX_PF(octeon_dev)) {
2322                 /* 23XX PF can receive control messages (via the first PF-owned
2323                  * droq) from the firmware even if the ethX interface is down,
2324                  * so that's why poll_mode must be off for the first droq.
2325                  */
2326                 octeon_dev->droq[0]->ops.poll_mode = 0;
2327         }
2328
2329         /* set up IQs. */
2330         for (q = 0; q < lio->linfo.num_txpciq; q++) {
2331                 num_tx_descs = CFG_GET_NUM_TX_DESCS_NIC_IF(octeon_get_conf
2332                                                            (octeon_dev),
2333                                                            lio->ifidx);
2334                 retval = octeon_setup_iq(octeon_dev, ifidx, q,
2335                                          lio->linfo.txpciq[q], num_tx_descs,
2336                                          netdev_get_tx_queue(netdev, q));
2337                 if (retval) {
2338                         dev_err(&octeon_dev->pci_dev->dev,
2339                                 " %s : Runtime IQ(TxQ) creation failed.\n",
2340                                 __func__);
2341                         return 1;
2342                 }
2343         }
2344
2345         return 0;
2346 }
2347
2348 /**
2349  * \brief Poll routine for checking transmit queue status
2350  * @param work work_struct data structure
2351  */
2352 static void octnet_poll_check_txq_status(struct work_struct *work)
2353 {
2354         struct cavium_wk *wk = (struct cavium_wk *)work;
2355         struct lio *lio = (struct lio *)wk->ctxptr;
2356
2357         if (!ifstate_check(lio, LIO_IFSTATE_RUNNING))
2358                 return;
2359
2360         check_txq_status(lio);
2361         queue_delayed_work(lio->txq_status_wq.wq,
2362                            &lio->txq_status_wq.wk.work, msecs_to_jiffies(1));
2363 }
2364
2365 /**
2366  * \brief Sets up the txq poll check
2367  * @param netdev network device
2368  */
2369 static inline int setup_tx_poll_fn(struct net_device *netdev)
2370 {
2371         struct lio *lio = GET_LIO(netdev);
2372         struct octeon_device *oct = lio->oct_dev;
2373
2374         lio->txq_status_wq.wq = alloc_workqueue("txq-status",
2375                                                 WQ_MEM_RECLAIM, 0);
2376         if (!lio->txq_status_wq.wq) {
2377                 dev_err(&oct->pci_dev->dev, "unable to create cavium txq status wq\n");
2378                 return -1;
2379         }
2380         INIT_DELAYED_WORK(&lio->txq_status_wq.wk.work,
2381                           octnet_poll_check_txq_status);
2382         lio->txq_status_wq.wk.ctxptr = lio;
2383         queue_delayed_work(lio->txq_status_wq.wq,
2384                            &lio->txq_status_wq.wk.work, msecs_to_jiffies(1));
2385         return 0;
2386 }
2387
2388 static inline void cleanup_tx_poll_fn(struct net_device *netdev)
2389 {
2390         struct lio *lio = GET_LIO(netdev);
2391
2392         if (lio->txq_status_wq.wq) {
2393                 cancel_delayed_work_sync(&lio->txq_status_wq.wk.work);
2394                 destroy_workqueue(lio->txq_status_wq.wq);
2395         }
2396 }
2397
2398 /**
2399  * \brief Net device open for LiquidIO
2400  * @param netdev network device
2401  */
2402 static int liquidio_open(struct net_device *netdev)
2403 {
2404         struct lio *lio = GET_LIO(netdev);
2405         struct octeon_device *oct = lio->oct_dev;
2406         struct napi_struct *napi, *n;
2407
2408         if (oct->props[lio->ifidx].napi_enabled == 0) {
2409                 list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
2410                         napi_enable(napi);
2411
2412                 oct->props[lio->ifidx].napi_enabled = 1;
2413
2414                 if (OCTEON_CN23XX_PF(oct))
2415                         oct->droq[0]->ops.poll_mode = 1;
2416         }
2417
2418         oct_ptp_open(netdev);
2419
2420         ifstate_set(lio, LIO_IFSTATE_RUNNING);
2421
2422         /* Ready for link status updates */
2423         lio->intf_open = 1;
2424
2425         netif_info(lio, ifup, lio->netdev, "Interface Open, ready for traffic\n");
2426
2427         if (OCTEON_CN23XX_PF(oct)) {
2428                 if (!oct->msix_on)
2429                         if (setup_tx_poll_fn(netdev))
2430                                 return -1;
2431         } else {
2432                 if (setup_tx_poll_fn(netdev))
2433                         return -1;
2434         }
2435
2436         start_txq(netdev);
2437
2438         /* tell Octeon to start forwarding packets to host */
2439         send_rx_ctrl_cmd(lio, 1);
2440
2441         dev_info(&oct->pci_dev->dev, "%s interface is opened\n",
2442                  netdev->name);
2443
2444         return 0;
2445 }
2446
2447 /**
2448  * \brief Net device stop for LiquidIO
2449  * @param netdev network device
2450  */
2451 static int liquidio_stop(struct net_device *netdev)
2452 {
2453         struct lio *lio = GET_LIO(netdev);
2454         struct octeon_device *oct = lio->oct_dev;
2455
2456         ifstate_reset(lio, LIO_IFSTATE_RUNNING);
2457
2458         netif_tx_disable(netdev);
2459
2460         /* Inform that netif carrier is down */
2461         netif_carrier_off(netdev);
2462         lio->intf_open = 0;
2463         lio->linfo.link.s.link_up = 0;
2464         lio->link_changes++;
2465
2466         /* Pause for a moment and wait for Octeon to flush out (to the wire) any
2467          * egress packets that are in-flight.
2468          */
2469         set_current_state(TASK_INTERRUPTIBLE);
2470         schedule_timeout(msecs_to_jiffies(100));
2471
2472         /* Now it should be safe to tell Octeon that nic interface is down. */
2473         send_rx_ctrl_cmd(lio, 0);
2474
2475         if (OCTEON_CN23XX_PF(oct)) {
2476                 if (!oct->msix_on)
2477                         cleanup_tx_poll_fn(netdev);
2478         } else {
2479                 cleanup_tx_poll_fn(netdev);
2480         }
2481
2482         if (lio->ptp_clock) {
2483                 ptp_clock_unregister(lio->ptp_clock);
2484                 lio->ptp_clock = NULL;
2485         }
2486
2487         dev_info(&oct->pci_dev->dev, "%s interface is stopped\n", netdev->name);
2488
2489         return 0;
2490 }
2491
2492 /**
2493  * \brief Converts a mask based on net device flags
2494  * @param netdev network device
2495  *
2496  * This routine generates an octnet_ifflags mask from the net device flags
2497  * received from the OS.
2498  */
2499 static inline enum octnet_ifflags get_new_flags(struct net_device *netdev)
2500 {
2501         enum octnet_ifflags f = OCTNET_IFFLAG_UNICAST;
2502
2503         if (netdev->flags & IFF_PROMISC)
2504                 f |= OCTNET_IFFLAG_PROMISC;
2505
2506         if (netdev->flags & IFF_ALLMULTI)
2507                 f |= OCTNET_IFFLAG_ALLMULTI;
2508
2509         if (netdev->flags & IFF_MULTICAST) {
2510                 f |= OCTNET_IFFLAG_MULTICAST;
2511
2512                 /* Accept all multicast addresses if there are more than we
2513                  * can handle
2514                  */
2515                 if (netdev_mc_count(netdev) > MAX_OCTEON_MULTICAST_ADDR)
2516                         f |= OCTNET_IFFLAG_ALLMULTI;
2517         }
2518
2519         if (netdev->flags & IFF_BROADCAST)
2520                 f |= OCTNET_IFFLAG_BROADCAST;
2521
2522         return f;
2523 }
2524
2525 /**
2526  * \brief Net device set_multicast_list
2527  * @param netdev network device
2528  */
2529 static void liquidio_set_mcast_list(struct net_device *netdev)
2530 {
2531         struct lio *lio = GET_LIO(netdev);
2532         struct octeon_device *oct = lio->oct_dev;
2533         struct octnic_ctrl_pkt nctrl;
2534         struct netdev_hw_addr *ha;
2535         u64 *mc;
2536         int ret;
2537         int mc_count = min(netdev_mc_count(netdev), MAX_OCTEON_MULTICAST_ADDR);
2538
2539         memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
2540
2541         /* Create a ctrl pkt command to be sent to core app. */
2542         nctrl.ncmd.u64 = 0;
2543         nctrl.ncmd.s.cmd = OCTNET_CMD_SET_MULTI_LIST;
2544         nctrl.ncmd.s.param1 = get_new_flags(netdev);
2545         nctrl.ncmd.s.param2 = mc_count;
2546         nctrl.ncmd.s.more = mc_count;
2547         nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
2548         nctrl.netpndev = (u64)netdev;
2549         nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
2550
2551         /* copy all the addresses into the udd */
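        /* Each 64-bit udd word carries one 6-byte MAC address in its low six
         * bytes, hence the +2 byte offset in the memcpy below.
         */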
2552         mc = &nctrl.udd[0];
2553         netdev_for_each_mc_addr(ha, netdev) {
2554                 *mc = 0;
2555                 memcpy(((u8 *)mc) + 2, ha->addr, ETH_ALEN);
2556                 /* no need to swap bytes */
2557
2558                 if (++mc > &nctrl.udd[mc_count])
2559                         break;
2560         }
2561
2562         /* Apparently, any activity in this call from the kernel has to
2563          * be atomic. So we won't wait for a response.
2564          */
2565         nctrl.wait_time = 0;
2566
2567         ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
2568         if (ret < 0) {
2569                 dev_err(&oct->pci_dev->dev, "DEVFLAGS change failed in core (ret: 0x%x)\n",
2570                         ret);
2571         }
2572 }
2573
2574 /**
2575  * \brief Net device set_mac_address
2576  * @param netdev network device
2577  */
2578 static int liquidio_set_mac(struct net_device *netdev, void *p)
2579 {
2580         int ret = 0;
2581         struct lio *lio = GET_LIO(netdev);
2582         struct octeon_device *oct = lio->oct_dev;
2583         struct sockaddr *addr = (struct sockaddr *)p;
2584         struct octnic_ctrl_pkt nctrl;
2585
2586         if (!is_valid_ether_addr(addr->sa_data))
2587                 return -EADDRNOTAVAIL;
2588
2589         memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
2590
2591         nctrl.ncmd.u64 = 0;
2592         nctrl.ncmd.s.cmd = OCTNET_CMD_CHANGE_MACADDR;
2593         nctrl.ncmd.s.param1 = 0;
2594         nctrl.ncmd.s.more = 1;
2595         nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
2596         nctrl.netpndev = (u64)netdev;
2597         nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
2598         nctrl.wait_time = 100;
2599
2600         nctrl.udd[0] = 0;
2601         /* The MAC Address is presented in network byte order. */
2602         memcpy((u8 *)&nctrl.udd[0] + 2, addr->sa_data, ETH_ALEN);
2603
2604         ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
2605         if (ret < 0) {
2606                 dev_err(&oct->pci_dev->dev, "MAC Address change failed\n");
2607                 return -ENOMEM;
2608         }
2609         memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
2610         memcpy(((u8 *)&lio->linfo.hw_addr) + 2, addr->sa_data, ETH_ALEN);
2611
2612         return 0;
2613 }
2614
2615 /**
2616  * \brief Net device get_stats
2617  * @param netdev network device
2618  */
2619 static struct net_device_stats *liquidio_get_stats(struct net_device *netdev)
2620 {
2621         struct lio *lio = GET_LIO(netdev);
2622         struct net_device_stats *stats = &netdev->stats;
2623         struct octeon_device *oct;
2624         u64 pkts = 0, drop = 0, bytes = 0;
2625         struct oct_droq_stats *oq_stats;
2626         struct oct_iq_stats *iq_stats;
2627         int i, iq_no, oq_no;
2628
2629         oct = lio->oct_dev;
2630
2631         for (i = 0; i < lio->linfo.num_txpciq; i++) {
2632                 iq_no = lio->linfo.txpciq[i].s.q_no;
2633                 iq_stats = &oct->instr_queue[iq_no]->stats;
2634                 pkts += iq_stats->tx_done;
2635                 drop += iq_stats->tx_dropped;
2636                 bytes += iq_stats->tx_tot_bytes;
2637         }
2638
2639         stats->tx_packets = pkts;
2640         stats->tx_bytes = bytes;
2641         stats->tx_dropped = drop;
2642
2643         pkts = 0;
2644         drop = 0;
2645         bytes = 0;
2646
2647         for (i = 0; i < lio->linfo.num_rxpciq; i++) {
2648                 oq_no = lio->linfo.rxpciq[i].s.q_no;
2649                 oq_stats = &oct->droq[oq_no]->stats;
2650                 pkts += oq_stats->rx_pkts_received;
2651                 drop += (oq_stats->rx_dropped +
2652                          oq_stats->dropped_nodispatch +
2653                          oq_stats->dropped_toomany +
2654                          oq_stats->dropped_nomem);
2655                 bytes += oq_stats->rx_bytes_received;
2656         }
2657
2658         stats->rx_bytes = bytes;
2659         stats->rx_packets = pkts;
2660         stats->rx_dropped = drop;
2661
2662         return stats;
2663 }
2664
2665 /**
2666  * \brief Net device change_mtu
2667  * @param netdev network device
2668  */
2669 static int liquidio_change_mtu(struct net_device *netdev, int new_mtu)
2670 {
2671         struct lio *lio = GET_LIO(netdev);
2672         struct octeon_device *oct = lio->oct_dev;
2673         struct octnic_ctrl_pkt nctrl;
2674         int ret = 0;
2675
2676         /* Limit the MTU to make sure the ethernet packets are between 68 bytes
2677          * and 16000 bytes
2678          */
2679         if ((new_mtu < LIO_MIN_MTU_SIZE) ||
2680             (new_mtu > LIO_MAX_MTU_SIZE)) {
2681                 dev_err(&oct->pci_dev->dev, "Invalid MTU: %d\n", new_mtu);
2682                 dev_err(&oct->pci_dev->dev, "Valid range is %d to %d\n",
2683                         LIO_MIN_MTU_SIZE, LIO_MAX_MTU_SIZE);
2684                 return -EINVAL;
2685         }
2686
2687         memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
2688
2689         nctrl.ncmd.u64 = 0;
2690         nctrl.ncmd.s.cmd = OCTNET_CMD_CHANGE_MTU;
2691         nctrl.ncmd.s.param1 = new_mtu;
2692         nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
2693         nctrl.wait_time = 100;
2694         nctrl.netpndev = (u64)netdev;
2695         nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
2696
2697         ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
2698         if (ret < 0) {
2699                 dev_err(&oct->pci_dev->dev, "Failed to set MTU\n");
2700                 return -1;
2701         }
2702
2703         lio->mtu = new_mtu;
2704
2705         return 0;
2706 }
2707
2708 /**
2709  * \brief Handler for SIOCSHWTSTAMP ioctl
2710  * @param netdev network device
2711  * @param ifr interface request
2713  */
2714 static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr)
2715 {
2716         struct hwtstamp_config conf;
2717         struct lio *lio = GET_LIO(netdev);
2718
2719         if (copy_from_user(&conf, ifr->ifr_data, sizeof(conf)))
2720                 return -EFAULT;
2721
2722         if (conf.flags)
2723                 return -EINVAL;
2724
2725         switch (conf.tx_type) {
2726         case HWTSTAMP_TX_ON:
2727         case HWTSTAMP_TX_OFF:
2728                 break;
2729         default:
2730                 return -ERANGE;
2731         }
2732
2733         switch (conf.rx_filter) {
2734         case HWTSTAMP_FILTER_NONE:
2735                 break;
2736         case HWTSTAMP_FILTER_ALL:
2737         case HWTSTAMP_FILTER_SOME:
2738         case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
2739         case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
2740         case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
2741         case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
2742         case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
2743         case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
2744         case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
2745         case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
2746         case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
2747         case HWTSTAMP_FILTER_PTP_V2_EVENT:
2748         case HWTSTAMP_FILTER_PTP_V2_SYNC:
2749         case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
2750                 conf.rx_filter = HWTSTAMP_FILTER_ALL;
2751                 break;
2752         default:
2753                 return -ERANGE;
2754         }
2755
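        /* The NIC timestamps either every received packet or none, so any
         * accepted PTP filter was coerced to HWTSTAMP_FILTER_ALL above;
         * mirror that choice in the interface state.
         */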
2756         if (conf.rx_filter == HWTSTAMP_FILTER_ALL)
2757                 ifstate_set(lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED);
2759         else
2760                 ifstate_reset(lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED);
2761
2762         return copy_to_user(ifr->ifr_data, &conf, sizeof(conf)) ? -EFAULT : 0;
2763 }
2764
2765 /**
2766  * \brief ioctl handler
2767  * @param netdev network device
2768  * @param ifr interface request
2769  * @param cmd command
2770  */
2771 static int liquidio_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
2772 {
2773         switch (cmd) {
2774         case SIOCSHWTSTAMP:
2775                 return hwtstamp_ioctl(netdev, ifr);
2776         default:
2777                 return -EOPNOTSUPP;
2778         }
2779 }
2780
2781 /**
2782  * \brief handle a Tx timestamp response
2783  * @param status response status
2784  * @param buf pointer to skb
2785  */
2786 static void handle_timestamp(struct octeon_device *oct,
2787                              u32 status,
2788                              void *buf)
2789 {
2790         struct octnet_buf_free_info *finfo;
2791         struct octeon_soft_command *sc;
2792         struct oct_timestamp_resp *resp;
2793         struct lio *lio;
2794         struct sk_buff *skb = (struct sk_buff *)buf;
2795
2796         finfo = (struct octnet_buf_free_info *)skb->cb;
2797         lio = finfo->lio;
2798         sc = finfo->sc;
2799         oct = lio->oct_dev;
2800         resp = (struct oct_timestamp_resp *)sc->virtrptr;
2801
2802         if (status != OCTEON_REQUEST_DONE) {
2803                 dev_err(&oct->pci_dev->dev, "Tx timestamp instruction failed. Status: %llx\n",
2804                         CVM_CAST64(status));
2805                 resp->timestamp = 0;
2806         }
2807
2808         octeon_swap_8B_data(&resp->timestamp, 1);
2809
2810         if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) != 0)) {
2811                 struct skb_shared_hwtstamps ts;
2812                 u64 ns = resp->timestamp;
2813
2814                 netif_info(lio, tx_done, lio->netdev,
2815                            "Got resulting SKBTX_HW_TSTAMP skb=%p ns=%016llu\n",
2816                            skb, (unsigned long long)ns);
2817                 ts.hwtstamp = ns_to_ktime(ns + lio->ptp_adjust);
2818                 skb_tstamp_tx(skb, &ts);
2819         }
2820
2821         octeon_free_soft_command(oct, sc);
2822         tx_buffer_free(skb);
2823 }
2824
2825 /** \brief Send a data packet that will be timestamped
2826  * @param oct octeon device
2827  * @param ndata pointer to network data
2828  * @param finfo pointer to private network data
2829  */
2830 static inline int send_nic_timestamp_pkt(struct octeon_device *oct,
2831                                          struct octnic_data_pkt *ndata,
2832                                          struct octnet_buf_free_info *finfo)
2833 {
2834         int retval;
2835         struct octeon_soft_command *sc;
2836         struct lio *lio;
2837         int ring_doorbell;
2838         u32 len;
2839
2840         lio = finfo->lio;
2841
2842         sc = octeon_alloc_soft_command_resp(oct, &ndata->cmd,
2843                                             sizeof(struct oct_timestamp_resp));
2844         finfo->sc = sc;
2845
2846         if (!sc) {
2847                 dev_err(&oct->pci_dev->dev, "No memory for timestamped data packet\n");
2848                 return IQ_SEND_FAILED;
2849         }
2850
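        /* Promote the request to a response-carrying type so the completion
         * handler (handle_timestamp) gets the firmware-reported Tx timestamp.
         */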
2851         if (ndata->reqtype == REQTYPE_NORESP_NET)
2852                 ndata->reqtype = REQTYPE_RESP_NET;
2853         else if (ndata->reqtype == REQTYPE_NORESP_NET_SG)
2854                 ndata->reqtype = REQTYPE_RESP_NET_SG;
2855
2856         sc->callback = handle_timestamp;
2857         sc->callback_arg = finfo->skb;
2858         sc->iq_no = ndata->q_no;
2859
2860         len = (u32)((struct octeon_instr_ih2 *)(&sc->cmd.cmd2.ih2))->dlengsz;
2861
2862         ring_doorbell = 1;
2863         retval = octeon_send_command(oct, sc->iq_no, ring_doorbell, &sc->cmd,
2864                                      sc, len, ndata->reqtype);
2865
2866         if (retval == IQ_SEND_FAILED) {
2867                 dev_err(&oct->pci_dev->dev, "timestamp data packet failed status: %x\n",
2868                         retval);
2869                 octeon_free_soft_command(oct, sc);
2870         } else {
2871                 netif_info(lio, tx_queued, lio->netdev, "Queued timestamp packet\n");
2872         }
2873
2874         return retval;
2875 }
2876
2877 /** \brief Transmit network packets to the Octeon interface
2878  * @param skbuff   skbuff struct to be passed to network layer.
2879  * @param netdev    pointer to network device
2880  * @returns whether the packet was transmitted to the device okay or not
2881  *             (NETDEV_TX_OK or NETDEV_TX_BUSY)
2882  */
2883 static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
2884 {
2885         struct lio *lio;
2886         struct octnet_buf_free_info *finfo;
2887         union octnic_cmd_setup cmdsetup;
2888         struct octnic_data_pkt ndata;
2889         struct octeon_device *oct;
2890         struct oct_iq_stats *stats;
2891         struct octeon_instr_irh *irh;
2892         union tx_info *tx_info;
2893         int status = 0;
2894         int q_idx = 0, iq_no = 0;
2895         int j;
2896         u64 dptr = 0;
2897         u32 tag = 0;
2898
2899         lio = GET_LIO(netdev);
2900         oct = lio->oct_dev;
2901
2902         if (netif_is_multiqueue(netdev)) {
2903                 q_idx = skb->queue_mapping;
2904                 q_idx = (q_idx % (lio->linfo.num_txpciq));
2905                 tag = q_idx;
2906                 iq_no = lio->linfo.txpciq[q_idx].s.q_no;
2907         } else {
2908                 iq_no = lio->txq;
2909         }
2910
2911         stats = &oct->instr_queue[iq_no]->stats;
2912
2913         /* Check for all conditions in which the current packet cannot be
2914          * transmitted.
2915          */
2916         if (!(atomic_read(&lio->ifstate) & LIO_IFSTATE_RUNNING) ||
2917             (!lio->linfo.link.s.link_up) ||
2918             (skb->len <= 0)) {
2919                 netif_info(lio, tx_err, lio->netdev,
2920                            "Transmit failed link_status : %d\n",
2921                            lio->linfo.link.s.link_up);
2922                 goto lio_xmit_failed;
2923         }
2924
2925         /* Use space in skb->cb to store info used to unmap and
2926          * free the buffers.
2927          */
2928         finfo = (struct octnet_buf_free_info *)skb->cb;
2929         finfo->lio = lio;
2930         finfo->skb = skb;
2931         finfo->sc = NULL;
2932
2933         /* Prepare the attributes for the data to be passed to OSI. */
2934         memset(&ndata, 0, sizeof(struct octnic_data_pkt));
2935
2936         ndata.buf = (void *)finfo;
2937
2938         ndata.q_no = iq_no;
2939
2940         if (netif_is_multiqueue(netdev)) {
2941                 if (octnet_iq_is_full(oct, ndata.q_no)) {
2942                         /* defer sending if queue is full */
2943                         netif_info(lio, tx_err, lio->netdev, "Transmit failed iq:%d full\n",
2944                                    ndata.q_no);
2945                         stats->tx_iq_busy++;
2946                         return NETDEV_TX_BUSY;
2947                 }
2948         } else {
2949                 if (octnet_iq_is_full(oct, lio->txq)) {
2950                         /* defer sending if queue is full */
2951                         stats->tx_iq_busy++;
2952                         netif_info(lio, tx_err, lio->netdev, "Transmit failed iq:%d full\n",
2953                                    lio->txq);
2954                         return NETDEV_TX_BUSY;
2955                 }
2956         }
2957         /* pr_info(" XMIT - valid Qs: %d, 1st Q no: %d, cpu:  %d, q_no:%d\n",
2958          *      lio->linfo.num_txpciq, lio->txq, cpu, ndata.q_no);
2959          */
2960
2961         ndata.datasize = skb->len;
2962
2963         cmdsetup.u64 = 0;
2964         cmdsetup.s.iq_no = iq_no;
2965
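        /* For CHECKSUM_PARTIAL, ask the firmware to finish the checksum:
         * tunnel (inner/outer) checksum for encapsulated frames such as
         * VXLAN, plain transport checksum otherwise.
         */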
2966         if (skb->ip_summed == CHECKSUM_PARTIAL) {
2967                 if (skb->encapsulation) {
2968                         cmdsetup.s.tnl_csum = 1;
2969                         stats->tx_vxlan++;
2970                 } else {
2971                         cmdsetup.s.transport_csum = 1;
2972                 }
2973         }
2974         if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
2975                 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
2976                 cmdsetup.s.timestamp = 1;
2977         }
2978
2979         if (skb_shinfo(skb)->nr_frags == 0) {
2980                 cmdsetup.s.u.datasize = skb->len;
2981                 octnet_prepare_pci_cmd(oct, &ndata.cmd, &cmdsetup, tag);
2982
2983                 /* Map the packet's linear data for DMA to the device */
2984                 dptr = dma_map_single(&oct->pci_dev->dev,
2985                                       skb->data,
2986                                       skb->len,
2987                                       DMA_TO_DEVICE);
2988                 if (dma_mapping_error(&oct->pci_dev->dev, dptr)) {
2989                         dev_err(&oct->pci_dev->dev, "%s DMA mapping error 1\n",
2990                                 __func__);
2991                         return NETDEV_TX_BUSY;
2992                 }
2993
2994                 ndata.cmd.cmd2.dptr = dptr;
2995                 finfo->dptr = dptr;
2996                 ndata.reqtype = REQTYPE_NORESP_NET;
2997
2998         } else {
2999                 int i, frags;
3000                 struct skb_frag_struct *frag;
3001                 struct octnic_gather *g;
3002
3003                 spin_lock(&lio->glist_lock[q_idx]);
3004                 g = (struct octnic_gather *)
3005                         list_delete_head(&lio->glist[q_idx]);
3006                 spin_unlock(&lio->glist_lock[q_idx]);
3007
3008                 if (!g) {
3009                         netif_info(lio, tx_err, lio->netdev,
3010                                    "Transmit scatter gather: glist null!\n");
3011                         goto lio_xmit_failed;
3012                 }
3013
3014                 cmdsetup.s.gather = 1;
3015                 cmdsetup.s.u.gatherptrs = (skb_shinfo(skb)->nr_frags + 1);
3016                 octnet_prepare_pci_cmd(oct, &ndata.cmd, &cmdsetup, tag);
3017
3018                 memset(g->sg, 0, g->sg_size);
3019
3020                 g->sg[0].ptr[0] = dma_map_single(&oct->pci_dev->dev,
3021                                                  skb->data,
3022                                                  (skb->len - skb->data_len),
3023                                                  DMA_TO_DEVICE);
3024                 if (dma_mapping_error(&oct->pci_dev->dev, g->sg[0].ptr[0])) {
3025                         dev_err(&oct->pci_dev->dev, "%s DMA mapping error 2\n",
3026                                 __func__);
3027                         return NETDEV_TX_BUSY;
3028                 }
3029                 add_sg_size(&g->sg[0], (skb->len - skb->data_len), 0);
3030
3031                 frags = skb_shinfo(skb)->nr_frags;
3032                 i = 1;
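                     /* Each gather-list entry holds four pointers: (i >> 2)
                      * selects the entry and (i & 3) the slot within it;
                      * slot 0 of the first entry already holds the linear data.
                      */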
3033                 while (frags--) {
3034                         frag = &skb_shinfo(skb)->frags[i - 1];
3035
3036                         g->sg[(i >> 2)].ptr[(i & 3)] =
3037                                 dma_map_page(&oct->pci_dev->dev,
3038                                              frag->page.p,
3039                                              frag->page_offset,
3040                                              frag->size,
3041                                              DMA_TO_DEVICE);
3042
3043                         if (dma_mapping_error(&oct->pci_dev->dev,
3044                                               g->sg[i >> 2].ptr[i & 3])) {
3045                                 dma_unmap_single(&oct->pci_dev->dev,
3046                                                  g->sg[0].ptr[0],
3047                                                  skb->len - skb->data_len,
3048                                                  DMA_TO_DEVICE);
3049                                 for (j = 1; j < i; j++) {
3050                                         frag = &skb_shinfo(skb)->frags[j - 1];
3051                                         dma_unmap_page(&oct->pci_dev->dev,
3052                                                        g->sg[j >> 2].ptr[j & 3],
3053                                                        frag->size,
3054                                                        DMA_TO_DEVICE);
3055                                 }
3056                                 dev_err(&oct->pci_dev->dev, "%s DMA mapping error 3\n",
3057                                         __func__);
3058                                 return NETDEV_TX_BUSY;
3059                         }
3060
3061                         add_sg_size(&g->sg[(i >> 2)], frag->size, (i & 3));
3062                         i++;
3063                 }
3064
3065                 dma_sync_single_for_device(&oct->pci_dev->dev, g->sg_dma_ptr,
3066                                            g->sg_size, DMA_TO_DEVICE);
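                     /* dptr points at the gather list itself; the hardware
                      * follows the list to reach the mapped fragments.
                      */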
3067                 dptr = g->sg_dma_ptr;
3068
3069                 ndata.cmd.cmd2.dptr = dptr;
3070                 finfo->dptr = dptr;
3071                 finfo->g = g;
3072
3073                 ndata.reqtype = REQTYPE_NORESP_NET_SG;
3074         }
3075
3076         irh = (struct octeon_instr_irh *)&ndata.cmd.cmd2.irh;
3077         tx_info = (union tx_info *)&ndata.cmd.cmd2.ossp[0];
3078
3079         if (skb_shinfo(skb)->gso_size) {
3080                 tx_info->s.gso_size = skb_shinfo(skb)->gso_size;
3081                 tx_info->s.gso_segs = skb_shinfo(skb)->gso_segs;
3082                 stats->tx_gso++;
3083         }
3084
3085         /* HW insert VLAN tag */
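             /* The top three bits of the VLAN TCI carry the 802.1p priority;
              * the low 12 bits are the VLAN ID.
              */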
3086         if (skb_vlan_tag_present(skb)) {
3087                 irh->priority = skb_vlan_tag_get(skb) >> 13;
3088                 irh->vlan = skb_vlan_tag_get(skb) & 0xfff;
3089         }
3090
3091         if (unlikely(cmdsetup.s.timestamp))
3092                 status = send_nic_timestamp_pkt(oct, &ndata, finfo);
3093         else
3094                 status = octnet_send_nic_data_pkt(oct, &ndata);
3095         if (status == IQ_SEND_FAILED)
3096                 goto lio_xmit_failed;
3097
3098         netif_info(lio, tx_queued, lio->netdev, "Transmit queued successfully\n");
3099
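             /* IQ_SEND_STOP: the packet was accepted, but the instruction
              * queue is now full; stop this tx queue until space is freed.
              */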
3100         if (status == IQ_SEND_STOP)
3101                 stop_q(lio->netdev, q_idx);
3102
3103         netif_trans_update(netdev);
3104
3105         if (skb_shinfo(skb)->gso_size)
3106                 stats->tx_done += skb_shinfo(skb)->gso_segs;
3107         else
3108                 stats->tx_done++;
3109         stats->tx_tot_bytes += skb->len;
3110
3111         return NETDEV_TX_OK;
3112
3113 lio_xmit_failed:
3114         stats->tx_dropped++;
3115         netif_info(lio, tx_err, lio->netdev, "IQ%d Transmit dropped:%llu\n",
3116                    iq_no, stats->tx_dropped);
3117         if (dptr)
3118                 dma_unmap_single(&oct->pci_dev->dev, dptr,
3119                                  ndata.datasize, DMA_TO_DEVICE);
3120         tx_buffer_free(skb);
3121         return NETDEV_TX_OK;
3122 }
3123
3124 /** \brief Network device Tx timeout
3125  * @param netdev    pointer to network device
3126  */
3127 static void liquidio_tx_timeout(struct net_device *netdev)
3128 {
3129         struct lio *lio;
3130
3131         lio = GET_LIO(netdev);
3132
3133         netif_info(lio, tx_err, lio->netdev,
3134                    "Transmit timeout tx_dropped:%lu, waking up queues now!!\n",
3135                    netdev->stats.tx_dropped);
3136         netif_trans_update(netdev);
3137         txqs_wake(netdev);
3138 }
3139
3140 static int liquidio_vlan_rx_add_vid(struct net_device *netdev,
3141                                     __be16 proto __attribute__((unused)),
3142                                     u16 vid)
3143 {
3144         struct lio *lio = GET_LIO(netdev);
3145         struct octeon_device *oct = lio->oct_dev;
3146         struct octnic_ctrl_pkt nctrl;
3147         int ret = 0;
3148
3149         memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
3150
3151         nctrl.ncmd.u64 = 0;
3152         nctrl.ncmd.s.cmd = OCTNET_CMD_ADD_VLAN_FILTER;
3153         nctrl.ncmd.s.param1 = vid;
3154         nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
3155         nctrl.wait_time = 100;
3156         nctrl.netpndev = (u64)netdev;
3157         nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
3158
3159         ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
3160         if (ret < 0) {
3161                 dev_err(&oct->pci_dev->dev, "Add VLAN filter failed in core (ret: 0x%x)\n",
3162                         ret);
3163         }
3164
3165         return ret;
3166 }
3167
3168 static int liquidio_vlan_rx_kill_vid(struct net_device *netdev,
3169                                      __be16 proto __attribute__((unused)),
3170                                      u16 vid)
3171 {
3172         struct lio *lio = GET_LIO(netdev);
3173         struct octeon_device *oct = lio->oct_dev;
3174         struct octnic_ctrl_pkt nctrl;
3175         int ret = 0;
3176
3177         memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
3178
3179         nctrl.ncmd.u64 = 0;
3180         nctrl.ncmd.s.cmd = OCTNET_CMD_DEL_VLAN_FILTER;
3181         nctrl.ncmd.s.param1 = vid;
3182         nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
3183         nctrl.wait_time = 100;
3184         nctrl.netpndev = (u64)netdev;
3185         nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
3186
3187         ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
3188         if (ret < 0) {
3189                 dev_err(&oct->pci_dev->dev, "Kill VLAN filter failed in core (ret: 0x%x)\n",
3190                         ret);
3191         }
3192         return ret;
3193 }
3194
3195 /** Send a command to enable or disable RX checksum offload
3196  * @param netdev                pointer to network device
3197  * @param command               OCTNET_CMD_TNL_RX_CSUM_CTL
3198  * @param rx_cmd                OCTNET_CMD_RXCSUM_ENABLE/
3199  *                              OCTNET_CMD_RXCSUM_DISABLE
3200  * @returns                     SUCCESS or FAILURE
3201  */
3202 static int liquidio_set_rxcsum_command(struct net_device *netdev, int command,
3203                                        u8 rx_cmd)
3204 {
3205         struct lio *lio = GET_LIO(netdev);
3206         struct octeon_device *oct = lio->oct_dev;
3207         struct octnic_ctrl_pkt nctrl;
3208         int ret = 0;
3209
             memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));

3210         nctrl.ncmd.u64 = 0;
3211         nctrl.ncmd.s.cmd = command;
3212         nctrl.ncmd.s.param1 = rx_cmd;
3213         nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
3214         nctrl.wait_time = 100;
3215         nctrl.netpndev = (u64)netdev;
3216         nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
3217
3218         ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
3219         if (ret < 0) {
3220                 dev_err(&oct->pci_dev->dev,
3221                         "DEVFLAGS RXCSUM change failed in core(ret:0x%x)\n",
3222                         ret);
3223         }
3224         return ret;
3225 }
3226
3227 /** Send a command to add or delete a VxLAN UDP port in the firmware
3228  * @param netdev                pointer to network device
3229  * @param command               OCTNET_CMD_VXLAN_PORT_CONFIG
3230  * @param vxlan_port            VxLAN port to be added or deleted
3231  * @param vxlan_cmd_bit         OCTNET_CMD_VXLAN_PORT_ADD,
3232  *                              OCTNET_CMD_VXLAN_PORT_DEL
3233  * @returns                     SUCCESS or FAILURE
3234  */
3235 static int liquidio_vxlan_port_command(struct net_device *netdev, int command,
3236                                        u16 vxlan_port, u8 vxlan_cmd_bit)
3237 {
3238         struct lio *lio = GET_LIO(netdev);
3239         struct octeon_device *oct = lio->oct_dev;
3240         struct octnic_ctrl_pkt nctrl;
3241         int ret = 0;
3242
             memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));

3243         nctrl.ncmd.u64 = 0;
3244         nctrl.ncmd.s.cmd = command;
3245         nctrl.ncmd.s.more = vxlan_cmd_bit;
3246         nctrl.ncmd.s.param1 = vxlan_port;
3247         nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
3248         nctrl.wait_time = 100;
3249         nctrl.netpndev = (u64)netdev;
3250         nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
3251
3252         ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
3253         if (ret < 0) {
3254                 dev_err(&oct->pci_dev->dev,
3255                         "VxLAN port add/delete failed in core (ret:0x%x)\n",
3256                         ret);
3257         }
3258         return ret;
3259 }
3260
3261 /** \brief Net device fix features
3262  * @param netdev  pointer to network device
3263  * @param request features requested
3264  * @returns updated features list
3265  */
3266 static netdev_features_t liquidio_fix_features(struct net_device *netdev,
3267                                                netdev_features_t request)
3268 {
3269         struct lio *lio = netdev_priv(netdev);
3270
3271         if ((request & NETIF_F_RXCSUM) &&
3272             !(lio->dev_capability & NETIF_F_RXCSUM))
3273                 request &= ~NETIF_F_RXCSUM;
3274
3275         if ((request & NETIF_F_HW_CSUM) &&
3276             !(lio->dev_capability & NETIF_F_HW_CSUM))
3277                 request &= ~NETIF_F_HW_CSUM;
3278
3279         if ((request & NETIF_F_TSO) && !(lio->dev_capability & NETIF_F_TSO))
3280                 request &= ~NETIF_F_TSO;
3281
3282         if ((request & NETIF_F_TSO6) && !(lio->dev_capability & NETIF_F_TSO6))
3283                 request &= ~NETIF_F_TSO6;
3284
3285         if ((request & NETIF_F_LRO) && !(lio->dev_capability & NETIF_F_LRO))
3286                 request &= ~NETIF_F_LRO;
3287
3288         /* Disable LRO if RXCSUM is off */
3289         if (!(request & NETIF_F_RXCSUM) && (netdev->features & NETIF_F_LRO) &&
3290             (lio->dev_capability & NETIF_F_LRO))
3291                 request &= ~NETIF_F_LRO;
3292
3293         return request;
3294 }
3295
3296 /** \brief Net device set features
3297  * @param netdev  pointer to network device
3298  * @param features features to enable/disable
3299  */
3300 static int liquidio_set_features(struct net_device *netdev,
3301                                  netdev_features_t features)
3302 {
3303         struct lio *lio = netdev_priv(netdev);
3304
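             /* Nothing to do here unless the LRO bit is changing */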
3305         if (!((netdev->features ^ features) & NETIF_F_LRO))
3306                 return 0;
3307
3308         if ((features & NETIF_F_LRO) && (lio->dev_capability & NETIF_F_LRO))
3309                 liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE,
3310                                      OCTNIC_LROIPV4 | OCTNIC_LROIPV6);
3311         else if (!(features & NETIF_F_LRO) &&
3312                  (lio->dev_capability & NETIF_F_LRO))
3313                 liquidio_set_feature(netdev, OCTNET_CMD_LRO_DISABLE,
3314                                      OCTNIC_LROIPV4 | OCTNIC_LROIPV6);
3315
3316         /* Send a command to the firmware to enable/disable RX checksum
3317          * offload when the setting is changed via ethtool.
3318          */
3319         if (!(netdev->features & NETIF_F_RXCSUM) &&
3320             (lio->enc_dev_capability & NETIF_F_RXCSUM) &&
3321             (features & NETIF_F_RXCSUM))
3322                 liquidio_set_rxcsum_command(netdev,
3323                                             OCTNET_CMD_TNL_RX_CSUM_CTL,
3324                                             OCTNET_CMD_RXCSUM_ENABLE);
3325         else if ((netdev->features & NETIF_F_RXCSUM) &&
3326                  (lio->enc_dev_capability & NETIF_F_RXCSUM) &&
3327                  !(features & NETIF_F_RXCSUM))
3328                 liquidio_set_rxcsum_command(netdev, OCTNET_CMD_TNL_RX_CSUM_CTL,
3329                                             OCTNET_CMD_RXCSUM_DISABLE);
3330
3331         return 0;
3332 }
3333
3334 static void liquidio_add_vxlan_port(struct net_device *netdev,
3335                                     struct udp_tunnel_info *ti)
3336 {
3337         if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
3338                 return;
3339
3340         liquidio_vxlan_port_command(netdev,
3341                                     OCTNET_CMD_VXLAN_PORT_CONFIG,
3342                                     htons(ti->port),
3343                                     OCTNET_CMD_VXLAN_PORT_ADD);
3344 }
3345
3346 static void liquidio_del_vxlan_port(struct net_device *netdev,
3347                                     struct udp_tunnel_info *ti)
3348 {
3349         if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
3350                 return;
3351
3352         liquidio_vxlan_port_command(netdev,
3353                                     OCTNET_CMD_VXLAN_PORT_CONFIG,
3354                                     htons(ti->port),
3355                                     OCTNET_CMD_VXLAN_PORT_DEL);
3356 }
3357
3358 static struct net_device_ops lionetdevops = {
3359         .ndo_open               = liquidio_open,
3360         .ndo_stop               = liquidio_stop,
3361         .ndo_start_xmit         = liquidio_xmit,
3362         .ndo_get_stats          = liquidio_get_stats,
3363         .ndo_set_mac_address    = liquidio_set_mac,
3364         .ndo_set_rx_mode        = liquidio_set_mcast_list,
3365         .ndo_tx_timeout         = liquidio_tx_timeout,
3366
3367         .ndo_vlan_rx_add_vid    = liquidio_vlan_rx_add_vid,
3368         .ndo_vlan_rx_kill_vid   = liquidio_vlan_rx_kill_vid,
3369         .ndo_change_mtu         = liquidio_change_mtu,
3370         .ndo_do_ioctl           = liquidio_ioctl,
3371         .ndo_fix_features       = liquidio_fix_features,
3372         .ndo_set_features       = liquidio_set_features,
3373         .ndo_udp_tunnel_add     = liquidio_add_vxlan_port,
3374         .ndo_udp_tunnel_del     = liquidio_del_vxlan_port,
3375 };
3376
3377 /** \brief Entry point for the liquidio module
3378  */
3379 static int __init liquidio_init(void)
3380 {
3381         int i;
3382         struct handshake *hs;
3383
3384         init_completion(&first_stage);
3385
3386         octeon_init_device_list(conf_type);
3387
3388         if (liquidio_init_pci())
3389                 return -EINVAL;
3390
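             /* Give the first device probe up to a second to signal
              * first_stage before walking the per-device handshakes below.
              */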
3391         wait_for_completion_timeout(&first_stage, msecs_to_jiffies(1000));
3392
3393         for (i = 0; i < MAX_OCTEON_DEVICES; i++) {
3394                 hs = &handshake[i];
3395                 if (hs->pci_dev) {
3396                         wait_for_completion(&hs->init);
3397                         if (!hs->init_ok) {
3398                                 /* init handshake failed */
3399                                 dev_err(&hs->pci_dev->dev,
3400                                         "Failed to init device\n");
3401                                 liquidio_deinit_pci();
3402                                 return -EIO;
3403                         }
3404                 }
3405         }
3406
3407         for (i = 0; i < MAX_OCTEON_DEVICES; i++) {
3408                 hs = &handshake[i];
3409                 if (hs->pci_dev) {
3410                         wait_for_completion_timeout(&hs->started,
3411                                                     msecs_to_jiffies(30000));
3412                         if (!hs->started_ok) {
3413                                 /* starter handshake failed */
3414                                 dev_err(&hs->pci_dev->dev,
3415                                         "Firmware failed to start\n");
3416                                 liquidio_deinit_pci();
3417                                 return -EIO;
3418                         }
3419                 }
3420         }
3421
3422         return 0;
3423 }
3424
3425 static int lio_nic_info(struct octeon_recv_info *recv_info, void *buf)
3426 {
3427         struct octeon_device *oct = (struct octeon_device *)buf;
3428         struct octeon_recv_pkt *recv_pkt = recv_info->recv_pkt;
3429         int gmxport = 0;
3430         union oct_link_status *ls;
3431         int i;
3432
3433         if (recv_pkt->buffer_size[0] != sizeof(*ls)) {
3434                 dev_err(&oct->pci_dev->dev, "Malformed NIC_INFO, len=%d, gmxport=%d\n",
3435                         recv_pkt->buffer_size[0],
3436                         recv_pkt->rh.r_nic_info.gmxport);
3437                 goto nic_info_err;
3438         }
3439
3440         gmxport = recv_pkt->rh.r_nic_info.gmxport;
3441         ls = (union oct_link_status *)get_rbd(recv_pkt->buffer_ptr[0]);
3442
3443         octeon_swap_8B_data((u64 *)ls, (sizeof(union oct_link_status)) >> 3);
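             /* Find the netdev that owns the reported GMX port and update
              * its link state.
              */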
3444         for (i = 0; i < oct->ifcount; i++) {
3445                 if (oct->props[i].gmxport == gmxport) {
3446                         update_link_status(oct->props[i].netdev, ls);
3447                         break;
3448                 }
3449         }
3450
3451 nic_info_err:
3452         for (i = 0; i < recv_pkt->buffer_count; i++)
3453                 recv_buffer_free(recv_pkt->buffer_ptr[i]);
3454         octeon_free_recv_info(recv_info);
3455         return 0;
3456 }
3457
3458 /**
3459  * \brief Setup network interfaces
3460  * @param octeon_dev  octeon device
3461  *
3462  * Called during init time for each device. It assumes the NIC
3463  * is already up and running.  The link information for each
3464  * interface is passed in link_info.
3465  */
3466 static int setup_nic_devices(struct octeon_device *octeon_dev)
3467 {
3468         struct lio *lio = NULL;
3469         struct net_device *netdev;
3470         u8 mac[6], i, j;
3471         struct octeon_soft_command *sc;
3472         struct liquidio_if_cfg_context *ctx;
3473         struct liquidio_if_cfg_resp *resp;
3474         struct octdev_props *props;
3475         int retval, num_iqueues, num_oqueues;
3476         union oct_nic_if_cfg if_cfg;
3477         unsigned int base_queue;
3478         unsigned int gmx_port_id;
3479         u32 resp_size, ctx_size, data_size;
3480         u32 ifidx_or_pfnum;
3481         struct lio_version *vdata;
3482
3483         /* This is to handle link status changes */
3484         octeon_register_dispatch_fn(octeon_dev, OPCODE_NIC,
3485                                     OPCODE_NIC_INFO,
3486                                     lio_nic_info, octeon_dev);
3487
3488         /* REQTYPE_RESP_NET and REQTYPE_SOFT_COMMAND do not have free functions.
3489          * They are handled directly.
3490          */
3491         octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_NORESP_NET,
3492                                         free_netbuf);
3493
3494         octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_NORESP_NET_SG,
3495                                         free_netsgbuf);
3496
3497         octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_RESP_NET_SG,
3498                                         free_netsgbuf_with_resp);
3499
3500         for (i = 0; i < octeon_dev->ifcount; i++) {
3501                 resp_size = sizeof(struct liquidio_if_cfg_resp);
3502                 ctx_size = sizeof(struct liquidio_if_cfg_context);
3503                 data_size = sizeof(struct lio_version);
3504                 sc = (struct octeon_soft_command *)
3505                         octeon_alloc_soft_command(octeon_dev, data_size,
3506                                                   resp_size, ctx_size);
3507                 resp = (struct liquidio_if_cfg_resp *)sc->virtrptr;
3508                 ctx  = (struct liquidio_if_cfg_context *)sc->ctxptr;
3509                 vdata = (struct lio_version *)sc->virtdptr;
3510
3511                 *((u64 *)vdata) = 0;
3512                 vdata->major = cpu_to_be16(LIQUIDIO_BASE_MAJOR_VERSION);
3513                 vdata->minor = cpu_to_be16(LIQUIDIO_BASE_MINOR_VERSION);
3514                 vdata->micro = cpu_to_be16(LIQUIDIO_BASE_MICRO_VERSION);
3515
3516                 if (OCTEON_CN23XX_PF(octeon_dev)) {
3517                         num_iqueues = octeon_dev->sriov_info.num_pf_rings;
3518                         num_oqueues = octeon_dev->sriov_info.num_pf_rings;
3519                         base_queue = octeon_dev->sriov_info.pf_srn;
3520
3521                         gmx_port_id = octeon_dev->pf_num;
3522                         ifidx_or_pfnum = octeon_dev->pf_num;
3523                 } else {
3524                         num_iqueues = CFG_GET_NUM_TXQS_NIC_IF(
3525                                                 octeon_get_conf(octeon_dev), i);
3526                         num_oqueues = CFG_GET_NUM_RXQS_NIC_IF(
3527                                                 octeon_get_conf(octeon_dev), i);
3528                         base_queue = CFG_GET_BASE_QUE_NIC_IF(
3529                                                 octeon_get_conf(octeon_dev), i);
3530                         gmx_port_id = CFG_GET_GMXID_NIC_IF(
3531                                                 octeon_get_conf(octeon_dev), i);
3532                         ifidx_or_pfnum = i;
3533                 }
3534
3535                 dev_dbg(&octeon_dev->pci_dev->dev,
3536                         "requesting config for interface %d, iqs %d, oqs %d\n",
3537                         ifidx_or_pfnum, num_iqueues, num_oqueues);
3538                 WRITE_ONCE(ctx->cond, 0);
3539                 ctx->octeon_id = lio_get_device_id(octeon_dev);
3540                 init_waitqueue_head(&ctx->wc);
3541
3542                 if_cfg.u64 = 0;
3543                 if_cfg.s.num_iqueues = num_iqueues;
3544                 if_cfg.s.num_oqueues = num_oqueues;
3545                 if_cfg.s.base_queue = base_queue;
3546                 if_cfg.s.gmx_port_id = gmx_port_id;
3547
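                     /* The IF_CFG request is sent on instruction queue 0 */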
3548                 sc->iq_no = 0;
3549
3550                 octeon_prepare_soft_command(octeon_dev, sc, OPCODE_NIC,
3551                                             OPCODE_NIC_IF_CFG, 0,
3552                                             if_cfg.u64, 0);
3553
3554                 sc->callback = if_cfg_callback;
3555                 sc->callback_arg = sc;
3556                 sc->wait_time = 3000;
3557
3558                 retval = octeon_send_soft_command(octeon_dev, sc);
3559                 if (retval == IQ_SEND_FAILED) {
3560                         dev_err(&octeon_dev->pci_dev->dev,
3561                                 "iq/oq config failed status: %x\n",
3562                                 retval);
3563                         /* Soft instr is freed by driver in case of failure. */
3564                         goto setup_nic_dev_fail;
3565                 }
3566
3567                 /* Sleep on a wait queue till the cond flag indicates that the
3568                  * response arrived or timed-out.
3569                  */
3570                 sleep_cond(&ctx->wc, &ctx->cond);
3571                 retval = resp->status;
3572                 if (retval) {
3573                         dev_err(&octeon_dev->pci_dev->dev, "iq/oq config failed\n");
3574                         goto setup_nic_dev_fail;
3575                 }
3576
3577                 octeon_swap_8B_data((u64 *)(&resp->cfg_info),
3578                                     (sizeof(struct liquidio_if_cfg_info)) >> 3);
3579
3580                 num_iqueues = hweight64(resp->cfg_info.iqmask);
3581                 num_oqueues = hweight64(resp->cfg_info.oqmask);
3582
3583                 if (!(num_iqueues) || !(num_oqueues)) {
3584                         dev_err(&octeon_dev->pci_dev->dev,
3585                                 "Got bad iqueues (%016llx) or oqueues (%016llx) from firmware.\n",
3586                                 resp->cfg_info.iqmask,
3587                                 resp->cfg_info.oqmask);
3588                         goto setup_nic_dev_fail;
3589                 }
3590                 dev_dbg(&octeon_dev->pci_dev->dev,
3591                         "interface %d, iqmask %016llx, oqmask %016llx, numiqueues %d, numoqueues %d\n",
3592                         i, resp->cfg_info.iqmask, resp->cfg_info.oqmask,
3593                         num_iqueues, num_oqueues);
3594                 netdev = alloc_etherdev_mq(LIO_SIZE, num_iqueues);
3595
3596                 if (!netdev) {
3597                         dev_err(&octeon_dev->pci_dev->dev, "Device allocation failed\n");
3598                         goto setup_nic_dev_fail;
3599                 }
3600
3601                 SET_NETDEV_DEV(netdev, &octeon_dev->pci_dev->dev);
3602
3603                 if (num_iqueues > 1)
3604                         lionetdevops.ndo_select_queue = select_q;
3605
3606                 /* Associate the routines that will handle different
3607                  * netdev tasks.
3608                  */
3609                 netdev->netdev_ops = &lionetdevops;
3610
3611                 lio = GET_LIO(netdev);
3612
3613                 memset(lio, 0, sizeof(struct lio));
3614
3615                 lio->ifidx = ifidx_or_pfnum;
3616
3617                 props = &octeon_dev->props[i];
3618                 props->gmxport = resp->cfg_info.linfo.gmxport;
3619                 props->netdev = netdev;
3620
3621                 lio->linfo.num_rxpciq = num_oqueues;
3622                 lio->linfo.num_txpciq = num_iqueues;
3623                 for (j = 0; j < num_oqueues; j++) {
3624                         lio->linfo.rxpciq[j].u64 =
3625                                 resp->cfg_info.linfo.rxpciq[j].u64;
3626                 }
3627                 for (j = 0; j < num_iqueues; j++) {
3628                         lio->linfo.txpciq[j].u64 =
3629                                 resp->cfg_info.linfo.txpciq[j].u64;
3630                 }
3631                 lio->linfo.hw_addr = resp->cfg_info.linfo.hw_addr;
3632                 lio->linfo.gmxport = resp->cfg_info.linfo.gmxport;
3633                 lio->linfo.link.u64 = resp->cfg_info.linfo.link.u64;
3634
3635                 lio->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
3636
3637                 if (OCTEON_CN23XX_PF(octeon_dev) ||
3638                     OCTEON_CN6XXX(octeon_dev)) {
3639                         lio->dev_capability = NETIF_F_HIGHDMA
3640                                               | NETIF_F_IP_CSUM
3641                                               | NETIF_F_IPV6_CSUM
3642                                               | NETIF_F_SG | NETIF_F_RXCSUM
3643                                               | NETIF_F_GRO
3644                                               | NETIF_F_TSO | NETIF_F_TSO6
3645                                               | NETIF_F_LRO;
3646                 }
3647                 netif_set_gso_max_size(netdev, OCTNIC_GSO_MAX_SIZE);
3648
3649                 /*  Copy of transmit encapsulation capabilities:
3650                  *  TSO, TSO6, Checksums for this device
3651                  */
3652                 lio->enc_dev_capability = NETIF_F_IP_CSUM
3653                                           | NETIF_F_IPV6_CSUM
3654                                           | NETIF_F_GSO_UDP_TUNNEL
3655                                           | NETIF_F_HW_CSUM | NETIF_F_SG
3656                                           | NETIF_F_RXCSUM
3657                                           | NETIF_F_TSO | NETIF_F_TSO6
3658                                           | NETIF_F_LRO;
3659
3660                 netdev->hw_enc_features = (lio->enc_dev_capability &
3661                                            ~NETIF_F_LRO);
3662
3663                 lio->dev_capability |= NETIF_F_GSO_UDP_TUNNEL;
3664
3665                 netdev->vlan_features = lio->dev_capability;
3666                 /* Add any unchangeable hw features */
3667                 lio->dev_capability |=  NETIF_F_HW_VLAN_CTAG_FILTER |
3668                                         NETIF_F_HW_VLAN_CTAG_RX |
3669                                         NETIF_F_HW_VLAN_CTAG_TX;
3670
3671                 netdev->features = (lio->dev_capability & ~NETIF_F_LRO);
3672
3673                 netdev->hw_features = lio->dev_capability;
3674                 /* HW_VLAN_RX and HW_VLAN_FILTER are always on */
3675                 netdev->hw_features = netdev->hw_features &
3676                         ~NETIF_F_HW_VLAN_CTAG_RX;
3677
3678                 /* Point to the  properties for octeon device to which this
3679                  * interface belongs.
3680                  */
3681                 lio->oct_dev = octeon_dev;
3682                 lio->octprops = props;
3683                 lio->netdev = netdev;
3684
3685                 dev_dbg(&octeon_dev->pci_dev->dev,
3686                         "if%d gmx: %d hw_addr: 0x%llx\n", i,
3687                         lio->linfo.gmxport, CVM_CAST64(lio->linfo.hw_addr));
3688
3689                 /* 64-bit swap required on LE machines */
3690                 octeon_swap_8B_data(&lio->linfo.hw_addr, 1);
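                     /* The 6-byte MAC address sits in the low-order bytes of
                      * the 8-byte hw_addr field, hence the 2-byte offset below.
                      */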
3691                 for (j = 0; j < 6; j++)
3692                         mac[j] = *((u8 *)(((u8 *)&lio->linfo.hw_addr) + 2 + j));
3693
3694                 /* Copy MAC Address to OS network device structure */
3695
3696                 ether_addr_copy(netdev->dev_addr, mac);
3697
3698                 /* By default all interfaces on a single Octeon use the same
3699                  * tx and rx queues
3700                  */
3701                 lio->txq = lio->linfo.txpciq[0].s.q_no;
3702                 lio->rxq = lio->linfo.rxpciq[0].s.q_no;
3703                 if (setup_io_queues(octeon_dev, i)) {
3704                         dev_err(&octeon_dev->pci_dev->dev, "I/O queues creation failed\n");
3705                         goto setup_nic_dev_fail;
3706                 }
3707
3708                 ifstate_set(lio, LIO_IFSTATE_DROQ_OPS);
3709
3710                 lio->tx_qsize = octeon_get_tx_qsize(octeon_dev, lio->txq);
3711                 lio->rx_qsize = octeon_get_rx_qsize(octeon_dev, lio->rxq);
3712
3713                 if (setup_glists(octeon_dev, lio, num_iqueues)) {
3714                         dev_err(&octeon_dev->pci_dev->dev,
3715                                 "Gather list allocation failed\n");
3716                         goto setup_nic_dev_fail;
3717                 }
3718
3719                 /* Register ethtool support */
3720                 liquidio_set_ethtool_ops(netdev);
3721                 octeon_dev->priv_flags = 0x0;
3722
3723                 if (netdev->features & NETIF_F_LRO)
3724                         liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE,
3725                                              OCTNIC_LROIPV4 | OCTNIC_LROIPV6);
3726
3727                 liquidio_set_feature(netdev, OCTNET_CMD_ENABLE_VLAN_FILTER, 0);
3728
3729                 if ((debug != -1) && (debug & NETIF_MSG_HW))
3730                         liquidio_set_feature(netdev,
3731                                              OCTNET_CMD_VERBOSE_ENABLE, 0);
3732
3733                 if (setup_link_status_change_wq(netdev))
3734                         goto setup_nic_dev_fail;
3735
3736                 /* Register the network device with the OS */
3737                 if (register_netdev(netdev)) {
3738                         dev_err(&octeon_dev->pci_dev->dev, "Device registration failed\n");
3739                         goto setup_nic_dev_fail;
3740                 }
3741
3742                 dev_dbg(&octeon_dev->pci_dev->dev,
3743                         "Setup NIC ifidx:%d mac:%02x%02x%02x%02x%02x%02x\n",
3744                         i, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
3745                 netif_carrier_off(netdev);
3746                 lio->link_changes++;
3747
3748                 ifstate_set(lio, LIO_IFSTATE_REGISTERED);
3749
3750                 /* Send a command to the firmware to enable Rx checksum
3751                  * offload by default when the Liquidio driver sets up
3752                  * this device.
3753                  */
3754                 liquidio_set_rxcsum_command(netdev, OCTNET_CMD_TNL_RX_CSUM_CTL,
3755                                             OCTNET_CMD_RXCSUM_ENABLE);
3756                 liquidio_set_feature(netdev, OCTNET_CMD_TNL_TX_CSUM_CTL,
3757                                      OCTNET_CMD_TXCSUM_ENABLE);
3758
3759                 dev_dbg(&octeon_dev->pci_dev->dev,
3760                         "NIC ifidx:%d Setup successful\n", i);
3761
3762                 octeon_free_soft_command(octeon_dev, sc);
3763         }
3764
3765         return 0;
3766
3767 setup_nic_dev_fail:
3768
3769         octeon_free_soft_command(octeon_dev, sc);
3770
3771         while (i--) {
3772                 dev_err(&octeon_dev->pci_dev->dev,
3773                         "NIC ifidx:%d Setup failed\n", i);
3774                 liquidio_destroy_nic_device(octeon_dev, i);
3775         }
3776         return -ENODEV;
3777 }
3778
3779 /**
3780  * \brief initialize the NIC
3781  * @param oct octeon device
3782  *
3783  * This initialization routine is called once the Octeon device application is
3784  * up and running
3785  */
3786 static int liquidio_init_nic_module(struct octeon_device *oct)
3787 {
3788         struct oct_intrmod_cfg *intrmod_cfg;
3789         int i, retval = 0;
3790         int num_nic_ports = CFG_GET_NUM_NIC_PORTS(octeon_get_conf(oct));
3791
3792         dev_dbg(&oct->pci_dev->dev, "Initializing network interfaces\n");
3793
3794         /* Only the default iq and oq were initialized earlier;
3795          * initialize the rest as well.
3796          */
3797         /* Run the port_config command for each port. */
3798         oct->ifcount = num_nic_ports;
3799
3800         memset(oct->props, 0,
3801                sizeof(struct octdev_props) * num_nic_ports);
3802
3803         for (i = 0; i < MAX_OCTEON_LINKS; i++)
3804                 oct->props[i].gmxport = -1;
3805
3806         retval = setup_nic_devices(oct);
3807         if (retval) {
3808                 dev_err(&oct->pci_dev->dev, "Setup NIC devices failed\n");
3809                 goto octnet_init_failure;
3810         }
3811
3812         liquidio_ptp_init(oct);
3813
3814         /* Initialize interrupt moderation params */
3815         intrmod_cfg = &oct->intrmod;
3816         intrmod_cfg->rx_enable = 1;
3817         intrmod_cfg->check_intrvl =   LIO_INTRMOD_CHECK_INTERVAL;
3818         intrmod_cfg->maxpkt_ratethr = LIO_INTRMOD_MAXPKT_RATETHR;
3819         intrmod_cfg->minpkt_ratethr = LIO_INTRMOD_MINPKT_RATETHR;
3820         intrmod_cfg->rx_maxcnt_trigger = LIO_INTRMOD_RXMAXCNT_TRIGGER;
3821         intrmod_cfg->rx_maxtmr_trigger = LIO_INTRMOD_RXMAXTMR_TRIGGER;
3822         intrmod_cfg->rx_mintmr_trigger = LIO_INTRMOD_RXMINTMR_TRIGGER;
3823         intrmod_cfg->rx_mincnt_trigger = LIO_INTRMOD_RXMINCNT_TRIGGER;
3824         intrmod_cfg->tx_enable = 1;
3825         intrmod_cfg->tx_maxcnt_trigger = LIO_INTRMOD_TXMAXCNT_TRIGGER;
3826         intrmod_cfg->tx_mincnt_trigger = LIO_INTRMOD_TXMINCNT_TRIGGER;
3827         intrmod_cfg->rx_frames = CFG_GET_OQ_INTR_PKT(octeon_get_conf(oct));
3828         intrmod_cfg->rx_usecs = CFG_GET_OQ_INTR_TIME(octeon_get_conf(oct));
3829         dev_dbg(&oct->pci_dev->dev, "Network interfaces ready\n");
3830
3831         return retval;
3832
3833 octnet_init_failure:
3834
3835         oct->ifcount = 0;
3836
3837         return retval;
3838 }
3839
3840 /**
3841  * \brief starter callback that invokes the remaining initialization work after
3842  * the NIC is up and running.
3843  * @param octptr  work struct work_struct
3844  */
3845 static void nic_starter(struct work_struct *work)
3846 {
3847         struct octeon_device *oct;
3848         struct cavium_wk *wk = (struct cavium_wk *)work;
3849
3850         oct = (struct octeon_device *)wk->ctxptr;
3851
3852         if (atomic_read(&oct->status) == OCT_DEV_RUNNING)
3853                 return;
3854
3855         /* If the status of the device is CORE_OK, the core
3856          * application has reported its application type. Call
3857          * any registered handlers now and move to the RUNNING
3858          * state.
3859          */
3860         if (atomic_read(&oct->status) != OCT_DEV_CORE_OK) {
3861                 schedule_delayed_work(&oct->nic_poll_work.work,
3862                                       LIQUIDIO_STARTER_POLL_INTERVAL_MS);
3863                 return;
3864         }
3865
3866         atomic_set(&oct->status, OCT_DEV_RUNNING);
3867
3868         if (oct->app_mode && oct->app_mode == CVM_DRV_NIC_APP) {
3869                 dev_dbg(&oct->pci_dev->dev, "Starting NIC module\n");
3870
3871                 if (liquidio_init_nic_module(oct))
3872                         dev_err(&oct->pci_dev->dev, "NIC initialization failed\n");
3873                 else
3874                         handshake[oct->octeon_id].started_ok = 1;
3875         } else {
3876                 dev_err(&oct->pci_dev->dev,
3877                         "Unexpected application running on NIC (%d). Check firmware.\n",
3878                         oct->app_mode);
3879         }
3880
3881         complete(&handshake[oct->octeon_id].started);
3882 }
3883
3884 /**
3885  * \brief Device initialization for each Octeon device that is probed
3886  * @param octeon_dev  octeon device
3887  */
3888 static int octeon_device_init(struct octeon_device *octeon_dev)
3889 {
3890         int j, ret;
3891         int fw_loaded = 0;
3892         char bootcmd[] = "\n";
3893         struct octeon_device_priv *oct_priv =
3894                 (struct octeon_device_priv *)octeon_dev->priv;
3895         atomic_set(&octeon_dev->status, OCT_DEV_BEGIN_STATE);
3896
3897         /* Enable access to the octeon device and make its DMA capability
3898          * known to the OS.
3899          */
3900         if (octeon_pci_os_setup(octeon_dev))
3901                 return 1;
3902
3903         /* Identify the Octeon type and map the BAR address space. */
3904         if (octeon_chip_specific_setup(octeon_dev)) {
3905                 dev_err(&octeon_dev->pci_dev->dev, "Chip specific setup failed\n");
3906                 return 1;
3907         }
3908
3909         atomic_set(&octeon_dev->status, OCT_DEV_PCI_MAP_DONE);
3910
3911         octeon_dev->app_mode = CVM_DRV_INVALID_APP;
3912
3913         if (OCTEON_CN23XX_PF(octeon_dev)) {
3914                 if (!cn23xx_fw_loaded(octeon_dev)) {
3915                         fw_loaded = 0;
3916                         /* Do a soft reset of the Octeon device. */
3917                         if (octeon_dev->fn_list.soft_reset(octeon_dev))
3918                                 return 1;
3919                         /* things might have changed */
3920                         if (!cn23xx_fw_loaded(octeon_dev))
3921                                 fw_loaded = 0;
3922                         else
3923                                 fw_loaded = 1;
3924                 } else {
3925                         fw_loaded = 1;
3926                 }
3927         } else if (octeon_dev->fn_list.soft_reset(octeon_dev)) {
3928                 return 1;
3929         }
3930
3931         /* Initialize the dispatch mechanism used to push packets arriving on
3932          * Octeon Output queues.
3933          */
3934         if (octeon_init_dispatch_list(octeon_dev))
3935                 return 1;
3936
3937         octeon_register_dispatch_fn(octeon_dev, OPCODE_NIC,
3938                                     OPCODE_NIC_CORE_DRV_ACTIVE,
3939                                     octeon_core_drv_init,
3940                                     octeon_dev);
3941
3942         INIT_DELAYED_WORK(&octeon_dev->nic_poll_work.work, nic_starter);
3943         octeon_dev->nic_poll_work.ctxptr = (void *)octeon_dev;
3944         schedule_delayed_work(&octeon_dev->nic_poll_work.work,
3945                               LIQUIDIO_STARTER_POLL_INTERVAL_MS);
3946
3947         atomic_set(&octeon_dev->status, OCT_DEV_DISPATCH_INIT_DONE);
3948
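             /* Keep the I/O queues disabled until their rings and registers
              * are fully configured; they are enabled further down.
              */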
3949         octeon_set_io_queues_off(octeon_dev);
3950
3951         if (OCTEON_CN23XX_PF(octeon_dev)) {
3952                 ret = octeon_dev->fn_list.setup_device_regs(octeon_dev);
3953                 if (ret) {
3954                         dev_err(&octeon_dev->pci_dev->dev, "OCTEON: Failed to configure device registers\n");
3955                         return ret;
3956                 }
3957         }
3958
3959         /* Initialize soft command buffer pool
3960          */
3961         if (octeon_setup_sc_buffer_pool(octeon_dev)) {
3962                 dev_err(&octeon_dev->pci_dev->dev, "sc buffer pool allocation failed\n");
3963                 return 1;
3964         }
3965         atomic_set(&octeon_dev->status, OCT_DEV_SC_BUFF_POOL_INIT_DONE);
3966
3967         /*  Setup the data structures that manage this Octeon's Input queues. */
3968         if (octeon_setup_instr_queues(octeon_dev)) {
3969                 dev_err(&octeon_dev->pci_dev->dev,
3970                         "instruction queue initialization failed\n");
3971                 /* On error, release any previously allocated queues */
3972                 for (j = 0; j < octeon_dev->num_iqs; j++)
3973                         octeon_delete_instr_queue(octeon_dev, j);
3974                 return 1;
3975         }
3976         atomic_set(&octeon_dev->status, OCT_DEV_INSTR_QUEUE_INIT_DONE);
3977
3978         /* Initialize lists to manage the requests of different types that
3979          * arrive from user & kernel applications for this octeon device.
3980          */
3981         if (octeon_setup_response_list(octeon_dev)) {
3982                 dev_err(&octeon_dev->pci_dev->dev, "Response list allocation failed\n");
3983                 return 1;
3984         }
3985         atomic_set(&octeon_dev->status, OCT_DEV_RESP_LIST_INIT_DONE);
3986
3987         if (octeon_setup_output_queues(octeon_dev)) {
3988                 dev_err(&octeon_dev->pci_dev->dev, "Output queue initialization failed\n");
3989                 /* Release any previously allocated queues */
3990                 for (j = 0; j < octeon_dev->num_oqs; j++)
3991                         octeon_delete_droq(octeon_dev, j);
3992                 return 1;
3993         }
3994
3995         atomic_set(&octeon_dev->status, OCT_DEV_DROQ_INIT_DONE);
3996
3997         if (OCTEON_CN23XX_PF(octeon_dev)) {
3998                 if (octeon_allocate_ioq_vector(octeon_dev)) {
3999                         dev_err(&octeon_dev->pci_dev->dev, "OCTEON: ioq vector allocation failed\n");
4000                         return 1;
4001                 }
4002
4003         } else {
4004                 /* The input and output queue registers were set up earlier (the
4005                  * queues were not enabled). Any additional registers
4006                  * that need to be programmed should be done now.
4007                  */
4008                 ret = octeon_dev->fn_list.setup_device_regs(octeon_dev);
4009                 if (ret) {
4010                         dev_err(&octeon_dev->pci_dev->dev,
4011                                 "Failed to configure device registers\n");
4012                         return ret;
4013                 }
4014         }
4015
4016         /* Initialize the tasklet that handles output queue packet processing.*/
4017         dev_dbg(&octeon_dev->pci_dev->dev, "Initializing droq tasklet\n");
4018         tasklet_init(&oct_priv->droq_tasklet, octeon_droq_bh,
4019                      (unsigned long)octeon_dev);
4020
4021         /* Setup the interrupt handler and record the INT SUM register address
4022          */
4023         if (octeon_setup_interrupt(octeon_dev))
4024                 return 1;
4025
4026         /* Enable Octeon device interrupts */
4027         octeon_dev->fn_list.enable_interrupt(octeon_dev, OCTEON_ALL_INTR);
4028
4029         /* Enable the input and output queues for this Octeon device */
4030         ret = octeon_dev->fn_list.enable_io_queues(octeon_dev);
4031         if (ret) {
4032                 dev_err(&octeon_dev->pci_dev->dev, "Failed to enable input/output queues");
4033                 return ret;
4034         }
4035
4036         atomic_set(&octeon_dev->status, OCT_DEV_IO_QUEUES_DONE);
4037
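             /* Unless this is a CN23XX PF that already has firmware running,
              * wait for DDR init and load the firmware from the host.
              */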
4038         if ((!OCTEON_CN23XX_PF(octeon_dev)) || !fw_loaded) {
4039                 dev_dbg(&octeon_dev->pci_dev->dev, "Waiting for DDR initialization...\n");
4040                 if (!ddr_timeout) {
4041                         dev_info(&octeon_dev->pci_dev->dev,
4042                                  "WAITING. Set ddr_timeout to non-zero value to proceed with initialization.\n");
4043                 }
4044
4045                 schedule_timeout_uninterruptible(HZ * LIO_RESET_SECS);
4046
4047                 /* Wait for the octeon to initialize DDR after the soft-reset.*/
4048                 while (!ddr_timeout) {
4049                         set_current_state(TASK_INTERRUPTIBLE);
4050                         if (schedule_timeout(HZ / 10)) {
4051                                 /* user probably pressed Control-C */
4052                                 return 1;
4053                         }
4054                 }
4055                 ret = octeon_wait_for_ddr_init(octeon_dev, &ddr_timeout);
4056                 if (ret) {
4057                         dev_err(&octeon_dev->pci_dev->dev,
4058                                 "DDR not initialized. Please confirm that board is configured to boot from Flash, ret: %d\n",
4059                                 ret);
4060                         return 1;
4061                 }
4062
4063                 if (octeon_wait_for_bootloader(octeon_dev, 1000)) {
4064                         dev_err(&octeon_dev->pci_dev->dev, "Board not responding\n");
4065                         return 1;
4066                 }
4067
4068                 /* Divert uboot to take commands from host instead. */
4069                 ret = octeon_console_send_cmd(octeon_dev, bootcmd, 50);
4070
4071                 dev_dbg(&octeon_dev->pci_dev->dev, "Initializing consoles\n");
4072                 ret = octeon_init_consoles(octeon_dev);
4073                 if (ret) {
4074                         dev_err(&octeon_dev->pci_dev->dev, "Could not access board consoles\n");
4075                         return 1;
4076                 }
4077                 ret = octeon_add_console(octeon_dev, 0);
4078                 if (ret) {
4079                         dev_err(&octeon_dev->pci_dev->dev, "Could not access board console\n");
4080                         return 1;
4081                 }
4082
4083                 atomic_set(&octeon_dev->status, OCT_DEV_CONSOLE_INIT_DONE);
4084
4085                 dev_dbg(&octeon_dev->pci_dev->dev, "Loading firmware\n");
4086                 ret = load_firmware(octeon_dev);
4087                 if (ret) {
4088                         dev_err(&octeon_dev->pci_dev->dev, "Could not load firmware to board\n");
4089                         return 1;
4090                 }
4091                 /* set bit 1 of SLI_SCRATCH_1 to indicate that firmware is
4092                  * loaded
4093                  */
4094                 if (OCTEON_CN23XX_PF(octeon_dev))
4095                         octeon_write_csr64(octeon_dev, CN23XX_SLI_SCRATCH1,
4096                                            2ULL);
4097         }
4098
4099         handshake[octeon_dev->octeon_id].init_ok = 1;
4100         complete(&handshake[octeon_dev->octeon_id].init);
4101
4102         atomic_set(&octeon_dev->status, OCT_DEV_HOST_OK);
4103
4104         /* Send Credit for Octeon Output queues. Credits are always sent after
4105          * the output queue is enabled.
4106          */
4107         for (j = 0; j < octeon_dev->num_oqs; j++)
4108                 writel(octeon_dev->droq[j]->max_count,
4109                        octeon_dev->droq[j]->pkts_credit_reg);
4110
4111         /* Packets can start arriving on the output queues from this point. */
4112         return 0;
4113 }
4114
4115 /**
4116  * \brief Exits the module
4117  */
4118 static void __exit liquidio_exit(void)
4119 {
4120         liquidio_deinit_pci();
4121
4122         pr_info("LiquidIO network module is now unloaded\n");
4123 }
4124
4125 module_init(liquidio_init);
4126 module_exit(liquidio_exit);