1 /**********************************************************************
2  * Author: Cavium, Inc.
3  *
4  * Contact: support@cavium.com
5  *          Please include "LiquidIO" in the subject.
6  *
7  * Copyright (c) 2003-2016 Cavium, Inc.
8  *
9  * This file is free software; you can redistribute it and/or modify
10  * it under the terms of the GNU General Public License, Version 2, as
11  * published by the Free Software Foundation.
12  *
13  * This file is distributed in the hope that it will be useful, but
14  * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
15  * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
16  * NONINFRINGEMENT.  See the GNU General Public License for more details.
17  ***********************************************************************/
18 #include <linux/pci.h>
19 #include <linux/if_vlan.h>
20 #include "liquidio_common.h"
21 #include "octeon_droq.h"
22 #include "octeon_iq.h"
23 #include "response_manager.h"
24 #include "octeon_device.h"
25 #include "octeon_nic.h"
26 #include "octeon_main.h"
27 #include "octeon_network.h"
28
29 /* OOM task polling interval */
30 #define LIO_OOM_POLL_INTERVAL_MS 250
31
32 #define OCTNIC_MAX_SG  MAX_SKB_FRAGS
33
34 /**
35  * \brief Delete gather lists
36  * @param lio per-network private data
37  */
38 void lio_delete_glists(struct lio *lio)
39 {
40         struct octnic_gather *g;
41         int i;
42
43         kfree(lio->glist_lock);
44         lio->glist_lock = NULL;
45
46         if (!lio->glist)
47                 return;
48
49         for (i = 0; i < lio->oct_dev->num_iqs; i++) {
50                 do {
51                         g = (struct octnic_gather *)
52                             lio_list_delete_head(&lio->glist[i]);
53                         kfree(g);
54                 } while (g);
55
56                 if (lio->glists_virt_base && lio->glists_virt_base[i] &&
57                     lio->glists_dma_base && lio->glists_dma_base[i]) {
58                         lio_dma_free(lio->oct_dev,
59                                      lio->glist_entry_size * lio->tx_qsize,
60                                      lio->glists_virt_base[i],
61                                      lio->glists_dma_base[i]);
62                 }
63         }
64
65         kfree(lio->glists_virt_base);
66         lio->glists_virt_base = NULL;
67
68         kfree(lio->glists_dma_base);
69         lio->glists_dma_base = NULL;
70
71         kfree(lio->glist);
72         lio->glist = NULL;
73 }
74
75 /**
76  * \brief Setup gather lists
77  * @param lio per-network private data
78  */
79 int lio_setup_glists(struct octeon_device *oct, struct lio *lio, int num_iqs)
80 {
81         struct octnic_gather *g;
82         int i, j;
83
84         lio->glist_lock =
85             kcalloc(num_iqs, sizeof(*lio->glist_lock), GFP_KERNEL);
86         if (!lio->glist_lock)
87                 return -ENOMEM;
88
89         lio->glist =
90             kcalloc(num_iqs, sizeof(*lio->glist), GFP_KERNEL);
91         if (!lio->glist) {
92                 kfree(lio->glist_lock);
93                 lio->glist_lock = NULL;
94                 return -ENOMEM;
95         }
96
97         lio->glist_entry_size =
98                 ROUNDUP8((ROUNDUP4(OCTNIC_MAX_SG) >> 2) * OCT_SG_ENTRY_SIZE);
99
100         /* allocate memory to store the virtual and DMA base addresses of
101          * the per-queue glist consistent memory
102          */
103         lio->glists_virt_base = kcalloc(num_iqs, sizeof(*lio->glists_virt_base),
104                                         GFP_KERNEL);
105         lio->glists_dma_base = kcalloc(num_iqs, sizeof(*lio->glists_dma_base),
106                                        GFP_KERNEL);
107
108         if (!lio->glists_virt_base || !lio->glists_dma_base) {
109                 lio_delete_glists(lio);
110                 return -ENOMEM;
111         }
112
113         for (i = 0; i < num_iqs; i++) {
114                 int numa_node = dev_to_node(&oct->pci_dev->dev);
115
116                 spin_lock_init(&lio->glist_lock[i]);
117
118                 INIT_LIST_HEAD(&lio->glist[i]);
119
120                 lio->glists_virt_base[i] =
121                         lio_dma_alloc(oct,
122                                       lio->glist_entry_size * lio->tx_qsize,
123                                       &lio->glists_dma_base[i]);
124
125                 if (!lio->glists_virt_base[i]) {
126                         lio_delete_glists(lio);
127                         return -ENOMEM;
128                 }
129
130                 for (j = 0; j < lio->tx_qsize; j++) {
131                         g = kzalloc_node(sizeof(*g), GFP_KERNEL,
132                                          numa_node);
133                         if (!g)
134                                 g = kzalloc(sizeof(*g), GFP_KERNEL);
135                         if (!g)
136                                 break;
137
138                         g->sg = lio->glists_virt_base[i] +
139                                 (j * lio->glist_entry_size);
140
141                         g->sg_dma_ptr = lio->glists_dma_base[i] +
142                                         (j * lio->glist_entry_size);
143
144                         list_add_tail(&g->list, &lio->glist[i]);
145                 }
146
147                 if (j != lio->tx_qsize) {
148                         lio_delete_glists(lio);
149                         return -ENOMEM;
150                 }
151         }
152
153         return 0;
154 }
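
/* Usage sketch (illustrative only, not a call site in this file): a
 * probe-time caller is expected to pair the two helpers above, allocating
 * the gather lists once and freeing them on teardown or on any failure,
 * roughly as:
 *
 *	if (lio_setup_glists(octeon_dev, lio, octeon_dev->num_iqs))
 *		goto setup_failed;	(hypothetical error label)
 *	...
 *	lio_delete_glists(lio);
 */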
155
156 int liquidio_set_feature(struct net_device *netdev, int cmd, u16 param1)
157 {
158         struct lio *lio = GET_LIO(netdev);
159         struct octeon_device *oct = lio->oct_dev;
160         struct octnic_ctrl_pkt nctrl;
161         int ret = 0;
162
163         memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
164
165         nctrl.ncmd.u64 = 0;
166         nctrl.ncmd.s.cmd = cmd;
167         nctrl.ncmd.s.param1 = param1;
168         nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
169         nctrl.netpndev = (u64)netdev;
170         nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
171
172         ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
173         if (ret) {
174                 dev_err(&oct->pci_dev->dev, "Feature change failed in core (ret: 0x%x)\n",
175                         ret);
176                 if (ret > 0)
177                         ret = -EIO;
178         }
179         return ret;
180 }
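
/* A minimal usage sketch (assumed parameters, shown for illustration only):
 * feature toggles are sent to the firmware through this helper with one of
 * the OCTNET_CMD_* opcodes handled in liquidio_link_ctrl_cmd_completion(),
 * e.g.
 *
 *	liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE, lro_param);
 *
 * where lro_param is a placeholder for whatever param1 value the command
 * expects.
 */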
181
182 void octeon_report_tx_completion_to_bql(void *txq, unsigned int pkts_compl,
183                                         unsigned int bytes_compl)
184 {
185         struct netdev_queue *netdev_queue = txq;
186
187         netdev_tx_completed_queue(netdev_queue, pkts_compl, bytes_compl);
188 }
189
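/* Tally one completed TX packet and its byte count for BQL accounting;
 * @buf is either an octnet_buf_free_info or a soft command, depending on
 * @reqtype.
 */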
190 void octeon_update_tx_completion_counters(void *buf, int reqtype,
191                                           unsigned int *pkts_compl,
192                                           unsigned int *bytes_compl)
193 {
194         struct octnet_buf_free_info *finfo;
195         struct sk_buff *skb = NULL;
196         struct octeon_soft_command *sc;
197
198         switch (reqtype) {
199         case REQTYPE_NORESP_NET:
200         case REQTYPE_NORESP_NET_SG:
201                 finfo = buf;
202                 skb = finfo->skb;
203                 break;
204
205         case REQTYPE_RESP_NET_SG:
206         case REQTYPE_RESP_NET:
207                 sc = buf;
208                 skb = sc->callback_arg;
209                 break;
210
211         default:
212                 return;
213         }
214
215         (*pkts_compl)++;
216         *bytes_compl += skb->len;
217 }
218
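/* Report a just-queued skb's length to BQL (netdev_tx_sent_queue) at submit
 * time; the bytes are completed later via octeon_report_tx_completion_to_bql().
 * Returns nonzero if the tx queue is stopped.
 */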
219 int octeon_report_sent_bytes_to_bql(void *buf, int reqtype)
220 {
221         struct octnet_buf_free_info *finfo;
222         struct sk_buff *skb;
223         struct octeon_soft_command *sc;
224         struct netdev_queue *txq;
225
226         switch (reqtype) {
227         case REQTYPE_NORESP_NET:
228         case REQTYPE_NORESP_NET_SG:
229                 finfo = buf;
230                 skb = finfo->skb;
231                 break;
232
233         case REQTYPE_RESP_NET_SG:
234         case REQTYPE_RESP_NET:
235                 sc = buf;
236                 skb = sc->callback_arg;
237                 break;
238
239         default:
240                 return 0;
241         }
242
243         txq = netdev_get_tx_queue(skb->dev, skb_get_queue_mapping(skb));
244         netdev_tx_sent_queue(txq, skb->len);
245
246         return netif_xmit_stopped(txq);
247 }
248
249 void liquidio_link_ctrl_cmd_completion(void *nctrl_ptr)
250 {
251         struct octnic_ctrl_pkt *nctrl = (struct octnic_ctrl_pkt *)nctrl_ptr;
252         struct net_device *netdev = (struct net_device *)nctrl->netpndev;
253         struct lio *lio = GET_LIO(netdev);
254         struct octeon_device *oct = lio->oct_dev;
255         u8 *mac;
256
257         if (nctrl->sc_status)
258                 return;
259
260         switch (nctrl->ncmd.s.cmd) {
261         case OCTNET_CMD_CHANGE_DEVFLAGS:
262         case OCTNET_CMD_SET_MULTI_LIST:
263         case OCTNET_CMD_SET_UC_LIST:
264                 break;
265
266         case OCTNET_CMD_CHANGE_MACADDR:
267                 mac = ((u8 *)&nctrl->udd[0]) + 2;
268                 if (nctrl->ncmd.s.param1) {
269                         /* vfidx is 0 based, but vf_num (param1) is 1 based */
270                         int vfidx = nctrl->ncmd.s.param1 - 1;
271                         bool mac_is_admin_assigned = nctrl->ncmd.s.param2;
272
273                         if (mac_is_admin_assigned)
274                                 netif_info(lio, probe, lio->netdev,
275                                            "MAC Address %pM is configured for VF %d\n",
276                                            mac, vfidx);
277                 } else {
278                         netif_info(lio, probe, lio->netdev,
279                                    " MACAddr changed to %pM\n",
280                                    mac);
281                 }
282                 break;
283
284         case OCTNET_CMD_GPIO_ACCESS:
285                 netif_info(lio, probe, lio->netdev, "LED Flashing visual identification\n");
286
287                 break;
288
289         case OCTNET_CMD_ID_ACTIVE:
290                 netif_info(lio, probe, lio->netdev, "LED Flashing visual identification\n");
291
292                 break;
293
294         case OCTNET_CMD_LRO_ENABLE:
295                 dev_info(&oct->pci_dev->dev, "%s LRO Enabled\n", netdev->name);
296                 break;
297
298         case OCTNET_CMD_LRO_DISABLE:
299                 dev_info(&oct->pci_dev->dev, "%s LRO Disabled\n",
300                          netdev->name);
301                 break;
302
303         case OCTNET_CMD_VERBOSE_ENABLE:
304                 dev_info(&oct->pci_dev->dev, "%s Firmware debug enabled\n",
305                          netdev->name);
306                 break;
307
308         case OCTNET_CMD_VERBOSE_DISABLE:
309                 dev_info(&oct->pci_dev->dev, "%s Firmware debug disabled\n",
310                          netdev->name);
311                 break;
312
313         case OCTNET_CMD_VLAN_FILTER_CTL:
314                 if (nctrl->ncmd.s.param1)
315                         dev_info(&oct->pci_dev->dev,
316                                  "%s VLAN filter enabled\n", netdev->name);
317                 else
318                         dev_info(&oct->pci_dev->dev,
319                                  "%s VLAN filter disabled\n", netdev->name);
320                 break;
321
322         case OCTNET_CMD_ADD_VLAN_FILTER:
323                 dev_info(&oct->pci_dev->dev, "%s VLAN filter %d added\n",
324                          netdev->name, nctrl->ncmd.s.param1);
325                 break;
326
327         case OCTNET_CMD_DEL_VLAN_FILTER:
328                 dev_info(&oct->pci_dev->dev, "%s VLAN filter %d removed\n",
329                          netdev->name, nctrl->ncmd.s.param1);
330                 break;
331
332         case OCTNET_CMD_SET_SETTINGS:
333                 dev_info(&oct->pci_dev->dev, "%s settings changed\n",
334                          netdev->name);
335
336                 break;
337
338         /* Case to handle "OCTNET_CMD_TNL_RX_CSUM_CTL"
339          * Command passed by NIC driver
340          */
341         case OCTNET_CMD_TNL_RX_CSUM_CTL:
342                 if (nctrl->ncmd.s.param1 == OCTNET_CMD_RXCSUM_ENABLE) {
343                         netif_info(lio, probe, lio->netdev,
344                                    "RX Checksum Offload Enabled\n");
345                 } else if (nctrl->ncmd.s.param1 ==
346                            OCTNET_CMD_RXCSUM_DISABLE) {
347                         netif_info(lio, probe, lio->netdev,
348                                    "RX Checksum Offload Disabled\n");
349                 }
350                 break;
351
352                 /* Case to handle "OCTNET_CMD_TNL_TX_CSUM_CTL"
353                  * Command passed by NIC driver
354                  */
355         case OCTNET_CMD_TNL_TX_CSUM_CTL:
356                 if (nctrl->ncmd.s.param1 == OCTNET_CMD_TXCSUM_ENABLE) {
357                         netif_info(lio, probe, lio->netdev,
358                                    "TX Checksum Offload Enabled\n");
359                 } else if (nctrl->ncmd.s.param1 ==
360                            OCTNET_CMD_TXCSUM_DISABLE) {
361                         netif_info(lio, probe, lio->netdev,
362                                    "TX Checksum Offload Disabled\n");
363                 }
364                 break;
365
366                 /* Case to handle "OCTNET_CMD_VXLAN_PORT_CONFIG"
367                  * Command passed by NIC driver
368                  */
369         case OCTNET_CMD_VXLAN_PORT_CONFIG:
370                 if (nctrl->ncmd.s.more == OCTNET_CMD_VXLAN_PORT_ADD) {
371                         netif_info(lio, probe, lio->netdev,
372                                    "VxLAN Destination UDP PORT:%d ADDED\n",
373                                    nctrl->ncmd.s.param1);
374                 } else if (nctrl->ncmd.s.more ==
375                            OCTNET_CMD_VXLAN_PORT_DEL) {
376                         netif_info(lio, probe, lio->netdev,
377                                    "VxLAN Destination UDP PORT:%d DELETED\n",
378                                    nctrl->ncmd.s.param1);
379                 }
380                 break;
381
382         case OCTNET_CMD_SET_FLOW_CTL:
383                 netif_info(lio, probe, lio->netdev, "Set RX/TX flow control parameters\n");
384                 break;
385
386         case OCTNET_CMD_QUEUE_COUNT_CTL:
387                 netif_info(lio, probe, lio->netdev, "Queue count updated to %d\n",
388                            nctrl->ncmd.s.param1);
389                 break;
390
391         default:
392                 dev_err(&oct->pci_dev->dev, "%s Unknown cmd %d\n", __func__,
393                         nctrl->ncmd.s.cmd);
394         }
395 }
396
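/* Called on the VF when the PF has (re)assigned this VF's MAC address:
 * record that the address is admin-assigned and, if it actually changed,
 * update the netdev and raise NETDEV_CHANGEADDR.
 */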
397 void octeon_pf_changed_vf_macaddr(struct octeon_device *oct, u8 *mac)
398 {
399         bool macaddr_changed = false;
400         struct net_device *netdev;
401         struct lio *lio;
402
403         rtnl_lock();
404
405         netdev = oct->props[0].netdev;
406         lio = GET_LIO(netdev);
407
408         lio->linfo.macaddr_is_admin_asgnd = true;
409
410         if (!ether_addr_equal(netdev->dev_addr, mac)) {
411                 macaddr_changed = true;
412                 ether_addr_copy(netdev->dev_addr, mac);
413                 ether_addr_copy(((u8 *)&lio->linfo.hw_addr) + 2, mac);
414                 call_netdevice_notifiers(NETDEV_CHANGEADDR, netdev);
415         }
416
417         rtnl_unlock();
418
419         if (macaddr_changed)
420                 dev_info(&oct->pci_dev->dev,
421                          "PF changed VF's MAC address to %pM\n", mac);
422
423         /* no need to notify the firmware of the macaddr change because
424          * the PF did that already
425          */
426 }
427
428 static void octnet_poll_check_rxq_oom_status(struct work_struct *work)
429 {
430         struct cavium_wk *wk = (struct cavium_wk *)work;
431         struct lio *lio = (struct lio *)wk->ctxptr;
432         struct octeon_device *oct = lio->oct_dev;
433         struct octeon_droq *droq;
434         int q, q_no = 0;
435
436         if (ifstate_check(lio, LIO_IFSTATE_RUNNING)) {
437                 for (q = 0; q < lio->linfo.num_rxpciq; q++) {
438                         q_no = lio->linfo.rxpciq[q].s.q_no;
439                         droq = oct->droq[q_no];
440                         if (!droq)
441                                 continue;
442                         octeon_droq_check_oom(droq);
443                 }
444         }
445         queue_delayed_work(lio->rxq_status_wq.wq,
446                            &lio->rxq_status_wq.wk.work,
447                            msecs_to_jiffies(LIO_OOM_POLL_INTERVAL_MS));
448 }
449
450 int setup_rx_oom_poll_fn(struct net_device *netdev)
451 {
452         struct lio *lio = GET_LIO(netdev);
453         struct octeon_device *oct = lio->oct_dev;
454
455         lio->rxq_status_wq.wq = alloc_workqueue("rxq-oom-status",
456                                                 WQ_MEM_RECLAIM, 0);
457         if (!lio->rxq_status_wq.wq) {
458                 dev_err(&oct->pci_dev->dev, "unable to create cavium rxq oom status wq\n");
459                 return -ENOMEM;
460         }
461         INIT_DELAYED_WORK(&lio->rxq_status_wq.wk.work,
462                           octnet_poll_check_rxq_oom_status);
463         lio->rxq_status_wq.wk.ctxptr = lio;
464         queue_delayed_work(lio->rxq_status_wq.wq,
465                            &lio->rxq_status_wq.wk.work,
466                            msecs_to_jiffies(LIO_OOM_POLL_INTERVAL_MS));
467         return 0;
468 }
469
470 void cleanup_rx_oom_poll_fn(struct net_device *netdev)
471 {
472         struct lio *lio = GET_LIO(netdev);
473
474         if (lio->rxq_status_wq.wq) {
475                 cancel_delayed_work_sync(&lio->rxq_status_wq.wk.work);
476                 flush_workqueue(lio->rxq_status_wq.wq);
477                 destroy_workqueue(lio->rxq_status_wq.wq);
478         }
479 }
480
481 /* Runs in interrupt context. */
482 static void lio_update_txq_status(struct octeon_device *oct, int iq_num)
483 {
484         struct octeon_instr_queue *iq = oct->instr_queue[iq_num];
485         struct net_device *netdev;
486         struct lio *lio;
487
488         netdev = oct->props[iq->ifidx].netdev;
489
490         /* This is needed because the first IQ does not have
491          * a netdev associated with it.
492          */
493         if (!netdev)
494                 return;
495
496         lio = GET_LIO(netdev);
497         if (__netif_subqueue_stopped(netdev, iq->q_index) &&
498             lio->linfo.link.s.link_up &&
499             (!octnet_iq_is_full(oct, iq_num))) {
500                 netif_wake_subqueue(netdev, iq->q_index);
501                 INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, iq_num,
502                                           tx_restart, 1);
503         }
504 }
505
506 /**
507  * \brief Setup output queue
508  * @param oct octeon device
509  * @param q_no which queue
510  * @param num_descs how many descriptors
511  * @param desc_size size of each descriptor
512  * @param app_ctx application context
513  */
514 static int octeon_setup_droq(struct octeon_device *oct, int q_no, int num_descs,
515                              int desc_size, void *app_ctx)
516 {
517         int ret_val;
518
519         dev_dbg(&oct->pci_dev->dev, "Creating Droq: %d\n", q_no);
520         /* droq creation and local register settings. */
521         ret_val = octeon_create_droq(oct, q_no, num_descs, desc_size, app_ctx);
522         if (ret_val < 0)
523                 return ret_val;
524
525         if (ret_val == 1) {
526                 dev_dbg(&oct->pci_dev->dev, "Using default droq %d\n", q_no);
527                 return 0;
528         }
529
530         /* Enable the droq queues */
531         octeon_set_droq_pkt_op(oct, q_no, 1);
532
533         /* Send Credit for Octeon Output queues. Credits are always
534          * sent after the output queue is enabled.
535          */
536         writel(oct->droq[q_no]->max_count, oct->droq[q_no]->pkts_credit_reg);
537
538         return ret_val;
539 }
540
541 /** Routine to push packets arriving on the Octeon interface up to the network layer.
542  * @param octeon_id - octeon device id.
543  * @param skbuff   - skbuff struct to be passed to network layer.
544  * @param len      - size of total data received.
545  * @param rh       - Control header associated with the packet
546  * @param param    - additional control data with the packet
547  * @param arg      - farg registered in droq_ops
548  */
549 static void
550 liquidio_push_packet(u32 octeon_id __attribute__((unused)),
551                      void *skbuff,
552                      u32 len,
553                      union octeon_rh *rh,
554                      void *param,
555                      void *arg)
556 {
557         struct net_device *netdev = (struct net_device *)arg;
558         struct octeon_droq *droq =
559             container_of(param, struct octeon_droq, napi);
560         struct sk_buff *skb = (struct sk_buff *)skbuff;
561         struct skb_shared_hwtstamps *shhwtstamps;
562         struct napi_struct *napi = param;
563         u16 vtag = 0;
564         u32 r_dh_off;
565         u64 ns;
566
567         if (netdev) {
568                 struct lio *lio = GET_LIO(netdev);
569                 struct octeon_device *oct = lio->oct_dev;
570
571                 /* Do not proceed if the interface is not in RUNNING state. */
572                 if (!ifstate_check(lio, LIO_IFSTATE_RUNNING)) {
573                         recv_buffer_free(skb);
574                         droq->stats.rx_dropped++;
575                         return;
576                 }
577
578                 skb->dev = netdev;
579
580                 skb_record_rx_queue(skb, droq->q_no);
581                 if (likely(len > MIN_SKB_SIZE)) {
582                         struct octeon_skb_page_info *pg_info;
583                         unsigned char *va;
584
585                         pg_info = ((struct octeon_skb_page_info *)(skb->cb));
586                         if (pg_info->page) {
587                                 /* For Paged allocation use the frags */
588                                 va = page_address(pg_info->page) +
589                                         pg_info->page_offset;
590                                 memcpy(skb->data, va, MIN_SKB_SIZE);
591                                 skb_put(skb, MIN_SKB_SIZE);
592                                 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
593                                                 pg_info->page,
594                                                 pg_info->page_offset +
595                                                 MIN_SKB_SIZE,
596                                                 len - MIN_SKB_SIZE,
597                                                 LIO_RXBUFFER_SZ);
598                         }
599                 } else {
600                         struct octeon_skb_page_info *pg_info =
601                                 ((struct octeon_skb_page_info *)(skb->cb));
602                         skb_copy_to_linear_data(skb, page_address(pg_info->page)
603                                                 + pg_info->page_offset, len);
604                         skb_put(skb, len);
605                         put_page(pg_info->page);
606                 }
607
608                 r_dh_off = (rh->r_dh.len - 1) * BYTES_PER_DHLEN_UNIT;
609
610                 if (oct->ptp_enable) {
611                         if (rh->r_dh.has_hwtstamp) {
612                                 /* timestamp is included from the hardware at
613                                  * the beginning of the packet.
614                                  */
615                                 if (ifstate_check
616                                         (lio,
617                                          LIO_IFSTATE_RX_TIMESTAMP_ENABLED)) {
618                                         /* Nanoseconds are in the first 64-bits
619                                          * of the packet.
620                                          */
621                                         memcpy(&ns, (skb->data + r_dh_off),
622                                                sizeof(ns));
623                                         r_dh_off -= BYTES_PER_DHLEN_UNIT;
624                                         shhwtstamps = skb_hwtstamps(skb);
625                                         shhwtstamps->hwtstamp =
626                                                 ns_to_ktime(ns +
627                                                             lio->ptp_adjust);
628                                 }
629                         }
630                 }
631
632                 if (rh->r_dh.has_hash) {
633                         __be32 *hash_be = (__be32 *)(skb->data + r_dh_off);
634                         u32 hash = be32_to_cpu(*hash_be);
635
636                         skb_set_hash(skb, hash, PKT_HASH_TYPE_L4);
637                         r_dh_off -= BYTES_PER_DHLEN_UNIT;
638                 }
639
640                 skb_pull(skb, rh->r_dh.len * BYTES_PER_DHLEN_UNIT);
641                 skb->protocol = eth_type_trans(skb, skb->dev);
642
643                 if ((netdev->features & NETIF_F_RXCSUM) &&
644                     (((rh->r_dh.encap_on) &&
645                       (rh->r_dh.csum_verified & CNNIC_TUN_CSUM_VERIFIED)) ||
646                      (!(rh->r_dh.encap_on) &&
647                       (rh->r_dh.csum_verified & CNNIC_CSUM_VERIFIED))))
648                         /* checksum has already been verified */
649                         skb->ip_summed = CHECKSUM_UNNECESSARY;
650                 else
651                         skb->ip_summed = CHECKSUM_NONE;
652
653                 /* Setting Encapsulation field on basis of status received
654                  * from the firmware
655                  */
656                 if (rh->r_dh.encap_on) {
657                         skb->encapsulation = 1;
658                         skb->csum_level = 1;
659                         droq->stats.rx_vxlan++;
660                 }
661
662                 /* inbound VLAN tag */
663                 if ((netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
664                     rh->r_dh.vlan) {
665                         u16 priority = rh->r_dh.priority;
666                         u16 vid = rh->r_dh.vlan;
667
668                         vtag = (priority << VLAN_PRIO_SHIFT) | vid;
669                         __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vtag);
670                 }
671
672                 napi_gro_receive(napi, skb);
673
674                 droq->stats.rx_bytes_received += len -
675                         rh->r_dh.len * BYTES_PER_DHLEN_UNIT;
676                 droq->stats.rx_pkts_received++;
677         } else {
678                 recv_buffer_free(skb);
679         }
680 }
681
682 /**
683  * \brief wrapper for calling napi_schedule
684  * @param param parameters to pass to napi_schedule
685  *
686  * Used when scheduling on different CPUs
687  */
688 static void napi_schedule_wrapper(void *param)
689 {
690         struct napi_struct *napi = param;
691
692         napi_schedule(napi);
693 }
694
695 /**
696  * \brief callback when receive interrupt occurs and we are in NAPI mode
697  * @param arg pointer to octeon output queue
698  */
699 static void liquidio_napi_drv_callback(void *arg)
700 {
701         struct octeon_device *oct;
702         struct octeon_droq *droq = arg;
703         int this_cpu = smp_processor_id();
704
705         oct = droq->oct_dev;
706
707         if (OCTEON_CN23XX_PF(oct) || OCTEON_CN23XX_VF(oct) ||
708             droq->cpu_id == this_cpu) {
709                 napi_schedule_irqoff(&droq->napi);
710         } else {
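                /* The DROQ is pinned to a different CPU: hand the NAPI
                 * schedule off to that CPU with an async smp call so RX
                 * processing runs on the designated core.
                 */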
711                 call_single_data_t *csd = &droq->csd;
712
713                 csd->func = napi_schedule_wrapper;
714                 csd->info = &droq->napi;
715                 csd->flags = 0;
716
717                 smp_call_function_single_async(droq->cpu_id, csd);
718         }
719 }
720
721 /**
722  * \brief Entry point for NAPI polling
723  * @param napi NAPI structure
724  * @param budget maximum number of items to process
725  */
726 static int liquidio_napi_poll(struct napi_struct *napi, int budget)
727 {
728         struct octeon_instr_queue *iq;
729         struct octeon_device *oct;
730         struct octeon_droq *droq;
731         int tx_done = 0, iq_no;
732         int work_done;
733
734         droq = container_of(napi, struct octeon_droq, napi);
735         oct = droq->oct_dev;
736         iq_no = droq->q_no;
737
738         /* Handle Droq descriptors */
739         work_done = octeon_droq_process_poll_pkts(oct, droq, budget);
740
741         /* Flush the instruction queue */
742         iq = oct->instr_queue[iq_no];
743         if (iq) {
744                 /* TODO: move this check to inside octeon_flush_iq,
745                  * once check_db_timeout is removed
746                  */
747                 if (atomic_read(&iq->instr_pending))
748                         /* Process iq buffers within the budget limits */
749                         tx_done = octeon_flush_iq(oct, iq, budget);
750                 else
751                         tx_done = 1;
752                 /* Update the iq read index rather than waiting for the next
753                  * interrupt; return if tx_done is false.
754                  */
755                 /* sub-queue status update */
756                 lio_update_txq_status(oct, iq_no);
757         } else {
758                 dev_err(&oct->pci_dev->dev, "%s:  iq (%d) num invalid\n",
759                         __func__, iq_no);
760         }
761
762 #define MAX_REG_CNT  2000000U
763         /* force enable interrupt if reg cnts are high to avoid wraparound */
764         if ((work_done < budget && tx_done) ||
765             (iq && iq->pkt_in_done >= MAX_REG_CNT) ||
766             (droq->pkt_count >= MAX_REG_CNT)) {
767                 tx_done = 1;
768                 napi_complete_done(napi, work_done);
769
770                 octeon_enable_irq(droq->oct_dev, droq->q_no);
771                 return 0;
772         }
773
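        /* NAPI return convention: returning the full budget keeps this queue
         * on the poll list while the IQ still has pending work, even if fewer
         * than budget RX packets were processed.
         */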
774         return (!tx_done) ? (budget) : (work_done);
775 }
776
777 /**
778  * \brief Setup input and output queues
779  * @param octeon_dev octeon device
780  * @param ifidx Interface index
781  *
782  * Note: Queues are with respect to the octeon device. Thus
783  * an input queue is for egress packets, and output queues
784  * are for ingress packets.
785  */
786 int liquidio_setup_io_queues(struct octeon_device *octeon_dev, int ifidx,
787                              u32 num_iqs, u32 num_oqs)
788 {
789         struct octeon_droq_ops droq_ops;
790         struct net_device *netdev;
791         struct octeon_droq *droq;
792         struct napi_struct *napi;
793         int cpu_id_modulus;
794         int num_tx_descs;
795         struct lio *lio;
796         int retval = 0;
797         int q, q_no;
798         int cpu_id;
799
800         netdev = octeon_dev->props[ifidx].netdev;
801
802         lio = GET_LIO(netdev);
803
804         memset(&droq_ops, 0, sizeof(struct octeon_droq_ops));
805
806         droq_ops.fptr = liquidio_push_packet;
807         droq_ops.farg = netdev;
808
809         droq_ops.poll_mode = 1;
810         droq_ops.napi_fn = liquidio_napi_drv_callback;
811         cpu_id = 0;
812         cpu_id_modulus = num_present_cpus();
813
814         /* set up DROQs. */
815         for (q = 0; q < num_oqs; q++) {
816                 q_no = lio->linfo.rxpciq[q].s.q_no;
817                 dev_dbg(&octeon_dev->pci_dev->dev,
818                         "%s index:%d linfo.rxpciq.s.q_no:%d\n",
819                         __func__, q, q_no);
820                 retval = octeon_setup_droq(
821                     octeon_dev, q_no,
822                     CFG_GET_NUM_RX_DESCS_NIC_IF(octeon_get_conf(octeon_dev),
823                                                 lio->ifidx),
824                     CFG_GET_NUM_RX_BUF_SIZE_NIC_IF(octeon_get_conf(octeon_dev),
825                                                    lio->ifidx),
826                     NULL);
827                 if (retval) {
828                         dev_err(&octeon_dev->pci_dev->dev,
829                                 "%s : Runtime DROQ(RxQ) creation failed.\n",
830                                 __func__);
831                         return 1;
832                 }
833
834                 droq = octeon_dev->droq[q_no];
835                 napi = &droq->napi;
836                 dev_dbg(&octeon_dev->pci_dev->dev, "netif_napi_add netdev:%llx oct:%llx\n",
837                         (u64)netdev, (u64)octeon_dev);
838                 netif_napi_add(netdev, napi, liquidio_napi_poll, 64);
839
840                 /* designate a CPU for this droq */
841                 droq->cpu_id = cpu_id;
842                 cpu_id++;
843                 if (cpu_id >= cpu_id_modulus)
844                         cpu_id = 0;
845
846                 octeon_register_droq_ops(octeon_dev, q_no, &droq_ops);
847         }
848
849         if (OCTEON_CN23XX_PF(octeon_dev) || OCTEON_CN23XX_VF(octeon_dev)) {
850                 /* 23XX PF/VF can send/recv control messages (via the first
851                  * PF/VF-owned droq) from the firmware even if the ethX
852                  * interface is down, which is why poll_mode must be off
853                  * for the first droq.
854                  */
855                 octeon_dev->droq[0]->ops.poll_mode = 0;
856         }
857
858         /* set up IQs. */
859         for (q = 0; q < num_iqs; q++) {
860                 num_tx_descs = CFG_GET_NUM_TX_DESCS_NIC_IF(
861                     octeon_get_conf(octeon_dev), lio->ifidx);
862                 retval = octeon_setup_iq(octeon_dev, ifidx, q,
863                                          lio->linfo.txpciq[q], num_tx_descs,
864                                          netdev_get_tx_queue(netdev, q));
865                 if (retval) {
866                         dev_err(&octeon_dev->pci_dev->dev,
867                                 " %s : Runtime IQ(TxQ) creation failed.\n",
868                                 __func__);
869                         return 1;
870                 }
871
872                 /* XPS */
873                 if (!OCTEON_CN23XX_VF(octeon_dev) && octeon_dev->msix_on &&
874                     octeon_dev->ioq_vector) {
875                         struct octeon_ioq_vector    *ioq_vector;
876
877                         ioq_vector = &octeon_dev->ioq_vector[q];
878                         netif_set_xps_queue(netdev,
879                                             &ioq_vector->affinity_mask,
880                                             ioq_vector->iq_index);
881                 }
882         }
883
884         return 0;
885 }
886
887 static
888 int liquidio_schedule_msix_droq_pkt_handler(struct octeon_droq *droq, u64 ret)
889 {
890         struct octeon_device *oct = droq->oct_dev;
891         struct octeon_device_priv *oct_priv =
892             (struct octeon_device_priv *)oct->priv;
893
894         if (droq->ops.poll_mode) {
895                 droq->ops.napi_fn(droq);
896         } else {
897                 if (ret & MSIX_PO_INT) {
898                         if (OCTEON_CN23XX_VF(oct))
899                                 dev_err(&oct->pci_dev->dev,
900                                         "should not get rx interrupts when poll_mode = 0 for vf\n");
901                         tasklet_schedule(&oct_priv->droq_tasklet);
902                         return 1;
903                 }
904                 /* this will be flushed periodically by check iq db */
905                 if (ret & MSIX_PI_INT)
906                         return 0;
907         }
908
909         return 0;
910 }
911
912 irqreturn_t
913 liquidio_msix_intr_handler(int irq __attribute__((unused)), void *dev)
914 {
915         struct octeon_ioq_vector *ioq_vector = (struct octeon_ioq_vector *)dev;
916         struct octeon_device *oct = ioq_vector->oct_dev;
917         struct octeon_droq *droq = oct->droq[ioq_vector->droq_index];
918         u64 ret;
919
920         ret = oct->fn_list.msix_interrupt_handler(ioq_vector);
921
922         if (ret & MSIX_PO_INT || ret & MSIX_PI_INT)
923                 liquidio_schedule_msix_droq_pkt_handler(droq, ret);
924
925         return IRQ_HANDLED;
926 }
927
928 /**
929  * \brief Droq packet processor scheduler
930  * @param oct octeon device
931  */
932 static void liquidio_schedule_droq_pkt_handlers(struct octeon_device *oct)
933 {
934         struct octeon_device_priv *oct_priv =
935                 (struct octeon_device_priv *)oct->priv;
936         struct octeon_droq *droq;
937         u64 oq_no;
938
939         if (oct->int_status & OCT_DEV_INTR_PKT_DATA) {
940                 for (oq_no = 0; oq_no < MAX_OCTEON_OUTPUT_QUEUES(oct);
941                      oq_no++) {
942                         if (!(oct->droq_intr & BIT_ULL(oq_no)))
943                                 continue;
944
945                         droq = oct->droq[oq_no];
946
947                         if (droq->ops.poll_mode) {
948                                 droq->ops.napi_fn(droq);
949                                 oct_priv->napi_mask |= (1 << oq_no);
950                         } else {
951                                 tasklet_schedule(&oct_priv->droq_tasklet);
952                         }
953                 }
954         }
955 }
956
957 /**
958  * \brief Interrupt handler for octeon
959  * @param irq unused
960  * @param dev octeon device
961  */
962 static
963 irqreturn_t liquidio_legacy_intr_handler(int irq __attribute__((unused)),
964                                          void *dev)
965 {
966         struct octeon_device *oct = (struct octeon_device *)dev;
967         irqreturn_t ret;
968
969         /* Disable our interrupts for the duration of ISR */
970         oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR);
971
972         ret = oct->fn_list.process_interrupt_regs(oct);
973
974         if (ret == IRQ_HANDLED)
975                 liquidio_schedule_droq_pkt_handlers(oct);
976
977         /* Re-enable our interrupts  */
978         if (!(atomic_read(&oct->status) == OCT_DEV_IN_RESET))
979                 oct->fn_list.enable_interrupt(oct, OCTEON_ALL_INTR);
980
981         return ret;
982 }
983
984 /**
985  * \brief Setup interrupt for octeon device
986  * @param oct octeon device
987  *
988  *  Enable interrupt in Octeon device as given in the PCI interrupt mask.
989  */
990 int octeon_setup_interrupt(struct octeon_device *oct, u32 num_ioqs)
991 {
992         struct msix_entry *msix_entries;
993         char *queue_irq_names = NULL;
994         int i, num_interrupts = 0;
995         int num_alloc_ioq_vectors;
996         char *aux_irq_name = NULL;
997         int num_ioq_vectors;
998         int irqret, err;
999
1000         if (oct->msix_on) {
1001                 oct->num_msix_irqs = num_ioqs;
1002                 if (OCTEON_CN23XX_PF(oct)) {
1003                         num_interrupts = MAX_IOQ_INTERRUPTS_PER_PF + 1;
1004
1005                         /* one non ioq interrupt for handling
1006                          * sli_mac_pf_int_sum
1007                          */
1008                         oct->num_msix_irqs += 1;
1009                 } else if (OCTEON_CN23XX_VF(oct)) {
1010                         num_interrupts = MAX_IOQ_INTERRUPTS_PER_VF;
1011                 }
1012
1013                 /* allocate storage for the names assigned to each irq */
1014                 oct->irq_name_storage =
1015                         kcalloc(num_interrupts, INTRNAMSIZ, GFP_KERNEL);
1016                 if (!oct->irq_name_storage) {
1017                         dev_err(&oct->pci_dev->dev, "Irq name storage alloc failed...\n");
1018                         return -ENOMEM;
1019                 }
1020
1021                 queue_irq_names = oct->irq_name_storage;
1022
1023                 if (OCTEON_CN23XX_PF(oct))
1024                         aux_irq_name = &queue_irq_names
1025                                 [IRQ_NAME_OFF(MAX_IOQ_INTERRUPTS_PER_PF)];
1026
1027                 oct->msix_entries = kcalloc(oct->num_msix_irqs,
1028                                             sizeof(struct msix_entry),
1029                                             GFP_KERNEL);
1030                 if (!oct->msix_entries) {
1031                         dev_err(&oct->pci_dev->dev, "Memory Alloc failed...\n");
1032                         kfree(oct->irq_name_storage);
1033                         oct->irq_name_storage = NULL;
1034                         return -ENOMEM;
1035                 }
1036
1037                 msix_entries = (struct msix_entry *)oct->msix_entries;
1038
1039                 /* Assumption: PF MSI-X vectors start at pf_srn (with trs used
1040                  * for the last, non-IOQ entry), not at 0; if not, change this code.
1041                  */
1042                 if (OCTEON_CN23XX_PF(oct)) {
1043                         for (i = 0; i < oct->num_msix_irqs - 1; i++)
1044                                 msix_entries[i].entry =
1045                                         oct->sriov_info.pf_srn + i;
1046
1047                         msix_entries[oct->num_msix_irqs - 1].entry =
1048                                 oct->sriov_info.trs;
1049                 } else if (OCTEON_CN23XX_VF(oct)) {
1050                         for (i = 0; i < oct->num_msix_irqs; i++)
1051                                 msix_entries[i].entry = i;
1052                 }
1053                 num_alloc_ioq_vectors = pci_enable_msix_range(
1054                                                 oct->pci_dev, msix_entries,
1055                                                 oct->num_msix_irqs,
1056                                                 oct->num_msix_irqs);
1057                 if (num_alloc_ioq_vectors < 0) {
1058                         dev_err(&oct->pci_dev->dev, "unable to Allocate MSI-X interrupts\n");
1059                         kfree(oct->msix_entries);
1060                         oct->msix_entries = NULL;
1061                         kfree(oct->irq_name_storage);
1062                         oct->irq_name_storage = NULL;
1063                         return num_alloc_ioq_vectors;
1064                 }
1065
1066                 dev_dbg(&oct->pci_dev->dev, "OCTEON: Enough MSI-X interrupts are allocated...\n");
1067
1068                 num_ioq_vectors = oct->num_msix_irqs;
1069                 /** For PF, there is one non-ioq interrupt handler */
1070                 if (OCTEON_CN23XX_PF(oct)) {
1071                         num_ioq_vectors -= 1;
1072
1073                         snprintf(aux_irq_name, INTRNAMSIZ,
1074                                  "LiquidIO%u-pf%u-aux", oct->octeon_id,
1075                                  oct->pf_num);
1076                         irqret = request_irq(
1077                                         msix_entries[num_ioq_vectors].vector,
1078                                         liquidio_legacy_intr_handler, 0,
1079                                         aux_irq_name, oct);
1080                         if (irqret) {
1081                                 dev_err(&oct->pci_dev->dev,
1082                                         "Request_irq failed for MSIX interrupt Error: %d\n",
1083                                         irqret);
1084                                 pci_disable_msix(oct->pci_dev);
1085                                 kfree(oct->msix_entries);
1086                                 kfree(oct->irq_name_storage);
1087                                 oct->irq_name_storage = NULL;
1088                                 oct->msix_entries = NULL;
1089                                 return irqret;
1090                         }
1091                 }
1092                 for (i = 0 ; i < num_ioq_vectors ; i++) {
1093                         if (OCTEON_CN23XX_PF(oct))
1094                                 snprintf(&queue_irq_names[IRQ_NAME_OFF(i)],
1095                                          INTRNAMSIZ, "LiquidIO%u-pf%u-rxtx-%u",
1096                                          oct->octeon_id, oct->pf_num, i);
1097
1098                         if (OCTEON_CN23XX_VF(oct))
1099                                 snprintf(&queue_irq_names[IRQ_NAME_OFF(i)],
1100                                          INTRNAMSIZ, "LiquidIO%u-vf%u-rxtx-%u",
1101                                          oct->octeon_id, oct->vf_num, i);
1102
1103                         irqret = request_irq(msix_entries[i].vector,
1104                                              liquidio_msix_intr_handler, 0,
1105                                              &queue_irq_names[IRQ_NAME_OFF(i)],
1106                                              &oct->ioq_vector[i]);
1107
1108                         if (irqret) {
1109                                 dev_err(&oct->pci_dev->dev,
1110                                         "Request_irq failed for MSIX interrupt Error: %d\n",
1111                                         irqret);
1112                                 /* Free the non-ioq irq vector here. */
1113                                 free_irq(msix_entries[num_ioq_vectors].vector,
1114                                          oct);
1115
1116                                 while (i) {
1117                                         i--;
1118                                         /* clear the affinity mask */
1119                                         irq_set_affinity_hint(
1120                                                       msix_entries[i].vector,
1121                                                       NULL);
1122                                         free_irq(msix_entries[i].vector,
1123                                                  &oct->ioq_vector[i]);
1124                                 }
1125                                 pci_disable_msix(oct->pci_dev);
1126                                 kfree(oct->msix_entries);
1127                                 kfree(oct->irq_name_storage);
1128                                 oct->irq_name_storage = NULL;
1129                                 oct->msix_entries = NULL;
1130                                 return irqret;
1131                         }
1132                         oct->ioq_vector[i].vector = msix_entries[i].vector;
1133                         /* assign the cpu mask for this msix interrupt vector */
1134                         irq_set_affinity_hint(msix_entries[i].vector,
1135                                               &oct->ioq_vector[i].affinity_mask
1136                                               );
1137                 }
1138                 dev_dbg(&oct->pci_dev->dev, "OCTEON[%d]: MSI-X enabled\n",
1139                         oct->octeon_id);
1140         } else {
1141                 err = pci_enable_msi(oct->pci_dev);
1142                 if (err)
1143                         dev_warn(&oct->pci_dev->dev, "Reverting to legacy interrupts. Error: %d\n",
1144                                  err);
1145                 else
1146                         oct->flags |= LIO_FLAG_MSI_ENABLED;
1147
1148                 /* allocate storage for the names assigned to the irq */
1149                 oct->irq_name_storage = kcalloc(1, INTRNAMSIZ, GFP_KERNEL);
1150                 if (!oct->irq_name_storage)
1151                         return -ENOMEM;
1152
1153                 queue_irq_names = oct->irq_name_storage;
1154
1155                 if (OCTEON_CN23XX_PF(oct))
1156                         snprintf(&queue_irq_names[IRQ_NAME_OFF(0)], INTRNAMSIZ,
1157                                  "LiquidIO%u-pf%u-rxtx-%u",
1158                                  oct->octeon_id, oct->pf_num, 0);
1159
1160                 if (OCTEON_CN23XX_VF(oct))
1161                         snprintf(&queue_irq_names[IRQ_NAME_OFF(0)], INTRNAMSIZ,
1162                                  "LiquidIO%u-vf%u-rxtx-%u",
1163                                  oct->octeon_id, oct->vf_num, 0);
1164
1165                 irqret = request_irq(oct->pci_dev->irq,
1166                                      liquidio_legacy_intr_handler,
1167                                      IRQF_SHARED,
1168                                      &queue_irq_names[IRQ_NAME_OFF(0)], oct);
1169                 if (irqret) {
1170                         if (oct->flags & LIO_FLAG_MSI_ENABLED)
1171                                 pci_disable_msi(oct->pci_dev);
1172                         dev_err(&oct->pci_dev->dev, "Request IRQ failed with code: %d\n",
1173                                 irqret);
1174                         kfree(oct->irq_name_storage);
1175                         oct->irq_name_storage = NULL;
1176                         return irqret;
1177                 }
1178         }
1179         return 0;
1180 }
1181
1182 /**
1183  * \brief Net device change_mtu
1184  * @param netdev network device
1185  */
1186 int liquidio_change_mtu(struct net_device *netdev, int new_mtu)
1187 {
1188         struct lio *lio = GET_LIO(netdev);
1189         struct octeon_device *oct = lio->oct_dev;
1190         struct octeon_soft_command *sc;
1191         union octnet_cmd *ncmd;
1192         int ret = 0;
1193
1194         sc = (struct octeon_soft_command *)
1195                 octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE, 16, 0);
             if (!sc)
                     return -ENOMEM;
1196
1197         ncmd = (union octnet_cmd *)sc->virtdptr;
1198
1199         init_completion(&sc->complete);
1200         sc->sc_status = OCTEON_REQUEST_PENDING;
1201
1202         ncmd->u64 = 0;
1203         ncmd->s.cmd = OCTNET_CMD_CHANGE_MTU;
1204         ncmd->s.param1 = new_mtu;
1205
1206         octeon_swap_8B_data((u64 *)ncmd, (OCTNET_CMD_SIZE >> 3));
1207
1208         sc->iq_no = lio->linfo.txpciq[0].s.q_no;
1209
1210         octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
1211                                     OPCODE_NIC_CMD, 0, 0, 0);
1212
1213         ret = octeon_send_soft_command(oct, sc);
1214         if (ret == IQ_SEND_FAILED) {
1215                 netif_info(lio, rx_err, lio->netdev, "Failed to change MTU\n");
1216                 octeon_free_soft_command(oct, sc);
1217                 return -EINVAL;
1218         }
1219         /* Sleep on a wait queue until the cond flag indicates that the
1220          * response arrived or timed out.
1221          */
1222         ret = wait_for_sc_completion_timeout(oct, sc, 0);
1223         if (ret)
1224                 return ret;
1225
1226         if (sc->sc_status) {
1227                 WRITE_ONCE(sc->caller_is_done, true);
1228                 return -EINVAL;
1229         }
1230
1231         netdev->mtu = new_mtu;
1232         lio->mtu = new_mtu;
1233
1234         WRITE_ONCE(sc->caller_is_done, true);
1235         return 0;
1236 }
1237
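/* Wait (sleeping one tick at a time) until every active output queue has
 * drained its pending packets or the retry budget expires; returns the
 * number of packets still pending.
 */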
1238 int lio_wait_for_clean_oq(struct octeon_device *oct)
1239 {
1240         int retry = 100, pending_pkts = 0;
1241         int idx;
1242
1243         do {
1244                 pending_pkts = 0;
1245
1246                 for (idx = 0; idx < MAX_OCTEON_OUTPUT_QUEUES(oct); idx++) {
1247                         if (!(oct->io_qmask.oq & BIT_ULL(idx)))
1248                                 continue;
1249                         pending_pkts +=
1250                                 atomic_read(&oct->droq[idx]->pkts_pending);
1251                 }
1252
1253                 if (pending_pkts > 0)
1254                         schedule_timeout_uninterruptible(1);
1255
1256         } while (retry-- && pending_pkts);
1257
1258         return pending_pkts;
1259 }
1260
1261 static void
1262 octnet_nic_stats_callback(struct octeon_device *oct_dev,
1263                           u32 status, void *ptr)
1264 {
1265         struct octeon_soft_command *sc = (struct octeon_soft_command *)ptr;
1266         struct oct_nic_stats_resp *resp =
1267             (struct oct_nic_stats_resp *)sc->virtrptr;
1268         struct nic_rx_stats *rsp_rstats = &resp->stats.fromwire;
1269         struct nic_tx_stats *rsp_tstats = &resp->stats.fromhost;
1270         struct nic_rx_stats *rstats = &oct_dev->link_stats.fromwire;
1271         struct nic_tx_stats *tstats = &oct_dev->link_stats.fromhost;
1272
1273         if (status != OCTEON_REQUEST_TIMEOUT && !resp->status) {
1274                 octeon_swap_8B_data((u64 *)&resp->stats,
1275                                     (sizeof(struct oct_link_stats)) >> 3);
1276
1277                 /* RX link-level stats */
1278                 rstats->total_rcvd = rsp_rstats->total_rcvd;
1279                 rstats->bytes_rcvd = rsp_rstats->bytes_rcvd;
1280                 rstats->total_bcst = rsp_rstats->total_bcst;
1281                 rstats->total_mcst = rsp_rstats->total_mcst;
1282                 rstats->runts      = rsp_rstats->runts;
1283                 rstats->ctl_rcvd   = rsp_rstats->ctl_rcvd;
1284                 /* Accounts for over/under-run of buffers */
1285                 rstats->fifo_err  = rsp_rstats->fifo_err;
1286                 rstats->dmac_drop = rsp_rstats->dmac_drop;
1287                 rstats->fcs_err   = rsp_rstats->fcs_err;
1288                 rstats->jabber_err = rsp_rstats->jabber_err;
1289                 rstats->l2_err    = rsp_rstats->l2_err;
1290                 rstats->frame_err = rsp_rstats->frame_err;
1291                 rstats->red_drops = rsp_rstats->red_drops;
1292
1293                 /* RX firmware stats */
1294                 rstats->fw_total_rcvd = rsp_rstats->fw_total_rcvd;
1295                 rstats->fw_total_fwd = rsp_rstats->fw_total_fwd;
1296                 rstats->fw_total_mcast = rsp_rstats->fw_total_mcast;
1297                 rstats->fw_total_bcast = rsp_rstats->fw_total_bcast;
1298                 rstats->fw_err_pko = rsp_rstats->fw_err_pko;
1299                 rstats->fw_err_link = rsp_rstats->fw_err_link;
1300                 rstats->fw_err_drop = rsp_rstats->fw_err_drop;
1301                 rstats->fw_rx_vxlan = rsp_rstats->fw_rx_vxlan;
1302                 rstats->fw_rx_vxlan_err = rsp_rstats->fw_rx_vxlan_err;
1303
1304                 /* Number of packets that are LROed      */
1305                 rstats->fw_lro_pkts = rsp_rstats->fw_lro_pkts;
1306                 /* Number of octets that are LROed       */
1307                 rstats->fw_lro_octs = rsp_rstats->fw_lro_octs;
1308                 /* Number of LRO packets formed          */
1309                 rstats->fw_total_lro = rsp_rstats->fw_total_lro;
1310                 /* Number of times LRO of a packet was aborted */
1311                 rstats->fw_lro_aborts = rsp_rstats->fw_lro_aborts;
1312                 rstats->fw_lro_aborts_port = rsp_rstats->fw_lro_aborts_port;
1313                 rstats->fw_lro_aborts_seq = rsp_rstats->fw_lro_aborts_seq;
1314                 rstats->fw_lro_aborts_tsval = rsp_rstats->fw_lro_aborts_tsval;
1315                 rstats->fw_lro_aborts_timer = rsp_rstats->fw_lro_aborts_timer;
1316                 /* intrmod: packet forward rate */
1317                 rstats->fwd_rate = rsp_rstats->fwd_rate;
1318
1319                 /* TX link-level stats */
1320                 tstats->total_pkts_sent = rsp_tstats->total_pkts_sent;
1321                 tstats->total_bytes_sent = rsp_tstats->total_bytes_sent;
1322                 tstats->mcast_pkts_sent = rsp_tstats->mcast_pkts_sent;
1323                 tstats->bcast_pkts_sent = rsp_tstats->bcast_pkts_sent;
1324                 tstats->ctl_sent = rsp_tstats->ctl_sent;
1325                 /* Packets sent after one collision*/
1326                 tstats->one_collision_sent = rsp_tstats->one_collision_sent;
1327                 /* Packets sent after multiple collision*/
1328                 tstats->multi_collision_sent = rsp_tstats->multi_collision_sent;
1329                 /* Packets not sent due to max collisions */
1330                 tstats->max_collision_fail = rsp_tstats->max_collision_fail;
1331                 /* Packets not sent due to max deferrals */
1332                 tstats->max_deferral_fail = rsp_tstats->max_deferral_fail;
1333                 /* Accounts for over/under-run of buffers */
1334                 tstats->fifo_err = rsp_tstats->fifo_err;
1335                 tstats->runts = rsp_tstats->runts;
1336                 /* Total number of collisions detected */
1337                 tstats->total_collisions = rsp_tstats->total_collisions;
1338
1339                 /* firmware stats */
1340                 tstats->fw_total_sent = rsp_tstats->fw_total_sent;
1341                 tstats->fw_total_fwd = rsp_tstats->fw_total_fwd;
1342                 tstats->fw_total_mcast_sent = rsp_tstats->fw_total_mcast_sent;
1343                 tstats->fw_total_bcast_sent = rsp_tstats->fw_total_bcast_sent;
1344                 tstats->fw_err_pko = rsp_tstats->fw_err_pko;
1345                 tstats->fw_err_pki = rsp_tstats->fw_err_pki;
1346                 tstats->fw_err_link = rsp_tstats->fw_err_link;
1347                 tstats->fw_err_drop = rsp_tstats->fw_err_drop;
1348                 tstats->fw_tso = rsp_tstats->fw_tso;
1349                 tstats->fw_tso_fwd = rsp_tstats->fw_tso_fwd;
1350                 tstats->fw_err_tso = rsp_tstats->fw_err_tso;
1351                 tstats->fw_tx_vxlan = rsp_tstats->fw_tx_vxlan;
1352
1353                 resp->status = 1;
1354         } else {
1355                 dev_err(&oct_dev->pci_dev->dev, "sc OPCODE_NIC_PORT_STATS command failed\n");
1356                 resp->status = -1;
1357         }
1358 }
1359
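/**
 * \brief Fetch per-VF statistics from firmware and report MAC spoofing
 * @param lio per-network private data
 *
 * Sends an OPCODE_NIC_VF_PORT_STATS soft command on the first tx queue and,
 * if the response carries a non-zero spoofmac_cnt, logs a warning with the
 * number of spoofed packets detected.
 *
 * Returns 0 on success, or a non-zero error code if the soft command could
 * not be allocated, sent, or completed in time.
 */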
1360 int lio_fetch_vf_stats(struct lio *lio)
1361 {
1362         struct octeon_device *oct_dev = lio->oct_dev;
1363         struct octeon_soft_command *sc;
1364         struct oct_nic_vf_stats_resp *resp;
1365
1366         int retval;
1367
1368         /* Alloc soft command */
1369         sc = (struct octeon_soft_command *)
1370                 octeon_alloc_soft_command(oct_dev,
1371                                           0,
1372                                           sizeof(struct oct_nic_vf_stats_resp),
1373                                           0);
1374
1375         if (!sc) {
1376                 dev_err(&oct_dev->pci_dev->dev, "Soft command allocation failed\n");
1377                 retval = -ENOMEM;
1378                 goto lio_fetch_vf_stats_exit;
1379         }
1380
1381         resp = (struct oct_nic_vf_stats_resp *)sc->virtrptr;
1382         memset(resp, 0, sizeof(struct oct_nic_vf_stats_resp));
1383
1384         init_completion(&sc->complete);
1385         sc->sc_status = OCTEON_REQUEST_PENDING;
1386
1387         sc->iq_no = lio->linfo.txpciq[0].s.q_no;
1388
1389         octeon_prepare_soft_command(oct_dev, sc, OPCODE_NIC,
1390                                     OPCODE_NIC_VF_PORT_STATS, 0, 0, 0);
1391
1392         retval = octeon_send_soft_command(oct_dev, sc);
1393         if (retval == IQ_SEND_FAILED) {
1394                 octeon_free_soft_command(oct_dev, sc);
1395                 goto lio_fetch_vf_stats_exit;
1396         }
1397
1398         retval =
1399                 wait_for_sc_completion_timeout(oct_dev, sc,
1400                                                (2 * LIO_SC_MAX_TMO_MS));
1401         if (retval) {
1402                 dev_err(&oct_dev->pci_dev->dev,
1403                         "sc OPCODE_NIC_VF_PORT_STATS command failed\n");
1404                 goto lio_fetch_vf_stats_exit;
1405         }
1406
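                /* the counter arrives from firmware as one big-endian
                 * 64-bit word; swap it into host byte order
                 */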
1407         if (sc->sc_status != OCTEON_REQUEST_TIMEOUT && !resp->status) {
1408                 octeon_swap_8B_data((u64 *)&resp->spoofmac_cnt,
1409                                     (sizeof(u64)) >> 3);
1410
1411                 if (resp->spoofmac_cnt != 0) {
1412                         dev_warn(&oct_dev->pci_dev->dev,
1413                                  "%llu Spoofed packets detected\n",
1414                                  resp->spoofmac_cnt);
1415                 }
1416         }
1417         WRITE_ONCE(sc->caller_is_done, true);
1418
1419 lio_fetch_vf_stats_exit:
1420         return retval;
1421 }
1422
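/**
 * \brief Delayed-work handler that polls firmware for interface statistics
 * @param work the work_struct embedded in the interface's stats_wk
 *
 * Issues an OPCODE_NIC_PORT_STATS soft command and hands the response to
 * octnet_nic_stats_callback() to refresh the cached RX/TX counters.  On
 * CN23XX PF devices with LIQUIDIO_SPOOFCHK_CAP and VFs allocated, it also
 * fetches VF spoof-check statistics every LIO_VFSTATS_POLL invocations.
 * The work reschedules itself after LIQUIDIO_NDEV_STATS_POLL_TIME_MS while
 * the interface is RUNNING.
 */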
1423 void lio_fetch_stats(struct work_struct *work)
1424 {
1425         struct cavium_wk *wk = (struct cavium_wk *)work;
1426         struct lio *lio = wk->ctxptr;
1427         struct octeon_device *oct_dev = lio->oct_dev;
1428         struct octeon_soft_command *sc;
1429         struct oct_nic_stats_resp *resp;
1430         unsigned long time_in_jiffies;
1431         int retval;
1432
1433         if (OCTEON_CN23XX_PF(oct_dev)) {
1434                 /* report spoofchk every 2 seconds */
1435                 if (!(oct_dev->vfstats_poll % LIO_VFSTATS_POLL) &&
1436                     (oct_dev->fw_info.app_cap_flags & LIQUIDIO_SPOOFCHK_CAP) &&
1437                     oct_dev->sriov_info.num_vfs_alloced) {
1438                         lio_fetch_vf_stats(lio);
1439                 }
1440
1441                 oct_dev->vfstats_poll++;
1442         }
1443
1444         /* Alloc soft command */
1445         sc = (struct octeon_soft_command *)
1446                 octeon_alloc_soft_command(oct_dev,
1447                                           0,
1448                                           sizeof(struct oct_nic_stats_resp),
1449                                           0);
1450
1451         if (!sc) {
1452                 dev_err(&oct_dev->pci_dev->dev, "Soft command allocation failed\n");
1453                 goto lio_fetch_stats_exit;
1454         }
1455
1456         resp = (struct oct_nic_stats_resp *)sc->virtrptr;
1457         memset(resp, 0, sizeof(struct oct_nic_stats_resp));
1458
1459         init_completion(&sc->complete);
1460         sc->sc_status = OCTEON_REQUEST_PENDING;
1461
1462         sc->iq_no = lio->linfo.txpciq[0].s.q_no;
1463
1464         octeon_prepare_soft_command(oct_dev, sc, OPCODE_NIC,
1465                                     OPCODE_NIC_PORT_STATS, 0, 0, 0);
1466
1467         retval = octeon_send_soft_command(oct_dev, sc);
1468         if (retval == IQ_SEND_FAILED) {
1469                 octeon_free_soft_command(oct_dev, sc);
1470                 goto lio_fetch_stats_exit;
1471         }
1472
1473         retval = wait_for_sc_completion_timeout(oct_dev, sc,
1474                                                 (2 * LIO_SC_MAX_TMO_MS));
1475         if (retval) {
1476                 dev_err(&oct_dev->pci_dev->dev, "sc OPCODE_NIC_PORT_STATS command failed\n");
1477                 goto lio_fetch_stats_exit;
1478         }
1479
1480         octnet_nic_stats_callback(oct_dev, sc->sc_status, sc);
1481         WRITE_ONCE(sc->caller_is_done, true);
1482
1483 lio_fetch_stats_exit:
1484         time_in_jiffies = msecs_to_jiffies(LIQUIDIO_NDEV_STATS_POLL_TIME_MS);
1485         if (ifstate_check(lio, LIO_IFSTATE_RUNNING))
1486                 schedule_delayed_work(&lio->stats_wk.work, time_in_jiffies);
1487
1488         return;
1489 }
1490
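/**
 * \brief Set the link speed of a CN23XX PF interface
 * @param lio   per-network private data
 * @param speed requested link speed in Gbps (e.g. 10 or 25)
 *
 * Sends SEAPI_CMD_SPEED_SET to the firmware via OPCODE_NIC_UBOOT_CTL and
 * records the speed reported back in oct->speed_setting.  Supported only
 * on CN23XX PF devices; a no-op if the requested speed is already set.
 *
 * Returns 0 on success or a negative error code on failure.
 */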
1491 int liquidio_set_speed(struct lio *lio, int speed)
1492 {
1493         struct octeon_device *oct = lio->oct_dev;
1494         struct oct_nic_seapi_resp *resp;
1495         struct octeon_soft_command *sc;
1496         union octnet_cmd *ncmd;
1497         int retval;
1498         u32 var;
1499
1500         if (oct->speed_setting == speed)
1501                 return 0;
1502
1503         if (!OCTEON_CN23XX_PF(oct)) {
1504                 dev_err(&oct->pci_dev->dev, "%s: SET SPEED only for PF\n",
1505                         __func__);
1506                 return -EOPNOTSUPP;
1507         }
1508
1509         sc = octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE,
1510                                        sizeof(struct oct_nic_seapi_resp),
1511                                        0);
1512         if (!sc)
1513                 return -ENOMEM;
1514
1515         ncmd = sc->virtdptr;
1516         resp = sc->virtrptr;
1517         memset(resp, 0, sizeof(struct oct_nic_seapi_resp));
1518
1519         init_completion(&sc->complete);
1520         sc->sc_status = OCTEON_REQUEST_PENDING;
1521
1522         ncmd->u64 = 0;
1523         ncmd->s.cmd = SEAPI_CMD_SPEED_SET;
1524         ncmd->s.param1 = speed;
1525
1526         octeon_swap_8B_data((u64 *)ncmd, (OCTNET_CMD_SIZE >> 3));
1527
1528         sc->iq_no = lio->linfo.txpciq[0].s.q_no;
1529
1530         octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
1531                                     OPCODE_NIC_UBOOT_CTL, 0, 0, 0);
1532
1533         retval = octeon_send_soft_command(oct, sc);
1534         if (retval == IQ_SEND_FAILED) {
1535                 dev_info(&oct->pci_dev->dev, "Failed to send soft command\n");
1536                 octeon_free_soft_command(oct, sc);
1537                 retval = -EBUSY;
1538         } else {
1539                 /* Wait for response or timeout */
1540                 retval = wait_for_sc_completion_timeout(oct, sc, 0);
1541                 if (retval)
1542                         return retval;
1543
1544                 retval = resp->status;
1545
1546                 if (retval) {
1547                         dev_err(&oct->pci_dev->dev, "%s failed, retval=%d\n",
1548                                 __func__, retval);
1549                         WRITE_ONCE(sc->caller_is_done, true);
1550
1551                         return -EIO;
1552                 }
1553
1554                 var = be32_to_cpu((__force __be32)resp->speed);
1555                 if (var != speed) {
1556                         dev_err(&oct->pci_dev->dev,
1557                                 "%s: speed setting failed: got 0x%x, expected 0x%x\n",
1558                                 __func__, var, speed);
1559                 }
1560
1561                 oct->speed_setting = var;
1562                 WRITE_ONCE(sc->caller_is_done, true);
1563         }
1564
1565         return retval;
1566 }
1567
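/**
 * \brief Query the current link speed setting from firmware
 * @param lio per-network private data
 *
 * Sends SEAPI_CMD_SPEED_GET via OPCODE_NIC_UBOOT_CTL and caches the result
 * in oct->speed_setting.  If the firmware cannot read its boot variables
 * (it reports 0xffff), a default is chosen instead: 25 Gbps for the
 * CN2350/CN2360 25GB subsystems (and no_speed_setting is flagged), 10 Gbps
 * otherwise.
 *
 * Returns 0 on success or a non-zero error code on failure.
 */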
1568 int liquidio_get_speed(struct lio *lio)
1569 {
1570         struct octeon_device *oct = lio->oct_dev;
1571         struct oct_nic_seapi_resp *resp;
1572         struct octeon_soft_command *sc;
1573         union octnet_cmd *ncmd;
1574         int retval;
1575
1576         sc = octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE,
1577                                        sizeof(struct oct_nic_seapi_resp),
1578                                        0);
1579         if (!sc)
1580                 return -ENOMEM;
1581
1582         ncmd = sc->virtdptr;
1583         resp = sc->virtrptr;
1584         memset(resp, 0, sizeof(struct oct_nic_seapi_resp));
1585
1586         init_completion(&sc->complete);
1587         sc->sc_status = OCTEON_REQUEST_PENDING;
1588
1589         ncmd->u64 = 0;
1590         ncmd->s.cmd = SEAPI_CMD_SPEED_GET;
1591
1592         octeon_swap_8B_data((u64 *)ncmd, (OCTNET_CMD_SIZE >> 3));
1593
1594         sc->iq_no = lio->linfo.txpciq[0].s.q_no;
1595
1596         octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
1597                                     OPCODE_NIC_UBOOT_CTL, 0, 0, 0);
1598
1599         retval = octeon_send_soft_command(oct, sc);
1600         if (retval == IQ_SEND_FAILED) {
1601                 dev_info(&oct->pci_dev->dev, "Failed to send soft command\n");
1602                 octeon_free_soft_command(oct, sc);
1603                 retval = -EIO;
1604         } else {
1605                 retval = wait_for_sc_completion_timeout(oct, sc, 0);
1606                 if (retval)
1607                         return retval;
1608
1609                 retval = resp->status;
1610                 if (retval) {
1611                         dev_err(&oct->pci_dev->dev,
1612                                 "%s failed retval=%d\n", __func__, retval);
1613                         retval = -EIO;
1614                 } else {
1615                         u32 var;
1616
1617                         var = be32_to_cpu((__force __be32)resp->speed);
1618                         oct->speed_setting = var;
1619                         if (var == 0xffff) {
1620                         /* unable to access boot variables;
1621                                  * get the default value based on the NIC type
1622                                  */
1623                                 if (oct->subsystem_id ==
1624                                                 OCTEON_CN2350_25GB_SUBSYS_ID ||
1625                                     oct->subsystem_id ==
1626                                                 OCTEON_CN2360_25GB_SUBSYS_ID) {
1627                                         oct->no_speed_setting = 1;
1628                                         oct->speed_setting = 25;
1629                                 } else {
1630                                         oct->speed_setting = 10;
1631                                 }
1632                         }
1633
1634                 }
1635                 WRITE_ONCE(sc->caller_is_done, true);
1636         }
1637
1638         return retval;
1639 }