/*
 * Copyright 2015 Amazon.com, Inc. or its affiliates.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#ifdef CONFIG_RFS_ACCEL
#include <linux/cpu_rmap.h>
#endif /* CONFIG_RFS_ACCEL */
#include <linux/ethtool.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/numa.h>
#include <linux/pci.h>
#include <linux/utsname.h>
#include <linux/version.h>
#include <linux/vmalloc.h>

#include "ena_netdev.h"
#include <linux/bpf_trace.h>
#include "ena_pci_id_tbl.h"
static char version[] = DEVICE_NAME " v" DRV_MODULE_VERSION "\n";

MODULE_AUTHOR("Amazon.com, Inc. or its affiliates");
MODULE_DESCRIPTION(DEVICE_NAME);
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (5 * HZ)

#define ENA_NAPI_BUDGET 64

#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_IFUP | \
		NETIF_MSG_TX_DONE | NETIF_MSG_TX_ERR | NETIF_MSG_RX_ERR)
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
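
/* The "debug" parameter follows the standard netdev message-level
 * convention: it is a bitmap of NETIF_MSG_* values, and -1 means "use the
 * driver defaults" from DEFAULT_MSG_ENABLE above. The usual pattern (a
 * sketch of typical usage; the probe code is not part of this excerpt) is
 * adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE), so a
 * hypothetical "modprobe ena debug=3" would enable only NETIF_MSG_DRV and
 * NETIF_MSG_PROBE messages.
 */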
static struct ena_aenq_handlers aenq_handlers;

static struct workqueue_struct *ena_wq;

MODULE_DEVICE_TABLE(pci, ena_pci_tbl);

static int ena_rss_init_default(struct ena_adapter *adapter);
static void check_for_admin_com_state(struct ena_adapter *adapter);
static void ena_destroy_device(struct ena_adapter *adapter, bool graceful);
static int ena_restore_device(struct ena_adapter *adapter);

static void ena_init_io_rings(struct ena_adapter *adapter,
			      int first_index, int count);
static void ena_init_napi_in_range(struct ena_adapter *adapter, int first_index,
				   int count);
static void ena_del_napi_in_range(struct ena_adapter *adapter, int first_index,
				  int count);
static int ena_setup_tx_resources(struct ena_adapter *adapter, int qid);
static int ena_setup_tx_resources_in_range(struct ena_adapter *adapter,
					   int first_index, int count);
static int ena_create_io_tx_queue(struct ena_adapter *adapter, int qid);
static void ena_free_tx_resources(struct ena_adapter *adapter, int qid);
static int ena_clean_xdp_irq(struct ena_ring *xdp_ring, u32 budget);
static void ena_destroy_all_tx_queues(struct ena_adapter *adapter);
static void ena_free_all_io_tx_resources(struct ena_adapter *adapter);
static void ena_napi_disable_in_range(struct ena_adapter *adapter,
				      int first_index, int count);
static void ena_napi_enable_in_range(struct ena_adapter *adapter,
				     int first_index, int count);
static int ena_up(struct ena_adapter *adapter);
static void ena_down(struct ena_adapter *adapter);
static void ena_unmask_interrupt(struct ena_ring *tx_ring,
				 struct ena_ring *rx_ring);
static void ena_update_ring_numa_node(struct ena_ring *tx_ring,
				      struct ena_ring *rx_ring);
static void ena_unmap_tx_buff(struct ena_ring *tx_ring,
			      struct ena_tx_buffer *tx_info);
static int ena_create_io_tx_queues_in_range(struct ena_adapter *adapter,
					    int first_index, int count);
static void ena_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	struct ena_adapter *adapter = netdev_priv(dev);

	/* Change the state of the device to trigger reset.
	 * Check that we are not in the middle of a reset or that a reset
	 * was already triggered.
	 */
	if (test_and_set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))
		return;

	adapter->reset_reason = ENA_REGS_RESET_OS_NETDEV_WD;
	u64_stats_update_begin(&adapter->syncp);
	adapter->dev_stats.tx_timeout++;
	u64_stats_update_end(&adapter->syncp);

	netif_err(adapter, tx_err, dev, "Transmit timeout\n");
}
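
/* Note on the flow above: ena_tx_timeout() only records the reset reason and
 * sets ENA_FLAG_TRIGGER_RESET; the actual device reset is deferred to the
 * driver's periodic watchdog/service work (not shown in this excerpt), which
 * observes the flag and restarts the device outside of the timeout context.
 */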
static void update_rx_ring_mtu(struct ena_adapter *adapter, int mtu)
{
	int i;

	for (i = 0; i < adapter->num_io_queues; i++)
		adapter->rx_ring[i].mtu = mtu;
}

static int ena_change_mtu(struct net_device *dev, int new_mtu)
{
	struct ena_adapter *adapter = netdev_priv(dev);
	int ret;

	ret = ena_com_set_dev_mtu(adapter->ena_dev, new_mtu);
	if (!ret) {
		netif_dbg(adapter, drv, dev, "set MTU to %d\n", new_mtu);
		update_rx_ring_mtu(adapter, new_mtu);
		dev->mtu = new_mtu;
	} else {
		netif_err(adapter, drv, dev, "Failed to set MTU to %d\n",
			  new_mtu);
	}

	return ret;
}
static int ena_xmit_common(struct net_device *dev,
			   struct ena_ring *ring,
			   struct ena_tx_buffer *tx_info,
			   struct ena_com_tx_ctx *ena_tx_ctx,
			   u16 next_to_use,
			   u32 bytes)
{
	struct ena_adapter *adapter = netdev_priv(dev);
	int rc, nb_hw_desc;

	if (unlikely(ena_com_is_doorbell_needed(ring->ena_com_io_sq,
						ena_tx_ctx))) {
		netif_dbg(adapter, tx_queued, dev,
			  "llq tx max burst size of queue %d achieved, writing doorbell to send burst\n",
			  ring->qid);
		ena_com_write_sq_doorbell(ring->ena_com_io_sq);
	}

	/* prepare the packet's descriptors for the dma engine */
	rc = ena_com_prepare_tx(ring->ena_com_io_sq, ena_tx_ctx,
				&nb_hw_desc);

	/* In case there isn't enough space in the queue for the packet,
	 * we simply drop it. All other failure reasons of
	 * ena_com_prepare_tx() are fatal and therefore require a device reset.
	 */
	if (unlikely(rc)) {
		netif_err(adapter, tx_queued, dev,
			  "failed to prepare tx bufs\n");
		u64_stats_update_begin(&ring->syncp);
		ring->tx_stats.prepare_ctx_err++;
		u64_stats_update_end(&ring->syncp);
		if (rc != -ENOMEM) {
			adapter->reset_reason =
				ENA_REGS_RESET_DRIVER_INVALID_STATE;
			set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
		}
		return rc;
	}

	u64_stats_update_begin(&ring->syncp);
	ring->tx_stats.cnt++;
	ring->tx_stats.bytes += bytes;
	u64_stats_update_end(&ring->syncp);

	tx_info->tx_descs = nb_hw_desc;
	tx_info->last_jiffies = jiffies;
	tx_info->print_once = 0;

	ring->next_to_use = ENA_TX_RING_IDX_NEXT(next_to_use,
						 ring->ring_size);
	return 0;
}
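
/* Index-wrap note: ring sizes in this driver are powers of two, so the
 * ENA_*_RING_IDX_NEXT() helpers (defined in ena_netdev.h) can advance and
 * wrap an index with a simple mask instead of a modulo operation.
 */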
/* This is the XDP napi callback. XDP queues use a separate napi callback
 * from Rx/Tx queues.
 */
static int ena_xdp_io_poll(struct napi_struct *napi, int budget)
{
	struct ena_napi *ena_napi = container_of(napi, struct ena_napi, napi);
	u32 xdp_work_done, xdp_budget;
	struct ena_ring *xdp_ring;
	int napi_comp_call = 0;
	int ret;

	xdp_ring = ena_napi->xdp_ring;
	xdp_ring->first_interrupt = ena_napi->first_interrupt;

	xdp_budget = budget;

	if (!test_bit(ENA_FLAG_DEV_UP, &xdp_ring->adapter->flags) ||
	    test_bit(ENA_FLAG_TRIGGER_RESET, &xdp_ring->adapter->flags)) {
		napi_complete_done(napi, 0);
		return 0;
	}

	xdp_work_done = ena_clean_xdp_irq(xdp_ring, xdp_budget);

	/* If the device is about to reset or down, avoid unmasking
	 * the interrupt and return 0 so NAPI won't reschedule
	 */
	if (unlikely(!test_bit(ENA_FLAG_DEV_UP, &xdp_ring->adapter->flags))) {
		napi_complete_done(napi, 0);
		ret = 0;
	} else if (xdp_budget > xdp_work_done) {
		napi_comp_call = 1;
		if (napi_complete_done(napi, xdp_work_done))
			ena_unmask_interrupt(xdp_ring, NULL);
		ena_update_ring_numa_node(xdp_ring, NULL);
		ret = xdp_work_done;
	} else {
		ret = xdp_budget;
	}

	u64_stats_update_begin(&xdp_ring->syncp);
	xdp_ring->tx_stats.napi_comp += napi_comp_call;
	xdp_ring->tx_stats.tx_poll++;
	u64_stats_update_end(&xdp_ring->syncp);

	return ret;
}
static int ena_xdp_tx_map_buff(struct ena_ring *xdp_ring,
			       struct ena_tx_buffer *tx_info,
			       struct xdp_buff *xdp,
			       void **push_hdr,
			       u32 *push_len)
{
	struct ena_adapter *adapter = xdp_ring->adapter;
	struct ena_com_buf *ena_buf;
	dma_addr_t dma = 0;
	u32 size;

	tx_info->xdpf = convert_to_xdp_frame(xdp);
	size = tx_info->xdpf->len;
	ena_buf = tx_info->bufs;

	/* llq push buffer */
	*push_len = min_t(u32, size, xdp_ring->tx_max_header_size);
	*push_hdr = tx_info->xdpf->data;

	if (size - *push_len > 0) {
		dma = dma_map_single(xdp_ring->dev,
				     *push_hdr + *push_len,
				     size - *push_len,
				     DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(xdp_ring->dev, dma)))
			goto error_report_dma_error;

		tx_info->map_linear_data = 1;
		tx_info->num_of_bufs = 1;
	}

	ena_buf->paddr = dma;
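
	/* At this point only the remainder of the frame past the LLQ push
	 * header (the first tx_max_header_size bytes, which are sent inline
	 * with the descriptors) has been DMA-mapped; a frame that fits
	 * entirely within the push header needs no mapping at all.
	 */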
	ena_buf->len = size;

	return 0;

error_report_dma_error:
	u64_stats_update_begin(&xdp_ring->syncp);
	xdp_ring->tx_stats.dma_mapping_err++;
	u64_stats_update_end(&xdp_ring->syncp);
	netdev_warn(adapter->netdev, "failed to map xdp buff\n");

	xdp_return_frame_rx_napi(tx_info->xdpf);
	tx_info->xdpf = NULL;
	tx_info->num_of_bufs = 0;

	return -EINVAL;
}
static int ena_xdp_xmit_buff(struct net_device *dev,
			     struct xdp_buff *xdp,
			     int qid,
			     struct ena_rx_buffer *rx_info)
{
	struct ena_adapter *adapter = netdev_priv(dev);
	struct ena_com_tx_ctx ena_tx_ctx = {0};
	struct ena_tx_buffer *tx_info;
	struct ena_ring *xdp_ring;
	u16 next_to_use, req_id;
	int rc;
	void *push_hdr;
	u32 push_len;

	xdp_ring = &adapter->tx_ring[qid];
	next_to_use = xdp_ring->next_to_use;
	req_id = xdp_ring->free_ids[next_to_use];
	tx_info = &xdp_ring->tx_buffer_info[req_id];
	tx_info->num_of_bufs = 0;
	page_ref_inc(rx_info->page);
	tx_info->xdp_rx_page = rx_info->page;

	rc = ena_xdp_tx_map_buff(xdp_ring, tx_info, xdp, &push_hdr, &push_len);
	if (unlikely(rc))
		goto error_drop_packet;

	ena_tx_ctx.ena_bufs = tx_info->bufs;
	ena_tx_ctx.push_header = push_hdr;
	ena_tx_ctx.num_bufs = tx_info->num_of_bufs;
	ena_tx_ctx.req_id = req_id;
	ena_tx_ctx.header_len = push_len;

	rc = ena_xmit_common(dev,
			     xdp_ring,
			     tx_info,
			     &ena_tx_ctx,
			     next_to_use,
			     xdp->data_end - xdp->data);
	if (rc)
		goto error_unmap_dma;
	/* trigger the dma engine. ena_com_write_sq_doorbell()
	 * includes a memory barrier.
	 */
	ena_com_write_sq_doorbell(xdp_ring->ena_com_io_sq);
	u64_stats_update_begin(&xdp_ring->syncp);
	xdp_ring->tx_stats.doorbells++;
	u64_stats_update_end(&xdp_ring->syncp);

	return NETDEV_TX_OK;

error_unmap_dma:
	ena_unmap_tx_buff(xdp_ring, tx_info);
	tx_info->xdpf = NULL;
error_drop_packet:

	return NETDEV_TX_OK;
}
static int ena_xdp_execute(struct ena_ring *rx_ring,
			   struct xdp_buff *xdp,
			   struct ena_rx_buffer *rx_info)
{
	struct bpf_prog *xdp_prog;
	u32 verdict = XDP_PASS;

	rcu_read_lock();
	xdp_prog = READ_ONCE(rx_ring->xdp_bpf_prog);

	if (!xdp_prog)
		goto out;

	verdict = bpf_prog_run_xdp(xdp_prog, xdp);
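
	/* Handle the program's verdict. Only XDP_TX and XDP_ABORTED need
	 * driver action here; XDP_PASS and XDP_DROP are handled by the
	 * caller, and anything above XDP_TX (including XDP_REDIRECT, which
	 * this driver does not implement at this point) is reported as an
	 * invalid action.
	 */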
	if (verdict == XDP_TX)
		ena_xdp_xmit_buff(rx_ring->netdev,
				  xdp,
				  rx_ring->qid + rx_ring->adapter->num_io_queues,
				  rx_info);
	else if (unlikely(verdict == XDP_ABORTED))
		trace_xdp_exception(rx_ring->netdev, xdp_prog, verdict);
	else if (unlikely(verdict > XDP_TX))
		bpf_warn_invalid_xdp_action(verdict);
out:
	rcu_read_unlock();
	return verdict;
}
static void ena_init_all_xdp_queues(struct ena_adapter *adapter)
{
	adapter->xdp_first_ring = adapter->num_io_queues;
	adapter->xdp_num_queues = adapter->num_io_queues;
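
	/* The XDP TX rings reuse the tail of the tx_ring array: they occupy
	 * indices [num_io_queues, 2 * num_io_queues), so the XDP queue paired
	 * with RX queue i is tx_ring[i + num_io_queues] (see the qid passed
	 * to ena_xdp_xmit_buff() above).
	 */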
	ena_init_io_rings(adapter,
			  adapter->xdp_first_ring,
			  adapter->xdp_num_queues);
}
static int ena_setup_and_create_all_xdp_queues(struct ena_adapter *adapter)
{
	int rc = 0;

	rc = ena_setup_tx_resources_in_range(adapter, adapter->xdp_first_ring,
					     adapter->xdp_num_queues);
	if (rc)
		goto setup_err;

	rc = ena_create_io_tx_queues_in_range(adapter,
					      adapter->xdp_first_ring,
					      adapter->xdp_num_queues);
	if (rc)
		goto create_err;

	return 0;

create_err:
	ena_free_all_io_tx_resources(adapter);
setup_err:
	return rc;
}
/* Provides a way for both kernel and bpf-prog to know
 * more about the RX-queue a given XDP frame arrived on.
 */
static int ena_xdp_register_rxq_info(struct ena_ring *rx_ring)
{
	int rc;

	rc = xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev, rx_ring->qid);

	if (rc) {
		netif_err(rx_ring->adapter, ifup, rx_ring->netdev,
			  "Failed to register xdp rx queue info. RX queue num %d rc: %d\n",
			  rx_ring->qid, rc);
		goto err;
	}

	rc = xdp_rxq_info_reg_mem_model(&rx_ring->xdp_rxq, MEM_TYPE_PAGE_SHARED,
					NULL);

	if (rc) {
		netif_err(rx_ring->adapter, ifup, rx_ring->netdev,
			  "Failed to register xdp rx queue info memory model. RX queue num %d rc: %d\n",
			  rx_ring->qid, rc);
		xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
	}

err:
	return rc;
}

static void ena_xdp_unregister_rxq_info(struct ena_ring *rx_ring)
{
	xdp_rxq_info_unreg_mem_model(&rx_ring->xdp_rxq);
	xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
}
void ena_xdp_exchange_program_rx_in_range(struct ena_adapter *adapter,
					  struct bpf_prog *prog,
					  int first,
					  int count)
{
	struct ena_ring *rx_ring;
	int i = 0;

	for (i = first; i < count; i++) {
		rx_ring = &adapter->rx_ring[i];
		xchg(&rx_ring->xdp_bpf_prog, prog);
		if (prog) {
			ena_xdp_register_rxq_info(rx_ring);
			rx_ring->rx_headroom = XDP_PACKET_HEADROOM;
		} else {
			ena_xdp_unregister_rxq_info(rx_ring);
			rx_ring->rx_headroom = 0;
		}
	}
}

void ena_xdp_exchange_program(struct ena_adapter *adapter,
			      struct bpf_prog *prog)
{
	struct bpf_prog *old_bpf_prog = xchg(&adapter->xdp_bpf_prog, prog);

	ena_xdp_exchange_program_rx_in_range(adapter,
					     prog,
					     0,
					     adapter->num_io_queues);

	if (old_bpf_prog)
		bpf_prog_put(old_bpf_prog);
}
static int ena_destroy_and_free_all_xdp_queues(struct ena_adapter *adapter)
{
	bool was_up;
	int rc = 0;

	was_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags);

	if (was_up)
		ena_down(adapter);

	adapter->xdp_first_ring = 0;
	adapter->xdp_num_queues = 0;
	ena_xdp_exchange_program(adapter, NULL);
	if (was_up) {
		rc = ena_up(adapter);
		if (rc)
			return rc;
	}
	return 0;
}
static int ena_xdp_set(struct net_device *netdev, struct netdev_bpf *bpf)
{
	struct ena_adapter *adapter = netdev_priv(netdev);
	struct bpf_prog *prog = bpf->prog;
	struct bpf_prog *old_bpf_prog;
	int rc, prev_mtu;
	bool is_up;

	is_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags);
	rc = ena_xdp_allowed(adapter);
	if (rc == ENA_XDP_ALLOWED) {
		old_bpf_prog = adapter->xdp_bpf_prog;
		if (prog) {
			if (!is_up) {
				ena_init_all_xdp_queues(adapter);
			} else if (!old_bpf_prog) {
				ena_down(adapter);
				ena_init_all_xdp_queues(adapter);
			}
			ena_xdp_exchange_program(adapter, prog);

			if (is_up && !old_bpf_prog) {
				rc = ena_up(adapter);
				if (rc)
					return rc;
			}
		} else if (old_bpf_prog) {
			rc = ena_destroy_and_free_all_xdp_queues(adapter);
			if (rc)
				return rc;
		}

		prev_mtu = netdev->max_mtu;
		netdev->max_mtu = prog ? ENA_XDP_MAX_MTU : adapter->max_mtu;

		if (!old_bpf_prog)
			netif_info(adapter, drv, adapter->netdev,
				   "xdp program set, changing the max_mtu from %d to %d",
				   prev_mtu, netdev->max_mtu);

	} else if (rc == ENA_XDP_CURRENT_MTU_TOO_LARGE) {
		netif_err(adapter, drv, adapter->netdev,
			  "Failed to set xdp program, the current MTU (%d) is larger than the maximum allowed MTU (%lu) while xdp is on",
			  netdev->mtu, ENA_XDP_MAX_MTU);
		NL_SET_ERR_MSG_MOD(bpf->extack,
				   "Failed to set xdp program, the current MTU is larger than the maximum allowed MTU. Check the dmesg for more info");
		return -EINVAL;
	} else if (rc == ENA_XDP_NO_ENOUGH_QUEUES) {
		netif_err(adapter, drv, adapter->netdev,
			  "Failed to set xdp program, the Rx/Tx channel count should be at most half of the maximum allowed channel count. The current queue count (%d), the maximal queue count (%d)\n",
			  adapter->num_io_queues, adapter->max_num_io_queues);
		NL_SET_ERR_MSG_MOD(bpf->extack,
				   "Failed to set xdp program, there is not enough space for allocating XDP queues. Check the dmesg for more info");
		return -EINVAL;
	}

	return 0;
}
/* This is the main xdp callback, it's used by the kernel to set/unset the xdp
 * program as well as to query the current xdp program id.
 */
static int ena_xdp(struct net_device *netdev, struct netdev_bpf *bpf)
{
	struct ena_adapter *adapter = netdev_priv(netdev);

	switch (bpf->command) {
	case XDP_SETUP_PROG:
		return ena_xdp_set(netdev, bpf);
	case XDP_QUERY_PROG:
		bpf->prog_id = adapter->xdp_bpf_prog ?
			adapter->xdp_bpf_prog->aux->id : 0;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
static int ena_init_rx_cpu_rmap(struct ena_adapter *adapter)
{
#ifdef CONFIG_RFS_ACCEL
	u32 i;
	int rc;

	adapter->netdev->rx_cpu_rmap = alloc_irq_cpu_rmap(adapter->num_io_queues);
	if (!adapter->netdev->rx_cpu_rmap)
		return -ENOMEM;
	for (i = 0; i < adapter->num_io_queues; i++) {
		int irq_idx = ENA_IO_IRQ_IDX(i);

		rc = irq_cpu_rmap_add(adapter->netdev->rx_cpu_rmap,
				      pci_irq_vector(adapter->pdev, irq_idx));
		if (rc) {
			free_irq_cpu_rmap(adapter->netdev->rx_cpu_rmap);
			adapter->netdev->rx_cpu_rmap = NULL;
			return rc;
		}
	}
#endif /* CONFIG_RFS_ACCEL */
	return 0;
}
static void ena_init_io_rings_common(struct ena_adapter *adapter,
				     struct ena_ring *ring, u16 qid)
{
	ring->qid = qid;
	ring->pdev = adapter->pdev;
	ring->dev = &adapter->pdev->dev;
	ring->netdev = adapter->netdev;
	ring->napi = &adapter->ena_napi[qid].napi;
	ring->adapter = adapter;
	ring->ena_dev = adapter->ena_dev;
	ring->per_napi_packets = 0;
	ring->cpu = 0;
	ring->first_interrupt = false;
	ring->no_interrupt_event_cnt = 0;
	u64_stats_init(&ring->syncp);
}
static void ena_init_io_rings(struct ena_adapter *adapter,
			      int first_index, int count)
{
	struct ena_com_dev *ena_dev;
	struct ena_ring *txr, *rxr;
	int i;

	ena_dev = adapter->ena_dev;

	for (i = first_index; i < first_index + count; i++) {
		txr = &adapter->tx_ring[i];
		rxr = &adapter->rx_ring[i];

		/* TX common ring state */
		ena_init_io_rings_common(adapter, txr, i);

		/* TX specific ring state */
		txr->ring_size = adapter->requested_tx_ring_size;
		txr->tx_max_header_size = ena_dev->tx_max_header_size;
		txr->tx_mem_queue_type = ena_dev->tx_mem_queue_type;
		txr->sgl_size = adapter->max_tx_sgl_size;
		txr->smoothed_interval =
			ena_com_get_nonadaptive_moderation_interval_tx(ena_dev);

		/* Don't init RX queues for xdp queues */
		if (!ENA_IS_XDP_INDEX(adapter, i)) {
			/* RX common ring state */
			ena_init_io_rings_common(adapter, rxr, i);

			/* RX specific ring state */
			rxr->ring_size = adapter->requested_rx_ring_size;
			rxr->rx_copybreak = adapter->rx_copybreak;
			rxr->sgl_size = adapter->max_rx_sgl_size;
			rxr->smoothed_interval =
				ena_com_get_nonadaptive_moderation_interval_rx(ena_dev);
			rxr->empty_rx_queue = 0;
			adapter->ena_napi[i].dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
		}
	}
}
/* ena_setup_tx_resources - allocate I/O Tx resources (Descriptors)
 * @adapter: network interface device structure
 * @qid: queue index
 *
 * Return 0 on success, negative on failure
 */
static int ena_setup_tx_resources(struct ena_adapter *adapter, int qid)
{
	struct ena_ring *tx_ring = &adapter->tx_ring[qid];
	struct ena_irq *ena_irq = &adapter->irq_tbl[ENA_IO_IRQ_IDX(qid)];
	int size, i, node;

	if (tx_ring->tx_buffer_info) {
		netif_err(adapter, ifup,
			  adapter->netdev, "tx_buffer_info is not NULL");
		return -EEXIST;
	}

	size = sizeof(struct ena_tx_buffer) * tx_ring->ring_size;
	node = cpu_to_node(ena_irq->cpu);

	tx_ring->tx_buffer_info = vzalloc_node(size, node);
	if (!tx_ring->tx_buffer_info) {
		tx_ring->tx_buffer_info = vzalloc(size);
		if (!tx_ring->tx_buffer_info)
			goto err_tx_buffer_info;
	}

	size = sizeof(u16) * tx_ring->ring_size;
	tx_ring->free_ids = vzalloc_node(size, node);
	if (!tx_ring->free_ids) {
		tx_ring->free_ids = vzalloc(size);
		if (!tx_ring->free_ids)
			goto err_tx_free_ids;
	}

	size = tx_ring->tx_max_header_size;
	tx_ring->push_buf_intermediate_buf = vzalloc_node(size, node);
	if (!tx_ring->push_buf_intermediate_buf) {
		tx_ring->push_buf_intermediate_buf = vzalloc(size);
		if (!tx_ring->push_buf_intermediate_buf)
			goto err_push_buf_intermediate_buf;
	}

	/* Req id ring for TX out of order completions */
	for (i = 0; i < tx_ring->ring_size; i++)
		tx_ring->free_ids[i] = i;
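
	/* free_ids is a ring of free req_ids: because TX completions can
	 * arrive out of order, a slot in tx_buffer_info is addressed by the
	 * req_id echoed back by the device rather than by SQ position, and
	 * req_ids are recycled through this ring as completions are reaped.
	 */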
	/* Reset tx statistics */
	memset(&tx_ring->tx_stats, 0x0, sizeof(tx_ring->tx_stats));

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
	tx_ring->cpu = ena_irq->cpu;
	return 0;

err_push_buf_intermediate_buf:
	vfree(tx_ring->free_ids);
	tx_ring->free_ids = NULL;
err_tx_free_ids:
	vfree(tx_ring->tx_buffer_info);
	tx_ring->tx_buffer_info = NULL;
err_tx_buffer_info:
	return -ENOMEM;
}
/* ena_free_tx_resources - Free I/O Tx Resources per Queue
 * @adapter: network interface device structure
 * @qid: queue index
 *
 * Free all transmit software resources
 */
static void ena_free_tx_resources(struct ena_adapter *adapter, int qid)
{
	struct ena_ring *tx_ring = &adapter->tx_ring[qid];

	vfree(tx_ring->tx_buffer_info);
	tx_ring->tx_buffer_info = NULL;

	vfree(tx_ring->free_ids);
	tx_ring->free_ids = NULL;

	vfree(tx_ring->push_buf_intermediate_buf);
	tx_ring->push_buf_intermediate_buf = NULL;
}
static int ena_setup_tx_resources_in_range(struct ena_adapter *adapter,
					   int first_index,
					   int count)
{
	int i, rc = 0;

	for (i = first_index; i < first_index + count; i++) {
		rc = ena_setup_tx_resources(adapter, i);
		if (rc)
			goto err_setup_tx;
	}

	return 0;

err_setup_tx:

	netif_err(adapter, ifup, adapter->netdev,
		  "Tx queue %d: allocation failed\n", i);

	/* rewind the index freeing the rings as we go */
	while (first_index < i--)
		ena_free_tx_resources(adapter, i);
	return rc;
}
static void ena_free_all_io_tx_resources_in_range(struct ena_adapter *adapter,
						  int first_index, int count)
{
	int i;

	for (i = first_index; i < first_index + count; i++)
		ena_free_tx_resources(adapter, i);
}

/* ena_free_all_io_tx_resources - Free I/O Tx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all transmit software resources
 */
static void ena_free_all_io_tx_resources(struct ena_adapter *adapter)
{
	ena_free_all_io_tx_resources_in_range(adapter,
					      0,
					      adapter->xdp_num_queues +
					      adapter->num_io_queues);
}
static int validate_rx_req_id(struct ena_ring *rx_ring, u16 req_id)
{
	if (likely(req_id < rx_ring->ring_size))
		return 0;

	netif_err(rx_ring->adapter, rx_err, rx_ring->netdev,
		  "Invalid rx req_id: %hu\n", req_id);

	u64_stats_update_begin(&rx_ring->syncp);
	rx_ring->rx_stats.bad_req_id++;
	u64_stats_update_end(&rx_ring->syncp);

	/* Trigger device reset */
	rx_ring->adapter->reset_reason = ENA_REGS_RESET_INV_RX_REQ_ID;
	set_bit(ENA_FLAG_TRIGGER_RESET, &rx_ring->adapter->flags);
	return -EFAULT;
}
/* ena_setup_rx_resources - allocate I/O Rx resources (Descriptors)
 * @adapter: network interface device structure
 * @qid: queue index
 *
 * Returns 0 on success, negative on failure
 */
static int ena_setup_rx_resources(struct ena_adapter *adapter,
				  u32 qid)
{
	struct ena_ring *rx_ring = &adapter->rx_ring[qid];
	struct ena_irq *ena_irq = &adapter->irq_tbl[ENA_IO_IRQ_IDX(qid)];
	int size, node, i;

	if (rx_ring->rx_buffer_info) {
		netif_err(adapter, ifup, adapter->netdev,
			  "rx_buffer_info is not NULL");
		return -EEXIST;
	}

	/* alloc an extra element so that in the rx path
	 * we can always prefetch rx_info + 1
	 */
	size = sizeof(struct ena_rx_buffer) * (rx_ring->ring_size + 1);
	node = cpu_to_node(ena_irq->cpu);

	rx_ring->rx_buffer_info = vzalloc_node(size, node);
	if (!rx_ring->rx_buffer_info) {
		rx_ring->rx_buffer_info = vzalloc(size);
		if (!rx_ring->rx_buffer_info)
			return -ENOMEM;
	}

	size = sizeof(u16) * rx_ring->ring_size;
	rx_ring->free_ids = vzalloc_node(size, node);
	if (!rx_ring->free_ids) {
		rx_ring->free_ids = vzalloc(size);
		if (!rx_ring->free_ids) {
			vfree(rx_ring->rx_buffer_info);
			rx_ring->rx_buffer_info = NULL;
			return -ENOMEM;
		}
	}

	/* Req id ring for receiving RX pkts out of order */
	for (i = 0; i < rx_ring->ring_size; i++)
		rx_ring->free_ids[i] = i;

	/* Reset rx statistics */
	memset(&rx_ring->rx_stats, 0x0, sizeof(rx_ring->rx_stats));

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;
	rx_ring->cpu = ena_irq->cpu;

	return 0;
}
/* ena_free_rx_resources - Free I/O Rx Resources
 * @adapter: network interface device structure
 * @qid: queue index
 *
 * Free all receive software resources
 */
static void ena_free_rx_resources(struct ena_adapter *adapter,
				  u32 qid)
{
	struct ena_ring *rx_ring = &adapter->rx_ring[qid];

	vfree(rx_ring->rx_buffer_info);
	rx_ring->rx_buffer_info = NULL;

	vfree(rx_ring->free_ids);
	rx_ring->free_ids = NULL;
}

/* ena_setup_all_rx_resources - allocate I/O Rx queues resources for all queues
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 */
static int ena_setup_all_rx_resources(struct ena_adapter *adapter)
{
	int i, rc = 0;

	for (i = 0; i < adapter->num_io_queues; i++) {
		rc = ena_setup_rx_resources(adapter, i);
		if (rc)
			goto err_setup_rx;
	}

	return 0;

err_setup_rx:

	netif_err(adapter, ifup, adapter->netdev,
		  "Rx queue %d: allocation failed\n", i);

	/* rewind the index freeing the rings as we go */
	while (i--)
		ena_free_rx_resources(adapter, i);
	return rc;
}

/* ena_free_all_io_rx_resources - Free I/O Rx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all receive software resources
 */
static void ena_free_all_io_rx_resources(struct ena_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_io_queues; i++)
		ena_free_rx_resources(adapter, i);
}
static int ena_alloc_rx_page(struct ena_ring *rx_ring,
			     struct ena_rx_buffer *rx_info, gfp_t gfp)
{
	struct ena_com_buf *ena_buf;
	struct page *page;
	dma_addr_t dma;

	/* if the previously allocated page has not been used, reuse it */
	if (unlikely(rx_info->page))
		return 0;

	page = alloc_page(gfp);
	if (unlikely(!page)) {
		u64_stats_update_begin(&rx_ring->syncp);
		rx_ring->rx_stats.page_alloc_fail++;
		u64_stats_update_end(&rx_ring->syncp);
		return -ENOMEM;
	}

	dma = dma_map_page(rx_ring->dev, page, 0, ENA_PAGE_SIZE,
			   DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(rx_ring->dev, dma))) {
		u64_stats_update_begin(&rx_ring->syncp);
		rx_ring->rx_stats.dma_mapping_err++;
		u64_stats_update_end(&rx_ring->syncp);

		__free_page(page);
		return -EIO;
	}
	netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
		  "alloc page %p, rx_info %p\n", page, rx_info);

	rx_info->page = page;
	rx_info->page_offset = 0;
	ena_buf = &rx_info->ena_buf;
	ena_buf->paddr = dma + rx_ring->rx_headroom;
	ena_buf->len = ENA_PAGE_SIZE - rx_ring->rx_headroom;
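
	/* rx_headroom is XDP_PACKET_HEADROOM when an XDP program is attached
	 * (see ena_xdp_exchange_program_rx_in_range()) and 0 otherwise, so
	 * the buffer handed to the device leaves room in front of the frame
	 * for an XDP program to grow headers.
	 */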
	return 0;
}

static void ena_free_rx_page(struct ena_ring *rx_ring,
			     struct ena_rx_buffer *rx_info)
{
	struct page *page = rx_info->page;
	struct ena_com_buf *ena_buf = &rx_info->ena_buf;

	if (unlikely(!page)) {
		netif_warn(rx_ring->adapter, rx_err, rx_ring->netdev,
			   "Trying to free unallocated buffer\n");
		return;
	}

	dma_unmap_page(rx_ring->dev,
		       ena_buf->paddr - rx_ring->rx_headroom,
		       ENA_PAGE_SIZE,
		       DMA_FROM_DEVICE);

	__free_page(page);
	rx_info->page = NULL;
}
static int ena_refill_rx_bufs(struct ena_ring *rx_ring, u32 num)
{
	u16 next_to_use, req_id;
	u32 i;
	int rc;

	next_to_use = rx_ring->next_to_use;

	for (i = 0; i < num; i++) {
		struct ena_rx_buffer *rx_info;

		req_id = rx_ring->free_ids[next_to_use];

		rx_info = &rx_ring->rx_buffer_info[req_id];

		rc = ena_alloc_rx_page(rx_ring, rx_info,
				       GFP_ATOMIC | __GFP_COMP);
		if (unlikely(rc < 0)) {
			netif_warn(rx_ring->adapter, rx_err, rx_ring->netdev,
				   "failed to alloc buffer for rx queue %d\n",
				   rx_ring->qid);
			break;
		}
		rc = ena_com_add_single_rx_desc(rx_ring->ena_com_io_sq,
						&rx_info->ena_buf,
						req_id);
		if (unlikely(rc)) {
			netif_warn(rx_ring->adapter, rx_status, rx_ring->netdev,
				   "failed to add buffer for rx queue %d\n",
				   rx_ring->qid);
			break;
		}
		next_to_use = ENA_RX_RING_IDX_NEXT(next_to_use,
						   rx_ring->ring_size);
	}

	if (unlikely(i < num)) {
		u64_stats_update_begin(&rx_ring->syncp);
		rx_ring->rx_stats.refil_partial++;
		u64_stats_update_end(&rx_ring->syncp);
		netdev_warn(rx_ring->netdev,
			    "refilled rx qid %d with only %d buffers (from %d)\n",
			    rx_ring->qid, i, num);
	}

	/* ena_com_write_sq_doorbell issues a wmb() */
	if (likely(i))
		ena_com_write_sq_doorbell(rx_ring->ena_com_io_sq);

	rx_ring->next_to_use = next_to_use;

	return i;
}
static void ena_free_rx_bufs(struct ena_adapter *adapter,
			     u32 qid)
{
	struct ena_ring *rx_ring = &adapter->rx_ring[qid];
	u32 i;

	for (i = 0; i < rx_ring->ring_size; i++) {
		struct ena_rx_buffer *rx_info = &rx_ring->rx_buffer_info[i];

		if (rx_info->page)
			ena_free_rx_page(rx_ring, rx_info);
	}
}

/* ena_refill_all_rx_bufs - allocate all queues Rx buffers
 * @adapter: board private structure
 */
static void ena_refill_all_rx_bufs(struct ena_adapter *adapter)
{
	struct ena_ring *rx_ring;
	int i, rc, bufs_num;

	for (i = 0; i < adapter->num_io_queues; i++) {
		rx_ring = &adapter->rx_ring[i];
		bufs_num = rx_ring->ring_size - 1;
		rc = ena_refill_rx_bufs(rx_ring, bufs_num);

		if (unlikely(rc != bufs_num))
			netif_warn(rx_ring->adapter, rx_status, rx_ring->netdev,
				   "refilling Queue %d failed. allocated %d buffers from: %d\n",
				   i, rc, bufs_num);
	}
}

static void ena_free_all_rx_bufs(struct ena_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_io_queues; i++)
		ena_free_rx_bufs(adapter, i);
}
static void ena_unmap_tx_buff(struct ena_ring *tx_ring,
			      struct ena_tx_buffer *tx_info)
{
	struct ena_com_buf *ena_buf;
	u32 cnt;
	int i;

	ena_buf = tx_info->bufs;
	cnt = tx_info->num_of_bufs;

	if (unlikely(!cnt))
		return;

	if (tx_info->map_linear_data) {
		dma_unmap_single(tx_ring->dev,
				 dma_unmap_addr(ena_buf, paddr),
				 dma_unmap_len(ena_buf, len),
				 DMA_TO_DEVICE);
		ena_buf++;
		cnt--;
	}

	/* unmap remaining mapped pages */
	for (i = 0; i < cnt; i++) {
		dma_unmap_page(tx_ring->dev, dma_unmap_addr(ena_buf, paddr),
			       dma_unmap_len(ena_buf, len), DMA_TO_DEVICE);
		ena_buf++;
	}
}
/* ena_free_tx_bufs - Free Tx Buffers per Queue
 * @tx_ring: TX ring for which buffers are to be freed
 */
static void ena_free_tx_bufs(struct ena_ring *tx_ring)
{
	bool print_once = true;
	u32 i;

	for (i = 0; i < tx_ring->ring_size; i++) {
		struct ena_tx_buffer *tx_info = &tx_ring->tx_buffer_info[i];

		if (!tx_info->skb)
			continue;

		if (print_once) {
			netdev_notice(tx_ring->netdev,
				      "free uncompleted tx skb qid %d idx 0x%x\n",
				      tx_ring->qid, i);
			print_once = false;
		} else {
			netdev_dbg(tx_ring->netdev,
				   "free uncompleted tx skb qid %d idx 0x%x\n",
				   tx_ring->qid, i);
		}

		ena_unmap_tx_buff(tx_ring, tx_info);

		dev_kfree_skb_any(tx_info->skb);
	}
	netdev_tx_reset_queue(netdev_get_tx_queue(tx_ring->netdev,
						  tx_ring->qid));
}

static void ena_free_all_tx_bufs(struct ena_adapter *adapter)
{
	struct ena_ring *tx_ring;
	int i;

	for (i = 0; i < adapter->num_io_queues + adapter->xdp_num_queues; i++) {
		tx_ring = &adapter->tx_ring[i];
		ena_free_tx_bufs(tx_ring);
	}
}
static void ena_destroy_all_tx_queues(struct ena_adapter *adapter)
{
	u16 ena_qid;
	int i;

	for (i = 0; i < adapter->num_io_queues + adapter->xdp_num_queues; i++) {
		ena_qid = ENA_IO_TXQ_IDX(i);
		ena_com_destroy_io_queue(adapter->ena_dev, ena_qid);
	}
}

static void ena_destroy_all_rx_queues(struct ena_adapter *adapter)
{
	u16 ena_qid;
	int i;

	for (i = 0; i < adapter->num_io_queues; i++) {
		ena_qid = ENA_IO_RXQ_IDX(i);
		cancel_work_sync(&adapter->ena_napi[i].dim.work);
		ena_com_destroy_io_queue(adapter->ena_dev, ena_qid);
	}
}

static void ena_destroy_all_io_queues(struct ena_adapter *adapter)
{
	ena_destroy_all_tx_queues(adapter);
	ena_destroy_all_rx_queues(adapter);
}
static int handle_invalid_req_id(struct ena_ring *ring, u16 req_id,
				 struct ena_tx_buffer *tx_info, bool is_xdp)
{
	if (tx_info)
		netif_err(ring->adapter,
			  tx_done,
			  ring->netdev,
			  "tx_info doesn't have valid %s",
			  is_xdp ? "xdp frame" : "skb");
	else
		netif_err(ring->adapter,
			  tx_done,
			  ring->netdev,
			  "Invalid req_id: %hu\n",
			  req_id);

	u64_stats_update_begin(&ring->syncp);
	ring->tx_stats.bad_req_id++;
	u64_stats_update_end(&ring->syncp);

	/* Trigger device reset */
	ring->adapter->reset_reason = ENA_REGS_RESET_INV_TX_REQ_ID;
	set_bit(ENA_FLAG_TRIGGER_RESET, &ring->adapter->flags);
	return -EFAULT;
}

static int validate_tx_req_id(struct ena_ring *tx_ring, u16 req_id)
{
	struct ena_tx_buffer *tx_info = NULL;

	if (likely(req_id < tx_ring->ring_size)) {
		tx_info = &tx_ring->tx_buffer_info[req_id];
		if (likely(tx_info->skb))
			return 0;
	}

	return handle_invalid_req_id(tx_ring, req_id, tx_info, false);
}

static int validate_xdp_req_id(struct ena_ring *xdp_ring, u16 req_id)
{
	struct ena_tx_buffer *tx_info = NULL;

	if (likely(req_id < xdp_ring->ring_size)) {
		tx_info = &xdp_ring->tx_buffer_info[req_id];
		if (likely(tx_info->xdpf))
			return 0;
	}

	return handle_invalid_req_id(xdp_ring, req_id, tx_info, true);
}
static int ena_clean_tx_irq(struct ena_ring *tx_ring, u32 budget)
{
	struct netdev_queue *txq;
	bool above_thresh;
	u32 tx_bytes = 0;
	u32 total_done = 0;
	u16 next_to_clean;
	u16 req_id;
	int tx_pkts = 0;
	int rc;

	next_to_clean = tx_ring->next_to_clean;
	txq = netdev_get_tx_queue(tx_ring->netdev, tx_ring->qid);

	while (tx_pkts < budget) {
		struct ena_tx_buffer *tx_info;
		struct sk_buff *skb;

		rc = ena_com_tx_comp_req_id_get(tx_ring->ena_com_io_cq,
						&req_id);
		if (rc)
			break;

		rc = validate_tx_req_id(tx_ring, req_id);
		if (rc)
			break;

		tx_info = &tx_ring->tx_buffer_info[req_id];
		skb = tx_info->skb;

		/* prefetch skb_end_pointer() to speed up skb_shinfo(skb) */
		prefetch(&skb->end);

		tx_info->skb = NULL;
		tx_info->last_jiffies = 0;

		ena_unmap_tx_buff(tx_ring, tx_info);

		netif_dbg(tx_ring->adapter, tx_done, tx_ring->netdev,
			  "tx_poll: q %d skb %p completed\n", tx_ring->qid,
			  skb);

		tx_bytes += skb->len;
		dev_kfree_skb(skb);
		tx_pkts++;
		total_done += tx_info->tx_descs;

		tx_ring->free_ids[next_to_clean] = req_id;
		next_to_clean = ENA_TX_RING_IDX_NEXT(next_to_clean,
						     tx_ring->ring_size);
	}

	tx_ring->next_to_clean = next_to_clean;
	ena_com_comp_ack(tx_ring->ena_com_io_sq, total_done);
	ena_com_update_dev_comp_head(tx_ring->ena_com_io_cq);

	netdev_tx_completed_queue(txq, tx_pkts, tx_bytes);

	netif_dbg(tx_ring->adapter, tx_done, tx_ring->netdev,
		  "tx_poll: q %d done. total pkts: %d\n",
		  tx_ring->qid, tx_pkts);

	/* need to make the ring's circular update visible to
	 * ena_start_xmit() before checking for netif_queue_stopped().
	 */
	smp_mb();
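
	/* Lost-wakeup avoidance: the stopped check below is racy against
	 * ena_start_xmit() stopping the queue concurrently, so both the
	 * stopped state and the free-space threshold are re-checked under
	 * the tx queue lock before the queue is actually woken.
	 */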
	above_thresh = ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq,
						    ENA_TX_WAKEUP_THRESH);
	if (unlikely(netif_tx_queue_stopped(txq) && above_thresh)) {
		__netif_tx_lock(txq, smp_processor_id());
		above_thresh =
			ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq,
						     ENA_TX_WAKEUP_THRESH);
		if (netif_tx_queue_stopped(txq) && above_thresh &&
		    test_bit(ENA_FLAG_DEV_UP, &tx_ring->adapter->flags)) {
			netif_tx_wake_queue(txq);
			u64_stats_update_begin(&tx_ring->syncp);
			tx_ring->tx_stats.queue_wakeup++;
			u64_stats_update_end(&tx_ring->syncp);
		}
		__netif_tx_unlock(txq);
	}

	return tx_pkts;
}
static struct sk_buff *ena_alloc_skb(struct ena_ring *rx_ring, bool frags)
{
	struct sk_buff *skb;

	if (frags)
		skb = napi_get_frags(rx_ring->napi);
	else
		skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
						rx_ring->rx_copybreak);

	if (unlikely(!skb)) {
		u64_stats_update_begin(&rx_ring->syncp);
		rx_ring->rx_stats.skb_alloc_fail++;
		u64_stats_update_end(&rx_ring->syncp);
		netif_dbg(rx_ring->adapter, rx_err, rx_ring->netdev,
			  "Failed to allocate skb. frags: %d\n", frags);
	}

	return skb;
}
static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring,
				  struct ena_com_rx_buf_info *ena_bufs,
				  u32 descs,
				  u16 *next_to_clean)
{
	struct sk_buff *skb;
	struct ena_rx_buffer *rx_info;
	u16 len, req_id, buf = 0;
	void *va;
	int rc;

	len = ena_bufs[buf].len;
	req_id = ena_bufs[buf].req_id;

	rc = validate_rx_req_id(rx_ring, req_id);
	if (unlikely(rc < 0))
		return NULL;

	rx_info = &rx_ring->rx_buffer_info[req_id];

	if (unlikely(!rx_info->page)) {
		netif_err(rx_ring->adapter, rx_err, rx_ring->netdev,
			  "Page is NULL\n");
		return NULL;
	}

	netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
		  "rx_info %p page %p\n",
		  rx_info, rx_info->page);

	/* save virt address of first buffer */
	va = page_address(rx_info->page) + rx_info->page_offset;
	prefetch(va + NET_IP_ALIGN);

	if (len <= rx_ring->rx_copybreak) {
		skb = ena_alloc_skb(rx_ring, false);
		if (unlikely(!skb))
			return NULL;

		netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
			  "rx allocated small packet. len %d. data_len %d\n",
			  skb->len, skb->data_len);

		/* sync this buffer for CPU use */
		dma_sync_single_for_cpu(rx_ring->dev,
					dma_unmap_addr(&rx_info->ena_buf, paddr),
					len,
					DMA_FROM_DEVICE);
		skb_copy_to_linear_data(skb, va, len);
		dma_sync_single_for_device(rx_ring->dev,
					   dma_unmap_addr(&rx_info->ena_buf, paddr),
					   len,
					   DMA_FROM_DEVICE);

		skb_put(skb, len);
		skb->protocol = eth_type_trans(skb, rx_ring->netdev);
		rx_ring->free_ids[*next_to_clean] = req_id;
		*next_to_clean = ENA_RX_RING_IDX_ADD(*next_to_clean, descs,
						     rx_ring->ring_size);
		return skb;
	}

	skb = ena_alloc_skb(rx_ring, true);
	if (unlikely(!skb))
		return NULL;

	do {
		dma_unmap_page(rx_ring->dev,
			       dma_unmap_addr(&rx_info->ena_buf, paddr),
			       ENA_PAGE_SIZE, DMA_FROM_DEVICE);

		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_info->page,
				rx_info->page_offset, len, ENA_PAGE_SIZE);

		netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
			  "rx skb updated. len %d. data_len %d\n",
			  skb->len, skb->data_len);

		rx_info->page = NULL;

		rx_ring->free_ids[*next_to_clean] = req_id;
		*next_to_clean =
			ENA_RX_RING_IDX_NEXT(*next_to_clean,
					     rx_ring->ring_size);
		if (likely(--descs == 0))
			break;

		buf++;
		len = ena_bufs[buf].len;
		req_id = ena_bufs[buf].req_id;

		rc = validate_rx_req_id(rx_ring, req_id);
		if (unlikely(rc < 0))
			return NULL;

		rx_info = &rx_ring->rx_buffer_info[req_id];
	} while (1);

	return skb;
}
/* ena_rx_checksum - indicate in skb if hw indicated a good cksum
 * @rx_ring: ring on which the packet was received
 * @ena_rx_ctx: received packet context/metadata
 * @skb: skb currently being received and modified
 */
static void ena_rx_checksum(struct ena_ring *rx_ring,
			    struct ena_com_rx_ctx *ena_rx_ctx,
			    struct sk_buff *skb)
{
	/* Rx csum disabled */
	if (unlikely(!(rx_ring->netdev->features & NETIF_F_RXCSUM))) {
		skb->ip_summed = CHECKSUM_NONE;
		return;
	}

	/* For fragmented packets the checksum isn't valid */
	if (ena_rx_ctx->frag) {
		skb->ip_summed = CHECKSUM_NONE;
		return;
	}

	/* if IPv4 and a header checksum error was reported */
	if (unlikely((ena_rx_ctx->l3_proto == ENA_ETH_IO_L3_PROTO_IPV4) &&
		     (ena_rx_ctx->l3_csum_err))) {
		/* ipv4 checksum error */
		skb->ip_summed = CHECKSUM_NONE;
		u64_stats_update_begin(&rx_ring->syncp);
		rx_ring->rx_stats.bad_csum++;
		u64_stats_update_end(&rx_ring->syncp);
		netif_dbg(rx_ring->adapter, rx_err, rx_ring->netdev,
			  "RX IPv4 header checksum error\n");
		return;
	}

	/* if TCP/UDP */
	if (likely((ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_TCP) ||
		   (ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_UDP))) {
		if (unlikely(ena_rx_ctx->l4_csum_err)) {
			/* TCP/UDP checksum error */
			u64_stats_update_begin(&rx_ring->syncp);
			rx_ring->rx_stats.bad_csum++;
			u64_stats_update_end(&rx_ring->syncp);
			netif_dbg(rx_ring->adapter, rx_err, rx_ring->netdev,
				  "RX L4 checksum error\n");
			skb->ip_summed = CHECKSUM_NONE;
			return;
		}

		if (likely(ena_rx_ctx->l4_csum_checked)) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			u64_stats_update_begin(&rx_ring->syncp);
			rx_ring->rx_stats.csum_good++;
			u64_stats_update_end(&rx_ring->syncp);
		} else {
			u64_stats_update_begin(&rx_ring->syncp);
			rx_ring->rx_stats.csum_unchecked++;
			u64_stats_update_end(&rx_ring->syncp);
			skb->ip_summed = CHECKSUM_NONE;
		}
	} else {
		skb->ip_summed = CHECKSUM_NONE;
		return;
	}
}
static void ena_set_rx_hash(struct ena_ring *rx_ring,
			    struct ena_com_rx_ctx *ena_rx_ctx,
			    struct sk_buff *skb)
{
	enum pkt_hash_types hash_type;

	if (likely(rx_ring->netdev->features & NETIF_F_RXHASH)) {
		if (likely((ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_TCP) ||
			   (ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_UDP)))
			hash_type = PKT_HASH_TYPE_L4;
		else
			hash_type = PKT_HASH_TYPE_NONE;

		/* Override hash type if the packet is fragmented */
		if (ena_rx_ctx->frag)
			hash_type = PKT_HASH_TYPE_NONE;

		skb_set_hash(skb, ena_rx_ctx->hash, hash_type);
	}
}
int ena_xdp_handle_buff(struct ena_ring *rx_ring, struct xdp_buff *xdp)
{
	struct ena_rx_buffer *rx_info;
	int ret;

	rx_info = &rx_ring->rx_buffer_info[rx_ring->ena_bufs[0].req_id];
	xdp->data = page_address(rx_info->page) +
		rx_info->page_offset + rx_ring->rx_headroom;
	xdp_set_data_meta_invalid(xdp);
	xdp->data_hard_start = page_address(rx_info->page);
	xdp->data_end = xdp->data + rx_ring->ena_bufs[0].len;
	/* If for some reason we received a bigger packet than
	 * we expect, then we simply drop it
	 */
	if (unlikely(rx_ring->ena_bufs[0].len > ENA_XDP_MAX_MTU))
		return XDP_DROP;

	ret = ena_xdp_execute(rx_ring, xdp, rx_info);

	/* The xdp program might expand the headers */
	if (ret == XDP_PASS) {
		rx_info->page_offset = xdp->data - xdp->data_hard_start;
		rx_ring->ena_bufs[0].len = xdp->data_end - xdp->data;
	}

	return ret;
}
/* ena_clean_rx_irq - Cleanup RX irq
 * @rx_ring: RX ring to clean
 * @napi: napi handler
 * @budget: how many packets driver is allowed to clean
 *
 * Returns the number of cleaned buffers.
 */
static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi,
			    u32 budget)
{
	u16 next_to_clean = rx_ring->next_to_clean;
	struct ena_com_rx_ctx ena_rx_ctx;
	struct ena_adapter *adapter;
	u32 res_budget, work_done;
	int rx_copybreak_pkt = 0;
	int refill_threshold;
	struct sk_buff *skb;
	int refill_required;
	struct xdp_buff xdp;
	int total_len = 0;
	int xdp_verdict;
	int rc = 0;
	int i;

	netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
		  "%s qid %d\n", __func__, rx_ring->qid);
	res_budget = budget;
	xdp.rxq = &rx_ring->xdp_rxq;

	do {
		xdp_verdict = XDP_PASS;
		skb = NULL;
		ena_rx_ctx.ena_bufs = rx_ring->ena_bufs;
		ena_rx_ctx.max_bufs = rx_ring->sgl_size;
		ena_rx_ctx.descs = 0;
		rc = ena_com_rx_pkt(rx_ring->ena_com_io_cq,
				    rx_ring->ena_com_io_sq,
				    &ena_rx_ctx);
		if (unlikely(rc))
			goto error;

		if (unlikely(ena_rx_ctx.descs == 0))
			break;

		netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
			  "rx_poll: q %d got packet from ena. descs #: %d l3 proto %d l4 proto %d hash: %x\n",
			  rx_ring->qid, ena_rx_ctx.descs, ena_rx_ctx.l3_proto,
			  ena_rx_ctx.l4_proto, ena_rx_ctx.hash);

		if (ena_xdp_present_ring(rx_ring))
			xdp_verdict = ena_xdp_handle_buff(rx_ring, &xdp);

		/* allocate skb and fill it */
		if (xdp_verdict == XDP_PASS)
			skb = ena_rx_skb(rx_ring,
					 rx_ring->ena_bufs,
					 ena_rx_ctx.descs,
					 &next_to_clean);

		if (unlikely(!skb)) {
			if (xdp_verdict == XDP_TX) {
				ena_free_rx_page(rx_ring,
						 &rx_ring->rx_buffer_info[rx_ring->ena_bufs[0].req_id]);
				res_budget--;
			}
			for (i = 0; i < ena_rx_ctx.descs; i++) {
				rx_ring->free_ids[next_to_clean] =
					rx_ring->ena_bufs[i].req_id;
				next_to_clean =
					ENA_RX_RING_IDX_NEXT(next_to_clean,
							     rx_ring->ring_size);
			}
			if (xdp_verdict == XDP_TX || xdp_verdict == XDP_DROP)
				continue;
			break;
		}

		ena_rx_checksum(rx_ring, &ena_rx_ctx, skb);

		ena_set_rx_hash(rx_ring, &ena_rx_ctx, skb);

		skb_record_rx_queue(skb, rx_ring->qid);
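
		/* Small packets were copied into the skb's linear area by
		 * ena_rx_skb() (the rx_copybreak path) and complete via
		 * napi_gro_receive(); larger packets live as page frags on a
		 * napi_get_frags() skb and complete via napi_gro_frags().
		 */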
		if (rx_ring->ena_bufs[0].len <= rx_ring->rx_copybreak) {
			total_len += rx_ring->ena_bufs[0].len;
			rx_copybreak_pkt++;
			napi_gro_receive(napi, skb);
		} else {
			total_len += skb->len;
			napi_gro_frags(napi);
		}

		res_budget--;
	} while (likely(res_budget));

	work_done = budget - res_budget;
	rx_ring->per_napi_packets += work_done;
	u64_stats_update_begin(&rx_ring->syncp);
	rx_ring->rx_stats.bytes += total_len;
	rx_ring->rx_stats.cnt += work_done;
	rx_ring->rx_stats.rx_copybreak_pkt += rx_copybreak_pkt;
	u64_stats_update_end(&rx_ring->syncp);

	rx_ring->next_to_clean = next_to_clean;

	refill_required = ena_com_free_desc(rx_ring->ena_com_io_sq);
	refill_threshold =
		min_t(int, rx_ring->ring_size / ENA_RX_REFILL_THRESH_DIVIDER,
		      ENA_RX_REFILL_THRESH_PACKET);

	/* Optimization, try to batch new rx buffers */
	if (refill_required > refill_threshold) {
		ena_com_update_dev_comp_head(rx_ring->ena_com_io_cq);
		ena_refill_rx_bufs(rx_ring, refill_required);
	}

	return work_done;

error:
	adapter = netdev_priv(rx_ring->netdev);

	u64_stats_update_begin(&rx_ring->syncp);
	rx_ring->rx_stats.bad_desc_num++;
	u64_stats_update_end(&rx_ring->syncp);

	/* Too many desc from the device. Trigger reset */
	adapter->reset_reason = ENA_REGS_RESET_TOO_MANY_RX_DESCS;
	set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);

	return 0;
}
static void ena_dim_work(struct work_struct *w)
{
	struct dim *dim = container_of(w, struct dim, work);
	struct dim_cq_moder cur_moder =
		net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
	struct ena_napi *ena_napi = container_of(dim, struct ena_napi, dim);

	ena_napi->rx_ring->smoothed_interval = cur_moder.usec;
	dim->state = DIM_START_MEASURE;
}
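
/* DIM plumbing note: ena_adjust_adaptive_rx_intr_moderation() below feeds
 * per-NAPI traffic samples to net_dim(), which schedules this worker when it
 * selects a new moderation profile; the chosen interval is then written to
 * the device the next time the interrupt is unmasked (see
 * ena_unmask_interrupt()).
 */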
static void ena_adjust_adaptive_rx_intr_moderation(struct ena_napi *ena_napi)
{
	struct dim_sample dim_sample;
	struct ena_ring *rx_ring = ena_napi->rx_ring;

	if (!rx_ring->per_napi_packets)
		return;

	rx_ring->non_empty_napi_events++;

	dim_update_sample(rx_ring->non_empty_napi_events,
			  rx_ring->rx_stats.cnt,
			  rx_ring->rx_stats.bytes,
			  &dim_sample);

	net_dim(&ena_napi->dim, dim_sample);

	rx_ring->per_napi_packets = 0;
}
static void ena_unmask_interrupt(struct ena_ring *tx_ring,
				 struct ena_ring *rx_ring)
{
	struct ena_eth_io_intr_reg intr_reg;
	u32 rx_interval = 0;
	/* Rx ring can be NULL for XDP tx queues, which don't have an
	 * accompanying rx_ring pair.
	 */
	if (rx_ring)
		rx_interval = ena_com_get_adaptive_moderation_enabled(rx_ring->ena_dev) ?
			rx_ring->smoothed_interval :
			ena_com_get_nonadaptive_moderation_interval_rx(rx_ring->ena_dev);

	/* Update intr register: rx intr delay,
	 * tx intr delay and interrupt unmask
	 */
	ena_com_update_intr_reg(&intr_reg,
				rx_interval,
				tx_ring->smoothed_interval,
				true);

	/* It is a shared MSI-X vector. The Tx and Rx CQs both have a pointer
	 * to it, so we use one of them to reach the interrupt register.
	 * The Tx ring is used because rx_ring is NULL for XDP queues.
	 */
	ena_com_unmask_intr(tx_ring->ena_com_io_cq, &intr_reg);
}
static void ena_update_ring_numa_node(struct ena_ring *tx_ring,
				      struct ena_ring *rx_ring)
{
	int cpu = get_cpu();
	int numa_node;

	/* Check only one ring since the 2 rings are running on the same cpu */
	if (likely(tx_ring->cpu == cpu))
		goto out;

	numa_node = cpu_to_node(cpu);
	put_cpu();

	if (numa_node != NUMA_NO_NODE) {
		ena_com_update_numa_node(tx_ring->ena_com_io_cq, numa_node);
		if (rx_ring)
			ena_com_update_numa_node(rx_ring->ena_com_io_cq,
						 numa_node);
	}

	tx_ring->cpu = cpu;

	return;
out:
	put_cpu();
}
static int ena_clean_xdp_irq(struct ena_ring *xdp_ring, u32 budget)
{
	u32 total_done = 0;
	u16 next_to_clean;
	u32 tx_bytes = 0;
	int tx_pkts = 0;
	u16 req_id;
	int rc;

	if (unlikely(!xdp_ring))
		return 0;
	next_to_clean = xdp_ring->next_to_clean;

	while (tx_pkts < budget) {
		struct ena_tx_buffer *tx_info;
		struct xdp_frame *xdpf;

		rc = ena_com_tx_comp_req_id_get(xdp_ring->ena_com_io_cq,
						&req_id);
		if (rc)
			break;

		rc = validate_xdp_req_id(xdp_ring, req_id);
		if (rc)
			break;

		tx_info = &xdp_ring->tx_buffer_info[req_id];
		xdpf = tx_info->xdpf;

		tx_info->xdpf = NULL;
		tx_info->last_jiffies = 0;
		ena_unmap_tx_buff(xdp_ring, tx_info);

		netif_dbg(xdp_ring->adapter, tx_done, xdp_ring->netdev,
			  "tx_poll: q %d skb %p completed\n", xdp_ring->qid,
			  xdpf);

		tx_bytes += xdpf->len;
		tx_pkts++;
		total_done += tx_info->tx_descs;

		__free_page(tx_info->xdp_rx_page);
		xdp_ring->free_ids[next_to_clean] = req_id;
		next_to_clean = ENA_TX_RING_IDX_NEXT(next_to_clean,
						     xdp_ring->ring_size);
	}

	xdp_ring->next_to_clean = next_to_clean;
	ena_com_comp_ack(xdp_ring->ena_com_io_sq, total_done);
	ena_com_update_dev_comp_head(xdp_ring->ena_com_io_cq);

	netif_dbg(xdp_ring->adapter, tx_done, xdp_ring->netdev,
		  "tx_poll: q %d done. total pkts: %d\n",
		  xdp_ring->qid, tx_pkts);

	return tx_pkts;
}
static int ena_io_poll(struct napi_struct *napi, int budget)
{
	struct ena_napi *ena_napi = container_of(napi, struct ena_napi, napi);
	struct ena_ring *tx_ring, *rx_ring;
	int tx_work_done;
	int rx_work_done = 0;
	int tx_budget;
	int napi_comp_call = 0;
	int ret;

	tx_ring = ena_napi->tx_ring;
	rx_ring = ena_napi->rx_ring;

	tx_ring->first_interrupt = ena_napi->first_interrupt;
	rx_ring->first_interrupt = ena_napi->first_interrupt;

	tx_budget = tx_ring->ring_size / ENA_TX_POLL_BUDGET_DIVIDER;

	if (!test_bit(ENA_FLAG_DEV_UP, &tx_ring->adapter->flags) ||
	    test_bit(ENA_FLAG_TRIGGER_RESET, &tx_ring->adapter->flags)) {
		napi_complete_done(napi, 0);
		return 0;
	}

	tx_work_done = ena_clean_tx_irq(tx_ring, tx_budget);
	/* On netpoll the budget is zero and the handler should only clean the
	 * tx completions.
	 */
	if (likely(budget))
		rx_work_done = ena_clean_rx_irq(rx_ring, napi, budget);

	/* If the device is about to reset or down, avoid unmasking
	 * the interrupt and return 0 so NAPI won't reschedule
	 */
	if (unlikely(!test_bit(ENA_FLAG_DEV_UP, &tx_ring->adapter->flags) ||
		     test_bit(ENA_FLAG_TRIGGER_RESET, &tx_ring->adapter->flags))) {
		napi_complete_done(napi, 0);
		ret = 0;

	} else if ((budget > rx_work_done) && (tx_budget > tx_work_done)) {
		napi_comp_call = 1;

		/* Update numa and unmask the interrupt only when scheduled
		 * from the interrupt context (vs from sk_busy_loop)
		 */
		if (napi_complete_done(napi, rx_work_done)) {
			/* We apply adaptive moderation on Rx path only.
			 * Tx uses static interrupt moderation.
			 */
			if (ena_com_get_adaptive_moderation_enabled(rx_ring->ena_dev))
				ena_adjust_adaptive_rx_intr_moderation(ena_napi);

			ena_unmask_interrupt(tx_ring, rx_ring);
		}

		ena_update_ring_numa_node(tx_ring, rx_ring);

		ret = rx_work_done;
	} else {
		ret = budget;
	}

	u64_stats_update_begin(&tx_ring->syncp);
	tx_ring->tx_stats.napi_comp += napi_comp_call;
	tx_ring->tx_stats.tx_poll++;
	u64_stats_update_end(&tx_ring->syncp);

	return ret;
}
static irqreturn_t ena_intr_msix_mgmnt(int irq, void *data)
{
	struct ena_adapter *adapter = (struct ena_adapter *)data;

	ena_com_admin_q_comp_intr_handler(adapter->ena_dev);

	/* Don't call the aenq handler before probe is done */
	if (likely(test_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags)))
		ena_com_aenq_intr_handler(adapter->ena_dev, data);

	return IRQ_HANDLED;
}

/* ena_intr_msix_io - MSI-X Interrupt Handler for Tx/Rx
 * @irq: interrupt number
 * @data: pointer to a network interface private napi device structure
 */
static irqreturn_t ena_intr_msix_io(int irq, void *data)
{
	struct ena_napi *ena_napi = data;

	ena_napi->first_interrupt = true;

	napi_schedule_irqoff(&ena_napi->napi);

	return IRQ_HANDLED;
}
/* Reserve a single MSI-X vector for management (admin + aenq),
 * plus reserve one vector for each potential io queue.
 * The number of potential io queues is the minimum of what the device
 * supports and the number of vCPUs.
 */
static int ena_enable_msix(struct ena_adapter *adapter)
{
	int msix_vecs, irq_cnt;

	if (test_bit(ENA_FLAG_MSIX_ENABLED, &adapter->flags)) {
		netif_err(adapter, probe, adapter->netdev,
			  "Error, MSI-X is already enabled\n");
		return -EPERM;
	}

	/* Reserve the max msix vectors we might need */
	msix_vecs = ENA_MAX_MSIX_VEC(adapter->max_num_io_queues);
	netif_dbg(adapter, probe, adapter->netdev,
		  "trying to enable MSI-X, vectors %d\n", msix_vecs);

	irq_cnt = pci_alloc_irq_vectors(adapter->pdev, ENA_MIN_MSIX_VEC,
					msix_vecs, PCI_IRQ_MSIX);

	if (irq_cnt < 0) {
		netif_err(adapter, probe, adapter->netdev,
			  "Failed to enable MSI-X. irq_cnt %d\n", irq_cnt);
		return -ENOSPC;
	}

	if (irq_cnt != msix_vecs) {
		netif_notice(adapter, probe, adapter->netdev,
			     "enable only %d MSI-X (out of %d), reduce the number of queues\n",
			     irq_cnt, msix_vecs);
		adapter->num_io_queues = irq_cnt - ENA_ADMIN_MSIX_VEC;
	}

	if (ena_init_rx_cpu_rmap(adapter))
		netif_warn(adapter, probe, adapter->netdev,
			   "Failed to map IRQs to CPUs\n");

	adapter->msix_vecs = irq_cnt;
	set_bit(ENA_FLAG_MSIX_ENABLED, &adapter->flags);

	return 0;
}
static void ena_setup_mgmnt_intr(struct ena_adapter *adapter)
{
	u32 cpu;

	snprintf(adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].name,
		 ENA_IRQNAME_SIZE, "ena-mgmnt@pci:%s",
		 pci_name(adapter->pdev));
	adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].handler =
		ena_intr_msix_mgmnt;
	adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].data = adapter;
	adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].vector =
		pci_irq_vector(adapter->pdev, ENA_MGMNT_IRQ_IDX);
	cpu = cpumask_first(cpu_online_mask);
	adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].cpu = cpu;
	cpumask_set_cpu(cpu,
			&adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].affinity_hint_mask);
}
static void ena_setup_io_intr(struct ena_adapter *adapter)
{
	struct net_device *netdev;
	int irq_idx, i, cpu;
	int io_queue_count;

	netdev = adapter->netdev;
	io_queue_count = adapter->num_io_queues + adapter->xdp_num_queues;

	for (i = 0; i < io_queue_count; i++) {
		irq_idx = ENA_IO_IRQ_IDX(i);
		cpu = i % num_online_cpus();
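
		/* IO vectors are spread round-robin over the online CPUs;
		 * the affinity hint set below is consumed by
		 * irq_set_affinity_hint() when the IRQ is actually requested
		 * in ena_request_io_irq().
		 */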
		snprintf(adapter->irq_tbl[irq_idx].name, ENA_IRQNAME_SIZE,
			 "%s-Tx-Rx-%d", netdev->name, i);
		adapter->irq_tbl[irq_idx].handler = ena_intr_msix_io;
		adapter->irq_tbl[irq_idx].data = &adapter->ena_napi[i];
		adapter->irq_tbl[irq_idx].vector =
			pci_irq_vector(adapter->pdev, irq_idx);
		adapter->irq_tbl[irq_idx].cpu = cpu;

		cpumask_set_cpu(cpu,
				&adapter->irq_tbl[irq_idx].affinity_hint_mask);
	}
}
static int ena_request_mgmnt_irq(struct ena_adapter *adapter)
{
	unsigned long flags = 0;
	struct ena_irq *irq;
	int rc;

	irq = &adapter->irq_tbl[ENA_MGMNT_IRQ_IDX];
	rc = request_irq(irq->vector, irq->handler, flags, irq->name,
			 irq->data);
	if (rc) {
		netif_err(adapter, probe, adapter->netdev,
			  "failed to request admin irq\n");
		return rc;
	}

	netif_dbg(adapter, probe, adapter->netdev,
		  "set affinity hint of mgmnt irq to 0x%lx (irq vector: %d)\n",
		  irq->affinity_hint_mask.bits[0], irq->vector);

	irq_set_affinity_hint(irq->vector, &irq->affinity_hint_mask);

	return rc;
}
static int ena_request_io_irq(struct ena_adapter *adapter)
{
	u32 io_queue_count = adapter->num_io_queues + adapter->xdp_num_queues;
	unsigned long flags = 0;
	struct ena_irq *irq;
	int rc = 0, i, k;

	if (!test_bit(ENA_FLAG_MSIX_ENABLED, &adapter->flags)) {
		netif_err(adapter, ifup, adapter->netdev,
			  "Failed to request I/O IRQ: MSI-X is not enabled\n");
		return -EINVAL;
	}

	for (i = ENA_IO_IRQ_FIRST_IDX; i < ENA_MAX_MSIX_VEC(io_queue_count); i++) {
		irq = &adapter->irq_tbl[i];
		rc = request_irq(irq->vector, irq->handler, flags, irq->name,
				 irq->data);
		if (rc) {
			netif_err(adapter, ifup, adapter->netdev,
				  "Failed to request I/O IRQ. index %d rc %d\n",
				  i, rc);
			goto err;
		}

		netif_dbg(adapter, ifup, adapter->netdev,
			  "set affinity hint of irq. index %d to 0x%lx (irq vector: %d)\n",
			  i, irq->affinity_hint_mask.bits[0], irq->vector);

		irq_set_affinity_hint(irq->vector, &irq->affinity_hint_mask);
	}

	return rc;

err:
	for (k = ENA_IO_IRQ_FIRST_IDX; k < i; k++) {
		irq = &adapter->irq_tbl[k];
		free_irq(irq->vector, irq->data);
	}

	return rc;
}
static void ena_free_mgmnt_irq(struct ena_adapter *adapter)
{
	struct ena_irq *irq;

	irq = &adapter->irq_tbl[ENA_MGMNT_IRQ_IDX];
	synchronize_irq(irq->vector);
	irq_set_affinity_hint(irq->vector, NULL);
	free_irq(irq->vector, irq->data);
}

static void ena_free_io_irq(struct ena_adapter *adapter)
{
	u32 io_queue_count = adapter->num_io_queues + adapter->xdp_num_queues;
	struct ena_irq *irq;
	int i;

#ifdef CONFIG_RFS_ACCEL
	if (adapter->msix_vecs >= 1) {
		free_irq_cpu_rmap(adapter->netdev->rx_cpu_rmap);
		adapter->netdev->rx_cpu_rmap = NULL;
	}
#endif /* CONFIG_RFS_ACCEL */

	for (i = ENA_IO_IRQ_FIRST_IDX; i < ENA_MAX_MSIX_VEC(io_queue_count); i++) {
		irq = &adapter->irq_tbl[i];
		irq_set_affinity_hint(irq->vector, NULL);
		free_irq(irq->vector, irq->data);
	}
}

static void ena_disable_msix(struct ena_adapter *adapter)
{
	if (test_and_clear_bit(ENA_FLAG_MSIX_ENABLED, &adapter->flags))
		pci_free_irq_vectors(adapter->pdev);
}
static void ena_disable_io_intr_sync(struct ena_adapter *adapter)
{
	u32 io_queue_count = adapter->num_io_queues + adapter->xdp_num_queues;
	int i;

	if (!netif_running(adapter->netdev))
		return;

	for (i = ENA_IO_IRQ_FIRST_IDX; i < ENA_MAX_MSIX_VEC(io_queue_count); i++)
		synchronize_irq(adapter->irq_tbl[i].vector);
}
static void ena_del_napi_in_range(struct ena_adapter *adapter,
				  int first_index,
				  int count)
{
	int i;

	for (i = first_index; i < first_index + count; i++) {
		/* Check if napi was initialized before */
		if (!ENA_IS_XDP_INDEX(adapter, i) ||
		    adapter->ena_napi[i].xdp_ring)
			netif_napi_del(&adapter->ena_napi[i].napi);
		else
			WARN_ON(ENA_IS_XDP_INDEX(adapter, i) &&
				adapter->ena_napi[i].xdp_ring);
	}
}
static void ena_init_napi_in_range(struct ena_adapter *adapter,
				   int first_index, int count)
{
	struct ena_napi *napi = {0};
	int i;

	for (i = first_index; i < first_index + count; i++) {
		napi = &adapter->ena_napi[i];

		netif_napi_add(adapter->netdev,
			       &adapter->ena_napi[i].napi,
			       ENA_IS_XDP_INDEX(adapter, i) ? ena_xdp_io_poll : ena_io_poll,
			       ENA_NAPI_BUDGET);

		if (!ENA_IS_XDP_INDEX(adapter, i)) {
			napi->rx_ring = &adapter->rx_ring[i];
			napi->tx_ring = &adapter->tx_ring[i];
		} else {
			napi->xdp_ring = &adapter->tx_ring[i];
		}
		napi->qid = i;
	}
}
static void ena_napi_disable_in_range(struct ena_adapter *adapter,
				      int first_index,
				      int count)
{
	int i;

	for (i = first_index; i < first_index + count; i++)
		napi_disable(&adapter->ena_napi[i].napi);
}

static void ena_napi_enable_in_range(struct ena_adapter *adapter,
				     int first_index,
				     int count)
{
	int i;

	for (i = first_index; i < first_index + count; i++)
		napi_enable(&adapter->ena_napi[i].napi);
}
/* Configure the Rx forwarding */
static int ena_rss_configure(struct ena_adapter *adapter)
{
	struct ena_com_dev *ena_dev = adapter->ena_dev;
	int rc;

	/* In case the RSS table wasn't initialized by probe */
	if (!ena_dev->rss.tbl_log_size) {
		rc = ena_rss_init_default(adapter);
		if (rc && (rc != -EOPNOTSUPP)) {
			netif_err(adapter, ifup, adapter->netdev,
				  "Failed to init RSS rc: %d\n", rc);
			return rc;
		}
	}

	/* Set indirect table */
	rc = ena_com_indirect_table_set(ena_dev);
	if (unlikely(rc && rc != -EOPNOTSUPP))
		return rc;

	/* Configure hash function (if supported) */
	rc = ena_com_set_hash_function(ena_dev);
	if (unlikely(rc && (rc != -EOPNOTSUPP)))
		return rc;

	/* Configure hash inputs (if supported) */
	rc = ena_com_set_hash_ctrl(ena_dev);
	if (unlikely(rc && (rc != -EOPNOTSUPP)))
		return rc;

	return 0;
}
static int ena_up_complete(struct ena_adapter *adapter)
{
	int rc;

	rc = ena_rss_configure(adapter);
	if (rc)
		return rc;

	ena_change_mtu(adapter->netdev, adapter->netdev->mtu);

	ena_refill_all_rx_bufs(adapter);

	/* enable transmits */
	netif_tx_start_all_queues(adapter->netdev);

	ena_napi_enable_in_range(adapter,
				 0,
				 adapter->xdp_num_queues + adapter->num_io_queues);

	return 0;
}
static int ena_create_io_tx_queue(struct ena_adapter *adapter, int qid)
{
	struct ena_com_create_io_ctx ctx;
	struct ena_com_dev *ena_dev;
	struct ena_ring *tx_ring;
	u32 msix_vector;
	u16 ena_qid;
	int rc;

	ena_dev = adapter->ena_dev;

	tx_ring = &adapter->tx_ring[qid];
	msix_vector = ENA_IO_IRQ_IDX(qid);
	ena_qid = ENA_IO_TXQ_IDX(qid);

	memset(&ctx, 0x0, sizeof(ctx));

	ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_TX;
	ctx.qid = ena_qid;
	ctx.mem_queue_type = ena_dev->tx_mem_queue_type;
	ctx.msix_vector = msix_vector;
	ctx.queue_size = tx_ring->ring_size;
	ctx.numa_node = cpu_to_node(tx_ring->cpu);

	rc = ena_com_create_io_queue(ena_dev, &ctx);
	if (rc) {
		netif_err(adapter, ifup, adapter->netdev,
			  "Failed to create I/O TX queue num %d rc: %d\n",
			  qid, rc);
		return rc;
	}

	rc = ena_com_get_io_handlers(ena_dev, ena_qid,
				     &tx_ring->ena_com_io_sq,
				     &tx_ring->ena_com_io_cq);
	if (rc) {
		netif_err(adapter, ifup, adapter->netdev,
			  "Failed to get TX queue handlers. TX queue num %d rc: %d\n",
			  qid, rc);
		ena_com_destroy_io_queue(ena_dev, ena_qid);
		return rc;
	}

	ena_com_update_numa_node(tx_ring->ena_com_io_cq, ctx.numa_node);
	return rc;
}
2330 static int ena_create_io_tx_queues_in_range(struct ena_adapter *adapter,
2331 int first_index, int count)
2333 struct ena_com_dev *ena_dev = adapter->ena_dev;
2336 for (i = first_index; i < first_index + count; i++) {
2337 rc = ena_create_io_tx_queue(adapter, i);
2345 while (i-- > first_index)
2346 ena_com_destroy_io_queue(ena_dev, ENA_IO_TXQ_IDX(i));
2351 static int ena_create_io_rx_queue(struct ena_adapter *adapter, int qid)
2353 struct ena_com_dev *ena_dev;
2354 struct ena_com_create_io_ctx ctx;
2355 struct ena_ring *rx_ring;
2360 ena_dev = adapter->ena_dev;
2362 rx_ring = &adapter->rx_ring[qid];
2363 msix_vector = ENA_IO_IRQ_IDX(qid);
2364 ena_qid = ENA_IO_RXQ_IDX(qid);
2366 memset(&ctx, 0x0, sizeof(ctx));
2369 ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_RX;
2370 ctx.mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
2371 ctx.msix_vector = msix_vector;
2372 ctx.queue_size = rx_ring->ring_size;
2373 ctx.numa_node = cpu_to_node(rx_ring->cpu);
2375 rc = ena_com_create_io_queue(ena_dev, &ctx);
2377 netif_err(adapter, ifup, adapter->netdev,
2378 "Failed to create I/O RX queue num %d rc: %d\n",
2383 rc = ena_com_get_io_handlers(ena_dev, ena_qid,
2384 &rx_ring->ena_com_io_sq,
2385 &rx_ring->ena_com_io_cq);
2387 netif_err(adapter, ifup, adapter->netdev,
2388 "Failed to get RX queue handlers. RX queue num %d rc: %d\n",
2393 ena_com_update_numa_node(rx_ring->ena_com_io_cq, ctx.numa_node);
2397 ena_com_destroy_io_queue(ena_dev, ena_qid);
2401 static int ena_create_all_io_rx_queues(struct ena_adapter *adapter)
2403 struct ena_com_dev *ena_dev = adapter->ena_dev;
2406 for (i = 0; i < adapter->num_io_queues; i++) {
2407 rc = ena_create_io_rx_queue(adapter, i);
2410 INIT_WORK(&adapter->ena_napi[i].dim.work, ena_dim_work);
2417 cancel_work_sync(&adapter->ena_napi[i].dim.work);
2418 ena_com_destroy_io_queue(ena_dev, ENA_IO_RXQ_IDX(i));
2424 static void set_io_rings_size(struct ena_adapter *adapter,
2430 for (i = 0; i < adapter->num_io_queues; i++) {
2431 adapter->tx_ring[i].ring_size = new_tx_size;
2432 adapter->rx_ring[i].ring_size = new_rx_size;
2436 /* This function allows queue allocation to back off when the system is
2437 * low on memory. If there is not enough memory to allocate io queues
2438 * the driver will try to allocate smaller queues.
2440 * The backoff algorithm is as follows:
2441 * 1. Try to allocate TX and RX; if successful
2442 * 1.1. return success
2444 * 2. Divide by 2 the size of the larger of RX and TX queues (or both if their size is the same).
2446 * 3. If TX or RX is smaller than 256
2447 * 3.1. return failure.
2448 * 4. Else
2449 * 4.1. go back to 1.
2451 static int create_queues_with_size_backoff(struct ena_adapter *adapter)
2453 int rc, cur_rx_ring_size, cur_tx_ring_size;
2454 int new_rx_ring_size, new_tx_ring_size;
2456 /* Current queue sizes might be set to smaller values than the requested
2457 * ones due to past queue allocation failures.
2459 set_io_rings_size(adapter, adapter->requested_tx_ring_size,
2460 adapter->requested_rx_ring_size);
2463 if (ena_xdp_present(adapter)) {
2464 rc = ena_setup_and_create_all_xdp_queues(adapter);
2469 rc = ena_setup_tx_resources_in_range(adapter,
2471 adapter->num_io_queues);
2475 rc = ena_create_io_tx_queues_in_range(adapter,
2477 adapter->num_io_queues);
2479 goto err_create_tx_queues;
2481 rc = ena_setup_all_rx_resources(adapter);
2485 rc = ena_create_all_io_rx_queues(adapter);
2487 goto err_create_rx_queues;
2491 err_create_rx_queues:
2492 ena_free_all_io_rx_resources(adapter);
2494 ena_destroy_all_tx_queues(adapter);
2495 err_create_tx_queues:
2496 ena_free_all_io_tx_resources(adapter);
2498 if (rc != -ENOMEM) {
2499 netif_err(adapter, ifup, adapter->netdev,
2500 "Queue creation failed with error code %d\n",
2505 cur_tx_ring_size = adapter->tx_ring[0].ring_size;
2506 cur_rx_ring_size = adapter->rx_ring[0].ring_size;
2508 netif_err(adapter, ifup, adapter->netdev,
2509 "Not enough memory to create queues with sizes TX=%d, RX=%d\n",
2510 cur_tx_ring_size, cur_rx_ring_size);
2512 new_tx_ring_size = cur_tx_ring_size;
2513 new_rx_ring_size = cur_rx_ring_size;
2515 /* Decrease the size of the larger queue, or
2516 * decrease both if they are the same size.
2518 if (cur_rx_ring_size <= cur_tx_ring_size)
2519 new_tx_ring_size = cur_tx_ring_size / 2;
2520 if (cur_rx_ring_size >= cur_tx_ring_size)
2521 new_rx_ring_size = cur_rx_ring_size / 2;
2523 if (new_tx_ring_size < ENA_MIN_RING_SIZE ||
2524 new_rx_ring_size < ENA_MIN_RING_SIZE) {
2525 netif_err(adapter, ifup, adapter->netdev,
2526 "Queue creation failed with the smallest possible queue size of %d for both queues. Not retrying with smaller queues\n",
2531 netif_err(adapter, ifup, adapter->netdev,
2532 "Retrying queue creation with sizes TX=%d, RX=%d\n",
2536 set_io_rings_size(adapter, new_tx_ring_size,
2541 static int ena_up(struct ena_adapter *adapter)
2543 int io_queue_count, rc, i;
2545 netdev_dbg(adapter->netdev, "%s\n", __func__);
2547 io_queue_count = adapter->num_io_queues + adapter->xdp_num_queues;
2548 ena_setup_io_intr(adapter);
2550 /* napi poll functions should be initialized before running
2551 * request_irq(), to handle a rare condition where there is a pending
2552 * interrupt, causing the ISR to fire immediately while the poll
2553 * function wasn't set yet, causing a NULL dereference
2555 ena_init_napi_in_range(adapter, 0, io_queue_count);
2557 rc = ena_request_io_irq(adapter);
2561 rc = create_queues_with_size_backoff(adapter);
2563 goto err_create_queues_with_backoff;
2565 rc = ena_up_complete(adapter);
2569 if (test_bit(ENA_FLAG_LINK_UP, &adapter->flags))
2570 netif_carrier_on(adapter->netdev);
2572 u64_stats_update_begin(&adapter->syncp);
2573 adapter->dev_stats.interface_up++;
2574 u64_stats_update_end(&adapter->syncp);
2576 set_bit(ENA_FLAG_DEV_UP, &adapter->flags);
2578 /* Enable completion queue interrupts */
2579 for (i = 0; i < adapter->num_io_queues; i++)
2580 ena_unmask_interrupt(&adapter->tx_ring[i],
2581 &adapter->rx_ring[i]);
2583 /* schedule napi in case we had pending packets
2584 * from the last time we disabled napi
2586 for (i = 0; i < io_queue_count; i++)
2587 napi_schedule(&adapter->ena_napi[i].napi);
2592 ena_destroy_all_tx_queues(adapter);
2593 ena_free_all_io_tx_resources(adapter);
2594 ena_destroy_all_rx_queues(adapter);
2595 ena_free_all_io_rx_resources(adapter);
2596 err_create_queues_with_backoff:
2597 ena_free_io_irq(adapter);
2599 ena_del_napi_in_range(adapter, 0, io_queue_count);
2604 static void ena_down(struct ena_adapter *adapter)
2606 int io_queue_count = adapter->num_io_queues + adapter->xdp_num_queues;
2608 netif_info(adapter, ifdown, adapter->netdev, "%s\n", __func__);
2610 clear_bit(ENA_FLAG_DEV_UP, &adapter->flags);
2612 u64_stats_update_begin(&adapter->syncp);
2613 adapter->dev_stats.interface_down++;
2614 u64_stats_update_end(&adapter->syncp);
2616 netif_carrier_off(adapter->netdev);
2617 netif_tx_disable(adapter->netdev);
2619 /* After this point the napi handler won't enable the tx queue */
2620 ena_napi_disable_in_range(adapter, 0, io_queue_count);
2622 /* After destroying the queues there won't be any new interrupts */
2624 if (test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags)) {
2627 rc = ena_com_dev_reset(adapter->ena_dev, adapter->reset_reason);
2629 dev_err(&adapter->pdev->dev, "Device reset failed\n");
2630 /* stop submitting admin commands on a device that was reset */
2631 ena_com_set_admin_running_state(adapter->ena_dev, false);
2634 ena_destroy_all_io_queues(adapter);
2636 ena_disable_io_intr_sync(adapter);
2637 ena_free_io_irq(adapter);
2638 ena_del_napi_in_range(adapter, 0, io_queue_count);
2640 ena_free_all_tx_bufs(adapter);
2641 ena_free_all_rx_bufs(adapter);
2642 ena_free_all_io_tx_resources(adapter);
2643 ena_free_all_io_rx_resources(adapter);
2646 /* ena_open - Called when a network interface is made active
2647 * @netdev: network interface device structure
2649 * Returns 0 on success, negative value on failure
2651 * The open entry point is called when a network interface is made
2652 * active by the system (IFF_UP). At this point all resources needed
2653 * for transmit and receive operations are allocated, the interrupt
2654 * handler is registered with the OS, the watchdog timer is started,
2655 * and the stack is notified that the interface is ready.
2657 static int ena_open(struct net_device *netdev)
2659 struct ena_adapter *adapter = netdev_priv(netdev);
2662 /* Notify the stack of the actual queue counts. */
2663 rc = netif_set_real_num_tx_queues(netdev, adapter->num_io_queues);
2665 netif_err(adapter, ifup, netdev, "Can't set num tx queues\n");
2669 rc = netif_set_real_num_rx_queues(netdev, adapter->num_io_queues);
2671 netif_err(adapter, ifup, netdev, "Can't set num rx queues\n");
2675 rc = ena_up(adapter);
2682 /* ena_close - Disables a network interface
2683 * @netdev: network interface device structure
2685 * Returns 0, this is not allowed to fail
2687 * The close entry point is called when an interface is de-activated
2688 * by the OS. The hardware is still under the drivers control, but
2689 * needs to be disabled. A global MAC reset is issued to stop the
2690 * hardware, and all transmit and receive resources are freed.
2692 static int ena_close(struct net_device *netdev)
2694 struct ena_adapter *adapter = netdev_priv(netdev);
2696 netif_dbg(adapter, ifdown, netdev, "%s\n", __func__);
2698 if (!test_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags))
2701 if (test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
2704 /* Check the device status and issue a reset if needed */
2705 check_for_admin_com_state(adapter);
2706 if (unlikely(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) {
2707 netif_err(adapter, ifdown, adapter->netdev,
2708 "Destroy failure, restarting device\n");
2709 ena_dump_stats_to_dmesg(adapter);
2710 /* rtnl lock already obtained in dev_ioctl() layer */
2711 ena_destroy_device(adapter, false);
2712 ena_restore_device(adapter);
2718 int ena_update_queue_sizes(struct ena_adapter *adapter,
2724 dev_was_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags);
2725 ena_close(adapter->netdev);
2726 adapter->requested_tx_ring_size = new_tx_size;
2727 adapter->requested_rx_ring_size = new_rx_size;
2728 ena_init_io_rings(adapter,
2730 adapter->xdp_num_queues +
2731 adapter->num_io_queues);
2732 return dev_was_up ? ena_up(adapter) : 0;
2735 int ena_update_queue_count(struct ena_adapter *adapter, u32 new_channel_count)
2737 struct ena_com_dev *ena_dev = adapter->ena_dev;
2738 int prev_channel_count;
2741 dev_was_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags);
2742 ena_close(adapter->netdev);
2743 prev_channel_count = adapter->num_io_queues;
2744 adapter->num_io_queues = new_channel_count;
2745 if (ena_xdp_present(adapter) &&
2746 ena_xdp_allowed(adapter) == ENA_XDP_ALLOWED) {
2747 adapter->xdp_first_ring = new_channel_count;
2748 adapter->xdp_num_queues = new_channel_count;
2749 if (prev_channel_count > new_channel_count)
2750 ena_xdp_exchange_program_rx_in_range(adapter,
2753 prev_channel_count);
2755 ena_xdp_exchange_program_rx_in_range(adapter,
2756 adapter->xdp_bpf_prog,
2761 /* We need to destroy the rss table so that the indirection
2762 * table will be reinitialized by ena_up()
2764 ena_com_rss_destroy(ena_dev);
2765 ena_init_io_rings(adapter,
2767 adapter->xdp_num_queues +
2768 adapter->num_io_queues);
2769 return dev_was_up ? ena_open(adapter->netdev) : 0;
2772 static void ena_tx_csum(struct ena_com_tx_ctx *ena_tx_ctx, struct sk_buff *skb)
2774 u32 mss = skb_shinfo(skb)->gso_size;
2775 struct ena_com_tx_meta *ena_meta = &ena_tx_ctx->ena_meta;
2778 if ((skb->ip_summed == CHECKSUM_PARTIAL) || mss) {
2779 ena_tx_ctx->l4_csum_enable = 1;
2781 ena_tx_ctx->tso_enable = 1;
2782 ena_meta->l4_hdr_len = tcp_hdr(skb)->doff;
2783 ena_tx_ctx->l4_csum_partial = 0;
2785 ena_tx_ctx->tso_enable = 0;
2786 ena_meta->l4_hdr_len = 0;
2787 ena_tx_ctx->l4_csum_partial = 1;
2790 switch (ip_hdr(skb)->version) {
2792 ena_tx_ctx->l3_proto = ENA_ETH_IO_L3_PROTO_IPV4;
2793 if (ip_hdr(skb)->frag_off & htons(IP_DF))
2796 ena_tx_ctx->l3_csum_enable = 1;
2797 l4_protocol = ip_hdr(skb)->protocol;
2800 ena_tx_ctx->l3_proto = ENA_ETH_IO_L3_PROTO_IPV6;
2801 l4_protocol = ipv6_hdr(skb)->nexthdr;
2807 if (l4_protocol == IPPROTO_TCP)
2808 ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_TCP;
2810 ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_UDP;
2812 ena_meta->mss = mss;
2813 ena_meta->l3_hdr_len = skb_network_header_len(skb);
2814 ena_meta->l3_hdr_offset = skb_network_offset(skb);
2815 ena_tx_ctx->meta_valid = 1;
2818 ena_tx_ctx->meta_valid = 0;
2822 static int ena_check_and_linearize_skb(struct ena_ring *tx_ring,
2823 struct sk_buff *skb)
2825 int num_frags, header_len, rc;
2827 num_frags = skb_shinfo(skb)->nr_frags;
2828 header_len = skb_headlen(skb);
2830 if (num_frags < tx_ring->sgl_size)
2833 if ((num_frags == tx_ring->sgl_size) &&
2834 (header_len < tx_ring->tx_max_header_size))
2837 u64_stats_update_begin(&tx_ring->syncp);
2838 tx_ring->tx_stats.linearize++;
2839 u64_stats_update_end(&tx_ring->syncp);
2841 rc = skb_linearize(skb);
2843 u64_stats_update_begin(&tx_ring->syncp);
2844 tx_ring->tx_stats.linearize_failed++;
2845 u64_stats_update_end(&tx_ring->syncp);
2851 static int ena_tx_map_skb(struct ena_ring *tx_ring,
2852 struct ena_tx_buffer *tx_info,
2853 struct sk_buff *skb,
2857 struct ena_adapter *adapter = tx_ring->adapter;
2858 struct ena_com_buf *ena_buf;
2860 u32 skb_head_len, frag_len, last_frag;
2865 skb_head_len = skb_headlen(skb);
2867 ena_buf = tx_info->bufs;
2869 if (tx_ring->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
2870 /* When the device is in LLQ mode, the driver will copy
2871 * the header into the device memory space.
2872 * The ena_com layer assumes the header is in a linear
2873 * memory space.
2874 * This assumption might be wrong since part of the header
2875 * can be in the fragmented buffers.
2876 * Use skb_header_pointer to make sure the header is in a
2877 * linear memory space.
2880 push_len = min_t(u32, skb->len, tx_ring->tx_max_header_size);
2881 *push_hdr = skb_header_pointer(skb, 0, push_len,
2882 tx_ring->push_buf_intermediate_buf);
2883 *header_len = push_len;
2884 if (unlikely(skb->data != *push_hdr)) {
2885 u64_stats_update_begin(&tx_ring->syncp);
2886 tx_ring->tx_stats.llq_buffer_copy++;
2887 u64_stats_update_end(&tx_ring->syncp);
2889 delta = push_len - skb_head_len;
2893 *header_len = min_t(u32, skb_head_len,
2894 tx_ring->tx_max_header_size);
2897 netif_dbg(adapter, tx_queued, adapter->netdev,
2898 "skb: %p header_buf->vaddr: %p push_len: %d\n", skb,
2899 *push_hdr, push_len);
2901 if (skb_head_len > push_len) {
2902 dma = dma_map_single(tx_ring->dev, skb->data + push_len,
2903 skb_head_len - push_len, DMA_TO_DEVICE);
2904 if (unlikely(dma_mapping_error(tx_ring->dev, dma)))
2905 goto error_report_dma_error;
2907 ena_buf->paddr = dma;
2908 ena_buf->len = skb_head_len - push_len;
2911 tx_info->num_of_bufs++;
2912 tx_info->map_linear_data = 1;
2914 tx_info->map_linear_data = 0;
2917 last_frag = skb_shinfo(skb)->nr_frags;
2919 for (i = 0; i < last_frag; i++) {
2920 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2922 frag_len = skb_frag_size(frag);
2924 if (unlikely(delta >= frag_len)) {
2925 delta -= frag_len;
2926 continue;
2927 }
2929 dma = skb_frag_dma_map(tx_ring->dev, frag, delta,
2930 frag_len - delta, DMA_TO_DEVICE);
2931 if (unlikely(dma_mapping_error(tx_ring->dev, dma)))
2932 goto error_report_dma_error;
2934 ena_buf->paddr = dma;
2935 ena_buf->len = frag_len - delta;
2937 tx_info->num_of_bufs++;
2943 error_report_dma_error:
2944 u64_stats_update_begin(&tx_ring->syncp);
2945 tx_ring->tx_stats.dma_mapping_err++;
2946 u64_stats_update_end(&tx_ring->syncp);
2947 netdev_warn(adapter->netdev, "failed to map skb\n");
2949 tx_info->skb = NULL;
2951 tx_info->num_of_bufs += i;
2952 ena_unmap_tx_buff(tx_ring, tx_info);
2957 /* Called with netif_tx_lock. */
2958 static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev)
2960 struct ena_adapter *adapter = netdev_priv(dev);
2961 struct ena_tx_buffer *tx_info;
2962 struct ena_com_tx_ctx ena_tx_ctx;
2963 struct ena_ring *tx_ring;
2964 struct netdev_queue *txq;
2966 u16 next_to_use, req_id, header_len;
2969 netif_dbg(adapter, tx_queued, dev, "%s skb %p\n", __func__, skb);
2970 /* Determine the tx ring on which this skb will be placed */
2971 qid = skb_get_queue_mapping(skb);
2972 tx_ring = &adapter->tx_ring[qid];
2973 txq = netdev_get_tx_queue(dev, qid);
2975 rc = ena_check_and_linearize_skb(tx_ring, skb);
2977 goto error_drop_packet;
2979 skb_tx_timestamp(skb);
2981 next_to_use = tx_ring->next_to_use;
2982 req_id = tx_ring->free_ids[next_to_use];
2983 tx_info = &tx_ring->tx_buffer_info[req_id];
2984 tx_info->num_of_bufs = 0;
2986 WARN(tx_info->skb, "SKB isn't NULL req_id %d\n", req_id);
2988 rc = ena_tx_map_skb(tx_ring, tx_info, skb, &push_hdr, &header_len);
2990 goto error_drop_packet;
2992 memset(&ena_tx_ctx, 0x0, sizeof(struct ena_com_tx_ctx));
2993 ena_tx_ctx.ena_bufs = tx_info->bufs;
2994 ena_tx_ctx.push_header = push_hdr;
2995 ena_tx_ctx.num_bufs = tx_info->num_of_bufs;
2996 ena_tx_ctx.req_id = req_id;
2997 ena_tx_ctx.header_len = header_len;
2999 /* set flags and meta data */
3000 ena_tx_csum(&ena_tx_ctx, skb);
3002 rc = ena_xmit_common(dev,
3009 goto error_unmap_dma;
3011 netdev_tx_sent_queue(txq, skb->len);
3013 /* Stop the queue when no more space is available. The packet can have up
3014 * to sgl_size + 2 descriptors: one for the meta descriptor and one for the header
3015 * (if the header is larger than tx_max_header_size).
3017 if (unlikely(!ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq,
3018 tx_ring->sgl_size + 2))) {
3019 netif_dbg(adapter, tx_queued, dev, "%s stop queue %d\n",
3022 netif_tx_stop_queue(txq);
3023 u64_stats_update_begin(&tx_ring->syncp);
3024 tx_ring->tx_stats.queue_stop++;
3025 u64_stats_update_end(&tx_ring->syncp);
3027 /* There is a rare condition where this function decides to
3028 * stop the queue but meanwhile clean_tx_irq updates
3029 * next_to_completion and terminates.
3030 * The queue would then remain stopped forever.
3031 * To solve this issue add a mb() to make sure that the
3032 * netif_tx_stop_queue() write is visible before checking if
3033 * there is additional space in the queue.
3037 if (ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq,
3038 ENA_TX_WAKEUP_THRESH)) {
3039 netif_tx_wake_queue(txq);
3040 u64_stats_update_begin(&tx_ring->syncp);
3041 tx_ring->tx_stats.queue_wakeup++;
3042 u64_stats_update_end(&tx_ring->syncp);
3046 if (netif_xmit_stopped(txq) || !netdev_xmit_more()) {
3047 /* trigger the dma engine. ena_com_write_sq_doorbell()
3048 * has a mb (memory barrier)
3049 */
3050 ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq);
3051 u64_stats_update_begin(&tx_ring->syncp);
3052 tx_ring->tx_stats.doorbells++;
3053 u64_stats_update_end(&tx_ring->syncp);
3056 return NETDEV_TX_OK;
3059 ena_unmap_tx_buff(tx_ring, tx_info);
3060 tx_info->skb = NULL;
3064 return NETDEV_TX_OK;
3067 static u16 ena_select_queue(struct net_device *dev, struct sk_buff *skb,
3068 struct net_device *sb_dev)
3071 /* We suspect that this is good for in-kernel network services that
3072 * want to loop incoming skb rx to tx in normal user generated traffic;
3073 * most probably we will not get to this
3075 if (skb_rx_queue_recorded(skb))
3076 qid = skb_get_rx_queue(skb);
3078 qid = netdev_pick_tx(dev, skb, NULL);
3083 static void ena_config_host_info(struct ena_com_dev *ena_dev,
3084 struct pci_dev *pdev)
3086 struct ena_admin_host_info *host_info;
3089 /* Allocate only the host info */
3090 rc = ena_com_allocate_host_info(ena_dev);
3092 pr_err("Cannot allocate host info\n");
3096 host_info = ena_dev->host_attr.host_info;
3098 host_info->bdf = (pdev->bus->number << 8) | pdev->devfn;
3099 host_info->os_type = ENA_ADMIN_OS_LINUX;
3100 host_info->kernel_ver = LINUX_VERSION_CODE;
3101 strlcpy(host_info->kernel_ver_str, utsname()->version,
3102 sizeof(host_info->kernel_ver_str) - 1);
3103 host_info->os_dist = 0;
3104 strncpy(host_info->os_dist_str, utsname()->release,
3105 sizeof(host_info->os_dist_str) - 1);
3106 host_info->driver_version =
3107 (DRV_MODULE_VER_MAJOR) |
3108 (DRV_MODULE_VER_MINOR << ENA_ADMIN_HOST_INFO_MINOR_SHIFT) |
3109 (DRV_MODULE_VER_SUBMINOR << ENA_ADMIN_HOST_INFO_SUB_MINOR_SHIFT) |
3110 ("K"[0] << ENA_ADMIN_HOST_INFO_MODULE_TYPE_SHIFT);
3111 host_info->num_cpus = num_online_cpus();
3113 host_info->driver_supported_features =
3114 ENA_ADMIN_HOST_INFO_INTERRUPT_MODERATION_MASK;
3116 rc = ena_com_set_host_attributes(ena_dev);
3118 if (rc == -EOPNOTSUPP)
3119 pr_warn("Cannot set host attributes\n");
3121 pr_err("Cannot set host attributes\n");
3129 ena_com_delete_host_info(ena_dev);
3132 static void ena_config_debug_area(struct ena_adapter *adapter)
3134 u32 debug_area_size;
3137 ss_count = ena_get_sset_count(adapter->netdev, ETH_SS_STATS);
3138 if (ss_count <= 0) {
3139 netif_err(adapter, drv, adapter->netdev,
3140 "SS count is negative\n");
3144 /* allocate 32 bytes for each string and 64 bits for the value */
3145 debug_area_size = ss_count * ETH_GSTRING_LEN + sizeof(u64) * ss_count;
3147 rc = ena_com_allocate_debug_area(adapter->ena_dev, debug_area_size);
3149 pr_err("Cannot allocate debug area\n");
3153 rc = ena_com_set_host_attributes(adapter->ena_dev);
3155 if (rc == -EOPNOTSUPP)
3156 netif_warn(adapter, drv, adapter->netdev,
3157 "Cannot set host attributes\n");
3159 netif_err(adapter, drv, adapter->netdev,
3160 "Cannot set host attributes\n");
3166 ena_com_delete_debug_area(adapter->ena_dev);
3169 static void ena_get_stats64(struct net_device *netdev,
3170 struct rtnl_link_stats64 *stats)
3172 struct ena_adapter *adapter = netdev_priv(netdev);
3173 struct ena_ring *rx_ring, *tx_ring;
3178 if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
3181 for (i = 0; i < adapter->num_io_queues; i++) {
3184 tx_ring = &adapter->tx_ring[i];
3187 start = u64_stats_fetch_begin_irq(&tx_ring->syncp);
3188 packets = tx_ring->tx_stats.cnt;
3189 bytes = tx_ring->tx_stats.bytes;
3190 } while (u64_stats_fetch_retry_irq(&tx_ring->syncp, start));
3192 stats->tx_packets += packets;
3193 stats->tx_bytes += bytes;
3195 rx_ring = &adapter->rx_ring[i];
3198 start = u64_stats_fetch_begin_irq(&rx_ring->syncp);
3199 packets = rx_ring->rx_stats.cnt;
3200 bytes = rx_ring->rx_stats.bytes;
3201 } while (u64_stats_fetch_retry_irq(&rx_ring->syncp, start));
3203 stats->rx_packets += packets;
3204 stats->rx_bytes += bytes;
3208 start = u64_stats_fetch_begin_irq(&adapter->syncp);
3209 rx_drops = adapter->dev_stats.rx_drops;
3210 } while (u64_stats_fetch_retry_irq(&adapter->syncp, start));
3212 stats->rx_dropped = rx_drops;
3214 stats->multicast = 0;
3215 stats->collisions = 0;
3217 stats->rx_length_errors = 0;
3218 stats->rx_crc_errors = 0;
3219 stats->rx_frame_errors = 0;
3220 stats->rx_fifo_errors = 0;
3221 stats->rx_missed_errors = 0;
3222 stats->tx_window_errors = 0;
3224 stats->rx_errors = 0;
3225 stats->tx_errors = 0;
3228 static const struct net_device_ops ena_netdev_ops = {
3229 .ndo_open = ena_open,
3230 .ndo_stop = ena_close,
3231 .ndo_start_xmit = ena_start_xmit,
3232 .ndo_select_queue = ena_select_queue,
3233 .ndo_get_stats64 = ena_get_stats64,
3234 .ndo_tx_timeout = ena_tx_timeout,
3235 .ndo_change_mtu = ena_change_mtu,
3236 .ndo_set_mac_address = NULL,
3237 .ndo_validate_addr = eth_validate_addr,
3241 static int ena_device_validate_params(struct ena_adapter *adapter,
3242 struct ena_com_dev_get_features_ctx *get_feat_ctx)
3244 struct net_device *netdev = adapter->netdev;
3247 rc = ether_addr_equal(get_feat_ctx->dev_attr.mac_addr,
3250 netif_err(adapter, drv, netdev,
3251 "Error, mac address are different\n");
3255 if (get_feat_ctx->dev_attr.max_mtu < netdev->mtu) {
3256 netif_err(adapter, drv, netdev,
3257 "Error, device max mtu is smaller than netdev MTU\n");
3264 static int ena_device_init(struct ena_com_dev *ena_dev, struct pci_dev *pdev,
3265 struct ena_com_dev_get_features_ctx *get_feat_ctx,
3268 struct device *dev = &pdev->dev;
3269 bool readless_supported;
3274 rc = ena_com_mmio_reg_read_request_init(ena_dev);
3276 dev_err(dev, "failed to init mmio read less\n");
3280 /* The PCIe configuration space revision id indicates if mmio reg
3281 * read is disabled
3282 */
3283 readless_supported = !(pdev->revision & ENA_MMIO_DISABLE_REG_READ);
3284 ena_com_set_mmio_read_mode(ena_dev, readless_supported);
3286 rc = ena_com_dev_reset(ena_dev, ENA_REGS_RESET_NORMAL);
3288 dev_err(dev, "Can not reset device\n");
3289 goto err_mmio_read_less;
3292 rc = ena_com_validate_version(ena_dev);
3294 dev_err(dev, "device version is too low\n");
3295 goto err_mmio_read_less;
3298 dma_width = ena_com_get_dma_width(ena_dev);
3299 if (dma_width < 0) {
3300 dev_err(dev, "Invalid dma width value %d", dma_width);
3302 goto err_mmio_read_less;
3305 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(dma_width));
3307 dev_err(dev, "pci_set_dma_mask failed 0x%x\n", rc);
3308 goto err_mmio_read_less;
3311 rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(dma_width));
3313 dev_err(dev, "err_pci_set_consistent_dma_mask failed 0x%x\n",
3315 goto err_mmio_read_less;
3318 /* ENA admin level init */
3319 rc = ena_com_admin_init(ena_dev, &aenq_handlers);
3322 "Can not initialize ena admin queue with device\n");
3323 goto err_mmio_read_less;
3326 /* To enable the msix interrupts the driver needs to know the number
3327 * of queues. So the driver uses polling mode to retrieve this
3328 * information
3329 */
3330 ena_com_set_admin_polling_mode(ena_dev, true);
3332 ena_config_host_info(ena_dev, pdev);
3334 /* Get Device Attributes */
3335 rc = ena_com_get_dev_attr_feat(ena_dev, get_feat_ctx);
3337 dev_err(dev, "Cannot get attribute for ena device rc=%d\n", rc);
3338 goto err_admin_init;
3341 /* Try to turn on all the available aenq groups */
3342 aenq_groups = BIT(ENA_ADMIN_LINK_CHANGE) |
3343 BIT(ENA_ADMIN_FATAL_ERROR) |
3344 BIT(ENA_ADMIN_WARNING) |
3345 BIT(ENA_ADMIN_NOTIFICATION) |
3346 BIT(ENA_ADMIN_KEEP_ALIVE);
3348 aenq_groups &= get_feat_ctx->aenq.supported_groups;
3350 rc = ena_com_set_aenq_config(ena_dev, aenq_groups);
3352 dev_err(dev, "Cannot configure aenq groups rc= %d\n", rc);
3353 goto err_admin_init;
3356 *wd_state = !!(aenq_groups & BIT(ENA_ADMIN_KEEP_ALIVE));
3361 ena_com_delete_host_info(ena_dev);
3362 ena_com_admin_destroy(ena_dev);
3364 ena_com_mmio_reg_read_request_destroy(ena_dev);
3369 static int ena_enable_msix_and_set_admin_interrupts(struct ena_adapter *adapter)
3371 struct ena_com_dev *ena_dev = adapter->ena_dev;
3372 struct device *dev = &adapter->pdev->dev;
3375 rc = ena_enable_msix(adapter);
3377 dev_err(dev, "Can not reserve msix vectors\n");
3381 ena_setup_mgmnt_intr(adapter);
3383 rc = ena_request_mgmnt_irq(adapter);
3385 dev_err(dev, "Can not setup management interrupts\n");
3386 goto err_disable_msix;
3389 ena_com_set_admin_polling_mode(ena_dev, false);
3391 ena_com_admin_aenq_enable(ena_dev);
3396 ena_disable_msix(adapter);
3401 static void ena_destroy_device(struct ena_adapter *adapter, bool graceful)
3403 struct net_device *netdev = adapter->netdev;
3404 struct ena_com_dev *ena_dev = adapter->ena_dev;
3407 if (!test_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags))
3410 netif_carrier_off(netdev);
3412 del_timer_sync(&adapter->timer_service);
3414 dev_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags);
3415 adapter->dev_up_before_reset = dev_up;
3417 ena_com_set_admin_running_state(ena_dev, false);
3419 if (test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
3422 /* Stop the device from sending AENQ events (in case the reset flag is set
3423 * and the device is up, ena_down() has already reset the device).
3425 if (!(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags) && dev_up))
3426 ena_com_dev_reset(adapter->ena_dev, adapter->reset_reason);
3428 ena_free_mgmnt_irq(adapter);
3430 ena_disable_msix(adapter);
3432 ena_com_abort_admin_commands(ena_dev);
3434 ena_com_wait_for_abort_completion(ena_dev);
3436 ena_com_admin_destroy(ena_dev);
3438 ena_com_mmio_reg_read_request_destroy(ena_dev);
3440 adapter->reset_reason = ENA_REGS_RESET_NORMAL;
3442 clear_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
3443 clear_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags);
3446 static int ena_restore_device(struct ena_adapter *adapter)
3448 struct ena_com_dev_get_features_ctx get_feat_ctx;
3449 struct ena_com_dev *ena_dev = adapter->ena_dev;
3450 struct pci_dev *pdev = adapter->pdev;
3454 set_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags);
3455 rc = ena_device_init(ena_dev, adapter->pdev, &get_feat_ctx, &wd_state);
3457 dev_err(&pdev->dev, "Can not initialize device\n");
3460 adapter->wd_state = wd_state;
3462 rc = ena_device_validate_params(adapter, &get_feat_ctx);
3464 dev_err(&pdev->dev, "Validation of device parameters failed\n");
3465 goto err_device_destroy;
3468 rc = ena_enable_msix_and_set_admin_interrupts(adapter);
3470 dev_err(&pdev->dev, "Enable MSI-X failed\n");
3471 goto err_device_destroy;
3473 /* If the interface was up before the reset, bring it up */
3474 if (adapter->dev_up_before_reset) {
3475 rc = ena_up(adapter);
3477 dev_err(&pdev->dev, "Failed to create I/O queues\n");
3478 goto err_disable_msix;
3482 set_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags);
3484 clear_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags);
3485 if (test_bit(ENA_FLAG_LINK_UP, &adapter->flags))
3486 netif_carrier_on(adapter->netdev);
3488 mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ));
3489 adapter->last_keep_alive_jiffies = jiffies;
3491 "Device reset completed successfully, Driver info: %s\n",
3496 ena_free_mgmnt_irq(adapter);
3497 ena_disable_msix(adapter);
3499 ena_com_abort_admin_commands(ena_dev);
3500 ena_com_wait_for_abort_completion(ena_dev);
3501 ena_com_admin_destroy(ena_dev);
3502 ena_com_dev_reset(ena_dev, ENA_REGS_RESET_DRIVER_INVALID_STATE);
3503 ena_com_mmio_reg_read_request_destroy(ena_dev);
3505 clear_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags);
3506 clear_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags);
3508 "Reset attempt failed. Can not reset the device\n");
3513 static void ena_fw_reset_device(struct work_struct *work)
3515 struct ena_adapter *adapter =
3516 container_of(work, struct ena_adapter, reset_task);
3517 struct pci_dev *pdev = adapter->pdev;
3519 if (unlikely(!test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) {
3521 "device reset schedule while reset bit is off\n");
3525 ena_destroy_device(adapter, false);
3526 ena_restore_device(adapter);
3530 static int check_for_rx_interrupt_queue(struct ena_adapter *adapter,
3531 struct ena_ring *rx_ring)
3533 if (likely(rx_ring->first_interrupt))
3536 if (ena_com_cq_empty(rx_ring->ena_com_io_cq))
3539 rx_ring->no_interrupt_event_cnt++;
3541 if (rx_ring->no_interrupt_event_cnt == ENA_MAX_NO_INTERRUPT_ITERATIONS) {
3542 netif_err(adapter, rx_err, adapter->netdev,
3543 "Potential MSIX issue on Rx side Queue = %d. Reset the device\n",
3545 adapter->reset_reason = ENA_REGS_RESET_MISS_INTERRUPT;
3546 smp_mb__before_atomic();
3547 set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
3554 static int check_missing_comp_in_tx_queue(struct ena_adapter *adapter,
3555 struct ena_ring *tx_ring)
3557 struct ena_tx_buffer *tx_buf;
3558 unsigned long last_jiffies;
3562 for (i = 0; i < tx_ring->ring_size; i++) {
3563 tx_buf = &tx_ring->tx_buffer_info[i];
3564 last_jiffies = tx_buf->last_jiffies;
3566 if (last_jiffies == 0)
3567 /* no pending Tx at this location */
3570 if (unlikely(!tx_ring->first_interrupt && time_is_before_jiffies(last_jiffies +
3571 2 * adapter->missing_tx_completion_to))) {
3572 /* If the interrupt is still not received after the graceful
3573 * period, schedule a reset
3575 netif_err(adapter, tx_err, adapter->netdev,
3576 "Potential MSIX issue on Tx side Queue = %d. Reset the device\n",
3578 adapter->reset_reason = ENA_REGS_RESET_MISS_INTERRUPT;
3579 smp_mb__before_atomic();
3580 set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
3584 if (unlikely(time_is_before_jiffies(last_jiffies +
3585 adapter->missing_tx_completion_to))) {
3586 if (!tx_buf->print_once)
3587 netif_notice(adapter, tx_err, adapter->netdev,
3588 "Found a Tx that wasn't completed on time, qid %d, index %d.\n",
3591 tx_buf->print_once = 1;
3596 if (unlikely(missed_tx > adapter->missing_tx_completion_threshold)) {
3597 netif_err(adapter, tx_err, adapter->netdev,
3598 "The number of lost tx completions is above the threshold (%d > %d). Reset the device\n",
3600 adapter->missing_tx_completion_threshold);
3601 adapter->reset_reason =
3602 ENA_REGS_RESET_MISS_TX_CMPL;
3603 set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
3607 u64_stats_update_begin(&tx_ring->syncp);
3608 tx_ring->tx_stats.missed_tx = missed_tx;
3609 u64_stats_update_end(&tx_ring->syncp);
3614 static void check_for_missing_completions(struct ena_adapter *adapter)
3616 struct ena_ring *tx_ring;
3617 struct ena_ring *rx_ring;
3621 io_queue_count = adapter->xdp_num_queues + adapter->num_io_queues;
3622 /* Make sure the driver doesn't turn the device off in another process */
3625 if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
3628 if (test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))
3631 if (adapter->missing_tx_completion_to == ENA_HW_HINTS_NO_TIMEOUT)
3634 budget = ENA_MONITORED_TX_QUEUES;
3636 for (i = adapter->last_monitored_tx_qid; i < io_queue_count; i++) {
3637 tx_ring = &adapter->tx_ring[i];
3638 rx_ring = &adapter->rx_ring[i];
3640 rc = check_missing_comp_in_tx_queue(adapter, tx_ring);
3644 rc = !ENA_IS_XDP_INDEX(adapter, i) ?
3645 check_for_rx_interrupt_queue(adapter, rx_ring) : 0;
3654 adapter->last_monitored_tx_qid = i % io_queue_count;
3657 /* trigger napi schedule after 2 consecutive detections */
3658 #define EMPTY_RX_REFILL 2
3659 /* For the rare case where the device runs out of Rx descriptors and the
3660 * napi handler failed to refill new Rx descriptors (due to a lack of memory,
3661 * for example).
3662 * This case will lead to a deadlock:
3663 * The device won't send interrupts since all the new Rx packets will be dropped.
3664 * The napi handler won't allocate new Rx descriptors so the device
3665 * won't be able to send new packets.
3667 * This scenario can happen when the kernel's vm.min_free_kbytes is too small.
3668 * It is recommended to have at least 512MB, with a minimum of 128MB for
3669 * constrained environments.
3671 * When such a situation is detected - reschedule napi
3673 static void check_for_empty_rx_ring(struct ena_adapter *adapter)
3675 struct ena_ring *rx_ring;
3676 int i, refill_required;
3678 if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
3681 if (test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))
3684 for (i = 0; i < adapter->num_io_queues; i++) {
3685 rx_ring = &adapter->rx_ring[i];
3688 ena_com_free_desc(rx_ring->ena_com_io_sq);
3689 if (unlikely(refill_required == (rx_ring->ring_size - 1))) {
3690 rx_ring->empty_rx_queue++;
3692 if (rx_ring->empty_rx_queue >= EMPTY_RX_REFILL) {
3693 u64_stats_update_begin(&rx_ring->syncp);
3694 rx_ring->rx_stats.empty_rx_ring++;
3695 u64_stats_update_end(&rx_ring->syncp);
3697 netif_err(adapter, drv, adapter->netdev,
3698 "trigger refill for ring %d\n", i);
3700 napi_schedule(rx_ring->napi);
3701 rx_ring->empty_rx_queue = 0;
3704 rx_ring->empty_rx_queue = 0;
3709 /* Check for keep alive expiration */
3710 static void check_for_missing_keep_alive(struct ena_adapter *adapter)
3712 unsigned long keep_alive_expired;
3714 if (!adapter->wd_state)
3717 if (adapter->keep_alive_timeout == ENA_HW_HINTS_NO_TIMEOUT)
3720 keep_alive_expired = adapter->last_keep_alive_jiffies +
3721 adapter->keep_alive_timeout;
3722 if (unlikely(time_is_before_jiffies(keep_alive_expired))) {
3723 netif_err(adapter, drv, adapter->netdev,
3724 "Keep alive watchdog timeout.\n");
3725 u64_stats_update_begin(&adapter->syncp);
3726 adapter->dev_stats.wd_expired++;
3727 u64_stats_update_end(&adapter->syncp);
3728 adapter->reset_reason = ENA_REGS_RESET_KEEP_ALIVE_TO;
3729 set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
3733 static void check_for_admin_com_state(struct ena_adapter *adapter)
3735 if (unlikely(!ena_com_get_admin_running_state(adapter->ena_dev))) {
3736 netif_err(adapter, drv, adapter->netdev,
3737 "ENA admin queue is not in running state!\n");
3738 u64_stats_update_begin(&adapter->syncp);
3739 adapter->dev_stats.admin_q_pause++;
3740 u64_stats_update_end(&adapter->syncp);
3741 adapter->reset_reason = ENA_REGS_RESET_ADMIN_TO;
3742 set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
3746 static void ena_update_hints(struct ena_adapter *adapter,
3747 struct ena_admin_ena_hw_hints *hints)
3749 struct net_device *netdev = adapter->netdev;
3751 if (hints->admin_completion_tx_timeout)
3752 adapter->ena_dev->admin_queue.completion_timeout =
3753 hints->admin_completion_tx_timeout * 1000;
3755 if (hints->mmio_read_timeout)
3756 /* convert to usec */
3757 adapter->ena_dev->mmio_read.reg_read_to =
3758 hints->mmio_read_timeout * 1000;
3760 if (hints->missed_tx_completion_count_threshold_to_reset)
3761 adapter->missing_tx_completion_threshold =
3762 hints->missed_tx_completion_count_threshold_to_reset;
3764 if (hints->missing_tx_completion_timeout) {
3765 if (hints->missing_tx_completion_timeout == ENA_HW_HINTS_NO_TIMEOUT)
3766 adapter->missing_tx_completion_to = ENA_HW_HINTS_NO_TIMEOUT;
3768 adapter->missing_tx_completion_to =
3769 msecs_to_jiffies(hints->missing_tx_completion_timeout);
3772 if (hints->netdev_wd_timeout)
3773 netdev->watchdog_timeo = msecs_to_jiffies(hints->netdev_wd_timeout);
3775 if (hints->driver_watchdog_timeout) {
3776 if (hints->driver_watchdog_timeout == ENA_HW_HINTS_NO_TIMEOUT)
3777 adapter->keep_alive_timeout = ENA_HW_HINTS_NO_TIMEOUT;
3779 adapter->keep_alive_timeout =
3780 msecs_to_jiffies(hints->driver_watchdog_timeout);
3784 static void ena_update_host_info(struct ena_admin_host_info *host_info,
3785 struct net_device *netdev)
3787 host_info->supported_network_features[0] =
3788 netdev->features & GENMASK_ULL(31, 0);
3789 host_info->supported_network_features[1] =
3790 (netdev->features & GENMASK_ULL(63, 32)) >> 32;
3793 static void ena_timer_service(struct timer_list *t)
3795 struct ena_adapter *adapter = from_timer(adapter, t, timer_service);
3796 u8 *debug_area = adapter->ena_dev->host_attr.debug_area_virt_addr;
3797 struct ena_admin_host_info *host_info =
3798 adapter->ena_dev->host_attr.host_info;
3800 check_for_missing_keep_alive(adapter);
3802 check_for_admin_com_state(adapter);
3804 check_for_missing_completions(adapter);
3806 check_for_empty_rx_ring(adapter);
3809 ena_dump_stats_to_buf(adapter, debug_area);
3812 ena_update_host_info(host_info, adapter->netdev);
3814 if (unlikely(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) {
3815 netif_err(adapter, drv, adapter->netdev,
3816 "Trigger reset is on\n");
3817 ena_dump_stats_to_dmesg(adapter);
3818 queue_work(ena_wq, &adapter->reset_task);
3822 /* Reset the timer */
3823 mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ));
3826 static int ena_calc_max_io_queue_num(struct pci_dev *pdev,
3827 struct ena_com_dev *ena_dev,
3828 struct ena_com_dev_get_features_ctx *get_feat_ctx)
3830 int io_tx_sq_num, io_tx_cq_num, io_rx_num, max_num_io_queues;
3832 if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) {
3833 struct ena_admin_queue_ext_feature_fields *max_queue_ext =
3834 &get_feat_ctx->max_queue_ext.max_queue_ext;
3835 io_rx_num = min_t(u32, max_queue_ext->max_rx_sq_num,
3836 max_queue_ext->max_rx_cq_num);
3838 io_tx_sq_num = max_queue_ext->max_tx_sq_num;
3839 io_tx_cq_num = max_queue_ext->max_tx_cq_num;
3841 struct ena_admin_queue_feature_desc *max_queues =
3842 &get_feat_ctx->max_queues;
3843 io_tx_sq_num = max_queues->max_sq_num;
3844 io_tx_cq_num = max_queues->max_cq_num;
3845 io_rx_num = min_t(u32, io_tx_sq_num, io_tx_cq_num);
3848 /* In case of LLQ use the llq fields for the tx SQ/CQ */
3849 if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
3850 io_tx_sq_num = get_feat_ctx->llq.max_llq_num;
3852 max_num_io_queues = min_t(u32, num_online_cpus(), ENA_MAX_NUM_IO_QUEUES);
3853 max_num_io_queues = min_t(u32, max_num_io_queues, io_rx_num);
3854 max_num_io_queues = min_t(u32, max_num_io_queues, io_tx_sq_num);
3855 max_num_io_queues = min_t(u32, max_num_io_queues, io_tx_cq_num);
3856 /* 1 IRQ for mgmnt and 1 IRQ for each IO direction */
3857 max_num_io_queues = min_t(u32, max_num_io_queues, pci_msix_vec_count(pdev) - 1);
3858 if (unlikely(!max_num_io_queues)) {
3859 dev_err(&pdev->dev, "The device doesn't have io queues\n");
3863 return max_num_io_queues;
3866 static int ena_set_queues_placement_policy(struct pci_dev *pdev,
3867 struct ena_com_dev *ena_dev,
3868 struct ena_admin_feature_llq_desc *llq,
3869 struct ena_llq_configurations *llq_default_configurations)
3873 u32 llq_feature_mask;
3875 llq_feature_mask = 1 << ENA_ADMIN_LLQ;
3876 if (!(ena_dev->supported_features & llq_feature_mask)) {
3878 "LLQ is not supported Fallback to host mode policy.\n");
3879 ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
3883 has_mem_bar = pci_select_bars(pdev, IORESOURCE_MEM) & BIT(ENA_MEM_BAR);
3885 rc = ena_com_config_dev_mode(ena_dev, llq, llq_default_configurations);
3888 "Failed to configure the device mode. Fallback to host mode policy.\n");
3889 ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
3893 /* Nothing to config, exit */
3894 if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
3899 "ENA device does not expose LLQ bar. Fallback to host mode policy.\n");
3900 ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
3904 ena_dev->mem_bar = devm_ioremap_wc(&pdev->dev,
3905 pci_resource_start(pdev, ENA_MEM_BAR),
3906 pci_resource_len(pdev, ENA_MEM_BAR));
3908 if (!ena_dev->mem_bar)
3914 static void ena_set_dev_offloads(struct ena_com_dev_get_features_ctx *feat,
3915 struct net_device *netdev)
3917 netdev_features_t dev_features = 0;
3919 /* Set offload features */
3920 if (feat->offload.tx &
3921 ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_MASK)
3922 dev_features |= NETIF_F_IP_CSUM;
3924 if (feat->offload.tx &
3925 ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_PART_MASK)
3926 dev_features |= NETIF_F_IPV6_CSUM;
3928 if (feat->offload.tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_MASK)
3929 dev_features |= NETIF_F_TSO;
3931 if (feat->offload.tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV6_MASK)
3932 dev_features |= NETIF_F_TSO6;
3934 if (feat->offload.tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_ECN_MASK)
3935 dev_features |= NETIF_F_TSO_ECN;
3937 if (feat->offload.rx_supported &
3938 ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_MASK)
3939 dev_features |= NETIF_F_RXCSUM;
3941 if (feat->offload.rx_supported &
3942 ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV6_CSUM_MASK)
3943 dev_features |= NETIF_F_RXCSUM;
3951 netdev->hw_features |= netdev->features;
3952 netdev->vlan_features |= netdev->features;
3955 static void ena_set_conf_feat_params(struct ena_adapter *adapter,
3956 struct ena_com_dev_get_features_ctx *feat)
3958 struct net_device *netdev = adapter->netdev;
3960 /* Copy mac address */
3961 if (!is_valid_ether_addr(feat->dev_attr.mac_addr)) {
3962 eth_hw_addr_random(netdev);
3963 ether_addr_copy(adapter->mac_addr, netdev->dev_addr);
3965 ether_addr_copy(adapter->mac_addr, feat->dev_attr.mac_addr);
3966 ether_addr_copy(netdev->dev_addr, adapter->mac_addr);
3969 /* Set offload features */
3970 ena_set_dev_offloads(feat, netdev);
3972 adapter->max_mtu = feat->dev_attr.max_mtu;
3973 netdev->max_mtu = adapter->max_mtu;
3974 netdev->min_mtu = ENA_MIN_MTU;
3977 static int ena_rss_init_default(struct ena_adapter *adapter)
3979 struct ena_com_dev *ena_dev = adapter->ena_dev;
3980 struct device *dev = &adapter->pdev->dev;
3984 rc = ena_com_rss_init(ena_dev, ENA_RX_RSS_TABLE_LOG_SIZE);
3986 dev_err(dev, "Cannot init indirect table\n");
3990 for (i = 0; i < ENA_RX_RSS_TABLE_SIZE; i++) {
3991 val = ethtool_rxfh_indir_default(i, adapter->num_io_queues);
3992 rc = ena_com_indirect_table_fill_entry(ena_dev, i,
3993 ENA_IO_RXQ_IDX(val));
3994 if (unlikely(rc && (rc != -EOPNOTSUPP))) {
3995 dev_err(dev, "Cannot fill indirect table\n");
3996 goto err_fill_indir;
4000 rc = ena_com_fill_hash_function(ena_dev, ENA_ADMIN_CRC32, NULL,
4001 ENA_HASH_KEY_SIZE, 0xFFFFFFFF);
4002 if (unlikely(rc && (rc != -EOPNOTSUPP))) {
4003 dev_err(dev, "Cannot fill hash function\n");
4004 goto err_fill_indir;
4007 rc = ena_com_set_default_hash_ctrl(ena_dev);
4008 if (unlikely(rc && (rc != -EOPNOTSUPP))) {
4009 dev_err(dev, "Cannot fill hash control\n");
4010 goto err_fill_indir;
4016 ena_com_rss_destroy(ena_dev);
4022 static void ena_release_bars(struct ena_com_dev *ena_dev, struct pci_dev *pdev)
4024 int release_bars = pci_select_bars(pdev, IORESOURCE_MEM) & ENA_BAR_MASK;
4026 pci_release_selected_regions(pdev, release_bars);
4029 static void set_default_llq_configurations(struct ena_llq_configurations *llq_config)
4031 llq_config->llq_header_location = ENA_ADMIN_INLINE_HEADER;
4032 llq_config->llq_ring_entry_size = ENA_ADMIN_LIST_ENTRY_SIZE_128B;
4033 llq_config->llq_stride_ctrl = ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY;
4034 llq_config->llq_num_decs_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2;
4035 llq_config->llq_ring_entry_size_value = 128;
4038 static int ena_calc_io_queue_size(struct ena_calc_queue_size_ctx *ctx)
4040 struct ena_admin_feature_llq_desc *llq = &ctx->get_feat_ctx->llq;
4041 struct ena_com_dev *ena_dev = ctx->ena_dev;
4042 u32 tx_queue_size = ENA_DEFAULT_RING_SIZE;
4043 u32 rx_queue_size = ENA_DEFAULT_RING_SIZE;
4044 u32 max_tx_queue_size;
4045 u32 max_rx_queue_size;
4047 if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) {
4048 struct ena_admin_queue_ext_feature_fields *max_queue_ext =
4049 &ctx->get_feat_ctx->max_queue_ext.max_queue_ext;
4050 max_rx_queue_size = min_t(u32, max_queue_ext->max_rx_cq_depth,
4051 max_queue_ext->max_rx_sq_depth);
4052 max_tx_queue_size = max_queue_ext->max_tx_cq_depth;
4054 if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
4055 max_tx_queue_size = min_t(u32, max_tx_queue_size,
4056 llq->max_llq_depth);
4058 max_tx_queue_size = min_t(u32, max_tx_queue_size,
4059 max_queue_ext->max_tx_sq_depth);
4061 ctx->max_tx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS,
4062 max_queue_ext->max_per_packet_tx_descs);
4063 ctx->max_rx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS,
4064 max_queue_ext->max_per_packet_rx_descs);
4066 struct ena_admin_queue_feature_desc *max_queues =
4067 &ctx->get_feat_ctx->max_queues;
4068 max_rx_queue_size = min_t(u32, max_queues->max_cq_depth,
4069 max_queues->max_sq_depth);
4070 max_tx_queue_size = max_queues->max_cq_depth;
4072 if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
4073 max_tx_queue_size = min_t(u32, max_tx_queue_size,
4074 llq->max_llq_depth);
4076 max_tx_queue_size = min_t(u32, max_tx_queue_size,
4077 max_queues->max_sq_depth);
4079 ctx->max_tx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS,
4080 max_queues->max_packet_tx_descs);
4081 ctx->max_rx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS,
4082 max_queues->max_packet_rx_descs);
4085 max_tx_queue_size = rounddown_pow_of_two(max_tx_queue_size);
4086 max_rx_queue_size = rounddown_pow_of_two(max_rx_queue_size);
4088 tx_queue_size = clamp_val(tx_queue_size, ENA_MIN_RING_SIZE,
4090 rx_queue_size = clamp_val(rx_queue_size, ENA_MIN_RING_SIZE,
4093 tx_queue_size = rounddown_pow_of_two(tx_queue_size);
4094 rx_queue_size = rounddown_pow_of_two(rx_queue_size);
4096 ctx->max_tx_queue_size = max_tx_queue_size;
4097 ctx->max_rx_queue_size = max_rx_queue_size;
4098 ctx->tx_queue_size = tx_queue_size;
4099 ctx->rx_queue_size = rx_queue_size;
4104 /* ena_probe - Device Initialization Routine
4105 * @pdev: PCI device information struct
4106 * @ent: entry in ena_pci_tbl
4108 * Returns 0 on success, negative on failure
4110 * ena_probe initializes an adapter identified by a pci_dev structure.
4111 * The OS initialization, configuring of the adapter private structure,
4112 * and a hardware reset occur.
4114 static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
4116 struct ena_com_dev_get_features_ctx get_feat_ctx;
4117 struct ena_calc_queue_size_ctx calc_queue_ctx = { 0 };
4118 struct ena_llq_configurations llq_config;
4119 struct ena_com_dev *ena_dev = NULL;
4120 struct ena_adapter *adapter;
4121 struct net_device *netdev;
4122 static int adapters_found;
4123 u32 max_num_io_queues;
4124 char *queue_type_str;
4128 dev_dbg(&pdev->dev, "%s\n", __func__);
4130 dev_info_once(&pdev->dev, "%s", version);
4132 rc = pci_enable_device_mem(pdev);
4134 dev_err(&pdev->dev, "pci_enable_device_mem() failed!\n");
4138 pci_set_master(pdev);
4140 ena_dev = vzalloc(sizeof(*ena_dev));
4143 goto err_disable_device;
4146 bars = pci_select_bars(pdev, IORESOURCE_MEM) & ENA_BAR_MASK;
4147 rc = pci_request_selected_regions(pdev, bars, DRV_MODULE_NAME);
4149 dev_err(&pdev->dev, "pci_request_selected_regions failed %d\n",
4151 goto err_free_ena_dev;
4154 ena_dev->reg_bar = devm_ioremap(&pdev->dev,
4155 pci_resource_start(pdev, ENA_REG_BAR),
4156 pci_resource_len(pdev, ENA_REG_BAR));
4157 if (!ena_dev->reg_bar) {
4158 dev_err(&pdev->dev, "failed to remap regs bar\n");
4160 goto err_free_region;
4163 ena_dev->dmadev = &pdev->dev;
4165 rc = ena_device_init(ena_dev, pdev, &get_feat_ctx, &wd_state);
4167 dev_err(&pdev->dev, "ena device init failed\n");
4170 goto err_free_region;
4173 set_default_llq_configurations(&llq_config);
4175 rc = ena_set_queues_placement_policy(pdev, ena_dev, &get_feat_ctx.llq,
4178 dev_err(&pdev->dev, "ena device init failed\n");
4179 goto err_device_destroy;
4182 calc_queue_ctx.ena_dev = ena_dev;
4183 calc_queue_ctx.get_feat_ctx = &get_feat_ctx;
4184 calc_queue_ctx.pdev = pdev;
4186 /* Initial Tx and RX interrupt delay. Assumes 1 usec granularity.
4187 * Updated during device initialization with the real granularity
4189 ena_dev->intr_moder_tx_interval = ENA_INTR_INITIAL_TX_INTERVAL_USECS;
4190 ena_dev->intr_moder_rx_interval = ENA_INTR_INITIAL_RX_INTERVAL_USECS;
4191 ena_dev->intr_delay_resolution = ENA_DEFAULT_INTR_DELAY_RESOLUTION;
4192 max_num_io_queues = ena_calc_max_io_queue_num(pdev, ena_dev, &get_feat_ctx);
4193 rc = ena_calc_io_queue_size(&calc_queue_ctx);
4194 if (rc || !max_num_io_queues) {
4196 goto err_device_destroy;
4199 /* dev zeroed in init_etherdev */
4200 netdev = alloc_etherdev_mq(sizeof(struct ena_adapter), max_num_io_queues);
4202 dev_err(&pdev->dev, "alloc_etherdev_mq failed\n");
4204 goto err_device_destroy;
4207 SET_NETDEV_DEV(netdev, &pdev->dev);
4209 adapter = netdev_priv(netdev);
4210 pci_set_drvdata(pdev, adapter);
4212 adapter->ena_dev = ena_dev;
4213 adapter->netdev = netdev;
4214 adapter->pdev = pdev;
4216 ena_set_conf_feat_params(adapter, &get_feat_ctx);
4218 adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
4219 adapter->reset_reason = ENA_REGS_RESET_NORMAL;
4221 adapter->requested_tx_ring_size = calc_queue_ctx.tx_queue_size;
4222 adapter->requested_rx_ring_size = calc_queue_ctx.rx_queue_size;
4223 adapter->max_tx_ring_size = calc_queue_ctx.max_tx_queue_size;
4224 adapter->max_rx_ring_size = calc_queue_ctx.max_rx_queue_size;
4225 adapter->max_tx_sgl_size = calc_queue_ctx.max_tx_sgl_size;
4226 adapter->max_rx_sgl_size = calc_queue_ctx.max_rx_sgl_size;
4228 adapter->num_io_queues = max_num_io_queues;
4229 adapter->max_num_io_queues = max_num_io_queues;
4231 adapter->xdp_first_ring = 0;
4232 adapter->xdp_num_queues = 0;
4234 adapter->last_monitored_tx_qid = 0;
4236 adapter->rx_copybreak = ENA_DEFAULT_RX_COPYBREAK;
4237 adapter->wd_state = wd_state;
4239 snprintf(adapter->name, ENA_NAME_MAX_LEN, "ena_%d", adapters_found);
4241 rc = ena_com_init_interrupt_moderation(adapter->ena_dev);
4244 "Failed to query interrupt moderation feature\n");
4245 goto err_netdev_destroy;
4247 ena_init_io_rings(adapter,
4249 adapter->xdp_num_queues +
4250 adapter->num_io_queues);
4252 netdev->netdev_ops = &ena_netdev_ops;
4253 netdev->watchdog_timeo = TX_TIMEOUT;
4254 ena_set_ethtool_ops(netdev);
4256 netdev->priv_flags |= IFF_UNICAST_FLT;
4258 u64_stats_init(&adapter->syncp);
4260 rc = ena_enable_msix_and_set_admin_interrupts(adapter);
4263 "Failed to enable and set the admin interrupts\n");
4264 goto err_worker_destroy;
4266 rc = ena_rss_init_default(adapter);
4267 if (rc && (rc != -EOPNOTSUPP)) {
4268 dev_err(&pdev->dev, "Cannot init RSS rc: %d\n", rc);
4272 ena_config_debug_area(adapter);
4274 memcpy(adapter->netdev->perm_addr, adapter->mac_addr, netdev->addr_len);
4276 netif_carrier_off(netdev);
4278 rc = register_netdev(netdev);
4280 dev_err(&pdev->dev, "Cannot register net device\n");
4284 INIT_WORK(&adapter->reset_task, ena_fw_reset_device);
4286 adapter->last_keep_alive_jiffies = jiffies;
4287 adapter->keep_alive_timeout = ENA_DEVICE_KALIVE_TIMEOUT;
4288 adapter->missing_tx_completion_to = TX_TIMEOUT;
4289 adapter->missing_tx_completion_threshold = MAX_NUM_OF_TIMEOUTED_PACKETS;
4291 ena_update_hints(adapter, &get_feat_ctx.hw_hints);
4293 timer_setup(&adapter->timer_service, ena_timer_service, 0);
4294 mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ));
4296 if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
4297 queue_type_str = "Regular";
4299 queue_type_str = "Low Latency";
4301 dev_info(&pdev->dev,
4302 "%s found at mem %lx, mac addr %pM, Placement policy: %s\n",
4303 DEVICE_NAME, (long)pci_resource_start(pdev, 0),
4304 netdev->dev_addr, queue_type_str);
4306 set_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags);
4313 ena_com_delete_debug_area(ena_dev);
4314 ena_com_rss_destroy(ena_dev);
4316 ena_com_dev_reset(ena_dev, ENA_REGS_RESET_INIT_ERR);
4317 /* stop submitting admin commands on a device that was reset */
4318 ena_com_set_admin_running_state(ena_dev, false);
4319 ena_free_mgmnt_irq(adapter);
4320 ena_disable_msix(adapter);
4322 del_timer(&adapter->timer_service);
4324 free_netdev(netdev);
4326 ena_com_delete_host_info(ena_dev);
4327 ena_com_admin_destroy(ena_dev);
4329 ena_release_bars(ena_dev, pdev);
4333 pci_disable_device(pdev);
4337 /*****************************************************************************/
4339 /* __ena_shutoff - Helper used in both PCI remove/shutdown routines
4340 * @pdev: PCI device information struct
4341 * @shutdown: Is it a shutdown operation? If false, it is a removal
4343 * __ena_shutoff is a helper routine that does the real work on shutdown and
4344 * removal paths; the difference between those paths is whether to
4345 * detach or unregister the netdevice.
4347 static void __ena_shutoff(struct pci_dev *pdev, bool shutdown)
4349 struct ena_adapter *adapter = pci_get_drvdata(pdev);
4350 struct ena_com_dev *ena_dev;
4351 struct net_device *netdev;
4353 ena_dev = adapter->ena_dev;
4354 netdev = adapter->netdev;
4356 #ifdef CONFIG_RFS_ACCEL
4357 if ((adapter->msix_vecs >= 1) && (netdev->rx_cpu_rmap)) {
4358 free_irq_cpu_rmap(netdev->rx_cpu_rmap);
4359 netdev->rx_cpu_rmap = NULL;
4361 #endif /* CONFIG_RFS_ACCEL */
4362 del_timer_sync(&adapter->timer_service);
4364 cancel_work_sync(&adapter->reset_task);
4366 rtnl_lock(); /* lock released inside the below if-else block */
4367 ena_destroy_device(adapter, true);
4369 netif_device_detach(netdev);
4374 unregister_netdev(netdev);
4375 free_netdev(netdev);
4378 ena_com_rss_destroy(ena_dev);
4380 ena_com_delete_debug_area(ena_dev);
4382 ena_com_delete_host_info(ena_dev);
4384 ena_release_bars(ena_dev, pdev);
4386 pci_disable_device(pdev);
4391 /* ena_remove - Device Removal Routine
4392 * @pdev: PCI device information struct
4394 * ena_remove is called by the PCI subsystem to alert the driver
4395 * that it should release a PCI device.
4398 static void ena_remove(struct pci_dev *pdev)
4400 __ena_shutoff(pdev, false);
4403 /* ena_shutdown - Device Shutdown Routine
4404 * @pdev: PCI device information struct
4406 * ena_shutdown is called by the PCI subsystem to alert the driver that
4407 * a shutdown/reboot (or kexec) is happening and device must be disabled.
4410 static void ena_shutdown(struct pci_dev *pdev)
4412 __ena_shutoff(pdev, true);
4416 /* ena_suspend - PM suspend callback
4417 * @pdev: PCI device information struct
4418 * @state: power state
4420 static int ena_suspend(struct pci_dev *pdev, pm_message_t state)
4422 struct ena_adapter *adapter = pci_get_drvdata(pdev);
4424 u64_stats_update_begin(&adapter->syncp);
4425 adapter->dev_stats.suspend++;
4426 u64_stats_update_end(&adapter->syncp);
4429 if (unlikely(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) {
4431 "ignoring device reset request as the device is being suspended\n");
4432 clear_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
4434 ena_destroy_device(adapter, true);
/* ena_resume - PM resume callback
 * @pdev: PCI device information struct
 */
static int ena_resume(struct pci_dev *pdev)
{
	struct ena_adapter *adapter = pci_get_drvdata(pdev);
	int rc;

	u64_stats_update_begin(&adapter->syncp);
	adapter->dev_stats.resume++;
	u64_stats_update_end(&adapter->syncp);
	rtnl_lock();
	rc = ena_restore_device(adapter);
	rtnl_unlock();
	return rc;
}
#endif /* CONFIG_PM */

static struct pci_driver ena_pci_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= ena_pci_tbl,
	.probe		= ena_probe,
	.remove		= ena_remove,
	.shutdown	= ena_shutdown,
#ifdef CONFIG_PM
	.suspend	= ena_suspend,
	.resume		= ena_resume,
#endif
	.sriov_configure = pci_sriov_configure_simple,
};

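/* pci_sriov_configure_simple lets userspace toggle SR-IOV VFs with no
 * driver-specific logic, e.g. (illustrative device address):
 *   echo 4 > /sys/bus/pci/devices/0000:00:05.0/sriov_numvfs
 */
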
static int __init ena_init(void)
{
	pr_info("%s", version);
	ena_wq = create_singlethread_workqueue(DRV_MODULE_NAME);
	if (!ena_wq) {
		pr_err("Failed to create workqueue\n");
		return -ENOMEM;
	}
	return pci_register_driver(&ena_pci_driver);
}

static void __exit ena_cleanup(void)
{
	pci_unregister_driver(&ena_pci_driver);
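
	/* Unregistering the driver first guarantees no adapter can still
	 * queue reset work on ena_wq by the time it is destroyed.
	 */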
	destroy_workqueue(ena_wq);
}

/******************************************************************************
 ******************************** AENQ Handlers *******************************
 *****************************************************************************/
/* ena_update_on_link_change:
 * Notify the network interface about the change in link status
 */
static void ena_update_on_link_change(void *adapter_data,
				      struct ena_admin_aenq_entry *aenq_e)
{
	struct ena_adapter *adapter = (struct ena_adapter *)adapter_data;
	struct ena_admin_aenq_link_change_desc *aenq_desc =
		(struct ena_admin_aenq_link_change_desc *)aenq_e;
	int status = aenq_desc->flags &
		ENA_ADMIN_AENQ_LINK_CHANGE_DESC_LINK_STATUS_MASK;
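
	/* ENA_FLAG_ONGOING_RESET defers netif_carrier_on(): the reset path
	 * raises the carrier itself once the device is fully restored.
	 */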
	if (status) {
		netdev_dbg(adapter->netdev, "%s\n", __func__);
		set_bit(ENA_FLAG_LINK_UP, &adapter->flags);
		if (!test_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags))
			netif_carrier_on(adapter->netdev);
	} else {
		clear_bit(ENA_FLAG_LINK_UP, &adapter->flags);
		netif_carrier_off(adapter->netdev);
	}
}
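
/* ena_keep_alive_wd - Keep-alive watchdog AENQ handler. The device posts
 * KEEP_ALIVE events periodically; the driver's timer service treats a stale
 * last_keep_alive_jiffies value as a lost device and triggers a reset. The
 * event also carries the device-wide rx drop count.
 */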
static void ena_keep_alive_wd(void *adapter_data,
			      struct ena_admin_aenq_entry *aenq_e)
{
	struct ena_adapter *adapter = (struct ena_adapter *)adapter_data;
	struct ena_admin_aenq_keep_alive_desc *desc;
	u64 rx_drops;

	desc = (struct ena_admin_aenq_keep_alive_desc *)aenq_e;
	adapter->last_keep_alive_jiffies = jiffies;
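
	/* The device reports the drop count as two 32-bit halves; assemble
	 * them into one 64-bit value.
	 */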
	rx_drops = ((u64)desc->rx_drops_high << 32) | desc->rx_drops_low;

	u64_stats_update_begin(&adapter->syncp);
	adapter->dev_stats.rx_drops = rx_drops;
	u64_stats_update_end(&adapter->syncp);
}

static void ena_notification(void *adapter_data,
			     struct ena_admin_aenq_entry *aenq_e)
{
	struct ena_adapter *adapter = (struct ena_adapter *)adapter_data;
	struct ena_admin_ena_hw_hints *hints;

	WARN(aenq_e->aenq_common_desc.group != ENA_ADMIN_NOTIFICATION,
	     "Invalid group(%x) expected %x\n",
	     aenq_e->aenq_common_desc.group,
	     ENA_ADMIN_NOTIFICATION);

	switch (aenq_e->aenq_common_desc.syndrom) {
	case ENA_ADMIN_UPDATE_HINTS:
		hints = (struct ena_admin_ena_hw_hints *)
			(&aenq_e->inline_data_w4);
		ena_update_hints(adapter, hints);
		break;
	default:
		netif_err(adapter, drv, adapter->netdev,
			  "Invalid aenq notification syndrome %d\n",
			  aenq_e->aenq_common_desc.syndrom);
	}
}

/* This handler will be called for an unknown event group or for events with
 * unimplemented handlers.
 */
static void unimplemented_aenq_handler(void *data,
				       struct ena_admin_aenq_entry *aenq_e)
{
	struct ena_adapter *adapter = (struct ena_adapter *)data;

	netif_err(adapter, drv, adapter->netdev,
		  "Unknown event was received or event with unimplemented handler\n");
}
static struct ena_aenq_handlers aenq_handlers = {
	.handlers = {
		[ENA_ADMIN_LINK_CHANGE] = ena_update_on_link_change,
		[ENA_ADMIN_NOTIFICATION] = ena_notification,
		[ENA_ADMIN_KEEP_ALIVE] = ena_keep_alive_wd,
	},
	.unimplemented_handler = unimplemented_aenq_handler
};

module_init(ena_init);
module_exit(ena_cleanup);