1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
3 * Copyright 2015-2020 Amazon.com, Inc. or its affiliates. All rights reserved.
6 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
8 #ifdef CONFIG_RFS_ACCEL
9 #include <linux/cpu_rmap.h>
10 #endif /* CONFIG_RFS_ACCEL */
11 #include <linux/ethtool.h>
12 #include <linux/kernel.h>
13 #include <linux/module.h>
14 #include <linux/numa.h>
15 #include <linux/pci.h>
16 #include <linux/utsname.h>
17 #include <linux/version.h>
18 #include <linux/vmalloc.h>
21 #include "ena_netdev.h"
22 #include "ena_pci_id_tbl.h"
25 MODULE_AUTHOR("Amazon.com, Inc. or its affiliates");
26 MODULE_DESCRIPTION(DEVICE_NAME);
27 MODULE_LICENSE("GPL");
29 /* Time in jiffies before concluding the transmitter is hung. */
30 #define TX_TIMEOUT (5 * HZ)
32 #define ENA_MAX_RINGS min_t(unsigned int, ENA_MAX_NUM_IO_QUEUES, num_possible_cpus())
34 #define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_IFUP | \
35 NETIF_MSG_IFDOWN | NETIF_MSG_TX_ERR | NETIF_MSG_RX_ERR)
37 static struct ena_aenq_handlers aenq_handlers;
39 static struct workqueue_struct *ena_wq;
41 MODULE_DEVICE_TABLE(pci, ena_pci_tbl);
43 static int ena_rss_init_default(struct ena_adapter *adapter);
44 static void check_for_admin_com_state(struct ena_adapter *adapter);
45 static void ena_destroy_device(struct ena_adapter *adapter, bool graceful);
46 static int ena_restore_device(struct ena_adapter *adapter);
48 static void ena_tx_timeout(struct net_device *dev, unsigned int txqueue)
50 enum ena_regs_reset_reason_types reset_reason = ENA_REGS_RESET_OS_NETDEV_WD;
51 struct ena_adapter *adapter = netdev_priv(dev);
52 unsigned int time_since_last_napi, threshold;
53 struct ena_ring *tx_ring;
56 if (txqueue >= adapter->num_io_queues) {
57 netdev_err(dev, "TX timeout on invalid queue %u\n", txqueue);
61 threshold = jiffies_to_usecs(dev->watchdog_timeo);
62 tx_ring = &adapter->tx_ring[txqueue];
64 time_since_last_napi = jiffies_to_usecs(jiffies - tx_ring->tx_stats.last_napi_jiffies);
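/* Both values are in microseconds: the netdev watchdog timeout serves as
 * the stall threshold and is compared against the time elapsed since this
 * ring's napi handler last ran.
 */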
65 napi_scheduled = !!(tx_ring->napi->state & NAPIF_STATE_SCHED);
68 "TX q %d is paused for too long (threshold %u). Time since last napi %u usec. napi scheduled: %d\n",
74 if (threshold < time_since_last_napi && napi_scheduled) {
76 "napi handler hasn't been called for a long time but is scheduled\n");
77 reset_reason = ENA_REGS_RESET_SUSPECTED_POLL_STARVATION;
80 /* Change the state of the device to trigger reset
81 * Check that a reset was not already triggered and is not in progress
83 if (test_and_set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))
86 ena_reset_device(adapter, reset_reason);
87 ena_increase_stat(&adapter->dev_stats.tx_timeout, 1, &adapter->syncp);
90 static void update_rx_ring_mtu(struct ena_adapter *adapter, int mtu)
94 for (i = 0; i < adapter->num_io_queues; i++)
95 adapter->rx_ring[i].mtu = mtu;
98 static int ena_change_mtu(struct net_device *dev, int new_mtu)
100 struct ena_adapter *adapter = netdev_priv(dev);
103 ret = ena_com_set_dev_mtu(adapter->ena_dev, new_mtu);
105 netif_dbg(adapter, drv, dev, "Set MTU to %d\n", new_mtu);
106 update_rx_ring_mtu(adapter, new_mtu);
109 netif_err(adapter, drv, dev, "Failed to set MTU to %d\n",
116 int ena_xmit_common(struct ena_adapter *adapter,
117 struct ena_ring *ring,
118 struct ena_tx_buffer *tx_info,
119 struct ena_com_tx_ctx *ena_tx_ctx,
125 if (unlikely(ena_com_is_doorbell_needed(ring->ena_com_io_sq,
127 netif_dbg(adapter, tx_queued, adapter->netdev,
128 "llq tx max burst size of queue %d achieved, writing doorbell to send burst\n",
130 ena_ring_tx_doorbell(ring);
133 /* prepare the packet's descriptors to dma engine */
134 rc = ena_com_prepare_tx(ring->ena_com_io_sq, ena_tx_ctx,
137 /* In case there isn't enough space in the queue for the packet,
138 * we simply drop it. All other failure reasons of
139 * ena_com_prepare_tx() are fatal and therefore require a device reset.
142 netif_err(adapter, tx_queued, adapter->netdev,
143 "Failed to prepare tx bufs\n");
144 ena_increase_stat(&ring->tx_stats.prepare_ctx_err, 1, &ring->syncp);
146 ena_reset_device(adapter, ENA_REGS_RESET_DRIVER_INVALID_STATE);
150 u64_stats_update_begin(&ring->syncp);
151 ring->tx_stats.cnt++;
152 ring->tx_stats.bytes += bytes;
153 u64_stats_update_end(&ring->syncp);
155 tx_info->tx_descs = nb_hw_desc;
156 tx_info->total_tx_size = bytes;
157 tx_info->last_jiffies = jiffies;
158 tx_info->print_once = 0;
160 ring->next_to_use = ENA_TX_RING_IDX_NEXT(next_to_use,
165 static int ena_init_rx_cpu_rmap(struct ena_adapter *adapter)
167 #ifdef CONFIG_RFS_ACCEL
171 adapter->netdev->rx_cpu_rmap = alloc_irq_cpu_rmap(adapter->num_io_queues);
172 if (!adapter->netdev->rx_cpu_rmap)
174 for (i = 0; i < adapter->num_io_queues; i++) {
175 int irq_idx = ENA_IO_IRQ_IDX(i);
177 rc = irq_cpu_rmap_add(adapter->netdev->rx_cpu_rmap,
178 pci_irq_vector(adapter->pdev, irq_idx));
180 free_irq_cpu_rmap(adapter->netdev->rx_cpu_rmap);
181 adapter->netdev->rx_cpu_rmap = NULL;
185 #endif /* CONFIG_RFS_ACCEL */
189 static void ena_init_io_rings_common(struct ena_adapter *adapter,
190 struct ena_ring *ring, u16 qid)
193 ring->pdev = adapter->pdev;
194 ring->dev = &adapter->pdev->dev;
195 ring->netdev = adapter->netdev;
196 ring->napi = &adapter->ena_napi[qid].napi;
197 ring->adapter = adapter;
198 ring->ena_dev = adapter->ena_dev;
199 ring->per_napi_packets = 0;
202 ring->no_interrupt_event_cnt = 0;
203 u64_stats_init(&ring->syncp);
206 void ena_init_io_rings(struct ena_adapter *adapter,
207 int first_index, int count)
209 struct ena_com_dev *ena_dev;
210 struct ena_ring *txr, *rxr;
213 ena_dev = adapter->ena_dev;
215 for (i = first_index; i < first_index + count; i++) {
216 txr = &adapter->tx_ring[i];
217 rxr = &adapter->rx_ring[i];
219 /* TX common ring state */
220 ena_init_io_rings_common(adapter, txr, i);
222 /* TX specific ring state */
223 txr->ring_size = adapter->requested_tx_ring_size;
224 txr->tx_max_header_size = ena_dev->tx_max_header_size;
225 txr->tx_mem_queue_type = ena_dev->tx_mem_queue_type;
226 txr->sgl_size = adapter->max_tx_sgl_size;
227 txr->smoothed_interval =
228 ena_com_get_nonadaptive_moderation_interval_tx(ena_dev);
229 txr->disable_meta_caching = adapter->disable_meta_caching;
230 spin_lock_init(&txr->xdp_tx_lock);
232 /* Don't init RX queues for xdp queues */
233 if (!ENA_IS_XDP_INDEX(adapter, i)) {
234 /* RX common ring state */
235 ena_init_io_rings_common(adapter, rxr, i);
237 /* RX specific ring state */
238 rxr->ring_size = adapter->requested_rx_ring_size;
239 rxr->rx_copybreak = adapter->rx_copybreak;
240 rxr->sgl_size = adapter->max_rx_sgl_size;
241 rxr->smoothed_interval =
242 ena_com_get_nonadaptive_moderation_interval_rx(ena_dev);
243 rxr->empty_rx_queue = 0;
244 rxr->rx_headroom = NET_SKB_PAD;
245 adapter->ena_napi[i].dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
246 rxr->xdp_ring = &adapter->tx_ring[i + adapter->num_io_queues];
251 /* ena_setup_tx_resources - allocate I/O Tx resources (Descriptors)
252 * @adapter: network interface device structure
255 * Return 0 on success, negative on failure
257 static int ena_setup_tx_resources(struct ena_adapter *adapter, int qid)
259 struct ena_ring *tx_ring = &adapter->tx_ring[qid];
260 struct ena_irq *ena_irq = &adapter->irq_tbl[ENA_IO_IRQ_IDX(qid)];
263 if (tx_ring->tx_buffer_info) {
264 netif_err(adapter, ifup,
265 adapter->netdev, "tx_buffer_info info is not NULL");
269 size = sizeof(struct ena_tx_buffer) * tx_ring->ring_size;
270 node = cpu_to_node(ena_irq->cpu);
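/* Prefer allocating the per-queue bookkeeping arrays on the NUMA node of
 * the CPU servicing this queue's IRQ; each vzalloc_node() below falls
 * back to vzalloc() on any node if node-local memory is unavailable.
 */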
272 tx_ring->tx_buffer_info = vzalloc_node(size, node);
273 if (!tx_ring->tx_buffer_info) {
274 tx_ring->tx_buffer_info = vzalloc(size);
275 if (!tx_ring->tx_buffer_info)
276 goto err_tx_buffer_info;
279 size = sizeof(u16) * tx_ring->ring_size;
280 tx_ring->free_ids = vzalloc_node(size, node);
281 if (!tx_ring->free_ids) {
282 tx_ring->free_ids = vzalloc(size);
283 if (!tx_ring->free_ids)
284 goto err_tx_free_ids;
287 size = tx_ring->tx_max_header_size;
288 tx_ring->push_buf_intermediate_buf = vzalloc_node(size, node);
289 if (!tx_ring->push_buf_intermediate_buf) {
290 tx_ring->push_buf_intermediate_buf = vzalloc(size);
291 if (!tx_ring->push_buf_intermediate_buf)
292 goto err_push_buf_intermediate_buf;
295 /* Req id ring for TX out of order completions */
296 for (i = 0; i < tx_ring->ring_size; i++)
297 tx_ring->free_ids[i] = i;
299 /* Reset tx statistics */
300 memset(&tx_ring->tx_stats, 0x0, sizeof(tx_ring->tx_stats));
302 tx_ring->next_to_use = 0;
303 tx_ring->next_to_clean = 0;
304 tx_ring->cpu = ena_irq->cpu;
305 tx_ring->numa_node = node;
308 err_push_buf_intermediate_buf:
309 vfree(tx_ring->free_ids);
310 tx_ring->free_ids = NULL;
312 vfree(tx_ring->tx_buffer_info);
313 tx_ring->tx_buffer_info = NULL;
318 /* ena_free_tx_resources - Free I/O Tx Resources per Queue
319 * @adapter: network interface device structure
322 * Free all transmit software resources
324 static void ena_free_tx_resources(struct ena_adapter *adapter, int qid)
326 struct ena_ring *tx_ring = &adapter->tx_ring[qid];
328 vfree(tx_ring->tx_buffer_info);
329 tx_ring->tx_buffer_info = NULL;
331 vfree(tx_ring->free_ids);
332 tx_ring->free_ids = NULL;
334 vfree(tx_ring->push_buf_intermediate_buf);
335 tx_ring->push_buf_intermediate_buf = NULL;
338 int ena_setup_tx_resources_in_range(struct ena_adapter *adapter,
339 int first_index, int count)
343 for (i = first_index; i < first_index + count; i++) {
344 rc = ena_setup_tx_resources(adapter, i);
353 netif_err(adapter, ifup, adapter->netdev,
354 "Tx queue %d: allocation failed\n", i);
356 /* rewind the index freeing the rings as we go */
357 while (first_index < i--)
358 ena_free_tx_resources(adapter, i);
362 void ena_free_all_io_tx_resources_in_range(struct ena_adapter *adapter,
363 int first_index, int count)
367 for (i = first_index; i < first_index + count; i++)
368 ena_free_tx_resources(adapter, i);
371 /* ena_free_all_io_tx_resources - Free I/O Tx Resources for All Queues
372 * @adapter: board private structure
374 * Free all transmit software resources
376 void ena_free_all_io_tx_resources(struct ena_adapter *adapter)
378 ena_free_all_io_tx_resources_in_range(adapter,
380 adapter->xdp_num_queues +
381 adapter->num_io_queues);
384 /* ena_setup_rx_resources - allocate I/O Rx resources (Descriptors)
385 * @adapter: network interface device structure
388 * Returns 0 on success, negative on failure
390 static int ena_setup_rx_resources(struct ena_adapter *adapter,
393 struct ena_ring *rx_ring = &adapter->rx_ring[qid];
394 struct ena_irq *ena_irq = &adapter->irq_tbl[ENA_IO_IRQ_IDX(qid)];
397 if (rx_ring->rx_buffer_info) {
398 netif_err(adapter, ifup, adapter->netdev,
399 "rx_buffer_info is not NULL");
403 /* alloc an extra element so that in the rx path
404 * we can always prefetch rx_info + 1
406 size = sizeof(struct ena_rx_buffer) * (rx_ring->ring_size + 1);
407 node = cpu_to_node(ena_irq->cpu);
409 rx_ring->rx_buffer_info = vzalloc_node(size, node);
410 if (!rx_ring->rx_buffer_info) {
411 rx_ring->rx_buffer_info = vzalloc(size);
412 if (!rx_ring->rx_buffer_info)
416 size = sizeof(u16) * rx_ring->ring_size;
417 rx_ring->free_ids = vzalloc_node(size, node);
418 if (!rx_ring->free_ids) {
419 rx_ring->free_ids = vzalloc(size);
420 if (!rx_ring->free_ids) {
421 vfree(rx_ring->rx_buffer_info);
422 rx_ring->rx_buffer_info = NULL;
427 /* Req id ring for receiving RX pkts out of order */
428 for (i = 0; i < rx_ring->ring_size; i++)
429 rx_ring->free_ids[i] = i;
431 /* Reset rx statistics */
432 memset(&rx_ring->rx_stats, 0x0, sizeof(rx_ring->rx_stats));
434 rx_ring->next_to_clean = 0;
435 rx_ring->next_to_use = 0;
436 rx_ring->cpu = ena_irq->cpu;
437 rx_ring->numa_node = node;
442 /* ena_free_rx_resources - Free I/O Rx Resources
443 * @adapter: network interface device structure
446 * Free all receive software resources
448 static void ena_free_rx_resources(struct ena_adapter *adapter,
451 struct ena_ring *rx_ring = &adapter->rx_ring[qid];
453 vfree(rx_ring->rx_buffer_info);
454 rx_ring->rx_buffer_info = NULL;
456 vfree(rx_ring->free_ids);
457 rx_ring->free_ids = NULL;
460 /* ena_setup_all_rx_resources - allocate I/O Rx queues resources for all queues
461 * @adapter: board private structure
463 * Return 0 on success, negative on failure
465 static int ena_setup_all_rx_resources(struct ena_adapter *adapter)
469 for (i = 0; i < adapter->num_io_queues; i++) {
470 rc = ena_setup_rx_resources(adapter, i);
479 netif_err(adapter, ifup, adapter->netdev,
480 "Rx queue %d: allocation failed\n", i);
482 /* rewind the index freeing the rings as we go */
484 ena_free_rx_resources(adapter, i);
488 /* ena_free_all_io_rx_resources - Free I/O Rx Resources for All Queues
489 * @adapter: board private structure
491 * Free all receive software resources
493 static void ena_free_all_io_rx_resources(struct ena_adapter *adapter)
497 for (i = 0; i < adapter->num_io_queues; i++)
498 ena_free_rx_resources(adapter, i);
501 static struct page *ena_alloc_map_page(struct ena_ring *rx_ring,
506 /* This would allocate the page on the same NUMA node the executing code is running on */
509 page = dev_alloc_page();
511 ena_increase_stat(&rx_ring->rx_stats.page_alloc_fail, 1, &rx_ring->syncp);
512 return ERR_PTR(-ENOSPC);
515 /* To enable NIC-side port-mirroring, AKA SPAN port,
516 * we make the buffer readable from the nic as well
518 *dma = dma_map_page(rx_ring->dev, page, 0, ENA_PAGE_SIZE,
520 if (unlikely(dma_mapping_error(rx_ring->dev, *dma))) {
521 ena_increase_stat(&rx_ring->rx_stats.dma_mapping_err, 1,
524 return ERR_PTR(-EIO);
530 static int ena_alloc_rx_buffer(struct ena_ring *rx_ring,
531 struct ena_rx_buffer *rx_info)
533 int headroom = rx_ring->rx_headroom;
534 struct ena_com_buf *ena_buf;
539 /* restore page offset value in case it has been changed by device */
540 rx_info->buf_offset = headroom;
542 /* if previous allocated page is not used */
543 if (unlikely(rx_info->page))
546 /* We handle DMA here */
547 page = ena_alloc_map_page(rx_ring, &dma);
549 return PTR_ERR(page);
551 netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
552 "Allocate page %p, rx_info %p\n", page, rx_info);
554 tailroom = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
556 rx_info->page = page;
557 rx_info->dma_addr = dma;
558 rx_info->page_offset = 0;
559 ena_buf = &rx_info->ena_buf;
560 ena_buf->paddr = dma + headroom;
561 ena_buf->len = ENA_PAGE_SIZE - headroom - tailroom;
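/* Resulting page layout: [headroom | packet data | skb_shared_info tailroom].
 * Illustrative figures only (exact values depend on arch and config): with a
 * 4 KiB page, NET_SKB_PAD headroom and a few hundred bytes of tailroom,
 * roughly 3.7 KiB remain for packet data.
 */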
566 static void ena_unmap_rx_buff_attrs(struct ena_ring *rx_ring,
567 struct ena_rx_buffer *rx_info,
570 dma_unmap_page_attrs(rx_ring->dev, rx_info->dma_addr, ENA_PAGE_SIZE, DMA_BIDIRECTIONAL,
574 static void ena_free_rx_page(struct ena_ring *rx_ring,
575 struct ena_rx_buffer *rx_info)
577 struct page *page = rx_info->page;
579 if (unlikely(!page)) {
580 netif_warn(rx_ring->adapter, rx_err, rx_ring->netdev,
581 "Trying to free unallocated buffer\n");
585 ena_unmap_rx_buff_attrs(rx_ring, rx_info, 0);
588 rx_info->page = NULL;
591 static int ena_refill_rx_bufs(struct ena_ring *rx_ring, u32 num)
593 u16 next_to_use, req_id;
597 next_to_use = rx_ring->next_to_use;
599 for (i = 0; i < num; i++) {
600 struct ena_rx_buffer *rx_info;
602 req_id = rx_ring->free_ids[next_to_use];
604 rx_info = &rx_ring->rx_buffer_info[req_id];
606 rc = ena_alloc_rx_buffer(rx_ring, rx_info);
607 if (unlikely(rc < 0)) {
608 netif_warn(rx_ring->adapter, rx_err, rx_ring->netdev,
609 "Failed to allocate buffer for rx queue %d\n",
613 rc = ena_com_add_single_rx_desc(rx_ring->ena_com_io_sq,
617 netif_warn(rx_ring->adapter, rx_status, rx_ring->netdev,
618 "Failed to add buffer for rx queue %d\n",
622 next_to_use = ENA_RX_RING_IDX_NEXT(next_to_use,
626 if (unlikely(i < num)) {
627 ena_increase_stat(&rx_ring->rx_stats.refil_partial, 1,
629 netif_warn(rx_ring->adapter, rx_err, rx_ring->netdev,
630 "Refilled rx qid %d with only %d buffers (from %d)\n",
631 rx_ring->qid, i, num);
634 /* ena_com_write_sq_doorbell issues a wmb() */
636 ena_com_write_sq_doorbell(rx_ring->ena_com_io_sq);
638 rx_ring->next_to_use = next_to_use;
643 static void ena_free_rx_bufs(struct ena_adapter *adapter,
646 struct ena_ring *rx_ring = &adapter->rx_ring[qid];
649 for (i = 0; i < rx_ring->ring_size; i++) {
650 struct ena_rx_buffer *rx_info = &rx_ring->rx_buffer_info[i];
653 ena_free_rx_page(rx_ring, rx_info);
657 /* ena_refill_all_rx_bufs - allocate all queues Rx buffers
658 * @adapter: board private structure
660 static void ena_refill_all_rx_bufs(struct ena_adapter *adapter)
662 struct ena_ring *rx_ring;
665 for (i = 0; i < adapter->num_io_queues; i++) {
666 rx_ring = &adapter->rx_ring[i];
667 bufs_num = rx_ring->ring_size - 1;
668 rc = ena_refill_rx_bufs(rx_ring, bufs_num);
670 if (unlikely(rc != bufs_num))
671 netif_warn(rx_ring->adapter, rx_status, rx_ring->netdev,
672 "Refilling Queue %d failed. allocated %d buffers from: %d\n",
677 static void ena_free_all_rx_bufs(struct ena_adapter *adapter)
681 for (i = 0; i < adapter->num_io_queues; i++)
682 ena_free_rx_bufs(adapter, i);
685 void ena_unmap_tx_buff(struct ena_ring *tx_ring,
686 struct ena_tx_buffer *tx_info)
688 struct ena_com_buf *ena_buf;
692 ena_buf = tx_info->bufs;
693 cnt = tx_info->num_of_bufs;
698 if (tx_info->map_linear_data) {
699 dma_unmap_single(tx_ring->dev,
700 dma_unmap_addr(ena_buf, paddr),
701 dma_unmap_len(ena_buf, len),
707 /* unmap remaining mapped pages */
708 for (i = 0; i < cnt; i++) {
709 dma_unmap_page(tx_ring->dev, dma_unmap_addr(ena_buf, paddr),
710 dma_unmap_len(ena_buf, len), DMA_TO_DEVICE);
715 /* ena_free_tx_bufs - Free Tx Buffers per Queue
716 * @tx_ring: TX ring for which buffers are to be freed
718 static void ena_free_tx_bufs(struct ena_ring *tx_ring)
720 bool print_once = true;
723 for (i = 0; i < tx_ring->ring_size; i++) {
724 struct ena_tx_buffer *tx_info = &tx_ring->tx_buffer_info[i];
730 netif_notice(tx_ring->adapter, ifdown, tx_ring->netdev,
731 "Free uncompleted tx skb qid %d idx 0x%x\n",
735 netif_dbg(tx_ring->adapter, ifdown, tx_ring->netdev,
736 "Free uncompleted tx skb qid %d idx 0x%x\n",
740 ena_unmap_tx_buff(tx_ring, tx_info);
742 dev_kfree_skb_any(tx_info->skb);
744 netdev_tx_reset_queue(netdev_get_tx_queue(tx_ring->netdev,
748 static void ena_free_all_tx_bufs(struct ena_adapter *adapter)
750 struct ena_ring *tx_ring;
753 for (i = 0; i < adapter->num_io_queues + adapter->xdp_num_queues; i++) {
754 tx_ring = &adapter->tx_ring[i];
755 ena_free_tx_bufs(tx_ring);
759 static void ena_destroy_all_tx_queues(struct ena_adapter *adapter)
764 for (i = 0; i < adapter->num_io_queues + adapter->xdp_num_queues; i++) {
765 ena_qid = ENA_IO_TXQ_IDX(i);
766 ena_com_destroy_io_queue(adapter->ena_dev, ena_qid);
770 static void ena_destroy_all_rx_queues(struct ena_adapter *adapter)
775 for (i = 0; i < adapter->num_io_queues; i++) {
776 ena_qid = ENA_IO_RXQ_IDX(i);
777 cancel_work_sync(&adapter->ena_napi[i].dim.work);
778 ena_xdp_unregister_rxq_info(&adapter->rx_ring[i]);
779 ena_com_destroy_io_queue(adapter->ena_dev, ena_qid);
783 static void ena_destroy_all_io_queues(struct ena_adapter *adapter)
785 ena_destroy_all_tx_queues(adapter);
786 ena_destroy_all_rx_queues(adapter);
789 int handle_invalid_req_id(struct ena_ring *ring, u16 req_id,
790 struct ena_tx_buffer *tx_info, bool is_xdp)
793 netif_err(ring->adapter,
796 "tx_info doesn't have valid %s. qid %u req_id %u",
797 is_xdp ? "xdp frame" : "skb", ring->qid, req_id);
799 netif_err(ring->adapter,
802 "Invalid req_id %u in qid %u\n",
805 ena_increase_stat(&ring->tx_stats.bad_req_id, 1, &ring->syncp);
806 ena_reset_device(ring->adapter, ENA_REGS_RESET_INV_TX_REQ_ID);
811 static int validate_tx_req_id(struct ena_ring *tx_ring, u16 req_id)
813 struct ena_tx_buffer *tx_info;
815 tx_info = &tx_ring->tx_buffer_info[req_id];
816 if (likely(tx_info->skb))
819 return handle_invalid_req_id(tx_ring, req_id, tx_info, false);
822 static int ena_clean_tx_irq(struct ena_ring *tx_ring, u32 budget)
824 struct netdev_queue *txq;
833 next_to_clean = tx_ring->next_to_clean;
834 txq = netdev_get_tx_queue(tx_ring->netdev, tx_ring->qid);
836 while (tx_pkts < budget) {
837 struct ena_tx_buffer *tx_info;
840 rc = ena_com_tx_comp_req_id_get(tx_ring->ena_com_io_cq,
843 if (unlikely(rc == -EINVAL))
844 handle_invalid_req_id(tx_ring, req_id, NULL, false);
848 /* validate that the request id points to a valid skb */
849 rc = validate_tx_req_id(tx_ring, req_id);
853 tx_info = &tx_ring->tx_buffer_info[req_id];
856 /* prefetch skb_end_pointer() to speed up skb_shinfo(skb) */
860 tx_info->last_jiffies = 0;
862 ena_unmap_tx_buff(tx_ring, tx_info);
864 netif_dbg(tx_ring->adapter, tx_done, tx_ring->netdev,
865 "tx_poll: q %d skb %p completed\n", tx_ring->qid,
868 tx_bytes += tx_info->total_tx_size;
871 total_done += tx_info->tx_descs;
873 tx_ring->free_ids[next_to_clean] = req_id;
874 next_to_clean = ENA_TX_RING_IDX_NEXT(next_to_clean,
878 tx_ring->next_to_clean = next_to_clean;
879 ena_com_comp_ack(tx_ring->ena_com_io_sq, total_done);
881 netdev_tx_completed_queue(txq, tx_pkts, tx_bytes);
883 netif_dbg(tx_ring->adapter, tx_done, tx_ring->netdev,
884 "tx_poll: q %d done. total pkts: %d\n",
885 tx_ring->qid, tx_pkts);
887 /* need to make the ring's circular update visible to
888 * ena_start_xmit() before checking for netif_queue_stopped().
892 above_thresh = ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq,
893 ENA_TX_WAKEUP_THRESH);
894 if (unlikely(netif_tx_queue_stopped(txq) && above_thresh)) {
895 __netif_tx_lock(txq, smp_processor_id());
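/* Re-check the free-space condition under the tx queue lock so that a
 * concurrent ena_start_xmit() stopping the queue cannot race with this
 * wakeup.
 */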
897 ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq,
898 ENA_TX_WAKEUP_THRESH);
899 if (netif_tx_queue_stopped(txq) && above_thresh &&
900 test_bit(ENA_FLAG_DEV_UP, &tx_ring->adapter->flags)) {
901 netif_tx_wake_queue(txq);
902 ena_increase_stat(&tx_ring->tx_stats.queue_wakeup, 1,
905 __netif_tx_unlock(txq);
911 static struct sk_buff *ena_alloc_skb(struct ena_ring *rx_ring, void *first_frag, u16 len)
916 skb = napi_alloc_skb(rx_ring->napi, len);
918 skb = napi_build_skb(first_frag, len);
920 if (unlikely(!skb)) {
921 ena_increase_stat(&rx_ring->rx_stats.skb_alloc_fail, 1,
924 netif_dbg(rx_ring->adapter, rx_err, rx_ring->netdev,
925 "Failed to allocate skb. first_frag %s\n",
926 first_frag ? "provided" : "not provided");
932 static bool ena_try_rx_buf_page_reuse(struct ena_rx_buffer *rx_info, u16 buf_len,
933 u16 len, int pkt_offset)
935 struct ena_com_buf *ena_buf = &rx_info->ena_buf;
937 /* More than ENA_MIN_RX_BUF_SIZE left in the reused buffer
938 * for data + headroom + tailroom.
940 if (SKB_DATA_ALIGN(len + pkt_offset) + ENA_MIN_RX_BUF_SIZE <= ena_buf->len) {
941 page_ref_inc(rx_info->page);
942 rx_info->page_offset += buf_len;
943 ena_buf->paddr += buf_len;
944 ena_buf->len -= buf_len;
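/* The page reference count was bumped and the offsets advanced, so the
 * unused remainder of the page can back the next buffer posted for this
 * slot instead of allocating a fresh page.
 */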
951 static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring,
952 struct ena_com_rx_buf_info *ena_bufs,
956 int tailroom = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
957 bool is_xdp_loaded = ena_xdp_present_ring(rx_ring);
958 struct ena_rx_buffer *rx_info;
959 struct ena_adapter *adapter;
960 int page_offset, pkt_offset;
961 dma_addr_t pre_reuse_paddr;
962 u16 len, req_id, buf = 0;
963 bool reuse_rx_buf_page;
969 len = ena_bufs[buf].len;
970 req_id = ena_bufs[buf].req_id;
972 rx_info = &rx_ring->rx_buffer_info[req_id];
974 if (unlikely(!rx_info->page)) {
975 adapter = rx_ring->adapter;
976 netif_err(adapter, rx_err, rx_ring->netdev,
977 "Page is NULL. qid %u req_id %u\n", rx_ring->qid, req_id);
978 ena_increase_stat(&rx_ring->rx_stats.bad_req_id, 1, &rx_ring->syncp);
979 ena_reset_device(adapter, ENA_REGS_RESET_INV_RX_REQ_ID);
983 netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
984 "rx_info %p page %p\n",
985 rx_info, rx_info->page);
987 buf_offset = rx_info->buf_offset;
988 pkt_offset = buf_offset - rx_ring->rx_headroom;
989 page_offset = rx_info->page_offset;
990 buf_addr = page_address(rx_info->page) + page_offset;
992 if (len <= rx_ring->rx_copybreak) {
993 skb = ena_alloc_skb(rx_ring, NULL, len);
997 skb_copy_to_linear_data(skb, buf_addr + buf_offset, len);
998 dma_sync_single_for_device(rx_ring->dev,
999 dma_unmap_addr(&rx_info->ena_buf, paddr) + pkt_offset,
1004 netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
1005 "RX allocated small packet. len %d.\n", skb->len);
1006 skb->protocol = eth_type_trans(skb, rx_ring->netdev);
1007 rx_ring->free_ids[*next_to_clean] = req_id;
1008 *next_to_clean = ENA_RX_RING_IDX_ADD(*next_to_clean, descs,
1009 rx_ring->ring_size);
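/* Copybreak path: the payload was copied into a freshly allocated skb,
 * so the RX page stays mapped and in place, ready to be reused.
 */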
1013 buf_len = SKB_DATA_ALIGN(len + buf_offset + tailroom);
1015 /* If XDP isn't loaded try to reuse part of the RX buffer */
1016 reuse_rx_buf_page = !is_xdp_loaded &&
1017 ena_try_rx_buf_page_reuse(rx_info, buf_len, len, pkt_offset);
1019 if (!reuse_rx_buf_page)
1020 ena_unmap_rx_buff_attrs(rx_ring, rx_info, DMA_ATTR_SKIP_CPU_SYNC);
1022 skb = ena_alloc_skb(rx_ring, buf_addr, buf_len);
1026 /* Populate skb's linear part */
1027 skb_reserve(skb, buf_offset);
1029 skb->protocol = eth_type_trans(skb, rx_ring->netdev);
1032 netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
1033 "RX skb updated. len %d. data_len %d\n",
1034 skb->len, skb->data_len);
1036 if (!reuse_rx_buf_page)
1037 rx_info->page = NULL;
1039 rx_ring->free_ids[*next_to_clean] = req_id;
1041 ENA_RX_RING_IDX_NEXT(*next_to_clean,
1042 rx_ring->ring_size);
1043 if (likely(--descs == 0))
1047 len = ena_bufs[buf].len;
1048 req_id = ena_bufs[buf].req_id;
1050 rx_info = &rx_ring->rx_buffer_info[req_id];
1052 /* rx_info->buf_offset includes rx_ring->rx_headroom */
1053 buf_offset = rx_info->buf_offset;
1054 pkt_offset = buf_offset - rx_ring->rx_headroom;
1055 buf_len = SKB_DATA_ALIGN(len + buf_offset + tailroom);
1056 page_offset = rx_info->page_offset;
1058 pre_reuse_paddr = dma_unmap_addr(&rx_info->ena_buf, paddr);
1060 reuse_rx_buf_page = !is_xdp_loaded &&
1061 ena_try_rx_buf_page_reuse(rx_info, buf_len, len, pkt_offset);
1063 dma_sync_single_for_cpu(rx_ring->dev,
1064 pre_reuse_paddr + pkt_offset,
1068 if (!reuse_rx_buf_page)
1069 ena_unmap_rx_buff_attrs(rx_ring, rx_info, DMA_ATTR_SKIP_CPU_SYNC);
1071 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_info->page,
1072 page_offset + buf_offset, len, buf_len);
1079 /* ena_rx_checksum - indicate in skb if hw indicated a good cksum
1080 * @rx_ring: the ring from which the packet was received
1081 * @ena_rx_ctx: received packet context/metadata
1082 * @skb: skb currently being received and modified
1084 static void ena_rx_checksum(struct ena_ring *rx_ring,
1085 struct ena_com_rx_ctx *ena_rx_ctx,
1086 struct sk_buff *skb)
1088 /* Rx csum disabled */
1089 if (unlikely(!(rx_ring->netdev->features & NETIF_F_RXCSUM))) {
1090 skb->ip_summed = CHECKSUM_NONE;
1094 /* For fragmented packets the checksum isn't valid */
1095 if (ena_rx_ctx->frag) {
1096 skb->ip_summed = CHECKSUM_NONE;
1100 /* if IPv4 and the device reported an L3 checksum error */
1101 if (unlikely((ena_rx_ctx->l3_proto == ENA_ETH_IO_L3_PROTO_IPV4) &&
1102 (ena_rx_ctx->l3_csum_err))) {
1103 /* ipv4 checksum error */
1104 skb->ip_summed = CHECKSUM_NONE;
1105 ena_increase_stat(&rx_ring->rx_stats.csum_bad, 1,
1107 netif_dbg(rx_ring->adapter, rx_err, rx_ring->netdev,
1108 "RX IPv4 header checksum error\n");
1113 if (likely((ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_TCP) ||
1114 (ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_UDP))) {
1115 if (unlikely(ena_rx_ctx->l4_csum_err)) {
1116 /* TCP/UDP checksum error */
1117 ena_increase_stat(&rx_ring->rx_stats.csum_bad, 1,
1119 netif_dbg(rx_ring->adapter, rx_err, rx_ring->netdev,
1120 "RX L4 checksum error\n");
1121 skb->ip_summed = CHECKSUM_NONE;
1125 if (likely(ena_rx_ctx->l4_csum_checked)) {
1126 skb->ip_summed = CHECKSUM_UNNECESSARY;
1127 ena_increase_stat(&rx_ring->rx_stats.csum_good, 1,
1130 ena_increase_stat(&rx_ring->rx_stats.csum_unchecked, 1,
1132 skb->ip_summed = CHECKSUM_NONE;
1135 skb->ip_summed = CHECKSUM_NONE;
1141 static void ena_set_rx_hash(struct ena_ring *rx_ring,
1142 struct ena_com_rx_ctx *ena_rx_ctx,
1143 struct sk_buff *skb)
1145 enum pkt_hash_types hash_type;
1147 if (likely(rx_ring->netdev->features & NETIF_F_RXHASH)) {
1148 if (likely((ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_TCP) ||
1149 (ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_UDP)))
1151 hash_type = PKT_HASH_TYPE_L4;
1153 hash_type = PKT_HASH_TYPE_NONE;
1155 /* Override hash type if the packet is fragmented */
1156 if (ena_rx_ctx->frag)
1157 hash_type = PKT_HASH_TYPE_NONE;
1159 skb_set_hash(skb, ena_rx_ctx->hash, hash_type);
1163 static int ena_xdp_handle_buff(struct ena_ring *rx_ring, struct xdp_buff *xdp, u16 num_descs)
1165 struct ena_rx_buffer *rx_info;
1168 /* XDP multi-buffer packets not supported */
1169 if (unlikely(num_descs > 1)) {
1170 netdev_err_once(rx_ring->adapter->netdev,
1171 "xdp: dropped unsupported multi-buffer packets\n");
1172 ena_increase_stat(&rx_ring->rx_stats.xdp_drop, 1, &rx_ring->syncp);
1173 return ENA_XDP_DROP;
1176 rx_info = &rx_ring->rx_buffer_info[rx_ring->ena_bufs[0].req_id];
1177 xdp_prepare_buff(xdp, page_address(rx_info->page),
1178 rx_info->buf_offset,
1179 rx_ring->ena_bufs[0].len, false);
1181 ret = ena_xdp_execute(rx_ring, xdp);
1183 /* The xdp program might expand the headers */
1184 if (ret == ENA_XDP_PASS) {
1185 rx_info->buf_offset = xdp->data - xdp->data_hard_start;
1186 rx_ring->ena_bufs[0].len = xdp->data_end - xdp->data;
1192 /* ena_clean_rx_irq - Cleanup RX irq
1193 * @rx_ring: RX ring to clean
1194 * @napi: napi handler
1195 * @budget: how many packets driver is allowed to clean
1197 * Returns the number of cleaned buffers.
1199 static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi,
1202 u16 next_to_clean = rx_ring->next_to_clean;
1203 struct ena_com_rx_ctx ena_rx_ctx;
1204 struct ena_rx_buffer *rx_info;
1205 struct ena_adapter *adapter;
1206 u32 res_budget, work_done;
1207 int rx_copybreak_pkt = 0;
1208 int refill_threshold;
1209 struct sk_buff *skb;
1210 int refill_required;
1211 struct xdp_buff xdp;
1219 netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
1220 "%s qid %d\n", __func__, rx_ring->qid);
1221 res_budget = budget;
1222 xdp_init_buff(&xdp, ENA_PAGE_SIZE, &rx_ring->xdp_rxq);
1225 xdp_verdict = ENA_XDP_PASS;
1227 ena_rx_ctx.ena_bufs = rx_ring->ena_bufs;
1228 ena_rx_ctx.max_bufs = rx_ring->sgl_size;
1229 ena_rx_ctx.descs = 0;
1230 ena_rx_ctx.pkt_offset = 0;
1231 rc = ena_com_rx_pkt(rx_ring->ena_com_io_cq,
1232 rx_ring->ena_com_io_sq,
1237 if (unlikely(ena_rx_ctx.descs == 0))
1240 /* First descriptor might have an offset set by the device */
1241 rx_info = &rx_ring->rx_buffer_info[rx_ring->ena_bufs[0].req_id];
1242 pkt_offset = ena_rx_ctx.pkt_offset;
1243 rx_info->buf_offset += pkt_offset;
1245 netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
1246 "rx_poll: q %d got packet from ena. descs #: %d l3 proto %d l4 proto %d hash: %x\n",
1247 rx_ring->qid, ena_rx_ctx.descs, ena_rx_ctx.l3_proto,
1248 ena_rx_ctx.l4_proto, ena_rx_ctx.hash);
1250 dma_sync_single_for_cpu(rx_ring->dev,
1251 dma_unmap_addr(&rx_info->ena_buf, paddr) + pkt_offset,
1252 rx_ring->ena_bufs[0].len,
1255 if (ena_xdp_present_ring(rx_ring))
1256 xdp_verdict = ena_xdp_handle_buff(rx_ring, &xdp, ena_rx_ctx.descs);
1258 /* allocate skb and fill it */
1259 if (xdp_verdict == ENA_XDP_PASS)
1260 skb = ena_rx_skb(rx_ring,
1265 if (unlikely(!skb)) {
1266 for (i = 0; i < ena_rx_ctx.descs; i++) {
1267 int req_id = rx_ring->ena_bufs[i].req_id;
1269 rx_ring->free_ids[next_to_clean] = req_id;
1271 ENA_RX_RING_IDX_NEXT(next_to_clean,
1272 rx_ring->ring_size);
1274 /* The packet was passed for transmission, unmap it
1277 if (xdp_verdict & ENA_XDP_FORWARDED) {
1278 ena_unmap_rx_buff_attrs(rx_ring,
1279 &rx_ring->rx_buffer_info[req_id],
1280 DMA_ATTR_SKIP_CPU_SYNC);
1281 rx_ring->rx_buffer_info[req_id].page = NULL;
1284 if (xdp_verdict != ENA_XDP_PASS) {
1285 xdp_flags |= xdp_verdict;
1286 total_len += ena_rx_ctx.ena_bufs[0].len;
1293 ena_rx_checksum(rx_ring, &ena_rx_ctx, skb);
1295 ena_set_rx_hash(rx_ring, &ena_rx_ctx, skb);
1297 skb_record_rx_queue(skb, rx_ring->qid);
1299 if (rx_ring->ena_bufs[0].len <= rx_ring->rx_copybreak)
1302 total_len += skb->len;
1304 napi_gro_receive(napi, skb);
1307 } while (likely(res_budget));
1309 work_done = budget - res_budget;
1310 rx_ring->per_napi_packets += work_done;
1311 u64_stats_update_begin(&rx_ring->syncp);
1312 rx_ring->rx_stats.bytes += total_len;
1313 rx_ring->rx_stats.cnt += work_done;
1314 rx_ring->rx_stats.rx_copybreak_pkt += rx_copybreak_pkt;
1315 u64_stats_update_end(&rx_ring->syncp);
1317 rx_ring->next_to_clean = next_to_clean;
1319 refill_required = ena_com_free_q_entries(rx_ring->ena_com_io_sq);
1321 min_t(int, rx_ring->ring_size / ENA_RX_REFILL_THRESH_DIVIDER,
1322 ENA_RX_REFILL_THRESH_PACKET);
1324 /* Optimization, try to batch new rx buffers */
1325 if (refill_required > refill_threshold)
1326 ena_refill_rx_bufs(rx_ring, refill_required);
1328 if (xdp_flags & ENA_XDP_REDIRECT)
1334 if (xdp_flags & ENA_XDP_REDIRECT)
1337 adapter = netdev_priv(rx_ring->netdev);
1339 if (rc == -ENOSPC) {
1340 ena_increase_stat(&rx_ring->rx_stats.bad_desc_num, 1, &rx_ring->syncp);
1341 ena_reset_device(adapter, ENA_REGS_RESET_TOO_MANY_RX_DESCS);
1343 ena_increase_stat(&rx_ring->rx_stats.bad_req_id, 1,
1345 ena_reset_device(adapter, ENA_REGS_RESET_INV_RX_REQ_ID);
1350 static void ena_dim_work(struct work_struct *w)
1352 struct dim *dim = container_of(w, struct dim, work);
1353 struct dim_cq_moder cur_moder =
1354 net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
1355 struct ena_napi *ena_napi = container_of(dim, struct ena_napi, dim);
1357 ena_napi->rx_ring->smoothed_interval = cur_moder.usec;
1358 dim->state = DIM_START_MEASURE;
1361 static void ena_adjust_adaptive_rx_intr_moderation(struct ena_napi *ena_napi)
1363 struct dim_sample dim_sample;
1364 struct ena_ring *rx_ring = ena_napi->rx_ring;
1366 if (!rx_ring->per_napi_packets)
1369 rx_ring->non_empty_napi_events++;
1371 dim_update_sample(rx_ring->non_empty_napi_events,
1372 rx_ring->rx_stats.cnt,
1373 rx_ring->rx_stats.bytes,
1376 net_dim(&ena_napi->dim, dim_sample);
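/* net_dim() digests this sample and, when it decides to switch profiles,
 * schedules dim->work (ena_dim_work above), which applies the new
 * interval to rx_ring->smoothed_interval.
 */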
1378 rx_ring->per_napi_packets = 0;
1381 void ena_unmask_interrupt(struct ena_ring *tx_ring,
1382 struct ena_ring *rx_ring)
1384 u32 rx_interval = tx_ring->smoothed_interval;
1385 struct ena_eth_io_intr_reg intr_reg;
1387 /* Rx ring can be NULL for XDP TX queues, which don't have an
1388 * accompanying rx_ring pair.
1391 rx_interval = ena_com_get_adaptive_moderation_enabled(rx_ring->ena_dev) ?
1392 rx_ring->smoothed_interval :
1393 ena_com_get_nonadaptive_moderation_interval_rx(rx_ring->ena_dev);
1395 /* Update intr register: rx intr delay,
1396 * tx intr delay and interrupt unmask
1398 ena_com_update_intr_reg(&intr_reg,
1400 tx_ring->smoothed_interval,
1403 ena_increase_stat(&tx_ring->tx_stats.unmask_interrupt, 1,
1406 /* It is a shared MSI-X vector.
1407 * Both the Tx and Rx CQs hold a pointer to it,
1408 * so we use one of them to reach the interrupt register.
1409 * The Tx ring is used because rx_ring is NULL for XDP queues.
1411 ena_com_unmask_intr(tx_ring->ena_com_io_cq, &intr_reg);
1414 void ena_update_ring_numa_node(struct ena_ring *tx_ring,
1415 struct ena_ring *rx_ring)
1417 int cpu = get_cpu();
1420 /* Check only one ring since the 2 rings are running on the same cpu */
1421 if (likely(tx_ring->cpu == cpu))
1428 numa_node = cpu_to_node(cpu);
1430 if (likely(tx_ring->numa_node == numa_node))
1435 if (numa_node != NUMA_NO_NODE) {
1436 ena_com_update_numa_node(tx_ring->ena_com_io_cq, numa_node);
1437 tx_ring->numa_node = numa_node;
1439 rx_ring->numa_node = numa_node;
1440 ena_com_update_numa_node(rx_ring->ena_com_io_cq,
1450 static int ena_io_poll(struct napi_struct *napi, int budget)
1452 struct ena_napi *ena_napi = container_of(napi, struct ena_napi, napi);
1453 struct ena_ring *tx_ring, *rx_ring;
1455 int rx_work_done = 0;
1457 int napi_comp_call = 0;
1460 tx_ring = ena_napi->tx_ring;
1461 rx_ring = ena_napi->rx_ring;
1463 tx_budget = tx_ring->ring_size / ENA_TX_POLL_BUDGET_DIVIDER;
1465 if (!test_bit(ENA_FLAG_DEV_UP, &tx_ring->adapter->flags) ||
1466 test_bit(ENA_FLAG_TRIGGER_RESET, &tx_ring->adapter->flags)) {
1467 napi_complete_done(napi, 0);
1471 tx_work_done = ena_clean_tx_irq(tx_ring, tx_budget);
1472 /* On netpoll the budget is zero and the handler should only clean the Tx completions */
1476 rx_work_done = ena_clean_rx_irq(rx_ring, napi, budget);
1478 /* If the device is about to reset or is down, avoid unmasking
1479 * the interrupt and return 0 so NAPI won't reschedule
1481 if (unlikely(!test_bit(ENA_FLAG_DEV_UP, &tx_ring->adapter->flags) ||
1482 test_bit(ENA_FLAG_TRIGGER_RESET, &tx_ring->adapter->flags))) {
1483 napi_complete_done(napi, 0);
1486 } else if ((budget > rx_work_done) && (tx_budget > tx_work_done)) {
1489 /* Update numa and unmask the interrupt only when scheduled
1490 * from the interrupt context (vs. from sk_busy_loop)
1492 if (napi_complete_done(napi, rx_work_done) &&
1493 READ_ONCE(ena_napi->interrupts_masked)) {
1494 smp_rmb(); /* make sure interrupts_masked is read */
1495 WRITE_ONCE(ena_napi->interrupts_masked, false);
1496 /* We apply adaptive moderation on Rx path only.
1497 * Tx uses static interrupt moderation.
1499 if (ena_com_get_adaptive_moderation_enabled(rx_ring->ena_dev))
1500 ena_adjust_adaptive_rx_intr_moderation(ena_napi);
1502 ena_update_ring_numa_node(tx_ring, rx_ring);
1503 ena_unmask_interrupt(tx_ring, rx_ring);
1511 u64_stats_update_begin(&tx_ring->syncp);
1512 tx_ring->tx_stats.napi_comp += napi_comp_call;
1513 tx_ring->tx_stats.tx_poll++;
1514 u64_stats_update_end(&tx_ring->syncp);
1516 tx_ring->tx_stats.last_napi_jiffies = jiffies;
1521 static irqreturn_t ena_intr_msix_mgmnt(int irq, void *data)
1523 struct ena_adapter *adapter = (struct ena_adapter *)data;
1525 ena_com_admin_q_comp_intr_handler(adapter->ena_dev);
1527 /* Don't call the aenq handler before probe is done */
1528 if (likely(test_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags)))
1529 ena_com_aenq_intr_handler(adapter->ena_dev, data);
1534 /* ena_intr_msix_io - MSI-X Interrupt Handler for Tx/Rx
1535 * @irq: interrupt number
1536 * @data: pointer to a network interface private napi device structure
1538 static irqreturn_t ena_intr_msix_io(int irq, void *data)
1540 struct ena_napi *ena_napi = data;
1542 /* Used to check HW health */
1543 WRITE_ONCE(ena_napi->first_interrupt, true);
1545 WRITE_ONCE(ena_napi->interrupts_masked, true);
1546 smp_wmb(); /* write interrupts_masked before calling napi */
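/* Pairs with the smp_rmb() in ena_io_poll(): the poll routine unmasks the
 * interrupt only after observing interrupts_masked == true.
 */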
1548 napi_schedule_irqoff(&ena_napi->napi);
1553 /* Reserve a single MSI-X vector for management (admin + aenq),
1554 * plus one vector for each potential IO queue.
1555 * The number of potential IO queues is the minimum of what the device
1556 * supports and the number of vCPUs.
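 *
 * Illustrative example (assuming ENA_MAX_MSIX_VEC(n) is 1 management vector
 * plus n IO vectors): a device capped at 8 IO queues would request 9 vectors.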
1558 static int ena_enable_msix(struct ena_adapter *adapter)
1560 int msix_vecs, irq_cnt;
1562 if (test_bit(ENA_FLAG_MSIX_ENABLED, &adapter->flags)) {
1563 netif_err(adapter, probe, adapter->netdev,
1564 "Error, MSI-X is already enabled\n");
1568 /* Reserve the maximum number of MSI-X vectors we might need */
1569 msix_vecs = ENA_MAX_MSIX_VEC(adapter->max_num_io_queues);
1570 netif_dbg(adapter, probe, adapter->netdev,
1571 "Trying to enable MSI-X, vectors %d\n", msix_vecs);
1573 irq_cnt = pci_alloc_irq_vectors(adapter->pdev, ENA_MIN_MSIX_VEC,
1574 msix_vecs, PCI_IRQ_MSIX);
1577 netif_err(adapter, probe, adapter->netdev,
1578 "Failed to enable MSI-X. irq_cnt %d\n", irq_cnt);
1582 if (irq_cnt != msix_vecs) {
1583 netif_notice(adapter, probe, adapter->netdev,
1584 "Enable only %d MSI-X (out of %d), reduce the number of queues\n",
1585 irq_cnt, msix_vecs);
1586 adapter->num_io_queues = irq_cnt - ENA_ADMIN_MSIX_VEC;
1589 if (ena_init_rx_cpu_rmap(adapter))
1590 netif_warn(adapter, probe, adapter->netdev,
1591 "Failed to map IRQs to CPUs\n");
1593 adapter->msix_vecs = irq_cnt;
1594 set_bit(ENA_FLAG_MSIX_ENABLED, &adapter->flags);
1599 static void ena_setup_mgmnt_intr(struct ena_adapter *adapter)
1603 snprintf(adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].name,
1604 ENA_IRQNAME_SIZE, "ena-mgmnt@pci:%s",
1605 pci_name(adapter->pdev));
1606 adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].handler =
1607 ena_intr_msix_mgmnt;
1608 adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].data = adapter;
1609 adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].vector =
1610 pci_irq_vector(adapter->pdev, ENA_MGMNT_IRQ_IDX);
1611 cpu = cpumask_first(cpu_online_mask);
1612 adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].cpu = cpu;
1613 cpumask_set_cpu(cpu,
1614 &adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].affinity_hint_mask);
1617 static void ena_setup_io_intr(struct ena_adapter *adapter)
1619 struct net_device *netdev;
1620 int irq_idx, i, cpu;
1623 netdev = adapter->netdev;
1624 io_queue_count = adapter->num_io_queues + adapter->xdp_num_queues;
1626 for (i = 0; i < io_queue_count; i++) {
1627 irq_idx = ENA_IO_IRQ_IDX(i);
1628 cpu = i % num_online_cpus();
1630 snprintf(adapter->irq_tbl[irq_idx].name, ENA_IRQNAME_SIZE,
1631 "%s-Tx-Rx-%d", netdev->name, i);
1632 adapter->irq_tbl[irq_idx].handler = ena_intr_msix_io;
1633 adapter->irq_tbl[irq_idx].data = &adapter->ena_napi[i];
1634 adapter->irq_tbl[irq_idx].vector =
1635 pci_irq_vector(adapter->pdev, irq_idx);
1636 adapter->irq_tbl[irq_idx].cpu = cpu;
1638 cpumask_set_cpu(cpu,
1639 &adapter->irq_tbl[irq_idx].affinity_hint_mask);
1643 static int ena_request_mgmnt_irq(struct ena_adapter *adapter)
1645 unsigned long flags = 0;
1646 struct ena_irq *irq;
1649 irq = &adapter->irq_tbl[ENA_MGMNT_IRQ_IDX];
1650 rc = request_irq(irq->vector, irq->handler, flags, irq->name,
1653 netif_err(adapter, probe, adapter->netdev,
1654 "Failed to request admin irq\n");
1658 netif_dbg(adapter, probe, adapter->netdev,
1659 "Set affinity hint of mgmnt irq.to 0x%lx (irq vector: %d)\n",
1660 irq->affinity_hint_mask.bits[0], irq->vector);
1662 irq_set_affinity_hint(irq->vector, &irq->affinity_hint_mask);
1667 static int ena_request_io_irq(struct ena_adapter *adapter)
1669 u32 io_queue_count = adapter->num_io_queues + adapter->xdp_num_queues;
1670 unsigned long flags = 0;
1671 struct ena_irq *irq;
1674 if (!test_bit(ENA_FLAG_MSIX_ENABLED, &adapter->flags)) {
1675 netif_err(adapter, ifup, adapter->netdev,
1676 "Failed to request I/O IRQ: MSI-X is not enabled\n");
1680 for (i = ENA_IO_IRQ_FIRST_IDX; i < ENA_MAX_MSIX_VEC(io_queue_count); i++) {
1681 irq = &adapter->irq_tbl[i];
1682 rc = request_irq(irq->vector, irq->handler, flags, irq->name,
1685 netif_err(adapter, ifup, adapter->netdev,
1686 "Failed to request I/O IRQ. index %d rc %d\n",
1691 netif_dbg(adapter, ifup, adapter->netdev,
1692 "Set affinity hint of irq. index %d to 0x%lx (irq vector: %d)\n",
1693 i, irq->affinity_hint_mask.bits[0], irq->vector);
1695 irq_set_affinity_hint(irq->vector, &irq->affinity_hint_mask);
1701 for (k = ENA_IO_IRQ_FIRST_IDX; k < i; k++) {
1702 irq = &adapter->irq_tbl[k];
1703 free_irq(irq->vector, irq->data);
1709 static void ena_free_mgmnt_irq(struct ena_adapter *adapter)
1711 struct ena_irq *irq;
1713 irq = &adapter->irq_tbl[ENA_MGMNT_IRQ_IDX];
1714 synchronize_irq(irq->vector);
1715 irq_set_affinity_hint(irq->vector, NULL);
1716 free_irq(irq->vector, irq->data);
1719 static void ena_free_io_irq(struct ena_adapter *adapter)
1721 u32 io_queue_count = adapter->num_io_queues + adapter->xdp_num_queues;
1722 struct ena_irq *irq;
1725 #ifdef CONFIG_RFS_ACCEL
1726 if (adapter->msix_vecs >= 1) {
1727 free_irq_cpu_rmap(adapter->netdev->rx_cpu_rmap);
1728 adapter->netdev->rx_cpu_rmap = NULL;
1730 #endif /* CONFIG_RFS_ACCEL */
1732 for (i = ENA_IO_IRQ_FIRST_IDX; i < ENA_MAX_MSIX_VEC(io_queue_count); i++) {
1733 irq = &adapter->irq_tbl[i];
1734 irq_set_affinity_hint(irq->vector, NULL);
1735 free_irq(irq->vector, irq->data);
1739 static void ena_disable_msix(struct ena_adapter *adapter)
1741 if (test_and_clear_bit(ENA_FLAG_MSIX_ENABLED, &adapter->flags))
1742 pci_free_irq_vectors(adapter->pdev);
1745 static void ena_disable_io_intr_sync(struct ena_adapter *adapter)
1747 u32 io_queue_count = adapter->num_io_queues + adapter->xdp_num_queues;
1750 if (!netif_running(adapter->netdev))
1753 for (i = ENA_IO_IRQ_FIRST_IDX; i < ENA_MAX_MSIX_VEC(io_queue_count); i++)
1754 synchronize_irq(adapter->irq_tbl[i].vector);
1757 static void ena_del_napi_in_range(struct ena_adapter *adapter,
1763 for (i = first_index; i < first_index + count; i++) {
1764 netif_napi_del(&adapter->ena_napi[i].napi);
1766 WARN_ON(ENA_IS_XDP_INDEX(adapter, i) &&
1767 adapter->ena_napi[i].rx_ring);
1771 static void ena_init_napi_in_range(struct ena_adapter *adapter,
1772 int first_index, int count)
1774 int (*napi_handler)(struct napi_struct *napi, int budget);
1777 for (i = first_index; i < first_index + count; i++) {
1778 struct ena_napi *napi = &adapter->ena_napi[i];
1779 struct ena_ring *rx_ring, *tx_ring;
1781 memset(napi, 0, sizeof(*napi));
1783 rx_ring = &adapter->rx_ring[i];
1784 tx_ring = &adapter->tx_ring[i];
1786 napi_handler = ena_io_poll;
1787 if (ENA_IS_XDP_INDEX(adapter, i))
1788 napi_handler = ena_xdp_io_poll;
1790 netif_napi_add(adapter->netdev, &napi->napi, napi_handler);
1792 if (!ENA_IS_XDP_INDEX(adapter, i))
1793 napi->rx_ring = rx_ring;
1795 napi->tx_ring = tx_ring;
1800 static void ena_napi_disable_in_range(struct ena_adapter *adapter,
1806 for (i = first_index; i < first_index + count; i++)
1807 napi_disable(&adapter->ena_napi[i].napi);
1810 static void ena_napi_enable_in_range(struct ena_adapter *adapter,
1816 for (i = first_index; i < first_index + count; i++)
1817 napi_enable(&adapter->ena_napi[i].napi);
1820 /* Configure the Rx forwarding */
1821 static int ena_rss_configure(struct ena_adapter *adapter)
1823 struct ena_com_dev *ena_dev = adapter->ena_dev;
1826 /* In case the RSS table wasn't initialized by probe */
1827 if (!ena_dev->rss.tbl_log_size) {
1828 rc = ena_rss_init_default(adapter);
1829 if (rc && (rc != -EOPNOTSUPP)) {
1830 netif_err(adapter, ifup, adapter->netdev, "Failed to init RSS rc: %d\n", rc);
1835 /* Set indirect table */
1836 rc = ena_com_indirect_table_set(ena_dev);
1837 if (unlikely(rc && rc != -EOPNOTSUPP))
1840 /* Configure hash function (if supported) */
1841 rc = ena_com_set_hash_function(ena_dev);
1842 if (unlikely(rc && (rc != -EOPNOTSUPP)))
1845 /* Configure hash inputs (if supported) */
1846 rc = ena_com_set_hash_ctrl(ena_dev);
1847 if (unlikely(rc && (rc != -EOPNOTSUPP)))
1853 static int ena_up_complete(struct ena_adapter *adapter)
1857 rc = ena_rss_configure(adapter);
1861 ena_change_mtu(adapter->netdev, adapter->netdev->mtu);
1863 ena_refill_all_rx_bufs(adapter);
1865 /* enable transmits */
1866 netif_tx_start_all_queues(adapter->netdev);
1868 ena_napi_enable_in_range(adapter,
1870 adapter->xdp_num_queues + adapter->num_io_queues);
1875 static int ena_create_io_tx_queue(struct ena_adapter *adapter, int qid)
1877 struct ena_com_create_io_ctx ctx;
1878 struct ena_com_dev *ena_dev;
1879 struct ena_ring *tx_ring;
1884 ena_dev = adapter->ena_dev;
1886 tx_ring = &adapter->tx_ring[qid];
1887 msix_vector = ENA_IO_IRQ_IDX(qid);
1888 ena_qid = ENA_IO_TXQ_IDX(qid);
1890 memset(&ctx, 0x0, sizeof(ctx));
1892 ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_TX;
1894 ctx.mem_queue_type = ena_dev->tx_mem_queue_type;
1895 ctx.msix_vector = msix_vector;
1896 ctx.queue_size = tx_ring->ring_size;
1897 ctx.numa_node = tx_ring->numa_node;
1899 rc = ena_com_create_io_queue(ena_dev, &ctx);
1901 netif_err(adapter, ifup, adapter->netdev,
1902 "Failed to create I/O TX queue num %d rc: %d\n",
1907 rc = ena_com_get_io_handlers(ena_dev, ena_qid,
1908 &tx_ring->ena_com_io_sq,
1909 &tx_ring->ena_com_io_cq);
1911 netif_err(adapter, ifup, adapter->netdev,
1912 "Failed to get TX queue handlers. TX queue num %d rc: %d\n",
1914 ena_com_destroy_io_queue(ena_dev, ena_qid);
1918 ena_com_update_numa_node(tx_ring->ena_com_io_cq, ctx.numa_node);
1922 int ena_create_io_tx_queues_in_range(struct ena_adapter *adapter,
1923 int first_index, int count)
1925 struct ena_com_dev *ena_dev = adapter->ena_dev;
1928 for (i = first_index; i < first_index + count; i++) {
1929 rc = ena_create_io_tx_queue(adapter, i);
1937 while (i-- > first_index)
1938 ena_com_destroy_io_queue(ena_dev, ENA_IO_TXQ_IDX(i));
1943 static int ena_create_io_rx_queue(struct ena_adapter *adapter, int qid)
1945 struct ena_com_dev *ena_dev;
1946 struct ena_com_create_io_ctx ctx;
1947 struct ena_ring *rx_ring;
1952 ena_dev = adapter->ena_dev;
1954 rx_ring = &adapter->rx_ring[qid];
1955 msix_vector = ENA_IO_IRQ_IDX(qid);
1956 ena_qid = ENA_IO_RXQ_IDX(qid);
1958 memset(&ctx, 0x0, sizeof(ctx));
1961 ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_RX;
1962 ctx.mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
1963 ctx.msix_vector = msix_vector;
1964 ctx.queue_size = rx_ring->ring_size;
1965 ctx.numa_node = rx_ring->numa_node;
1967 rc = ena_com_create_io_queue(ena_dev, &ctx);
1969 netif_err(adapter, ifup, adapter->netdev,
1970 "Failed to create I/O RX queue num %d rc: %d\n",
1975 rc = ena_com_get_io_handlers(ena_dev, ena_qid,
1976 &rx_ring->ena_com_io_sq,
1977 &rx_ring->ena_com_io_cq);
1979 netif_err(adapter, ifup, adapter->netdev,
1980 "Failed to get RX queue handlers. RX queue num %d rc: %d\n",
1985 ena_com_update_numa_node(rx_ring->ena_com_io_cq, ctx.numa_node);
1989 ena_com_destroy_io_queue(ena_dev, ena_qid);
1993 static int ena_create_all_io_rx_queues(struct ena_adapter *adapter)
1995 struct ena_com_dev *ena_dev = adapter->ena_dev;
1998 for (i = 0; i < adapter->num_io_queues; i++) {
1999 rc = ena_create_io_rx_queue(adapter, i);
2002 INIT_WORK(&adapter->ena_napi[i].dim.work, ena_dim_work);
2004 ena_xdp_register_rxq_info(&adapter->rx_ring[i]);
2011 ena_xdp_unregister_rxq_info(&adapter->rx_ring[i]);
2012 cancel_work_sync(&adapter->ena_napi[i].dim.work);
2013 ena_com_destroy_io_queue(ena_dev, ENA_IO_RXQ_IDX(i));
2019 static void set_io_rings_size(struct ena_adapter *adapter,
2025 for (i = 0; i < adapter->num_io_queues; i++) {
2026 adapter->tx_ring[i].ring_size = new_tx_size;
2027 adapter->rx_ring[i].ring_size = new_rx_size;
2031 /* This function allows queue allocation to backoff when the system is
2032 * low on memory. If there is not enough memory to allocate io queues
2033 * the driver will try to allocate smaller queues.
2035 * The backoff algorithm is as follows:
2036 * 1. Try to allocate TX and RX; if successful, return success.
2039 * 2. Divide the size of the larger of the RX and TX queues by 2 (or both if their size is the same).
2041 * 3. If TX or RX is smaller than 256, return failure.
2044 * 4. Otherwise, go back to 1.
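 *
 * Illustrative walk-through: with requested sizes TX=1024 and RX=1024,
 * repeated -ENOMEM failures retry at 512 and then 256; another failure would
 * require halving below ENA_MIN_RING_SIZE, so the function gives up and
 * returns the error.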
2046 static int create_queues_with_size_backoff(struct ena_adapter *adapter)
2048 int rc, cur_rx_ring_size, cur_tx_ring_size;
2049 int new_rx_ring_size, new_tx_ring_size;
2051 /* current queue sizes might be smaller than the requested
2052 * ones due to past queue allocation failures.
2054 set_io_rings_size(adapter, adapter->requested_tx_ring_size,
2055 adapter->requested_rx_ring_size);
2058 if (ena_xdp_present(adapter)) {
2059 rc = ena_setup_and_create_all_xdp_queues(adapter);
2064 rc = ena_setup_tx_resources_in_range(adapter,
2066 adapter->num_io_queues);
2070 rc = ena_create_io_tx_queues_in_range(adapter,
2072 adapter->num_io_queues);
2074 goto err_create_tx_queues;
2076 rc = ena_setup_all_rx_resources(adapter);
2080 rc = ena_create_all_io_rx_queues(adapter);
2082 goto err_create_rx_queues;
2086 err_create_rx_queues:
2087 ena_free_all_io_rx_resources(adapter);
2089 ena_destroy_all_tx_queues(adapter);
2090 err_create_tx_queues:
2091 ena_free_all_io_tx_resources(adapter);
2093 if (rc != -ENOMEM) {
2094 netif_err(adapter, ifup, adapter->netdev,
2095 "Queue creation failed with error code %d\n",
2100 cur_tx_ring_size = adapter->tx_ring[0].ring_size;
2101 cur_rx_ring_size = adapter->rx_ring[0].ring_size;
2103 netif_err(adapter, ifup, adapter->netdev,
2104 "Not enough memory to create queues with sizes TX=%d, RX=%d\n",
2105 cur_tx_ring_size, cur_rx_ring_size);
2107 new_tx_ring_size = cur_tx_ring_size;
2108 new_rx_ring_size = cur_rx_ring_size;
2110 /* Decrease the size of the larger queue, or
2111 * decrease both if they are the same size.
2113 if (cur_rx_ring_size <= cur_tx_ring_size)
2114 new_tx_ring_size = cur_tx_ring_size / 2;
2115 if (cur_rx_ring_size >= cur_tx_ring_size)
2116 new_rx_ring_size = cur_rx_ring_size / 2;
2118 if (new_tx_ring_size < ENA_MIN_RING_SIZE ||
2119 new_rx_ring_size < ENA_MIN_RING_SIZE) {
2120 netif_err(adapter, ifup, adapter->netdev,
2121 "Queue creation failed with the smallest possible queue size of %d for both queues. Not retrying with smaller queues\n",
2126 netif_err(adapter, ifup, adapter->netdev,
2127 "Retrying queue creation with sizes TX=%d, RX=%d\n",
2131 set_io_rings_size(adapter, new_tx_ring_size,
2136 int ena_up(struct ena_adapter *adapter)
2138 int io_queue_count, rc, i;
2140 netif_dbg(adapter, ifup, adapter->netdev, "%s\n", __func__);
2142 io_queue_count = adapter->num_io_queues + adapter->xdp_num_queues;
2143 ena_setup_io_intr(adapter);
2145 /* napi poll functions should be initialized before running
2146 * request_irq(), to handle a rare condition where there is a pending
2147 * interrupt, causing the ISR to fire immediately while the poll
2148 * function wasn't set yet, causing a null dereference
2150 ena_init_napi_in_range(adapter, 0, io_queue_count);
2152 /* Enabling DIM needs to happen before enabling IRQs since DIM
2153 * is run from napi routine
2155 if (ena_com_interrupt_moderation_supported(adapter->ena_dev))
2156 ena_com_enable_adaptive_moderation(adapter->ena_dev);
2158 rc = ena_request_io_irq(adapter);
2162 rc = create_queues_with_size_backoff(adapter);
2164 goto err_create_queues_with_backoff;
2166 rc = ena_up_complete(adapter);
2170 if (test_bit(ENA_FLAG_LINK_UP, &adapter->flags))
2171 netif_carrier_on(adapter->netdev);
2173 ena_increase_stat(&adapter->dev_stats.interface_up, 1,
2176 set_bit(ENA_FLAG_DEV_UP, &adapter->flags);
2178 /* Enable completion queues interrupt */
2179 for (i = 0; i < adapter->num_io_queues; i++)
2180 ena_unmask_interrupt(&adapter->tx_ring[i],
2181 &adapter->rx_ring[i]);
2183 /* schedule napi in case we had pending packets
2184 * from the last time we disabled napi
2186 for (i = 0; i < io_queue_count; i++)
2187 napi_schedule(&adapter->ena_napi[i].napi);
2192 ena_destroy_all_tx_queues(adapter);
2193 ena_free_all_io_tx_resources(adapter);
2194 ena_destroy_all_rx_queues(adapter);
2195 ena_free_all_io_rx_resources(adapter);
2196 err_create_queues_with_backoff:
2197 ena_free_io_irq(adapter);
2199 ena_del_napi_in_range(adapter, 0, io_queue_count);
2204 void ena_down(struct ena_adapter *adapter)
2206 int io_queue_count = adapter->num_io_queues + adapter->xdp_num_queues;
2208 netif_dbg(adapter, ifdown, adapter->netdev, "%s\n", __func__);
2210 clear_bit(ENA_FLAG_DEV_UP, &adapter->flags);
2212 ena_increase_stat(&adapter->dev_stats.interface_down, 1,
2215 netif_carrier_off(adapter->netdev);
2216 netif_tx_disable(adapter->netdev);
2218 /* After this point the napi handler won't enable the tx queue */
2219 ena_napi_disable_in_range(adapter, 0, io_queue_count);
2221 if (test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags)) {
2224 rc = ena_com_dev_reset(adapter->ena_dev, adapter->reset_reason);
2226 netif_err(adapter, ifdown, adapter->netdev,
2227 "Device reset failed\n");
2228 /* stop submitting admin commands on a device that was reset */
2229 ena_com_set_admin_running_state(adapter->ena_dev, false);
2232 ena_destroy_all_io_queues(adapter);
2234 ena_disable_io_intr_sync(adapter);
2235 ena_free_io_irq(adapter);
2236 ena_del_napi_in_range(adapter, 0, io_queue_count);
2238 ena_free_all_tx_bufs(adapter);
2239 ena_free_all_rx_bufs(adapter);
2240 ena_free_all_io_tx_resources(adapter);
2241 ena_free_all_io_rx_resources(adapter);
2244 /* ena_open - Called when a network interface is made active
2245 * @netdev: network interface device structure
2247 * Returns 0 on success, negative value on failure
2249 * The open entry point is called when a network interface is made
2250 * active by the system (IFF_UP). At this point all resources needed
2251 * for transmit and receive operations are allocated, the interrupt
2252 * handler is registered with the OS, the watchdog timer is started,
2253 * and the stack is notified that the interface is ready.
2255 static int ena_open(struct net_device *netdev)
2257 struct ena_adapter *adapter = netdev_priv(netdev);
2260 /* Notify the stack of the actual queue counts. */
2261 rc = netif_set_real_num_tx_queues(netdev, adapter->num_io_queues);
2263 netif_err(adapter, ifup, netdev, "Can't set num tx queues\n");
2267 rc = netif_set_real_num_rx_queues(netdev, adapter->num_io_queues);
2269 netif_err(adapter, ifup, netdev, "Can't set num rx queues\n");
2273 rc = ena_up(adapter);
2280 /* ena_close - Disables a network interface
2281 * @netdev: network interface device structure
2283 * Returns 0, this is not allowed to fail
2285 * The close entry point is called when an interface is de-activated
2286 * by the OS. The hardware is still under the drivers control, but
2287 * needs to be disabled. A global MAC reset is issued to stop the
2288 * hardware, and all transmit and receive resources are freed.
2290 static int ena_close(struct net_device *netdev)
2292 struct ena_adapter *adapter = netdev_priv(netdev);
2294 netif_dbg(adapter, ifdown, netdev, "%s\n", __func__);
2296 if (!test_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags))
2299 if (test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
2302 /* Check the device status and issue a reset if needed */
2303 check_for_admin_com_state(adapter);
2304 if (unlikely(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) {
2305 netif_err(adapter, ifdown, adapter->netdev,
2306 "Destroy failure, restarting device\n");
2307 ena_dump_stats_to_dmesg(adapter);
2308 /* rtnl lock already obtained in dev_ioctl() layer */
2309 ena_destroy_device(adapter, false);
2310 ena_restore_device(adapter);
2316 int ena_update_queue_params(struct ena_adapter *adapter,
2319 u32 new_llq_header_len)
2321 bool dev_was_up, large_llq_changed = false;
2324 dev_was_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags);
2325 ena_close(adapter->netdev);
2326 adapter->requested_tx_ring_size = new_tx_size;
2327 adapter->requested_rx_ring_size = new_rx_size;
2328 ena_init_io_rings(adapter,
2330 adapter->xdp_num_queues +
2331 adapter->num_io_queues);
2333 large_llq_changed = adapter->ena_dev->tx_mem_queue_type ==
2334 ENA_ADMIN_PLACEMENT_POLICY_DEV;
2335 large_llq_changed &=
2336 new_llq_header_len != adapter->ena_dev->tx_max_header_size;
2338 /* A check that the configuration is valid is done by the caller */
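/* Toggling the large LLQ header setting changes the LLQ entry size, which is
 * configured during device initialization, so a full destroy/restore cycle is
 * required below instead of a plain ena_up().
 */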
2339 if (large_llq_changed) {
2340 adapter->large_llq_header_enabled = !adapter->large_llq_header_enabled;
2342 ena_destroy_device(adapter, false);
2343 rc = ena_restore_device(adapter);
2346 return dev_was_up && !rc ? ena_up(adapter) : rc;
2349 int ena_set_rx_copybreak(struct ena_adapter *adapter, u32 rx_copybreak)
2351 struct ena_ring *rx_ring;
2354 if (rx_copybreak > min_t(u16, adapter->netdev->mtu, ENA_PAGE_SIZE))
2357 adapter->rx_copybreak = rx_copybreak;
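/* Propagate the new copybreak to every RX ring; packets up to this size are
 * copied out of the RX buffer into a small skb instead of reusing the buffer.
 */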
2359 for (i = 0; i < adapter->num_io_queues; i++) {
2360 rx_ring = &adapter->rx_ring[i];
2361 rx_ring->rx_copybreak = rx_copybreak;
2367 int ena_update_queue_count(struct ena_adapter *adapter, u32 new_channel_count)
2369 struct ena_com_dev *ena_dev = adapter->ena_dev;
2370 int prev_channel_count;
2373 dev_was_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags);
2374 ena_close(adapter->netdev);
2375 prev_channel_count = adapter->num_io_queues;
2376 adapter->num_io_queues = new_channel_count;
2377 if (ena_xdp_present(adapter) &&
2378 ena_xdp_allowed(adapter) == ENA_XDP_ALLOWED) {
2379 adapter->xdp_first_ring = new_channel_count;
2380 adapter->xdp_num_queues = new_channel_count;
2381 if (prev_channel_count > new_channel_count)
2382 ena_xdp_exchange_program_rx_in_range(adapter,
2385 prev_channel_count);
2387 ena_xdp_exchange_program_rx_in_range(adapter,
2388 adapter->xdp_bpf_prog,
2393 /* We need to destroy the rss table so that the indirection
2394 * table will be reinitialized by ena_up()
2396 ena_com_rss_destroy(ena_dev);
2397 ena_init_io_rings(adapter,
2399 adapter->xdp_num_queues +
2400 adapter->num_io_queues);
2401 return dev_was_up ? ena_open(adapter->netdev) : 0;
2404 static void ena_tx_csum(struct ena_com_tx_ctx *ena_tx_ctx,
2405 struct sk_buff *skb,
2406 bool disable_meta_caching)
2408 u32 mss = skb_shinfo(skb)->gso_size;
2409 struct ena_com_tx_meta *ena_meta = &ena_tx_ctx->ena_meta;
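/* Fill the device TX context with checksum-offload and TSO metadata based on
 * the skb's offload requests (CHECKSUM_PARTIAL and gso_size).
 */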
2412 if ((skb->ip_summed == CHECKSUM_PARTIAL) || mss) {
2413 ena_tx_ctx->l4_csum_enable = 1;
2415 ena_tx_ctx->tso_enable = 1;
2416 ena_meta->l4_hdr_len = tcp_hdr(skb)->doff;
2417 ena_tx_ctx->l4_csum_partial = 0;
2419 ena_tx_ctx->tso_enable = 0;
2420 ena_meta->l4_hdr_len = 0;
2421 ena_tx_ctx->l4_csum_partial = 1;
2424 switch (ip_hdr(skb)->version) {
2426 ena_tx_ctx->l3_proto = ENA_ETH_IO_L3_PROTO_IPV4;
2427 if (ip_hdr(skb)->frag_off & htons(IP_DF))
2430 ena_tx_ctx->l3_csum_enable = 1;
2431 l4_protocol = ip_hdr(skb)->protocol;
2434 ena_tx_ctx->l3_proto = ENA_ETH_IO_L3_PROTO_IPV6;
2435 l4_protocol = ipv6_hdr(skb)->nexthdr;
2441 if (l4_protocol == IPPROTO_TCP)
2442 ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_TCP;
2444 ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_UDP;
2446 ena_meta->mss = mss;
2447 ena_meta->l3_hdr_len = skb_network_header_len(skb);
2448 ena_meta->l3_hdr_offset = skb_network_offset(skb);
2449 ena_tx_ctx->meta_valid = 1;
2450 } else if (disable_meta_caching) {
2451 memset(ena_meta, 0, sizeof(*ena_meta));
2452 ena_tx_ctx->meta_valid = 1;
2454 ena_tx_ctx->meta_valid = 0;
2458 static int ena_check_and_linearize_skb(struct ena_ring *tx_ring,
2459 struct sk_buff *skb)
2461 int num_frags, header_len, rc;
2463 num_frags = skb_shinfo(skb)->nr_frags;
2464 header_len = skb_headlen(skb);
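/* The device accepts at most sgl_size buffers per packet. If the skb has too
 * many fragments (and the linear part is too large to be absorbed as a pushed
 * header), linearize it at the cost of a copy.
 */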
2466 if (num_frags < tx_ring->sgl_size)
2469 if ((num_frags == tx_ring->sgl_size) &&
2470 (header_len < tx_ring->tx_max_header_size))
2473 ena_increase_stat(&tx_ring->tx_stats.linearize, 1, &tx_ring->syncp);
2475 rc = skb_linearize(skb);
2477 ena_increase_stat(&tx_ring->tx_stats.linearize_failed, 1,
2484 static int ena_tx_map_skb(struct ena_ring *tx_ring,
2485 struct ena_tx_buffer *tx_info,
2486 struct sk_buff *skb,
2490 struct ena_adapter *adapter = tx_ring->adapter;
2491 struct ena_com_buf *ena_buf;
2493 u32 skb_head_len, frag_len, last_frag;
2498 skb_head_len = skb_headlen(skb);
2500 ena_buf = tx_info->bufs;
2502 if (tx_ring->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
2503 /* When the device is in LLQ mode, the driver will copy
2504 * the header into the device memory space.
2505 * The ena_com layer assumes the header is in a linear
2506 * memory space.
2507 * This assumption might be wrong since part of the header
2508 * can be in the fragmented buffers.
2509 * Use skb_header_pointer to make sure the header is in a
2510 * linear memory space.
2513 push_len = min_t(u32, skb->len, tx_ring->tx_max_header_size);
2514 *push_hdr = skb_header_pointer(skb, 0, push_len,
2515 tx_ring->push_buf_intermediate_buf);
2516 *header_len = push_len;
2517 if (unlikely(skb->data != *push_hdr)) {
2518 ena_increase_stat(&tx_ring->tx_stats.llq_buffer_copy, 1,
2521 delta = push_len - skb_head_len;
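/* delta is the number of header bytes that were pulled from the fragments
 * into the push buffer; skip that many bytes when mapping the fragments below.
 */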
2525 *header_len = min_t(u32, skb_head_len,
2526 tx_ring->tx_max_header_size);
2529 netif_dbg(adapter, tx_queued, adapter->netdev,
2530 "skb: %p header_buf->vaddr: %p push_len: %d\n", skb,
2531 *push_hdr, push_len);
2533 if (skb_head_len > push_len) {
2534 dma = dma_map_single(tx_ring->dev, skb->data + push_len,
2535 skb_head_len - push_len, DMA_TO_DEVICE);
2536 if (unlikely(dma_mapping_error(tx_ring->dev, dma)))
2537 goto error_report_dma_error;
2539 ena_buf->paddr = dma;
2540 ena_buf->len = skb_head_len - push_len;
2543 tx_info->num_of_bufs++;
2544 tx_info->map_linear_data = 1;
2546 tx_info->map_linear_data = 0;
2549 last_frag = skb_shinfo(skb)->nr_frags;
2551 for (i = 0; i < last_frag; i++) {
2552 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2554 frag_len = skb_frag_size(frag);
2556 if (unlikely(delta >= frag_len)) {
2561 dma = skb_frag_dma_map(tx_ring->dev, frag, delta,
2562 frag_len - delta, DMA_TO_DEVICE);
2563 if (unlikely(dma_mapping_error(tx_ring->dev, dma)))
2564 goto error_report_dma_error;
2566 ena_buf->paddr = dma;
2567 ena_buf->len = frag_len - delta;
2569 tx_info->num_of_bufs++;
2575 error_report_dma_error:
2576 ena_increase_stat(&tx_ring->tx_stats.dma_mapping_err, 1,
2578 netif_warn(adapter, tx_queued, adapter->netdev, "Failed to map skb\n");
2580 tx_info->skb = NULL;
2582 tx_info->num_of_bufs += i;
2583 ena_unmap_tx_buff(tx_ring, tx_info);
2588 /* Called with netif_tx_lock. */
2589 static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev)
2591 struct ena_adapter *adapter = netdev_priv(dev);
2592 struct ena_tx_buffer *tx_info;
2593 struct ena_com_tx_ctx ena_tx_ctx;
2594 struct ena_ring *tx_ring;
2595 struct netdev_queue *txq;
2597 u16 next_to_use, req_id, header_len;
2600 netif_dbg(adapter, tx_queued, dev, "%s skb %p\n", __func__, skb);
2601 /* Determine which tx ring we will be placed on */
2602 qid = skb_get_queue_mapping(skb);
2603 tx_ring = &adapter->tx_ring[qid];
2604 txq = netdev_get_tx_queue(dev, qid);
2606 rc = ena_check_and_linearize_skb(tx_ring, skb);
2608 goto error_drop_packet;
2610 next_to_use = tx_ring->next_to_use;
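/* free_ids holds the pool of available request ids; an id is recycled back
 * into the pool once the TX completion for its packet has been processed.
 */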
2611 req_id = tx_ring->free_ids[next_to_use];
2612 tx_info = &tx_ring->tx_buffer_info[req_id];
2613 tx_info->num_of_bufs = 0;
2615 WARN(tx_info->skb, "SKB isn't NULL req_id %d\n", req_id);
2617 rc = ena_tx_map_skb(tx_ring, tx_info, skb, &push_hdr, &header_len);
2619 goto error_drop_packet;
2621 memset(&ena_tx_ctx, 0x0, sizeof(struct ena_com_tx_ctx));
2622 ena_tx_ctx.ena_bufs = tx_info->bufs;
2623 ena_tx_ctx.push_header = push_hdr;
2624 ena_tx_ctx.num_bufs = tx_info->num_of_bufs;
2625 ena_tx_ctx.req_id = req_id;
2626 ena_tx_ctx.header_len = header_len;
2628 /* set flags and meta data */
2629 ena_tx_csum(&ena_tx_ctx, skb, tx_ring->disable_meta_caching);
2631 rc = ena_xmit_common(adapter,
2638 goto error_unmap_dma;
2640 netdev_tx_sent_queue(txq, skb->len);
2642 /* Stop the queue when no more space is available; the packet can require up
2643 * to sgl_size + 2 descriptors: one for the meta descriptor and one for the header
2644 * (if the header is larger than tx_max_header_size).
2646 if (unlikely(!ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq,
2647 tx_ring->sgl_size + 2))) {
2648 netif_dbg(adapter, tx_queued, dev, "%s stop queue %d\n",
2651 netif_tx_stop_queue(txq);
2652 ena_increase_stat(&tx_ring->tx_stats.queue_stop, 1,
2655 /* There is a rare condition where this function decides to
2656 * stop the queue but meanwhile clean_tx_irq updates
2657 * next_to_completion and terminates.
2658 * The queue will remain stopped forever.
2659 * To solve this issue add a mb() to make sure that
2660 * netif_tx_stop_queue() write is visible before checking if
2661 * there is additional space in the queue.
2665 if (ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq,
2666 ENA_TX_WAKEUP_THRESH)) {
2667 netif_tx_wake_queue(txq);
2668 ena_increase_stat(&tx_ring->tx_stats.queue_wakeup, 1,
2673 skb_tx_timestamp(skb);
2675 if (netif_xmit_stopped(txq) || !netdev_xmit_more())
2676 /* trigger the dma engine. ena_ring_tx_doorbell()
2677 * calls a memory barrier inside it.
2679 ena_ring_tx_doorbell(tx_ring);
2681 return NETDEV_TX_OK;
2684 ena_unmap_tx_buff(tx_ring, tx_info);
2685 tx_info->skb = NULL;
2689 return NETDEV_TX_OK;
2692 static void ena_config_host_info(struct ena_com_dev *ena_dev, struct pci_dev *pdev)
2694 struct device *dev = &pdev->dev;
2695 struct ena_admin_host_info *host_info;
2698 /* Allocate only the host info */
2699 rc = ena_com_allocate_host_info(ena_dev);
2701 dev_err(dev, "Cannot allocate host info\n");
2705 host_info = ena_dev->host_attr.host_info;
2707 host_info->bdf = pci_dev_id(pdev);
2708 host_info->os_type = ENA_ADMIN_OS_LINUX;
2709 host_info->kernel_ver = LINUX_VERSION_CODE;
2710 strscpy(host_info->kernel_ver_str, utsname()->version,
2711 sizeof(host_info->kernel_ver_str) - 1);
2712 host_info->os_dist = 0;
2713 strscpy(host_info->os_dist_str, utsname()->release,
2714 sizeof(host_info->os_dist_str));
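/* Pack the driver version into a single 32-bit word: major, minor and
 * sub-minor fields plus a module-type character ('K' identifies the kernel
 * driver).
 */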
2715 host_info->driver_version =
2716 (DRV_MODULE_GEN_MAJOR) |
2717 (DRV_MODULE_GEN_MINOR << ENA_ADMIN_HOST_INFO_MINOR_SHIFT) |
2718 (DRV_MODULE_GEN_SUBMINOR << ENA_ADMIN_HOST_INFO_SUB_MINOR_SHIFT) |
2719 ("K"[0] << ENA_ADMIN_HOST_INFO_MODULE_TYPE_SHIFT);
2720 host_info->num_cpus = num_online_cpus();
2722 host_info->driver_supported_features =
2723 ENA_ADMIN_HOST_INFO_RX_OFFSET_MASK |
2724 ENA_ADMIN_HOST_INFO_INTERRUPT_MODERATION_MASK |
2725 ENA_ADMIN_HOST_INFO_RX_BUF_MIRRORING_MASK |
2726 ENA_ADMIN_HOST_INFO_RSS_CONFIGURABLE_FUNCTION_KEY_MASK |
2727 ENA_ADMIN_HOST_INFO_RX_PAGE_REUSE_MASK;
2729 rc = ena_com_set_host_attributes(ena_dev);
2731 if (rc == -EOPNOTSUPP)
2732 dev_warn(dev, "Cannot set host attributes\n");
2734 dev_err(dev, "Cannot set host attributes\n");
2742 ena_com_delete_host_info(ena_dev);
2745 static void ena_config_debug_area(struct ena_adapter *adapter)
2747 u32 debug_area_size;
2750 ss_count = ena_get_sset_count(adapter->netdev, ETH_SS_STATS);
2751 if (ss_count <= 0) {
2752 netif_err(adapter, drv, adapter->netdev,
2753 "SS count is negative\n");
2757 /* allocate 32 bytes for each string and 64 bits for the value */
2758 debug_area_size = ss_count * ETH_GSTRING_LEN + sizeof(u64) * ss_count;
2760 rc = ena_com_allocate_debug_area(adapter->ena_dev, debug_area_size);
2762 netif_err(adapter, drv, adapter->netdev,
2763 "Cannot allocate debug area\n");
2767 rc = ena_com_set_host_attributes(adapter->ena_dev);
2769 if (rc == -EOPNOTSUPP)
2770 netif_warn(adapter, drv, adapter->netdev, "Cannot set host attributes\n");
2772 netif_err(adapter, drv, adapter->netdev,
2773 "Cannot set host attributes\n");
2779 ena_com_delete_debug_area(adapter->ena_dev);
2782 int ena_update_hw_stats(struct ena_adapter *adapter)
2786 rc = ena_com_get_eni_stats(adapter->ena_dev, &adapter->eni_stats);
2788 netdev_err(adapter->netdev, "Failed to get ENI stats\n");
2795 static void ena_get_stats64(struct net_device *netdev,
2796 struct rtnl_link_stats64 *stats)
2798 struct ena_adapter *adapter = netdev_priv(netdev);
2799 struct ena_ring *rx_ring, *tx_ring;
2800 u64 total_xdp_rx_drops = 0;
2806 if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
2809 for (i = 0; i < adapter->num_io_queues + adapter->xdp_num_queues; i++) {
2810 u64 bytes, packets, xdp_rx_drops;
2812 tx_ring = &adapter->tx_ring[i];
2815 start = u64_stats_fetch_begin(&tx_ring->syncp);
2816 packets = tx_ring->tx_stats.cnt;
2817 bytes = tx_ring->tx_stats.bytes;
2818 } while (u64_stats_fetch_retry(&tx_ring->syncp, start));
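/* The u64_stats_fetch_begin()/retry() pair reads a consistent snapshot of the
 * per-ring counters without locking; the retry only triggers on 32-bit hosts,
 * where 64-bit counter updates are not atomic.
 */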
2820 stats->tx_packets += packets;
2821 stats->tx_bytes += bytes;
2823 /* In XDP there isn't an RX queue counterpart */
2824 if (ENA_IS_XDP_INDEX(adapter, i))
2827 rx_ring = &adapter->rx_ring[i];
2830 start = u64_stats_fetch_begin(&rx_ring->syncp);
2831 packets = rx_ring->rx_stats.cnt;
2832 bytes = rx_ring->rx_stats.bytes;
2833 xdp_rx_drops = rx_ring->rx_stats.xdp_drop;
2834 } while (u64_stats_fetch_retry(&rx_ring->syncp, start));
2836 stats->rx_packets += packets;
2837 stats->rx_bytes += bytes;
2838 total_xdp_rx_drops += xdp_rx_drops;
2842 start = u64_stats_fetch_begin(&adapter->syncp);
2843 rx_drops = adapter->dev_stats.rx_drops;
2844 tx_drops = adapter->dev_stats.tx_drops;
2845 } while (u64_stats_fetch_retry(&adapter->syncp, start));
2847 stats->rx_dropped = rx_drops + total_xdp_rx_drops;
2848 stats->tx_dropped = tx_drops;
2850 stats->multicast = 0;
2851 stats->collisions = 0;
2853 stats->rx_length_errors = 0;
2854 stats->rx_crc_errors = 0;
2855 stats->rx_frame_errors = 0;
2856 stats->rx_fifo_errors = 0;
2857 stats->rx_missed_errors = 0;
2858 stats->tx_window_errors = 0;
2860 stats->rx_errors = 0;
2861 stats->tx_errors = 0;
2864 static const struct net_device_ops ena_netdev_ops = {
2865 .ndo_open = ena_open,
2866 .ndo_stop = ena_close,
2867 .ndo_start_xmit = ena_start_xmit,
2868 .ndo_get_stats64 = ena_get_stats64,
2869 .ndo_tx_timeout = ena_tx_timeout,
2870 .ndo_change_mtu = ena_change_mtu,
2871 .ndo_validate_addr = eth_validate_addr,
2873 .ndo_xdp_xmit = ena_xdp_xmit,
2876 static int ena_calc_io_queue_size(struct ena_adapter *adapter,
2877 struct ena_com_dev_get_features_ctx *get_feat_ctx)
2879 struct ena_admin_feature_llq_desc *llq = &get_feat_ctx->llq;
2880 struct ena_com_dev *ena_dev = adapter->ena_dev;
2881 u32 tx_queue_size = ENA_DEFAULT_RING_SIZE;
2882 u32 rx_queue_size = ENA_DEFAULT_RING_SIZE;
2883 u32 max_tx_queue_size;
2884 u32 max_rx_queue_size;
2886 /* If this function is called after driver load, the ring sizes have already
2887 * been configured. Take it into account when recalculating ring size.
2889 if (adapter->tx_ring->ring_size)
2890 tx_queue_size = adapter->tx_ring->ring_size;
2892 if (adapter->rx_ring->ring_size)
2893 rx_queue_size = adapter->rx_ring->ring_size;
2895 if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) {
2896 struct ena_admin_queue_ext_feature_fields *max_queue_ext =
2897 &get_feat_ctx->max_queue_ext.max_queue_ext;
2898 max_rx_queue_size = min_t(u32, max_queue_ext->max_rx_cq_depth,
2899 max_queue_ext->max_rx_sq_depth);
2900 max_tx_queue_size = max_queue_ext->max_tx_cq_depth;
2902 if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
2903 max_tx_queue_size = min_t(u32, max_tx_queue_size,
2904 llq->max_llq_depth);
2906 max_tx_queue_size = min_t(u32, max_tx_queue_size,
2907 max_queue_ext->max_tx_sq_depth);
2909 adapter->max_tx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS,
2910 max_queue_ext->max_per_packet_tx_descs);
2911 adapter->max_rx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS,
2912 max_queue_ext->max_per_packet_rx_descs);
2914 struct ena_admin_queue_feature_desc *max_queues =
2915 &get_feat_ctx->max_queues;
2916 max_rx_queue_size = min_t(u32, max_queues->max_cq_depth,
2917 max_queues->max_sq_depth);
2918 max_tx_queue_size = max_queues->max_cq_depth;
2920 if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
2921 max_tx_queue_size = min_t(u32, max_tx_queue_size,
2922 llq->max_llq_depth);
2924 max_tx_queue_size = min_t(u32, max_tx_queue_size,
2925 max_queues->max_sq_depth);
2927 adapter->max_tx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS,
2928 max_queues->max_packet_tx_descs);
2929 adapter->max_rx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS,
2930 max_queues->max_packet_rx_descs);
2933 max_tx_queue_size = rounddown_pow_of_two(max_tx_queue_size);
2934 max_rx_queue_size = rounddown_pow_of_two(max_rx_queue_size);
2936 if (max_tx_queue_size < ENA_MIN_RING_SIZE) {
2937 netdev_err(adapter->netdev, "Device max TX queue size: %d < minimum: %d\n",
2938 max_tx_queue_size, ENA_MIN_RING_SIZE);
2942 if (max_rx_queue_size < ENA_MIN_RING_SIZE) {
2943 netdev_err(adapter->netdev, "Device max RX queue size: %d < minimum: %d\n",
2944 max_rx_queue_size, ENA_MIN_RING_SIZE);
2948 /* When forcing large headers, we multiply the entry size by 2, and therefore divide
2949 * the queue size by 2, leaving the amount of memory used by the queues unchanged.
2951 if (adapter->large_llq_header_enabled) {
2952 if ((llq->entry_size_ctrl_supported & ENA_ADMIN_LIST_ENTRY_SIZE_256B) &&
2953 ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
2954 max_tx_queue_size /= 2;
2955 dev_info(&adapter->pdev->dev,
2956 "Forcing large headers and decreasing maximum TX queue size to %d\n",
2959 dev_err(&adapter->pdev->dev,
2960 "Forcing large headers failed: LLQ is disabled or device does not support large headers\n");
2962 adapter->large_llq_header_enabled = false;
2966 tx_queue_size = clamp_val(tx_queue_size, ENA_MIN_RING_SIZE,
2968 rx_queue_size = clamp_val(rx_queue_size, ENA_MIN_RING_SIZE,
2971 tx_queue_size = rounddown_pow_of_two(tx_queue_size);
2972 rx_queue_size = rounddown_pow_of_two(rx_queue_size);
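/* Publish the device limits and the power-of-two requested ring sizes on the
 * adapter; ena_update_queue_params() overwrites the requested sizes when the
 * rings are resized at runtime.
 */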
2974 adapter->max_tx_ring_size = max_tx_queue_size;
2975 adapter->max_rx_ring_size = max_rx_queue_size;
2976 adapter->requested_tx_ring_size = tx_queue_size;
2977 adapter->requested_rx_ring_size = rx_queue_size;
2982 static int ena_device_validate_params(struct ena_adapter *adapter,
2983 struct ena_com_dev_get_features_ctx *get_feat_ctx)
2985 struct net_device *netdev = adapter->netdev;
2988 rc = ether_addr_equal(get_feat_ctx->dev_attr.mac_addr,
2991 netif_err(adapter, drv, netdev,
2992 "Error, MAC addresses are different\n");
2996 if (get_feat_ctx->dev_attr.max_mtu < netdev->mtu) {
2997 netif_err(adapter, drv, netdev,
2998 "Error, device max mtu is smaller than netdev MTU\n");
3005 static void set_default_llq_configurations(struct ena_adapter *adapter,
3006 struct ena_llq_configurations *llq_config,
3007 struct ena_admin_feature_llq_desc *llq)
3009 struct ena_com_dev *ena_dev = adapter->ena_dev;
3011 llq_config->llq_header_location = ENA_ADMIN_INLINE_HEADER;
3012 llq_config->llq_stride_ctrl = ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY;
3013 llq_config->llq_num_decs_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2;
3015 adapter->large_llq_header_supported =
3016 !!(ena_dev->supported_features & BIT(ENA_ADMIN_LLQ));
3017 adapter->large_llq_header_supported &=
3018 !!(llq->entry_size_ctrl_supported &
3019 ENA_ADMIN_LIST_ENTRY_SIZE_256B);
3021 if ((llq->entry_size_ctrl_supported & ENA_ADMIN_LIST_ENTRY_SIZE_256B) &&
3022 adapter->large_llq_header_enabled) {
3023 llq_config->llq_ring_entry_size = ENA_ADMIN_LIST_ENTRY_SIZE_256B;
3024 llq_config->llq_ring_entry_size_value = 256;
3026 llq_config->llq_ring_entry_size = ENA_ADMIN_LIST_ENTRY_SIZE_128B;
3027 llq_config->llq_ring_entry_size_value = 128;
3031 static int ena_set_queues_placement_policy(struct pci_dev *pdev,
3032 struct ena_com_dev *ena_dev,
3033 struct ena_admin_feature_llq_desc *llq,
3034 struct ena_llq_configurations *llq_default_configurations)
3037 u32 llq_feature_mask;
3039 llq_feature_mask = 1 << ENA_ADMIN_LLQ;
3040 if (!(ena_dev->supported_features & llq_feature_mask)) {
3041 dev_warn(&pdev->dev,
3042 "LLQ is not supported. Fallback to host mode policy.\n");
3043 ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
3047 if (!ena_dev->mem_bar) {
3048 netdev_err(ena_dev->net_device,
3049 "LLQ is advertised as supported but device doesn't expose mem bar\n");
3050 ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
3054 rc = ena_com_config_dev_mode(ena_dev, llq, llq_default_configurations);
3057 "Failed to configure the device mode. Fallback to host mode policy.\n");
3058 ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
3064 static int ena_map_llq_mem_bar(struct pci_dev *pdev, struct ena_com_dev *ena_dev,
3067 bool has_mem_bar = !!(bars & BIT(ENA_MEM_BAR));
3072 ena_dev->mem_bar = devm_ioremap_wc(&pdev->dev,
3073 pci_resource_start(pdev, ENA_MEM_BAR),
3074 pci_resource_len(pdev, ENA_MEM_BAR));
3076 if (!ena_dev->mem_bar)
3082 static int ena_device_init(struct ena_adapter *adapter, struct pci_dev *pdev,
3083 struct ena_com_dev_get_features_ctx *get_feat_ctx,
3086 struct ena_com_dev *ena_dev = adapter->ena_dev;
3087 struct net_device *netdev = adapter->netdev;
3088 struct ena_llq_configurations llq_config;
3089 struct device *dev = &pdev->dev;
3090 bool readless_supported;
3095 rc = ena_com_mmio_reg_read_request_init(ena_dev);
3097 dev_err(dev, "Failed to init mmio read less\n");
3101 /* The PCIe configuration space revision id indicates if mmio reg
3102 * read is disabled
3103 */
3104 readless_supported = !(pdev->revision & ENA_MMIO_DISABLE_REG_READ);
3105 ena_com_set_mmio_read_mode(ena_dev, readless_supported);
3107 rc = ena_com_dev_reset(ena_dev, ENA_REGS_RESET_NORMAL);
3109 dev_err(dev, "Can not reset device\n");
3110 goto err_mmio_read_less;
3113 rc = ena_com_validate_version(ena_dev);
3115 dev_err(dev, "Device version is too low\n");
3116 goto err_mmio_read_less;
3119 dma_width = ena_com_get_dma_width(ena_dev);
3120 if (dma_width < 0) {
3121 dev_err(dev, "Invalid dma width value %d\n", dma_width);
3123 goto err_mmio_read_less;
3126 rc = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(dma_width));
3128 dev_err(dev, "dma_set_mask_and_coherent failed %d\n", rc);
3129 goto err_mmio_read_less;
3132 /* ENA admin level init */
3133 rc = ena_com_admin_init(ena_dev, &aenq_handlers);
3136 "Can not initialize ena admin queue with device\n");
3137 goto err_mmio_read_less;
3140 /* To enable the msix interrupts the driver needs to know the number
3141 * of queues. So the driver uses polling mode to retrieve this
3142 * information
3143 */
3144 ena_com_set_admin_polling_mode(ena_dev, true);
3146 ena_config_host_info(ena_dev, pdev);
3148 /* Get device attributes */
3149 rc = ena_com_get_dev_attr_feat(ena_dev, get_feat_ctx);
3151 dev_err(dev, "Cannot get attribute for ena device rc=%d\n", rc);
3152 goto err_admin_init;
3155 /* Try to turn on all the available aenq groups */
3156 aenq_groups = BIT(ENA_ADMIN_LINK_CHANGE) |
3157 BIT(ENA_ADMIN_FATAL_ERROR) |
3158 BIT(ENA_ADMIN_WARNING) |
3159 BIT(ENA_ADMIN_NOTIFICATION) |
3160 BIT(ENA_ADMIN_KEEP_ALIVE);
3162 aenq_groups &= get_feat_ctx->aenq.supported_groups;
3164 rc = ena_com_set_aenq_config(ena_dev, aenq_groups);
3166 dev_err(dev, "Cannot configure aenq groups rc= %d\n", rc);
3167 goto err_admin_init;
3170 *wd_state = !!(aenq_groups & BIT(ENA_ADMIN_KEEP_ALIVE));
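/* Arm the keep-alive watchdog only if the device agreed to send KEEP_ALIVE
 * AENQ events; otherwise check_for_missing_keep_alive() is a no-op.
 */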
3172 set_default_llq_configurations(adapter, &llq_config, &get_feat_ctx->llq);
3174 rc = ena_set_queues_placement_policy(pdev, ena_dev, &get_feat_ctx->llq,
3177 netdev_err(netdev, "Cannot set queues placement policy rc= %d\n", rc);
3178 goto err_admin_init;
3181 rc = ena_calc_io_queue_size(adapter, get_feat_ctx);
3183 goto err_admin_init;
3188 ena_com_abort_admin_commands(ena_dev);
3189 ena_com_wait_for_abort_completion(ena_dev);
3190 ena_com_delete_host_info(ena_dev);
3191 ena_com_admin_destroy(ena_dev);
3193 ena_com_mmio_reg_read_request_destroy(ena_dev);
3198 static int ena_enable_msix_and_set_admin_interrupts(struct ena_adapter *adapter)
3200 struct ena_com_dev *ena_dev = adapter->ena_dev;
3201 struct device *dev = &adapter->pdev->dev;
3204 rc = ena_enable_msix(adapter);
3206 dev_err(dev, "Can not reserve msix vectors\n");
3210 ena_setup_mgmnt_intr(adapter);
3212 rc = ena_request_mgmnt_irq(adapter);
3214 dev_err(dev, "Can not setup management interrupts\n");
3215 goto err_disable_msix;
3218 ena_com_set_admin_polling_mode(ena_dev, false);
3220 ena_com_admin_aenq_enable(ena_dev);
3225 ena_disable_msix(adapter);
3230 static void ena_destroy_device(struct ena_adapter *adapter, bool graceful)
3232 struct net_device *netdev = adapter->netdev;
3233 struct ena_com_dev *ena_dev = adapter->ena_dev;
3236 if (!test_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags))
3239 netif_carrier_off(netdev);
3241 del_timer_sync(&adapter->timer_service);
3243 dev_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags);
3244 adapter->dev_up_before_reset = dev_up;
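/* Remember whether the interface was up so ena_restore_device() can bring it
 * back up once the device has been re-initialized.
 */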
3246 ena_com_set_admin_running_state(ena_dev, false);
3251 /* Stop the device from sending AENQ events (if the reset flag is set
3252 * and the device is up, ena_down() has already reset the device).
3253 */
3254 if (!(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags) && dev_up))
3255 ena_com_dev_reset(adapter->ena_dev, adapter->reset_reason);
3257 ena_free_mgmnt_irq(adapter);
3259 ena_disable_msix(adapter);
3261 ena_com_abort_admin_commands(ena_dev);
3263 ena_com_wait_for_abort_completion(ena_dev);
3265 ena_com_admin_destroy(ena_dev);
3267 ena_com_mmio_reg_read_request_destroy(ena_dev);
3269 /* return reset reason to default value */
3270 adapter->reset_reason = ENA_REGS_RESET_NORMAL;
3272 clear_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
3273 clear_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags);
3276 static int ena_restore_device(struct ena_adapter *adapter)
3278 struct ena_com_dev_get_features_ctx get_feat_ctx;
3279 struct ena_com_dev *ena_dev = adapter->ena_dev;
3280 struct pci_dev *pdev = adapter->pdev;
3281 struct ena_ring *txr;
3285 set_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags);
3286 rc = ena_device_init(adapter, adapter->pdev, &get_feat_ctx, &wd_state);
3288 dev_err(&pdev->dev, "Can not initialize device\n");
3291 adapter->wd_state = wd_state;
3293 count = adapter->xdp_num_queues + adapter->num_io_queues;
3294 for (i = 0 ; i < count; i++) {
3295 txr = &adapter->tx_ring[i];
3296 txr->tx_mem_queue_type = ena_dev->tx_mem_queue_type;
3297 txr->tx_max_header_size = ena_dev->tx_max_header_size;
3300 rc = ena_device_validate_params(adapter, &get_feat_ctx);
3302 dev_err(&pdev->dev, "Validation of device parameters failed\n");
3303 goto err_device_destroy;
3306 rc = ena_enable_msix_and_set_admin_interrupts(adapter);
3308 dev_err(&pdev->dev, "Enable MSI-X failed\n");
3309 goto err_device_destroy;
3311 /* If the interface was up before the reset, bring it up */
3312 if (adapter->dev_up_before_reset) {
3313 rc = ena_up(adapter);
3315 dev_err(&pdev->dev, "Failed to create I/O queues\n");
3316 goto err_disable_msix;
3320 set_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags);
3322 clear_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags);
3323 if (test_bit(ENA_FLAG_LINK_UP, &adapter->flags))
3324 netif_carrier_on(adapter->netdev);
3326 mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ));
3327 adapter->last_keep_alive_jiffies = jiffies;
3331 ena_free_mgmnt_irq(adapter);
3332 ena_disable_msix(adapter);
3334 ena_com_abort_admin_commands(ena_dev);
3335 ena_com_wait_for_abort_completion(ena_dev);
3336 ena_com_admin_destroy(ena_dev);
3337 ena_com_dev_reset(ena_dev, ENA_REGS_RESET_DRIVER_INVALID_STATE);
3338 ena_com_mmio_reg_read_request_destroy(ena_dev);
3340 clear_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags);
3341 clear_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags);
3343 "Reset attempt failed. Can not reset the device\n");
3348 static void ena_fw_reset_device(struct work_struct *work)
3350 struct ena_adapter *adapter =
3351 container_of(work, struct ena_adapter, reset_task);
3355 if (likely(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) {
3356 ena_destroy_device(adapter, false);
3357 ena_restore_device(adapter);
3359 dev_err(&adapter->pdev->dev, "Device reset completed successfully\n");
3365 static int check_for_rx_interrupt_queue(struct ena_adapter *adapter,
3366 struct ena_ring *rx_ring)
3368 struct ena_napi *ena_napi = container_of(rx_ring->napi, struct ena_napi, napi);
3370 if (likely(READ_ONCE(ena_napi->first_interrupt)))
3373 if (ena_com_cq_empty(rx_ring->ena_com_io_cq))
3376 rx_ring->no_interrupt_event_cnt++;
3378 if (rx_ring->no_interrupt_event_cnt == ENA_MAX_NO_INTERRUPT_ITERATIONS) {
3379 netif_err(adapter, rx_err, adapter->netdev,
3380 "Potential MSIX issue on Rx side Queue = %d. Reset the device\n",
3383 ena_reset_device(adapter, ENA_REGS_RESET_MISS_INTERRUPT);
3390 static int check_missing_comp_in_tx_queue(struct ena_adapter *adapter,
3391 struct ena_ring *tx_ring)
3393 struct ena_napi *ena_napi = container_of(tx_ring->napi, struct ena_napi, napi);
3394 enum ena_regs_reset_reason_types reset_reason = ENA_REGS_RESET_MISS_TX_CMPL;
3395 unsigned int time_since_last_napi;
3396 unsigned int missing_tx_comp_to;
3397 bool is_tx_comp_time_expired;
3398 struct ena_tx_buffer *tx_buf;
3399 unsigned long last_jiffies;
3404 missing_tx_comp_to = jiffies_to_msecs(adapter->missing_tx_completion_to);
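/* Scan every TX buffer in the ring. A packet pending longer than the
 * missing-completion timeout counts as missed; crossing
 * missing_tx_completion_threshold triggers a device reset.
 */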
3406 for (i = 0; i < tx_ring->ring_size; i++) {
3407 tx_buf = &tx_ring->tx_buffer_info[i];
3408 last_jiffies = tx_buf->last_jiffies;
3410 if (last_jiffies == 0)
3411 /* no pending Tx at this location */
3414 is_tx_comp_time_expired = time_is_before_jiffies(last_jiffies +
3415 2 * adapter->missing_tx_completion_to);
3417 if (unlikely(!READ_ONCE(ena_napi->first_interrupt) && is_tx_comp_time_expired)) {
3418 /* If the interrupt is still not received after the grace
3419 * period, we schedule a reset
3420 */
3421 netif_err(adapter, tx_err, adapter->netdev,
3422 "Potential MSIX issue on Tx side Queue = %d. Reset the device\n",
3424 ena_reset_device(adapter, ENA_REGS_RESET_MISS_INTERRUPT);
3428 is_tx_comp_time_expired = time_is_before_jiffies(last_jiffies +
3429 adapter->missing_tx_completion_to);
3431 if (unlikely(is_tx_comp_time_expired)) {
3432 time_since_last_napi =
3433 jiffies_to_usecs(jiffies - tx_ring->tx_stats.last_napi_jiffies);
3434 napi_scheduled = !!(ena_napi->napi.state & NAPIF_STATE_SCHED);
3436 if (missing_tx_comp_to < time_since_last_napi && napi_scheduled) {
3437 /* We suspect napi isn't called because the
3438 * bottom half is not run. Require a bigger
3439 * timeout for these cases
3441 if (!time_is_before_jiffies(last_jiffies +
3442 2 * adapter->missing_tx_completion_to))
3445 reset_reason = ENA_REGS_RESET_SUSPECTED_POLL_STARVATION;
3450 if (tx_buf->print_once)
3453 netif_notice(adapter, tx_err, adapter->netdev,
3454 "TX hasn't completed, qid %d, index %d. %u usecs from last napi execution, napi scheduled: %d\n",
3455 tx_ring->qid, i, time_since_last_napi, napi_scheduled);
3457 tx_buf->print_once = 1;
3461 if (unlikely(missed_tx > adapter->missing_tx_completion_threshold)) {
3462 netif_err(adapter, tx_err, adapter->netdev,
3463 "Lost TX completions are above the threshold (%d > %d). Completion transmission timeout: %u.\n",
3465 adapter->missing_tx_completion_threshold,
3466 missing_tx_comp_to);
3467 netif_err(adapter, tx_err, adapter->netdev,
3468 "Resetting the device\n");
3470 ena_reset_device(adapter, reset_reason);
3474 ena_increase_stat(&tx_ring->tx_stats.missed_tx, missed_tx,
3480 static void check_for_missing_completions(struct ena_adapter *adapter)
3482 struct ena_ring *tx_ring;
3483 struct ena_ring *rx_ring;
3487 io_queue_count = adapter->xdp_num_queues + adapter->num_io_queues;
3488 /* Make sure the device isn't being brought down or reset by another process */
3491 if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
3494 if (test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))
3497 if (adapter->missing_tx_completion_to == ENA_HW_HINTS_NO_TIMEOUT)
3500 budget = ENA_MONITORED_TX_QUEUES;
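/* Only a budget of ENA_MONITORED_TX_QUEUES rings is scanned per timer tick;
 * last_monitored_tx_qid records where the scan stopped so the next invocation
 * resumes from there.
 */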
3502 for (i = adapter->last_monitored_tx_qid; i < io_queue_count; i++) {
3503 tx_ring = &adapter->tx_ring[i];
3504 rx_ring = &adapter->rx_ring[i];
3506 rc = check_missing_comp_in_tx_queue(adapter, tx_ring);
3510 rc = !ENA_IS_XDP_INDEX(adapter, i) ?
3511 check_for_rx_interrupt_queue(adapter, rx_ring) : 0;
3520 adapter->last_monitored_tx_qid = i % io_queue_count;
3523 /* trigger napi schedule after 2 consecutive detections */
3524 #define EMPTY_RX_REFILL 2
3525 /* For the rare case where the device runs out of Rx descriptors and the
3526 * napi handler failed to refill new Rx descriptors (due to a lack of memory,
3527 * for example).
3528 * This case will lead to a deadlock:
3529 * The device won't send interrupts since all the new Rx packets will be dropped,
3530 * and the napi handler won't allocate new Rx descriptors, so the device won't be
3531 * able to deliver new packets.
3533 * This scenario can happen when the kernel's vm.min_free_kbytes is too small.
3534 * It is recommended to have at least 512MB, with a minimum of 128MB for
3535 * constrained environments.
3537 * When such a situation is detected, reschedule napi.
3539 static void check_for_empty_rx_ring(struct ena_adapter *adapter)
3541 struct ena_ring *rx_ring;
3542 int i, refill_required;
3544 if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
3547 if (test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))
3550 for (i = 0; i < adapter->num_io_queues; i++) {
3551 rx_ring = &adapter->rx_ring[i];
3553 refill_required = ena_com_free_q_entries(rx_ring->ena_com_io_sq);
3554 if (unlikely(refill_required == (rx_ring->ring_size - 1))) {
3555 rx_ring->empty_rx_queue++;
3557 if (rx_ring->empty_rx_queue >= EMPTY_RX_REFILL) {
3558 ena_increase_stat(&rx_ring->rx_stats.empty_rx_ring, 1,
3561 netif_err(adapter, drv, adapter->netdev,
3562 "Trigger refill for ring %d\n", i);
3564 napi_schedule(rx_ring->napi);
3565 rx_ring->empty_rx_queue = 0;
3568 rx_ring->empty_rx_queue = 0;
3573 /* Check for keep alive expiration */
3574 static void check_for_missing_keep_alive(struct ena_adapter *adapter)
3576 unsigned long keep_alive_expired;
3578 if (!adapter->wd_state)
3581 if (adapter->keep_alive_timeout == ENA_HW_HINTS_NO_TIMEOUT)
3584 keep_alive_expired = adapter->last_keep_alive_jiffies +
3585 adapter->keep_alive_timeout;
3586 if (unlikely(time_is_before_jiffies(keep_alive_expired))) {
3587 netif_err(adapter, drv, adapter->netdev,
3588 "Keep alive watchdog timeout.\n");
3589 ena_increase_stat(&adapter->dev_stats.wd_expired, 1,
3591 ena_reset_device(adapter, ENA_REGS_RESET_KEEP_ALIVE_TO);
3595 static void check_for_admin_com_state(struct ena_adapter *adapter)
3597 if (unlikely(!ena_com_get_admin_running_state(adapter->ena_dev))) {
3598 netif_err(adapter, drv, adapter->netdev,
3599 "ENA admin queue is not in running state!\n");
3600 ena_increase_stat(&adapter->dev_stats.admin_q_pause, 1,
3602 ena_reset_device(adapter, ENA_REGS_RESET_ADMIN_TO);
3606 static void ena_update_hints(struct ena_adapter *adapter,
3607 struct ena_admin_ena_hw_hints *hints)
3609 struct net_device *netdev = adapter->netdev;
3611 if (hints->admin_completion_tx_timeout)
3612 adapter->ena_dev->admin_queue.completion_timeout =
3613 hints->admin_completion_tx_timeout * 1000;
3615 if (hints->mmio_read_timeout)
3616 /* convert to usec */
3617 adapter->ena_dev->mmio_read.reg_read_to =
3618 hints->mmio_read_timeout * 1000;
3620 if (hints->missed_tx_completion_count_threshold_to_reset)
3621 adapter->missing_tx_completion_threshold =
3622 hints->missed_tx_completion_count_threshold_to_reset;
3624 if (hints->missing_tx_completion_timeout) {
3625 if (hints->missing_tx_completion_timeout == ENA_HW_HINTS_NO_TIMEOUT)
3626 adapter->missing_tx_completion_to = ENA_HW_HINTS_NO_TIMEOUT;
3628 adapter->missing_tx_completion_to =
3629 msecs_to_jiffies(hints->missing_tx_completion_timeout);
3632 if (hints->netdev_wd_timeout)
3633 netdev->watchdog_timeo = msecs_to_jiffies(hints->netdev_wd_timeout);
3635 if (hints->driver_watchdog_timeout) {
3636 if (hints->driver_watchdog_timeout == ENA_HW_HINTS_NO_TIMEOUT)
3637 adapter->keep_alive_timeout = ENA_HW_HINTS_NO_TIMEOUT;
3639 adapter->keep_alive_timeout =
3640 msecs_to_jiffies(hints->driver_watchdog_timeout);
3644 static void ena_update_host_info(struct ena_admin_host_info *host_info,
3645 struct net_device *netdev)
3647 host_info->supported_network_features[0] =
3648 netdev->features & GENMASK_ULL(31, 0);
3649 host_info->supported_network_features[1] =
3650 (netdev->features & GENMASK_ULL(63, 32)) >> 32;
3653 static void ena_timer_service(struct timer_list *t)
3655 struct ena_adapter *adapter = from_timer(adapter, t, timer_service);
3656 u8 *debug_area = adapter->ena_dev->host_attr.debug_area_virt_addr;
3657 struct ena_admin_host_info *host_info =
3658 adapter->ena_dev->host_attr.host_info;
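/* The timer is re-armed at the end of this function, so the health checks
 * below run roughly once a second while the device is registered.
 */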
3660 check_for_missing_keep_alive(adapter);
3662 check_for_admin_com_state(adapter);
3664 check_for_missing_completions(adapter);
3666 check_for_empty_rx_ring(adapter);
3669 ena_dump_stats_to_buf(adapter, debug_area);
3672 ena_update_host_info(host_info, adapter->netdev);
3674 if (unlikely(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) {
3675 netif_err(adapter, drv, adapter->netdev,
3676 "Trigger reset is on\n");
3677 ena_dump_stats_to_dmesg(adapter);
3678 queue_work(ena_wq, &adapter->reset_task);
3682 /* Reset the timer */
3683 mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ));
3686 static u32 ena_calc_max_io_queue_num(struct pci_dev *pdev,
3687 struct ena_com_dev *ena_dev,
3688 struct ena_com_dev_get_features_ctx *get_feat_ctx)
3690 u32 io_tx_sq_num, io_tx_cq_num, io_rx_num, max_num_io_queues;
3692 if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) {
3693 struct ena_admin_queue_ext_feature_fields *max_queue_ext =
3694 &get_feat_ctx->max_queue_ext.max_queue_ext;
3695 io_rx_num = min_t(u32, max_queue_ext->max_rx_sq_num,
3696 max_queue_ext->max_rx_cq_num);
3698 io_tx_sq_num = max_queue_ext->max_tx_sq_num;
3699 io_tx_cq_num = max_queue_ext->max_tx_cq_num;
3701 struct ena_admin_queue_feature_desc *max_queues =
3702 &get_feat_ctx->max_queues;
3703 io_tx_sq_num = max_queues->max_sq_num;
3704 io_tx_cq_num = max_queues->max_cq_num;
3705 io_rx_num = min_t(u32, io_tx_sq_num, io_tx_cq_num);
3708 /* In case of LLQ use the llq fields for the tx SQ/CQ */
3709 if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
3710 io_tx_sq_num = get_feat_ctx->llq.max_llq_num;
3712 max_num_io_queues = min_t(u32, num_online_cpus(), ENA_MAX_NUM_IO_QUEUES);
3713 max_num_io_queues = min_t(u32, max_num_io_queues, io_rx_num);
3714 max_num_io_queues = min_t(u32, max_num_io_queues, io_tx_sq_num);
3715 max_num_io_queues = min_t(u32, max_num_io_queues, io_tx_cq_num);
3716 /* 1 IRQ for mgmnt and 1 IRQ for each IO queue */
3717 max_num_io_queues = min_t(u32, max_num_io_queues, pci_msix_vec_count(pdev) - 1);
3719 return max_num_io_queues;
3722 static void ena_set_dev_offloads(struct ena_com_dev_get_features_ctx *feat,
3723 struct net_device *netdev)
3725 netdev_features_t dev_features = 0;
3727 /* Set offload features */
3728 if (feat->offload.tx &
3729 ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_MASK)
3730 dev_features |= NETIF_F_IP_CSUM;
3732 if (feat->offload.tx &
3733 ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_PART_MASK)
3734 dev_features |= NETIF_F_IPV6_CSUM;
3736 if (feat->offload.tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_MASK)
3737 dev_features |= NETIF_F_TSO;
3739 if (feat->offload.tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV6_MASK)
3740 dev_features |= NETIF_F_TSO6;
3742 if (feat->offload.tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_ECN_MASK)
3743 dev_features |= NETIF_F_TSO_ECN;
3745 if (feat->offload.rx_supported &
3746 ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_MASK)
3747 dev_features |= NETIF_F_RXCSUM;
3749 if (feat->offload.rx_supported &
3750 ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV6_CSUM_MASK)
3751 dev_features |= NETIF_F_RXCSUM;
3759 netdev->hw_features |= netdev->features;
3760 netdev->vlan_features |= netdev->features;
3763 static void ena_set_conf_feat_params(struct ena_adapter *adapter,
3764 struct ena_com_dev_get_features_ctx *feat)
3766 struct net_device *netdev = adapter->netdev;
3768 /* Copy mac address */
3769 if (!is_valid_ether_addr(feat->dev_attr.mac_addr)) {
3770 eth_hw_addr_random(netdev);
3771 ether_addr_copy(adapter->mac_addr, netdev->dev_addr);
3773 ether_addr_copy(adapter->mac_addr, feat->dev_attr.mac_addr);
3774 eth_hw_addr_set(netdev, adapter->mac_addr);
3777 /* Set offload features */
3778 ena_set_dev_offloads(feat, netdev);
3780 adapter->max_mtu = feat->dev_attr.max_mtu;
3781 netdev->max_mtu = adapter->max_mtu;
3782 netdev->min_mtu = ENA_MIN_MTU;
3785 static int ena_rss_init_default(struct ena_adapter *adapter)
3787 struct ena_com_dev *ena_dev = adapter->ena_dev;
3788 struct device *dev = &adapter->pdev->dev;
3792 rc = ena_com_rss_init(ena_dev, ENA_RX_RSS_TABLE_LOG_SIZE);
3794 dev_err(dev, "Cannot init indirect table\n");
3798 for (i = 0; i < ENA_RX_RSS_TABLE_SIZE; i++) {
3799 val = ethtool_rxfh_indir_default(i, adapter->num_io_queues);
3800 rc = ena_com_indirect_table_fill_entry(ena_dev, i,
3801 ENA_IO_RXQ_IDX(val));
3803 dev_err(dev, "Cannot fill indirect table\n");
3804 goto err_fill_indir;
3808 rc = ena_com_fill_hash_function(ena_dev, ENA_ADMIN_TOEPLITZ, NULL, ENA_HASH_KEY_SIZE,
3810 if (unlikely(rc && (rc != -EOPNOTSUPP))) {
3811 dev_err(dev, "Cannot fill hash function\n");
3812 goto err_fill_indir;
3815 rc = ena_com_set_default_hash_ctrl(ena_dev);
3816 if (unlikely(rc && (rc != -EOPNOTSUPP))) {
3817 dev_err(dev, "Cannot fill hash control\n");
3818 goto err_fill_indir;
3824 ena_com_rss_destroy(ena_dev);
3830 static void ena_release_bars(struct ena_com_dev *ena_dev, struct pci_dev *pdev)
3832 int release_bars = pci_select_bars(pdev, IORESOURCE_MEM) & ENA_BAR_MASK;
3834 pci_release_selected_regions(pdev, release_bars);
3837 /* ena_probe - Device Initialization Routine
3838 * @pdev: PCI device information struct
3839 * @ent: entry in ena_pci_tbl
3841 * Returns 0 on success, negative on failure
3843 * ena_probe initializes an adapter identified by a pci_dev structure.
3844 * The OS initialization, configuring of the adapter private structure,
3845 * and a hardware reset occur.
3847 static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
3849 struct ena_com_dev_get_features_ctx get_feat_ctx;
3850 struct ena_com_dev *ena_dev = NULL;
3851 struct ena_adapter *adapter;
3852 struct net_device *netdev;
3853 static int adapters_found;
3854 u32 max_num_io_queues;
3858 dev_dbg(&pdev->dev, "%s\n", __func__);
3860 rc = pci_enable_device_mem(pdev);
3862 dev_err(&pdev->dev, "pci_enable_device_mem() failed!\n");
3866 rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(ENA_MAX_PHYS_ADDR_SIZE_BITS));
3868 dev_err(&pdev->dev, "dma_set_mask_and_coherent failed %d\n", rc);
3869 goto err_disable_device;
3872 pci_set_master(pdev);
3874 ena_dev = vzalloc(sizeof(*ena_dev));
3877 goto err_disable_device;
3880 bars = pci_select_bars(pdev, IORESOURCE_MEM) & ENA_BAR_MASK;
3881 rc = pci_request_selected_regions(pdev, bars, DRV_MODULE_NAME);
3883 dev_err(&pdev->dev, "pci_request_selected_regions failed %d\n",
3885 goto err_free_ena_dev;
3888 ena_dev->reg_bar = devm_ioremap(&pdev->dev,
3889 pci_resource_start(pdev, ENA_REG_BAR),
3890 pci_resource_len(pdev, ENA_REG_BAR));
3891 if (!ena_dev->reg_bar) {
3892 dev_err(&pdev->dev, "Failed to remap regs bar\n");
3894 goto err_free_region;
3897 ena_dev->ena_min_poll_delay_us = ENA_ADMIN_POLL_DELAY_US;
3899 ena_dev->dmadev = &pdev->dev;
3901 netdev = alloc_etherdev_mq(sizeof(struct ena_adapter), ENA_MAX_RINGS);
3903 dev_err(&pdev->dev, "alloc_etherdev_mq failed\n");
3905 goto err_free_region;
3908 SET_NETDEV_DEV(netdev, &pdev->dev);
3909 adapter = netdev_priv(netdev);
3910 adapter->ena_dev = ena_dev;
3911 adapter->netdev = netdev;
3912 adapter->pdev = pdev;
3913 adapter->msg_enable = DEFAULT_MSG_ENABLE;
3915 ena_dev->net_device = netdev;
3917 pci_set_drvdata(pdev, adapter);
3919 rc = ena_map_llq_mem_bar(pdev, ena_dev, bars);
3921 dev_err(&pdev->dev, "ENA LLQ bar mapping failed\n");
3922 goto err_netdev_destroy;
3925 rc = ena_device_init(adapter, pdev, &get_feat_ctx, &wd_state);
3927 dev_err(&pdev->dev, "ENA device init failed\n");
3930 goto err_netdev_destroy;
3933 /* Initial TX and RX interrupt delay. Assumes 1 usec granularity.
3934 * Updated during device initialization with the real granularity
3936 ena_dev->intr_moder_tx_interval = ENA_INTR_INITIAL_TX_INTERVAL_USECS;
3937 ena_dev->intr_moder_rx_interval = ENA_INTR_INITIAL_RX_INTERVAL_USECS;
3938 ena_dev->intr_delay_resolution = ENA_DEFAULT_INTR_DELAY_RESOLUTION;
3939 max_num_io_queues = ena_calc_max_io_queue_num(pdev, ena_dev, &get_feat_ctx);
3940 if (unlikely(!max_num_io_queues)) {
3942 goto err_device_destroy;
3945 ena_set_conf_feat_params(adapter, &get_feat_ctx);
3947 adapter->reset_reason = ENA_REGS_RESET_NORMAL;
3949 adapter->num_io_queues = max_num_io_queues;
3950 adapter->max_num_io_queues = max_num_io_queues;
3951 adapter->last_monitored_tx_qid = 0;
3953 adapter->xdp_first_ring = 0;
3954 adapter->xdp_num_queues = 0;
3956 adapter->rx_copybreak = ENA_DEFAULT_RX_COPYBREAK;
3957 if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
3958 adapter->disable_meta_caching =
3959 !!(get_feat_ctx.llq.accel_mode.u.get.supported_flags &
3960 BIT(ENA_ADMIN_DISABLE_META_CACHING));
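/* When the device runs in LLQ mode and advertises DISABLE_META_CACHING, the
 * driver sends a (zeroed) metadata descriptor with every packet instead of
 * relying on device-side caching; see ena_tx_csum().
 */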
3962 adapter->wd_state = wd_state;
3964 snprintf(adapter->name, ENA_NAME_MAX_LEN, "ena_%d", adapters_found);
3966 rc = ena_com_init_interrupt_moderation(adapter->ena_dev);
3969 "Failed to query interrupt moderation feature\n");
3970 goto err_device_destroy;
3973 ena_init_io_rings(adapter,
3975 adapter->xdp_num_queues +
3976 adapter->num_io_queues);
3978 netdev->netdev_ops = &ena_netdev_ops;
3979 netdev->watchdog_timeo = TX_TIMEOUT;
3980 ena_set_ethtool_ops(netdev);
3982 netdev->priv_flags |= IFF_UNICAST_FLT;
3984 u64_stats_init(&adapter->syncp);
3986 rc = ena_enable_msix_and_set_admin_interrupts(adapter);
3989 "Failed to enable and set the admin interrupts\n");
3990 goto err_worker_destroy;
3992 rc = ena_rss_init_default(adapter);
3993 if (rc && (rc != -EOPNOTSUPP)) {
3994 dev_err(&pdev->dev, "Cannot init RSS rc: %d\n", rc);
3998 ena_config_debug_area(adapter);
4000 if (ena_xdp_legal_queue_count(adapter, adapter->num_io_queues))
4001 netdev->xdp_features = NETDEV_XDP_ACT_BASIC |
4002 NETDEV_XDP_ACT_REDIRECT;
4004 memcpy(adapter->netdev->perm_addr, adapter->mac_addr, netdev->addr_len);
4006 netif_carrier_off(netdev);
4008 rc = register_netdev(netdev);
4010 dev_err(&pdev->dev, "Cannot register net device\n");
4014 INIT_WORK(&adapter->reset_task, ena_fw_reset_device);
4016 adapter->last_keep_alive_jiffies = jiffies;
4017 adapter->keep_alive_timeout = ENA_DEVICE_KALIVE_TIMEOUT;
4018 adapter->missing_tx_completion_to = TX_TIMEOUT;
4019 adapter->missing_tx_completion_threshold = MAX_NUM_OF_TIMEOUTED_PACKETS;
4021 ena_update_hints(adapter, &get_feat_ctx.hw_hints);
4023 timer_setup(&adapter->timer_service, ena_timer_service, 0);
4024 mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ));
4026 dev_info(&pdev->dev,
4027 "%s found at mem %lx, mac addr %pM\n",
4028 DEVICE_NAME, (long)pci_resource_start(pdev, 0),
4031 set_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags);
4038 ena_com_delete_debug_area(ena_dev);
4039 ena_com_rss_destroy(ena_dev);
4041 ena_com_dev_reset(ena_dev, ENA_REGS_RESET_INIT_ERR);
4042 /* stop submitting admin commands on a device that was reset */
4043 ena_com_set_admin_running_state(ena_dev, false);
4044 ena_free_mgmnt_irq(adapter);
4045 ena_disable_msix(adapter);
4047 del_timer(&adapter->timer_service);
4049 ena_com_delete_host_info(ena_dev);
4050 ena_com_admin_destroy(ena_dev);
4052 free_netdev(netdev);
4054 ena_release_bars(ena_dev, pdev);
4058 pci_disable_device(pdev);
4062 /*****************************************************************************/
4064 /* __ena_shutoff - Helper used in both PCI remove/shutdown routines
4065 * @pdev: PCI device information struct
4066 * @shutdown: Is it a shutdown operation? If false, it means a device removal
4068 * __ena_shutoff is a helper routine that does the real work on shutdown and
4069 * removal paths; the difference between those paths is whether to
4070 * detach or unregister the netdevice.
4072 static void __ena_shutoff(struct pci_dev *pdev, bool shutdown)
4074 struct ena_adapter *adapter = pci_get_drvdata(pdev);
4075 struct ena_com_dev *ena_dev;
4076 struct net_device *netdev;
4078 ena_dev = adapter->ena_dev;
4079 netdev = adapter->netdev;
4081 #ifdef CONFIG_RFS_ACCEL
4082 if ((adapter->msix_vecs >= 1) && (netdev->rx_cpu_rmap)) {
4083 free_irq_cpu_rmap(netdev->rx_cpu_rmap);
4084 netdev->rx_cpu_rmap = NULL;
4087 #endif /* CONFIG_RFS_ACCEL */
4088 /* Make sure timer and reset routine won't be called after
4089 * freeing device resources.
4091 del_timer_sync(&adapter->timer_service);
4092 cancel_work_sync(&adapter->reset_task);
4094 rtnl_lock(); /* lock released inside the below if-else block */
4095 adapter->reset_reason = ENA_REGS_RESET_SHUTDOWN;
4096 ena_destroy_device(adapter, true);
4099 netif_device_detach(netdev);
4104 unregister_netdev(netdev);
4105 free_netdev(netdev);
4108 ena_com_rss_destroy(ena_dev);
4110 ena_com_delete_debug_area(ena_dev);
4112 ena_com_delete_host_info(ena_dev);
4114 ena_release_bars(ena_dev, pdev);
4116 pci_disable_device(pdev);
4121 /* ena_remove - Device Removal Routine
4122 * @pdev: PCI device information struct
4124 * ena_remove is called by the PCI subsystem to alert the driver
4125 * that it should release a PCI device.
4128 static void ena_remove(struct pci_dev *pdev)
4130 __ena_shutoff(pdev, false);
4133 /* ena_shutdown - Device Shutdown Routine
4134 * @pdev: PCI device information struct
4136 * ena_shutdown is called by the PCI subsystem to alert the driver that
4137 * a shutdown/reboot (or kexec) is happening and device must be disabled.
4140 static void ena_shutdown(struct pci_dev *pdev)
4142 __ena_shutoff(pdev, true);
4145 /* ena_suspend - PM suspend callback
4146 * @dev_d: Device information struct
4148 static int __maybe_unused ena_suspend(struct device *dev_d)
4150 struct pci_dev *pdev = to_pci_dev(dev_d);
4151 struct ena_adapter *adapter = pci_get_drvdata(pdev);
4153 ena_increase_stat(&adapter->dev_stats.suspend, 1, &adapter->syncp);
4156 if (unlikely(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) {
4158 "Ignoring device reset request as the device is being suspended\n");
4159 clear_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
4161 ena_destroy_device(adapter, true);
4166 /* ena_resume - PM resume callback
4167 * @dev_d: Device information struct
4169 static int __maybe_unused ena_resume(struct device *dev_d)
4171 struct ena_adapter *adapter = dev_get_drvdata(dev_d);
4174 ena_increase_stat(&adapter->dev_stats.resume, 1, &adapter->syncp);
4177 rc = ena_restore_device(adapter);
4182 static SIMPLE_DEV_PM_OPS(ena_pm_ops, ena_suspend, ena_resume);
4184 static struct pci_driver ena_pci_driver = {
4185 .name = DRV_MODULE_NAME,
4186 .id_table = ena_pci_tbl,
4188 .remove = ena_remove,
4189 .shutdown = ena_shutdown,
4190 .driver.pm = &ena_pm_ops,
4191 .sriov_configure = pci_sriov_configure_simple,
4194 static int __init ena_init(void)
4198 ena_wq = create_singlethread_workqueue(DRV_MODULE_NAME);
4200 pr_err("Failed to create workqueue\n");
4204 ret = pci_register_driver(&ena_pci_driver);
4206 destroy_workqueue(ena_wq);
4211 static void __exit ena_cleanup(void)
4213 pci_unregister_driver(&ena_pci_driver);
4216 destroy_workqueue(ena_wq);
4221 /******************************************************************************
4222 ******************************** AENQ Handlers *******************************
4223 *****************************************************************************/
4224 /* ena_update_on_link_change:
4225 * Notify the network interface about the change in link status
4227 static void ena_update_on_link_change(void *adapter_data,
4228 struct ena_admin_aenq_entry *aenq_e)
4230 struct ena_adapter *adapter = (struct ena_adapter *)adapter_data;
4231 struct ena_admin_aenq_link_change_desc *aenq_desc =
4232 (struct ena_admin_aenq_link_change_desc *)aenq_e;
4233 int status = aenq_desc->flags &
4234 ENA_ADMIN_AENQ_LINK_CHANGE_DESC_LINK_STATUS_MASK;
4237 netif_dbg(adapter, ifup, adapter->netdev, "%s\n", __func__);
4238 set_bit(ENA_FLAG_LINK_UP, &adapter->flags);
4239 if (!test_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags))
4240 netif_carrier_on(adapter->netdev);
4242 clear_bit(ENA_FLAG_LINK_UP, &adapter->flags);
4243 netif_carrier_off(adapter->netdev);
4247 static void ena_keep_alive_wd(void *adapter_data,
4248 struct ena_admin_aenq_entry *aenq_e)
4250 struct ena_adapter *adapter = (struct ena_adapter *)adapter_data;
4251 struct ena_admin_aenq_keep_alive_desc *desc;
4255 desc = (struct ena_admin_aenq_keep_alive_desc *)aenq_e;
4256 adapter->last_keep_alive_jiffies = jiffies;
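/* The device reports the accumulated drop counters as two 32-bit halves in
 * the AENQ descriptor; recombine them into 64-bit values.
 */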
4258 rx_drops = ((u64)desc->rx_drops_high << 32) | desc->rx_drops_low;
4259 tx_drops = ((u64)desc->tx_drops_high << 32) | desc->tx_drops_low;
4261 u64_stats_update_begin(&adapter->syncp);
4262 /* These stats are accumulated by the device, so the counters indicate
4263 * all drops since last reset.
4265 adapter->dev_stats.rx_drops = rx_drops;
4266 adapter->dev_stats.tx_drops = tx_drops;
4267 u64_stats_update_end(&adapter->syncp);
4270 static void ena_notification(void *adapter_data,
4271 struct ena_admin_aenq_entry *aenq_e)
4273 struct ena_adapter *adapter = (struct ena_adapter *)adapter_data;
4274 struct ena_admin_ena_hw_hints *hints;
4276 WARN(aenq_e->aenq_common_desc.group != ENA_ADMIN_NOTIFICATION,
4277 "Invalid group(%x) expected %x\n",
4278 aenq_e->aenq_common_desc.group,
4279 ENA_ADMIN_NOTIFICATION);
4281 switch (aenq_e->aenq_common_desc.syndrome) {
4282 case ENA_ADMIN_UPDATE_HINTS:
4283 hints = (struct ena_admin_ena_hw_hints *)
4284 (&aenq_e->inline_data_w4);
4285 ena_update_hints(adapter, hints);
4288 netif_err(adapter, drv, adapter->netdev,
4289 "Invalid aenq notification link state %d\n",
4290 aenq_e->aenq_common_desc.syndrome);
4294 /* This handler will be called for an unknown event group or unimplemented handlers */
4295 static void unimplemented_aenq_handler(void *data,
4296 struct ena_admin_aenq_entry *aenq_e)
4298 struct ena_adapter *adapter = (struct ena_adapter *)data;
4300 netif_err(adapter, drv, adapter->netdev,
4301 "Unknown event was received or event with unimplemented handler\n");
4304 static struct ena_aenq_handlers aenq_handlers = {
4306 [ENA_ADMIN_LINK_CHANGE] = ena_update_on_link_change,
4307 [ENA_ADMIN_NOTIFICATION] = ena_notification,
4308 [ENA_ADMIN_KEEP_ALIVE] = ena_keep_alive_wd,
4310 .unimplemented_handler = unimplemented_aenq_handler
4313 module_init(ena_init);
4314 module_exit(ena_cleanup);