// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright 2015-2020 Amazon.com, Inc. or its affiliates. All rights reserved.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#ifdef CONFIG_RFS_ACCEL
#include <linux/cpu_rmap.h>
#endif /* CONFIG_RFS_ACCEL */
#include <linux/ethtool.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/numa.h>
#include <linux/pci.h>
#include <linux/utsname.h>
#include <linux/version.h>
#include <linux/vmalloc.h>
#include <net/ip.h>

#include "ena_netdev.h"
#include <linux/bpf_trace.h>
#include "ena_pci_id_tbl.h"

MODULE_AUTHOR("Amazon.com, Inc. or its affiliates");
MODULE_DESCRIPTION(DEVICE_NAME);
MODULE_LICENSE("GPL");

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (5 * HZ)

#define ENA_MAX_RINGS min_t(unsigned int, ENA_MAX_NUM_IO_QUEUES, num_possible_cpus())

#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_IFUP | \
		NETIF_MSG_TX_DONE | NETIF_MSG_TX_ERR | NETIF_MSG_RX_ERR)

static struct ena_aenq_handlers aenq_handlers;

static struct workqueue_struct *ena_wq;

MODULE_DEVICE_TABLE(pci, ena_pci_tbl);

static int ena_rss_init_default(struct ena_adapter *adapter);
static void check_for_admin_com_state(struct ena_adapter *adapter);
static void ena_destroy_device(struct ena_adapter *adapter, bool graceful);
static int ena_restore_device(struct ena_adapter *adapter);

static void ena_init_io_rings(struct ena_adapter *adapter,
			      int first_index, int count);
static void ena_init_napi_in_range(struct ena_adapter *adapter, int first_index,
				   int count);
static void ena_del_napi_in_range(struct ena_adapter *adapter, int first_index,
				  int count);
static int ena_setup_tx_resources(struct ena_adapter *adapter, int qid);
static int ena_setup_tx_resources_in_range(struct ena_adapter *adapter,
					   int first_index,
					   int count);
static int ena_create_io_tx_queue(struct ena_adapter *adapter, int qid);
static void ena_free_tx_resources(struct ena_adapter *adapter, int qid);
static int ena_clean_xdp_irq(struct ena_ring *xdp_ring, u32 budget);
static void ena_destroy_all_tx_queues(struct ena_adapter *adapter);
static void ena_free_all_io_tx_resources(struct ena_adapter *adapter);
static void ena_napi_disable_in_range(struct ena_adapter *adapter,
				      int first_index, int count);
static void ena_napi_enable_in_range(struct ena_adapter *adapter,
				     int first_index, int count);
static int ena_up(struct ena_adapter *adapter);
static void ena_down(struct ena_adapter *adapter);
static void ena_unmask_interrupt(struct ena_ring *tx_ring,
				 struct ena_ring *rx_ring);
static void ena_update_ring_numa_node(struct ena_ring *tx_ring,
				      struct ena_ring *rx_ring);
static void ena_unmap_tx_buff(struct ena_ring *tx_ring,
			      struct ena_tx_buffer *tx_info);
static int ena_create_io_tx_queues_in_range(struct ena_adapter *adapter,
					    int first_index, int count);

/* Increase a stat by cnt while holding syncp seqlock on 32bit machines */
static void ena_increase_stat(u64 *statp, u64 cnt,
			      struct u64_stats_sync *syncp)
{
	u64_stats_update_begin(syncp);
	(*statp) += cnt;
	u64_stats_update_end(syncp);
}
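
/* Editor's note: these u64 stats are meant to be read back with the
 * matching seqcount retry loop. A minimal reader sketch (illustrative;
 * depending on kernel version the _irq variants of these helpers are
 * used instead):
 *
 *	unsigned int start;
 *	u64 doorbells;
 *
 *	do {
 *		start = u64_stats_fetch_begin(&tx_ring->syncp);
 *		doorbells = tx_ring->tx_stats.doorbells;
 *	} while (u64_stats_fetch_retry(&tx_ring->syncp, start));
 *
 * On 64-bit machines the begin/end calls compile away and the load is a
 * plain read; the seqlock only matters on 32-bit SMP.
 */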

static void ena_ring_tx_doorbell(struct ena_ring *tx_ring)
{
	ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq);
	ena_increase_stat(&tx_ring->tx_stats.doorbells, 1, &tx_ring->syncp);
}

static void ena_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	struct ena_adapter *adapter = netdev_priv(dev);

	/* Change the state of the device to trigger reset.
	 * Check that we are not already in the middle of a reset trigger.
	 */
	if (test_and_set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))
		return;

	ena_reset_device(adapter, ENA_REGS_RESET_OS_NETDEV_WD);
	ena_increase_stat(&adapter->dev_stats.tx_timeout, 1, &adapter->syncp);

	netif_err(adapter, tx_err, dev, "Transmit timeout\n");
}

static void update_rx_ring_mtu(struct ena_adapter *adapter, int mtu)
{
	int i;

	for (i = 0; i < adapter->num_io_queues; i++)
		adapter->rx_ring[i].mtu = mtu;
}

static int ena_change_mtu(struct net_device *dev, int new_mtu)
{
	struct ena_adapter *adapter = netdev_priv(dev);
	int ret;

	ret = ena_com_set_dev_mtu(adapter->ena_dev, new_mtu);
	if (!ret) {
		netif_dbg(adapter, drv, dev, "Set MTU to %d\n", new_mtu);
		update_rx_ring_mtu(adapter, new_mtu);
		dev->mtu = new_mtu;
	} else {
		netif_err(adapter, drv, dev, "Failed to set MTU to %d\n",
			  new_mtu);
	}

	return ret;
}
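
/* Editor's note (usage, illustrative): ena_change_mtu() is the driver's
 * ndo_change_mtu hook, reached when userspace runs something like
 * `ip link set dev <iface> mtu 9001` (interface name and value are
 * placeholders). The device is informed first via ena_com_set_dev_mtu();
 * the RX rings and netdev are only updated when the device accepts it.
 */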

static int ena_xmit_common(struct net_device *dev,
			   struct ena_ring *ring,
			   struct ena_tx_buffer *tx_info,
			   struct ena_com_tx_ctx *ena_tx_ctx,
			   u16 next_to_use,
			   u32 bytes)
{
	struct ena_adapter *adapter = netdev_priv(dev);
	int rc, nb_hw_desc;

	if (unlikely(ena_com_is_doorbell_needed(ring->ena_com_io_sq,
						ena_tx_ctx))) {
		netif_dbg(adapter, tx_queued, dev,
			  "llq tx max burst size of queue %d achieved, writing doorbell to send burst\n",
			  ring->qid);
		ena_ring_tx_doorbell(ring);
	}

	/* prepare the packet's descriptors for the DMA engine */
	rc = ena_com_prepare_tx(ring->ena_com_io_sq, ena_tx_ctx,
				&nb_hw_desc);

	/* In case there isn't enough space in the queue for the packet,
	 * we simply drop it. All other failure reasons of
	 * ena_com_prepare_tx() are fatal and therefore require a device reset.
	 */
	if (unlikely(rc)) {
		netif_err(adapter, tx_queued, dev,
			  "Failed to prepare tx bufs\n");
		ena_increase_stat(&ring->tx_stats.prepare_ctx_err, 1,
				  &ring->syncp);
		if (rc != -ENOMEM)
			ena_reset_device(adapter,
					 ENA_REGS_RESET_DRIVER_INVALID_STATE);
		return rc;
	}

	u64_stats_update_begin(&ring->syncp);
	ring->tx_stats.cnt++;
	ring->tx_stats.bytes += bytes;
	u64_stats_update_end(&ring->syncp);

	tx_info->tx_descs = nb_hw_desc;
	tx_info->last_jiffies = jiffies;
	tx_info->print_once = 0;

	ring->next_to_use = ENA_TX_RING_IDX_NEXT(next_to_use,
						 ring->ring_size);
	return 0;
}
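
/* Editor's note: the early doorbell above is specific to LLQ (low-latency
 * queue) placement. As the debug message indicates, the device bounds how
 * many descriptors may be queued between doorbells, so when
 * ena_com_is_doorbell_needed() reports the burst limit was reached the
 * ring is kicked before more descriptors are prepared.
 */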

/* This is the XDP napi callback. XDP queues use a separate napi callback
 * from the one used by the Rx/Tx queues.
 */
static int ena_xdp_io_poll(struct napi_struct *napi, int budget)
{
	struct ena_napi *ena_napi = container_of(napi, struct ena_napi, napi);
	u32 xdp_work_done, xdp_budget;
	struct ena_ring *xdp_ring;
	int napi_comp_call = 0;
	int ret;

	xdp_ring = ena_napi->xdp_ring;

	xdp_budget = budget;

	if (!test_bit(ENA_FLAG_DEV_UP, &xdp_ring->adapter->flags) ||
	    test_bit(ENA_FLAG_TRIGGER_RESET, &xdp_ring->adapter->flags)) {
		napi_complete_done(napi, 0);
		return 0;
	}

	xdp_work_done = ena_clean_xdp_irq(xdp_ring, xdp_budget);

	/* If the device is about to reset or down, avoid unmasking
	 * the interrupt and return 0 so NAPI won't reschedule
	 */
	if (unlikely(!test_bit(ENA_FLAG_DEV_UP, &xdp_ring->adapter->flags))) {
		napi_complete_done(napi, 0);
		ret = 0;
	} else if (xdp_budget > xdp_work_done) {
		napi_comp_call = 1;
		if (napi_complete_done(napi, xdp_work_done))
			ena_unmask_interrupt(xdp_ring, NULL);
		ena_update_ring_numa_node(xdp_ring, NULL);
		ret = xdp_work_done;
	} else {
		ret = xdp_budget;
	}

	u64_stats_update_begin(&xdp_ring->syncp);
	xdp_ring->tx_stats.napi_comp += napi_comp_call;
	xdp_ring->tx_stats.tx_poll++;
	u64_stats_update_end(&xdp_ring->syncp);
	xdp_ring->tx_stats.last_napi_jiffies = jiffies;

	return ret;
}
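
/* Editor's note: this follows the usual NAPI contract. Completing with
 * less than the budget (napi_complete_done() returning true) re-arms the
 * queue's interrupt via ena_unmask_interrupt(); returning the full budget
 * keeps the ring in polled mode and NAPI will invoke this callback again
 * without waiting for another interrupt.
 */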

static int ena_xdp_tx_map_frame(struct ena_ring *xdp_ring,
				struct ena_tx_buffer *tx_info,
				struct xdp_frame *xdpf,
				struct ena_com_tx_ctx *ena_tx_ctx)
{
	struct ena_adapter *adapter = xdp_ring->adapter;
	struct ena_com_buf *ena_buf;
	int push_len = 0;
	dma_addr_t dma;
	void *data;
	u32 size;

	tx_info->xdpf = xdpf;
	data = tx_info->xdpf->data;
	size = tx_info->xdpf->len;

	if (xdp_ring->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
		/* Designate part of the packet for LLQ */
		push_len = min_t(u32, size, xdp_ring->tx_max_header_size);

		ena_tx_ctx->push_header = data;

		size -= push_len;
		data += push_len;
	}

	ena_tx_ctx->header_len = push_len;

	if (size > 0) {
		dma = dma_map_single(xdp_ring->dev,
				     data,
				     size,
				     DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(xdp_ring->dev, dma)))
			goto error_report_dma_error;

		tx_info->map_linear_data = 0;

		ena_buf = tx_info->bufs;
		ena_buf->paddr = dma;
		ena_buf->len = size;

		ena_tx_ctx->ena_bufs = ena_buf;
		ena_tx_ctx->num_bufs = tx_info->num_of_bufs = 1;
	}

	return 0;

error_report_dma_error:
	ena_increase_stat(&xdp_ring->tx_stats.dma_mapping_err, 1,
			  &xdp_ring->syncp);
	netif_warn(adapter, tx_queued, adapter->netdev, "Failed to map xdp buff\n");

	return -EINVAL;
}

static int ena_xdp_xmit_frame(struct ena_ring *xdp_ring,
			      struct net_device *dev,
			      struct xdp_frame *xdpf,
			      int flags)
{
	struct ena_com_tx_ctx ena_tx_ctx = {};
	struct ena_tx_buffer *tx_info;
	u16 next_to_use, req_id;
	int rc;

	next_to_use = xdp_ring->next_to_use;
	req_id = xdp_ring->free_ids[next_to_use];
	tx_info = &xdp_ring->tx_buffer_info[req_id];
	tx_info->num_of_bufs = 0;

	rc = ena_xdp_tx_map_frame(xdp_ring, tx_info, xdpf, &ena_tx_ctx);
	if (unlikely(rc))
		return rc;

	ena_tx_ctx.req_id = req_id;

	rc = ena_xmit_common(dev,
			     xdp_ring,
			     tx_info,
			     &ena_tx_ctx,
			     next_to_use,
			     xdpf->len);
	if (rc)
		goto error_unmap_dma;

	/* trigger the DMA engine. ena_ring_tx_doorbell()
	 * calls a memory barrier inside it.
	 */
	if (flags & XDP_XMIT_FLUSH)
		ena_ring_tx_doorbell(xdp_ring);

	return rc;

error_unmap_dma:
	ena_unmap_tx_buff(xdp_ring, tx_info);
	tx_info->xdpf = NULL;
	return rc;
}

static int ena_xdp_xmit(struct net_device *dev, int n,
			struct xdp_frame **frames, u32 flags)
{
	struct ena_adapter *adapter = netdev_priv(dev);
	struct ena_ring *xdp_ring;
	int qid, i, nxmit = 0;

	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
		return -EINVAL;

	if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
		return -ENETDOWN;

	/* We assume that all rings have the same XDP program */
	if (!READ_ONCE(adapter->rx_ring->xdp_bpf_prog))
		return -ENXIO;

	qid = smp_processor_id() % adapter->xdp_num_queues;
	qid += adapter->xdp_first_ring;
	xdp_ring = &adapter->tx_ring[qid];

	/* Other CPU ids might try to send through this queue */
	spin_lock(&xdp_ring->xdp_tx_lock);

	for (i = 0; i < n; i++) {
		if (ena_xdp_xmit_frame(xdp_ring, dev, frames[i], 0))
			break;
		nxmit++;
	}

	/* Ring doorbell to make device aware of the packets */
	if (flags & XDP_XMIT_FLUSH)
		ena_ring_tx_doorbell(xdp_ring);

	spin_unlock(&xdp_ring->xdp_tx_lock);

	/* Return number of packets sent */
	return nxmit;
}
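
/* Editor's note (illustrative, not part of the driver): ena_xdp_xmit() is
 * the ndo_xdp_xmit hook, reached when an XDP program on some interface
 * redirects frames to this device. A minimal redirecting program might be
 * (ifindex 3 is an assumed example value):
 *
 *	SEC("xdp")
 *	int xdp_redir(struct xdp_md *ctx)
 *	{
 *		return bpf_redirect(3, 0);
 *	}
 *
 * The core then batches redirected frames and calls this hook, setting
 * XDP_XMIT_FLUSH when the batch should be pushed to the device.
 */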

static int ena_xdp_execute(struct ena_ring *rx_ring, struct xdp_buff *xdp)
{
	u32 verdict = ENA_XDP_PASS;
	struct bpf_prog *xdp_prog;
	struct ena_ring *xdp_ring;
	struct xdp_frame *xdpf;
	u64 *xdp_stat;

	xdp_prog = READ_ONCE(rx_ring->xdp_bpf_prog);

	if (!xdp_prog)
		goto out;

	verdict = bpf_prog_run_xdp(xdp_prog, xdp);

	switch (verdict) {
	case XDP_TX:
		xdpf = xdp_convert_buff_to_frame(xdp);
		if (unlikely(!xdpf)) {
			trace_xdp_exception(rx_ring->netdev, xdp_prog, verdict);
			xdp_stat = &rx_ring->rx_stats.xdp_aborted;
			verdict = ENA_XDP_DROP;
			break;
		}

		/* Find xmit queue */
		xdp_ring = rx_ring->xdp_ring;

		/* The XDP queues are shared between XDP_TX and XDP_REDIRECT */
		spin_lock(&xdp_ring->xdp_tx_lock);

		if (ena_xdp_xmit_frame(xdp_ring, rx_ring->netdev, xdpf,
				       XDP_XMIT_FLUSH))
			xdp_return_frame(xdpf);

		spin_unlock(&xdp_ring->xdp_tx_lock);
		xdp_stat = &rx_ring->rx_stats.xdp_tx;
		verdict = ENA_XDP_TX;
		break;
	case XDP_REDIRECT:
		if (likely(!xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog))) {
			xdp_stat = &rx_ring->rx_stats.xdp_redirect;
			verdict = ENA_XDP_REDIRECT;
			break;
		}
		trace_xdp_exception(rx_ring->netdev, xdp_prog, verdict);
		xdp_stat = &rx_ring->rx_stats.xdp_aborted;
		verdict = ENA_XDP_DROP;
		break;
	case XDP_ABORTED:
		trace_xdp_exception(rx_ring->netdev, xdp_prog, verdict);
		xdp_stat = &rx_ring->rx_stats.xdp_aborted;
		verdict = ENA_XDP_DROP;
		break;
	case XDP_DROP:
		xdp_stat = &rx_ring->rx_stats.xdp_drop;
		verdict = ENA_XDP_DROP;
		break;
	case XDP_PASS:
		xdp_stat = &rx_ring->rx_stats.xdp_pass;
		verdict = ENA_XDP_PASS;
		break;
	default:
		bpf_warn_invalid_xdp_action(rx_ring->netdev, xdp_prog, verdict);
		xdp_stat = &rx_ring->rx_stats.xdp_invalid;
		verdict = ENA_XDP_DROP;
	}

	ena_increase_stat(xdp_stat, 1, &rx_ring->syncp);
out:
	return verdict;
}
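
/* Editor's note: the driver-internal ENA_XDP_* codes returned here
 * decouple the RX path from the raw XDP_* verdicts; only ENA_XDP_PASS
 * continues to regular skb processing, and every verdict bumps the
 * matching rx_stats counter (xdp_tx, xdp_redirect, xdp_drop, xdp_aborted,
 * xdp_pass or xdp_invalid) for observability.
 */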

static void ena_init_all_xdp_queues(struct ena_adapter *adapter)
{
	adapter->xdp_first_ring = adapter->num_io_queues;
	adapter->xdp_num_queues = adapter->num_io_queues;

	ena_init_io_rings(adapter,
			  adapter->xdp_first_ring,
			  adapter->xdp_num_queues);
}

static int ena_setup_and_create_all_xdp_queues(struct ena_adapter *adapter)
{
	int rc = 0;

	rc = ena_setup_tx_resources_in_range(adapter, adapter->xdp_first_ring,
					     adapter->xdp_num_queues);
	if (rc)
		goto setup_err;

	rc = ena_create_io_tx_queues_in_range(adapter,
					      adapter->xdp_first_ring,
					      adapter->xdp_num_queues);
	if (rc)
		goto create_err;

	return 0;

create_err:
	ena_free_all_io_tx_resources(adapter);
setup_err:
	return rc;
}

/* Provides a way for both kernel and bpf-prog to know
 * more about the RX-queue a given XDP frame arrived on.
 */
static int ena_xdp_register_rxq_info(struct ena_ring *rx_ring)
{
	int rc;

	rc = xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev, rx_ring->qid, 0);

	if (rc) {
		netif_err(rx_ring->adapter, ifup, rx_ring->netdev,
			  "Failed to register xdp rx queue info. RX queue num %d rc: %d\n",
			  rx_ring->qid, rc);
		goto err;
	}

	rc = xdp_rxq_info_reg_mem_model(&rx_ring->xdp_rxq, MEM_TYPE_PAGE_SHARED,
					NULL);

	if (rc) {
		netif_err(rx_ring->adapter, ifup, rx_ring->netdev,
			  "Failed to register xdp rx queue info memory model. RX queue num %d rc: %d\n",
			  rx_ring->qid, rc);
		xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
	}

err:
	return rc;
}

static void ena_xdp_unregister_rxq_info(struct ena_ring *rx_ring)
{
	xdp_rxq_info_unreg_mem_model(&rx_ring->xdp_rxq);
	xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
}

static void ena_xdp_exchange_program_rx_in_range(struct ena_adapter *adapter,
						 struct bpf_prog *prog,
						 int first, int count)
{
	struct bpf_prog *old_bpf_prog;
	struct ena_ring *rx_ring;
	int i = 0;

	for (i = first; i < count; i++) {
		rx_ring = &adapter->rx_ring[i];
		old_bpf_prog = xchg(&rx_ring->xdp_bpf_prog, prog);

		if (!old_bpf_prog && prog) {
			ena_xdp_register_rxq_info(rx_ring);
			rx_ring->rx_headroom = XDP_PACKET_HEADROOM;
		} else if (old_bpf_prog && !prog) {
			ena_xdp_unregister_rxq_info(rx_ring);
			rx_ring->rx_headroom = NET_SKB_PAD;
		}
	}
}

static void ena_xdp_exchange_program(struct ena_adapter *adapter,
				     struct bpf_prog *prog)
{
	struct bpf_prog *old_bpf_prog = xchg(&adapter->xdp_bpf_prog, prog);

	ena_xdp_exchange_program_rx_in_range(adapter,
					     prog,
					     0,
					     adapter->num_io_queues);

	if (old_bpf_prog)
		bpf_prog_put(old_bpf_prog);
}

static int ena_destroy_and_free_all_xdp_queues(struct ena_adapter *adapter)
{
	bool was_up;
	int rc;

	was_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags);

	if (was_up)
		ena_down(adapter);

	adapter->xdp_first_ring = 0;
	adapter->xdp_num_queues = 0;
	ena_xdp_exchange_program(adapter, NULL);
	if (was_up) {
		rc = ena_up(adapter);
		if (rc)
			return rc;
	}
	return 0;
}

static int ena_xdp_set(struct net_device *netdev, struct netdev_bpf *bpf)
{
	struct ena_adapter *adapter = netdev_priv(netdev);
	struct bpf_prog *prog = bpf->prog;
	struct bpf_prog *old_bpf_prog;
	int rc, prev_mtu;
	bool is_up;

	is_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags);
	rc = ena_xdp_allowed(adapter);
	if (rc == ENA_XDP_ALLOWED) {
		old_bpf_prog = adapter->xdp_bpf_prog;
		if (prog) {
			if (!is_up) {
				ena_init_all_xdp_queues(adapter);
			} else if (!old_bpf_prog) {
				ena_down(adapter);
				ena_init_all_xdp_queues(adapter);
			}
			ena_xdp_exchange_program(adapter, prog);

			if (is_up && !old_bpf_prog) {
				rc = ena_up(adapter);
				if (rc)
					return rc;
			}
			xdp_features_set_redirect_target(netdev, false);
		} else if (old_bpf_prog) {
			xdp_features_clear_redirect_target(netdev);
			rc = ena_destroy_and_free_all_xdp_queues(adapter);
			if (rc)
				return rc;
		}

		prev_mtu = netdev->max_mtu;
		netdev->max_mtu = prog ? ENA_XDP_MAX_MTU : adapter->max_mtu;

		if (!old_bpf_prog)
			netif_info(adapter, drv, adapter->netdev,
				   "XDP program is set, changing the max_mtu from %d to %d",
				   prev_mtu, netdev->max_mtu);

	} else if (rc == ENA_XDP_CURRENT_MTU_TOO_LARGE) {
		netif_err(adapter, drv, adapter->netdev,
			  "Failed to set xdp program, the current MTU (%d) is larger than the maximum allowed MTU (%lu) while xdp is on",
			  netdev->mtu, ENA_XDP_MAX_MTU);
		NL_SET_ERR_MSG_MOD(bpf->extack,
				   "Failed to set xdp program, the current MTU is larger than the maximum allowed MTU. Check the dmesg for more info");
		return -EINVAL;
	} else if (rc == ENA_XDP_NO_ENOUGH_QUEUES) {
		netif_err(adapter, drv, adapter->netdev,
			  "Failed to set xdp program, the Rx/Tx channel count should be at most half of the maximum allowed channel count. The current queue count (%d), the maximal queue count (%d)\n",
			  adapter->num_io_queues, adapter->max_num_io_queues);
		NL_SET_ERR_MSG_MOD(bpf->extack,
				   "Failed to set xdp program, there is not enough space for allocating XDP queues. Check the dmesg for more info");
		return -EINVAL;
	}

	return 0;
}

/* This is the main xdp callback, it's used by the kernel to set/unset the xdp
 * program as well as to query the current xdp program id.
 */
static int ena_xdp(struct net_device *netdev, struct netdev_bpf *bpf)
{
	switch (bpf->command) {
	case XDP_SETUP_PROG:
		return ena_xdp_set(netdev, bpf);
	default:
		return -EINVAL;
	}
}
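
/* Editor's note (usage, illustrative): a program reaches ena_xdp() via
 * the ndo_bpf hook, e.g. `ip link set dev <iface> xdp obj prog.o sec xdp`
 * (interface and object names are placeholders); detaching uses
 * `ip link set dev <iface> xdp off`.
 */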

static int ena_init_rx_cpu_rmap(struct ena_adapter *adapter)
{
#ifdef CONFIG_RFS_ACCEL
	u32 i;
	int rc;

	adapter->netdev->rx_cpu_rmap = alloc_irq_cpu_rmap(adapter->num_io_queues);
	if (!adapter->netdev->rx_cpu_rmap)
		return -ENOMEM;
	for (i = 0; i < adapter->num_io_queues; i++) {
		int irq_idx = ENA_IO_IRQ_IDX(i);

		rc = irq_cpu_rmap_add(adapter->netdev->rx_cpu_rmap,
				      pci_irq_vector(adapter->pdev, irq_idx));
		if (rc) {
			free_irq_cpu_rmap(adapter->netdev->rx_cpu_rmap);
			adapter->netdev->rx_cpu_rmap = NULL;
			return rc;
		}
	}
#endif /* CONFIG_RFS_ACCEL */
	return 0;
}

static void ena_init_io_rings_common(struct ena_adapter *adapter,
				     struct ena_ring *ring, u16 qid)
{
	ring->qid = qid;
	ring->pdev = adapter->pdev;
	ring->dev = &adapter->pdev->dev;
	ring->netdev = adapter->netdev;
	ring->napi = &adapter->ena_napi[qid].napi;
	ring->adapter = adapter;
	ring->ena_dev = adapter->ena_dev;
	ring->per_napi_packets = 0;
	ring->cpu = 0;
	ring->numa_node = 0;
	ring->no_interrupt_event_cnt = 0;
	u64_stats_init(&ring->syncp);
}

static void ena_init_io_rings(struct ena_adapter *adapter,
			      int first_index, int count)
{
	struct ena_com_dev *ena_dev;
	struct ena_ring *txr, *rxr;
	int i;

	ena_dev = adapter->ena_dev;

	for (i = first_index; i < first_index + count; i++) {
		txr = &adapter->tx_ring[i];
		rxr = &adapter->rx_ring[i];

		/* TX common ring state */
		ena_init_io_rings_common(adapter, txr, i);

		/* TX specific ring state */
		txr->ring_size = adapter->requested_tx_ring_size;
		txr->tx_max_header_size = ena_dev->tx_max_header_size;
		txr->tx_mem_queue_type = ena_dev->tx_mem_queue_type;
		txr->sgl_size = adapter->max_tx_sgl_size;
		txr->smoothed_interval =
			ena_com_get_nonadaptive_moderation_interval_tx(ena_dev);
		txr->disable_meta_caching = adapter->disable_meta_caching;
		spin_lock_init(&txr->xdp_tx_lock);

		/* Don't init RX queues for xdp queues */
		if (!ENA_IS_XDP_INDEX(adapter, i)) {
			/* RX common ring state */
			ena_init_io_rings_common(adapter, rxr, i);

			/* RX specific ring state */
			rxr->ring_size = adapter->requested_rx_ring_size;
			rxr->rx_copybreak = adapter->rx_copybreak;
			rxr->sgl_size = adapter->max_rx_sgl_size;
			rxr->smoothed_interval =
				ena_com_get_nonadaptive_moderation_interval_rx(ena_dev);
			rxr->empty_rx_queue = 0;
			rxr->rx_headroom = NET_SKB_PAD;
			adapter->ena_napi[i].dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
			rxr->xdp_ring = &adapter->tx_ring[i + adapter->num_io_queues];
		}
	}
}
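
/* Editor's note, a worked example of the ring layout set up above: with
 * num_io_queues == 8, tx_ring[0..7] back the regular TX queues and, once
 * XDP is attached, tx_ring[8..15] become the XDP TX rings
 * (xdp_first_ring == 8, see ena_init_all_xdp_queues()). rx_ring[2] then
 * forwards XDP_TX frames to its rxr->xdp_ring, i.e. tx_ring[2 + 8].
 */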

/* ena_setup_tx_resources - allocate I/O Tx resources (Descriptors)
 * @adapter: network interface device structure
 * @qid: queue index
 *
 * Return 0 on success, negative on failure
 */
static int ena_setup_tx_resources(struct ena_adapter *adapter, int qid)
{
	struct ena_ring *tx_ring = &adapter->tx_ring[qid];
	struct ena_irq *ena_irq = &adapter->irq_tbl[ENA_IO_IRQ_IDX(qid)];
	int size, i, node;

	if (tx_ring->tx_buffer_info) {
		netif_err(adapter, ifup,
			  adapter->netdev, "tx_buffer_info is not NULL");
		return -EEXIST;
	}

	size = sizeof(struct ena_tx_buffer) * tx_ring->ring_size;
	node = cpu_to_node(ena_irq->cpu);

	tx_ring->tx_buffer_info = vzalloc_node(size, node);
	if (!tx_ring->tx_buffer_info) {
		tx_ring->tx_buffer_info = vzalloc(size);
		if (!tx_ring->tx_buffer_info)
			goto err_tx_buffer_info;
	}

	size = sizeof(u16) * tx_ring->ring_size;
	tx_ring->free_ids = vzalloc_node(size, node);
	if (!tx_ring->free_ids) {
		tx_ring->free_ids = vzalloc(size);
		if (!tx_ring->free_ids)
			goto err_tx_free_ids;
	}

	size = tx_ring->tx_max_header_size;
	tx_ring->push_buf_intermediate_buf = vzalloc_node(size, node);
	if (!tx_ring->push_buf_intermediate_buf) {
		tx_ring->push_buf_intermediate_buf = vzalloc(size);
		if (!tx_ring->push_buf_intermediate_buf)
			goto err_push_buf_intermediate_buf;
	}

	/* Req id ring for TX out of order completions */
	for (i = 0; i < tx_ring->ring_size; i++)
		tx_ring->free_ids[i] = i;

	/* Reset tx statistics */
	memset(&tx_ring->tx_stats, 0x0, sizeof(tx_ring->tx_stats));

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
	tx_ring->cpu = ena_irq->cpu;
	tx_ring->numa_node = node;
	return 0;

err_push_buf_intermediate_buf:
	vfree(tx_ring->free_ids);
	tx_ring->free_ids = NULL;
err_tx_free_ids:
	vfree(tx_ring->tx_buffer_info);
	tx_ring->tx_buffer_info = NULL;
err_tx_buffer_info:
	return -ENOMEM;
}
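
/* Editor's note: the vzalloc_node()/vzalloc() pairs above implement a
 * "prefer NUMA-local, fall back to any node" allocation policy. The same
 * pattern as a generic sketch (hypothetical helper, not in the driver):
 *
 *	static void *vzalloc_node_fallback(size_t size, int node)
 *	{
 *		void *p = vzalloc_node(size, node);
 *
 *		return p ? p : vzalloc(size);
 *	}
 */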

/* ena_free_tx_resources - Free I/O Tx Resources per Queue
 * @adapter: network interface device structure
 * @qid: queue index
 *
 * Free all transmit software resources
 */
static void ena_free_tx_resources(struct ena_adapter *adapter, int qid)
{
	struct ena_ring *tx_ring = &adapter->tx_ring[qid];

	vfree(tx_ring->tx_buffer_info);
	tx_ring->tx_buffer_info = NULL;

	vfree(tx_ring->free_ids);
	tx_ring->free_ids = NULL;

	vfree(tx_ring->push_buf_intermediate_buf);
	tx_ring->push_buf_intermediate_buf = NULL;
}

static int ena_setup_tx_resources_in_range(struct ena_adapter *adapter,
					   int first_index,
					   int count)
{
	int i, rc = 0;

	for (i = first_index; i < first_index + count; i++) {
		rc = ena_setup_tx_resources(adapter, i);
		if (rc)
			goto err_setup_tx;
	}

	return 0;

err_setup_tx:

	netif_err(adapter, ifup, adapter->netdev,
		  "Tx queue %d: allocation failed\n", i);

	/* rewind the index freeing the rings as we go */
	while (first_index < i--)
		ena_free_tx_resources(adapter, i);
	return rc;
}

static void ena_free_all_io_tx_resources_in_range(struct ena_adapter *adapter,
						  int first_index, int count)
{
	int i;

	for (i = first_index; i < first_index + count; i++)
		ena_free_tx_resources(adapter, i);
}

/* ena_free_all_io_tx_resources - Free I/O Tx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all transmit software resources
 */
static void ena_free_all_io_tx_resources(struct ena_adapter *adapter)
{
	ena_free_all_io_tx_resources_in_range(adapter,
					      0,
					      adapter->xdp_num_queues +
					      adapter->num_io_queues);
}

/* ena_setup_rx_resources - allocate I/O Rx resources (Descriptors)
 * @adapter: network interface device structure
 * @qid: queue index
 *
 * Returns 0 on success, negative on failure
 */
static int ena_setup_rx_resources(struct ena_adapter *adapter,
				  u32 qid)
{
	struct ena_ring *rx_ring = &adapter->rx_ring[qid];
	struct ena_irq *ena_irq = &adapter->irq_tbl[ENA_IO_IRQ_IDX(qid)];
	int size, node, i;

	if (rx_ring->rx_buffer_info) {
		netif_err(adapter, ifup, adapter->netdev,
			  "rx_buffer_info is not NULL");
		return -EEXIST;
	}

	/* alloc extra element so in rx path
	 * we can always prefetch rx_info + 1
	 */
	size = sizeof(struct ena_rx_buffer) * (rx_ring->ring_size + 1);
	node = cpu_to_node(ena_irq->cpu);

	rx_ring->rx_buffer_info = vzalloc_node(size, node);
	if (!rx_ring->rx_buffer_info) {
		rx_ring->rx_buffer_info = vzalloc(size);
		if (!rx_ring->rx_buffer_info)
			return -ENOMEM;
	}

	size = sizeof(u16) * rx_ring->ring_size;
	rx_ring->free_ids = vzalloc_node(size, node);
	if (!rx_ring->free_ids) {
		rx_ring->free_ids = vzalloc(size);
		if (!rx_ring->free_ids) {
			vfree(rx_ring->rx_buffer_info);
			rx_ring->rx_buffer_info = NULL;
			return -ENOMEM;
		}
	}

	/* Req id ring for receiving RX pkts out of order */
	for (i = 0; i < rx_ring->ring_size; i++)
		rx_ring->free_ids[i] = i;

	/* Reset rx statistics */
	memset(&rx_ring->rx_stats, 0x0, sizeof(rx_ring->rx_stats));

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;
	rx_ring->cpu = ena_irq->cpu;
	rx_ring->numa_node = node;

	return 0;
}

/* ena_free_rx_resources - Free I/O Rx Resources
 * @adapter: network interface device structure
 * @qid: queue index
 *
 * Free all receive software resources
 */
static void ena_free_rx_resources(struct ena_adapter *adapter,
				  u32 qid)
{
	struct ena_ring *rx_ring = &adapter->rx_ring[qid];

	vfree(rx_ring->rx_buffer_info);
	rx_ring->rx_buffer_info = NULL;

	vfree(rx_ring->free_ids);
	rx_ring->free_ids = NULL;
}

/* ena_setup_all_rx_resources - allocate I/O Rx queues resources for all queues
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 */
static int ena_setup_all_rx_resources(struct ena_adapter *adapter)
{
	int i, rc = 0;

	for (i = 0; i < adapter->num_io_queues; i++) {
		rc = ena_setup_rx_resources(adapter, i);
		if (rc)
			goto err_setup_rx;
	}

	return 0;

err_setup_rx:

	netif_err(adapter, ifup, adapter->netdev,
		  "Rx queue %d: allocation failed\n", i);

	/* rewind the index freeing the rings as we go */
	while (i--)
		ena_free_rx_resources(adapter, i);
	return rc;
}

/* ena_free_all_io_rx_resources - Free I/O Rx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all receive software resources
 */
static void ena_free_all_io_rx_resources(struct ena_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_io_queues; i++)
		ena_free_rx_resources(adapter, i);
}
static struct page *ena_alloc_map_page(struct ena_ring *rx_ring,
				       dma_addr_t *dma)
{
	struct page *page;

	/* This allocates the page on the NUMA node that the executing
	 * code is running on.
	 */
	page = dev_alloc_page();
	if (!page) {
		ena_increase_stat(&rx_ring->rx_stats.page_alloc_fail, 1,
				  &rx_ring->syncp);
		return ERR_PTR(-ENOSPC);
	}

	/* To enable NIC-side port-mirroring, AKA SPAN port,
	 * we make the buffer readable from the NIC as well
	 */
	*dma = dma_map_page(rx_ring->dev, page, 0, ENA_PAGE_SIZE,
			    DMA_BIDIRECTIONAL);
	if (unlikely(dma_mapping_error(rx_ring->dev, *dma))) {
		ena_increase_stat(&rx_ring->rx_stats.dma_mapping_err, 1,
				  &rx_ring->syncp);
		__free_page(page);
		return ERR_PTR(-EIO);
	}

	return page;
}

static int ena_alloc_rx_buffer(struct ena_ring *rx_ring,
			       struct ena_rx_buffer *rx_info)
{
	int headroom = rx_ring->rx_headroom;
	struct ena_com_buf *ena_buf;
	struct page *page;
	dma_addr_t dma;
	int tailroom;

	/* restore page offset value in case it has been changed by device */
	rx_info->buf_offset = headroom;

	/* if the previously allocated page is still unused, keep it */
	if (unlikely(rx_info->page))
		return 0;

	/* We handle DMA here */
	page = ena_alloc_map_page(rx_ring, &dma);
	if (unlikely(IS_ERR(page)))
		return PTR_ERR(page);

	netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
		  "Allocate page %p, rx_info %p\n", page, rx_info);

	tailroom = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	rx_info->page = page;
	rx_info->dma_addr = dma;
	rx_info->page_offset = 0;
	ena_buf = &rx_info->ena_buf;
	ena_buf->paddr = dma + headroom;
	ena_buf->len = ENA_PAGE_SIZE - headroom - tailroom;

	return 0;
}
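
/* Editor's note, illustrative sizing (values depend on the architecture
 * and config): on x86_64 with 4 KiB pages, NET_SKB_PAD is typically 64
 * and SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) is about 320, so the
 * buffer advertised to the device is roughly 4096 - 64 - 320 = 3712
 * bytes. With XDP attached the headroom grows to XDP_PACKET_HEADROOM
 * (256), leaving about 4096 - 256 - 320 = 3520 bytes.
 */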

static void ena_unmap_rx_buff_attrs(struct ena_ring *rx_ring,
				    struct ena_rx_buffer *rx_info,
				    unsigned long attrs)
{
	dma_unmap_page_attrs(rx_ring->dev, rx_info->dma_addr, ENA_PAGE_SIZE,
			     DMA_BIDIRECTIONAL, attrs);
}

static void ena_free_rx_page(struct ena_ring *rx_ring,
			     struct ena_rx_buffer *rx_info)
{
	struct page *page = rx_info->page;

	if (unlikely(!page)) {
		netif_warn(rx_ring->adapter, rx_err, rx_ring->netdev,
			   "Trying to free unallocated buffer\n");
		return;
	}

	ena_unmap_rx_buff_attrs(rx_ring, rx_info, 0);

	__free_page(page);
	rx_info->page = NULL;
}

static int ena_refill_rx_bufs(struct ena_ring *rx_ring, u32 num)
{
	u16 next_to_use, req_id;
	u32 i;
	int rc;

	next_to_use = rx_ring->next_to_use;

	for (i = 0; i < num; i++) {
		struct ena_rx_buffer *rx_info;

		req_id = rx_ring->free_ids[next_to_use];

		rx_info = &rx_ring->rx_buffer_info[req_id];

		rc = ena_alloc_rx_buffer(rx_ring, rx_info);
		if (unlikely(rc < 0)) {
			netif_warn(rx_ring->adapter, rx_err, rx_ring->netdev,
				   "Failed to allocate buffer for rx queue %d\n",
				   rx_ring->qid);
			break;
		}
		rc = ena_com_add_single_rx_desc(rx_ring->ena_com_io_sq,
						&rx_info->ena_buf,
						req_id);
		if (unlikely(rc)) {
			netif_warn(rx_ring->adapter, rx_status, rx_ring->netdev,
				   "Failed to add buffer for rx queue %d\n",
				   rx_ring->qid);
			break;
		}
		next_to_use = ENA_RX_RING_IDX_NEXT(next_to_use,
						   rx_ring->ring_size);
	}

	if (unlikely(i < num)) {
		ena_increase_stat(&rx_ring->rx_stats.refil_partial, 1,
				  &rx_ring->syncp);
		netif_warn(rx_ring->adapter, rx_err, rx_ring->netdev,
			   "Refilled rx qid %d with only %d buffers (from %d)\n",
			   rx_ring->qid, i, num);
	}

	/* ena_com_write_sq_doorbell issues a wmb() */
	if (likely(i))
		ena_com_write_sq_doorbell(rx_ring->ena_com_io_sq);

	rx_ring->next_to_use = next_to_use;

	return i;
}

static void ena_free_rx_bufs(struct ena_adapter *adapter,
			     u32 qid)
{
	struct ena_ring *rx_ring = &adapter->rx_ring[qid];
	u32 i;

	for (i = 0; i < rx_ring->ring_size; i++) {
		struct ena_rx_buffer *rx_info = &rx_ring->rx_buffer_info[i];

		if (rx_info->page)
			ena_free_rx_page(rx_ring, rx_info);
	}
}

/* ena_refill_all_rx_bufs - allocate all queues' Rx buffers
 * @adapter: board private structure
 */
static void ena_refill_all_rx_bufs(struct ena_adapter *adapter)
{
	struct ena_ring *rx_ring;
	int i, rc, bufs_num;

	for (i = 0; i < adapter->num_io_queues; i++) {
		rx_ring = &adapter->rx_ring[i];
		bufs_num = rx_ring->ring_size - 1;
		rc = ena_refill_rx_bufs(rx_ring, bufs_num);

		if (unlikely(rc != bufs_num))
			netif_warn(rx_ring->adapter, rx_status, rx_ring->netdev,
				   "Refilling queue %d failed. Allocated %d buffers out of %d\n",
				   i, rc, bufs_num);
	}
}

static void ena_free_all_rx_bufs(struct ena_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_io_queues; i++)
		ena_free_rx_bufs(adapter, i);
}

static void ena_unmap_tx_buff(struct ena_ring *tx_ring,
			      struct ena_tx_buffer *tx_info)
{
	struct ena_com_buf *ena_buf;
	u32 cnt;
	int i;

	ena_buf = tx_info->bufs;
	cnt = tx_info->num_of_bufs;

	if (unlikely(!cnt))
		return;

	if (tx_info->map_linear_data) {
		dma_unmap_single(tx_ring->dev,
				 dma_unmap_addr(ena_buf, paddr),
				 dma_unmap_len(ena_buf, len),
				 DMA_TO_DEVICE);
		ena_buf++;
		cnt--;
	}

	/* unmap remaining mapped pages */
	for (i = 0; i < cnt; i++) {
		dma_unmap_page(tx_ring->dev, dma_unmap_addr(ena_buf, paddr),
			       dma_unmap_len(ena_buf, len), DMA_TO_DEVICE);
		ena_buf++;
	}
}

/* ena_free_tx_bufs - Free Tx Buffers per Queue
 * @tx_ring: TX ring whose buffers are to be freed
 */
static void ena_free_tx_bufs(struct ena_ring *tx_ring)
{
	bool print_once = true;
	u32 i;

	for (i = 0; i < tx_ring->ring_size; i++) {
		struct ena_tx_buffer *tx_info = &tx_ring->tx_buffer_info[i];

		if (!tx_info->skb)
			continue;

		if (print_once) {
			netif_notice(tx_ring->adapter, ifdown, tx_ring->netdev,
				     "Free uncompleted tx skb qid %d idx 0x%x\n",
				     tx_ring->qid, i);
			print_once = false;
		} else {
			netif_dbg(tx_ring->adapter, ifdown, tx_ring->netdev,
				  "Free uncompleted tx skb qid %d idx 0x%x\n",
				  tx_ring->qid, i);
		}

		ena_unmap_tx_buff(tx_ring, tx_info);

		dev_kfree_skb_any(tx_info->skb);
	}
	netdev_tx_reset_queue(netdev_get_tx_queue(tx_ring->netdev,
						  tx_ring->qid));
}

static void ena_free_all_tx_bufs(struct ena_adapter *adapter)
{
	struct ena_ring *tx_ring;
	int i;

	for (i = 0; i < adapter->num_io_queues + adapter->xdp_num_queues; i++) {
		tx_ring = &adapter->tx_ring[i];
		ena_free_tx_bufs(tx_ring);
	}
}

static void ena_destroy_all_tx_queues(struct ena_adapter *adapter)
{
	u16 ena_qid;
	int i;

	for (i = 0; i < adapter->num_io_queues + adapter->xdp_num_queues; i++) {
		ena_qid = ENA_IO_TXQ_IDX(i);
		ena_com_destroy_io_queue(adapter->ena_dev, ena_qid);
	}
}

static void ena_destroy_all_rx_queues(struct ena_adapter *adapter)
{
	u16 ena_qid;
	int i;

	for (i = 0; i < adapter->num_io_queues; i++) {
		ena_qid = ENA_IO_RXQ_IDX(i);
		cancel_work_sync(&adapter->ena_napi[i].dim.work);
		ena_com_destroy_io_queue(adapter->ena_dev, ena_qid);
	}
}

static void ena_destroy_all_io_queues(struct ena_adapter *adapter)
{
	ena_destroy_all_tx_queues(adapter);
	ena_destroy_all_rx_queues(adapter);
}

static int handle_invalid_req_id(struct ena_ring *ring, u16 req_id,
				 struct ena_tx_buffer *tx_info, bool is_xdp)
{
	if (tx_info)
		netif_err(ring->adapter,
			  tx_done,
			  ring->netdev,
			  "tx_info doesn't have valid %s. qid %u req_id %u",
			  is_xdp ? "xdp frame" : "skb", ring->qid, req_id);
	else
		netif_err(ring->adapter,
			  tx_done,
			  ring->netdev,
			  "Invalid req_id %u in qid %u\n",
			  req_id, ring->qid);

	ena_increase_stat(&ring->tx_stats.bad_req_id, 1, &ring->syncp);
	ena_reset_device(ring->adapter, ENA_REGS_RESET_INV_TX_REQ_ID);

	return -EFAULT;
}

static int validate_tx_req_id(struct ena_ring *tx_ring, u16 req_id)
{
	struct ena_tx_buffer *tx_info;

	tx_info = &tx_ring->tx_buffer_info[req_id];
	if (likely(tx_info->skb))
		return 0;

	return handle_invalid_req_id(tx_ring, req_id, tx_info, false);
}

static int validate_xdp_req_id(struct ena_ring *xdp_ring, u16 req_id)
{
	struct ena_tx_buffer *tx_info;

	tx_info = &xdp_ring->tx_buffer_info[req_id];
	if (likely(tx_info->xdpf))
		return 0;

	return handle_invalid_req_id(xdp_ring, req_id, tx_info, true);
}

static int ena_clean_tx_irq(struct ena_ring *tx_ring, u32 budget)
{
	struct netdev_queue *txq;
	bool above_thresh;
	u32 tx_bytes = 0;
	u32 total_done = 0;
	u16 next_to_clean;
	u16 req_id;
	int tx_pkts = 0;
	int rc;

	next_to_clean = tx_ring->next_to_clean;
	txq = netdev_get_tx_queue(tx_ring->netdev, tx_ring->qid);

	while (tx_pkts < budget) {
		struct ena_tx_buffer *tx_info;
		struct sk_buff *skb;

		rc = ena_com_tx_comp_req_id_get(tx_ring->ena_com_io_cq,
						&req_id);
		if (rc) {
			if (unlikely(rc == -EINVAL))
				handle_invalid_req_id(tx_ring, req_id, NULL,
						      false);
			break;
		}

		/* validate that the request id points to a valid skb */
		rc = validate_tx_req_id(tx_ring, req_id);
		if (rc)
			break;

		tx_info = &tx_ring->tx_buffer_info[req_id];
		skb = tx_info->skb;

		/* prefetch skb_end_pointer() to speedup skb_shinfo(skb) */
		prefetch(&skb->end);

		tx_info->skb = NULL;
		tx_info->last_jiffies = 0;

		ena_unmap_tx_buff(tx_ring, tx_info);

		netif_dbg(tx_ring->adapter, tx_done, tx_ring->netdev,
			  "tx_poll: q %d skb %p completed\n", tx_ring->qid,
			  skb);

		tx_bytes += skb->len;
		dev_kfree_skb(skb);
		tx_pkts++;
		total_done += tx_info->tx_descs;

		tx_ring->free_ids[next_to_clean] = req_id;
		next_to_clean = ENA_TX_RING_IDX_NEXT(next_to_clean,
						     tx_ring->ring_size);
	}

	tx_ring->next_to_clean = next_to_clean;
	ena_com_comp_ack(tx_ring->ena_com_io_sq, total_done);
	ena_com_update_dev_comp_head(tx_ring->ena_com_io_cq);

	netdev_tx_completed_queue(txq, tx_pkts, tx_bytes);

	netif_dbg(tx_ring->adapter, tx_done, tx_ring->netdev,
		  "tx_poll: q %d done. total pkts: %d\n",
		  tx_ring->qid, tx_pkts);

	/* need to make the rings circular update visible to
	 * ena_start_xmit() before checking for netif_queue_stopped().
	 */
	smp_mb();

	above_thresh = ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq,
						    ENA_TX_WAKEUP_THRESH);
	if (unlikely(netif_tx_queue_stopped(txq) && above_thresh)) {
		__netif_tx_lock(txq, smp_processor_id());
		above_thresh =
			ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq,
						     ENA_TX_WAKEUP_THRESH);
		if (netif_tx_queue_stopped(txq) && above_thresh &&
		    test_bit(ENA_FLAG_DEV_UP, &tx_ring->adapter->flags)) {
			netif_tx_wake_queue(txq);
			ena_increase_stat(&tx_ring->tx_stats.queue_wakeup, 1,
					  &tx_ring->syncp);
		}
		__netif_tx_unlock(txq);
	}

	return tx_pkts;
}
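
/* Editor's note: the smp_mb() above is one half of the usual lockless
 * stop/wake protocol. The producer (ena_start_xmit(), later in this file)
 * stops the queue when the SQ looks full and re-checks afterwards; the
 * consumer here must publish the ring cleanup before testing
 * netif_tx_queue_stopped(), otherwise both sides could miss each other's
 * update and leave the queue stopped with a drained SQ.
 */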

static struct sk_buff *ena_alloc_skb(struct ena_ring *rx_ring, void *first_frag, u16 len)
{
	struct sk_buff *skb;

	if (!first_frag)
		skb = napi_alloc_skb(rx_ring->napi, len);
	else
		skb = napi_build_skb(first_frag, len);

	if (unlikely(!skb)) {
		ena_increase_stat(&rx_ring->rx_stats.skb_alloc_fail, 1,
				  &rx_ring->syncp);

		netif_dbg(rx_ring->adapter, rx_err, rx_ring->netdev,
			  "Failed to allocate skb. first_frag %s\n",
			  first_frag ? "provided" : "not provided");
	}

	return skb;
}

static bool ena_try_rx_buf_page_reuse(struct ena_rx_buffer *rx_info, u16 buf_len,
				      u16 len, int pkt_offset)
{
	struct ena_com_buf *ena_buf = &rx_info->ena_buf;

	/* Reuse the page only if more than ENA_MIN_RX_BUF_SIZE is left in
	 * the buffer for the next packet's data + headroom + tailroom.
	 */
	if (SKB_DATA_ALIGN(len + pkt_offset) + ENA_MIN_RX_BUF_SIZE <= ena_buf->len) {
		page_ref_inc(rx_info->page);
		rx_info->page_offset += buf_len;
		ena_buf->paddr += buf_len;
		ena_buf->len -= buf_len;
		return true;
	}

	return false;
}
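
/* Editor's note, a worked example with assumed values (see ena_netdev.h
 * for the real ENA_MIN_RX_BUF_SIZE): take ena_buf->len == 3712,
 * ENA_MIN_RX_BUF_SIZE == 1400, 64-byte SKB_DATA_ALIGN and pkt_offset == 0.
 * A 1200-byte packet gives SKB_DATA_ALIGN(1200) == 1216, and
 * 1216 + 1400 <= 3712, so the page is kept: its refcount is bumped and
 * the offset, paddr and remaining length advance by buf_len. A 3000-byte
 * packet fails the check (3008 + 1400 > 3712) and page ownership moves to
 * the stack instead.
 */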
1448 | ||
1738cd3e NB |
1449 | static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring, |
1450 | struct ena_com_rx_buf_info *ena_bufs, | |
1451 | u32 descs, | |
1452 | u16 *next_to_clean) | |
1453 | { | |
f7d625ad DA |
1454 | int tailroom = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); |
1455 | bool is_xdp_loaded = ena_xdp_present_ring(rx_ring); | |
ad974bae | 1456 | struct ena_rx_buffer *rx_info; |
cb3d4f98 | 1457 | struct ena_adapter *adapter; |
f7d625ad DA |
1458 | int page_offset, pkt_offset; |
1459 | dma_addr_t pre_reuse_paddr; | |
ad974bae | 1460 | u16 len, req_id, buf = 0; |
f7d625ad | 1461 | bool reuse_rx_buf_page; |
a01f2cd0 | 1462 | struct sk_buff *skb; |
f7d625ad DA |
1463 | void *buf_addr; |
1464 | int buf_offset; | |
1465 | u16 buf_len; | |
1738cd3e | 1466 | |
ad974bae NB |
1467 | len = ena_bufs[buf].len; |
1468 | req_id = ena_bufs[buf].req_id; | |
30623e1e | 1469 | |
ad974bae NB |
1470 | rx_info = &rx_ring->rx_buffer_info[req_id]; |
1471 | ||
1738cd3e | 1472 | if (unlikely(!rx_info->page)) { |
cb3d4f98 AK |
1473 | adapter = rx_ring->adapter; |
1474 | netif_err(adapter, rx_err, rx_ring->netdev, | |
1475 | "Page is NULL. qid %u req_id %u\n", rx_ring->qid, req_id); | |
1476 | ena_increase_stat(&rx_ring->rx_stats.bad_req_id, 1, &rx_ring->syncp); | |
9fe890cc | 1477 | ena_reset_device(adapter, ENA_REGS_RESET_INV_RX_REQ_ID); |
1738cd3e NB |
1478 | return NULL; |
1479 | } | |
1480 | ||
1481 | netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev, | |
1482 | "rx_info %p page %p\n", | |
1483 | rx_info, rx_info->page); | |
1484 | ||
f7d625ad DA |
1485 | buf_offset = rx_info->buf_offset; |
1486 | pkt_offset = buf_offset - rx_ring->rx_headroom; | |
9e5269a9 | 1487 | page_offset = rx_info->page_offset; |
f7d625ad | 1488 | buf_addr = page_address(rx_info->page) + page_offset; |
1738cd3e NB |
1489 | |
1490 | if (len <= rx_ring->rx_copybreak) { | |
f7d625ad | 1491 | skb = ena_alloc_skb(rx_ring, NULL, len); |
4265114d | 1492 | if (unlikely(!skb)) |
1738cd3e | 1493 | return NULL; |
1738cd3e | 1494 | |
1738cd3e NB |
1495 | /* sync this buffer for CPU use */ |
1496 | dma_sync_single_for_cpu(rx_ring->dev, | |
f7d625ad | 1497 | dma_unmap_addr(&rx_info->ena_buf, paddr) + pkt_offset, |
1738cd3e NB |
1498 | len, |
1499 | DMA_FROM_DEVICE); | |
f7d625ad | 1500 | skb_copy_to_linear_data(skb, buf_addr + buf_offset, len); |
1738cd3e | 1501 | dma_sync_single_for_device(rx_ring->dev, |
f7d625ad | 1502 | dma_unmap_addr(&rx_info->ena_buf, paddr) + pkt_offset, |
1738cd3e NB |
1503 | len, |
1504 | DMA_FROM_DEVICE); | |
1505 | ||
1506 | skb_put(skb, len); | |
f7d625ad DA |
1507 | netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev, |
1508 | "RX allocated small packet. len %d.\n", skb->len); | |
1738cd3e | 1509 | skb->protocol = eth_type_trans(skb, rx_ring->netdev); |
f9172498 | 1510 | rx_ring->free_ids[*next_to_clean] = req_id; |
1738cd3e NB |
1511 | *next_to_clean = ENA_RX_RING_IDX_ADD(*next_to_clean, descs, |
1512 | rx_ring->ring_size); | |
1513 | return skb; | |
1514 | } | |
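/* Illustrative note: the copy path above syncs the buffer back to the
 * device because the page stays mapped and queued for reuse; only the
 * small packet's bytes were copied into the new linear skb.
 */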
1515 | ||
f7d625ad DA |
1516 | buf_len = SKB_DATA_ALIGN(len + buf_offset + tailroom); |
1517 | ||
1518 | pre_reuse_paddr = dma_unmap_addr(&rx_info->ena_buf, paddr); | |
1519 | ||
1520 | /* If XDP isn't loaded try to reuse part of the RX buffer */ | |
1521 | reuse_rx_buf_page = !is_xdp_loaded && | |
1522 | ena_try_rx_buf_page_reuse(rx_info, buf_len, len, pkt_offset); | |
9e5269a9 | 1523 | |
f7d625ad DA |
1524 | dma_sync_single_for_cpu(rx_ring->dev, |
1525 | pre_reuse_paddr + pkt_offset, | |
1526 | len, | |
1527 | DMA_FROM_DEVICE); | |
1528 | ||
1529 | if (!reuse_rx_buf_page) | |
1530 | ena_unmap_rx_buff_attrs(rx_ring, rx_info, DMA_ATTR_SKIP_CPU_SYNC); | |
1531 | ||
1532 | skb = ena_alloc_skb(rx_ring, buf_addr, buf_len); | |
4265114d | 1533 | if (unlikely(!skb)) |
1738cd3e | 1534 | return NULL; |
1738cd3e | 1535 | |
9e5269a9 | 1536 | /* Populate skb's linear part */ |
f7d625ad | 1537 | skb_reserve(skb, buf_offset); |
9e5269a9 SA |
1538 | skb_put(skb, len); |
1539 | skb->protocol = eth_type_trans(skb, rx_ring->netdev); | |
1738cd3e | 1540 | |
9e5269a9 | 1541 | do { |
1738cd3e | 1542 | netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev, |
bf2746e8 | 1543 | "RX skb updated. len %d. data_len %d\n", |
1738cd3e NB |
1544 | skb->len, skb->data_len); |
1545 | ||
f7d625ad DA |
1546 | if (!reuse_rx_buf_page) |
1547 | rx_info->page = NULL; | |
ad974bae | 1548 | |
f9172498 | 1549 | rx_ring->free_ids[*next_to_clean] = req_id; |
1738cd3e NB |
1550 | *next_to_clean = |
1551 | ENA_RX_RING_IDX_NEXT(*next_to_clean, | |
1552 | rx_ring->ring_size); | |
1553 | if (likely(--descs == 0)) | |
1554 | break; | |
ad974bae NB |
1555 | |
1556 | buf++; | |
1557 | len = ena_bufs[buf].len; | |
1558 | req_id = ena_bufs[buf].req_id; | |
30623e1e | 1559 | |
ad974bae | 1560 | rx_info = &rx_ring->rx_buffer_info[req_id]; |
9e5269a9 | 1561 | |
f7d625ad DA |
1562 | /* rx_info->buf_offset includes rx_ring->rx_headroom */ |
1563 | buf_offset = rx_info->buf_offset; | |
1564 | pkt_offset = buf_offset - rx_ring->rx_headroom; | |
1565 | buf_len = SKB_DATA_ALIGN(len + buf_offset + tailroom); | |
1566 | page_offset = rx_info->page_offset; | |
1567 | ||
1568 | pre_reuse_paddr = dma_unmap_addr(&rx_info->ena_buf, paddr); | |
1569 | ||
1570 | reuse_rx_buf_page = !is_xdp_loaded && | |
1571 | ena_try_rx_buf_page_reuse(rx_info, buf_len, len, pkt_offset); | |
1572 | ||
1573 | dma_sync_single_for_cpu(rx_ring->dev, | |
1574 | pre_reuse_paddr + pkt_offset, | |
1575 | len, | |
1576 | DMA_FROM_DEVICE); | |
1577 | ||
1578 | if (!reuse_rx_buf_page) | |
1579 | ena_unmap_rx_buff_attrs(rx_ring, rx_info, | |
1580 | DMA_ATTR_SKIP_CPU_SYNC); | |
9e5269a9 SA |
1581 | |
1582 | skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_info->page, | |
f7d625ad | 1583 | page_offset + buf_offset, len, buf_len); |
9e5269a9 | 1584 | |
1738cd3e NB |
1585 | } while (1); |
1586 | ||
1587 | return skb; | |
1588 | } | |
1589 | ||
1590 | /* ena_rx_checksum - indicate in skb if hw indicated a good cksum | |
1591 | * @rx_ring: the ring from which the packet was received | 
1592 | * @ena_rx_ctx: received packet context/metadata | |
1593 | * @skb: skb currently being received and modified | |
1594 | */ | |
c2b54204 | 1595 | static void ena_rx_checksum(struct ena_ring *rx_ring, |
1738cd3e NB |
1596 | struct ena_com_rx_ctx *ena_rx_ctx, |
1597 | struct sk_buff *skb) | |
1598 | { | |
1599 | /* Rx csum disabled */ | |
1600 | if (unlikely(!(rx_ring->netdev->features & NETIF_F_RXCSUM))) { | |
1601 | skb->ip_summed = CHECKSUM_NONE; | |
1602 | return; | |
1603 | } | |
1604 | ||
1605 | /* For fragmented packets the checksum isn't valid */ | |
1606 | if (ena_rx_ctx->frag) { | |
1607 | skb->ip_summed = CHECKSUM_NONE; | |
1608 | return; | |
1609 | } | |
1610 | ||
1611 | /* if IPv4 and the device reported a header checksum error */ | 
1612 | if (unlikely((ena_rx_ctx->l3_proto == ENA_ETH_IO_L3_PROTO_IPV4) && | |
1613 | (ena_rx_ctx->l3_csum_err))) { | |
1614 | /* ipv4 checksum error */ | |
1615 | skb->ip_summed = CHECKSUM_NONE; | |
d0e8831d | 1616 | ena_increase_stat(&rx_ring->rx_stats.csum_bad, 1, |
89dd735e | 1617 | &rx_ring->syncp); |
cd7aea18 | 1618 | netif_dbg(rx_ring->adapter, rx_err, rx_ring->netdev, |
1738cd3e NB |
1619 | "RX IPv4 header checksum error\n"); |
1620 | return; | |
1621 | } | |
1622 | ||
1623 | /* if TCP/UDP */ | |
1624 | if (likely((ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_TCP) || | |
1625 | (ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_UDP))) { | |
1626 | if (unlikely(ena_rx_ctx->l4_csum_err)) { | |
1627 | /* TCP/UDP checksum error */ | |
d0e8831d | 1628 | ena_increase_stat(&rx_ring->rx_stats.csum_bad, 1, |
89dd735e | 1629 | &rx_ring->syncp); |
cd7aea18 | 1630 | netif_dbg(rx_ring->adapter, rx_err, rx_ring->netdev, |
1738cd3e NB |
1631 | "RX L4 checksum error\n"); |
1632 | skb->ip_summed = CHECKSUM_NONE; | |
1633 | return; | |
1634 | } | |
1635 | ||
cb36bb36 AK |
1636 | if (likely(ena_rx_ctx->l4_csum_checked)) { |
1637 | skb->ip_summed = CHECKSUM_UNNECESSARY; | |
89dd735e SA |
1638 | ena_increase_stat(&rx_ring->rx_stats.csum_good, 1, |
1639 | &rx_ring->syncp); | |
cb36bb36 | 1640 | } else { |
89dd735e SA |
1641 | ena_increase_stat(&rx_ring->rx_stats.csum_unchecked, 1, |
1642 | &rx_ring->syncp); | |
cb36bb36 AK |
1643 | skb->ip_summed = CHECKSUM_NONE; |
1644 | } | |
1645 | } else { | |
1646 | skb->ip_summed = CHECKSUM_NONE; | |
1647 | return; | |
1738cd3e | 1648 | } |
cb36bb36 | 1649 | |
1738cd3e NB |
1650 | } |
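/* Illustrative note on the skb->ip_summed values used above:
 * CHECKSUM_UNNECESSARY tells the stack the device already verified the
 * L4 checksum, so software verification is skipped; CHECKSUM_NONE makes
 * the stack compute and verify the checksum itself.
 */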
1651 | ||
1652 | static void ena_set_rx_hash(struct ena_ring *rx_ring, | |
1653 | struct ena_com_rx_ctx *ena_rx_ctx, | |
1654 | struct sk_buff *skb) | |
1655 | { | |
1656 | enum pkt_hash_types hash_type; | |
1657 | ||
1658 | if (likely(rx_ring->netdev->features & NETIF_F_RXHASH)) { | |
1659 | if (likely((ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_TCP) || | |
1660 | (ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_UDP))) | |
1661 | ||
1662 | hash_type = PKT_HASH_TYPE_L4; | |
1663 | else | |
1664 | hash_type = PKT_HASH_TYPE_NONE; | |
1665 | ||
1666 | /* Override hash type if the packet is fragmented */ | |
1667 | if (ena_rx_ctx->frag) | |
1668 | hash_type = PKT_HASH_TYPE_NONE; | |
1669 | ||
1670 | skb_set_hash(skb, ena_rx_ctx->hash, hash_type); | |
1671 | } | |
1672 | } | |
1673 | ||
32109c70 | 1674 | static int ena_xdp_handle_buff(struct ena_ring *rx_ring, struct xdp_buff *xdp) |
838c93dc SJ |
1675 | { |
1676 | struct ena_rx_buffer *rx_info; | |
1677 | int ret; | |
1678 | ||
1679 | rx_info = &rx_ring->rx_buffer_info[rx_ring->ena_bufs[0].req_id]; | |
be9df4af | 1680 | xdp_prepare_buff(xdp, page_address(rx_info->page), |
f7d625ad | 1681 | rx_info->buf_offset, |
be9df4af | 1682 | rx_ring->ena_bufs[0].len, false); |
838c93dc SJ |
1683 | /* If for some reason we received a bigger packet than |
1684 | * we expect, then we simply drop it | |
1685 | */ | |
1686 | if (unlikely(rx_ring->ena_bufs[0].len > ENA_XDP_MAX_MTU)) | |
59811faa | 1687 | return ENA_XDP_DROP; |
838c93dc | 1688 | |
e8223eef | 1689 | ret = ena_xdp_execute(rx_ring, xdp); |
838c93dc SJ |
1690 | |
1691 | /* The xdp program might expand the headers */ | |
59811faa | 1692 | if (ret == ENA_XDP_PASS) { |
f7d625ad | 1693 | rx_info->buf_offset = xdp->data - xdp->data_hard_start; |
838c93dc SJ |
1694 | rx_ring->ena_bufs[0].len = xdp->data_end - xdp->data; |
1695 | } | |
1696 | ||
1697 | return ret; | |
1698 | } | |
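/* Illustrative note (not driver code): an attached XDP program may move
 * xdp->data with bpf_xdp_adjust_head(), e.g. to strip or push a header.
 * The ENA_XDP_PASS branch above re-derives buf_offset and len from the
 * possibly-moved data/data_end pointers, so the skb later built in
 * ena_rx_skb() sees the adjusted packet.
 */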
1738cd3e NB |
1699 | /* ena_clean_rx_irq - Cleanup RX irq |
1700 | * @rx_ring: RX ring to clean | |
1701 | * @napi: napi handler | |
1702 | * @budget: how many packets driver is allowed to clean | |
1703 | * | |
1704 | * Returns the number of cleaned buffers. | |
1705 | */ | |
1706 | static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi, | |
1707 | u32 budget) | |
1708 | { | |
1709 | u16 next_to_clean = rx_ring->next_to_clean; | |
1738cd3e | 1710 | struct ena_com_rx_ctx ena_rx_ctx; |
68f236df | 1711 | struct ena_rx_buffer *rx_info; |
1738cd3e | 1712 | struct ena_adapter *adapter; |
548c4940 | 1713 | u32 res_budget, work_done; |
838c93dc SJ |
1714 | int rx_copybreak_pkt = 0; |
1715 | int refill_threshold; | |
1738cd3e NB |
1716 | struct sk_buff *skb; |
1717 | int refill_required; | |
838c93dc | 1718 | struct xdp_buff xdp; |
a318c70a | 1719 | int xdp_flags = 0; |
1738cd3e | 1720 | int total_len = 0; |
838c93dc SJ |
1721 | int xdp_verdict; |
1722 | int rc = 0; | |
ad974bae | 1723 | int i; |
1738cd3e NB |
1724 | |
1725 | netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev, | |
1726 | "%s qid %d\n", __func__, rx_ring->qid); | |
1727 | res_budget = budget; | |
43b5169d | 1728 | xdp_init_buff(&xdp, ENA_PAGE_SIZE, &rx_ring->xdp_rxq); |
548c4940 | 1729 | |
1738cd3e | 1730 | do { |
59811faa | 1731 | xdp_verdict = ENA_XDP_PASS; |
838c93dc | 1732 | skb = NULL; |
1738cd3e NB |
1733 | ena_rx_ctx.ena_bufs = rx_ring->ena_bufs; |
1734 | ena_rx_ctx.max_bufs = rx_ring->sgl_size; | |
1735 | ena_rx_ctx.descs = 0; | |
68f236df | 1736 | ena_rx_ctx.pkt_offset = 0; |
1738cd3e NB |
1737 | rc = ena_com_rx_pkt(rx_ring->ena_com_io_cq, |
1738 | rx_ring->ena_com_io_sq, | |
1739 | &ena_rx_ctx); | |
1740 | if (unlikely(rc)) | |
1741 | goto error; | |
1742 | ||
1743 | if (unlikely(ena_rx_ctx.descs == 0)) | |
1744 | break; | |
1745 | ||
1396d314 | 1746 | /* First descriptor might have an offset set by the device */ |
68f236df | 1747 | rx_info = &rx_ring->rx_buffer_info[rx_ring->ena_bufs[0].req_id]; |
f7d625ad | 1748 | rx_info->buf_offset += ena_rx_ctx.pkt_offset; |
68f236df | 1749 | |
1738cd3e NB |
1750 | netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev, |
1751 | "rx_poll: q %d got packet from ena. descs #: %d l3 proto %d l4 proto %d hash: %x\n", | |
1752 | rx_ring->qid, ena_rx_ctx.descs, ena_rx_ctx.l3_proto, | |
1753 | ena_rx_ctx.l4_proto, ena_rx_ctx.hash); | |
1754 | ||
838c93dc SJ |
1755 | if (ena_xdp_present_ring(rx_ring)) |
1756 | xdp_verdict = ena_xdp_handle_buff(rx_ring, &xdp); | |
1757 | ||
1738cd3e | 1758 | /* allocate skb and fill it */ |
59811faa | 1759 | if (xdp_verdict == ENA_XDP_PASS) |
838c93dc SJ |
1760 | skb = ena_rx_skb(rx_ring, |
1761 | rx_ring->ena_bufs, | |
1762 | ena_rx_ctx.descs, | |
1763 | &next_to_clean); | |
1738cd3e | 1764 | |
1738cd3e | 1765 | if (unlikely(!skb)) { |
ad974bae | 1766 | for (i = 0; i < ena_rx_ctx.descs; i++) { |
a318c70a SA |
1767 | int req_id = rx_ring->ena_bufs[i].req_id; |
1768 | ||
1769 | rx_ring->free_ids[next_to_clean] = req_id; | |
ad974bae NB |
1770 | next_to_clean = |
1771 | ENA_RX_RING_IDX_NEXT(next_to_clean, | |
1772 | rx_ring->ring_size); | |
a318c70a SA |
1773 | |
1774 | /* Packet was passed for transmission, unmap it | 
1775 | * from the RX side. | 
1776 | */ | |
59811faa | 1777 | if (xdp_verdict & ENA_XDP_FORWARDED) { |
f7d625ad DA |
1778 | ena_unmap_rx_buff_attrs(rx_ring, |
1779 | &rx_ring->rx_buffer_info[req_id], | |
1780 | 0); | |
a318c70a SA |
1781 | rx_ring->rx_buffer_info[req_id].page = NULL; |
1782 | } | |
ad974bae | 1783 | } |
59811faa | 1784 | if (xdp_verdict != ENA_XDP_PASS) { |
a318c70a | 1785 | xdp_flags |= xdp_verdict; |
c7f5e34d | 1786 | total_len += ena_rx_ctx.ena_bufs[0].len; |
3921a81c | 1787 | res_budget--; |
838c93dc | 1788 | continue; |
3921a81c | 1789 | } |
1738cd3e NB |
1790 | break; |
1791 | } | |
1792 | ||
1793 | ena_rx_checksum(rx_ring, &ena_rx_ctx, skb); | |
1794 | ||
1795 | ena_set_rx_hash(rx_ring, &ena_rx_ctx, skb); | |
1796 | ||
1797 | skb_record_rx_queue(skb, rx_ring->qid); | |
1798 | ||
9e5269a9 | 1799 | if (rx_ring->ena_bufs[0].len <= rx_ring->rx_copybreak) |
1738cd3e | 1800 | rx_copybreak_pkt++; |
9e5269a9 SA |
1801 | |
1802 | total_len += skb->len; | |
1803 | ||
1804 | napi_gro_receive(napi, skb); | |
1738cd3e NB |
1805 | |
1806 | res_budget--; | |
1807 | } while (likely(res_budget)); | |
1808 | ||
1809 | work_done = budget - res_budget; | |
1738cd3e NB |
1810 | rx_ring->per_napi_packets += work_done; |
1811 | u64_stats_update_begin(&rx_ring->syncp); | |
1812 | rx_ring->rx_stats.bytes += total_len; | |
1813 | rx_ring->rx_stats.cnt += work_done; | |
1814 | rx_ring->rx_stats.rx_copybreak_pkt += rx_copybreak_pkt; | |
1815 | u64_stats_update_end(&rx_ring->syncp); | |
1816 | ||
1817 | rx_ring->next_to_clean = next_to_clean; | |
1818 | ||
7cfe9a55 | 1819 | refill_required = ena_com_free_q_entries(rx_ring->ena_com_io_sq); |
0574bb80 AK |
1820 | refill_threshold = |
1821 | min_t(int, rx_ring->ring_size / ENA_RX_REFILL_THRESH_DIVIDER, | |
1822 | ENA_RX_REFILL_THRESH_PACKET); | |
1738cd3e NB |
1823 | |
1824 | /* Optimization, try to batch new rx buffers */ | |
1825 | if (refill_required > refill_threshold) { | |
1826 | ena_com_update_dev_comp_head(rx_ring->ena_com_io_cq); | |
1827 | ena_refill_rx_bufs(rx_ring, refill_required); | |
1828 | } | |
1829 | ||
59811faa | 1830 | if (xdp_flags & ENA_XDP_REDIRECT) |
a318c70a SA |
1831 | xdp_do_flush_map(); |
1832 | ||
1738cd3e NB |
1833 | return work_done; |
1834 | ||
1835 | error: | |
6f411fb5 SAS |
1836 | if (xdp_flags & ENA_XDP_REDIRECT) |
1837 | xdp_do_flush(); | |
1838 | ||
1738cd3e NB |
1839 | adapter = netdev_priv(rx_ring->netdev); |
1840 | ||
5b7022cf | 1841 | if (rc == -ENOSPC) { |
89dd735e SA |
1842 | ena_increase_stat(&rx_ring->rx_stats.bad_desc_num, 1, |
1843 | &rx_ring->syncp); | |
9fe890cc | 1844 | ena_reset_device(adapter, ENA_REGS_RESET_TOO_MANY_RX_DESCS); |
5b7022cf | 1845 | } else { |
89dd735e SA |
1846 | ena_increase_stat(&rx_ring->rx_stats.bad_req_id, 1, |
1847 | &rx_ring->syncp); | |
9fe890cc | 1848 | ena_reset_device(adapter, ENA_REGS_RESET_INV_RX_REQ_ID); |
5b7022cf | 1849 | } |
1738cd3e NB |
1850 | return 0; |
1851 | } | |
1852 | ||
282faf61 | 1853 | static void ena_dim_work(struct work_struct *w) |
1738cd3e | 1854 | { |
282faf61 AK |
1855 | struct dim *dim = container_of(w, struct dim, work); |
1856 | struct dim_cq_moder cur_moder = | |
1857 | net_dim_get_rx_moderation(dim->mode, dim->profile_ix); | |
1858 | struct ena_napi *ena_napi = container_of(dim, struct ena_napi, dim); | |
1859 | ||
1860 | ena_napi->rx_ring->smoothed_interval = cur_moder.usec; | |
1861 | dim->state = DIM_START_MEASURE; | |
1862 | } | |
1863 | ||
1864 | static void ena_adjust_adaptive_rx_intr_moderation(struct ena_napi *ena_napi) | |
1865 | { | |
1866 | struct dim_sample dim_sample; | |
1867 | struct ena_ring *rx_ring = ena_napi->rx_ring; | |
1868 | ||
1869 | if (!rx_ring->per_napi_packets) | |
1870 | return; | |
1871 | ||
1872 | rx_ring->non_empty_napi_events++; | |
1873 | ||
1874 | dim_update_sample(rx_ring->non_empty_napi_events, | |
1875 | rx_ring->rx_stats.cnt, | |
1876 | rx_ring->rx_stats.bytes, | |
1877 | &dim_sample); | |
1878 | ||
1879 | net_dim(&ena_napi->dim, dim_sample); | |
1880 | ||
1738cd3e | 1881 | rx_ring->per_napi_packets = 0; |
1738cd3e NB |
1882 | } |
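/* Illustrative flow (profile values assumed): net_dim() accumulates the
 * samples fed above and, when it decides to change moderation, schedules
 * dim->work, which runs ena_dim_work(). If the chosen profile has, say,
 * usec = 64, rx_ring->smoothed_interval becomes 64 and the next
 * ena_unmask_interrupt() call programs a 64 us RX interrupt delay.
 */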
1883 | ||
c2b54204 | 1884 | static void ena_unmask_interrupt(struct ena_ring *tx_ring, |
418df30f NB |
1885 | struct ena_ring *rx_ring) |
1886 | { | |
e712f3e4 | 1887 | u32 rx_interval = tx_ring->smoothed_interval; |
418df30f | 1888 | struct ena_eth_io_intr_reg intr_reg; |
e712f3e4 | 1889 | |
548c4940 SJ |
1890 | /* Rx ring can be NULL for XDP tx queues which don't have an | 
1891 | * accompanying rx_ring pair. | |
1892 | */ | |
1893 | if (rx_ring) | |
1894 | rx_interval = ena_com_get_adaptive_moderation_enabled(rx_ring->ena_dev) ? | |
1895 | rx_ring->smoothed_interval : | |
1896 | ena_com_get_nonadaptive_moderation_interval_rx(rx_ring->ena_dev); | |
418df30f NB |
1897 | |
1898 | /* Update intr register: rx intr delay, | |
1899 | * tx intr delay and interrupt unmask | |
1900 | */ | |
1901 | ena_com_update_intr_reg(&intr_reg, | |
7b8a2878 | 1902 | rx_interval, |
418df30f NB |
1903 | tx_ring->smoothed_interval, |
1904 | true); | |
1905 | ||
89dd735e SA |
1906 | ena_increase_stat(&tx_ring->tx_stats.unmask_interrupt, 1, |
1907 | &tx_ring->syncp); | |
bf2746e8 | 1908 | |
418df30f NB |
1909 | /* It is a shared MSI-X. | 
1910 | * Tx and Rx CQ have a pointer to it, | 
1911 | * so we use one of them to reach the intr reg. | 
548c4940 | 1912 | * The Tx ring is used because the rx_ring is NULL for XDP queues. | 
418df30f | 1913 | */ |
548c4940 | 1914 | ena_com_unmask_intr(tx_ring->ena_com_io_cq, &intr_reg); |
418df30f NB |
1915 | } |
1916 | ||
c2b54204 | 1917 | static void ena_update_ring_numa_node(struct ena_ring *tx_ring, |
1738cd3e NB |
1918 | struct ena_ring *rx_ring) |
1919 | { | |
1920 | int cpu = get_cpu(); | |
1921 | int numa_node; | |
1922 | ||
1923 | /* Check only one ring since the 2 rings are running on the same cpu */ | |
1924 | if (likely(tx_ring->cpu == cpu)) | |
1925 | goto out; | |
1926 | ||
a8ee104f DA |
1927 | tx_ring->cpu = cpu; |
1928 | if (rx_ring) | |
1929 | rx_ring->cpu = cpu; | |
1930 | ||
1738cd3e | 1931 | numa_node = cpu_to_node(cpu); |
a8ee104f DA |
1932 | |
1933 | if (likely(tx_ring->numa_node == numa_node)) | |
1934 | goto out; | |
1935 | ||
1738cd3e NB |
1936 | put_cpu(); |
1937 | ||
1938 | if (numa_node != NUMA_NO_NODE) { | |
1939 | ena_com_update_numa_node(tx_ring->ena_com_io_cq, numa_node); | |
a8ee104f DA |
1940 | tx_ring->numa_node = numa_node; |
1941 | if (rx_ring) { | |
1942 | rx_ring->numa_node = numa_node; | |
548c4940 SJ |
1943 | ena_com_update_numa_node(rx_ring->ena_com_io_cq, |
1944 | numa_node); | |
a8ee104f | 1945 | } |
1738cd3e NB |
1946 | } |
1947 | ||
1738cd3e NB |
1948 | return; |
1949 | out: | |
1950 | put_cpu(); | |
1951 | } | |
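/* Illustrative note: get_cpu() above disables preemption, so the CPU and
 * NUMA-node comparison cannot race with the current task migrating
 * mid-check; every exit path (including the early "out:" label) must
 * therefore be paired with put_cpu().
 */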
1952 | ||
548c4940 SJ |
1953 | static int ena_clean_xdp_irq(struct ena_ring *xdp_ring, u32 budget) |
1954 | { | |
1955 | u32 total_done = 0; | |
1956 | u16 next_to_clean; | |
548c4940 SJ |
1957 | int tx_pkts = 0; |
1958 | u16 req_id; | |
1959 | int rc; | |
1960 | ||
1961 | if (unlikely(!xdp_ring)) | |
1962 | return 0; | |
1963 | next_to_clean = xdp_ring->next_to_clean; | |
1964 | ||
1965 | while (tx_pkts < budget) { | |
1966 | struct ena_tx_buffer *tx_info; | |
1967 | struct xdp_frame *xdpf; | |
1968 | ||
1969 | rc = ena_com_tx_comp_req_id_get(xdp_ring->ena_com_io_cq, | |
1970 | &req_id); | |
c255a34e AK |
1971 | if (rc) { |
1972 | if (unlikely(rc == -EINVAL)) | |
1973 | handle_invalid_req_id(xdp_ring, req_id, NULL, | |
1974 | true); | |
548c4940 | 1975 | break; |
c255a34e | 1976 | } |
548c4940 | 1977 | |
c255a34e | 1978 | /* validate that the request id points to a valid xdp_frame */ |
548c4940 SJ |
1979 | rc = validate_xdp_req_id(xdp_ring, req_id); |
1980 | if (rc) | |
1981 | break; | |
1982 | ||
1983 | tx_info = &xdp_ring->tx_buffer_info[req_id]; | |
1984 | xdpf = tx_info->xdpf; | |
1985 | ||
1986 | tx_info->xdpf = NULL; | |
1987 | tx_info->last_jiffies = 0; | |
1988 | ena_unmap_tx_buff(xdp_ring, tx_info); | |
1989 | ||
1990 | netif_dbg(xdp_ring->adapter, tx_done, xdp_ring->netdev, | |
1991 | "tx_poll: q %d skb %p completed\n", xdp_ring->qid, | |
1992 | xdpf); | |
1993 | ||
548c4940 SJ |
1994 | tx_pkts++; |
1995 | total_done += tx_info->tx_descs; | |
1996 | ||
f8b91f25 | 1997 | xdp_return_frame(xdpf); |
548c4940 SJ |
1998 | xdp_ring->free_ids[next_to_clean] = req_id; |
1999 | next_to_clean = ENA_TX_RING_IDX_NEXT(next_to_clean, | |
2000 | xdp_ring->ring_size); | |
2001 | } | |
2002 | ||
2003 | xdp_ring->next_to_clean = next_to_clean; | |
2004 | ena_com_comp_ack(xdp_ring->ena_com_io_sq, total_done); | |
2005 | ena_com_update_dev_comp_head(xdp_ring->ena_com_io_cq); | |
2006 | ||
2007 | netif_dbg(xdp_ring->adapter, tx_done, xdp_ring->netdev, | |
2008 | "tx_poll: q %d done. total pkts: %d\n", | |
2009 | xdp_ring->qid, tx_pkts); | |
2010 | ||
2011 | return tx_pkts; | |
2012 | } | |
2013 | ||
1738cd3e NB |
2014 | static int ena_io_poll(struct napi_struct *napi, int budget) |
2015 | { | |
2016 | struct ena_napi *ena_napi = container_of(napi, struct ena_napi, napi); | |
2017 | struct ena_ring *tx_ring, *rx_ring; | |
24dee0c7 NB |
2018 | int tx_work_done; |
2019 | int rx_work_done = 0; | |
1738cd3e NB |
2020 | int tx_budget; |
2021 | int napi_comp_call = 0; | |
2022 | int ret; | |
2023 | ||
2024 | tx_ring = ena_napi->tx_ring; | |
2025 | rx_ring = ena_napi->rx_ring; | |
2026 | ||
2027 | tx_budget = tx_ring->ring_size / ENA_TX_POLL_BUDGET_DIVIDER; | |
2028 | ||
3f6159db NB |
2029 | if (!test_bit(ENA_FLAG_DEV_UP, &tx_ring->adapter->flags) || |
2030 | test_bit(ENA_FLAG_TRIGGER_RESET, &tx_ring->adapter->flags)) { | |
1738cd3e NB |
2031 | napi_complete_done(napi, 0); |
2032 | return 0; | |
2033 | } | |
2034 | ||
2035 | tx_work_done = ena_clean_tx_irq(tx_ring, tx_budget); | |
24dee0c7 NB |
2036 | /* On netpoll the budget is zero and the handler should only clean the |
2037 | * tx completions. | |
2038 | */ | |
2039 | if (likely(budget)) | |
2040 | rx_work_done = ena_clean_rx_irq(rx_ring, napi, budget); | |
1738cd3e | 2041 | |
b1669c9f NB |
2042 | /* If the device is about to reset or is down, avoid unmasking | 
2043 | * the interrupt and return 0 so NAPI won't reschedule. | 
2044 | */ | |
2045 | if (unlikely(!test_bit(ENA_FLAG_DEV_UP, &tx_ring->adapter->flags) || | |
2046 | test_bit(ENA_FLAG_TRIGGER_RESET, &tx_ring->adapter->flags))) { | |
2047 | napi_complete_done(napi, 0); | |
2048 | ret = 0; | |
1738cd3e | 2049 | |
b1669c9f | 2050 | } else if ((budget > rx_work_done) && (tx_budget > tx_work_done)) { |
1738cd3e | 2051 | napi_comp_call = 1; |
1738cd3e | 2052 | |
b1669c9f NB |
2053 | /* Update numa and unmask the interrupt only when scheduled | 
2054 | * from the interrupt context (vs. from sk_busy_loop). | 
1738cd3e | 2055 | */ |
1e5ae350 AK |
2056 | if (napi_complete_done(napi, rx_work_done) && |
2057 | READ_ONCE(ena_napi->interrupts_masked)) { | |
2058 | smp_rmb(); /* make sure interrupts_masked is read */ | |
2059 | WRITE_ONCE(ena_napi->interrupts_masked, false); | |
282faf61 AK |
2060 | /* We apply adaptive moderation on Rx path only. |
2061 | * Tx uses static interrupt moderation. | |
2062 | */ | |
b1669c9f | 2063 | if (ena_com_get_adaptive_moderation_enabled(rx_ring->ena_dev)) |
282faf61 | 2064 | ena_adjust_adaptive_rx_intr_moderation(ena_napi); |
b1669c9f | 2065 | |
a8ee104f | 2066 | ena_update_ring_numa_node(tx_ring, rx_ring); |
418df30f | 2067 | ena_unmask_interrupt(tx_ring, rx_ring); |
b1669c9f | 2068 | } |
1738cd3e | 2069 | |
1738cd3e NB |
2070 | ret = rx_work_done; |
2071 | } else { | |
2072 | ret = budget; | |
2073 | } | |
2074 | ||
2075 | u64_stats_update_begin(&tx_ring->syncp); | |
2076 | tx_ring->tx_stats.napi_comp += napi_comp_call; | |
2077 | tx_ring->tx_stats.tx_poll++; | |
2078 | u64_stats_update_end(&tx_ring->syncp); | |
2079 | ||
0ee251cd SA |
2080 | tx_ring->tx_stats.last_napi_jiffies = jiffies; |
2081 | ||
1738cd3e NB |
2082 | return ret; |
2083 | } | |
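/* Illustrative note on the NAPI contract used above: returning the full
 * budget keeps the poller scheduled; returning less and having
 * napi_complete_done() succeed lets the core stop polling, after which
 * the driver re-arms the device interrupt via ena_unmask_interrupt().
 */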
2084 | ||
2085 | static irqreturn_t ena_intr_msix_mgmnt(int irq, void *data) | |
2086 | { | |
2087 | struct ena_adapter *adapter = (struct ena_adapter *)data; | |
2088 | ||
2089 | ena_com_admin_q_comp_intr_handler(adapter->ena_dev); | |
2090 | ||
2091 | /* Don't call the aenq handler before probe is done */ | |
2092 | if (likely(test_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags))) | |
2093 | ena_com_aenq_intr_handler(adapter->ena_dev, data); | |
2094 | ||
2095 | return IRQ_HANDLED; | |
2096 | } | |
2097 | ||
2098 | /* ena_intr_msix_io - MSI-X Interrupt Handler for Tx/Rx | |
2099 | * @irq: interrupt number | |
2100 | * @data: pointer to a network interface private napi device structure | |
2101 | */ | |
2102 | static irqreturn_t ena_intr_msix_io(int irq, void *data) | |
2103 | { | |
2104 | struct ena_napi *ena_napi = data; | |
2105 | ||
e4ac382e SA |
2106 | /* Used to check HW health */ |
2107 | WRITE_ONCE(ena_napi->first_interrupt, true); | |
8510e1a3 | 2108 | |
1e5ae350 AK |
2109 | WRITE_ONCE(ena_napi->interrupts_masked, true); |
2110 | smp_wmb(); /* write interrupts_masked before calling napi */ | |
2111 | ||
e745dafa | 2112 | napi_schedule_irqoff(&ena_napi->napi); |
1738cd3e NB |
2113 | |
2114 | return IRQ_HANDLED; | |
2115 | } | |
2116 | ||
06443684 NB |
2117 | /* Reserve a single MSI-X vector for management (admin + aenq), | 
2118 | * plus one vector for each potential io queue. | 
2119 | * The number of potential io queues is the minimum of what the device | 
2120 | * supports and the number of vCPUs. | |
2121 | */ | |
4d192660 | 2122 | static int ena_enable_msix(struct ena_adapter *adapter) |
1738cd3e | 2123 | { |
06443684 NB |
2124 | int msix_vecs, irq_cnt; |
2125 | ||
2126 | if (test_bit(ENA_FLAG_MSIX_ENABLED, &adapter->flags)) { | |
2127 | netif_err(adapter, probe, adapter->netdev, | |
2128 | "Error, MSI-X is already enabled\n"); | |
2129 | return -EPERM; | |
2130 | } | |
1738cd3e NB |
2131 | |
2132 | /* Reserve the max msix vectors we might need */ | 
ce1f3521 | 2133 | msix_vecs = ENA_MAX_MSIX_VEC(adapter->max_num_io_queues); |
1738cd3e | 2134 | netif_dbg(adapter, probe, adapter->netdev, |
bf2746e8 | 2135 | "Trying to enable MSI-X, vectors %d\n", msix_vecs); |
1738cd3e | 2136 | |
06443684 NB |
2137 | irq_cnt = pci_alloc_irq_vectors(adapter->pdev, ENA_MIN_MSIX_VEC, |
2138 | msix_vecs, PCI_IRQ_MSIX); | |
2139 | ||
2140 | if (irq_cnt < 0) { | |
1738cd3e | 2141 | netif_err(adapter, probe, adapter->netdev, |
06443684 | 2142 | "Failed to enable MSI-X. irq_cnt %d\n", irq_cnt); |
1738cd3e NB |
2143 | return -ENOSPC; |
2144 | } | |
2145 | ||
06443684 NB |
2146 | if (irq_cnt != msix_vecs) { |
2147 | netif_notice(adapter, probe, adapter->netdev, | |
bf2746e8 | 2148 | "Enable only %d MSI-X (out of %d), reduce the number of queues\n", |
06443684 | 2149 | irq_cnt, msix_vecs); |
faa615f9 | 2150 | adapter->num_io_queues = irq_cnt - ENA_ADMIN_MSIX_VEC; |
1738cd3e NB |
2151 | } |
2152 | ||
06443684 NB |
2153 | if (ena_init_rx_cpu_rmap(adapter)) |
2154 | netif_warn(adapter, probe, adapter->netdev, | |
2155 | "Failed to map IRQs to CPUs\n"); | |
2156 | ||
2157 | adapter->msix_vecs = irq_cnt; | |
2158 | set_bit(ENA_FLAG_MSIX_ENABLED, &adapter->flags); | |
1738cd3e NB |
2159 | |
2160 | return 0; | |
2161 | } | |
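/* Worked example (assuming ENA_MAX_MSIX_VEC(n) == n + ENA_ADMIN_MSIX_VEC
 * with ENA_ADMIN_MSIX_VEC == 1, consistent with the math above): with
 * max_num_io_queues = 8 the driver asks for 9 vectors. If the PCI core
 * grants only 5, num_io_queues is trimmed to 5 - 1 = 4: one vector per
 * IO queue plus the management vector.
 */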
2162 | ||
2163 | static void ena_setup_mgmnt_intr(struct ena_adapter *adapter) | |
2164 | { | |
2165 | u32 cpu; | |
2166 | ||
2167 | snprintf(adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].name, | |
2168 | ENA_IRQNAME_SIZE, "ena-mgmnt@pci:%s", | |
2169 | pci_name(adapter->pdev)); | |
2170 | adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].handler = | |
2171 | ena_intr_msix_mgmnt; | |
2172 | adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].data = adapter; | |
2173 | adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].vector = | |
da6f4cf5 | 2174 | pci_irq_vector(adapter->pdev, ENA_MGMNT_IRQ_IDX); |
1738cd3e NB |
2175 | cpu = cpumask_first(cpu_online_mask); |
2176 | adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].cpu = cpu; | |
2177 | cpumask_set_cpu(cpu, | |
2178 | &adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].affinity_hint_mask); | |
2179 | } | |
2180 | ||
2181 | static void ena_setup_io_intr(struct ena_adapter *adapter) | |
2182 | { | |
2183 | struct net_device *netdev; | |
2184 | int irq_idx, i, cpu; | |
548c4940 | 2185 | int io_queue_count; |
1738cd3e NB |
2186 | |
2187 | netdev = adapter->netdev; | |
548c4940 | 2188 | io_queue_count = adapter->num_io_queues + adapter->xdp_num_queues; |
1738cd3e | 2189 | |
548c4940 | 2190 | for (i = 0; i < io_queue_count; i++) { |
1738cd3e NB |
2191 | irq_idx = ENA_IO_IRQ_IDX(i); |
2192 | cpu = i % num_online_cpus(); | |
2193 | ||
2194 | snprintf(adapter->irq_tbl[irq_idx].name, ENA_IRQNAME_SIZE, | |
2195 | "%s-Tx-Rx-%d", netdev->name, i); | |
2196 | adapter->irq_tbl[irq_idx].handler = ena_intr_msix_io; | |
2197 | adapter->irq_tbl[irq_idx].data = &adapter->ena_napi[i]; | |
2198 | adapter->irq_tbl[irq_idx].vector = | |
da6f4cf5 | 2199 | pci_irq_vector(adapter->pdev, irq_idx); |
1738cd3e NB |
2200 | adapter->irq_tbl[irq_idx].cpu = cpu; |
2201 | ||
2202 | cpumask_set_cpu(cpu, | |
2203 | &adapter->irq_tbl[irq_idx].affinity_hint_mask); | |
2204 | } | |
2205 | } | |
2206 | ||
2207 | static int ena_request_mgmnt_irq(struct ena_adapter *adapter) | |
2208 | { | |
2209 | unsigned long flags = 0; | |
2210 | struct ena_irq *irq; | |
2211 | int rc; | |
2212 | ||
2213 | irq = &adapter->irq_tbl[ENA_MGMNT_IRQ_IDX]; | |
2214 | rc = request_irq(irq->vector, irq->handler, flags, irq->name, | |
2215 | irq->data); | |
2216 | if (rc) { | |
2217 | netif_err(adapter, probe, adapter->netdev, | |
bf2746e8 | 2218 | "Failed to request admin irq\n"); |
1738cd3e NB |
2219 | return rc; |
2220 | } | |
2221 | ||
2222 | netif_dbg(adapter, probe, adapter->netdev, | |
bf2746e8 | 2223 | "Set affinity hint of mgmnt irq.to 0x%lx (irq vector: %d)\n", |
1738cd3e NB |
2224 | irq->affinity_hint_mask.bits[0], irq->vector); |
2225 | ||
2226 | irq_set_affinity_hint(irq->vector, &irq->affinity_hint_mask); | |
2227 | ||
2228 | return rc; | |
2229 | } | |
2230 | ||
2231 | static int ena_request_io_irq(struct ena_adapter *adapter) | |
2232 | { | |
e02ae6ed | 2233 | u32 io_queue_count = adapter->num_io_queues + adapter->xdp_num_queues; |
1738cd3e NB |
2234 | unsigned long flags = 0; |
2235 | struct ena_irq *irq; | |
2236 | int rc = 0, i, k; | |
2237 | ||
06443684 NB |
2238 | if (!test_bit(ENA_FLAG_MSIX_ENABLED, &adapter->flags)) { |
2239 | netif_err(adapter, ifup, adapter->netdev, | |
2240 | "Failed to request I/O IRQ: MSI-X is not enabled\n"); | |
2241 | return -EINVAL; | |
2242 | } | |
2243 | ||
e02ae6ed | 2244 | for (i = ENA_IO_IRQ_FIRST_IDX; i < ENA_MAX_MSIX_VEC(io_queue_count); i++) { |
1738cd3e NB |
2245 | irq = &adapter->irq_tbl[i]; |
2246 | rc = request_irq(irq->vector, irq->handler, flags, irq->name, | |
2247 | irq->data); | |
2248 | if (rc) { | |
2249 | netif_err(adapter, ifup, adapter->netdev, | |
2250 | "Failed to request I/O IRQ. index %d rc %d\n", | |
2251 | i, rc); | |
2252 | goto err; | |
2253 | } | |
2254 | ||
2255 | netif_dbg(adapter, ifup, adapter->netdev, | |
bf2746e8 | 2256 | "Set affinity hint of irq. index %d to 0x%lx (irq vector: %d)\n", |
1738cd3e NB |
2257 | i, irq->affinity_hint_mask.bits[0], irq->vector); |
2258 | ||
2259 | irq_set_affinity_hint(irq->vector, &irq->affinity_hint_mask); | |
2260 | } | |
2261 | ||
2262 | return rc; | |
2263 | ||
2264 | err: | |
2265 | for (k = ENA_IO_IRQ_FIRST_IDX; k < i; k++) { | |
2266 | irq = &adapter->irq_tbl[k]; | |
2267 | free_irq(irq->vector, irq->data); | |
2268 | } | |
2269 | ||
2270 | return rc; | |
2271 | } | |
2272 | ||
2273 | static void ena_free_mgmnt_irq(struct ena_adapter *adapter) | |
2274 | { | |
2275 | struct ena_irq *irq; | |
2276 | ||
2277 | irq = &adapter->irq_tbl[ENA_MGMNT_IRQ_IDX]; | |
2278 | synchronize_irq(irq->vector); | |
2279 | irq_set_affinity_hint(irq->vector, NULL); | |
2280 | free_irq(irq->vector, irq->data); | |
2281 | } | |
2282 | ||
2283 | static void ena_free_io_irq(struct ena_adapter *adapter) | |
2284 | { | |
e02ae6ed | 2285 | u32 io_queue_count = adapter->num_io_queues + adapter->xdp_num_queues; |
1738cd3e NB |
2286 | struct ena_irq *irq; |
2287 | int i; | |
2288 | ||
2289 | #ifdef CONFIG_RFS_ACCEL | |
2290 | if (adapter->msix_vecs >= 1) { | |
2291 | free_irq_cpu_rmap(adapter->netdev->rx_cpu_rmap); | |
2292 | adapter->netdev->rx_cpu_rmap = NULL; | |
2293 | } | |
2294 | #endif /* CONFIG_RFS_ACCEL */ | |
2295 | ||
e02ae6ed | 2296 | for (i = ENA_IO_IRQ_FIRST_IDX; i < ENA_MAX_MSIX_VEC(io_queue_count); i++) { |
1738cd3e NB |
2297 | irq = &adapter->irq_tbl[i]; |
2298 | irq_set_affinity_hint(irq->vector, NULL); | |
2299 | free_irq(irq->vector, irq->data); | |
2300 | } | |
2301 | } | |
2302 | ||
06443684 NB |
2303 | static void ena_disable_msix(struct ena_adapter *adapter) |
2304 | { | |
2305 | if (test_and_clear_bit(ENA_FLAG_MSIX_ENABLED, &adapter->flags)) | |
2306 | pci_free_irq_vectors(adapter->pdev); | |
2307 | } | |
2308 | ||
1738cd3e NB |
2309 | static void ena_disable_io_intr_sync(struct ena_adapter *adapter) |
2310 | { | |
e02ae6ed | 2311 | u32 io_queue_count = adapter->num_io_queues + adapter->xdp_num_queues; |
1738cd3e NB |
2312 | int i; |
2313 | ||
2314 | if (!netif_running(adapter->netdev)) | |
2315 | return; | |
2316 | ||
e02ae6ed | 2317 | for (i = ENA_IO_IRQ_FIRST_IDX; i < ENA_MAX_MSIX_VEC(io_queue_count); i++) |
1738cd3e NB |
2318 | synchronize_irq(adapter->irq_tbl[i].vector); |
2319 | } | |
2320 | ||
548c4940 SJ |
2321 | static void ena_del_napi_in_range(struct ena_adapter *adapter, |
2322 | int first_index, | |
2323 | int count) | |
1738cd3e NB |
2324 | { |
2325 | int i; | |
2326 | ||
548c4940 | 2327 | for (i = first_index; i < first_index + count; i++) { |
8b147f6f SA |
2328 | netif_napi_del(&adapter->ena_napi[i].napi); |
2329 | ||
2330 | WARN_ON(!ENA_IS_XDP_INDEX(adapter, i) && | |
2331 | adapter->ena_napi[i].xdp_ring); | |
548c4940 | 2332 | } |
1738cd3e NB |
2333 | } |
2334 | ||
548c4940 SJ |
2335 | static void ena_init_napi_in_range(struct ena_adapter *adapter, |
2336 | int first_index, int count) | |
1738cd3e | 2337 | { |
1738cd3e NB |
2338 | int i; |
2339 | ||
548c4940 | 2340 | for (i = first_index; i < first_index + count; i++) { |
d89d8d4d | 2341 | struct ena_napi *napi = &adapter->ena_napi[i]; |
1738cd3e | 2342 | |
b48b89f9 JK |
2343 | netif_napi_add(adapter->netdev, &napi->napi, |
2344 | ENA_IS_XDP_INDEX(adapter, i) ? ena_xdp_io_poll : ena_io_poll); | |
548c4940 SJ |
2345 | |
2346 | if (!ENA_IS_XDP_INDEX(adapter, i)) { | |
2347 | napi->rx_ring = &adapter->rx_ring[i]; | |
2348 | napi->tx_ring = &adapter->tx_ring[i]; | |
2349 | } else { | |
2350 | napi->xdp_ring = &adapter->tx_ring[i]; | |
2351 | } | |
1738cd3e NB |
2352 | napi->qid = i; |
2353 | } | |
2354 | } | |
2355 | ||
548c4940 SJ |
2356 | static void ena_napi_disable_in_range(struct ena_adapter *adapter, |
2357 | int first_index, | |
2358 | int count) | |
1738cd3e NB |
2359 | { |
2360 | int i; | |
2361 | ||
548c4940 | 2362 | for (i = first_index; i < first_index + count; i++) |
1738cd3e NB |
2363 | napi_disable(&adapter->ena_napi[i].napi); |
2364 | } | |
2365 | ||
548c4940 SJ |
2366 | static void ena_napi_enable_in_range(struct ena_adapter *adapter, |
2367 | int first_index, | |
2368 | int count) | |
1738cd3e NB |
2369 | { |
2370 | int i; | |
2371 | ||
548c4940 | 2372 | for (i = first_index; i < first_index + count; i++) |
1738cd3e NB |
2373 | napi_enable(&adapter->ena_napi[i].napi); |
2374 | } | |
2375 | ||
1738cd3e NB |
2376 | /* Configure the Rx forwarding */ |
2377 | static int ena_rss_configure(struct ena_adapter *adapter) | |
2378 | { | |
2379 | struct ena_com_dev *ena_dev = adapter->ena_dev; | |
2380 | int rc; | |
2381 | ||
2382 | /* In case the RSS table wasn't initialized by probe */ | |
2383 | if (!ena_dev->rss.tbl_log_size) { | |
2384 | rc = ena_rss_init_default(adapter); | |
d1497638 | 2385 | if (rc && (rc != -EOPNOTSUPP)) { |
1738cd3e | 2386 | netif_err(adapter, ifup, adapter->netdev, |
46143e58 | 2387 | "Failed to init RSS rc: %d\n", rc); |
1738cd3e NB |
2388 | return rc; |
2389 | } | |
2390 | } | |
2391 | ||
2392 | /* Set indirect table */ | |
2393 | rc = ena_com_indirect_table_set(ena_dev); | |
d1497638 | 2394 | if (unlikely(rc && rc != -EOPNOTSUPP)) |
1738cd3e NB |
2395 | return rc; |
2396 | ||
2397 | /* Configure hash function (if supported) */ | |
2398 | rc = ena_com_set_hash_function(ena_dev); | |
d1497638 | 2399 | if (unlikely(rc && (rc != -EOPNOTSUPP))) |
1738cd3e NB |
2400 | return rc; |
2401 | ||
2402 | /* Configure hash inputs (if supported) */ | |
2403 | rc = ena_com_set_hash_ctrl(ena_dev); | |
d1497638 | 2404 | if (unlikely(rc && (rc != -EOPNOTSUPP))) |
1738cd3e NB |
2405 | return rc; |
2406 | ||
2407 | return 0; | |
2408 | } | |
2409 | ||
2410 | static int ena_up_complete(struct ena_adapter *adapter) | |
2411 | { | |
7853b49c | 2412 | int rc; |
1738cd3e NB |
2413 | |
2414 | rc = ena_rss_configure(adapter); | |
2415 | if (rc) | |
2416 | return rc; | |
2417 | ||
1738cd3e NB |
2418 | ena_change_mtu(adapter->netdev, adapter->netdev->mtu); |
2419 | ||
2420 | ena_refill_all_rx_bufs(adapter); | |
2421 | ||
2422 | /* enable transmits */ | |
2423 | netif_tx_start_all_queues(adapter->netdev); | |
2424 | ||
548c4940 SJ |
2425 | ena_napi_enable_in_range(adapter, |
2426 | 0, | |
2427 | adapter->xdp_num_queues + adapter->num_io_queues); | |
1738cd3e | 2428 | |
1738cd3e NB |
2429 | return 0; |
2430 | } | |
2431 | ||
2432 | static int ena_create_io_tx_queue(struct ena_adapter *adapter, int qid) | |
2433 | { | |
38005ca8 | 2434 | struct ena_com_create_io_ctx ctx; |
1738cd3e NB |
2435 | struct ena_com_dev *ena_dev; |
2436 | struct ena_ring *tx_ring; | |
2437 | u32 msix_vector; | |
2438 | u16 ena_qid; | |
2439 | int rc; | |
2440 | ||
2441 | ena_dev = adapter->ena_dev; | |
2442 | ||
2443 | tx_ring = &adapter->tx_ring[qid]; | |
2444 | msix_vector = ENA_IO_IRQ_IDX(qid); | |
2445 | ena_qid = ENA_IO_TXQ_IDX(qid); | |
2446 | ||
38005ca8 AK |
2447 | memset(&ctx, 0x0, sizeof(ctx)); |
2448 | ||
1738cd3e NB |
2449 | ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_TX; |
2450 | ctx.qid = ena_qid; | |
2451 | ctx.mem_queue_type = ena_dev->tx_mem_queue_type; | |
2452 | ctx.msix_vector = msix_vector; | |
13ca32a6 | 2453 | ctx.queue_size = tx_ring->ring_size; |
a8ee104f | 2454 | ctx.numa_node = tx_ring->numa_node; |
1738cd3e NB |
2455 | |
2456 | rc = ena_com_create_io_queue(ena_dev, &ctx); | |
2457 | if (rc) { | |
2458 | netif_err(adapter, ifup, adapter->netdev, | |
2459 | "Failed to create I/O TX queue num %d rc: %d\n", | |
46143e58 | 2460 | qid, rc); |
1738cd3e NB |
2461 | return rc; |
2462 | } | |
2463 | ||
2464 | rc = ena_com_get_io_handlers(ena_dev, ena_qid, | |
2465 | &tx_ring->ena_com_io_sq, | |
2466 | &tx_ring->ena_com_io_cq); | |
2467 | if (rc) { | |
2468 | netif_err(adapter, ifup, adapter->netdev, | |
2469 | "Failed to get TX queue handlers. TX queue num %d rc: %d\n", | |
2470 | qid, rc); | |
2471 | ena_com_destroy_io_queue(ena_dev, ena_qid); | |
2d2c600a | 2472 | return rc; |
1738cd3e NB |
2473 | } |
2474 | ||
2475 | ena_com_update_numa_node(tx_ring->ena_com_io_cq, ctx.numa_node); | |
2476 | return rc; | |
2477 | } | |
2478 | ||
548c4940 SJ |
2479 | static int ena_create_io_tx_queues_in_range(struct ena_adapter *adapter, |
2480 | int first_index, int count) | |
1738cd3e NB |
2481 | { |
2482 | struct ena_com_dev *ena_dev = adapter->ena_dev; | |
2483 | int rc, i; | |
2484 | ||
548c4940 | 2485 | for (i = first_index; i < first_index + count; i++) { |
1738cd3e NB |
2486 | rc = ena_create_io_tx_queue(adapter, i); |
2487 | if (rc) | |
2488 | goto create_err; | |
2489 | } | |
2490 | ||
2491 | return 0; | |
2492 | ||
2493 | create_err: | |
548c4940 | 2494 | while (i-- > first_index) |
1738cd3e NB |
2495 | ena_com_destroy_io_queue(ena_dev, ENA_IO_TXQ_IDX(i)); |
2496 | ||
2497 | return rc; | |
2498 | } | |
2499 | ||
2500 | static int ena_create_io_rx_queue(struct ena_adapter *adapter, int qid) | |
2501 | { | |
2502 | struct ena_com_dev *ena_dev; | |
38005ca8 | 2503 | struct ena_com_create_io_ctx ctx; |
1738cd3e NB |
2504 | struct ena_ring *rx_ring; |
2505 | u32 msix_vector; | |
2506 | u16 ena_qid; | |
2507 | int rc; | |
2508 | ||
2509 | ena_dev = adapter->ena_dev; | |
2510 | ||
2511 | rx_ring = &adapter->rx_ring[qid]; | |
2512 | msix_vector = ENA_IO_IRQ_IDX(qid); | |
2513 | ena_qid = ENA_IO_RXQ_IDX(qid); | |
2514 | ||
38005ca8 AK |
2515 | memset(&ctx, 0x0, sizeof(ctx)); |
2516 | ||
1738cd3e NB |
2517 | ctx.qid = ena_qid; |
2518 | ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_RX; | |
2519 | ctx.mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST; | |
2520 | ctx.msix_vector = msix_vector; | |
13ca32a6 | 2521 | ctx.queue_size = rx_ring->ring_size; |
a8ee104f | 2522 | ctx.numa_node = rx_ring->numa_node; |
1738cd3e NB |
2523 | |
2524 | rc = ena_com_create_io_queue(ena_dev, &ctx); | |
2525 | if (rc) { | |
2526 | netif_err(adapter, ifup, adapter->netdev, | |
2527 | "Failed to create I/O RX queue num %d rc: %d\n", | |
2528 | qid, rc); | |
2529 | return rc; | |
2530 | } | |
2531 | ||
2532 | rc = ena_com_get_io_handlers(ena_dev, ena_qid, | |
2533 | &rx_ring->ena_com_io_sq, | |
2534 | &rx_ring->ena_com_io_cq); | |
2535 | if (rc) { | |
2536 | netif_err(adapter, ifup, adapter->netdev, | |
2537 | "Failed to get RX queue handlers. RX queue num %d rc: %d\n", | |
2538 | qid, rc); | |
838c93dc | 2539 | goto err; |
1738cd3e NB |
2540 | } |
2541 | ||
2542 | ena_com_update_numa_node(rx_ring->ena_com_io_cq, ctx.numa_node); | |
2543 | ||
838c93dc SJ |
2544 | return rc; |
2545 | err: | |
2546 | ena_com_destroy_io_queue(ena_dev, ena_qid); | |
1738cd3e NB |
2547 | return rc; |
2548 | } | |
2549 | ||
2550 | static int ena_create_all_io_rx_queues(struct ena_adapter *adapter) | |
2551 | { | |
2552 | struct ena_com_dev *ena_dev = adapter->ena_dev; | |
2553 | int rc, i; | |
2554 | ||
faa615f9 | 2555 | for (i = 0; i < adapter->num_io_queues; i++) { |
1738cd3e NB |
2556 | rc = ena_create_io_rx_queue(adapter, i); |
2557 | if (rc) | |
2558 | goto create_err; | |
282faf61 | 2559 | INIT_WORK(&adapter->ena_napi[i].dim.work, ena_dim_work); |
1738cd3e NB |
2560 | } |
2561 | ||
2562 | return 0; | |
2563 | ||
2564 | create_err: | |
282faf61 AK |
2565 | while (i--) { |
2566 | cancel_work_sync(&adapter->ena_napi[i].dim.work); | |
1738cd3e | 2567 | ena_com_destroy_io_queue(ena_dev, ENA_IO_RXQ_IDX(i)); |
282faf61 | 2568 | } |
1738cd3e NB |
2569 | |
2570 | return rc; | |
2571 | } | |
2572 | ||
13ca32a6 | 2573 | static void set_io_rings_size(struct ena_adapter *adapter, |
548c4940 SJ |
2574 | int new_tx_size, |
2575 | int new_rx_size) | |
13ca32a6 SJ |
2576 | { |
2577 | int i; | |
2578 | ||
faa615f9 | 2579 | for (i = 0; i < adapter->num_io_queues; i++) { |
13ca32a6 SJ |
2580 | adapter->tx_ring[i].ring_size = new_tx_size; |
2581 | adapter->rx_ring[i].ring_size = new_rx_size; | |
2582 | } | |
2583 | } | |
2584 | ||
2585 | /* This function allows queue allocation to back off when the system is | 
2586 | * low on memory. If there is not enough memory to allocate io queues | 
2587 | * the driver will try to allocate smaller queues. | 
2588 | * | 
2589 | * The backoff algorithm is as follows: | 
2590 | * 1. Try to allocate TX and RX queues. If successful: | 
2591 | * 1.1. return success. | 
2592 | * | 
2593 | * 2. Halve the size of the larger of the RX and TX queues (or both, if they are the same size). | 
2594 | * | 
2595 | * 3. If TX or RX is smaller than 256: | 
2596 | * 3.1. return failure. | 
2597 | * 4. Else: | 
2598 | * 4.1. go back to 1. | 
2599 | */ | |
2600 | static int create_queues_with_size_backoff(struct ena_adapter *adapter) | |
2601 | { | |
2602 | int rc, cur_rx_ring_size, cur_tx_ring_size; | |
2603 | int new_rx_ring_size, new_tx_ring_size; | |
2604 | ||
2605 | /* Current queue sizes might be set smaller than the requested | 
2606 | * ones due to past queue allocation failures. | |
2607 | */ | |
2608 | set_io_rings_size(adapter, adapter->requested_tx_ring_size, | |
46143e58 | 2609 | adapter->requested_rx_ring_size); |
13ca32a6 SJ |
2610 | |
2611 | while (1) { | |
548c4940 SJ |
2612 | if (ena_xdp_present(adapter)) { |
2613 | rc = ena_setup_and_create_all_xdp_queues(adapter); | |
2614 | ||
2615 | if (rc) | |
2616 | goto err_setup_tx; | |
2617 | } | |
2618 | rc = ena_setup_tx_resources_in_range(adapter, | |
2619 | 0, | |
2620 | adapter->num_io_queues); | |
13ca32a6 SJ |
2621 | if (rc) |
2622 | goto err_setup_tx; | |
2623 | ||
548c4940 SJ |
2624 | rc = ena_create_io_tx_queues_in_range(adapter, |
2625 | 0, | |
2626 | adapter->num_io_queues); | |
13ca32a6 SJ |
2627 | if (rc) |
2628 | goto err_create_tx_queues; | |
2629 | ||
2630 | rc = ena_setup_all_rx_resources(adapter); | |
2631 | if (rc) | |
2632 | goto err_setup_rx; | |
2633 | ||
2634 | rc = ena_create_all_io_rx_queues(adapter); | |
2635 | if (rc) | |
2636 | goto err_create_rx_queues; | |
2637 | ||
2638 | return 0; | |
2639 | ||
2640 | err_create_rx_queues: | |
2641 | ena_free_all_io_rx_resources(adapter); | |
2642 | err_setup_rx: | |
2643 | ena_destroy_all_tx_queues(adapter); | |
2644 | err_create_tx_queues: | |
2645 | ena_free_all_io_tx_resources(adapter); | |
2646 | err_setup_tx: | |
2647 | if (rc != -ENOMEM) { | |
2648 | netif_err(adapter, ifup, adapter->netdev, | |
2649 | "Queue creation failed with error code %d\n", | |
46143e58 | 2650 | rc); |
13ca32a6 SJ |
2651 | return rc; |
2652 | } | |
2653 | ||
2654 | cur_tx_ring_size = adapter->tx_ring[0].ring_size; | |
2655 | cur_rx_ring_size = adapter->rx_ring[0].ring_size; | |
2656 | ||
2657 | netif_err(adapter, ifup, adapter->netdev, | |
2658 | "Not enough memory to create queues with sizes TX=%d, RX=%d\n", | |
2659 | cur_tx_ring_size, cur_rx_ring_size); | |
2660 | ||
2661 | new_tx_ring_size = cur_tx_ring_size; | |
2662 | new_rx_ring_size = cur_rx_ring_size; | |
2663 | ||
2664 | /* Decrease the size of the larger queue, or | |
2665 | * decrease both if they are the same size. | |
2666 | */ | |
2667 | if (cur_rx_ring_size <= cur_tx_ring_size) | |
2668 | new_tx_ring_size = cur_tx_ring_size / 2; | |
2669 | if (cur_rx_ring_size >= cur_tx_ring_size) | |
2670 | new_rx_ring_size = cur_rx_ring_size / 2; | |
2671 | ||
3e5bfb18 | 2672 | if (new_tx_ring_size < ENA_MIN_RING_SIZE || |
46143e58 | 2673 | new_rx_ring_size < ENA_MIN_RING_SIZE) { |
13ca32a6 SJ |
2674 | netif_err(adapter, ifup, adapter->netdev, |
2675 | "Queue creation failed with the smallest possible queue size of %d for both queues. Not retrying with smaller queues\n", | |
2676 | ENA_MIN_RING_SIZE); | |
2677 | return rc; | |
2678 | } | |
2679 | ||
2680 | netif_err(adapter, ifup, adapter->netdev, | |
2681 | "Retrying queue creation with sizes TX=%d, RX=%d\n", | |
2682 | new_tx_ring_size, | |
2683 | new_rx_ring_size); | |
2684 | ||
2685 | set_io_rings_size(adapter, new_tx_ring_size, | |
2686 | new_rx_ring_size); | |
2687 | } | |
2688 | } | |
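/* Worked example of the backoff (requested sizes assumed): TX=8192,
 * RX=1024. Each allocation failure halves the larger ring:
 * 4096/1024 -> 2048/1024 -> 1024/1024 -> 512/512 -> 256/256.
 * One more failure would drop below ENA_MIN_RING_SIZE (256 per the
 * algorithm above), so the last error is returned instead of retrying.
 */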
2689 | ||
1738cd3e NB |
2690 | static int ena_up(struct ena_adapter *adapter) |
2691 | { | |
548c4940 | 2692 | int io_queue_count, rc, i; |
1738cd3e | 2693 | |
f0525298 | 2694 | netif_dbg(adapter, ifup, adapter->netdev, "%s\n", __func__); |
1738cd3e | 2695 | |
548c4940 | 2696 | io_queue_count = adapter->num_io_queues + adapter->xdp_num_queues; |
1738cd3e NB |
2697 | ena_setup_io_intr(adapter); |
2698 | ||
78a55d05 AK |
2699 | /* napi poll functions should be initialized before running |
2700 | * request_irq(), to handle a rare condition where there is a pending | |
2701 | * interrupt, causing the ISR to fire immediately while the poll | |
2702 | * function wasn't set yet, causing a null dereference | |
2703 | */ | |
548c4940 | 2704 | ena_init_napi_in_range(adapter, 0, io_queue_count); |
78a55d05 | 2705 | |
1738cd3e NB |
2706 | rc = ena_request_io_irq(adapter); |
2707 | if (rc) | |
2708 | goto err_req_irq; | |
2709 | ||
13ca32a6 | 2710 | rc = create_queues_with_size_backoff(adapter); |
1738cd3e | 2711 | if (rc) |
13ca32a6 | 2712 | goto err_create_queues_with_backoff; |
1738cd3e NB |
2713 | |
2714 | rc = ena_up_complete(adapter); | |
2715 | if (rc) | |
2716 | goto err_up; | |
2717 | ||
2718 | if (test_bit(ENA_FLAG_LINK_UP, &adapter->flags)) | |
2719 | netif_carrier_on(adapter->netdev); | |
2720 | ||
89dd735e SA |
2721 | ena_increase_stat(&adapter->dev_stats.interface_up, 1, |
2722 | &adapter->syncp); | |
1738cd3e NB |
2723 | |
2724 | set_bit(ENA_FLAG_DEV_UP, &adapter->flags); | |
2725 | ||
7853b49c | 2726 | /* Enable completion queues interrupt */ |
faa615f9 | 2727 | for (i = 0; i < adapter->num_io_queues; i++) |
7853b49c NB |
2728 | ena_unmask_interrupt(&adapter->tx_ring[i], |
2729 | &adapter->rx_ring[i]); | |
2730 | ||
2731 | /* Schedule napi in case we had pending packets | 
2732 | * from the last time we disabled napi | 
2733 | */ | |
548c4940 | 2734 | for (i = 0; i < io_queue_count; i++) |
7853b49c NB |
2735 | napi_schedule(&adapter->ena_napi[i].napi); |
2736 | ||
1738cd3e NB |
2737 | return rc; |
2738 | ||
2739 | err_up: | |
1738cd3e | 2740 | ena_destroy_all_tx_queues(adapter); |
1738cd3e | 2741 | ena_free_all_io_tx_resources(adapter); |
13ca32a6 SJ |
2742 | ena_destroy_all_rx_queues(adapter); |
2743 | ena_free_all_io_rx_resources(adapter); | |
2744 | err_create_queues_with_backoff: | |
1738cd3e NB |
2745 | ena_free_io_irq(adapter); |
2746 | err_req_irq: | |
548c4940 | 2747 | ena_del_napi_in_range(adapter, 0, io_queue_count); |
1738cd3e NB |
2748 | |
2749 | return rc; | |
2750 | } | |
2751 | ||
2752 | static void ena_down(struct ena_adapter *adapter) | |
2753 | { | |
548c4940 SJ |
2754 | int io_queue_count = adapter->num_io_queues + adapter->xdp_num_queues; |
2755 | ||
1738cd3e NB |
2756 | netif_info(adapter, ifdown, adapter->netdev, "%s\n", __func__); |
2757 | ||
2758 | clear_bit(ENA_FLAG_DEV_UP, &adapter->flags); | |
2759 | ||
89dd735e SA |
2760 | ena_increase_stat(&adapter->dev_stats.interface_down, 1, |
2761 | &adapter->syncp); | |
1738cd3e | 2762 | |
1738cd3e NB |
2763 | netif_carrier_off(adapter->netdev); |
2764 | netif_tx_disable(adapter->netdev); | |
2765 | ||
3f6159db | 2766 | /* After this point the napi handler won't enable the tx queue */ |
548c4940 | 2767 | ena_napi_disable_in_range(adapter, 0, io_queue_count); |
3f6159db | 2768 | |
1738cd3e | 2769 | /* After destroying the queues there won't be any new interrupts */ | 
3f6159db NB |
2770 | |
2771 | if (test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags)) { | |
2772 | int rc; | |
2773 | ||
e2eed0e3 | 2774 | rc = ena_com_dev_reset(adapter->ena_dev, adapter->reset_reason); |
3f6159db | 2775 | if (rc) |
f0525298 SA |
2776 | netif_err(adapter, ifdown, adapter->netdev, |
2777 | "Device reset failed\n"); | |
58a54b9c AK |
2778 | /* stop submitting admin commands on a device that was reset */ |
2779 | ena_com_set_admin_running_state(adapter->ena_dev, false); | |
3f6159db NB |
2780 | } |
2781 | ||
1738cd3e NB |
2782 | ena_destroy_all_io_queues(adapter); |
2783 | ||
2784 | ena_disable_io_intr_sync(adapter); | |
2785 | ena_free_io_irq(adapter); | |
548c4940 | 2786 | ena_del_napi_in_range(adapter, 0, io_queue_count); |
1738cd3e NB |
2787 | |
2788 | ena_free_all_tx_bufs(adapter); | |
2789 | ena_free_all_rx_bufs(adapter); | |
2790 | ena_free_all_io_tx_resources(adapter); | |
2791 | ena_free_all_io_rx_resources(adapter); | |
2792 | } | |
2793 | ||
2794 | /* ena_open - Called when a network interface is made active | |
2795 | * @netdev: network interface device structure | |
2796 | * | |
2797 | * Returns 0 on success, negative value on failure | |
2798 | * | |
2799 | * The open entry point is called when a network interface is made | |
2800 | * active by the system (IFF_UP). At this point all resources needed | |
2801 | * for transmit and receive operations are allocated, the interrupt | |
2802 | * handler is registered with the OS, the watchdog timer is started, | |
2803 | * and the stack is notified that the interface is ready. | |
2804 | */ | |
2805 | static int ena_open(struct net_device *netdev) | |
2806 | { | |
2807 | struct ena_adapter *adapter = netdev_priv(netdev); | |
2808 | int rc; | |
2809 | ||
2810 | /* Notify the stack of the actual queue counts. */ | |
faa615f9 | 2811 | rc = netif_set_real_num_tx_queues(netdev, adapter->num_io_queues); |
1738cd3e NB |
2812 | if (rc) { |
2813 | netif_err(adapter, ifup, netdev, "Can't set num tx queues\n"); | |
2814 | return rc; | |
2815 | } | |
2816 | ||
faa615f9 | 2817 | rc = netif_set_real_num_rx_queues(netdev, adapter->num_io_queues); |
1738cd3e NB |
2818 | if (rc) { |
2819 | netif_err(adapter, ifup, netdev, "Can't set num rx queues\n"); | |
2820 | return rc; | |
2821 | } | |
2822 | ||
2823 | rc = ena_up(adapter); | |
2824 | if (rc) | |
2825 | return rc; | |
2826 | ||
2827 | return rc; | |
2828 | } | |
2829 | ||
2830 | /* ena_close - Disables a network interface | |
2831 | * @netdev: network interface device structure | |
2832 | * | |
2833 | * Returns 0, this is not allowed to fail | |
2834 | * | |
2835 | * The close entry point is called when an interface is de-activated | |
2836 | * by the OS. The hardware is still under the driver's control, but | 
2837 | * needs to be disabled. A global MAC reset is issued to stop the | |
2838 | * hardware, and all transmit and receive resources are freed. | |
2839 | */ | |
2840 | static int ena_close(struct net_device *netdev) | |
2841 | { | |
2842 | struct ena_adapter *adapter = netdev_priv(netdev); | |
2843 | ||
2844 | netif_dbg(adapter, ifdown, netdev, "%s\n", __func__); | |
2845 | ||
58a54b9c AK |
2846 | if (!test_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags)) |
2847 | return 0; | |
2848 | ||
1738cd3e NB |
2849 | if (test_bit(ENA_FLAG_DEV_UP, &adapter->flags)) |
2850 | ena_down(adapter); | |
2851 | ||
ee4552aa NB |
2852 | /* Check the device status and issue a reset if needed */ | 
2853 | check_for_admin_com_state(adapter); | |
2854 | if (unlikely(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) { | |
2855 | netif_err(adapter, ifdown, adapter->netdev, | |
2856 | "Destroy failure, restarting device\n"); | |
2857 | ena_dump_stats_to_dmesg(adapter); | |
2858 | /* rtnl lock already obtained in dev_ioctl() layer */ | |
cfa324a5 | 2859 | ena_destroy_device(adapter, false); |
ee4552aa NB |
2860 | ena_restore_device(adapter); |
2861 | } | |
2862 | ||
1738cd3e NB |
2863 | return 0; |
2864 | } | |
2865 | ||
b0c59e53 SA |
2866 | int ena_update_queue_params(struct ena_adapter *adapter, |
2867 | u32 new_tx_size, | |
2868 | u32 new_rx_size, | |
2869 | u32 new_llq_header_len) | |
eece4d2a | 2870 | { |
b0c59e53 SA |
2871 | bool dev_was_up, large_llq_changed = false; |
2872 | int rc = 0; | |
eece4d2a | 2873 | |
2413ea97 | 2874 | dev_was_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags); |
eece4d2a SJ |
2875 | ena_close(adapter->netdev); |
2876 | adapter->requested_tx_ring_size = new_tx_size; | |
2877 | adapter->requested_rx_ring_size = new_rx_size; | |
548c4940 SJ |
2878 | ena_init_io_rings(adapter, |
2879 | 0, | |
2880 | adapter->xdp_num_queues + | |
2881 | adapter->num_io_queues); | |
b0c59e53 SA |
2882 | |
2883 | large_llq_changed = adapter->ena_dev->tx_mem_queue_type == | |
2884 | ENA_ADMIN_PLACEMENT_POLICY_DEV; | |
2885 | large_llq_changed &= | |
2886 | new_llq_header_len != adapter->ena_dev->tx_max_header_size; | |
2887 | ||
2888 | /* A check that the configuration is valid is done by the caller */ | 
2889 | if (large_llq_changed) { | |
2890 | adapter->large_llq_header_enabled = !adapter->large_llq_header_enabled; | |
2891 | ||
2892 | ena_destroy_device(adapter, false); | |
2893 | rc = ena_restore_device(adapter); | |
2894 | } | |
2895 | ||
2896 | return dev_was_up && !rc ? ena_up(adapter) : rc; | |
2413ea97 SJ |
2897 | } |
2898 | ||
c7062aae DA |
2899 | int ena_set_rx_copybreak(struct ena_adapter *adapter, u32 rx_copybreak) |
2900 | { | |
2901 | struct ena_ring *rx_ring; | |
2902 | int i; | |
2903 | ||
2904 | if (rx_copybreak > min_t(u16, adapter->netdev->mtu, ENA_PAGE_SIZE)) | |
2905 | return -EINVAL; | |
2906 | ||
2907 | adapter->rx_copybreak = rx_copybreak; | |
2908 | ||
2909 | for (i = 0; i < adapter->num_io_queues; i++) { | |
2910 | rx_ring = &adapter->rx_ring[i]; | |
2911 | rx_ring->rx_copybreak = rx_copybreak; | |
2912 | } | |
2913 | ||
2914 | return 0; | |
2915 | } | |
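/* Usage note (illustrative, assuming this is wired to the
 * ETHTOOL_RX_COPYBREAK tunable): a userspace command such as
 * "ethtool --set-tunable eth0 rx-copybreak 256" would land here, after
 * which packets up to 256 bytes take the copy path in ena_rx_skb(),
 * trading a memcpy for keeping the DMA page in place.
 */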
2916 | ||
2413ea97 SJ |
2917 | int ena_update_queue_count(struct ena_adapter *adapter, u32 new_channel_count) |
2918 | { | |
2919 | struct ena_com_dev *ena_dev = adapter->ena_dev; | |
838c93dc | 2920 | int prev_channel_count; |
2413ea97 SJ |
2921 | bool dev_was_up; |
2922 | ||
2923 | dev_was_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags); | |
2924 | ena_close(adapter->netdev); | |
838c93dc | 2925 | prev_channel_count = adapter->num_io_queues; |
2413ea97 | 2926 | adapter->num_io_queues = new_channel_count; |
548c4940 SJ |
2927 | if (ena_xdp_present(adapter) && |
2928 | ena_xdp_allowed(adapter) == ENA_XDP_ALLOWED) { | |
2929 | adapter->xdp_first_ring = new_channel_count; | |
2930 | adapter->xdp_num_queues = new_channel_count; | |
838c93dc SJ |
2931 | if (prev_channel_count > new_channel_count) |
2932 | ena_xdp_exchange_program_rx_in_range(adapter, | |
2933 | NULL, | |
2934 | new_channel_count, | |
2935 | prev_channel_count); | |
2936 | else | |
2937 | ena_xdp_exchange_program_rx_in_range(adapter, | |
2938 | adapter->xdp_bpf_prog, | |
2939 | prev_channel_count, | |
2940 | new_channel_count); | |
2941 | } | |
2942 | ||
2413ea97 SJ |
2943 | /* We need to destroy the rss table so that the indirection |
2944 | * table will be reinitialized by ena_up() | |
2945 | */ | |
2946 | ena_com_rss_destroy(ena_dev); | |
548c4940 SJ |
2947 | ena_init_io_rings(adapter, |
2948 | 0, | |
2949 | adapter->xdp_num_queues + | |
2950 | adapter->num_io_queues); | |
2413ea97 | 2951 | return dev_was_up ? ena_open(adapter->netdev) : 0; |
eece4d2a SJ |
2952 | } |
2953 | ||
0e3a3f6d AK |
2954 | static void ena_tx_csum(struct ena_com_tx_ctx *ena_tx_ctx, |
2955 | struct sk_buff *skb, | |
2956 | bool disable_meta_caching) | |
1738cd3e NB |
2957 | { |
2958 | u32 mss = skb_shinfo(skb)->gso_size; | |
2959 | struct ena_com_tx_meta *ena_meta = &ena_tx_ctx->ena_meta; | |
2960 | u8 l4_protocol = 0; | |
2961 | ||
2962 | if ((skb->ip_summed == CHECKSUM_PARTIAL) || mss) { | |
2963 | ena_tx_ctx->l4_csum_enable = 1; | |
2964 | if (mss) { | |
2965 | ena_tx_ctx->tso_enable = 1; | |
2966 | ena_meta->l4_hdr_len = tcp_hdr(skb)->doff; | |
2967 | ena_tx_ctx->l4_csum_partial = 0; | |
2968 | } else { | |
2969 | ena_tx_ctx->tso_enable = 0; | |
2970 | ena_meta->l4_hdr_len = 0; | |
2971 | ena_tx_ctx->l4_csum_partial = 1; | |
2972 | } | |
2973 | ||
2974 | switch (ip_hdr(skb)->version) { | |
2975 | case IPVERSION: | |
2976 | ena_tx_ctx->l3_proto = ENA_ETH_IO_L3_PROTO_IPV4; | |
2977 | if (ip_hdr(skb)->frag_off & htons(IP_DF)) | |
2978 | ena_tx_ctx->df = 1; | |
2979 | if (mss) | |
2980 | ena_tx_ctx->l3_csum_enable = 1; | |
2981 | l4_protocol = ip_hdr(skb)->protocol; | |
2982 | break; | |
2983 | case 6: | |
2984 | ena_tx_ctx->l3_proto = ENA_ETH_IO_L3_PROTO_IPV6; | |
2985 | l4_protocol = ipv6_hdr(skb)->nexthdr; | |
2986 | break; | |
2987 | default: | |
2988 | break; | |
2989 | } | |
2990 | ||
2991 | if (l4_protocol == IPPROTO_TCP) | |
2992 | ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_TCP; | |
2993 | else | |
2994 | ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_UDP; | |
2995 | ||
2996 | ena_meta->mss = mss; | |
2997 | ena_meta->l3_hdr_len = skb_network_header_len(skb); | |
2998 | ena_meta->l3_hdr_offset = skb_network_offset(skb); | |
2999 | ena_tx_ctx->meta_valid = 1; | |
0e3a3f6d AK |
3000 | } else if (disable_meta_caching) { |
3001 | memset(ena_meta, 0, sizeof(*ena_meta)); | |
3002 | ena_tx_ctx->meta_valid = 1; | |
1738cd3e NB |
3003 | } else { |
3004 | ena_tx_ctx->meta_valid = 0; | |
3005 | } | |
3006 | } | |
3007 | ||
3008 | static int ena_check_and_linearize_skb(struct ena_ring *tx_ring, | |
3009 | struct sk_buff *skb) | |
3010 | { | |
3011 | int num_frags, header_len, rc; | |
3012 | ||
3013 | num_frags = skb_shinfo(skb)->nr_frags; | |
3014 | header_len = skb_headlen(skb); | |
3015 | ||
3016 | if (num_frags < tx_ring->sgl_size) | |
3017 | return 0; | |
3018 | ||
3019 | if ((num_frags == tx_ring->sgl_size) && | |
3020 | (header_len < tx_ring->tx_max_header_size)) | |
3021 | return 0; | |
3022 | ||
89dd735e | 3023 | ena_increase_stat(&tx_ring->tx_stats.linearize, 1, &tx_ring->syncp); |
1738cd3e NB |
3024 | |
3025 | rc = skb_linearize(skb); | |
3026 | if (unlikely(rc)) { | |
89dd735e SA |
3027 | ena_increase_stat(&tx_ring->tx_stats.linearize_failed, 1, |
3028 | &tx_ring->syncp); | |
1738cd3e NB |
3029 | } |
3030 | ||
3031 | return rc; | |
3032 | } | |
3033 | ||
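/* Map the skb for DMA. In LLQ mode, up to tx_max_header_size bytes are
 * pushed inline to the device; "delta" tracks pushed bytes that came from
 * the frags rather than the linear part, so that those bytes are skipped
 * when the frags themselves are mapped.
 */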
38005ca8 AK |
3034 | static int ena_tx_map_skb(struct ena_ring *tx_ring, |
3035 | struct ena_tx_buffer *tx_info, | |
3036 | struct sk_buff *skb, | |
3037 | void **push_hdr, | |
3038 | u16 *header_len) | |
1738cd3e | 3039 | { |
38005ca8 | 3040 | struct ena_adapter *adapter = tx_ring->adapter; |
1738cd3e | 3041 | struct ena_com_buf *ena_buf; |
1738cd3e | 3042 | dma_addr_t dma; |
38005ca8 AK |
3043 | u32 skb_head_len, frag_len, last_frag; |
3044 | u16 push_len = 0; | |
3045 | u16 delta = 0; | |
3046 | int i = 0; | |
1738cd3e | 3047 | |
38005ca8 | 3048 | skb_head_len = skb_headlen(skb); |
1738cd3e | 3049 | tx_info->skb = skb; |
38005ca8 | 3050 | ena_buf = tx_info->bufs; |
1738cd3e NB |
3051 | |
3052 | if (tx_ring->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) { | |
38005ca8 AK |
3053 | /* When the device is in LLQ mode, the driver copies |
3054 | * the header into the device memory space. |
3055 | * The ena_com layer assumes the header is in a linear |
3056 | * memory space. |
3057 | * This assumption might be wrong since part of the header |
3058 | * can reside in the fragmented buffers. |
3059 | * Use skb_header_pointer to make sure the header is in a |
3060 | * linear memory space. |
3061 | */ |
3062 | ||
3063 | push_len = min_t(u32, skb->len, tx_ring->tx_max_header_size); | |
3064 | *push_hdr = skb_header_pointer(skb, 0, push_len, | |
3065 | tx_ring->push_buf_intermediate_buf); | |
3066 | *header_len = push_len; | |
3067 | if (unlikely(skb->data != *push_hdr)) { | |
89dd735e SA |
3068 | ena_increase_stat(&tx_ring->tx_stats.llq_buffer_copy, 1, |
3069 | &tx_ring->syncp); | |
38005ca8 AK |
3070 | |
3071 | delta = push_len - skb_head_len; | |
3072 | } | |
1738cd3e | 3073 | } else { |
38005ca8 AK |
3074 | *push_hdr = NULL; |
3075 | *header_len = min_t(u32, skb_head_len, | |
3076 | tx_ring->tx_max_header_size); | |
1738cd3e NB |
3077 | } |
3078 | ||
38005ca8 | 3079 | netif_dbg(adapter, tx_queued, adapter->netdev, |
1738cd3e | 3080 | "skb: %p header_buf->vaddr: %p push_len: %d\n", skb, |
38005ca8 | 3081 | *push_hdr, push_len); |
1738cd3e | 3082 | |
38005ca8 | 3083 | if (skb_head_len > push_len) { |
1738cd3e | 3084 | dma = dma_map_single(tx_ring->dev, skb->data + push_len, |
38005ca8 AK |
3085 | skb_head_len - push_len, DMA_TO_DEVICE); |
3086 | if (unlikely(dma_mapping_error(tx_ring->dev, dma))) | |
1738cd3e NB |
3087 | goto error_report_dma_error; |
3088 | ||
3089 | ena_buf->paddr = dma; | |
38005ca8 | 3090 | ena_buf->len = skb_head_len - push_len; |
1738cd3e NB |
3091 | |
3092 | ena_buf++; | |
3093 | tx_info->num_of_bufs++; | |
38005ca8 AK |
3094 | tx_info->map_linear_data = 1; |
3095 | } else { | |
3096 | tx_info->map_linear_data = 0; | |
1738cd3e NB |
3097 | } |
3098 | ||
3099 | last_frag = skb_shinfo(skb)->nr_frags; | |
3100 | ||
3101 | for (i = 0; i < last_frag; i++) { | |
3102 | const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; | |
3103 | ||
38005ca8 AK |
3104 | frag_len = skb_frag_size(frag); |
3105 | ||
3106 | if (unlikely(delta >= frag_len)) { | |
3107 | delta -= frag_len; | |
3108 | continue; | |
3109 | } | |
3110 | ||
3111 | dma = skb_frag_dma_map(tx_ring->dev, frag, delta, | |
3112 | frag_len - delta, DMA_TO_DEVICE); | |
3113 | if (unlikely(dma_mapping_error(tx_ring->dev, dma))) | |
1738cd3e NB |
3114 | goto error_report_dma_error; |
3115 | ||
3116 | ena_buf->paddr = dma; | |
38005ca8 | 3117 | ena_buf->len = frag_len - delta; |
1738cd3e | 3118 | ena_buf++; |
38005ca8 AK |
3119 | tx_info->num_of_bufs++; |
3120 | delta = 0; | |
1738cd3e NB |
3121 | } |
3122 | ||
38005ca8 AK |
3123 | return 0; |
3124 | ||
3125 | error_report_dma_error: | |
89dd735e SA |
3126 | ena_increase_stat(&tx_ring->tx_stats.dma_mapping_err, 1, |
3127 | &tx_ring->syncp); | |
bf2746e8 | 3128 | netif_warn(adapter, tx_queued, adapter->netdev, "Failed to map skb\n"); |
38005ca8 AK |
3129 | |
3130 | tx_info->skb = NULL; | |
3131 | ||
3132 | tx_info->num_of_bufs += i; | |
548c4940 | 3133 | ena_unmap_tx_buff(tx_ring, tx_info); |
38005ca8 AK |
3134 | |
3135 | return -EINVAL; | |
3136 | } | |
3137 | ||
3138 | /* Called with netif_tx_lock. */ | |
3139 | static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev) | |
3140 | { | |
3141 | struct ena_adapter *adapter = netdev_priv(dev); | |
3142 | struct ena_tx_buffer *tx_info; | |
3143 | struct ena_com_tx_ctx ena_tx_ctx; | |
3144 | struct ena_ring *tx_ring; | |
3145 | struct netdev_queue *txq; | |
3146 | void *push_hdr; | |
3147 | u16 next_to_use, req_id, header_len; | |
548c4940 | 3148 | int qid, rc; |
38005ca8 AK |
3149 | |
3150 | netif_dbg(adapter, tx_queued, dev, "%s skb %p\n", __func__, skb); | |
3151 | /* Determine which tx ring the skb will be placed on */ |
3152 | qid = skb_get_queue_mapping(skb); | |
3153 | tx_ring = &adapter->tx_ring[qid]; | |
3154 | txq = netdev_get_tx_queue(dev, qid); | |
3155 | ||
3156 | rc = ena_check_and_linearize_skb(tx_ring, skb); | |
3157 | if (unlikely(rc)) | |
3158 | goto error_drop_packet; | |
3159 | ||
3160 | skb_tx_timestamp(skb); | |
3161 | ||
3162 | next_to_use = tx_ring->next_to_use; | |
f9172498 | 3163 | req_id = tx_ring->free_ids[next_to_use]; |
38005ca8 AK |
3164 | tx_info = &tx_ring->tx_buffer_info[req_id]; |
3165 | tx_info->num_of_bufs = 0; | |
3166 | ||
3167 | WARN(tx_info->skb, "SKB isn't NULL req_id %d\n", req_id); | |
3168 | ||
3169 | rc = ena_tx_map_skb(tx_ring, tx_info, skb, &push_hdr, &header_len); | |
3170 | if (unlikely(rc)) | |
3171 | goto error_drop_packet; | |
1738cd3e NB |
3172 | |
3173 | memset(&ena_tx_ctx, 0x0, sizeof(struct ena_com_tx_ctx)); | |
3174 | ena_tx_ctx.ena_bufs = tx_info->bufs; | |
3175 | ena_tx_ctx.push_header = push_hdr; | |
3176 | ena_tx_ctx.num_bufs = tx_info->num_of_bufs; | |
3177 | ena_tx_ctx.req_id = req_id; | |
3178 | ena_tx_ctx.header_len = header_len; | |
3179 | ||
3180 | /* Set flags and metadata */ |
0e3a3f6d | 3181 | ena_tx_csum(&ena_tx_ctx, skb, tx_ring->disable_meta_caching); |
1738cd3e | 3182 | |
548c4940 SJ |
3183 | rc = ena_xmit_common(dev, |
3184 | tx_ring, | |
3185 | tx_info, | |
3186 | &ena_tx_ctx, | |
3187 | next_to_use, | |
3188 | skb->len); | |
3189 | if (rc) | |
1738cd3e | 3190 | goto error_unmap_dma; |
1738cd3e NB |
3191 | |
3192 | netdev_tx_sent_queue(txq, skb->len); | |
3193 | ||
1738cd3e NB |
3194 | /* Stop the queue when no more space is available. A packet can take up |
3195 | * to sgl_size + 2 descriptors: one for the meta descriptor and one for the |
3196 | * header (if the header is larger than tx_max_header_size). |
3197 | */ |
689b2bda AK |
3198 | if (unlikely(!ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq, |
3199 | tx_ring->sgl_size + 2))) { | |
1738cd3e NB |
3200 | netif_dbg(adapter, tx_queued, dev, "%s stop queue %d\n", |
3201 | __func__, qid); | |
3202 | ||
3203 | netif_tx_stop_queue(txq); | |
89dd735e SA |
3204 | ena_increase_stat(&tx_ring->tx_stats.queue_stop, 1, |
3205 | &tx_ring->syncp); | |
1738cd3e NB |
3206 | |
3207 | /* There is a rare condition where this function decides to |
3208 | * stop the queue but meanwhile clean_tx_irq updates |
3209 | * next_to_completion and terminates. |
3210 | * The queue would then remain stopped forever. |
37dff155 NB |
3211 | * To solve this issue add an smp_mb() to make sure that the |
3212 | * netif_tx_stop_queue() write is visible before checking whether |
3213 | * there is additional space in the queue. |
1738cd3e | 3214 | */ |
37dff155 | 3215 | smp_mb(); |
1738cd3e | 3216 | |
689b2bda AK |
3217 | if (ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq, |
3218 | ENA_TX_WAKEUP_THRESH)) { | |
1738cd3e | 3219 | netif_tx_wake_queue(txq); |
89dd735e SA |
3220 | ena_increase_stat(&tx_ring->tx_stats.queue_wakeup, 1, |
3221 | &tx_ring->syncp); | |
1738cd3e NB |
3222 | } |
3223 | } | |
3224 | ||
9e8afb05 SA |
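/* netdev_xmit_more() lets the stack batch several skbs per doorbell:
 * ring the doorbell only for the last packet of a batch, or when the
 * queue was stopped and no further packets will follow.
 */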
3225 | if (netif_xmit_stopped(txq) || !netdev_xmit_more()) |
3226 | /* Trigger the dma engine. ena_ring_tx_doorbell() |
3227 | * issues a memory barrier internally. |
37dff155 | 3228 | */ |
9e8afb05 | 3229 | ena_ring_tx_doorbell(tx_ring); |
1738cd3e NB |
3230 | |
3231 | return NETDEV_TX_OK; | |
3232 | ||
1738cd3e | 3233 | error_unmap_dma: |
548c4940 | 3234 | ena_unmap_tx_buff(tx_ring, tx_info); |
38005ca8 | 3235 | tx_info->skb = NULL; |
1738cd3e NB |
3236 | |
3237 | error_drop_packet: | |
1738cd3e NB |
3238 | dev_kfree_skb(skb); |
3239 | return NETDEV_TX_OK; | |
3240 | } | |
3241 | ||
1738cd3e | 3242 | static u16 ena_select_queue(struct net_device *dev, struct sk_buff *skb, |
a350ecce | 3243 | struct net_device *sb_dev) |
1738cd3e NB |
3244 | { |
3245 | u16 qid; | |
3246 | /* We suspect that this is good for in-kernel network services that |
3247 | * want to loop incoming skb rx to tx. With normal user-generated |
3248 | * traffic we will most probably not get here. |
3249 | */ |
3250 | if (skb_rx_queue_recorded(skb)) | |
3251 | qid = skb_get_rx_queue(skb); | |
3252 | else | |
a350ecce | 3253 | qid = netdev_pick_tx(dev, skb, NULL); |
1738cd3e NB |
3254 | |
3255 | return qid; | |
3256 | } | |
3257 | ||
46143e58 | 3258 | static void ena_config_host_info(struct ena_com_dev *ena_dev, struct pci_dev *pdev) |
1738cd3e | 3259 | { |
f0525298 | 3260 | struct device *dev = &pdev->dev; |
1738cd3e NB |
3261 | struct ena_admin_host_info *host_info; |
3262 | int rc; | |
3263 | ||
3264 | /* Allocate only the host info */ | |
3265 | rc = ena_com_allocate_host_info(ena_dev); | |
3266 | if (rc) { | |
f0525298 | 3267 | dev_err(dev, "Cannot allocate host info\n"); |
1738cd3e NB |
3268 | return; |
3269 | } | |
3270 | ||
3271 | host_info = ena_dev->host_attr.host_info; | |
3272 | ||
a5e5b2cd | 3273 | host_info->bdf = pci_dev_id(pdev); |
1738cd3e NB |
3274 | host_info->os_type = ENA_ADMIN_OS_LINUX; |
3275 | host_info->kernel_ver = LINUX_VERSION_CODE; | |
f029c781 | 3276 | strscpy(host_info->kernel_ver_str, utsname()->version, |
1738cd3e NB |
3277 | sizeof(host_info->kernel_ver_str) - 1); |
3278 | host_info->os_dist = 0; | |
3279 | strncpy(host_info->os_dist_str, utsname()->release, | |
3280 | sizeof(host_info->os_dist_str) - 1); | |
92040c6d AK |
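/* Pack major/minor/subminor into the version field; the 'K' presumably
 * marks the module type as the in-kernel driver (an assumption based on
 * the MODULE_TYPE field name).
 */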
3281 | host_info->driver_version = |
3282 | (DRV_MODULE_GEN_MAJOR) | | |
3283 | (DRV_MODULE_GEN_MINOR << ENA_ADMIN_HOST_INFO_MINOR_SHIFT) | | |
3284 | (DRV_MODULE_GEN_SUBMINOR << ENA_ADMIN_HOST_INFO_SUB_MINOR_SHIFT) | | |
3285 | ("K"[0] << ENA_ADMIN_HOST_INFO_MODULE_TYPE_SHIFT); | |
095f2f1f | 3286 | host_info->num_cpus = num_online_cpus(); |
1738cd3e | 3287 | |
bd21b0cc | 3288 | host_info->driver_supported_features = |
68f236df | 3289 | ENA_ADMIN_HOST_INFO_RX_OFFSET_MASK | |
0f505c60 | 3290 | ENA_ADMIN_HOST_INFO_INTERRUPT_MODERATION_MASK | |
0ee60edf | 3291 | ENA_ADMIN_HOST_INFO_RX_BUF_MIRRORING_MASK | |
f7d625ad DA |
3292 | ENA_ADMIN_HOST_INFO_RSS_CONFIGURABLE_FUNCTION_KEY_MASK | |
3293 | ENA_ADMIN_HOST_INFO_RX_PAGE_REUSE_MASK; | |
bd21b0cc | 3294 | |
1738cd3e NB |
3295 | rc = ena_com_set_host_attributes(ena_dev); |
3296 | if (rc) { | |
d1497638 | 3297 | if (rc == -EOPNOTSUPP) |
f0525298 | 3298 | dev_warn(dev, "Cannot set host attributes\n"); |
1738cd3e | 3299 | else |
f0525298 | 3300 | dev_err(dev, "Cannot set host attributes\n"); |
1738cd3e NB |
3301 | |
3302 | goto err; | |
3303 | } | |
3304 | ||
3305 | return; | |
3306 | ||
3307 | err: | |
3308 | ena_com_delete_host_info(ena_dev); | |
3309 | } | |
3310 | ||
3311 | static void ena_config_debug_area(struct ena_adapter *adapter) | |
3312 | { | |
3313 | u32 debug_area_size; | |
3314 | int rc, ss_count; | |
3315 | ||
3316 | ss_count = ena_get_sset_count(adapter->netdev, ETH_SS_STATS); | |
3317 | if (ss_count <= 0) { | |
3318 | netif_err(adapter, drv, adapter->netdev, | |
3319 | "SS count is negative\n"); | |
3320 | return; | |
3321 | } | |
3322 | ||
3323 | /* Allocate 32 bytes for each string and 64 bits for each value */ |
3324 | debug_area_size = ss_count * ETH_GSTRING_LEN + sizeof(u64) * ss_count; | |
3325 | ||
3326 | rc = ena_com_allocate_debug_area(adapter->ena_dev, debug_area_size); | |
3327 | if (rc) { | |
f0525298 SA |
3328 | netif_err(adapter, drv, adapter->netdev, |
3329 | "Cannot allocate debug area\n"); | |
1738cd3e NB |
3330 | return; |
3331 | } | |
3332 | ||
3333 | rc = ena_com_set_host_attributes(adapter->ena_dev); | |
3334 | if (rc) { | |
d1497638 | 3335 | if (rc == -EOPNOTSUPP) |
1738cd3e NB |
3336 | netif_warn(adapter, drv, adapter->netdev, |
3337 | "Cannot set host attributes\n"); | |
3338 | else | |
3339 | netif_err(adapter, drv, adapter->netdev, | |
3340 | "Cannot set host attributes\n"); | |
3341 | goto err; | |
3342 | } | |
3343 | ||
3344 | return; | |
3345 | err: | |
3346 | ena_com_delete_debug_area(adapter->ena_dev); | |
3347 | } | |
3348 | ||
713865da SJ |
3349 | int ena_update_hw_stats(struct ena_adapter *adapter) |
3350 | { | |
394c48e0 | 3351 | int rc; |
713865da SJ |
3352 | |
3353 | rc = ena_com_get_eni_stats(adapter->ena_dev, &adapter->eni_stats); | |
3354 | if (rc) { | |
394c48e0 | 3355 | netdev_err(adapter->netdev, "Failed to get ENI stats\n"); |
713865da SJ |
3356 | return rc; |
3357 | } | |
3358 | ||
3359 | return 0; | |
3360 | } | |
3361 | ||
bc1f4470 | 3362 | static void ena_get_stats64(struct net_device *netdev, |
3363 | struct rtnl_link_stats64 *stats) | |
1738cd3e NB |
3364 | { |
3365 | struct ena_adapter *adapter = netdev_priv(netdev); | |
d81db240 NB |
3366 | struct ena_ring *rx_ring, *tx_ring; |
3367 | unsigned int start; | |
3368 | u64 rx_drops; | |
5c665f8c | 3369 | u64 tx_drops; |
d81db240 | 3370 | int i; |
1738cd3e NB |
3371 | |
3372 | if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags)) | |
bc1f4470 | 3373 | return; |
1738cd3e | 3374 | |
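/* The u64_stats_fetch_begin/retry pairs form a seqcount read loop so the
 * 64-bit counters are read consistently even on 32-bit systems, where the
 * writer cannot update them atomically.
 */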
faa615f9 | 3375 | for (i = 0; i < adapter->num_io_queues; i++) { |
d81db240 NB |
3376 | u64 bytes, packets; |
3377 | ||
3378 | tx_ring = &adapter->tx_ring[i]; | |
1738cd3e | 3379 | |
d81db240 | 3380 | do { |
068c38ad | 3381 | start = u64_stats_fetch_begin(&tx_ring->syncp); |
d81db240 NB |
3382 | packets = tx_ring->tx_stats.cnt; |
3383 | bytes = tx_ring->tx_stats.bytes; | |
068c38ad | 3384 | } while (u64_stats_fetch_retry(&tx_ring->syncp, start)); |
1738cd3e | 3385 | |
d81db240 NB |
3386 | stats->tx_packets += packets; |
3387 | stats->tx_bytes += bytes; | |
3388 | ||
3389 | rx_ring = &adapter->rx_ring[i]; | |
3390 | ||
3391 | do { | |
068c38ad | 3392 | start = u64_stats_fetch_begin(&rx_ring->syncp); |
d81db240 NB |
3393 | packets = rx_ring->rx_stats.cnt; |
3394 | bytes = rx_ring->rx_stats.bytes; | |
068c38ad | 3395 | } while (u64_stats_fetch_retry(&rx_ring->syncp, start)); |
d81db240 NB |
3396 | |
3397 | stats->rx_packets += packets; | |
3398 | stats->rx_bytes += bytes; | |
3399 | } | |
3400 | ||
3401 | do { | |
068c38ad | 3402 | start = u64_stats_fetch_begin(&adapter->syncp); |
d81db240 | 3403 | rx_drops = adapter->dev_stats.rx_drops; |
5c665f8c | 3404 | tx_drops = adapter->dev_stats.tx_drops; |
068c38ad | 3405 | } while (u64_stats_fetch_retry(&adapter->syncp, start)); |
1738cd3e | 3406 | |
d81db240 | 3407 | stats->rx_dropped = rx_drops; |
5c665f8c | 3408 | stats->tx_dropped = tx_drops; |
1738cd3e NB |
3409 | |
3410 | stats->multicast = 0; | |
3411 | stats->collisions = 0; | |
3412 | ||
3413 | stats->rx_length_errors = 0; | |
3414 | stats->rx_crc_errors = 0; | |
3415 | stats->rx_frame_errors = 0; | |
3416 | stats->rx_fifo_errors = 0; | |
3417 | stats->rx_missed_errors = 0; | |
3418 | stats->tx_window_errors = 0; | |
3419 | ||
3420 | stats->rx_errors = 0; | |
3421 | stats->tx_errors = 0; | |
1738cd3e NB |
3422 | } |
3423 | ||
3424 | static const struct net_device_ops ena_netdev_ops = { | |
3425 | .ndo_open = ena_open, | |
3426 | .ndo_stop = ena_close, | |
3427 | .ndo_start_xmit = ena_start_xmit, | |
3428 | .ndo_select_queue = ena_select_queue, | |
3429 | .ndo_get_stats64 = ena_get_stats64, | |
3430 | .ndo_tx_timeout = ena_tx_timeout, | |
3431 | .ndo_change_mtu = ena_change_mtu, | |
3432 | .ndo_set_mac_address = NULL, | |
3433 | .ndo_validate_addr = eth_validate_addr, | |
838c93dc | 3434 | .ndo_bpf = ena_xdp, |
f1a25589 | 3435 | .ndo_xdp_xmit = ena_xdp_xmit, |
1738cd3e NB |
3436 | }; |
3437 | ||
3a091084 SA |
3438 | static void ena_calc_io_queue_size(struct ena_adapter *adapter, |
3439 | struct ena_com_dev_get_features_ctx *get_feat_ctx) | |
3440 | { | |
3441 | struct ena_admin_feature_llq_desc *llq = &get_feat_ctx->llq; | |
3442 | struct ena_com_dev *ena_dev = adapter->ena_dev; | |
3443 | u32 tx_queue_size = ENA_DEFAULT_RING_SIZE; | |
3444 | u32 rx_queue_size = ENA_DEFAULT_RING_SIZE; | |
3445 | u32 max_tx_queue_size; | |
3446 | u32 max_rx_queue_size; | |
3447 | ||
1e366688 DA |
3448 | /* If this function is called after driver load, the ring sizes have already |
3449 | * been configured. Take it into account when recalculating ring size. | |
3450 | */ | |
3451 | if (adapter->tx_ring->ring_size) | |
3452 | tx_queue_size = adapter->tx_ring->ring_size; | |
3453 | ||
3454 | if (adapter->rx_ring->ring_size) | |
3455 | rx_queue_size = adapter->rx_ring->ring_size; | |
3456 | ||
3a091084 SA |
3457 | if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) { |
3458 | struct ena_admin_queue_ext_feature_fields *max_queue_ext = | |
3459 | &get_feat_ctx->max_queue_ext.max_queue_ext; | |
3460 | max_rx_queue_size = min_t(u32, max_queue_ext->max_rx_cq_depth, | |
3461 | max_queue_ext->max_rx_sq_depth); | |
3462 | max_tx_queue_size = max_queue_ext->max_tx_cq_depth; | |
3463 | ||
3464 | if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) | |
3465 | max_tx_queue_size = min_t(u32, max_tx_queue_size, | |
3466 | llq->max_llq_depth); | |
3467 | else | |
3468 | max_tx_queue_size = min_t(u32, max_tx_queue_size, | |
3469 | max_queue_ext->max_tx_sq_depth); | |
3470 | ||
3471 | adapter->max_tx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS, | |
3472 | max_queue_ext->max_per_packet_tx_descs); | |
3473 | adapter->max_rx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS, | |
3474 | max_queue_ext->max_per_packet_rx_descs); | |
3475 | } else { | |
3476 | struct ena_admin_queue_feature_desc *max_queues = | |
3477 | &get_feat_ctx->max_queues; | |
3478 | max_rx_queue_size = min_t(u32, max_queues->max_cq_depth, | |
3479 | max_queues->max_sq_depth); | |
3480 | max_tx_queue_size = max_queues->max_cq_depth; | |
3481 | ||
3482 | if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) | |
3483 | max_tx_queue_size = min_t(u32, max_tx_queue_size, | |
3484 | llq->max_llq_depth); | |
3485 | else | |
3486 | max_tx_queue_size = min_t(u32, max_tx_queue_size, | |
3487 | max_queues->max_sq_depth); | |
3488 | ||
3489 | adapter->max_tx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS, | |
3490 | max_queues->max_packet_tx_descs); | |
3491 | adapter->max_rx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS, | |
3492 | max_queues->max_packet_rx_descs); | |
3493 | } | |
3494 | ||
3495 | max_tx_queue_size = rounddown_pow_of_two(max_tx_queue_size); | |
3496 | max_rx_queue_size = rounddown_pow_of_two(max_rx_queue_size); | |
3497 | ||
1e366688 DA |
3498 | /* When forcing large headers, we multiply the entry size by 2, and therefore divide |
3499 | * the queue size by 2, leaving the amount of memory used by the queues unchanged. | |
3500 | */ | |
3501 | if (adapter->large_llq_header_enabled) { | |
3502 | if ((llq->entry_size_ctrl_supported & ENA_ADMIN_LIST_ENTRY_SIZE_256B) && | |
3503 | ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) { | |
3504 | max_tx_queue_size /= 2; | |
3505 | dev_info(&adapter->pdev->dev, | |
3506 | "Forcing large headers and decreasing maximum TX queue size to %d\n", | |
3507 | max_tx_queue_size); | |
3508 | } else { | |
3509 | dev_err(&adapter->pdev->dev, | |
3510 | "Forcing large headers failed: LLQ is disabled or device does not support large headers\n"); | |
3511 | ||
3512 | adapter->large_llq_header_enabled = false; | |
3513 | } | |
3514 | } | |
3515 | ||
3a091084 SA |
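/* Clamp the requested sizes into the device limits and round down to a
 * power of two, which the ring indexing scheme assumes.
 */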
3516 | tx_queue_size = clamp_val(tx_queue_size, ENA_MIN_RING_SIZE, |
3517 | max_tx_queue_size); | |
3518 | rx_queue_size = clamp_val(rx_queue_size, ENA_MIN_RING_SIZE, | |
3519 | max_rx_queue_size); | |
3520 | ||
3521 | tx_queue_size = rounddown_pow_of_two(tx_queue_size); | |
3522 | rx_queue_size = rounddown_pow_of_two(rx_queue_size); | |
3523 | ||
3524 | adapter->max_tx_ring_size = max_tx_queue_size; | |
3525 | adapter->max_rx_ring_size = max_rx_queue_size; | |
3526 | adapter->requested_tx_ring_size = tx_queue_size; | |
3527 | adapter->requested_rx_ring_size = rx_queue_size; | |
3528 | } | |
3529 | ||
1738cd3e NB |
3530 | static int ena_device_validate_params(struct ena_adapter *adapter, |
3531 | struct ena_com_dev_get_features_ctx *get_feat_ctx) | |
3532 | { | |
3533 | struct net_device *netdev = adapter->netdev; | |
3534 | int rc; | |
3535 | ||
3536 | rc = ether_addr_equal(get_feat_ctx->dev_attr.mac_addr, | |
3537 | adapter->mac_addr); | |
3538 | if (!rc) { | |
3539 | netif_err(adapter, drv, netdev, | |
3540 | "Error, mac address are different\n"); | |
3541 | return -EINVAL; | |
3542 | } | |
3543 | ||
1738cd3e NB |
3544 | if (get_feat_ctx->dev_attr.max_mtu < netdev->mtu) { |
3545 | netif_err(adapter, drv, netdev, | |
3546 | "Error, device max mtu is smaller than netdev MTU\n"); | |
3547 | return -EINVAL; | |
3548 | } | |
3549 | ||
3550 | return 0; | |
3551 | } | |
3552 | ||
1e366688 DA |
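/* LLQ entries are normally 128B; when "large" (256B) entries are both
 * supported and requested, they are used instead so that bigger TX headers
 * can be pushed inline to the device.
 */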
3553 | static void set_default_llq_configurations(struct ena_adapter *adapter, |
3554 | struct ena_llq_configurations *llq_config, | |
3555 | struct ena_admin_feature_llq_desc *llq) | |
c29efeae | 3556 | { |
1e366688 DA |
3557 | struct ena_com_dev *ena_dev = adapter->ena_dev; |
3558 | ||
c29efeae AK |
3559 | llq_config->llq_header_location = ENA_ADMIN_INLINE_HEADER; |
3560 | llq_config->llq_stride_ctrl = ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY; | |
3561 | llq_config->llq_num_decs_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2; | |
1e366688 DA |
3562 | |
3563 | adapter->large_llq_header_supported = | |
3564 | !!(ena_dev->supported_features & BIT(ENA_ADMIN_LLQ)); | |
3565 | adapter->large_llq_header_supported &= | |
3566 | !!(llq->entry_size_ctrl_supported & | |
3567 | ENA_ADMIN_LIST_ENTRY_SIZE_256B); | |
3568 | ||
3569 | if ((llq->entry_size_ctrl_supported & ENA_ADMIN_LIST_ENTRY_SIZE_256B) && | |
3570 | adapter->large_llq_header_enabled) { | |
3571 | llq_config->llq_ring_entry_size = ENA_ADMIN_LIST_ENTRY_SIZE_256B; | |
3572 | llq_config->llq_ring_entry_size_value = 256; | |
3573 | } else { | |
3574 | llq_config->llq_ring_entry_size = ENA_ADMIN_LIST_ENTRY_SIZE_128B; | |
3575 | llq_config->llq_ring_entry_size_value = 128; | |
3576 | } | |
c29efeae AK |
3577 | } |
3578 | ||
3579 | static int ena_set_queues_placement_policy(struct pci_dev *pdev, | |
3580 | struct ena_com_dev *ena_dev, | |
3581 | struct ena_admin_feature_llq_desc *llq, | |
3582 | struct ena_llq_configurations *llq_default_configurations) | |
3583 | { | |
3584 | int rc; | |
3585 | u32 llq_feature_mask; | |
3586 | ||
3587 | llq_feature_mask = 1 << ENA_ADMIN_LLQ; | |
3588 | if (!(ena_dev->supported_features & llq_feature_mask)) { | |
15efff76 | 3589 | dev_warn(&pdev->dev, |
c29efeae AK |
3590 | "LLQ is not supported Fallback to host mode policy.\n"); |
3591 | ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST; | |
3592 | return 0; | |
3593 | } | |
3594 | ||
1e366688 DA |
3595 | if (!ena_dev->mem_bar) { |
3596 | netdev_err(ena_dev->net_device, | |
3597 | "LLQ is advertised as supported but device doesn't expose mem bar\n"); | |
3598 | ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST; | |
3599 | return 0; | |
3600 | } | |
3601 | ||
c29efeae AK |
3602 | rc = ena_com_config_dev_mode(ena_dev, llq, llq_default_configurations); |
3603 | if (unlikely(rc)) { | |
3604 | dev_err(&pdev->dev, | |
3605 | "Failed to configure the device mode. Fallback to host mode policy.\n"); | |
3606 | ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST; | |
3607 | } | |
3608 | ||
3609 | return 0; | |
3610 | } | |
3611 | ||
3612 | static int ena_map_llq_mem_bar(struct pci_dev *pdev, struct ena_com_dev *ena_dev, | |
3613 | int bars) | |
3614 | { | |
3615 | bool has_mem_bar = !!(bars & BIT(ENA_MEM_BAR)); | |
3616 | ||
1e366688 | 3617 | if (!has_mem_bar) |
c29efeae | 3618 | return 0; |
c29efeae AK |
3619 | |
3620 | ena_dev->mem_bar = devm_ioremap_wc(&pdev->dev, | |
3621 | pci_resource_start(pdev, ENA_MEM_BAR), | |
3622 | pci_resource_len(pdev, ENA_MEM_BAR)); | |
3623 | ||
3624 | if (!ena_dev->mem_bar) | |
3625 | return -EFAULT; | |
3626 | ||
3627 | return 0; | |
3628 | } | |
3629 | ||
1e366688 | 3630 | static int ena_device_init(struct ena_adapter *adapter, struct pci_dev *pdev, |
1738cd3e NB |
3631 | struct ena_com_dev_get_features_ctx *get_feat_ctx, |
3632 | bool *wd_state) | |
3633 | { | |
1e366688 | 3634 | struct ena_com_dev *ena_dev = adapter->ena_dev; |
c29efeae | 3635 | struct ena_llq_configurations llq_config; |
1738cd3e NB |
3636 | struct device *dev = &pdev->dev; |
3637 | bool readless_supported; | |
3638 | u32 aenq_groups; | |
3639 | int dma_width; | |
3640 | int rc; | |
3641 | ||
3642 | rc = ena_com_mmio_reg_read_request_init(ena_dev); | |
3643 | if (rc) { | |
bf2746e8 | 3644 | dev_err(dev, "Failed to init mmio read less\n"); |
1738cd3e NB |
3645 | return rc; |
3646 | } | |
3647 | ||
3648 | /* The PCIe configuration space revision id indicates whether mmio |
3649 | * register read is disabled |
3650 | */ | |
3651 | readless_supported = !(pdev->revision & ENA_MMIO_DISABLE_REG_READ); | |
3652 | ena_com_set_mmio_read_mode(ena_dev, readless_supported); | |
3653 | ||
e2eed0e3 | 3654 | rc = ena_com_dev_reset(ena_dev, ENA_REGS_RESET_NORMAL); |
1738cd3e NB |
3655 | if (rc) { |
3656 | dev_err(dev, "Cannot reset device\n"); |
3657 | goto err_mmio_read_less; | |
3658 | } | |
3659 | ||
3660 | rc = ena_com_validate_version(ena_dev); | |
3661 | if (rc) { | |
bf2746e8 | 3662 | dev_err(dev, "Device version is too low\n"); |
1738cd3e NB |
3663 | goto err_mmio_read_less; |
3664 | } | |
3665 | ||
3666 | dma_width = ena_com_get_dma_width(ena_dev); | |
3667 | if (dma_width < 0) { | |
3668 | dev_err(dev, "Invalid dma width value %d", dma_width); | |
6e22066f | 3669 | rc = dma_width; |
1738cd3e NB |
3670 | goto err_mmio_read_less; |
3671 | } | |
3672 | ||
09323b3b | 3673 | rc = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(dma_width)); |
1738cd3e | 3674 | if (rc) { |
09323b3b | 3675 | dev_err(dev, "dma_set_mask_and_coherent failed %d\n", rc); |
1738cd3e NB |
3676 | goto err_mmio_read_less; |
3677 | } | |
3678 | ||
3679 | /* ENA admin level init */ | |
f1e90f6e | 3680 | rc = ena_com_admin_init(ena_dev, &aenq_handlers); |
1738cd3e NB |
3681 | if (rc) { |
3682 | dev_err(dev, | |
3683 | "Can not initialize ena admin queue with device\n"); | |
3684 | goto err_mmio_read_less; | |
3685 | } | |
3686 | ||
3687 | /* To enable the msix interrupts the driver needs to know the number | |
3688 | * of queues. So the driver uses polling mode to retrieve this | |
3689 | * information | |
3690 | */ | |
3691 | ena_com_set_admin_polling_mode(ena_dev, true); | |
3692 | ||
095f2f1f | 3693 | ena_config_host_info(ena_dev, pdev); |
dd8427a7 | 3694 | |
1738cd3e NB |
3695 | /* Get Device Attributes */ |
3696 | rc = ena_com_get_dev_attr_feat(ena_dev, get_feat_ctx); | |
3697 | if (rc) { | |
3698 | dev_err(dev, "Cannot get attribute for ena device rc=%d\n", rc); | |
3699 | goto err_admin_init; | |
3700 | } | |
3701 | ||
3702 | /* Try to turn on all the available aenq groups */ |
3703 | aenq_groups = BIT(ENA_ADMIN_LINK_CHANGE) | | |
3704 | BIT(ENA_ADMIN_FATAL_ERROR) | | |
3705 | BIT(ENA_ADMIN_WARNING) | | |
3706 | BIT(ENA_ADMIN_NOTIFICATION) | | |
3707 | BIT(ENA_ADMIN_KEEP_ALIVE); | |
3708 | ||
3709 | aenq_groups &= get_feat_ctx->aenq.supported_groups; | |
3710 | ||
3711 | rc = ena_com_set_aenq_config(ena_dev, aenq_groups); | |
3712 | if (rc) { | |
3713 | dev_err(dev, "Cannot configure aenq groups rc= %d\n", rc); | |
3714 | goto err_admin_init; | |
3715 | } | |
3716 | ||
3717 | *wd_state = !!(aenq_groups & BIT(ENA_ADMIN_KEEP_ALIVE)); | |
3718 | ||
1e366688 | 3719 | set_default_llq_configurations(adapter, &llq_config, &get_feat_ctx->llq); |
c29efeae AK |
3720 | |
3721 | rc = ena_set_queues_placement_policy(pdev, ena_dev, &get_feat_ctx->llq, | |
3722 | &llq_config); | |
3723 | if (rc) { | |
bf2746e8 | 3724 | dev_err(dev, "ENA device init failed\n"); |
c29efeae AK |
3725 | goto err_admin_init; |
3726 | } | |
3727 | ||
1e366688 DA |
3728 | ena_calc_io_queue_size(adapter, get_feat_ctx); |
3729 | ||
1738cd3e NB |
3730 | return 0; |
3731 | ||
3732 | err_admin_init: | |
dd8427a7 | 3733 | ena_com_delete_host_info(ena_dev); |
1738cd3e NB |
3734 | ena_com_admin_destroy(ena_dev); |
3735 | err_mmio_read_less: | |
3736 | ena_com_mmio_reg_read_request_destroy(ena_dev); | |
3737 | ||
3738 | return rc; | |
3739 | } | |
3740 | ||
4d192660 | 3741 | static int ena_enable_msix_and_set_admin_interrupts(struct ena_adapter *adapter) |
1738cd3e NB |
3742 | { |
3743 | struct ena_com_dev *ena_dev = adapter->ena_dev; | |
3744 | struct device *dev = &adapter->pdev->dev; | |
3745 | int rc; | |
3746 | ||
4d192660 | 3747 | rc = ena_enable_msix(adapter); |
1738cd3e NB |
3748 | if (rc) { |
3749 | dev_err(dev, "Cannot reserve msix vectors\n"); |
3750 | return rc; | |
3751 | } | |
3752 | ||
3753 | ena_setup_mgmnt_intr(adapter); | |
3754 | ||
3755 | rc = ena_request_mgmnt_irq(adapter); | |
3756 | if (rc) { | |
3757 | dev_err(dev, "Cannot set up management interrupts\n"); |
3758 | goto err_disable_msix; | |
3759 | } | |
3760 | ||
3761 | ena_com_set_admin_polling_mode(ena_dev, false); | |
3762 | ||
3763 | ena_com_admin_aenq_enable(ena_dev); | |
3764 | ||
3765 | return 0; | |
3766 | ||
3767 | err_disable_msix: | |
06443684 NB |
3768 | ena_disable_msix(adapter); |
3769 | ||
1738cd3e NB |
3770 | return rc; |
3771 | } | |
3772 | ||
cfa324a5 | 3773 | static void ena_destroy_device(struct ena_adapter *adapter, bool graceful) |
1738cd3e | 3774 | { |
1738cd3e NB |
3775 | struct net_device *netdev = adapter->netdev; |
3776 | struct ena_com_dev *ena_dev = adapter->ena_dev; | |
8c5c7abd | 3777 | bool dev_up; |
3f6159db | 3778 | |
fe870c77 NB |
3779 | if (!test_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags)) |
3780 | return; | |
3781 | ||
3f6159db NB |
3782 | netif_carrier_off(netdev); |
3783 | ||
1738cd3e NB |
3784 | del_timer_sync(&adapter->timer_service); |
3785 | ||
1738cd3e | 3786 | dev_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags); |
8c5c7abd | 3787 | adapter->dev_up_before_reset = dev_up; |
cfa324a5 NB |
3788 | if (!graceful) |
3789 | ena_com_set_admin_running_state(ena_dev, false); | |
1738cd3e | 3790 | |
ee4552aa NB |
3791 | if (test_bit(ENA_FLAG_DEV_UP, &adapter->flags)) |
3792 | ena_down(adapter); | |
1738cd3e | 3793 | |
bd791175 | 3794 | /* Stop the device from sending AENQ events (if the reset flag is set |
58a54b9c | 3795 | * and the device is up, ena_down() has already reset the device). |
8c5c7abd NB |
3796 | */ |
3797 | if (!(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags) && dev_up)) | |
3798 | ena_com_dev_reset(adapter->ena_dev, adapter->reset_reason); | |
3799 | ||
1738cd3e NB |
3800 | ena_free_mgmnt_irq(adapter); |
3801 | ||
06443684 | 3802 | ena_disable_msix(adapter); |
1738cd3e NB |
3803 | |
3804 | ena_com_abort_admin_commands(ena_dev); | |
3805 | ||
3806 | ena_com_wait_for_abort_completion(ena_dev); | |
3807 | ||
3808 | ena_com_admin_destroy(ena_dev); | |
3809 | ||
3810 | ena_com_mmio_reg_read_request_destroy(ena_dev); | |
3811 | ||
c1c0e40b | 3812 | /* return reset reason to default value */ |
e2eed0e3 | 3813 | adapter->reset_reason = ENA_REGS_RESET_NORMAL; |
8c5c7abd | 3814 | |
3f6159db | 3815 | clear_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags); |
fe870c77 | 3816 | clear_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags); |
8c5c7abd | 3817 | } |
3f6159db | 3818 | |
8c5c7abd NB |
3819 | static int ena_restore_device(struct ena_adapter *adapter) |
3820 | { | |
3821 | struct ena_com_dev_get_features_ctx get_feat_ctx; | |
3822 | struct ena_com_dev *ena_dev = adapter->ena_dev; | |
3823 | struct pci_dev *pdev = adapter->pdev; | |
a416cb25 SA |
3824 | struct ena_ring *txr; |
3825 | int rc, count, i; | |
8c5c7abd | 3826 | bool wd_state; |
1738cd3e | 3827 | |
d18e4f68 | 3828 | set_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags); |
1e366688 | 3829 | rc = ena_device_init(adapter, adapter->pdev, &get_feat_ctx, &wd_state); |
1738cd3e NB |
3830 | if (rc) { |
3831 | dev_err(&pdev->dev, "Can not initialize device\n"); | |
3832 | goto err; | |
3833 | } | |
3834 | adapter->wd_state = wd_state; | |
3835 | ||
a416cb25 SA |
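/* The placement policy and max header size may differ after re-init
 * (e.g. an LLQ fallback to host mode), so refresh the values cached in
 * every TX ring.
 */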
3836 | count = adapter->xdp_num_queues + adapter->num_io_queues; |
3837 | for (i = 0 ; i < count; i++) { | |
3838 | txr = &adapter->tx_ring[i]; | |
3839 | txr->tx_mem_queue_type = ena_dev->tx_mem_queue_type; | |
3840 | txr->tx_max_header_size = ena_dev->tx_max_header_size; | |
3841 | } | |
3842 | ||
1738cd3e NB |
3843 | rc = ena_device_validate_params(adapter, &get_feat_ctx); |
3844 | if (rc) { | |
3845 | dev_err(&pdev->dev, "Validation of device parameters failed\n"); | |
3846 | goto err_device_destroy; | |
3847 | } | |
3848 | ||
4d192660 | 3849 | rc = ena_enable_msix_and_set_admin_interrupts(adapter); |
1738cd3e NB |
3850 | if (rc) { |
3851 | dev_err(&pdev->dev, "Enable MSI-X failed\n"); | |
3852 | goto err_device_destroy; | |
3853 | } | |
3854 | /* If the interface was up before the reset, bring it up */ |
8c5c7abd | 3855 | if (adapter->dev_up_before_reset) { |
1738cd3e NB |
3856 | rc = ena_up(adapter); |
3857 | if (rc) { | |
3858 | dev_err(&pdev->dev, "Failed to create I/O queues\n"); | |
3859 | goto err_disable_msix; | |
3860 | } | |
3861 | } | |
3862 | ||
fe870c77 | 3863 | set_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags); |
e1f1bd9b AK |
3864 | |
3865 | clear_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags); | |
3866 | if (test_bit(ENA_FLAG_LINK_UP, &adapter->flags)) | |
3867 | netif_carrier_on(adapter->netdev); | |
3868 | ||
1738cd3e | 3869 | mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ)); |
dfdde134 | 3870 | adapter->last_keep_alive_jiffies = jiffies; |
1738cd3e | 3871 | |
8c5c7abd | 3872 | return rc; |
1738cd3e NB |
3873 | err_disable_msix: |
3874 | ena_free_mgmnt_irq(adapter); | |
06443684 | 3875 | ena_disable_msix(adapter); |
1738cd3e | 3876 | err_device_destroy: |
d7703ddb AK |
3877 | ena_com_abort_admin_commands(ena_dev); |
3878 | ena_com_wait_for_abort_completion(ena_dev); | |
1738cd3e | 3879 | ena_com_admin_destroy(ena_dev); |
d7703ddb | 3880 | ena_com_dev_reset(ena_dev, ENA_REGS_RESET_DRIVER_INVALID_STATE); |
e76ad21d | 3881 | ena_com_mmio_reg_read_request_destroy(ena_dev); |
1738cd3e | 3882 | err: |
22b331c9 | 3883 | clear_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags); |
d18e4f68 | 3884 | clear_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags); |
1738cd3e NB |
3885 | dev_err(&pdev->dev, |
3886 | "Reset attempt failed. Can not reset the device\n"); | |
8c5c7abd NB |
3887 | |
3888 | return rc; | |
3889 | } | |
3890 | ||
3891 | static void ena_fw_reset_device(struct work_struct *work) | |
3892 | { | |
3893 | struct ena_adapter *adapter = | |
3894 | container_of(work, struct ena_adapter, reset_task); | |
8c5c7abd | 3895 | |
8c5c7abd | 3896 | rtnl_lock(); |
63d4a4c1 SA |
3897 | |
3898 | if (likely(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) { | |
3899 | ena_destroy_device(adapter, false); | |
3900 | ena_restore_device(adapter); | |
e3445469 AK |
3901 | |
3902 | dev_err(&adapter->pdev->dev, "Device reset completed successfully\n"); | |
63d4a4c1 SA |
3903 | } |
3904 | ||
8c5c7abd | 3905 | rtnl_unlock(); |
1738cd3e NB |
3906 | } |
3907 | ||
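/* first_interrupt is set by the interrupt handler; a non-empty completion
 * queue with no interrupt ever delivered suggests an MSI-X routing issue.
 */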
8510e1a3 NB |
3908 | static int check_for_rx_interrupt_queue(struct ena_adapter *adapter, |
3909 | struct ena_ring *rx_ring) | |
3910 | { | |
e4ac382e SA |
3911 | struct ena_napi *ena_napi = container_of(rx_ring->napi, struct ena_napi, napi); |
3912 | ||
3913 | if (likely(READ_ONCE(ena_napi->first_interrupt))) | |
8510e1a3 NB |
3914 | return 0; |
3915 | ||
3916 | if (ena_com_cq_empty(rx_ring->ena_com_io_cq)) | |
3917 | return 0; | |
3918 | ||
3919 | rx_ring->no_interrupt_event_cnt++; | |
3920 | ||
3921 | if (rx_ring->no_interrupt_event_cnt == ENA_MAX_NO_INTERRUPT_ITERATIONS) { | |
3922 | netif_err(adapter, rx_err, adapter->netdev, | |
3923 | "Potential MSIX issue on Rx side Queue = %d. Reset the device\n", | |
3924 | rx_ring->qid); | |
9fe890cc AK |
3925 | |
3926 | ena_reset_device(adapter, ENA_REGS_RESET_MISS_INTERRUPT); | |
8510e1a3 NB |
3927 | return -EIO; |
3928 | } | |
3929 | ||
3930 | return 0; | |
3931 | } | |
3932 | ||
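/* Two thresholds are used below: no interrupt at all after twice the
 * completion timeout suggests an MSI-X delivery problem, while a single
 * timeout merely counts the packet as a missed completion.
 */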
3933 | static int check_missing_comp_in_tx_queue(struct ena_adapter *adapter, | |
3934 | struct ena_ring *tx_ring) | |
1738cd3e | 3935 | { |
e4ac382e | 3936 | struct ena_napi *ena_napi = container_of(tx_ring->napi, struct ena_napi, napi); |
0ee251cd SA |
3937 | unsigned int time_since_last_napi; |
3938 | unsigned int missing_tx_comp_to; | |
3939 | bool is_tx_comp_time_expired; | |
1738cd3e NB |
3940 | struct ena_tx_buffer *tx_buf; |
3941 | unsigned long last_jiffies; | |
800c55cb | 3942 | u32 missed_tx = 0; |
11095fdb | 3943 | int i, rc = 0; |
800c55cb NB |
3944 | |
3945 | for (i = 0; i < tx_ring->ring_size; i++) { | |
3946 | tx_buf = &tx_ring->tx_buffer_info[i]; | |
3947 | last_jiffies = tx_buf->last_jiffies; | |
8510e1a3 NB |
3948 | |
3949 | if (last_jiffies == 0) | |
3950 | /* no pending Tx at this location */ | |
3951 | continue; | |
3952 | ||
0ee251cd SA |
3953 | is_tx_comp_time_expired = time_is_before_jiffies(last_jiffies + |
3954 | 2 * adapter->missing_tx_completion_to); | |
3955 | ||
3956 | if (unlikely(!READ_ONCE(ena_napi->first_interrupt) && is_tx_comp_time_expired)) { | |
8510e1a3 NB |
3957 | /* If, after the grace period, an interrupt is still not |
3958 | * received, we schedule a reset |
3959 | */ | |
3960 | netif_err(adapter, tx_err, adapter->netdev, | |
3961 | "Potential MSIX issue on Tx side Queue = %d. Reset the device\n", | |
3962 | tx_ring->qid); | |
9fe890cc | 3963 | ena_reset_device(adapter, ENA_REGS_RESET_MISS_INTERRUPT); |
8510e1a3 NB |
3964 | return -EIO; |
3965 | } | |
3966 | ||
0ee251cd SA |
3967 | is_tx_comp_time_expired = time_is_before_jiffies(last_jiffies + |
3968 | adapter->missing_tx_completion_to); | |
3969 | ||
3970 | if (unlikely(is_tx_comp_time_expired)) { | |
3971 | if (!tx_buf->print_once) { | |
3972 | time_since_last_napi = jiffies_to_usecs(jiffies - tx_ring->tx_stats.last_napi_jiffies); | |
3973 | missing_tx_comp_to = jiffies_to_msecs(adapter->missing_tx_completion_to); | |
800c55cb | 3974 | netif_notice(adapter, tx_err, adapter->netdev, |
0ee251cd SA |
3975 | "Found a Tx that wasn't completed on time, qid %d, index %d. %u usecs have passed since last napi execution. Missing Tx timeout value %u msecs\n", |
3976 | tx_ring->qid, i, time_since_last_napi, missing_tx_comp_to); | |
3977 | } | |
800c55cb NB |
3978 | |
3979 | tx_buf->print_once = 1; | |
3980 | missed_tx++; | |
800c55cb NB |
3981 | } |
3982 | } | |
3983 | ||
11095fdb NB |
3984 | if (unlikely(missed_tx > adapter->missing_tx_completion_threshold)) { |
3985 | netif_err(adapter, tx_err, adapter->netdev, | |
3986 | "The number of lost tx completions is above the threshold (%d > %d). Reset the device\n", | |
3987 | missed_tx, | |
3988 | adapter->missing_tx_completion_threshold); | |
9fe890cc | 3989 | ena_reset_device(adapter, ENA_REGS_RESET_MISS_TX_CMPL); |
11095fdb NB |
3990 | rc = -EIO; |
3991 | } | |
3992 | ||
89dd735e SA |
3993 | ena_increase_stat(&tx_ring->tx_stats.missed_tx, missed_tx, |
3994 | &tx_ring->syncp); | |
11095fdb NB |
3995 | |
3996 | return rc; | |
800c55cb NB |
3997 | } |
3998 | ||
8510e1a3 | 3999 | static void check_for_missing_completions(struct ena_adapter *adapter) |
800c55cb | 4000 | { |
1738cd3e | 4001 | struct ena_ring *tx_ring; |
8510e1a3 | 4002 | struct ena_ring *rx_ring; |
800c55cb | 4003 | int i, budget, rc; |
548c4940 | 4004 | int io_queue_count; |
1738cd3e | 4005 | |
548c4940 | 4006 | io_queue_count = adapter->xdp_num_queues + adapter->num_io_queues; |
1738cd3e NB |
4007 | /* Make sure the driver doesn't turn the device in other process */ |
4008 | smp_rmb(); | |
4009 | ||
4010 | if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags)) | |
4011 | return; | |
4012 | ||
3f6159db NB |
4013 | if (test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags)) |
4014 | return; | |
4015 | ||
82ef30f1 NB |
4016 | if (adapter->missing_tx_completion_to == ENA_HW_HINTS_NO_TIMEOUT) |
4017 | return; | |
4018 | ||
1738cd3e NB |
4019 | budget = ENA_MONITORED_TX_QUEUES; |
4020 | ||
548c4940 | 4021 | for (i = adapter->last_monitored_tx_qid; i < io_queue_count; i++) { |
1738cd3e | 4022 | tx_ring = &adapter->tx_ring[i]; |
8510e1a3 NB |
4023 | rx_ring = &adapter->rx_ring[i]; |
4024 | ||
4025 | rc = check_missing_comp_in_tx_queue(adapter, tx_ring); | |
4026 | if (unlikely(rc)) | |
4027 | return; | |
1738cd3e | 4028 | |
548c4940 SJ |
4029 | rc = !ENA_IS_XDP_INDEX(adapter, i) ? |
4030 | check_for_rx_interrupt_queue(adapter, rx_ring) : 0; | |
800c55cb NB |
4031 | if (unlikely(rc)) |
4032 | return; | |
1738cd3e NB |
4033 | |
4034 | budget--; | |
4035 | if (!budget) | |
4036 | break; | |
4037 | } | |
4038 | ||
548c4940 | 4039 | adapter->last_monitored_tx_qid = i % io_queue_count; |
1738cd3e NB |
4040 | } |
4041 | ||
a3af7c18 NB |
4042 | /* trigger napi schedule after 2 consecutive detections */ |
4043 | #define EMPTY_RX_REFILL 2 | |
4044 | /* For the rare case where the device runs out of Rx descriptors and the |
4045 | * napi handler fails to refill new Rx descriptors (due to a lack of |
4046 | * memory, for example). |
4047 | * This case leads to a deadlock: |
4048 | * the device won't send interrupts since all the new Rx packets will be dropped, |
4049 | * and the napi handler won't allocate new Rx descriptors, so the device |
4050 | * won't be able to receive new packets. |
4051 | * |
4052 | * This scenario can happen when the kernel's vm.min_free_kbytes is too small. |
4053 | * It is recommended to have at least 512MB, with a minimum of 128MB for a |
4054 | * constrained environment. |
4055 | * |
4056 | * When such a situation is detected - reschedule napi. |
4057 | */ |
4058 | static void check_for_empty_rx_ring(struct ena_adapter *adapter) | |
4059 | { | |
4060 | struct ena_ring *rx_ring; | |
4061 | int i, refill_required; | |
4062 | ||
4063 | if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags)) | |
4064 | return; | |
4065 | ||
4066 | if (test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags)) | |
4067 | return; | |
4068 | ||
faa615f9 | 4069 | for (i = 0; i < adapter->num_io_queues; i++) { |
a3af7c18 NB |
4070 | rx_ring = &adapter->rx_ring[i]; |
4071 | ||
7cfe9a55 | 4072 | refill_required = ena_com_free_q_entries(rx_ring->ena_com_io_sq); |
a3af7c18 NB |
4073 | if (unlikely(refill_required == (rx_ring->ring_size - 1))) { |
4074 | rx_ring->empty_rx_queue++; | |
4075 | ||
4076 | if (rx_ring->empty_rx_queue >= EMPTY_RX_REFILL) { | |
89dd735e SA |
4077 | ena_increase_stat(&rx_ring->rx_stats.empty_rx_ring, 1, |
4078 | &rx_ring->syncp); | |
a3af7c18 NB |
4079 | |
4080 | netif_err(adapter, drv, adapter->netdev, | |
bf2746e8 | 4081 | "Trigger refill for ring %d\n", i); |
a3af7c18 NB |
4082 | |
4083 | napi_schedule(rx_ring->napi); | |
4084 | rx_ring->empty_rx_queue = 0; | |
4085 | } | |
4086 | } else { | |
4087 | rx_ring->empty_rx_queue = 0; | |
4088 | } | |
4089 | } | |
4090 | } | |
4091 | ||
1738cd3e NB |
4092 | /* Check for keep alive expiration */ |
4093 | static void check_for_missing_keep_alive(struct ena_adapter *adapter) | |
4094 | { | |
4095 | unsigned long keep_alive_expired; | |
4096 | ||
4097 | if (!adapter->wd_state) | |
4098 | return; | |
4099 | ||
82ef30f1 NB |
4100 | if (adapter->keep_alive_timeout == ENA_HW_HINTS_NO_TIMEOUT) |
4101 | return; | |
4102 | ||
2a6e5fa2 AK |
4103 | keep_alive_expired = adapter->last_keep_alive_jiffies + |
4104 | adapter->keep_alive_timeout; | |
1738cd3e NB |
4105 | if (unlikely(time_is_before_jiffies(keep_alive_expired))) { |
4106 | netif_err(adapter, drv, adapter->netdev, | |
4107 | "Keep alive watchdog timeout.\n"); | |
89dd735e SA |
4108 | ena_increase_stat(&adapter->dev_stats.wd_expired, 1, |
4109 | &adapter->syncp); | |
9fe890cc | 4110 | ena_reset_device(adapter, ENA_REGS_RESET_KEEP_ALIVE_TO); |
1738cd3e NB |
4111 | } |
4112 | } | |
4113 | ||
4114 | static void check_for_admin_com_state(struct ena_adapter *adapter) | |
4115 | { | |
4116 | if (unlikely(!ena_com_get_admin_running_state(adapter->ena_dev))) { | |
4117 | netif_err(adapter, drv, adapter->netdev, | |
4118 | "ENA admin queue is not in running state!\n"); | |
89dd735e SA |
4119 | ena_increase_stat(&adapter->dev_stats.admin_q_pause, 1, |
4120 | &adapter->syncp); | |
9fe890cc | 4121 | ena_reset_device(adapter, ENA_REGS_RESET_ADMIN_TO); |
1738cd3e NB |
4122 | } |
4123 | } | |
4124 | ||
82ef30f1 NB |
4125 | static void ena_update_hints(struct ena_adapter *adapter, |
4126 | struct ena_admin_ena_hw_hints *hints) | |
4127 | { | |
4128 | struct net_device *netdev = adapter->netdev; | |
4129 | ||
4130 | if (hints->admin_completion_tx_timeout) | |
4131 | adapter->ena_dev->admin_queue.completion_timeout = | |
4132 | hints->admin_completion_tx_timeout * 1000; | |
4133 | ||
4134 | if (hints->mmio_read_timeout) | |
4135 | /* convert to usec */ | |
4136 | adapter->ena_dev->mmio_read.reg_read_to = | |
4137 | hints->mmio_read_timeout * 1000; | |
4138 | ||
4139 | if (hints->missed_tx_completion_count_threshold_to_reset) | |
4140 | adapter->missing_tx_completion_threshold = | |
4141 | hints->missed_tx_completion_count_threshold_to_reset; | |
4142 | ||
4143 | if (hints->missing_tx_completion_timeout) { | |
4144 | if (hints->missing_tx_completion_timeout == ENA_HW_HINTS_NO_TIMEOUT) | |
4145 | adapter->missing_tx_completion_to = ENA_HW_HINTS_NO_TIMEOUT; | |
4146 | else | |
4147 | adapter->missing_tx_completion_to = | |
4148 | msecs_to_jiffies(hints->missing_tx_completion_timeout); | |
4149 | } | |
4150 | ||
4151 | if (hints->netdev_wd_timeout) | |
4152 | netdev->watchdog_timeo = msecs_to_jiffies(hints->netdev_wd_timeout); | |
4153 | ||
4154 | if (hints->driver_watchdog_timeout) { | |
4155 | if (hints->driver_watchdog_timeout == ENA_HW_HINTS_NO_TIMEOUT) | |
4156 | adapter->keep_alive_timeout = ENA_HW_HINTS_NO_TIMEOUT; | |
4157 | else | |
4158 | adapter->keep_alive_timeout = | |
4159 | msecs_to_jiffies(hints->driver_watchdog_timeout); | |
4160 | } | |
4161 | } | |
4162 | ||
1738cd3e NB |
4163 | static void ena_update_host_info(struct ena_admin_host_info *host_info, |
4164 | struct net_device *netdev) | |
4165 | { | |
4166 | host_info->supported_network_features[0] = | |
4167 | netdev->features & GENMASK_ULL(31, 0); | |
4168 | host_info->supported_network_features[1] = | |
4169 | (netdev->features & GENMASK_ULL(63, 32)) >> 32; | |
4170 | } | |
4171 | ||
e99e88a9 | 4172 | static void ena_timer_service(struct timer_list *t) |
1738cd3e | 4173 | { |
e99e88a9 | 4174 | struct ena_adapter *adapter = from_timer(adapter, t, timer_service); |
1738cd3e NB |
4175 | u8 *debug_area = adapter->ena_dev->host_attr.debug_area_virt_addr; |
4176 | struct ena_admin_host_info *host_info = | |
4177 | adapter->ena_dev->host_attr.host_info; | |
4178 | ||
4179 | check_for_missing_keep_alive(adapter); | |
4180 | ||
4181 | check_for_admin_com_state(adapter); | |
4182 | ||
8510e1a3 | 4183 | check_for_missing_completions(adapter); |
1738cd3e | 4184 | |
a3af7c18 NB |
4185 | check_for_empty_rx_ring(adapter); |
4186 | ||
1738cd3e NB |
4187 | if (debug_area) |
4188 | ena_dump_stats_to_buf(adapter, debug_area); | |
4189 | ||
4190 | if (host_info) | |
4191 | ena_update_host_info(host_info, adapter->netdev); | |
4192 | ||
3f6159db | 4193 | if (unlikely(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) { |
1738cd3e NB |
4194 | netif_err(adapter, drv, adapter->netdev, |
4195 | "Trigger reset is on\n"); | |
4196 | ena_dump_stats_to_dmesg(adapter); | |
4197 | queue_work(ena_wq, &adapter->reset_task); | |
4198 | return; | |
4199 | } | |
4200 | ||
4201 | /* Reset the timer */ | |
2a6e5fa2 | 4202 | mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ)); |
1738cd3e NB |
4203 | } |
4204 | ||
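/* The usable IO queue count is bounded by the online CPUs, the device
 * SQ/CQ limits in both directions, and the MSI-X vectors left over after
 * the management vector.
 */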
ba6f6b41 | 4205 | static u32 ena_calc_max_io_queue_num(struct pci_dev *pdev, |
736ce3f4 SJ |
4206 | struct ena_com_dev *ena_dev, |
4207 | struct ena_com_dev_get_features_ctx *get_feat_ctx) | |
1738cd3e | 4208 | { |
ba6f6b41 | 4209 | u32 io_tx_sq_num, io_tx_cq_num, io_rx_num, max_num_io_queues; |
31aa9857 SJ |
4210 | |
4211 | if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) { | |
4212 | struct ena_admin_queue_ext_feature_fields *max_queue_ext = | |
4213 | &get_feat_ctx->max_queue_ext.max_queue_ext; | |
736ce3f4 | 4214 | io_rx_num = min_t(u32, max_queue_ext->max_rx_sq_num, |
31aa9857 | 4215 | max_queue_ext->max_rx_cq_num); |
1738cd3e | 4216 | |
31aa9857 SJ |
4217 | io_tx_sq_num = max_queue_ext->max_tx_sq_num; |
4218 | io_tx_cq_num = max_queue_ext->max_tx_cq_num; | |
4219 | } else { | |
4220 | struct ena_admin_queue_feature_desc *max_queues = | |
4221 | &get_feat_ctx->max_queues; | |
4222 | io_tx_sq_num = max_queues->max_sq_num; | |
4223 | io_tx_cq_num = max_queues->max_cq_num; | |
736ce3f4 | 4224 | io_rx_num = min_t(u32, io_tx_sq_num, io_tx_cq_num); |
31aa9857 SJ |
4225 | } |
4226 | ||
4227 | /* In case of LLQ use the llq fields for the tx SQ/CQ */ | |
9fd25592 | 4228 | if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) |
31aa9857 | 4229 | io_tx_sq_num = get_feat_ctx->llq.max_llq_num; |
1738cd3e | 4230 | |
736ce3f4 SJ |
4231 | max_num_io_queues = min_t(u32, num_online_cpus(), ENA_MAX_NUM_IO_QUEUES); |
4232 | max_num_io_queues = min_t(u32, max_num_io_queues, io_rx_num); | |
4233 | max_num_io_queues = min_t(u32, max_num_io_queues, io_tx_sq_num); | |
4234 | max_num_io_queues = min_t(u32, max_num_io_queues, io_tx_cq_num); | |
e355fa6a | 4235 | /* 1 IRQ for mgmnt and 1 IRQ for each IO queue (TX/RX pair) */ |
736ce3f4 | 4236 | max_num_io_queues = min_t(u32, max_num_io_queues, pci_msix_vec_count(pdev) - 1); |
1738cd3e | 4237 | |
736ce3f4 | 4238 | return max_num_io_queues; |
1738cd3e NB |
4239 | } |
4240 | ||
1738cd3e NB |
4241 | static void ena_set_dev_offloads(struct ena_com_dev_get_features_ctx *feat, |
4242 | struct net_device *netdev) | |
4243 | { | |
4244 | netdev_features_t dev_features = 0; | |
4245 | ||
4246 | /* Set offload features */ | |
4247 | if (feat->offload.tx & | |
4248 | ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_MASK) | |
4249 | dev_features |= NETIF_F_IP_CSUM; | |
4250 | ||
4251 | if (feat->offload.tx & | |
4252 | ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_PART_MASK) | |
4253 | dev_features |= NETIF_F_IPV6_CSUM; | |
4254 | ||
4255 | if (feat->offload.tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_MASK) | |
4256 | dev_features |= NETIF_F_TSO; | |
4257 | ||
4258 | if (feat->offload.tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV6_MASK) | |
4259 | dev_features |= NETIF_F_TSO6; | |
4260 | ||
4261 | if (feat->offload.tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_ECN_MASK) | |
4262 | dev_features |= NETIF_F_TSO_ECN; | |
4263 | ||
4264 | if (feat->offload.rx_supported & | |
4265 | ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_MASK) | |
4266 | dev_features |= NETIF_F_RXCSUM; | |
4267 | ||
4268 | if (feat->offload.rx_supported & | |
4269 | ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV6_CSUM_MASK) | |
4270 | dev_features |= NETIF_F_RXCSUM; | |
4271 | ||
4272 | netdev->features = | |
4273 | dev_features | | |
4274 | NETIF_F_SG | | |
1738cd3e NB |
4275 | NETIF_F_RXHASH | |
4276 | NETIF_F_HIGHDMA; | |
4277 | ||
4278 | netdev->hw_features |= netdev->features; | |
4279 | netdev->vlan_features |= netdev->features; | |
4280 | } | |
4281 | ||
4282 | static void ena_set_conf_feat_params(struct ena_adapter *adapter, | |
4283 | struct ena_com_dev_get_features_ctx *feat) | |
4284 | { | |
4285 | struct net_device *netdev = adapter->netdev; | |
4286 | ||
4287 | /* Copy mac address */ | |
4288 | if (!is_valid_ether_addr(feat->dev_attr.mac_addr)) { | |
4289 | eth_hw_addr_random(netdev); | |
4290 | ether_addr_copy(adapter->mac_addr, netdev->dev_addr); | |
4291 | } else { | |
4292 | ether_addr_copy(adapter->mac_addr, feat->dev_attr.mac_addr); | |
f3956ebb | 4293 | eth_hw_addr_set(netdev, adapter->mac_addr); |
1738cd3e NB |
4294 | } |
4295 | ||
4296 | /* Set offload features */ | |
4297 | ena_set_dev_offloads(feat, netdev); | |
4298 | ||
4299 | adapter->max_mtu = feat->dev_attr.max_mtu; | |
d894be57 JW |
4300 | netdev->max_mtu = adapter->max_mtu; |
4301 | netdev->min_mtu = ENA_MIN_MTU; | |
1738cd3e NB |
4302 | } |
4303 | ||
4304 | static int ena_rss_init_default(struct ena_adapter *adapter) | |
4305 | { | |
4306 | struct ena_com_dev *ena_dev = adapter->ena_dev; | |
4307 | struct device *dev = &adapter->pdev->dev; | |
4308 | int rc, i; | |
4309 | u32 val; | |
4310 | ||
4311 | rc = ena_com_rss_init(ena_dev, ENA_RX_RSS_TABLE_LOG_SIZE); | |
4312 | if (unlikely(rc)) { | |
4313 | dev_err(dev, "Cannot init indirect table\n"); | |
4314 | goto err_rss_init; | |
4315 | } | |
4316 | ||
4317 | for (i = 0; i < ENA_RX_RSS_TABLE_SIZE; i++) { | |
faa615f9 | 4318 | val = ethtool_rxfh_indir_default(i, adapter->num_io_queues); |
1738cd3e NB |
4319 | rc = ena_com_indirect_table_fill_entry(ena_dev, i, |
4320 | ENA_IO_RXQ_IDX(val)); | |
09f8676e | 4321 | if (unlikely(rc)) { |
1738cd3e NB |
4322 | dev_err(dev, "Cannot fill indirect table\n"); |
4323 | goto err_fill_indir; | |
4324 | } | |
4325 | } | |
4326 | ||
c1bd17e5 | 4327 | rc = ena_com_fill_hash_function(ena_dev, ENA_ADMIN_TOEPLITZ, NULL, |
1738cd3e | 4328 | ENA_HASH_KEY_SIZE, 0xFFFFFFFF); |
d1497638 | 4329 | if (unlikely(rc && (rc != -EOPNOTSUPP))) { |
1738cd3e NB |
4330 | dev_err(dev, "Cannot fill hash function\n"); |
4331 | goto err_fill_indir; | |
4332 | } | |
4333 | ||
4334 | rc = ena_com_set_default_hash_ctrl(ena_dev); | |
d1497638 | 4335 | if (unlikely(rc && (rc != -EOPNOTSUPP))) { |
1738cd3e NB |
4336 | dev_err(dev, "Cannot fill hash control\n"); |
4337 | goto err_fill_indir; | |
4338 | } | |
4339 | ||
4340 | return 0; | |
4341 | ||
4342 | err_fill_indir: | |
4343 | ena_com_rss_destroy(ena_dev); | |
4344 | err_rss_init: | |
4345 | ||
4346 | return rc; | |
4347 | } | |
4348 | ||
static void ena_release_bars(struct ena_com_dev *ena_dev, struct pci_dev *pdev)
{
	int release_bars = pci_select_bars(pdev, IORESOURCE_MEM) & ENA_BAR_MASK;

	pci_release_selected_regions(pdev, release_bars);
}

/* ena_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in ena_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * ena_probe initializes an adapter identified by a pci_dev structure.
 * It performs the OS initialization, configures the adapter private
 * structure, and resets the hardware.
 */
static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct ena_com_dev_get_features_ctx get_feat_ctx;
	struct ena_com_dev *ena_dev = NULL;
	struct ena_adapter *adapter;
	struct net_device *netdev;
	static int adapters_found;
	u32 max_num_io_queues;
	bool wd_state;
	int bars, rc;

	dev_dbg(&pdev->dev, "%s\n", __func__);

	rc = pci_enable_device_mem(pdev);
	if (rc) {
		dev_err(&pdev->dev, "pci_enable_device_mem() failed!\n");
		return rc;
	}

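	/* One call sets both the streaming and the coherent DMA masks to the
	 * widest physical address the device can generate.
	 */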
	rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(ENA_MAX_PHYS_ADDR_SIZE_BITS));
	if (rc) {
		dev_err(&pdev->dev, "dma_set_mask_and_coherent failed %d\n", rc);
		goto err_disable_device;
	}

	pci_set_master(pdev);

	ena_dev = vzalloc(sizeof(*ena_dev));
	if (!ena_dev) {
		rc = -ENOMEM;
		goto err_disable_device;
	}

	bars = pci_select_bars(pdev, IORESOURCE_MEM) & ENA_BAR_MASK;
	rc = pci_request_selected_regions(pdev, bars, DRV_MODULE_NAME);
	if (rc) {
		dev_err(&pdev->dev, "pci_request_selected_regions failed %d\n",
			rc);
		goto err_free_ena_dev;
	}

	ena_dev->reg_bar = devm_ioremap(&pdev->dev,
					pci_resource_start(pdev, ENA_REG_BAR),
					pci_resource_len(pdev, ENA_REG_BAR));
	if (!ena_dev->reg_bar) {
		dev_err(&pdev->dev, "Failed to remap regs bar\n");
		rc = -EFAULT;
		goto err_free_region;
	}

	ena_dev->ena_min_poll_delay_us = ENA_ADMIN_POLL_DELAY_US;

	ena_dev->dmadev = &pdev->dev;

	netdev = alloc_etherdev_mq(sizeof(struct ena_adapter), ENA_MAX_RINGS);
	if (!netdev) {
		dev_err(&pdev->dev, "alloc_etherdev_mq failed\n");
		rc = -ENOMEM;
		goto err_free_region;
	}

	SET_NETDEV_DEV(netdev, &pdev->dev);
	adapter = netdev_priv(netdev);
	adapter->ena_dev = ena_dev;
	adapter->netdev = netdev;
	adapter->pdev = pdev;
	adapter->msg_enable = DEFAULT_MSG_ENABLE;

	ena_dev->net_device = netdev;

	pci_set_drvdata(pdev, adapter);

	rc = ena_map_llq_mem_bar(pdev, ena_dev, bars);
	if (rc) {
		dev_err(&pdev->dev, "ENA LLQ bar mapping failed\n");
		goto err_netdev_destroy;
	}

	rc = ena_device_init(adapter, pdev, &get_feat_ctx, &wd_state);
	if (rc) {
		dev_err(&pdev->dev, "ENA device init failed\n");
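		/* A timeout at this stage usually means the device is not
		 * ready yet; returning -EPROBE_DEFER asks the driver core to
		 * retry the probe later instead of failing it outright.
		 */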
		if (rc == -ETIME)
			rc = -EPROBE_DEFER;
		goto err_netdev_destroy;
	}

	/* Initial TX and RX interrupt delay. Assumes 1 usec granularity.
	 * Updated during device initialization with the real granularity.
	 */
	ena_dev->intr_moder_tx_interval = ENA_INTR_INITIAL_TX_INTERVAL_USECS;
	ena_dev->intr_moder_rx_interval = ENA_INTR_INITIAL_RX_INTERVAL_USECS;
	ena_dev->intr_delay_resolution = ENA_DEFAULT_INTR_DELAY_RESOLUTION;
	max_num_io_queues = ena_calc_max_io_queue_num(pdev, ena_dev, &get_feat_ctx);
	if (unlikely(!max_num_io_queues)) {
		rc = -EFAULT;
		goto err_device_destroy;
	}

	ena_set_conf_feat_params(adapter, &get_feat_ctx);

	adapter->reset_reason = ENA_REGS_RESET_NORMAL;

	adapter->num_io_queues = max_num_io_queues;
	adapter->max_num_io_queues = max_num_io_queues;
	adapter->last_monitored_tx_qid = 0;

	adapter->xdp_first_ring = 0;
	adapter->xdp_num_queues = 0;

	adapter->rx_copybreak = ENA_DEFAULT_RX_COPYBREAK;
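
	/* With the LLQ (low-latency queue) placement policy, TX descriptors
	 * are written directly to device memory; check whether the device
	 * also supports turning off its caching of TX meta descriptors.
	 */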
	if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
		adapter->disable_meta_caching =
			!!(get_feat_ctx.llq.accel_mode.u.get.supported_flags &
			   BIT(ENA_ADMIN_DISABLE_META_CACHING));

	adapter->wd_state = wd_state;

	snprintf(adapter->name, ENA_NAME_MAX_LEN, "ena_%d", adapters_found);

	rc = ena_com_init_interrupt_moderation(adapter->ena_dev);
	if (rc) {
		dev_err(&pdev->dev,
			"Failed to query interrupt moderation feature\n");
		goto err_device_destroy;
	}

	ena_init_io_rings(adapter,
			  0,
			  adapter->xdp_num_queues +
			  adapter->num_io_queues);

	netdev->netdev_ops = &ena_netdev_ops;
	netdev->watchdog_timeo = TX_TIMEOUT;
	ena_set_ethtool_ops(netdev);

	netdev->priv_flags |= IFF_UNICAST_FLT;

	u64_stats_init(&adapter->syncp);

	rc = ena_enable_msix_and_set_admin_interrupts(adapter);
	if (rc) {
		dev_err(&pdev->dev,
			"Failed to enable and set the admin interrupts\n");
		goto err_worker_destroy;
	}
	rc = ena_rss_init_default(adapter);
	if (rc && (rc != -EOPNOTSUPP)) {
		dev_err(&pdev->dev, "Cannot init RSS rc: %d\n", rc);
		goto err_free_msix;
	}

	ena_config_debug_area(adapter);

	if (ena_xdp_legal_queue_count(adapter, adapter->num_io_queues))
		netdev->xdp_features = NETDEV_XDP_ACT_BASIC |
				       NETDEV_XDP_ACT_REDIRECT;

	memcpy(adapter->netdev->perm_addr, adapter->mac_addr, netdev->addr_len);

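	/* Start with the carrier off; it is turned on only once the device
	 * reports link-up through the LINK_CHANGE AENQ event, so the stack
	 * never sees a link before the device is actually ready.
	 */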
	netif_carrier_off(netdev);

	rc = register_netdev(netdev);
	if (rc) {
		dev_err(&pdev->dev, "Cannot register net device\n");
		goto err_rss;
	}

	INIT_WORK(&adapter->reset_task, ena_fw_reset_device);

	adapter->last_keep_alive_jiffies = jiffies;
	adapter->keep_alive_timeout = ENA_DEVICE_KALIVE_TIMEOUT;
	adapter->missing_tx_completion_to = TX_TIMEOUT;
	adapter->missing_tx_completion_threshold = MAX_NUM_OF_TIMEOUTED_PACKETS;

	ena_update_hints(adapter, &get_feat_ctx.hw_hints);

	timer_setup(&adapter->timer_service, ena_timer_service, 0);
	mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ));

	dev_info(&pdev->dev,
		 "%s found at mem %lx, mac addr %pM\n",
		 DEVICE_NAME, (long)pci_resource_start(pdev, 0),
		 netdev->dev_addr);

	set_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags);

	adapters_found++;

	return 0;

err_rss:
	ena_com_delete_debug_area(ena_dev);
	ena_com_rss_destroy(ena_dev);
err_free_msix:
	ena_com_dev_reset(ena_dev, ENA_REGS_RESET_INIT_ERR);
	/* Stop submitting admin commands on a device that was reset */
	ena_com_set_admin_running_state(ena_dev, false);
	ena_free_mgmnt_irq(adapter);
	ena_disable_msix(adapter);
err_worker_destroy:
	del_timer(&adapter->timer_service);
err_device_destroy:
	ena_com_delete_host_info(ena_dev);
	ena_com_admin_destroy(ena_dev);
err_netdev_destroy:
	free_netdev(netdev);
err_free_region:
	ena_release_bars(ena_dev, pdev);
err_free_ena_dev:
	vfree(ena_dev);
err_disable_device:
	pci_disable_device(pdev);
	return rc;
}

/*****************************************************************************/

/* __ena_shutoff - Helper used in both PCI remove/shutdown routines
 * @pdev: PCI device information struct
 * @shutdown: Is it a shutdown operation? If false, it is a removal
 *
 * __ena_shutoff is a helper routine that does the real work on the shutdown
 * and removal paths; the only difference between those paths is whether the
 * netdevice is detached or unregistered.
 */
static void __ena_shutoff(struct pci_dev *pdev, bool shutdown)
{
	struct ena_adapter *adapter = pci_get_drvdata(pdev);
	struct ena_com_dev *ena_dev;
	struct net_device *netdev;

	ena_dev = adapter->ena_dev;
	netdev = adapter->netdev;

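	/* Release the aRFS CPU reverse-map (if one was set up) so no stale
	 * references to the IRQs remain once they are freed during device
	 * destruction below.
	 */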
#ifdef CONFIG_RFS_ACCEL
	if ((adapter->msix_vecs >= 1) && (netdev->rx_cpu_rmap)) {
		free_irq_cpu_rmap(netdev->rx_cpu_rmap);
		netdev->rx_cpu_rmap = NULL;
	}
#endif /* CONFIG_RFS_ACCEL */

	/* Make sure timer and reset routine won't be called after
	 * freeing device resources.
	 */
	del_timer_sync(&adapter->timer_service);
	cancel_work_sync(&adapter->reset_task);

	rtnl_lock(); /* lock released inside the if-else block below */
	adapter->reset_reason = ENA_REGS_RESET_SHUTDOWN;
	ena_destroy_device(adapter, true);

	if (shutdown) {
		netif_device_detach(netdev);
		dev_close(netdev);
		rtnl_unlock();
	} else {
		rtnl_unlock();
		/* unregister_netdev() takes the RTNL lock itself, so it must
		 * be called with the lock already dropped.
		 */
		unregister_netdev(netdev);
		free_netdev(netdev);
	}

	ena_com_rss_destroy(ena_dev);

	ena_com_delete_debug_area(ena_dev);

	ena_com_delete_host_info(ena_dev);

	ena_release_bars(ena_dev, pdev);

	pci_disable_device(pdev);

	vfree(ena_dev);
}

/* ena_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * ena_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.
 */
static void ena_remove(struct pci_dev *pdev)
{
	__ena_shutoff(pdev, false);
}

/* ena_shutdown - Device Shutdown Routine
 * @pdev: PCI device information struct
 *
 * ena_shutdown is called by the PCI subsystem to alert the driver that
 * a shutdown/reboot (or kexec) is happening and the device must be disabled.
 */
static void ena_shutdown(struct pci_dev *pdev)
{
	__ena_shutoff(pdev, true);
}

/* ena_suspend - PM suspend callback
 * @dev_d: Device information struct
 */
static int __maybe_unused ena_suspend(struct device *dev_d)
{
	struct pci_dev *pdev = to_pci_dev(dev_d);
	struct ena_adapter *adapter = pci_get_drvdata(pdev);

	ena_increase_stat(&adapter->dev_stats.suspend, 1, &adapter->syncp);

	rtnl_lock();
	if (unlikely(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) {
		dev_err(&pdev->dev,
			"Ignoring device reset request as the device is being suspended\n");
		clear_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
	}
	ena_destroy_device(adapter, true);
	rtnl_unlock();
	return 0;
}

/* ena_resume - PM resume callback
 * @dev_d: Device information struct
 */
static int __maybe_unused ena_resume(struct device *dev_d)
{
	struct ena_adapter *adapter = dev_get_drvdata(dev_d);
	int rc;

	ena_increase_stat(&adapter->dev_stats.resume, 1, &adapter->syncp);

	rtnl_lock();
	rc = ena_restore_device(adapter);
	rtnl_unlock();
	return rc;
}

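/* SIMPLE_DEV_PM_OPS() builds a struct dev_pm_ops that wires ena_suspend and
 * ena_resume into all system-sleep transitions (suspend, hibernate, etc.);
 * runtime-PM callbacks are left unset.
 */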
static SIMPLE_DEV_PM_OPS(ena_pm_ops, ena_suspend, ena_resume);

static struct pci_driver ena_pci_driver = {
	.name = DRV_MODULE_NAME,
	.id_table = ena_pci_tbl,
	.probe = ena_probe,
	.remove = ena_remove,
	.shutdown = ena_shutdown,
	.driver.pm = &ena_pm_ops,
	.sriov_configure = pci_sriov_configure_simple,
};

static int __init ena_init(void)
{
	int ret;

	ena_wq = create_singlethread_workqueue(DRV_MODULE_NAME);
	if (!ena_wq) {
		pr_err("Failed to create workqueue\n");
		return -ENOMEM;
	}

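	/* Register the PCI driver only after the workqueue exists: once
	 * probe() binds a device, its timer service may queue the reset task
	 * on ena_wq at any time. Undo the workqueue creation if registration
	 * fails.
	 */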
	ret = pci_register_driver(&ena_pci_driver);
	if (ret)
		destroy_workqueue(ena_wq);

	return ret;
}

static void __exit ena_cleanup(void)
{
	pci_unregister_driver(&ena_pci_driver);

	if (ena_wq) {
		destroy_workqueue(ena_wq);
		ena_wq = NULL;
	}
}

/******************************************************************************
 ******************************** AENQ Handlers *******************************
 *****************************************************************************/
/* ena_update_on_link_change:
 * Notify the network interface about the change in link status
 */
static void ena_update_on_link_change(void *adapter_data,
				      struct ena_admin_aenq_entry *aenq_e)
{
	struct ena_adapter *adapter = (struct ena_adapter *)adapter_data;
	struct ena_admin_aenq_link_change_desc *aenq_desc =
		(struct ena_admin_aenq_link_change_desc *)aenq_e;
	int status = aenq_desc->flags &
		ENA_ADMIN_AENQ_LINK_CHANGE_DESC_LINK_STATUS_MASK;

	if (status) {
		netif_dbg(adapter, ifup, adapter->netdev, "%s\n", __func__);
		set_bit(ENA_FLAG_LINK_UP, &adapter->flags);
		if (!test_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags))
			netif_carrier_on(adapter->netdev);
	} else {
		clear_bit(ENA_FLAG_LINK_UP, &adapter->flags);
		netif_carrier_off(adapter->netdev);
	}
}

static void ena_keep_alive_wd(void *adapter_data,
			      struct ena_admin_aenq_entry *aenq_e)
{
	struct ena_adapter *adapter = (struct ena_adapter *)adapter_data;
	struct ena_admin_aenq_keep_alive_desc *desc;
	u64 rx_drops;
	u64 tx_drops;

	desc = (struct ena_admin_aenq_keep_alive_desc *)aenq_e;
	adapter->last_keep_alive_jiffies = jiffies;

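	/* The device reports each 64-bit drop counter as two 32-bit halves;
	 * reassemble them, e.g. high = 0x1, low = 0x2 yields 0x100000002.
	 */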
	rx_drops = ((u64)desc->rx_drops_high << 32) | desc->rx_drops_low;
	tx_drops = ((u64)desc->tx_drops_high << 32) | desc->tx_drops_low;

	u64_stats_update_begin(&adapter->syncp);
	/* These stats are accumulated by the device, so the counters indicate
	 * all drops since the last reset.
	 */
	adapter->dev_stats.rx_drops = rx_drops;
	adapter->dev_stats.tx_drops = tx_drops;
	u64_stats_update_end(&adapter->syncp);
}

static void ena_notification(void *adapter_data,
			     struct ena_admin_aenq_entry *aenq_e)
{
	struct ena_adapter *adapter = (struct ena_adapter *)adapter_data;
	struct ena_admin_ena_hw_hints *hints;

	WARN(aenq_e->aenq_common_desc.group != ENA_ADMIN_NOTIFICATION,
	     "Invalid group(%x) expected %x\n",
	     aenq_e->aenq_common_desc.group,
	     ENA_ADMIN_NOTIFICATION);

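	/* The AENQ dispatches on the event group to reach this handler;
	 * within the group, the syndrome field selects the specific
	 * notification type.
	 */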
	switch (aenq_e->aenq_common_desc.syndrome) {
	case ENA_ADMIN_UPDATE_HINTS:
		hints = (struct ena_admin_ena_hw_hints *)
			(&aenq_e->inline_data_w4);
		ena_update_hints(adapter, hints);
		break;
	default:
		netif_err(adapter, drv, adapter->netdev,
			  "Invalid aenq notification syndrome %d\n",
			  aenq_e->aenq_common_desc.syndrome);
	}
}

/* This handler will be called for an unknown event group or for handlers
 * that are not implemented.
 */
static void unimplemented_aenq_handler(void *data,
				       struct ena_admin_aenq_entry *aenq_e)
{
	struct ena_adapter *adapter = (struct ena_adapter *)data;

	netif_err(adapter, drv, adapter->netdev,
		  "Unknown event was received or event with unimplemented handler\n");
}

static struct ena_aenq_handlers aenq_handlers = {
	.handlers = {
		[ENA_ADMIN_LINK_CHANGE] = ena_update_on_link_change,
		[ENA_ADMIN_NOTIFICATION] = ena_notification,
		[ENA_ADMIN_KEEP_ALIVE] = ena_keep_alive_wd,
	},
	.unimplemented_handler = unimplemented_aenq_handler
};

module_init(ena_init);
module_exit(ena_cleanup);