/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2012-2013 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include "net_driver.h"
#include "ef10_regs.h"
#include "io.h"
#include "mcdi.h"
#include "mcdi_pcol.h"
#include "nic.h"
#include "workarounds.h"
#include <linux/in.h>
#include <linux/jhash.h>
#include <linux/wait.h>
#include <linux/workqueue.h>
/* Hardware control for EF10 architecture including 'Huntington'. */

#define EFX_EF10_DRVGEN_EV		7
enum {
	EFX_EF10_TEST = 1,
	EFX_EF10_REFILL,
};

/* The reserved RSS context value */
#define EFX_EF10_RSS_CONTEXT_INVALID	0xffffffff
/* The filter table(s) are managed by firmware and we have write-only
 * access.  When removing filters we must identify them to the
 * firmware by a 64-bit handle, but this is too wide for Linux kernel
 * interfaces (32-bit for RX NFC, 16-bit for RFS).  Also, we need to
 * be able to tell in advance whether a requested insertion will
 * replace an existing filter.  Therefore we maintain a software hash
 * table, which should be at least as large as the hardware hash
 * table.
 *
 * Huntington has a single 8K filter table shared between all filter
 * types and both ports.
 */
#define HUNT_FILTER_TBL_ROWS 8192
struct efx_ef10_filter_table {
/* The RX match field masks supported by this fw & hw, in order of priority */
	enum efx_filter_match_flags rx_match_flags[
		MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_MAXNUM];
	unsigned int rx_match_count;

	struct {
		unsigned long spec;	/* pointer to spec plus flag bits */
/* BUSY flag indicates that an update is in progress.  AUTO_OLD is
 * used to mark and sweep MAC filters for the device address lists.
 */
#define EFX_EF10_FILTER_FLAG_BUSY	1UL
#define EFX_EF10_FILTER_FLAG_AUTO_OLD	2UL
#define EFX_EF10_FILTER_FLAGS		3UL
		u64 handle;		/* firmware handle */
	} *entry;
	wait_queue_head_t waitq;
/* Shadow of net_device address lists, guarded by mac_lock */
#define EFX_EF10_FILTER_DEV_UC_MAX	32
#define EFX_EF10_FILTER_DEV_MC_MAX	256
	struct {
		u8 addr[ETH_ALEN];
		u16 id;
	} dev_uc_list[EFX_EF10_FILTER_DEV_UC_MAX],
	  dev_mc_list[EFX_EF10_FILTER_DEV_MC_MAX];
	int dev_uc_count;		/* negative for PROMISC */
	int dev_mc_count;		/* negative for PROMISC/ALLMULTI */
};
/* An arbitrary search limit for the software hash table */
#define EFX_EF10_FILTER_SEARCH_LIMIT 200

static void efx_ef10_rx_push_rss_config(struct efx_nic *efx);
static void efx_ef10_rx_free_indir_table(struct efx_nic *efx);
static void efx_ef10_filter_table_remove(struct efx_nic *efx);
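/* Once the MC firmware is up it publishes a magic value (0xb007,
 * i.e. "boot") in the upper word of MC_SFT_STATUS, with the warm boot
 * count in the lower word; anything else means the MC is absent or
 * still rebooting.
 */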
static int efx_ef10_get_warm_boot_count(struct efx_nic *efx)
{
	efx_dword_t reg;

	efx_readd(efx, &reg, ER_DZ_BIU_MC_SFT_STATUS);
	return EFX_DWORD_FIELD(reg, EFX_WORD_1) == 0xb007 ?
		EFX_DWORD_FIELD(reg, EFX_WORD_0) : -EIO;
}
static unsigned int efx_ef10_mem_map_size(struct efx_nic *efx)
{
	return resource_size(&efx->pci_dev->resource[EFX_MEM_BAR]);
}
static int efx_ef10_init_datapath_caps(struct efx_nic *efx)
{
	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_CAPABILITIES_OUT_LEN);
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	size_t outlen;
	int rc;

	BUILD_BUG_ON(MC_CMD_GET_CAPABILITIES_IN_LEN != 0);

	rc = efx_mcdi_rpc(efx, MC_CMD_GET_CAPABILITIES, NULL, 0,
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		return rc;
	if (outlen < sizeof(outbuf)) {
		netif_err(efx, drv, efx->net_dev,
			  "unable to read datapath firmware capabilities\n");
		return -EIO;
	}

	nic_data->datapath_caps =
		MCDI_DWORD(outbuf, GET_CAPABILITIES_OUT_FLAGS1);

	if (!(nic_data->datapath_caps &
	      (1 << MC_CMD_GET_CAPABILITIES_OUT_TX_TSO_LBN))) {
		netif_err(efx, drv, efx->net_dev,
			  "current firmware does not support TSO\n");
		return -ENODEV;
	}

	if (!(nic_data->datapath_caps &
	      (1 << MC_CMD_GET_CAPABILITIES_OUT_RX_PREFIX_LEN_14_LBN))) {
		netif_err(efx, probe, efx->net_dev,
			  "current firmware does not support an RX prefix\n");
		return -ENODEV;
	}

	return 0;
}
static int efx_ef10_get_sysclk_freq(struct efx_nic *efx)
{
	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_CLOCK_OUT_LEN);
	int rc;

	rc = efx_mcdi_rpc(efx, MC_CMD_GET_CLOCK, NULL, 0,
			  outbuf, sizeof(outbuf), NULL);
	if (rc)
		return rc;
	rc = MCDI_DWORD(outbuf, GET_CLOCK_OUT_SYS_FREQ);
	return rc > 0 ? rc : -ERANGE;
}
static int efx_ef10_get_mac_address(struct efx_nic *efx, u8 *mac_address)
{
	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_MAC_ADDRESSES_OUT_LEN);
	size_t outlen;
	int rc;

	BUILD_BUG_ON(MC_CMD_GET_MAC_ADDRESSES_IN_LEN != 0);

	rc = efx_mcdi_rpc(efx, MC_CMD_GET_MAC_ADDRESSES, NULL, 0,
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		return rc;
	if (outlen < MC_CMD_GET_MAC_ADDRESSES_OUT_LEN)
		return -EIO;

	memcpy(mac_address,
	       MCDI_PTR(outbuf, GET_MAC_ADDRESSES_OUT_MAC_ADDR_BASE), ETH_ALEN);
	return 0;
}
static int efx_ef10_probe(struct efx_nic *efx)
{
	struct efx_ef10_nic_data *nic_data;
	int i, rc;

	/* We can have one VI for each 8K region.  However, until we
	 * use TX option descriptors we need two TX queues per channel.
	 */
	efx->max_channels =
		min_t(unsigned int,
		      EFX_MAX_CHANNELS,
		      resource_size(&efx->pci_dev->resource[EFX_MEM_BAR]) /
		      (EFX_VI_PAGE_SIZE * EFX_TXQ_TYPES));
	BUG_ON(efx->max_channels == 0);

	nic_data = kzalloc(sizeof(*nic_data), GFP_KERNEL);
	if (!nic_data)
		return -ENOMEM;
	efx->nic_data = nic_data;

	rc = efx_nic_alloc_buffer(efx, &nic_data->mcdi_buf,
				  8 + MCDI_CTL_SDU_LEN_MAX_V2, GFP_KERNEL);
	if (rc)
		goto fail1;

	/* Get the MC's warm boot count.  In case it's rebooting right
	 * now, be prepared to retry.
	 */
	i = 0;
	for (;;) {
		rc = efx_ef10_get_warm_boot_count(efx);
		if (rc >= 0)
			break;
		if (++i == 5)
			goto fail2;
		ssleep(1);
	}
	nic_data->warm_boot_count = rc;

	nic_data->rx_rss_context = EFX_EF10_RSS_CONTEXT_INVALID;

	/* In case we're recovering from a crash (kexec), we want to
	 * cancel any outstanding request by the previous user of this
	 * function.  We send a special message using the least
	 * significant bits of the 'high' (doorbell) register.
	 */
	_efx_writed(efx, cpu_to_le32(1), ER_DZ_MC_DB_HWRD);

	rc = efx_mcdi_init(efx);
	if (rc)
		goto fail2;

	/* Reset (most) configuration for this function */
	rc = efx_mcdi_reset(efx, RESET_TYPE_ALL);
	if (rc)
		goto fail3;

	/* Enable event logging */
	rc = efx_mcdi_log_ctrl(efx, true, false, 0);
	if (rc)
		goto fail3;

	rc = efx_ef10_init_datapath_caps(efx);
	if (rc < 0)
		goto fail3;

	efx->rx_packet_len_offset =
		ES_DZ_RX_PREFIX_PKTLEN_OFST - ES_DZ_RX_PREFIX_SIZE;

	rc = efx_mcdi_port_get_number(efx);
	if (rc < 0)
		goto fail3;
	efx->port_num = rc;

	rc = efx_ef10_get_mac_address(efx, efx->net_dev->perm_addr);
	if (rc)
		goto fail3;

	rc = efx_ef10_get_sysclk_freq(efx);
	if (rc < 0)
		goto fail3;
	efx->timer_quantum_ns = 1536000 / rc; /* 1536 cycles */
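	/* Worked example: the 1536000 constant implies GET_CLOCK
	 * reports sysclk in MHz, so e.g. a 200MHz sysclk gives
	 * 1536000 / 200 = 7680ns per timer tick (1536 cycles).
	 */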
	/* Check whether firmware supports bug 35388 workaround */
	rc = efx_mcdi_set_workaround(efx, MC_CMD_WORKAROUND_BUG35388, true);
	if (rc == 0)
		nic_data->workaround_35388 = true;
	else if (rc != -ENOSYS && rc != -ENOENT)
		goto fail3;
	netif_dbg(efx, probe, efx->net_dev,
		  "workaround for bug 35388 is %sabled\n",
		  nic_data->workaround_35388 ? "en" : "dis");

	rc = efx_mcdi_mon_probe(efx);
	if (rc)
		goto fail3;

	efx_ptp_probe(efx, NULL);

	return 0;

fail3:
	efx_mcdi_fini(efx);
fail2:
	efx_nic_free_buffer(efx, &nic_data->mcdi_buf);
fail1:
	kfree(nic_data);
	efx->nic_data = NULL;
	return rc;
}
static int efx_ef10_free_vis(struct efx_nic *efx)
{
	MCDI_DECLARE_BUF_OUT_OR_ERR(outbuf, 0);
	size_t outlen;
	int rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FREE_VIS, NULL, 0,
				    outbuf, sizeof(outbuf), &outlen);

	/* -EALREADY means nothing to free, so ignore */
	if (rc == -EALREADY)
		rc = 0;
	if (rc)
		efx_mcdi_display_error(efx, MC_CMD_FREE_VIS, 0, outbuf, outlen,
				       rc);
	return rc;
}
#ifdef EFX_USE_PIO

static void efx_ef10_free_piobufs(struct efx_nic *efx)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	MCDI_DECLARE_BUF(inbuf, MC_CMD_FREE_PIOBUF_IN_LEN);
	unsigned int i;
	int rc;

	BUILD_BUG_ON(MC_CMD_FREE_PIOBUF_OUT_LEN != 0);

	for (i = 0; i < nic_data->n_piobufs; i++) {
		MCDI_SET_DWORD(inbuf, FREE_PIOBUF_IN_PIOBUF_HANDLE,
			       nic_data->piobuf_handle[i]);
		rc = efx_mcdi_rpc(efx, MC_CMD_FREE_PIOBUF, inbuf, sizeof(inbuf),
				  NULL, 0, NULL);
		WARN_ON(rc);
	}

	nic_data->n_piobufs = 0;
}
static int efx_ef10_alloc_piobufs(struct efx_nic *efx, unsigned int n)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	MCDI_DECLARE_BUF(outbuf, MC_CMD_ALLOC_PIOBUF_OUT_LEN);
	unsigned int i;
	size_t outlen;
	int rc = 0;

	BUILD_BUG_ON(MC_CMD_ALLOC_PIOBUF_IN_LEN != 0);

	for (i = 0; i < n; i++) {
		rc = efx_mcdi_rpc(efx, MC_CMD_ALLOC_PIOBUF, NULL, 0,
				  outbuf, sizeof(outbuf), &outlen);
		if (rc)
			break;
		if (outlen < MC_CMD_ALLOC_PIOBUF_OUT_LEN) {
			rc = -EIO;
			break;
		}
		nic_data->piobuf_handle[i] =
			MCDI_DWORD(outbuf, ALLOC_PIOBUF_OUT_PIOBUF_HANDLE);
		netif_dbg(efx, probe, efx->net_dev,
			  "allocated PIO buffer %u handle %x\n", i,
			  nic_data->piobuf_handle[i]);
	}

	nic_data->n_piobufs = i;
	if (rc)
		efx_ef10_free_piobufs(efx);
	return rc;
}
static int efx_ef10_link_piobufs(struct efx_nic *efx)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	MCDI_DECLARE_BUF(inbuf,
			 max(MC_CMD_LINK_PIOBUF_IN_LEN,
			     MC_CMD_UNLINK_PIOBUF_IN_LEN));
	struct efx_channel *channel;
	struct efx_tx_queue *tx_queue;
	unsigned int offset, index;
	int rc;

	BUILD_BUG_ON(MC_CMD_LINK_PIOBUF_OUT_LEN != 0);
	BUILD_BUG_ON(MC_CMD_UNLINK_PIOBUF_OUT_LEN != 0);

	/* Link a buffer to each VI in the write-combining mapping */
	for (index = 0; index < nic_data->n_piobufs; ++index) {
		MCDI_SET_DWORD(inbuf, LINK_PIOBUF_IN_PIOBUF_HANDLE,
			       nic_data->piobuf_handle[index]);
		MCDI_SET_DWORD(inbuf, LINK_PIOBUF_IN_TXQ_INSTANCE,
			       nic_data->pio_write_vi_base + index);
		rc = efx_mcdi_rpc(efx, MC_CMD_LINK_PIOBUF,
				  inbuf, MC_CMD_LINK_PIOBUF_IN_LEN,
				  NULL, 0, NULL);
		if (rc) {
			netif_err(efx, drv, efx->net_dev,
				  "failed to link VI %u to PIO buffer %u (%d)\n",
				  nic_data->pio_write_vi_base + index, index,
				  rc);
			goto fail;
		}
		netif_dbg(efx, probe, efx->net_dev,
			  "linked VI %u to PIO buffer %u\n",
			  nic_data->pio_write_vi_base + index, index);
	}

	/* Link a buffer to each TX queue */
	efx_for_each_channel(channel, efx) {
		efx_for_each_channel_tx_queue(tx_queue, channel) {
			/* We assign the PIO buffers to queues in
			 * reverse order to allow for the following
			 * special case.
			 */
			offset = ((efx->tx_channel_offset + efx->n_tx_channels -
				   tx_queue->channel->channel - 1) *
				  efx_piobuf_size);
			index = offset / ER_DZ_TX_PIOBUF_SIZE;
			offset = offset % ER_DZ_TX_PIOBUF_SIZE;
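			/* Worked example (values illustrative only:
			 * assume 2KB PIO buffers carved into
			 * 256-byte copy buffers): with 4 TX channels
			 * and tx_channel_offset 0, the last channel
			 * gets offset 0 of buffer 0 and the first
			 * channel gets offset 3 * 256 = 768.
			 */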
			/* When the host page size is 4K, the first
			 * host page in the WC mapping may be within
			 * the same VI page as the last TX queue.  We
			 * can only link one buffer to each VI.
			 */
			if (tx_queue->queue == nic_data->pio_write_vi_base) {
				BUG_ON(index != 0);
				rc = 0;
			} else {
				MCDI_SET_DWORD(inbuf,
					       LINK_PIOBUF_IN_PIOBUF_HANDLE,
					       nic_data->piobuf_handle[index]);
				MCDI_SET_DWORD(inbuf,
					       LINK_PIOBUF_IN_TXQ_INSTANCE,
					       tx_queue->queue);
				rc = efx_mcdi_rpc(efx, MC_CMD_LINK_PIOBUF,
						  inbuf, MC_CMD_LINK_PIOBUF_IN_LEN,
						  NULL, 0, NULL);
			}

			if (rc) {
				/* This is non-fatal; the TX path just
				 * won't use PIO for this queue
				 */
				netif_err(efx, drv, efx->net_dev,
					  "failed to link VI %u to PIO buffer %u (%d)\n",
					  tx_queue->queue, index, rc);
				tx_queue->piobuf = NULL;
			} else {
				tx_queue->piobuf =
					nic_data->pio_write_base +
					index * EFX_VI_PAGE_SIZE + offset;
				tx_queue->piobuf_offset = offset;
				netif_dbg(efx, probe, efx->net_dev,
					  "linked VI %u to PIO buffer %u offset %x addr %p\n",
					  tx_queue->queue, index,
					  tx_queue->piobuf_offset,
					  tx_queue->piobuf);
			}
		}
	}

	return 0;

fail:
	while (index--) {
		MCDI_SET_DWORD(inbuf, UNLINK_PIOBUF_IN_TXQ_INSTANCE,
			       nic_data->pio_write_vi_base + index);
		efx_mcdi_rpc(efx, MC_CMD_UNLINK_PIOBUF,
			     inbuf, MC_CMD_UNLINK_PIOBUF_IN_LEN,
			     NULL, 0, NULL);
	}
	return rc;
}

#else /* !EFX_USE_PIO */
static int efx_ef10_alloc_piobufs(struct efx_nic *efx, unsigned int n)
{
	return n == 0 ? 0 : -ENOBUFS;
}

static int efx_ef10_link_piobufs(struct efx_nic *efx)
{
	return 0;
}

static void efx_ef10_free_piobufs(struct efx_nic *efx)
{
}

#endif /* EFX_USE_PIO */
static void efx_ef10_remove(struct efx_nic *efx)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	int rc;

	efx_ptp_remove(efx);

	efx_mcdi_mon_remove(efx);

	efx_ef10_rx_free_indir_table(efx);

	if (nic_data->wc_membase)
		iounmap(nic_data->wc_membase);

	rc = efx_ef10_free_vis(efx);
	WARN_ON(rc != 0);

	if (!nic_data->must_restore_piobufs)
		efx_ef10_free_piobufs(efx);

	efx_mcdi_fini(efx);
	efx_nic_free_buffer(efx, &nic_data->mcdi_buf);
	kfree(nic_data);
}
static int efx_ef10_alloc_vis(struct efx_nic *efx,
			      unsigned int min_vis, unsigned int max_vis)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_ALLOC_VIS_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_ALLOC_VIS_OUT_LEN);
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	size_t outlen;
	int rc;

	MCDI_SET_DWORD(inbuf, ALLOC_VIS_IN_MIN_VI_COUNT, min_vis);
	MCDI_SET_DWORD(inbuf, ALLOC_VIS_IN_MAX_VI_COUNT, max_vis);
	rc = efx_mcdi_rpc(efx, MC_CMD_ALLOC_VIS, inbuf, sizeof(inbuf),
			  outbuf, sizeof(outbuf), &outlen);
	if (rc != 0)
		return rc;

	if (outlen < MC_CMD_ALLOC_VIS_OUT_LEN)
		return -EIO;

	netif_dbg(efx, drv, efx->net_dev, "base VI is 0x%03x\n",
		  MCDI_DWORD(outbuf, ALLOC_VIS_OUT_VI_BASE));

	nic_data->vi_base = MCDI_DWORD(outbuf, ALLOC_VIS_OUT_VI_BASE);
	nic_data->n_allocated_vis = MCDI_DWORD(outbuf, ALLOC_VIS_OUT_VI_COUNT);
	return 0;
}
/* Note that the failure path of this function does not free
 * resources, as this will be done by efx_ef10_remove().
 */
static int efx_ef10_dimension_resources(struct efx_nic *efx)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	unsigned int uc_mem_map_size, wc_mem_map_size;
	unsigned int min_vis, pio_write_vi_base, max_vis;
	void __iomem *membase;
	int rc;

	min_vis = max(efx->n_channels, efx->n_tx_channels * EFX_TXQ_TYPES);

#ifdef EFX_USE_PIO
	/* Try to allocate PIO buffers if wanted and if the full
	 * number of PIO buffers would be sufficient to allocate one
	 * copy-buffer per TX channel.  Failure is non-fatal, as there
	 * are only a small number of PIO buffers shared between all
	 * functions of the controller.
	 */
	if (efx_piobuf_size != 0 &&
	    ER_DZ_TX_PIOBUF_SIZE / efx_piobuf_size * EF10_TX_PIOBUF_COUNT >=
	    efx->n_tx_channels) {
		unsigned int n_piobufs =
			DIV_ROUND_UP(efx->n_tx_channels,
				     ER_DZ_TX_PIOBUF_SIZE / efx_piobuf_size);
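		/* e.g. with 4 TX channels and (illustratively) 2KB
		 * PIO buffers split into 256-byte copy buffers, one
		 * buffer serves 8 channels, so n_piobufs = 1.
		 */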
		rc = efx_ef10_alloc_piobufs(efx, n_piobufs);
		if (rc)
			netif_err(efx, probe, efx->net_dev,
				  "failed to allocate PIO buffers (%d)\n", rc);
		else
			netif_dbg(efx, probe, efx->net_dev,
				  "allocated %u PIO buffers\n", n_piobufs);
	}
#else
	nic_data->n_piobufs = 0;
#endif

	/* PIO buffers should be mapped with write-combining enabled,
	 * and we want to make single UC and WC mappings rather than
	 * several of each (in fact that's the only option if host
	 * page size is >4K).  So we may allocate some extra VIs just
	 * for writing PIO buffers through.
	 */
	uc_mem_map_size = PAGE_ALIGN((min_vis - 1) * EFX_VI_PAGE_SIZE +
				     ER_DZ_TX_PIOBUF);
	if (nic_data->n_piobufs) {
		pio_write_vi_base = uc_mem_map_size / EFX_VI_PAGE_SIZE;
		wc_mem_map_size = (PAGE_ALIGN((pio_write_vi_base +
					       nic_data->n_piobufs) *
					      EFX_VI_PAGE_SIZE) -
				   uc_mem_map_size);
		max_vis = pio_write_vi_base + nic_data->n_piobufs;
	} else {
		pio_write_vi_base = 0;
		wc_mem_map_size = 0;
		max_vis = min_vis;
	}

	/* In case the last attached driver failed to free VIs, do it now */
	rc = efx_ef10_free_vis(efx);
	if (rc != 0)
		return rc;

	rc = efx_ef10_alloc_vis(efx, min_vis, max_vis);
	if (rc != 0)
		return rc;

	/* If we didn't get enough VIs to map all the PIO buffers, free the
	 * PIO buffers
	 */
	if (nic_data->n_piobufs &&
	    nic_data->n_allocated_vis <
	    pio_write_vi_base + nic_data->n_piobufs) {
		netif_dbg(efx, probe, efx->net_dev,
			  "%u VIs are not sufficient to map %u PIO buffers\n",
			  nic_data->n_allocated_vis, nic_data->n_piobufs);
		efx_ef10_free_piobufs(efx);
	}

	/* Shrink the original UC mapping of the memory BAR */
	membase = ioremap_nocache(efx->membase_phys, uc_mem_map_size);
	if (!membase) {
		netif_err(efx, probe, efx->net_dev,
			  "could not shrink memory BAR to %x\n",
			  uc_mem_map_size);
		return -ENOMEM;
	}
	iounmap(efx->membase);
	efx->membase = membase;

	/* Set up the WC mapping if needed */
	if (wc_mem_map_size) {
		nic_data->wc_membase = ioremap_wc(efx->membase_phys +
						  uc_mem_map_size,
						  wc_mem_map_size);
		if (!nic_data->wc_membase) {
			netif_err(efx, probe, efx->net_dev,
				  "could not allocate WC mapping of size %x\n",
				  wc_mem_map_size);
			return -ENOMEM;
		}
		nic_data->pio_write_vi_base = pio_write_vi_base;
		nic_data->pio_write_base =
			nic_data->wc_membase +
			(pio_write_vi_base * EFX_VI_PAGE_SIZE + ER_DZ_TX_PIOBUF -
			 uc_mem_map_size);

		rc = efx_ef10_link_piobufs(efx);
		if (rc)
			efx_ef10_free_piobufs(efx);
	}

	netif_dbg(efx, probe, efx->net_dev,
		  "memory BAR at %pa (virtual %p+%x UC, %p+%x WC)\n",
		  &efx->membase_phys, efx->membase, uc_mem_map_size,
		  nic_data->wc_membase, wc_mem_map_size);

	return 0;
}
static int efx_ef10_init_nic(struct efx_nic *efx)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	int rc;

	if (nic_data->must_check_datapath_caps) {
		rc = efx_ef10_init_datapath_caps(efx);
		if (rc)
			return rc;
		nic_data->must_check_datapath_caps = false;
	}

	if (nic_data->must_realloc_vis) {
		/* We cannot let the number of VIs change now */
		rc = efx_ef10_alloc_vis(efx, nic_data->n_allocated_vis,
					nic_data->n_allocated_vis);
		if (rc)
			return rc;
		nic_data->must_realloc_vis = false;
	}

	if (nic_data->must_restore_piobufs && nic_data->n_piobufs) {
		rc = efx_ef10_alloc_piobufs(efx, nic_data->n_piobufs);
		if (rc == 0) {
			rc = efx_ef10_link_piobufs(efx);
			if (rc < 0)
				efx_ef10_free_piobufs(efx);
		}

		/* Log an error on failure, but this is non-fatal */
		if (rc)
			netif_err(efx, drv, efx->net_dev,
				  "failed to restore PIO buffers (%d)\n", rc);
		nic_data->must_restore_piobufs = false;
	}

	efx_ef10_rx_push_rss_config(efx);
	return 0;
}
static void efx_ef10_reset_mc_allocations(struct efx_nic *efx)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;

	/* All our allocations have been reset */
	nic_data->must_realloc_vis = true;
	nic_data->must_restore_filters = true;
	nic_data->must_restore_piobufs = true;
	nic_data->rx_rss_context = EFX_EF10_RSS_CONTEXT_INVALID;
}
static int efx_ef10_map_reset_flags(u32 *flags)
{
	enum {
		EF10_RESET_PORT = ((ETH_RESET_MAC | ETH_RESET_PHY) <<
				   ETH_RESET_SHARED_SHIFT),
		EF10_RESET_MC = ((ETH_RESET_DMA | ETH_RESET_FILTER |
				  ETH_RESET_OFFLOAD | ETH_RESET_MAC |
				  ETH_RESET_PHY | ETH_RESET_MGMT) <<
				 ETH_RESET_SHARED_SHIFT)
	};

	/* We assume for now that our PCI function is permitted to
	 * reset everything.
	 */

	if ((*flags & EF10_RESET_MC) == EF10_RESET_MC) {
		*flags &= ~EF10_RESET_MC;
		return RESET_TYPE_WORLD;
	}

	if ((*flags & EF10_RESET_PORT) == EF10_RESET_PORT) {
		*flags &= ~EF10_RESET_PORT;
		return RESET_TYPE_ALL;
	}

	/* no invisible reset implemented */

	return -EINVAL;
}
static int efx_ef10_reset(struct efx_nic *efx, enum reset_type reset_type)
{
	int rc = efx_mcdi_reset(efx, reset_type);

	/* If it was a port reset, trigger reallocation of MC resources.
	 * Note that on an MC reset nothing needs to be done now because we'll
	 * detect the MC reset later and handle it then.
	 */
	if (reset_type == RESET_TYPE_ALL && !rc)
		efx_ef10_reset_mc_allocations(efx);
	return rc;
}
#define EF10_DMA_STAT(ext_name, mcdi_name)			\
	[EF10_STAT_ ## ext_name] =				\
	{ #ext_name, 64, 8 * MC_CMD_MAC_ ## mcdi_name }
#define EF10_DMA_INVIS_STAT(int_name, mcdi_name)		\
	[EF10_STAT_ ## int_name] =				\
	{ NULL, 64, 8 * MC_CMD_MAC_ ## mcdi_name }
#define EF10_OTHER_STAT(ext_name)				\
	[EF10_STAT_ ## ext_name] = { #ext_name, 0, 0 }
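/* Each descriptor is { name, width-in-bits, DMA offset }: the MC DMAs
 * MAC statistics as an array of 64-bit words, so a stat's byte offset
 * is 8 * its MC_CMD_MAC_ index.  An INVIS stat is DMAd but has no
 * ethtool name; an OTHER stat (width 0) is derived by the driver
 * rather than DMAd.
 */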
static const struct efx_hw_stat_desc efx_ef10_stat_desc[EF10_STAT_COUNT] = {
	EF10_DMA_STAT(tx_bytes, TX_BYTES),
	EF10_DMA_STAT(tx_packets, TX_PKTS),
	EF10_DMA_STAT(tx_pause, TX_PAUSE_PKTS),
	EF10_DMA_STAT(tx_control, TX_CONTROL_PKTS),
	EF10_DMA_STAT(tx_unicast, TX_UNICAST_PKTS),
	EF10_DMA_STAT(tx_multicast, TX_MULTICAST_PKTS),
	EF10_DMA_STAT(tx_broadcast, TX_BROADCAST_PKTS),
	EF10_DMA_STAT(tx_lt64, TX_LT64_PKTS),
	EF10_DMA_STAT(tx_64, TX_64_PKTS),
	EF10_DMA_STAT(tx_65_to_127, TX_65_TO_127_PKTS),
	EF10_DMA_STAT(tx_128_to_255, TX_128_TO_255_PKTS),
	EF10_DMA_STAT(tx_256_to_511, TX_256_TO_511_PKTS),
	EF10_DMA_STAT(tx_512_to_1023, TX_512_TO_1023_PKTS),
	EF10_DMA_STAT(tx_1024_to_15xx, TX_1024_TO_15XX_PKTS),
	EF10_DMA_STAT(tx_15xx_to_jumbo, TX_15XX_TO_JUMBO_PKTS),
	EF10_DMA_STAT(rx_bytes, RX_BYTES),
	EF10_DMA_INVIS_STAT(rx_bytes_minus_good_bytes, RX_BAD_BYTES),
	EF10_OTHER_STAT(rx_good_bytes),
	EF10_OTHER_STAT(rx_bad_bytes),
	EF10_DMA_STAT(rx_packets, RX_PKTS),
	EF10_DMA_STAT(rx_good, RX_GOOD_PKTS),
	EF10_DMA_STAT(rx_bad, RX_BAD_FCS_PKTS),
	EF10_DMA_STAT(rx_pause, RX_PAUSE_PKTS),
	EF10_DMA_STAT(rx_control, RX_CONTROL_PKTS),
	EF10_DMA_STAT(rx_unicast, RX_UNICAST_PKTS),
	EF10_DMA_STAT(rx_multicast, RX_MULTICAST_PKTS),
	EF10_DMA_STAT(rx_broadcast, RX_BROADCAST_PKTS),
	EF10_DMA_STAT(rx_lt64, RX_UNDERSIZE_PKTS),
	EF10_DMA_STAT(rx_64, RX_64_PKTS),
	EF10_DMA_STAT(rx_65_to_127, RX_65_TO_127_PKTS),
	EF10_DMA_STAT(rx_128_to_255, RX_128_TO_255_PKTS),
	EF10_DMA_STAT(rx_256_to_511, RX_256_TO_511_PKTS),
	EF10_DMA_STAT(rx_512_to_1023, RX_512_TO_1023_PKTS),
	EF10_DMA_STAT(rx_1024_to_15xx, RX_1024_TO_15XX_PKTS),
	EF10_DMA_STAT(rx_15xx_to_jumbo, RX_15XX_TO_JUMBO_PKTS),
	EF10_DMA_STAT(rx_gtjumbo, RX_GTJUMBO_PKTS),
	EF10_DMA_STAT(rx_bad_gtjumbo, RX_JABBER_PKTS),
	EF10_DMA_STAT(rx_overflow, RX_OVERFLOW_PKTS),
	EF10_DMA_STAT(rx_align_error, RX_ALIGN_ERROR_PKTS),
	EF10_DMA_STAT(rx_length_error, RX_LENGTH_ERROR_PKTS),
	EF10_DMA_STAT(rx_nodesc_drops, RX_NODESC_DROPS),
	EF10_DMA_STAT(rx_pm_trunc_bb_overflow, PM_TRUNC_BB_OVERFLOW),
	EF10_DMA_STAT(rx_pm_discard_bb_overflow, PM_DISCARD_BB_OVERFLOW),
	EF10_DMA_STAT(rx_pm_trunc_vfifo_full, PM_TRUNC_VFIFO_FULL),
	EF10_DMA_STAT(rx_pm_discard_vfifo_full, PM_DISCARD_VFIFO_FULL),
	EF10_DMA_STAT(rx_pm_trunc_qbb, PM_TRUNC_QBB),
	EF10_DMA_STAT(rx_pm_discard_qbb, PM_DISCARD_QBB),
	EF10_DMA_STAT(rx_pm_discard_mapping, PM_DISCARD_MAPPING),
	EF10_DMA_STAT(rx_dp_q_disabled_packets, RXDP_Q_DISABLED_PKTS),
	EF10_DMA_STAT(rx_dp_di_dropped_packets, RXDP_DI_DROPPED_PKTS),
	EF10_DMA_STAT(rx_dp_streaming_packets, RXDP_STREAMING_PKTS),
	EF10_DMA_STAT(rx_dp_hlb_fetch, RXDP_EMERGENCY_FETCH_CONDITIONS),
	EF10_DMA_STAT(rx_dp_hlb_wait, RXDP_EMERGENCY_WAIT_CONDITIONS),
};
#define HUNT_COMMON_STAT_MASK ((1ULL << EF10_STAT_tx_bytes) |		\
			       (1ULL << EF10_STAT_tx_packets) |		\
			       (1ULL << EF10_STAT_tx_pause) |		\
			       (1ULL << EF10_STAT_tx_unicast) |		\
			       (1ULL << EF10_STAT_tx_multicast) |	\
			       (1ULL << EF10_STAT_tx_broadcast) |	\
			       (1ULL << EF10_STAT_rx_bytes) |		\
			       (1ULL << EF10_STAT_rx_bytes_minus_good_bytes) | \
			       (1ULL << EF10_STAT_rx_good_bytes) |	\
			       (1ULL << EF10_STAT_rx_bad_bytes) |	\
			       (1ULL << EF10_STAT_rx_packets) |		\
			       (1ULL << EF10_STAT_rx_good) |		\
			       (1ULL << EF10_STAT_rx_bad) |		\
			       (1ULL << EF10_STAT_rx_pause) |		\
			       (1ULL << EF10_STAT_rx_control) |		\
			       (1ULL << EF10_STAT_rx_unicast) |		\
			       (1ULL << EF10_STAT_rx_multicast) |	\
			       (1ULL << EF10_STAT_rx_broadcast) |	\
			       (1ULL << EF10_STAT_rx_lt64) |		\
			       (1ULL << EF10_STAT_rx_64) |		\
			       (1ULL << EF10_STAT_rx_65_to_127) |	\
			       (1ULL << EF10_STAT_rx_128_to_255) |	\
			       (1ULL << EF10_STAT_rx_256_to_511) |	\
			       (1ULL << EF10_STAT_rx_512_to_1023) |	\
			       (1ULL << EF10_STAT_rx_1024_to_15xx) |	\
			       (1ULL << EF10_STAT_rx_15xx_to_jumbo) |	\
			       (1ULL << EF10_STAT_rx_gtjumbo) |		\
			       (1ULL << EF10_STAT_rx_bad_gtjumbo) |	\
			       (1ULL << EF10_STAT_rx_overflow) |	\
			       (1ULL << EF10_STAT_rx_nodesc_drops))
/* These statistics are only provided by the 10G MAC.  For a 10G/40G
 * switchable port we do not expose these because they might not
 * include all the packets they should.
 */
#define HUNT_10G_ONLY_STAT_MASK ((1ULL << EF10_STAT_tx_control) |	\
				 (1ULL << EF10_STAT_tx_lt64) |		\
				 (1ULL << EF10_STAT_tx_64) |		\
				 (1ULL << EF10_STAT_tx_65_to_127) |	\
				 (1ULL << EF10_STAT_tx_128_to_255) |	\
				 (1ULL << EF10_STAT_tx_256_to_511) |	\
				 (1ULL << EF10_STAT_tx_512_to_1023) |	\
				 (1ULL << EF10_STAT_tx_1024_to_15xx) |	\
				 (1ULL << EF10_STAT_tx_15xx_to_jumbo))
/* These statistics are only provided by the 40G MAC.  For a 10G/40G
 * switchable port we do expose these because the errors will otherwise
 * not be reported.
 */
#define HUNT_40G_EXTRA_STAT_MASK ((1ULL << EF10_STAT_rx_align_error) |	\
				  (1ULL << EF10_STAT_rx_length_error))
/* These statistics are only provided if the firmware supports the
 * capability PM_AND_RXDP_COUNTERS.
 */
#define HUNT_PM_AND_RXDP_STAT_MASK (					\
	(1ULL << EF10_STAT_rx_pm_trunc_bb_overflow) |			\
	(1ULL << EF10_STAT_rx_pm_discard_bb_overflow) |			\
	(1ULL << EF10_STAT_rx_pm_trunc_vfifo_full) |			\
	(1ULL << EF10_STAT_rx_pm_discard_vfifo_full) |			\
	(1ULL << EF10_STAT_rx_pm_trunc_qbb) |				\
	(1ULL << EF10_STAT_rx_pm_discard_qbb) |				\
	(1ULL << EF10_STAT_rx_pm_discard_mapping) |			\
	(1ULL << EF10_STAT_rx_dp_q_disabled_packets) |			\
	(1ULL << EF10_STAT_rx_dp_di_dropped_packets) |			\
	(1ULL << EF10_STAT_rx_dp_streaming_packets) |			\
	(1ULL << EF10_STAT_rx_dp_hlb_fetch) |				\
	(1ULL << EF10_STAT_rx_dp_hlb_wait))
static u64 efx_ef10_raw_stat_mask(struct efx_nic *efx)
{
	u64 raw_mask = HUNT_COMMON_STAT_MASK;
	u32 port_caps = efx_mcdi_phy_get_caps(efx);
	struct efx_ef10_nic_data *nic_data = efx->nic_data;

	if (port_caps & (1 << MC_CMD_PHY_CAP_40000FDX_LBN))
		raw_mask |= HUNT_40G_EXTRA_STAT_MASK;
	else
		raw_mask |= HUNT_10G_ONLY_STAT_MASK;

	if (nic_data->datapath_caps &
	    (1 << MC_CMD_GET_CAPABILITIES_OUT_PM_AND_RXDP_COUNTERS_LBN))
		raw_mask |= HUNT_PM_AND_RXDP_STAT_MASK;

	return raw_mask;
}
static void efx_ef10_get_stat_mask(struct efx_nic *efx, unsigned long *mask)
{
	u64 raw_mask = efx_ef10_raw_stat_mask(efx);

#if BITS_PER_LONG == 64
	mask[0] = raw_mask;
#else
	/* On 32-bit hosts, split the u64 mask across two longs */
	mask[0] = raw_mask & 0xffffffff;
	mask[1] = raw_mask >> 32;
#endif
}
static size_t efx_ef10_describe_stats(struct efx_nic *efx, u8 *names)
{
	DECLARE_BITMAP(mask, EF10_STAT_COUNT);

	efx_ef10_get_stat_mask(efx, mask);
	return efx_nic_describe_stats(efx_ef10_stat_desc, EF10_STAT_COUNT,
				      mask, names);
}
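/* The MC writes GENERATION_START, then the statistics, then
 * GENERATION_END.  Reading END first and START last (with read
 * barriers in between) therefore detects a read that raced with a
 * DMA update: if the two generation counts differ, the snapshot is
 * inconsistent and the caller must retry.
 */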
static int efx_ef10_try_update_nic_stats(struct efx_nic *efx)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	DECLARE_BITMAP(mask, EF10_STAT_COUNT);
	__le64 generation_start, generation_end;
	u64 *stats = nic_data->stats;
	__le64 *dma_stats;

	efx_ef10_get_stat_mask(efx, mask);

	dma_stats = efx->stats_buffer.addr;
	nic_data = efx->nic_data;

	generation_end = dma_stats[MC_CMD_MAC_GENERATION_END];
	if (generation_end == EFX_MC_STATS_GENERATION_INVALID)
		return 0;
	rmb();
	efx_nic_update_stats(efx_ef10_stat_desc, EF10_STAT_COUNT, mask,
			     stats, efx->stats_buffer.addr, false);
	rmb();
	generation_start = dma_stats[MC_CMD_MAC_GENERATION_START];
	if (generation_end != generation_start)
		return -EAGAIN;

	/* Update derived statistics */
	efx_nic_fix_nodesc_drop_stat(efx, &stats[EF10_STAT_rx_nodesc_drops]);
	stats[EF10_STAT_rx_good_bytes] =
		stats[EF10_STAT_rx_bytes] -
		stats[EF10_STAT_rx_bytes_minus_good_bytes];
	efx_update_diff_stat(&stats[EF10_STAT_rx_bad_bytes],
			     stats[EF10_STAT_rx_bytes_minus_good_bytes]);

	return 0;
}
static size_t efx_ef10_update_stats(struct efx_nic *efx, u64 *full_stats,
				    struct rtnl_link_stats64 *core_stats)
{
	DECLARE_BITMAP(mask, EF10_STAT_COUNT);
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	u64 *stats = nic_data->stats;
	size_t stats_count = 0, index;
	int retry;

	efx_ef10_get_stat_mask(efx, mask);

	/* If we're unlucky enough to read statistics during the DMA, wait
	 * up to 10ms for it to finish (typically takes <500us)
	 */
	for (retry = 0; retry < 100; ++retry) {
		if (efx_ef10_try_update_nic_stats(efx) == 0)
			break;
		udelay(100);
	}

	if (full_stats) {
		for_each_set_bit(index, mask, EF10_STAT_COUNT) {
			if (efx_ef10_stat_desc[index].name) {
				*full_stats++ = stats[index];
				++stats_count;
			}
		}
	}

	if (core_stats) {
		core_stats->rx_packets = stats[EF10_STAT_rx_packets];
		core_stats->tx_packets = stats[EF10_STAT_tx_packets];
		core_stats->rx_bytes = stats[EF10_STAT_rx_bytes];
		core_stats->tx_bytes = stats[EF10_STAT_tx_bytes];
		core_stats->rx_dropped = stats[EF10_STAT_rx_nodesc_drops];
		core_stats->multicast = stats[EF10_STAT_rx_multicast];
		core_stats->rx_length_errors =
			stats[EF10_STAT_rx_gtjumbo] +
			stats[EF10_STAT_rx_length_error];
		core_stats->rx_crc_errors = stats[EF10_STAT_rx_bad];
		core_stats->rx_frame_errors = stats[EF10_STAT_rx_align_error];
		core_stats->rx_fifo_errors = stats[EF10_STAT_rx_overflow];
		core_stats->rx_errors = (core_stats->rx_length_errors +
					 core_stats->rx_crc_errors +
					 core_stats->rx_frame_errors);
	}

	return stats_count;
}
static void efx_ef10_push_irq_moderation(struct efx_channel *channel)
{
	struct efx_nic *efx = channel->efx;
	unsigned int mode, value;
	efx_dword_t timer_cmd;

	if (channel->irq_moderation) {
		mode = 3;
		value = channel->irq_moderation - 1;
	} else {
		mode = 0;
		value = 0;
	}

	if (EFX_EF10_WORKAROUND_35388(efx)) {
		EFX_POPULATE_DWORD_3(timer_cmd, ERF_DD_EVQ_IND_TIMER_FLAGS,
				     EFE_DD_EVQ_IND_TIMER_FLAGS,
				     ERF_DD_EVQ_IND_TIMER_MODE, mode,
				     ERF_DD_EVQ_IND_TIMER_VAL, value);
		efx_writed_page(efx, &timer_cmd, ER_DD_EVQ_INDIRECT,
				channel->channel);
	} else {
		EFX_POPULATE_DWORD_2(timer_cmd, ERF_DZ_TC_TIMER_MODE, mode,
				     ERF_DZ_TC_TIMER_VAL, value);
		efx_writed_page(efx, &timer_cmd, ER_DZ_EVQ_TMR,
				channel->channel);
	}
}
static void efx_ef10_get_wol(struct efx_nic *efx, struct ethtool_wolinfo *wol)
{
	wol->supported = 0;
	wol->wolopts = 0;
	memset(&wol->sopass, 0, sizeof(wol->sopass));
}

static int efx_ef10_set_wol(struct efx_nic *efx, u32 type)
{
	if (type != 0)
		return -EINVAL;
	return 0;
}
static void efx_ef10_mcdi_request(struct efx_nic *efx,
				  const efx_dword_t *hdr, size_t hdr_len,
				  const efx_dword_t *sdu, size_t sdu_len)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	u8 *pdu = nic_data->mcdi_buf.addr;

	memcpy(pdu, hdr, hdr_len);
	memcpy(pdu + hdr_len, sdu, sdu_len);
	wmb();

	/* The hardware provides 'low' and 'high' (doorbell) registers
	 * for passing the 64-bit address of an MCDI request to
	 * firmware.  However the dwords are swapped by firmware.  The
	 * least significant bits of the doorbell are then 0 for all
	 * MCDI requests due to alignment.
	 */
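	/* Because of the swap, the write of the low dword to
	 * ER_DZ_MC_DB_HWRD below is what actually rings the doorbell,
	 * and the alignment guarantee is what lets efx_ef10_probe()
	 * write the value 1 to that register to cancel a stale
	 * request left over from a crashed previous user (kexec).
	 */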
	_efx_writed(efx, cpu_to_le32((u64)nic_data->mcdi_buf.dma_addr >> 32),
		    ER_DZ_MC_DB_LWRD);
	_efx_writed(efx, cpu_to_le32((u32)nic_data->mcdi_buf.dma_addr),
		    ER_DZ_MC_DB_HWRD);
}
static bool efx_ef10_mcdi_poll_response(struct efx_nic *efx)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	const efx_dword_t hdr = *(const efx_dword_t *)nic_data->mcdi_buf.addr;

	rmb();
	return EFX_DWORD_FIELD(hdr, MCDI_HEADER_RESPONSE);
}

static void
efx_ef10_mcdi_read_response(struct efx_nic *efx, efx_dword_t *outbuf,
			    size_t offset, size_t outlen)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	const u8 *pdu = nic_data->mcdi_buf.addr;

	memcpy(outbuf, pdu + offset, outlen);
}
static int efx_ef10_mcdi_poll_reboot(struct efx_nic *efx)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	int rc;

	rc = efx_ef10_get_warm_boot_count(efx);
	if (rc < 0) {
		/* The firmware is presumably in the process of
		 * rebooting.  However, we are supposed to report each
		 * reboot just once, so we must only do that once we
		 * can read and store the updated warm boot count.
		 */
		return 0;
	}

	if (rc == nic_data->warm_boot_count)
		return 0;

	nic_data->warm_boot_count = rc;

	/* All our allocations have been reset */
	efx_ef10_reset_mc_allocations(efx);

	/* The datapath firmware might have been changed */
	nic_data->must_check_datapath_caps = true;

	/* MAC statistics have been cleared on the NIC; clear the local
	 * statistic that we update with efx_update_diff_stat().
	 */
	nic_data->stats[EF10_STAT_rx_bad_bytes] = 0;

	return -EIO;
}
/* Handle an MSI interrupt
 *
 * Handle an MSI hardware interrupt.  This routine schedules event
 * queue processing.  No interrupt acknowledgement cycle is necessary.
 * Also, we never need to check that the interrupt is for us, since
 * MSI interrupts cannot be shared.
 */
static irqreturn_t efx_ef10_msi_interrupt(int irq, void *dev_id)
{
	struct efx_msi_context *context = dev_id;
	struct efx_nic *efx = context->efx;

	netif_vdbg(efx, intr, efx->net_dev,
		   "IRQ %d on CPU %d\n", irq, raw_smp_processor_id());

	if (likely(ACCESS_ONCE(efx->irq_soft_enabled))) {
		/* Note test interrupts */
		if (context->index == efx->irq_level)
			efx->last_irq_cpu = raw_smp_processor_id();

		/* Schedule processing of the channel */
		efx_schedule_channel_irq(efx->channel[context->index]);
	}

	return IRQ_HANDLED;
}
static irqreturn_t efx_ef10_legacy_interrupt(int irq, void *dev_id)
{
	struct efx_nic *efx = dev_id;
	bool soft_enabled = ACCESS_ONCE(efx->irq_soft_enabled);
	struct efx_channel *channel;
	efx_dword_t reg;
	u32 queues;

	/* Read the ISR which also ACKs the interrupts */
	efx_readd(efx, &reg, ER_DZ_BIU_INT_ISR);
	queues = EFX_DWORD_FIELD(reg, ERF_DZ_ISR_REG);

	if (queues == 0)
		return IRQ_NONE;

	if (likely(soft_enabled)) {
		/* Note test interrupts */
		if (queues & (1U << efx->irq_level))
			efx->last_irq_cpu = raw_smp_processor_id();

		efx_for_each_channel(channel, efx) {
			if (queues & 1)
				efx_schedule_channel_irq(channel);
			queues >>= 1;
		}
	}

	netif_vdbg(efx, intr, efx->net_dev,
		   "IRQ %d on CPU %d status " EFX_DWORD_FMT "\n",
		   irq, raw_smp_processor_id(), EFX_DWORD_VAL(reg));

	return IRQ_HANDLED;
}
static void efx_ef10_irq_test_generate(struct efx_nic *efx)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_TRIGGER_INTERRUPT_IN_LEN);

	BUILD_BUG_ON(MC_CMD_TRIGGER_INTERRUPT_OUT_LEN != 0);

	MCDI_SET_DWORD(inbuf, TRIGGER_INTERRUPT_IN_INTR_LEVEL, efx->irq_level);
	(void) efx_mcdi_rpc(efx, MC_CMD_TRIGGER_INTERRUPT,
			    inbuf, sizeof(inbuf), NULL, 0, NULL);
}
static int efx_ef10_tx_probe(struct efx_tx_queue *tx_queue)
{
	return efx_nic_alloc_buffer(tx_queue->efx, &tx_queue->txd.buf,
				    (tx_queue->ptr_mask + 1) *
				    sizeof(efx_qword_t),
				    GFP_KERNEL);
}
/* This writes to the TX_DESC_WPTR and also pushes data */
static inline void efx_ef10_push_tx_desc(struct efx_tx_queue *tx_queue,
					 const efx_qword_t *txd)
{
	unsigned int write_ptr;
	efx_oword_t reg;

	write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
	EFX_POPULATE_OWORD_1(reg, ERF_DZ_TX_DESC_WPTR, write_ptr);
	reg.qword[0] = *txd;
	efx_writeo_page(tx_queue->efx, &reg,
			ER_DZ_TX_DESC_UPD, tx_queue->queue);
}
static void efx_ef10_tx_init(struct efx_tx_queue *tx_queue)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_INIT_TXQ_IN_LEN(EFX_MAX_DMAQ_SIZE * 8 /
						       EFX_BUF_SIZE));
	MCDI_DECLARE_BUF(outbuf, MC_CMD_INIT_TXQ_OUT_LEN);
	bool csum_offload = tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD;
	size_t entries = tx_queue->txd.buf.len / EFX_BUF_SIZE;
	struct efx_channel *channel = tx_queue->channel;
	struct efx_nic *efx = tx_queue->efx;
	size_t inlen, outlen;
	dma_addr_t dma_addr;
	efx_qword_t *txd;
	int rc;
	int i;

	MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_SIZE, tx_queue->ptr_mask + 1);
	MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_TARGET_EVQ, channel->channel);
	MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_LABEL, tx_queue->queue);
	MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_INSTANCE, tx_queue->queue);
	MCDI_POPULATE_DWORD_2(inbuf, INIT_TXQ_IN_FLAGS,
			      INIT_TXQ_IN_FLAG_IP_CSUM_DIS, !csum_offload,
			      INIT_TXQ_IN_FLAG_TCP_CSUM_DIS, !csum_offload);
	MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_OWNER_ID, 0);
	MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_PORT_ID, EVB_PORT_ID_ASSIGNED);

	dma_addr = tx_queue->txd.buf.dma_addr;

	netif_dbg(efx, hw, efx->net_dev, "pushing TXQ %d. %zu entries (%llx)\n",
		  tx_queue->queue, entries, (u64)dma_addr);

	for (i = 0; i < entries; ++i) {
		MCDI_SET_ARRAY_QWORD(inbuf, INIT_TXQ_IN_DMA_ADDR, i, dma_addr);
		dma_addr += EFX_BUF_SIZE;
	}

	inlen = MC_CMD_INIT_TXQ_IN_LEN(entries);

	rc = efx_mcdi_rpc(efx, MC_CMD_INIT_TXQ, inbuf, inlen,
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		goto fail;

	/* A previous user of this TX queue might have set us up the
	 * bomb by writing a descriptor to the TX push collector but
	 * not the doorbell.  (Each collector belongs to a port, not a
	 * queue or function, so cannot easily be reset.)  We must
	 * attempt to push a no-op descriptor in its place.
	 */
	tx_queue->buffer[0].flags = EFX_TX_BUF_OPTION;
	tx_queue->insert_count = 1;
	txd = efx_tx_desc(tx_queue, 0);
	EFX_POPULATE_QWORD_4(*txd,
			     ESF_DZ_TX_DESC_IS_OPT, true,
			     ESF_DZ_TX_OPTION_TYPE,
			     ESE_DZ_TX_OPTION_DESC_CRC_CSUM,
			     ESF_DZ_TX_OPTION_UDP_TCP_CSUM, csum_offload,
			     ESF_DZ_TX_OPTION_IP_CSUM, csum_offload);
	tx_queue->write_count = 1;
	wmb();
	efx_ef10_push_tx_desc(tx_queue, txd);

	return;

fail:
	netdev_WARN(efx->net_dev, "failed to initialise TXQ %d\n",
		    tx_queue->queue);
}
static void efx_ef10_tx_fini(struct efx_tx_queue *tx_queue)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_TXQ_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_FINI_TXQ_OUT_LEN);
	struct efx_nic *efx = tx_queue->efx;
	size_t outlen;
	int rc;

	MCDI_SET_DWORD(inbuf, FINI_TXQ_IN_INSTANCE,
		       tx_queue->queue);

	rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FINI_TXQ, inbuf, sizeof(inbuf),
				outbuf, sizeof(outbuf), &outlen);

	if (rc && rc != -EALREADY)
		goto fail;

	return;

fail:
	efx_mcdi_display_error(efx, MC_CMD_FINI_TXQ, MC_CMD_FINI_TXQ_IN_LEN,
			       outbuf, outlen, rc);
}
static void efx_ef10_tx_remove(struct efx_tx_queue *tx_queue)
{
	efx_nic_free_buffer(tx_queue->efx, &tx_queue->txd.buf);
}

/* This writes to the TX_DESC_WPTR; write pointer for TX descriptor ring */
static inline void efx_ef10_notify_tx_desc(struct efx_tx_queue *tx_queue)
{
	unsigned int write_ptr;
	efx_dword_t reg;

	write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
	EFX_POPULATE_DWORD_1(reg, ERF_DZ_TX_DESC_WPTR_DWORD, write_ptr);
	efx_writed_page(tx_queue->efx, &reg,
			ER_DZ_TX_DESC_UPD_DWORD, tx_queue->queue);
}
static void efx_ef10_tx_write(struct efx_tx_queue *tx_queue)
{
	unsigned int old_write_count = tx_queue->write_count;
	struct efx_tx_buffer *buffer;
	unsigned int write_ptr;
	efx_qword_t *txd;

	BUG_ON(tx_queue->write_count == tx_queue->insert_count);

	do {
		write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
		buffer = &tx_queue->buffer[write_ptr];
		txd = efx_tx_desc(tx_queue, write_ptr);
		++tx_queue->write_count;

		/* Create TX descriptor ring entry */
		if (buffer->flags & EFX_TX_BUF_OPTION) {
			*txd = buffer->option;
		} else {
			BUILD_BUG_ON(EFX_TX_BUF_CONT != 1);
			EFX_POPULATE_QWORD_3(
				*txd,
				ESF_DZ_TX_KER_CONT,
				buffer->flags & EFX_TX_BUF_CONT,
				ESF_DZ_TX_KER_BYTE_CNT, buffer->len,
				ESF_DZ_TX_KER_BUF_ADDR, buffer->dma_addr);
		}
	} while (tx_queue->write_count != tx_queue->insert_count);

	wmb(); /* Ensure descriptors are written before they are fetched */

	if (efx_nic_may_push_tx_desc(tx_queue, old_write_count)) {
		txd = efx_tx_desc(tx_queue,
				  old_write_count & tx_queue->ptr_mask);
		efx_ef10_push_tx_desc(tx_queue, txd);
		++tx_queue->pushes;
	} else {
		efx_ef10_notify_tx_desc(tx_queue);
	}
}
static int efx_ef10_alloc_rss_context(struct efx_nic *efx, u32 *context)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_ALLOC_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN);
	size_t outlen;
	int rc;

	MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_UPSTREAM_PORT_ID,
		       EVB_PORT_ID_ASSIGNED);
	MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_TYPE,
		       MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_EXCLUSIVE);
	MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_NUM_QUEUES,
		       EFX_MAX_CHANNELS);

	rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_ALLOC, inbuf, sizeof(inbuf),
			  outbuf, sizeof(outbuf), &outlen);
	if (rc != 0)
		return rc;

	if (outlen < MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN)
		return -EIO;

	*context = MCDI_DWORD(outbuf, RSS_CONTEXT_ALLOC_OUT_RSS_CONTEXT_ID);

	return 0;
}
static void efx_ef10_free_rss_context(struct efx_nic *efx, u32 context)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_FREE_IN_LEN);
	int rc;

	MCDI_SET_DWORD(inbuf, RSS_CONTEXT_FREE_IN_RSS_CONTEXT_ID,
		       context);

	rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_FREE, inbuf, sizeof(inbuf),
			  NULL, 0, NULL);
	WARN_ON(rc != 0);
}
static int efx_ef10_populate_rss_table(struct efx_nic *efx, u32 context)
{
	MCDI_DECLARE_BUF(tablebuf, MC_CMD_RSS_CONTEXT_SET_TABLE_IN_LEN);
	MCDI_DECLARE_BUF(keybuf, MC_CMD_RSS_CONTEXT_SET_KEY_IN_LEN);
	int i, rc;

	MCDI_SET_DWORD(tablebuf, RSS_CONTEXT_SET_TABLE_IN_RSS_CONTEXT_ID,
		       context);
	BUILD_BUG_ON(ARRAY_SIZE(efx->rx_indir_table) !=
		     MC_CMD_RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE_LEN);

	for (i = 0; i < ARRAY_SIZE(efx->rx_indir_table); ++i)
		MCDI_PTR(tablebuf,
			 RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE)[i] =
				(u8) efx->rx_indir_table[i];

	rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_SET_TABLE, tablebuf,
			  sizeof(tablebuf), NULL, 0, NULL);
	if (rc != 0)
		return rc;

	MCDI_SET_DWORD(keybuf, RSS_CONTEXT_SET_KEY_IN_RSS_CONTEXT_ID,
		       context);
	BUILD_BUG_ON(ARRAY_SIZE(efx->rx_hash_key) !=
		     MC_CMD_RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY_LEN);
	for (i = 0; i < ARRAY_SIZE(efx->rx_hash_key); ++i)
		MCDI_PTR(keybuf, RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY)[i] =
			efx->rx_hash_key[i];

	return efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_SET_KEY, keybuf,
			    sizeof(keybuf), NULL, 0, NULL);
}
static void efx_ef10_rx_free_indir_table(struct efx_nic *efx)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;

	if (nic_data->rx_rss_context != EFX_EF10_RSS_CONTEXT_INVALID)
		efx_ef10_free_rss_context(efx, nic_data->rx_rss_context);
	nic_data->rx_rss_context = EFX_EF10_RSS_CONTEXT_INVALID;
}
static void efx_ef10_rx_push_rss_config(struct efx_nic *efx)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	int rc;

	netif_dbg(efx, drv, efx->net_dev, "pushing RSS config\n");

	if (nic_data->rx_rss_context == EFX_EF10_RSS_CONTEXT_INVALID) {
		rc = efx_ef10_alloc_rss_context(efx, &nic_data->rx_rss_context);
		if (rc != 0)
			goto fail;
	}

	rc = efx_ef10_populate_rss_table(efx, nic_data->rx_rss_context);
	if (rc != 0)
		goto fail;

	return;

fail:
	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
}
static int efx_ef10_rx_probe(struct efx_rx_queue *rx_queue)
{
	return efx_nic_alloc_buffer(rx_queue->efx, &rx_queue->rxd.buf,
				    (rx_queue->ptr_mask + 1) *
				    sizeof(efx_qword_t),
				    GFP_KERNEL);
}
static void efx_ef10_rx_init(struct efx_rx_queue *rx_queue)
{
	MCDI_DECLARE_BUF(inbuf,
			 MC_CMD_INIT_RXQ_IN_LEN(EFX_MAX_DMAQ_SIZE * 8 /
						EFX_BUF_SIZE));
	MCDI_DECLARE_BUF(outbuf, MC_CMD_INIT_RXQ_OUT_LEN);
	struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
	size_t entries = rx_queue->rxd.buf.len / EFX_BUF_SIZE;
	struct efx_nic *efx = rx_queue->efx;
	size_t inlen, outlen;
	dma_addr_t dma_addr;
	int rc;
	int i;

	rx_queue->scatter_n = 0;
	rx_queue->scatter_len = 0;

	MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_SIZE, rx_queue->ptr_mask + 1);
	MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_TARGET_EVQ, channel->channel);
	MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_LABEL, efx_rx_queue_index(rx_queue));
	MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_INSTANCE,
		       efx_rx_queue_index(rx_queue));
	MCDI_POPULATE_DWORD_2(inbuf, INIT_RXQ_IN_FLAGS,
			      INIT_RXQ_IN_FLAG_PREFIX, 1,
			      INIT_RXQ_IN_FLAG_TIMESTAMP, 1);
	MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_OWNER_ID, 0);
	MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_PORT_ID, EVB_PORT_ID_ASSIGNED);

	dma_addr = rx_queue->rxd.buf.dma_addr;

	netif_dbg(efx, hw, efx->net_dev, "pushing RXQ %d. %zu entries (%llx)\n",
		  efx_rx_queue_index(rx_queue), entries, (u64)dma_addr);

	for (i = 0; i < entries; ++i) {
		MCDI_SET_ARRAY_QWORD(inbuf, INIT_RXQ_IN_DMA_ADDR, i, dma_addr);
		dma_addr += EFX_BUF_SIZE;
	}

	inlen = MC_CMD_INIT_RXQ_IN_LEN(entries);

	rc = efx_mcdi_rpc(efx, MC_CMD_INIT_RXQ, inbuf, inlen,
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		netdev_WARN(efx->net_dev, "failed to initialise RXQ %d\n",
			    efx_rx_queue_index(rx_queue));
}
static void efx_ef10_rx_fini(struct efx_rx_queue *rx_queue)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_RXQ_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_FINI_RXQ_OUT_LEN);
	struct efx_nic *efx = rx_queue->efx;
	size_t outlen;
	int rc;

	MCDI_SET_DWORD(inbuf, FINI_RXQ_IN_INSTANCE,
		       efx_rx_queue_index(rx_queue));

	rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FINI_RXQ, inbuf, sizeof(inbuf),
				outbuf, sizeof(outbuf), &outlen);

	if (rc && rc != -EALREADY)
		goto fail;

	return;

fail:
	efx_mcdi_display_error(efx, MC_CMD_FINI_RXQ, MC_CMD_FINI_RXQ_IN_LEN,
			       outbuf, outlen, rc);
}

static void efx_ef10_rx_remove(struct efx_rx_queue *rx_queue)
{
	efx_nic_free_buffer(rx_queue->efx, &rx_queue->rxd.buf);
}
/* This creates an entry in the RX descriptor queue */
static inline void
efx_ef10_build_rx_desc(struct efx_rx_queue *rx_queue, unsigned int index)
{
	struct efx_rx_buffer *rx_buf;
	efx_qword_t *rxd;

	rxd = efx_rx_desc(rx_queue, index);
	rx_buf = efx_rx_buffer(rx_queue, index);
	EFX_POPULATE_QWORD_2(*rxd,
			     ESF_DZ_RX_KER_BYTE_CNT, rx_buf->len,
			     ESF_DZ_RX_KER_BUF_ADDR, rx_buf->dma_addr);
}
static void efx_ef10_rx_write(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	unsigned int write_count;
	efx_dword_t reg;

	/* Firmware requires that RX_DESC_WPTR be a multiple of 8 */
	write_count = rx_queue->added_count & ~7;
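	/* e.g. added_count = 13 gives write_count = 8; the remaining
	 * 5 descriptors are left for a later doorbell write.
	 */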
	if (rx_queue->notified_count == write_count)
		return;

	do
		efx_ef10_build_rx_desc(
			rx_queue,
			rx_queue->notified_count & rx_queue->ptr_mask);
	while (++rx_queue->notified_count != write_count);

	wmb();
	EFX_POPULATE_DWORD_1(reg, ERF_DZ_RX_DESC_WPTR,
			     write_count & rx_queue->ptr_mask);
	efx_writed_page(efx, &reg, ER_DZ_RX_DESC_UPD,
			efx_rx_queue_index(rx_queue));
}
static efx_mcdi_async_completer efx_ef10_rx_defer_refill_complete;

static void efx_ef10_rx_defer_refill(struct efx_rx_queue *rx_queue)
{
	struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
	MCDI_DECLARE_BUF(inbuf, MC_CMD_DRIVER_EVENT_IN_LEN);
	efx_qword_t event;

	EFX_POPULATE_QWORD_2(event,
			     ESF_DZ_EV_CODE, EFX_EF10_DRVGEN_EV,
			     ESF_DZ_EV_DATA, EFX_EF10_REFILL);

	MCDI_SET_DWORD(inbuf, DRIVER_EVENT_IN_EVQ, channel->channel);

	/* MCDI_SET_QWORD is not appropriate here since EFX_POPULATE_* has
	 * already swapped the data to little-endian order.
	 */
	memcpy(MCDI_PTR(inbuf, DRIVER_EVENT_IN_DATA), &event.u64[0],
	       sizeof(efx_qword_t));

	efx_mcdi_rpc_async(channel->efx, MC_CMD_DRIVER_EVENT,
			   inbuf, sizeof(inbuf), 0,
			   efx_ef10_rx_defer_refill_complete, 0);
}

static void
efx_ef10_rx_defer_refill_complete(struct efx_nic *efx, unsigned long cookie,
				  int rc, efx_dword_t *outbuf,
				  size_t outlen_actual)
{
	/* nothing to do */
}
static int efx_ef10_ev_probe(struct efx_channel *channel)
{
	return efx_nic_alloc_buffer(channel->efx, &channel->eventq.buf,
				    (channel->eventq_mask + 1) *
				    sizeof(efx_qword_t),
				    GFP_KERNEL);
}
static int efx_ef10_ev_init(struct efx_channel *channel)
{
	MCDI_DECLARE_BUF(inbuf,
			 MC_CMD_INIT_EVQ_IN_LEN(EFX_MAX_EVQ_SIZE * 8 /
						EFX_BUF_SIZE));
	MCDI_DECLARE_BUF(outbuf, MC_CMD_INIT_EVQ_OUT_LEN);
	size_t entries = channel->eventq.buf.len / EFX_BUF_SIZE;
	struct efx_nic *efx = channel->efx;
	struct efx_ef10_nic_data *nic_data;
	bool supports_rx_merge;
	size_t inlen, outlen;
	dma_addr_t dma_addr;
	int rc;
	int i;

	nic_data = efx->nic_data;
	supports_rx_merge =
		!!(nic_data->datapath_caps &
		   1 << MC_CMD_GET_CAPABILITIES_OUT_RX_BATCHING_LBN);

	/* Fill event queue with all ones (i.e. empty events) */
	memset(channel->eventq.buf.addr, 0xff, channel->eventq.buf.len);

	MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_SIZE, channel->eventq_mask + 1);
	MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_INSTANCE, channel->channel);
	/* INIT_EVQ expects index in vector table, not absolute */
	MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_IRQ_NUM, channel->channel);
	MCDI_POPULATE_DWORD_4(inbuf, INIT_EVQ_IN_FLAGS,
			      INIT_EVQ_IN_FLAG_INTERRUPTING, 1,
			      INIT_EVQ_IN_FLAG_RX_MERGE, 1,
			      INIT_EVQ_IN_FLAG_TX_MERGE, 1,
			      INIT_EVQ_IN_FLAG_CUT_THRU, !supports_rx_merge);
	MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_TMR_MODE,
		       MC_CMD_INIT_EVQ_IN_TMR_MODE_DIS);
	MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_TMR_LOAD, 0);
	MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_TMR_RELOAD, 0);
	MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_COUNT_MODE,
		       MC_CMD_INIT_EVQ_IN_COUNT_MODE_DIS);
	MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_COUNT_THRSHLD, 0);

	dma_addr = channel->eventq.buf.dma_addr;
	for (i = 0; i < entries; ++i) {
		MCDI_SET_ARRAY_QWORD(inbuf, INIT_EVQ_IN_DMA_ADDR, i, dma_addr);
		dma_addr += EFX_BUF_SIZE;
	}

	inlen = MC_CMD_INIT_EVQ_IN_LEN(entries);

	rc = efx_mcdi_rpc(efx, MC_CMD_INIT_EVQ, inbuf, inlen,
			  outbuf, sizeof(outbuf), &outlen);
	/* IRQ return is ignored */

	return rc;
}
static void efx_ef10_ev_fini(struct efx_channel *channel)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_EVQ_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_FINI_EVQ_OUT_LEN);
	struct efx_nic *efx = channel->efx;
	size_t outlen;
	int rc;

	MCDI_SET_DWORD(inbuf, FINI_EVQ_IN_INSTANCE, channel->channel);

	rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FINI_EVQ, inbuf, sizeof(inbuf),
				outbuf, sizeof(outbuf), &outlen);

	if (rc && rc != -EALREADY)
		goto fail;

	return;

fail:
	efx_mcdi_display_error(efx, MC_CMD_FINI_EVQ, MC_CMD_FINI_EVQ_IN_LEN,
			       outbuf, outlen, rc);
}

static void efx_ef10_ev_remove(struct efx_channel *channel)
{
	efx_nic_free_buffer(channel->efx, &channel->eventq.buf);
}
static void efx_ef10_handle_rx_wrong_queue(struct efx_rx_queue *rx_queue,
					   unsigned int rx_queue_label)
{
	struct efx_nic *efx = rx_queue->efx;

	netif_info(efx, hw, efx->net_dev,
		   "rx event arrived on queue %d labeled as queue %u\n",
		   efx_rx_queue_index(rx_queue), rx_queue_label);

	efx_schedule_reset(efx, RESET_TYPE_DISABLE);
}
static void
efx_ef10_handle_rx_bad_lbits(struct efx_rx_queue *rx_queue,
			     unsigned int actual, unsigned int expected)
{
	unsigned int dropped = (actual - expected) & rx_queue->ptr_mask;
	struct efx_nic *efx = rx_queue->efx;

	netif_info(efx, hw, efx->net_dev,
		   "dropped %d events (index=%d expected=%d)\n",
		   dropped, actual, expected);

	efx_schedule_reset(efx, RESET_TYPE_DISABLE);
}
/* partially received RX was aborted. clean up. */
static void efx_ef10_handle_rx_abort(struct efx_rx_queue *rx_queue)
{
	unsigned int rx_desc_ptr;

	netif_dbg(rx_queue->efx, hw, rx_queue->efx->net_dev,
		  "scattered RX aborted (dropping %u buffers)\n",
		  rx_queue->scatter_n);

	rx_desc_ptr = rx_queue->removed_count & rx_queue->ptr_mask;

	efx_rx_packet(rx_queue, rx_desc_ptr, rx_queue->scatter_n,
		      0, EFX_RX_PKT_DISCARD);

	rx_queue->removed_count += rx_queue->scatter_n;
	rx_queue->scatter_n = 0;
	rx_queue->scatter_len = 0;
	++efx_rx_queue_channel(rx_queue)->n_rx_nodesc_trunc;
}
static int efx_ef10_handle_rx_event(struct efx_channel *channel,
				    const efx_qword_t *event)
{
	unsigned int rx_bytes, next_ptr_lbits, rx_queue_label, rx_l4_class;
	unsigned int n_descs, n_packets, i;
	struct efx_nic *efx = channel->efx;
	struct efx_rx_queue *rx_queue;
	bool rx_cont;
	u16 flags = 0;

	if (unlikely(ACCESS_ONCE(efx->reset_pending)))
		return 0;

	/* Basic packet information */
	rx_bytes = EFX_QWORD_FIELD(*event, ESF_DZ_RX_BYTES);
	next_ptr_lbits = EFX_QWORD_FIELD(*event, ESF_DZ_RX_DSC_PTR_LBITS);
	rx_queue_label = EFX_QWORD_FIELD(*event, ESF_DZ_RX_QLABEL);
	rx_l4_class = EFX_QWORD_FIELD(*event, ESF_DZ_RX_L4_CLASS);
	rx_cont = EFX_QWORD_FIELD(*event, ESF_DZ_RX_CONT);

	if (EFX_QWORD_FIELD(*event, ESF_DZ_RX_DROP_EVENT))
		netdev_WARN(efx->net_dev, "saw RX_DROP_EVENT: event="
			    EFX_QWORD_FMT "\n",
			    EFX_QWORD_VAL(*event));

	rx_queue = efx_channel_get_rx_queue(channel);

	if (unlikely(rx_queue_label != efx_rx_queue_index(rx_queue)))
		efx_ef10_handle_rx_wrong_queue(rx_queue, rx_queue_label);

	n_descs = ((next_ptr_lbits - rx_queue->removed_count) &
		   ((1 << ESF_DZ_RX_DSC_PTR_LBITS_WIDTH) - 1));
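	/* DSC_PTR_LBITS holds only the low bits of the hardware
	 * descriptor pointer, so the number of descriptors completed
	 * since the last event is computed above modulo
	 * 2^ESF_DZ_RX_DSC_PTR_LBITS_WIDTH.
	 */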
	if (n_descs != rx_queue->scatter_n + 1) {
		struct efx_ef10_nic_data *nic_data = efx->nic_data;

		/* detect rx abort */
		if (unlikely(n_descs == rx_queue->scatter_n)) {
			if (rx_queue->scatter_n == 0 || rx_bytes != 0)
				netdev_WARN(efx->net_dev,
					    "invalid RX abort: scatter_n=%u event="
					    EFX_QWORD_FMT "\n",
					    rx_queue->scatter_n,
					    EFX_QWORD_VAL(*event));
			efx_ef10_handle_rx_abort(rx_queue);
			return 0;
		}

		/* Check that RX completion merging is valid, i.e.
		 * the current firmware supports it and this is a
		 * non-scattered packet.
		 */
		if (!(nic_data->datapath_caps &
		      (1 << MC_CMD_GET_CAPABILITIES_OUT_RX_BATCHING_LBN)) ||
		    rx_queue->scatter_n != 0 || rx_cont) {
			efx_ef10_handle_rx_bad_lbits(
				rx_queue, next_ptr_lbits,
				(rx_queue->removed_count +
				 rx_queue->scatter_n + 1) &
				((1 << ESF_DZ_RX_DSC_PTR_LBITS_WIDTH) - 1));
			return 0;
		}

		/* Merged completion for multiple non-scattered packets */
		rx_queue->scatter_n = 1;
		rx_queue->scatter_len = 0;
		n_packets = n_descs;
		++channel->n_rx_merge_events;
		channel->n_rx_merge_packets += n_packets;
		flags |= EFX_RX_PKT_PREFIX_LEN;
	} else {
		++rx_queue->scatter_n;
		rx_queue->scatter_len += rx_bytes;
		if (rx_cont)
			return 0;
		n_packets = 1;
	}

	if (unlikely(EFX_QWORD_FIELD(*event, ESF_DZ_RX_ECRC_ERR)))
		flags |= EFX_RX_PKT_DISCARD;

	if (unlikely(EFX_QWORD_FIELD(*event, ESF_DZ_RX_IPCKSUM_ERR))) {
		channel->n_rx_ip_hdr_chksum_err += n_packets;
	} else if (unlikely(EFX_QWORD_FIELD(*event,
					    ESF_DZ_RX_TCPUDP_CKSUM_ERR))) {
		channel->n_rx_tcp_udp_chksum_err += n_packets;
	} else if (rx_l4_class == ESE_DZ_L4_CLASS_TCP ||
		   rx_l4_class == ESE_DZ_L4_CLASS_UDP) {
		flags |= EFX_RX_PKT_CSUMMED;
	}

	if (rx_l4_class == ESE_DZ_L4_CLASS_TCP)
		flags |= EFX_RX_PKT_TCP;

	channel->irq_mod_score += 2 * n_packets;

	/* Handle received packet(s) */
	for (i = 0; i < n_packets; i++) {
		efx_rx_packet(rx_queue,
			      rx_queue->removed_count & rx_queue->ptr_mask,
			      rx_queue->scatter_n, rx_queue->scatter_len,
			      flags);
		rx_queue->removed_count += rx_queue->scatter_n;
	}

	rx_queue->scatter_n = 0;
	rx_queue->scatter_len = 0;

	return n_packets;
}
static int
efx_ef10_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
{
	struct efx_nic *efx = channel->efx;
	struct efx_tx_queue *tx_queue;
	unsigned int tx_ev_desc_ptr;
	unsigned int tx_ev_q_label;
	int tx_descs = 0;

	if (unlikely(ACCESS_ONCE(efx->reset_pending)))
		return 0;

	if (unlikely(EFX_QWORD_FIELD(*event, ESF_DZ_TX_DROP_EVENT)))
		return 0;

	/* Transmit completion */
	tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, ESF_DZ_TX_DESCR_INDX);
	tx_ev_q_label = EFX_QWORD_FIELD(*event, ESF_DZ_TX_QLABEL);
	tx_queue = efx_channel_get_tx_queue(channel,
					    tx_ev_q_label % EFX_TXQ_TYPES);
	tx_descs = ((tx_ev_desc_ptr + 1 - tx_queue->read_count) &
		    tx_queue->ptr_mask);
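	/* The completion index wraps with the ring, so the count of
	 * completed descriptors above is likewise taken modulo the
	 * ring size (ptr_mask + 1).
	 */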
	efx_xmit_done(tx_queue, tx_ev_desc_ptr & tx_queue->ptr_mask);

	return tx_descs;
}
static void
efx_ef10_handle_driver_event(struct efx_channel *channel, efx_qword_t *event)
{
	struct efx_nic *efx = channel->efx;
	int subcode;

	subcode = EFX_QWORD_FIELD(*event, ESF_DZ_DRV_SUB_CODE);

	switch (subcode) {
	case ESE_DZ_DRV_TIMER_EV:
	case ESE_DZ_DRV_WAKE_UP_EV:
		break;
	case ESE_DZ_DRV_START_UP_EV:
		/* event queue init complete. ok. */
		break;
	default:
		netif_err(efx, hw, efx->net_dev,
			  "channel %d unknown driver event type %d"
			  " (data " EFX_QWORD_FMT ")\n",
			  channel->channel, subcode,
			  EFX_QWORD_VAL(*event));
	}
}
static void efx_ef10_handle_driver_generated_event(struct efx_channel *channel,
						   efx_qword_t *event)
{
	struct efx_nic *efx = channel->efx;
	u32 subcode;

	subcode = EFX_QWORD_FIELD(*event, EFX_DWORD_0);

	switch (subcode) {
	case EFX_EF10_TEST:
		channel->event_test_cpu = raw_smp_processor_id();
		break;
	case EFX_EF10_REFILL:
		/* The queue must be empty, so we won't receive any rx
		 * events, so efx_process_channel() won't refill the
		 * queue. Refill it here
		 */
		efx_fast_push_rx_descriptors(&channel->rx_queue, true);
		break;
	default:
		netif_err(efx, hw, efx->net_dev,
			  "channel %d unknown driver event type %u"
			  " (data " EFX_QWORD_FMT ")\n",
			  channel->channel, (unsigned) subcode,
			  EFX_QWORD_VAL(*event));
	}
}
static int efx_ef10_ev_process(struct efx_channel *channel, int quota)
{
	struct efx_nic *efx = channel->efx;
	efx_qword_t event, *p_event;
	unsigned int read_ptr;
	int ev_code;
	int tx_descs = 0;
	int spent = 0;

	read_ptr = channel->eventq_read_ptr;

	for (;;) {
		p_event = efx_event(channel, read_ptr);
		event = *p_event;

		if (!efx_event_present(&event))
			break;

		EFX_SET_QWORD(*p_event);

		++read_ptr;

		ev_code = EFX_QWORD_FIELD(event, ESF_DZ_EV_CODE);

		netif_vdbg(efx, drv, efx->net_dev,
			   "processing event on %d " EFX_QWORD_FMT "\n",
			   channel->channel, EFX_QWORD_VAL(event));

		switch (ev_code) {
		case ESE_DZ_EV_CODE_MCDI_EV:
			efx_mcdi_process_event(channel, &event);
			break;
		case ESE_DZ_EV_CODE_RX_EV:
			spent += efx_ef10_handle_rx_event(channel, &event);
			if (spent >= quota) {
				/* XXX can we split a merged event to
				 * avoid going over-quota?
				 */
				spent = quota;
				goto out;
			}
			break;
		case ESE_DZ_EV_CODE_TX_EV:
			tx_descs += efx_ef10_handle_tx_event(channel, &event);
			if (tx_descs > efx->txq_entries) {
				spent = quota;
				goto out;
			} else if (++spent == quota) {
				goto out;
			}
			break;
		case ESE_DZ_EV_CODE_DRIVER_EV:
			efx_ef10_handle_driver_event(channel, &event);
			if (++spent == quota)
				goto out;
			break;
		case EFX_EF10_DRVGEN_EV:
			efx_ef10_handle_driver_generated_event(channel, &event);
			break;
		default:
			netif_err(efx, hw, efx->net_dev,
				  "channel %d unknown event type %d"
				  " (data " EFX_QWORD_FMT ")\n",
				  channel->channel, ev_code,
				  EFX_QWORD_VAL(event));
		}
	}

out:
	channel->eventq_read_ptr = read_ptr;
	return spent;
}
static void efx_ef10_ev_read_ack(struct efx_channel *channel)
{
	struct efx_nic *efx = channel->efx;
	efx_dword_t rptr;

	if (EFX_EF10_WORKAROUND_35388(efx)) {
		BUILD_BUG_ON(EFX_MIN_EVQ_SIZE <
			     (1 << ERF_DD_EVQ_IND_RPTR_WIDTH));
		BUILD_BUG_ON(EFX_MAX_EVQ_SIZE >
			     (1 << 2 * ERF_DD_EVQ_IND_RPTR_WIDTH));

		EFX_POPULATE_DWORD_2(rptr, ERF_DD_EVQ_IND_RPTR_FLAGS,
				     EFE_DD_EVQ_IND_RPTR_FLAGS_HIGH,
				     ERF_DD_EVQ_IND_RPTR,
				     (channel->eventq_read_ptr &
				      channel->eventq_mask) >>
				     ERF_DD_EVQ_IND_RPTR_WIDTH);
		efx_writed_page(efx, &rptr, ER_DD_EVQ_INDIRECT,
				channel->channel);
		EFX_POPULATE_DWORD_2(rptr, ERF_DD_EVQ_IND_RPTR_FLAGS,
				     EFE_DD_EVQ_IND_RPTR_FLAGS_LOW,
				     ERF_DD_EVQ_IND_RPTR,
				     channel->eventq_read_ptr &
				     ((1 << ERF_DD_EVQ_IND_RPTR_WIDTH) - 1));
		efx_writed_page(efx, &rptr, ER_DD_EVQ_INDIRECT,
				channel->channel);
	} else {
		EFX_POPULATE_DWORD_1(rptr, ERF_DZ_EVQ_RPTR,
				     channel->eventq_read_ptr &
				     channel->eventq_mask);
		efx_writed_page(efx, &rptr, ER_DZ_EVQ_RPTR, channel->channel);
	}
}
static void efx_ef10_ev_test_generate(struct efx_channel *channel)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_DRIVER_EVENT_IN_LEN);
	struct efx_nic *efx = channel->efx;
	efx_qword_t event;
	int rc;

	EFX_POPULATE_QWORD_2(event,
			     ESF_DZ_EV_CODE, EFX_EF10_DRVGEN_EV,
			     ESF_DZ_EV_DATA, EFX_EF10_TEST);

	MCDI_SET_DWORD(inbuf, DRIVER_EVENT_IN_EVQ, channel->channel);

	/* MCDI_SET_QWORD is not appropriate here since EFX_POPULATE_* has
	 * already swapped the data to little-endian order.
	 */
	memcpy(MCDI_PTR(inbuf, DRIVER_EVENT_IN_DATA), &event.u64[0],
	       sizeof(efx_qword_t));

	rc = efx_mcdi_rpc(efx, MC_CMD_DRIVER_EVENT, inbuf, sizeof(inbuf),
			  NULL, 0, NULL);
	if (rc != 0)
		goto fail;

	return;

fail:
	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
}
void efx_ef10_handle_drain_event(struct efx_nic *efx)
{
	if (atomic_dec_and_test(&efx->active_queues))
		wake_up(&efx->flush_wq);

	WARN_ON(atomic_read(&efx->active_queues) < 0);
}
static int efx_ef10_fini_dmaq(struct efx_nic *efx)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	struct efx_channel *channel;
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;
	int pending;

	/* If the MC has just rebooted, the TX/RX queues will have already been
	 * torn down, but efx->active_queues needs to be set to zero.
	 */
	if (nic_data->must_realloc_vis) {
		atomic_set(&efx->active_queues, 0);
		return 0;
	}

	/* Do not attempt to write to the NIC during EEH recovery */
	if (efx->state != STATE_RECOVERY) {
		efx_for_each_channel(channel, efx) {
			efx_for_each_channel_rx_queue(rx_queue, channel)
				efx_ef10_rx_fini(rx_queue);
			efx_for_each_channel_tx_queue(tx_queue, channel)
				efx_ef10_tx_fini(tx_queue);
		}

		wait_event_timeout(efx->flush_wq,
				   atomic_read(&efx->active_queues) == 0,
				   msecs_to_jiffies(EFX_MAX_FLUSH_TIME));
		pending = atomic_read(&efx->active_queues);
		if (pending) {
			netif_err(efx, hw, efx->net_dev, "failed to flush %d queues\n",
				  pending);
			return -ETIMEDOUT;
		}
	}

	return 0;
}
static bool efx_ef10_filter_equal(const struct efx_filter_spec *left,
				  const struct efx_filter_spec *right)
{
	if ((left->match_flags ^ right->match_flags) |
	    ((left->flags ^ right->flags) &
	     (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_TX)))
		return false;

	return memcmp(&left->outer_vid, &right->outer_vid,
		      sizeof(struct efx_filter_spec) -
		      offsetof(struct efx_filter_spec, outer_vid)) == 0;
}
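
/* The memcmp() above assumes that the match fields of struct
 * efx_filter_spec from outer_vid onwards are stored contiguously with
 * no padding holes, so a byte-wise comparison is a valid tuple
 * comparison.  match_flags is compared separately above, and the other
 * fields before outer_vid (priority, flags, dmaq_id, rss_context) are
 * deliberately excluded from the tuple.
 */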
static unsigned int efx_ef10_filter_hash(const struct efx_filter_spec *spec)
{
	BUILD_BUG_ON(offsetof(struct efx_filter_spec, outer_vid) & 3);
	return jhash2((const u32 *)&spec->outer_vid,
		      (sizeof(struct efx_filter_spec) -
		       offsetof(struct efx_filter_spec, outer_vid)) / 4,
		      0);
	/* XXX should we randomise the initval? */
}
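
/* Illustrative: callers fold this hash into a table row with
 * (hash + depth) & (HUNT_FILTER_TBL_ROWS - 1), i.e. open addressing
 * with linear probing up to EFX_EF10_FILTER_SEARCH_LIMIT slots, which
 * is why HUNT_FILTER_TBL_ROWS must be a power of two.
 */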
/* Decide whether a filter should be exclusive or else should allow
 * delivery to additional recipients.  Currently we decide that
 * filters for specific local unicast MAC and IP addresses are
 * exclusive.
 */
static bool efx_ef10_filter_is_exclusive(const struct efx_filter_spec *spec)
{
	if (spec->match_flags & EFX_FILTER_MATCH_LOC_MAC &&
	    !is_multicast_ether_addr(spec->loc_mac))
		return true;

	if ((spec->match_flags &
	     (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) ==
	    (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) {
		if (spec->ether_type == htons(ETH_P_IP) &&
		    !ipv4_is_multicast(spec->loc_host[0]))
			return true;
		if (spec->ether_type == htons(ETH_P_IPV6) &&
		    ((const u8 *)spec->loc_host)[0] != 0xff)
			return true;
	}

	return false;
}
static struct efx_filter_spec *
efx_ef10_filter_entry_spec(const struct efx_ef10_filter_table *table,
			   unsigned int filter_idx)
{
	return (struct efx_filter_spec *)(table->entry[filter_idx].spec &
					  ~EFX_EF10_FILTER_FLAGS);
}

static unsigned int
efx_ef10_filter_entry_flags(const struct efx_ef10_filter_table *table,
			    unsigned int filter_idx)
{
	return table->entry[filter_idx].spec & EFX_EF10_FILTER_FLAGS;
}

static void
efx_ef10_filter_set_entry(struct efx_ef10_filter_table *table,
			  unsigned int filter_idx,
			  const struct efx_filter_spec *spec,
			  unsigned int flags)
{
	table->entry[filter_idx].spec = (unsigned long)spec | flags;
}
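
/* Tagged-pointer encoding used above (illustrative): the spec pointer
 * comes from kmalloc() and is therefore at least 4-byte aligned, so its
 * two low bits are free to hold EFX_EF10_FILTER_FLAG_BUSY and
 * EFX_EF10_FILTER_FLAG_AUTO_OLD, e.g.:
 *
 *	entry.spec = (unsigned long)spec | EFX_EF10_FILTER_FLAG_BUSY;
 *	spec = (struct efx_filter_spec *)
 *		(entry.spec & ~EFX_EF10_FILTER_FLAGS);
 */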
static void efx_ef10_filter_push_prep(struct efx_nic *efx,
				      const struct efx_filter_spec *spec,
				      efx_dword_t *inbuf, u64 handle,
				      bool replacing)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;

	memset(inbuf, 0, MC_CMD_FILTER_OP_IN_LEN);

	if (replacing) {
		MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP,
			       MC_CMD_FILTER_OP_IN_OP_REPLACE);
		MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE, handle);
	} else {
		u32 match_fields = 0;

		MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP,
			       efx_ef10_filter_is_exclusive(spec) ?
			       MC_CMD_FILTER_OP_IN_OP_INSERT :
			       MC_CMD_FILTER_OP_IN_OP_SUBSCRIBE);

		/* Convert match flags and values.  Unlike almost
		 * everything else in MCDI, these fields are in
		 * network byte order.
		 */
		if (spec->match_flags & EFX_FILTER_MATCH_LOC_MAC_IG)
			match_fields |=
				is_multicast_ether_addr(spec->loc_mac) ?
				1 << MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_MCAST_DST_LBN :
				1 << MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_UCAST_DST_LBN;
#define COPY_FIELD(gen_flag, gen_field, mcdi_field)			     \
		if (spec->match_flags & EFX_FILTER_MATCH_ ## gen_flag) {     \
			match_fields |=					     \
				1 << MC_CMD_FILTER_OP_IN_MATCH_ ##	     \
				mcdi_field ## _LBN;			     \
			BUILD_BUG_ON(					     \
				MC_CMD_FILTER_OP_IN_ ## mcdi_field ## _LEN < \
				sizeof(spec->gen_field));		     \
			memcpy(MCDI_PTR(inbuf, FILTER_OP_IN_ ## mcdi_field), \
			       &spec->gen_field, sizeof(spec->gen_field));   \
		}
		COPY_FIELD(REM_HOST, rem_host, SRC_IP);
		COPY_FIELD(LOC_HOST, loc_host, DST_IP);
		COPY_FIELD(REM_MAC, rem_mac, SRC_MAC);
		COPY_FIELD(REM_PORT, rem_port, SRC_PORT);
		COPY_FIELD(LOC_MAC, loc_mac, DST_MAC);
		COPY_FIELD(LOC_PORT, loc_port, DST_PORT);
		COPY_FIELD(ETHER_TYPE, ether_type, ETHER_TYPE);
		COPY_FIELD(INNER_VID, inner_vid, INNER_VLAN);
		COPY_FIELD(OUTER_VID, outer_vid, OUTER_VLAN);
		COPY_FIELD(IP_PROTO, ip_proto, IP_PROTO);
#undef COPY_FIELD
		MCDI_SET_DWORD(inbuf, FILTER_OP_IN_MATCH_FIELDS,
			       match_fields);
	}

	MCDI_SET_DWORD(inbuf, FILTER_OP_IN_PORT_ID, EVB_PORT_ID_ASSIGNED);
	MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_DEST,
		       spec->dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP ?
		       MC_CMD_FILTER_OP_IN_RX_DEST_DROP :
		       MC_CMD_FILTER_OP_IN_RX_DEST_HOST);
	MCDI_SET_DWORD(inbuf, FILTER_OP_IN_TX_DEST,
		       MC_CMD_FILTER_OP_IN_TX_DEST_DEFAULT);
	MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_QUEUE,
		       spec->dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP ?
		       0 : spec->dmaq_id);
	MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_MODE,
		       (spec->flags & EFX_FILTER_FLAG_RX_RSS) ?
		       MC_CMD_FILTER_OP_IN_RX_MODE_RSS :
		       MC_CMD_FILTER_OP_IN_RX_MODE_SIMPLE);
	if (spec->flags & EFX_FILTER_FLAG_RX_RSS)
		MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_CONTEXT,
			       spec->rss_context !=
			       EFX_FILTER_RSS_CONTEXT_DEFAULT ?
			       spec->rss_context : nic_data->rx_rss_context);
}
static int efx_ef10_filter_push(struct efx_nic *efx,
				const struct efx_filter_spec *spec,
				u64 *handle, bool replacing)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_FILTER_OP_OUT_LEN);
	int rc;

	efx_ef10_filter_push_prep(efx, spec, inbuf, *handle, replacing);
	rc = efx_mcdi_rpc(efx, MC_CMD_FILTER_OP, inbuf, sizeof(inbuf),
			  outbuf, sizeof(outbuf), NULL);
	if (rc == 0)
		*handle = MCDI_QWORD(outbuf, FILTER_OP_OUT_HANDLE);
	if (rc == -ENOSPC)
		rc = -EBUSY; /* to match efx_farch_filter_insert() */
	return rc;
}
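
/* efx_ef10_filter_push() is the synchronous insert/replace path; the
 * RFS code below instead pairs efx_ef10_filter_push_prep() with
 * efx_mcdi_rpc_async(), because it runs in atomic context and cannot
 * wait for the firmware to answer.
 */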
static int efx_ef10_filter_rx_match_pri(struct efx_ef10_filter_table *table,
					enum efx_filter_match_flags match_flags)
{
	unsigned int match_pri;

	for (match_pri = 0;
	     match_pri < table->rx_match_count;
	     match_pri++)
		if (table->rx_match_flags[match_pri] == match_flags)
			return match_pri;

	return -EPROTONOSUPPORT;
}
static s32 efx_ef10_filter_insert(struct efx_nic *efx,
				  struct efx_filter_spec *spec,
				  bool replace_equal)
{
	struct efx_ef10_filter_table *table = efx->filter_state;
	DECLARE_BITMAP(mc_rem_map, EFX_EF10_FILTER_SEARCH_LIMIT);
	struct efx_filter_spec *saved_spec;
	unsigned int match_pri, hash;
	unsigned int priv_flags;
	bool replacing = false;
	DEFINE_WAIT(wait);
	bool is_mc_recip;
	s32 rc, ins_index = -1;

	/* For now, only support RX filters */
	if ((spec->flags & (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_TX)) !=
	    EFX_FILTER_FLAG_RX)
		return -EINVAL;

	rc = efx_ef10_filter_rx_match_pri(table, spec->match_flags);
	if (rc < 0)
		return rc;
	match_pri = rc;

	hash = efx_ef10_filter_hash(spec);
	is_mc_recip = efx_filter_is_mc_recipient(spec);
	if (is_mc_recip)
		bitmap_zero(mc_rem_map, EFX_EF10_FILTER_SEARCH_LIMIT);

	/* Find any existing filters with the same match tuple or
	 * else a free slot to insert at.  If any of them are busy,
	 * we have to wait and retry.
	 */
	for (;;) {
		unsigned int depth = 1;
		unsigned int i;

		spin_lock_bh(&efx->filter_lock);

		for (;;) {
			i = (hash + depth) & (HUNT_FILTER_TBL_ROWS - 1);
			saved_spec = efx_ef10_filter_entry_spec(table, i);

			if (!saved_spec) {
				if (ins_index < 0)
					ins_index = i;
			} else if (efx_ef10_filter_equal(spec, saved_spec)) {
				if (table->entry[i].spec &
				    EFX_EF10_FILTER_FLAG_BUSY)
					break;
				if (spec->priority < saved_spec->priority &&
				    spec->priority != EFX_FILTER_PRI_AUTO) {
					rc = -EPERM;
					goto out_unlock;
				}
				if (!is_mc_recip) {
					/* This is the only one */
					if (spec->priority ==
					    saved_spec->priority &&
					    !replace_equal) {
						rc = -EEXIST;
						goto out_unlock;
					}
					ins_index = i;
					goto found;
				} else if (spec->priority >
					   saved_spec->priority ||
					   (spec->priority ==
					    saved_spec->priority &&
					    replace_equal)) {
					if (ins_index < 0)
						ins_index = i;
					else
						__set_bit(depth, mc_rem_map);
				}
			}

			/* Once we reach the maximum search depth, use
			 * the first suitable slot or return -EBUSY if
			 * there was none
			 */
			if (depth == EFX_EF10_FILTER_SEARCH_LIMIT) {
				if (ins_index < 0) {
					rc = -EBUSY;
					goto out_unlock;
				}
				goto found;
			}

			++depth;
		}

		prepare_to_wait(&table->waitq, &wait, TASK_UNINTERRUPTIBLE);
		spin_unlock_bh(&efx->filter_lock);
		schedule();
	}

found:
	/* Create a software table entry if necessary, and mark it
	 * busy.  We might yet fail to insert, but any attempt to
	 * insert a conflicting filter while we're waiting for the
	 * firmware must find the busy entry.
	 */
	saved_spec = efx_ef10_filter_entry_spec(table, ins_index);
	if (saved_spec) {
		if (spec->priority == EFX_FILTER_PRI_AUTO &&
		    saved_spec->priority >= EFX_FILTER_PRI_AUTO) {
			/* Just make sure it won't be removed */
			if (saved_spec->priority > EFX_FILTER_PRI_AUTO)
				saved_spec->flags |= EFX_FILTER_FLAG_RX_OVER_AUTO;
			table->entry[ins_index].spec &=
				~EFX_EF10_FILTER_FLAG_AUTO_OLD;
			rc = ins_index;
			goto out_unlock;
		}
		replacing = true;
		priv_flags = efx_ef10_filter_entry_flags(table, ins_index);
	} else {
		saved_spec = kmalloc(sizeof(*spec), GFP_ATOMIC);
		if (!saved_spec) {
			rc = -ENOMEM;
			goto out_unlock;
		}
		*saved_spec = *spec;
		priv_flags = 0;
	}
	efx_ef10_filter_set_entry(table, ins_index, saved_spec,
				  priv_flags | EFX_EF10_FILTER_FLAG_BUSY);

	/* Mark lower-priority multicast recipients busy prior to removal */
	if (is_mc_recip) {
		unsigned int depth, i;

		for (depth = 0; depth < EFX_EF10_FILTER_SEARCH_LIMIT; depth++) {
			i = (hash + depth) & (HUNT_FILTER_TBL_ROWS - 1);
			if (test_bit(depth, mc_rem_map))
				table->entry[i].spec |=
					EFX_EF10_FILTER_FLAG_BUSY;
		}
	}

	spin_unlock_bh(&efx->filter_lock);

	rc = efx_ef10_filter_push(efx, spec, &table->entry[ins_index].handle,
				  replacing);

	/* Finalise the software table entry */
	spin_lock_bh(&efx->filter_lock);
	if (rc == 0) {
		if (replacing) {
			/* Update the fields that may differ */
			if (saved_spec->priority == EFX_FILTER_PRI_AUTO)
				saved_spec->flags |=
					EFX_FILTER_FLAG_RX_OVER_AUTO;
			saved_spec->priority = spec->priority;
			saved_spec->flags &= EFX_FILTER_FLAG_RX_OVER_AUTO;
			saved_spec->flags |= spec->flags;
			saved_spec->rss_context = spec->rss_context;
			saved_spec->dmaq_id = spec->dmaq_id;
		}
	} else if (!replacing) {
		kfree(saved_spec);
		saved_spec = NULL;
	}
	efx_ef10_filter_set_entry(table, ins_index, saved_spec, priv_flags);

	/* Remove and finalise entries for lower-priority multicast
	 * recipients
	 */
	if (is_mc_recip) {
		MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_IN_LEN);
		unsigned int depth, i;

		memset(inbuf, 0, sizeof(inbuf));

		for (depth = 0; depth < EFX_EF10_FILTER_SEARCH_LIMIT; depth++) {
			if (!test_bit(depth, mc_rem_map))
				continue;

			i = (hash + depth) & (HUNT_FILTER_TBL_ROWS - 1);
			saved_spec = efx_ef10_filter_entry_spec(table, i);
			priv_flags = efx_ef10_filter_entry_flags(table, i);

			if (rc == 0) {
				spin_unlock_bh(&efx->filter_lock);
				MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP,
					       MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE);
				MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE,
					       table->entry[i].handle);
				rc = efx_mcdi_rpc(efx, MC_CMD_FILTER_OP,
						  inbuf, sizeof(inbuf),
						  NULL, 0, NULL);
				spin_lock_bh(&efx->filter_lock);
			}

			if (rc == 0) {
				kfree(saved_spec);
				saved_spec = NULL;
				priv_flags = 0;
			} else {
				priv_flags &= ~EFX_EF10_FILTER_FLAG_BUSY;
			}
			efx_ef10_filter_set_entry(table, i, saved_spec,
						  priv_flags);
		}
	}

	/* If successful, return the inserted filter ID */
	if (rc == 0)
		rc = match_pri * HUNT_FILTER_TBL_ROWS + ins_index;

	wake_up_all(&table->waitq);
out_unlock:
	spin_unlock_bh(&efx->filter_lock);
	finish_wait(&table->waitq, &wait);
	return rc;
}
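
/* Illustrative sketch (not called by the driver): how the user-visible
 * filter ID returned above packs the match priority and table row
 * together, and how efx_ef10_filter_remove_internal() and
 * efx_ef10_filter_get_safe() unpack it again.
 */
static inline void __maybe_unused
efx_ef10_filter_id_unpack_example(u32 filter_id, unsigned int *match_pri,
				  unsigned int *filter_idx)
{
	/* rc = match_pri * HUNT_FILTER_TBL_ROWS + ins_index, so: */
	*match_pri = filter_id / HUNT_FILTER_TBL_ROWS;
	*filter_idx = filter_id % HUNT_FILTER_TBL_ROWS;
}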
static void efx_ef10_filter_update_rx_scatter(struct efx_nic *efx)
{
	/* no need to do anything here on EF10 */
}
/* Remove a filter.
 * If !by_index, remove by ID
 * If by_index, remove by index
 * Filter ID may come from userland and must be range-checked.
 */
static int efx_ef10_filter_remove_internal(struct efx_nic *efx,
					   unsigned int priority_mask,
					   u32 filter_id, bool by_index)
{
	unsigned int filter_idx = filter_id % HUNT_FILTER_TBL_ROWS;
	struct efx_ef10_filter_table *table = efx->filter_state;
	MCDI_DECLARE_BUF(inbuf,
			 MC_CMD_FILTER_OP_IN_HANDLE_OFST +
			 MC_CMD_FILTER_OP_IN_HANDLE_LEN);
	struct efx_filter_spec *spec;
	DEFINE_WAIT(wait);
	int rc;

	/* Find the software table entry and mark it busy.  Don't
	 * remove it yet; any attempt to update while we're waiting
	 * for the firmware must find the busy entry.
	 */
	for (;;) {
		spin_lock_bh(&efx->filter_lock);
		if (!(table->entry[filter_idx].spec &
		      EFX_EF10_FILTER_FLAG_BUSY))
			break;
		prepare_to_wait(&table->waitq, &wait, TASK_UNINTERRUPTIBLE);
		spin_unlock_bh(&efx->filter_lock);
		schedule();
	}

	spec = efx_ef10_filter_entry_spec(table, filter_idx);
	if (!spec ||
	    (!by_index &&
	     efx_ef10_filter_rx_match_pri(table, spec->match_flags) !=
	     filter_id / HUNT_FILTER_TBL_ROWS)) {
		rc = -ENOENT;
		goto out_unlock;
	}

	if (spec->flags & EFX_FILTER_FLAG_RX_OVER_AUTO &&
	    priority_mask == (1U << EFX_FILTER_PRI_AUTO)) {
		/* Just remove flags */
		spec->flags &= ~EFX_FILTER_FLAG_RX_OVER_AUTO;
		table->entry[filter_idx].spec &= ~EFX_EF10_FILTER_FLAG_AUTO_OLD;
		rc = 0;
		goto out_unlock;
	}

	if (!(priority_mask & (1U << spec->priority))) {
		rc = -ENOENT;
		goto out_unlock;
	}

	table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_BUSY;
	spin_unlock_bh(&efx->filter_lock);

	if (spec->flags & EFX_FILTER_FLAG_RX_OVER_AUTO) {
		/* Reset to an automatic filter */

		struct efx_filter_spec new_spec = *spec;

		new_spec.priority = EFX_FILTER_PRI_AUTO;
		new_spec.flags = (EFX_FILTER_FLAG_RX |
				  EFX_FILTER_FLAG_RX_RSS);
		new_spec.dmaq_id = 0;
		new_spec.rss_context = EFX_FILTER_RSS_CONTEXT_DEFAULT;
		rc = efx_ef10_filter_push(efx, &new_spec,
					  &table->entry[filter_idx].handle,
					  true);

		spin_lock_bh(&efx->filter_lock);
		if (rc == 0)
			*spec = new_spec;
	} else {
		/* Really remove the filter */

		MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP,
			       efx_ef10_filter_is_exclusive(spec) ?
			       MC_CMD_FILTER_OP_IN_OP_REMOVE :
			       MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE);
		MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE,
			       table->entry[filter_idx].handle);
		rc = efx_mcdi_rpc(efx, MC_CMD_FILTER_OP,
				  inbuf, sizeof(inbuf), NULL, 0, NULL);

		spin_lock_bh(&efx->filter_lock);
		if (rc == 0) {
			kfree(spec);
			efx_ef10_filter_set_entry(table, filter_idx, NULL, 0);
		}
	}

	table->entry[filter_idx].spec &= ~EFX_EF10_FILTER_FLAG_BUSY;
	wake_up_all(&table->waitq);
out_unlock:
	spin_unlock_bh(&efx->filter_lock);
	finish_wait(&table->waitq, &wait);
	return rc;
}
static int efx_ef10_filter_remove_safe(struct efx_nic *efx,
				       enum efx_filter_priority priority,
				       u32 filter_id)
{
	return efx_ef10_filter_remove_internal(efx, 1U << priority,
					       filter_id, false);
}
static int efx_ef10_filter_get_safe(struct efx_nic *efx,
				    enum efx_filter_priority priority,
				    u32 filter_id, struct efx_filter_spec *spec)
{
	unsigned int filter_idx = filter_id % HUNT_FILTER_TBL_ROWS;
	struct efx_ef10_filter_table *table = efx->filter_state;
	const struct efx_filter_spec *saved_spec;
	int rc;

	spin_lock_bh(&efx->filter_lock);
	saved_spec = efx_ef10_filter_entry_spec(table, filter_idx);
	if (saved_spec && saved_spec->priority == priority &&
	    efx_ef10_filter_rx_match_pri(table, saved_spec->match_flags) ==
	    filter_id / HUNT_FILTER_TBL_ROWS) {
		*spec = *saved_spec;
		rc = 0;
	} else {
		rc = -ENOENT;
	}
	spin_unlock_bh(&efx->filter_lock);
	return rc;
}
static int efx_ef10_filter_clear_rx(struct efx_nic *efx,
				    enum efx_filter_priority priority)
{
	unsigned int priority_mask;
	unsigned int i;
	int rc;

	priority_mask = (((1U << (priority + 1)) - 1) &
			 ~(1U << EFX_FILTER_PRI_AUTO));

	for (i = 0; i < HUNT_FILTER_TBL_ROWS; i++) {
		rc = efx_ef10_filter_remove_internal(efx, priority_mask,
						     i, true);
		if (rc && rc != -ENOENT)
			return rc;
	}

	return 0;
}
static u32 efx_ef10_filter_count_rx_used(struct efx_nic *efx,
					 enum efx_filter_priority priority)
{
	struct efx_ef10_filter_table *table = efx->filter_state;
	unsigned int filter_idx;
	s32 count = 0;

	spin_lock_bh(&efx->filter_lock);
	for (filter_idx = 0; filter_idx < HUNT_FILTER_TBL_ROWS; filter_idx++)
		if (table->entry[filter_idx].spec &&
		    efx_ef10_filter_entry_spec(table, filter_idx)->priority ==
		    priority)
			++count;
	spin_unlock_bh(&efx->filter_lock);
	return count;
}
static u32 efx_ef10_filter_get_rx_id_limit(struct efx_nic *efx)
{
	struct efx_ef10_filter_table *table = efx->filter_state;

	return table->rx_match_count * HUNT_FILTER_TBL_ROWS;
}
static s32 efx_ef10_filter_get_rx_ids(struct efx_nic *efx,
				      enum efx_filter_priority priority,
				      u32 *buf, u32 size)
{
	struct efx_ef10_filter_table *table = efx->filter_state;
	struct efx_filter_spec *spec;
	unsigned int filter_idx;
	s32 count = 0;

	spin_lock_bh(&efx->filter_lock);
	for (filter_idx = 0; filter_idx < HUNT_FILTER_TBL_ROWS; filter_idx++) {
		spec = efx_ef10_filter_entry_spec(table, filter_idx);
		if (spec && spec->priority == priority) {
			if (count == size) {
				count = -EMSGSIZE;
				break;
			}
			buf[count++] = (efx_ef10_filter_rx_match_pri(
						table, spec->match_flags) *
					HUNT_FILTER_TBL_ROWS +
					filter_idx);
		}
	}
	spin_unlock_bh(&efx->filter_lock);
	return count;
}
#ifdef CONFIG_RFS_ACCEL

static efx_mcdi_async_completer efx_ef10_filter_rfs_insert_complete;

static s32 efx_ef10_filter_rfs_insert(struct efx_nic *efx,
				      struct efx_filter_spec *spec)
{
	struct efx_ef10_filter_table *table = efx->filter_state;
	MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_IN_LEN);
	struct efx_filter_spec *saved_spec;
	unsigned int hash, i, depth = 1;
	bool replacing = false;
	int ins_index = -1;
	u64 cookie;
	s32 rc;

	/* Must be an RX filter without RSS and not for a multicast
	 * destination address (RFS only works for connected sockets).
	 * These restrictions allow us to pass only a tiny amount of
	 * data through to the completion function.
	 */
	EFX_WARN_ON_PARANOID(spec->flags !=
			     (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_RX_SCATTER));
	EFX_WARN_ON_PARANOID(spec->priority != EFX_FILTER_PRI_HINT);
	EFX_WARN_ON_PARANOID(efx_filter_is_mc_recipient(spec));

	hash = efx_ef10_filter_hash(spec);

	spin_lock_bh(&efx->filter_lock);

	/* Find any existing filter with the same match tuple or else
	 * a free slot to insert at.  If an existing filter is busy,
	 * we have to give up.
	 */
	for (;;) {
		i = (hash + depth) & (HUNT_FILTER_TBL_ROWS - 1);
		saved_spec = efx_ef10_filter_entry_spec(table, i);

		if (!saved_spec) {
			if (ins_index < 0)
				ins_index = i;
		} else if (efx_ef10_filter_equal(spec, saved_spec)) {
			if (table->entry[i].spec & EFX_EF10_FILTER_FLAG_BUSY) {
				rc = -EBUSY;
				goto fail_unlock;
			}
			if (spec->priority < saved_spec->priority) {
				rc = -EPERM;
				goto fail_unlock;
			}
			ins_index = i;
			break;
		}

		/* Once we reach the maximum search depth, use the
		 * first suitable slot or return -EBUSY if there was
		 * none
		 */
		if (depth == EFX_EF10_FILTER_SEARCH_LIMIT) {
			if (ins_index < 0) {
				rc = -EBUSY;
				goto fail_unlock;
			}
			break;
		}

		++depth;
	}

	/* Create a software table entry if necessary, and mark it
	 * busy.  We might yet fail to insert, but any attempt to
	 * insert a conflicting filter while we're waiting for the
	 * firmware must find the busy entry.
	 */
	saved_spec = efx_ef10_filter_entry_spec(table, ins_index);
	if (saved_spec) {
		replacing = true;
	} else {
		saved_spec = kmalloc(sizeof(*spec), GFP_ATOMIC);
		if (!saved_spec) {
			rc = -ENOMEM;
			goto fail_unlock;
		}
		*saved_spec = *spec;
	}
	efx_ef10_filter_set_entry(table, ins_index, saved_spec,
				  EFX_EF10_FILTER_FLAG_BUSY);

	spin_unlock_bh(&efx->filter_lock);

	/* Pack up the variables needed on completion */
	cookie = replacing << 31 | ins_index << 16 | spec->dmaq_id;

	efx_ef10_filter_push_prep(efx, spec, inbuf,
				  table->entry[ins_index].handle, replacing);
	efx_mcdi_rpc_async(efx, MC_CMD_FILTER_OP, inbuf, sizeof(inbuf),
			   MC_CMD_FILTER_OP_OUT_LEN,
			   efx_ef10_filter_rfs_insert_complete, cookie);

	return ins_index;

fail_unlock:
	spin_unlock_bh(&efx->filter_lock);
	return rc;
}
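
/* Cookie layout used above (illustrative): bit 31 holds the replacing
 * flag, bits 30:16 the table row and bits 15:0 the RX queue, so e.g.
 * replacing = 0, ins_index = 0x123, dmaq_id = 5 packs to 0x01230005.
 * All three fit comfortably in the single cookie value that
 * efx_mcdi_rpc_async() passes through to the completion handler.
 */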
static void
efx_ef10_filter_rfs_insert_complete(struct efx_nic *efx, unsigned long cookie,
				    int rc, efx_dword_t *outbuf,
				    size_t outlen_actual)
{
	struct efx_ef10_filter_table *table = efx->filter_state;
	unsigned int ins_index, dmaq_id;
	struct efx_filter_spec *spec;
	bool replacing;

	/* Unpack the cookie */
	replacing = cookie >> 31;
	ins_index = (cookie >> 16) & (HUNT_FILTER_TBL_ROWS - 1);
	dmaq_id = cookie & 0xffff;

	spin_lock_bh(&efx->filter_lock);
	spec = efx_ef10_filter_entry_spec(table, ins_index);
	if (rc == 0) {
		table->entry[ins_index].handle =
			MCDI_QWORD(outbuf, FILTER_OP_OUT_HANDLE);
		if (replacing)
			spec->dmaq_id = dmaq_id;
	} else if (!replacing) {
		kfree(spec);
		spec = NULL;
	}
	efx_ef10_filter_set_entry(table, ins_index, spec, 0);
	spin_unlock_bh(&efx->filter_lock);

	wake_up_all(&table->waitq);
}
static void
efx_ef10_filter_rfs_expire_complete(struct efx_nic *efx,
				    unsigned long filter_idx,
				    int rc, efx_dword_t *outbuf,
				    size_t outlen_actual);

static bool efx_ef10_filter_rfs_expire_one(struct efx_nic *efx, u32 flow_id,
					   unsigned int filter_idx)
{
	struct efx_ef10_filter_table *table = efx->filter_state;
	struct efx_filter_spec *spec =
		efx_ef10_filter_entry_spec(table, filter_idx);
	MCDI_DECLARE_BUF(inbuf,
			 MC_CMD_FILTER_OP_IN_HANDLE_OFST +
			 MC_CMD_FILTER_OP_IN_HANDLE_LEN);

	if (!spec ||
	    (table->entry[filter_idx].spec & EFX_EF10_FILTER_FLAG_BUSY) ||
	    spec->priority != EFX_FILTER_PRI_HINT ||
	    !rps_may_expire_flow(efx->net_dev, spec->dmaq_id,
				 flow_id, filter_idx))
		return false;

	MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP,
		       MC_CMD_FILTER_OP_IN_OP_REMOVE);
	MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE,
		       table->entry[filter_idx].handle);
	if (efx_mcdi_rpc_async(efx, MC_CMD_FILTER_OP, inbuf, sizeof(inbuf), 0,
			       efx_ef10_filter_rfs_expire_complete, filter_idx))
		return false;

	table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_BUSY;
	return true;
}
static void
efx_ef10_filter_rfs_expire_complete(struct efx_nic *efx,
				    unsigned long filter_idx,
				    int rc, efx_dword_t *outbuf,
				    size_t outlen_actual)
{
	struct efx_ef10_filter_table *table = efx->filter_state;
	struct efx_filter_spec *spec =
		efx_ef10_filter_entry_spec(table, filter_idx);

	spin_lock_bh(&efx->filter_lock);
	if (rc == 0) {
		kfree(spec);
		efx_ef10_filter_set_entry(table, filter_idx, NULL, 0);
	}
	table->entry[filter_idx].spec &= ~EFX_EF10_FILTER_FLAG_BUSY;
	wake_up_all(&table->waitq);
	spin_unlock_bh(&efx->filter_lock);
}

#endif /* CONFIG_RFS_ACCEL */
static int efx_ef10_filter_match_flags_from_mcdi(u32 mcdi_flags)
{
	int match_flags = 0;

#define MAP_FLAG(gen_flag, mcdi_field) {				\
		u32 old_mcdi_flags = mcdi_flags;			\
		mcdi_flags &= ~(1 << MC_CMD_FILTER_OP_IN_MATCH_ ##	\
				mcdi_field ## _LBN);			\
		if (mcdi_flags != old_mcdi_flags)			\
			match_flags |= EFX_FILTER_MATCH_ ## gen_flag;	\
	}
	MAP_FLAG(LOC_MAC_IG, UNKNOWN_UCAST_DST);
	MAP_FLAG(LOC_MAC_IG, UNKNOWN_MCAST_DST);
	MAP_FLAG(REM_HOST, SRC_IP);
	MAP_FLAG(LOC_HOST, DST_IP);
	MAP_FLAG(REM_MAC, SRC_MAC);
	MAP_FLAG(REM_PORT, SRC_PORT);
	MAP_FLAG(LOC_MAC, DST_MAC);
	MAP_FLAG(LOC_PORT, DST_PORT);
	MAP_FLAG(ETHER_TYPE, ETHER_TYPE);
	MAP_FLAG(INNER_VID, INNER_VLAN);
	MAP_FLAG(OUTER_VID, OUTER_VLAN);
	MAP_FLAG(IP_PROTO, IP_PROTO);
#undef MAP_FLAG

	/* Did we map them all? */
	if (mcdi_flags)
		return -EINVAL;

	return match_flags;
}
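
/* Example (illustrative): an mcdi_flags word with only the SRC_IP and
 * DST_PORT bits set maps to EFX_FILTER_MATCH_REM_HOST |
 * EFX_FILTER_MATCH_LOC_PORT.  Any bit still set after all the
 * MAP_FLAG() invocations makes the whole combination unsupported, so
 * new firmware match types are rejected rather than mistranslated.
 */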
static int efx_ef10_filter_table_probe(struct efx_nic *efx)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_GET_PARSER_DISP_INFO_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_PARSER_DISP_INFO_OUT_LENMAX);
	unsigned int pd_match_pri, pd_match_count;
	struct efx_ef10_filter_table *table;
	size_t outlen;
	int rc;

	table = kzalloc(sizeof(*table), GFP_KERNEL);
	if (!table)
		return -ENOMEM;

	/* Find out which RX filter types are supported, and their priorities */
	MCDI_SET_DWORD(inbuf, GET_PARSER_DISP_INFO_IN_OP,
		       MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_RX_MATCHES);
	rc = efx_mcdi_rpc(efx, MC_CMD_GET_PARSER_DISP_INFO,
			  inbuf, sizeof(inbuf), outbuf, sizeof(outbuf),
			  &outlen);
	if (rc)
		goto fail;
	pd_match_count = MCDI_VAR_ARRAY_LEN(
		outlen, GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES);
	table->rx_match_count = 0;

	for (pd_match_pri = 0; pd_match_pri < pd_match_count; pd_match_pri++) {
		u32 mcdi_flags =
			MCDI_ARRAY_DWORD(
				outbuf,
				GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES,
				pd_match_pri);
		rc = efx_ef10_filter_match_flags_from_mcdi(mcdi_flags);
		if (rc < 0) {
			netif_dbg(efx, probe, efx->net_dev,
				  "%s: fw flags %#x pri %u not supported in driver\n",
				  __func__, mcdi_flags, pd_match_pri);
		} else {
			netif_dbg(efx, probe, efx->net_dev,
				  "%s: fw flags %#x pri %u supported as driver flags %#x pri %u\n",
				  __func__, mcdi_flags, pd_match_pri,
				  rc, table->rx_match_count);
			table->rx_match_flags[table->rx_match_count++] = rc;
		}
	}

	table->entry = vzalloc(HUNT_FILTER_TBL_ROWS * sizeof(*table->entry));
	if (!table->entry) {
		rc = -ENOMEM;
		goto fail;
	}

	efx->filter_state = table;
	init_waitqueue_head(&table->waitq);
	return 0;

fail:
	kfree(table);
	return rc;
}
static void efx_ef10_filter_table_restore(struct efx_nic *efx)
{
	struct efx_ef10_filter_table *table = efx->filter_state;
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	struct efx_filter_spec *spec;
	unsigned int filter_idx;
	bool failed = false;
	int rc;

	if (!nic_data->must_restore_filters)
		return;

	spin_lock_bh(&efx->filter_lock);

	for (filter_idx = 0; filter_idx < HUNT_FILTER_TBL_ROWS; filter_idx++) {
		spec = efx_ef10_filter_entry_spec(table, filter_idx);
		if (!spec)
			continue;

		table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_BUSY;
		spin_unlock_bh(&efx->filter_lock);

		rc = efx_ef10_filter_push(efx, spec,
					  &table->entry[filter_idx].handle,
					  false);
		if (rc)
			failed = true;

		spin_lock_bh(&efx->filter_lock);
		if (rc) {
			kfree(spec);
			efx_ef10_filter_set_entry(table, filter_idx, NULL, 0);
		} else {
			table->entry[filter_idx].spec &=
				~EFX_EF10_FILTER_FLAG_BUSY;
		}
	}

	spin_unlock_bh(&efx->filter_lock);

	if (failed)
		netif_err(efx, hw, efx->net_dev,
			  "unable to restore all filters\n");
	else
		nic_data->must_restore_filters = false;
}
static void efx_ef10_filter_table_remove(struct efx_nic *efx)
{
	struct efx_ef10_filter_table *table = efx->filter_state;
	MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_IN_LEN);
	struct efx_filter_spec *spec;
	unsigned int filter_idx;
	int rc;

	for (filter_idx = 0; filter_idx < HUNT_FILTER_TBL_ROWS; filter_idx++) {
		spec = efx_ef10_filter_entry_spec(table, filter_idx);
		if (!spec)
			continue;

		MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP,
			       efx_ef10_filter_is_exclusive(spec) ?
			       MC_CMD_FILTER_OP_IN_OP_REMOVE :
			       MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE);
		MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE,
			       table->entry[filter_idx].handle);
		rc = efx_mcdi_rpc(efx, MC_CMD_FILTER_OP, inbuf, sizeof(inbuf),
				  NULL, 0, NULL);
		if (rc)
			netdev_WARN(efx->net_dev,
				    "filter_idx=%#x handle=%#llx\n",
				    filter_idx,
				    table->entry[filter_idx].handle);
		kfree(spec);
	}

	vfree(table->entry);
	kfree(table);
}
static void efx_ef10_filter_sync_rx_mode(struct efx_nic *efx)
{
	struct efx_ef10_filter_table *table = efx->filter_state;
	struct net_device *net_dev = efx->net_dev;
	struct efx_filter_spec spec;
	bool remove_failed = false;
	struct netdev_hw_addr *uc;
	struct netdev_hw_addr *mc;
	unsigned int filter_idx;
	int i, n, rc;

	if (!efx_dev_registered(efx))
		return;

	/* Mark old filters that may need to be removed */
	spin_lock_bh(&efx->filter_lock);
	n = table->dev_uc_count < 0 ? 1 : table->dev_uc_count;
	for (i = 0; i < n; i++) {
		filter_idx = table->dev_uc_list[i].id % HUNT_FILTER_TBL_ROWS;
		table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_AUTO_OLD;
	}
	n = table->dev_mc_count < 0 ? 1 : table->dev_mc_count;
	for (i = 0; i < n; i++) {
		filter_idx = table->dev_mc_list[i].id % HUNT_FILTER_TBL_ROWS;
		table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_AUTO_OLD;
	}
	spin_unlock_bh(&efx->filter_lock);

	/* Copy/convert the address lists; add the primary station
	 * address and broadcast address
	 */
	netif_addr_lock_bh(net_dev);
	if (net_dev->flags & IFF_PROMISC ||
	    netdev_uc_count(net_dev) >= EFX_EF10_FILTER_DEV_UC_MAX) {
		table->dev_uc_count = -1;
	} else {
		table->dev_uc_count = 1 + netdev_uc_count(net_dev);
		memcpy(table->dev_uc_list[0].addr, net_dev->dev_addr,
		       ETH_ALEN);
		i = 1;
		netdev_for_each_uc_addr(uc, net_dev) {
			memcpy(table->dev_uc_list[i].addr,
			       uc->addr, ETH_ALEN);
			i++;
		}
	}
	if (net_dev->flags & (IFF_PROMISC | IFF_ALLMULTI) ||
	    netdev_mc_count(net_dev) >= EFX_EF10_FILTER_DEV_MC_MAX) {
		table->dev_mc_count = -1;
	} else {
		table->dev_mc_count = 1 + netdev_mc_count(net_dev);
		eth_broadcast_addr(table->dev_mc_list[0].addr);
		i = 1;
		netdev_for_each_mc_addr(mc, net_dev) {
			memcpy(table->dev_mc_list[i].addr,
			       mc->addr, ETH_ALEN);
			i++;
		}
	}
	netif_addr_unlock_bh(net_dev);

	/* Insert/renew unicast filters */
	if (table->dev_uc_count >= 0) {
		for (i = 0; i < table->dev_uc_count; i++) {
			efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO,
					   EFX_FILTER_FLAG_RX_RSS,
					   0);
			efx_filter_set_eth_local(&spec, EFX_FILTER_VID_UNSPEC,
						 table->dev_uc_list[i].addr);
			rc = efx_ef10_filter_insert(efx, &spec, true);
			if (rc < 0) {
				/* Fall back to unicast-promisc */
				while (i--)
					efx_ef10_filter_remove_safe(
						efx, EFX_FILTER_PRI_AUTO,
						table->dev_uc_list[i].id);
				table->dev_uc_count = -1;
				break;
			}
			table->dev_uc_list[i].id = rc;
		}
	}
	if (table->dev_uc_count < 0) {
		efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO,
				   EFX_FILTER_FLAG_RX_RSS,
				   0);
		efx_filter_set_uc_def(&spec);
		rc = efx_ef10_filter_insert(efx, &spec, true);
		if (rc < 0) {
			WARN_ON(1);
			table->dev_uc_count = 0;
		} else {
			table->dev_uc_list[0].id = rc;
		}
	}

	/* Insert/renew multicast filters */
	if (table->dev_mc_count >= 0) {
		for (i = 0; i < table->dev_mc_count; i++) {
			efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO,
					   EFX_FILTER_FLAG_RX_RSS,
					   0);
			efx_filter_set_eth_local(&spec, EFX_FILTER_VID_UNSPEC,
						 table->dev_mc_list[i].addr);
			rc = efx_ef10_filter_insert(efx, &spec, true);
			if (rc < 0) {
				/* Fall back to multicast-promisc */
				while (i--)
					efx_ef10_filter_remove_safe(
						efx, EFX_FILTER_PRI_AUTO,
						table->dev_mc_list[i].id);
				table->dev_mc_count = -1;
				break;
			}
			table->dev_mc_list[i].id = rc;
		}
	}
	if (table->dev_mc_count < 0) {
		efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO,
				   EFX_FILTER_FLAG_RX_RSS,
				   0);
		efx_filter_set_mc_def(&spec);
		rc = efx_ef10_filter_insert(efx, &spec, true);
		if (rc < 0) {
			WARN_ON(1);
			table->dev_mc_count = 0;
		} else {
			table->dev_mc_list[0].id = rc;
		}
	}

	/* Remove filters that weren't renewed.  Since nothing else
	 * changes the AUTO_OLD flag or removes these filters, we
	 * don't need to hold the filter_lock while scanning for
	 * these filters.
	 */
	for (i = 0; i < HUNT_FILTER_TBL_ROWS; i++) {
		if (ACCESS_ONCE(table->entry[i].spec) &
		    EFX_EF10_FILTER_FLAG_AUTO_OLD) {
			if (efx_ef10_filter_remove_internal(
				    efx, 1U << EFX_FILTER_PRI_AUTO,
				    i, true) < 0)
				remove_failed = true;
		}
	}
	WARN_ON(remove_failed);
}
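
/* The sync above is a mark-and-sweep pass: AUTO_OLD is first set on
 * every filter backing the old address lists, cleared again on each
 * entry that efx_ef10_filter_insert() renews (an identical spec maps
 * back to the same row), and any entry still marked at the end
 * belonged to an address that has gone away, so it is removed.
 */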
static int efx_ef10_mac_reconfigure(struct efx_nic *efx)
{
	efx_ef10_filter_sync_rx_mode(efx);

	return efx_mcdi_set_mac(efx);
}
static int efx_ef10_start_bist(struct efx_nic *efx, u32 bist_type)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_START_BIST_IN_LEN);

	MCDI_SET_DWORD(inbuf, START_BIST_IN_TYPE, bist_type);
	return efx_mcdi_rpc(efx, MC_CMD_START_BIST, inbuf, sizeof(inbuf),
			    NULL, 0, NULL);
}
/* MC BISTs follow a different poll mechanism to phy BISTs.
 * The BIST is done in the poll handler on the MC, and the MCDI command
 * will block until the BIST is done.
 */
static int efx_ef10_poll_bist(struct efx_nic *efx)
{
	int rc;
	MCDI_DECLARE_BUF(outbuf, MC_CMD_POLL_BIST_OUT_LEN);
	size_t outlen;
	u32 result;

	rc = efx_mcdi_rpc(efx, MC_CMD_POLL_BIST, NULL, 0,
			  outbuf, sizeof(outbuf), &outlen);
	if (rc != 0)
		return rc;

	if (outlen < MC_CMD_POLL_BIST_OUT_LEN)
		return -EIO;

	result = MCDI_DWORD(outbuf, POLL_BIST_OUT_RESULT);
	switch (result) {
	case MC_CMD_POLL_BIST_PASSED:
		netif_dbg(efx, hw, efx->net_dev, "BIST passed.\n");
		return 0;
	case MC_CMD_POLL_BIST_TIMEOUT:
		netif_err(efx, hw, efx->net_dev, "BIST timed out\n");
		return -EIO;
	case MC_CMD_POLL_BIST_FAILED:
		netif_err(efx, hw, efx->net_dev, "BIST failed.\n");
		return -EIO;
	default:
		netif_err(efx, hw, efx->net_dev,
			  "BIST returned unknown result %u", result);
		return -EIO;
	}
}
static int efx_ef10_run_bist(struct efx_nic *efx, u32 bist_type)
{
	int rc;

	netif_dbg(efx, drv, efx->net_dev, "starting BIST type %u\n", bist_type);

	rc = efx_ef10_start_bist(efx, bist_type);
	if (rc != 0)
		return rc;

	return efx_ef10_poll_bist(efx);
}
static int
efx_ef10_test_chip(struct efx_nic *efx, struct efx_self_tests *tests)
{
	int rc, rc2;

	efx_reset_down(efx, RESET_TYPE_WORLD);

	rc = efx_mcdi_rpc(efx, MC_CMD_ENABLE_OFFLINE_BIST,
			  NULL, 0, NULL, 0, NULL);
	if (rc != 0)
		goto out;

	tests->memory = efx_ef10_run_bist(efx, MC_CMD_MC_MEM_BIST) ? -1 : 1;
	tests->registers = efx_ef10_run_bist(efx, MC_CMD_REG_BIST) ? -1 : 1;

	rc = efx_mcdi_reset(efx, RESET_TYPE_WORLD);

out:
	rc2 = efx_reset_up(efx, RESET_TYPE_WORLD, rc == 0);
	return rc ? rc : rc2;
}
#ifdef CONFIG_SFC_MTD

struct efx_ef10_nvram_type_info {
	u16 type, type_mask;
	u8 port;
	const char *name;
};

static const struct efx_ef10_nvram_type_info efx_ef10_nvram_types[] = {
	{ NVRAM_PARTITION_TYPE_MC_FIRMWARE,	   0,	 0, "sfc_mcfw" },
	{ NVRAM_PARTITION_TYPE_MC_FIRMWARE_BACKUP, 0,	 0, "sfc_mcfw_backup" },
	{ NVRAM_PARTITION_TYPE_EXPANSION_ROM,	   0,	 0, "sfc_exp_rom" },
	{ NVRAM_PARTITION_TYPE_STATIC_CONFIG,	   0,	 0, "sfc_static_cfg" },
	{ NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG,	   0,	 0, "sfc_dynamic_cfg" },
	{ NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT0, 0,	 0, "sfc_exp_rom_cfg" },
	{ NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT1, 0,	 1, "sfc_exp_rom_cfg" },
	{ NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT2, 0,	 2, "sfc_exp_rom_cfg" },
	{ NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT3, 0,	 3, "sfc_exp_rom_cfg" },
	{ NVRAM_PARTITION_TYPE_LICENSE,		   0,	 0, "sfc_license" },
	{ NVRAM_PARTITION_TYPE_PHY_MIN,		   0xff, 0, "sfc_phy_fw" },
};
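
/* Matching rule (illustrative): a partition type matches an entry when
 * (type & ~type_mask) equals the entry's type, so the PHY_MIN entry
 * with mask 0xff claims the whole range of per-PHY partitions under one
 * MTD name, while the port-specific EXPROM_CONFIG entries match exactly
 * and are then filtered against efx_port_num().
 */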
static int efx_ef10_mtd_probe_partition(struct efx_nic *efx,
					struct efx_mcdi_mtd_partition *part,
					unsigned int type)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_METADATA_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_METADATA_OUT_LENMAX);
	const struct efx_ef10_nvram_type_info *info;
	size_t size, erase_size, outlen;
	bool protected;
	int rc;

	for (info = efx_ef10_nvram_types; ; info++) {
		if (info ==
		    efx_ef10_nvram_types + ARRAY_SIZE(efx_ef10_nvram_types))
			return -ENODEV;
		if ((type & ~info->type_mask) == info->type)
			break;
	}
	if (info->port != efx_port_num(efx))
		return -ENODEV;

	rc = efx_mcdi_nvram_info(efx, type, &size, &erase_size, &protected);
	if (rc)
		return rc;
	if (protected)
		return -ENODEV; /* hide it */

	part->nvram_type = type;

	MCDI_SET_DWORD(inbuf, NVRAM_METADATA_IN_TYPE, type);
	rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_METADATA, inbuf, sizeof(inbuf),
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		return rc;
	if (outlen < MC_CMD_NVRAM_METADATA_OUT_LENMIN)
		return -EIO;
	if (MCDI_DWORD(outbuf, NVRAM_METADATA_OUT_FLAGS) &
	    (1 << MC_CMD_NVRAM_METADATA_OUT_SUBTYPE_VALID_LBN))
		part->fw_subtype = MCDI_DWORD(outbuf,
					      NVRAM_METADATA_OUT_SUBTYPE);

	part->common.dev_type_name = "EF10 NVRAM manager";
	part->common.type_name = info->name;

	part->common.mtd.type = MTD_NORFLASH;
	part->common.mtd.flags = MTD_CAP_NORFLASH;
	part->common.mtd.size = size;
	part->common.mtd.erasesize = erase_size;

	return 0;
}
static int efx_ef10_mtd_probe(struct efx_nic *efx)
{
	MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_PARTITIONS_OUT_LENMAX);
	struct efx_mcdi_mtd_partition *parts;
	size_t outlen, n_parts_total, i, n_parts;
	unsigned int type;
	int rc;

	BUILD_BUG_ON(MC_CMD_NVRAM_PARTITIONS_IN_LEN != 0);
	rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_PARTITIONS, NULL, 0,
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		return rc;
	if (outlen < MC_CMD_NVRAM_PARTITIONS_OUT_LENMIN)
		return -EIO;

	n_parts_total = MCDI_DWORD(outbuf, NVRAM_PARTITIONS_OUT_NUM_PARTITIONS);
	if (n_parts_total >
	    MCDI_VAR_ARRAY_LEN(outlen, NVRAM_PARTITIONS_OUT_TYPE_ID))
		return -EIO;

	parts = kcalloc(n_parts_total, sizeof(*parts), GFP_KERNEL);
	if (!parts)
		return -ENOMEM;

	n_parts = 0;
	for (i = 0; i < n_parts_total; i++) {
		type = MCDI_ARRAY_DWORD(outbuf, NVRAM_PARTITIONS_OUT_TYPE_ID,
					i);
		rc = efx_ef10_mtd_probe_partition(efx, &parts[n_parts], type);
		if (rc == 0)
			n_parts++;
		else if (rc != -ENODEV)
			goto fail;
	}

	rc = efx_mtd_add(efx, &parts[0].common, n_parts, sizeof(*parts));
fail:
	if (rc)
		kfree(parts);
	return rc;
}

#endif /* CONFIG_SFC_MTD */
static void efx_ef10_ptp_write_host_time(struct efx_nic *efx, u32 host_time)
{
	_efx_writed(efx, cpu_to_le32(host_time), ER_DZ_MC_DB_LWRD);
}
static int efx_ef10_rx_enable_timestamping(struct efx_channel *channel,
					   bool temp)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_LEN);
	int rc;

	if (channel->sync_events_state == SYNC_EVENTS_REQUESTED ||
	    channel->sync_events_state == SYNC_EVENTS_VALID ||
	    (temp && channel->sync_events_state == SYNC_EVENTS_DISABLED))
		return 0;
	channel->sync_events_state = SYNC_EVENTS_REQUESTED;

	MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_TIME_EVENT_SUBSCRIBE);
	MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0);
	MCDI_SET_DWORD(inbuf, PTP_IN_TIME_EVENT_SUBSCRIBE_QUEUE,
		       channel->channel);

	rc = efx_mcdi_rpc(channel->efx, MC_CMD_PTP,
			  inbuf, sizeof(inbuf), NULL, 0, NULL);

	if (rc != 0)
		channel->sync_events_state = temp ? SYNC_EVENTS_QUIESCENT :
						    SYNC_EVENTS_DISABLED;

	return rc;
}
static int efx_ef10_rx_disable_timestamping(struct efx_channel *channel,
					    bool temp)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_LEN);
	int rc;

	if (channel->sync_events_state == SYNC_EVENTS_DISABLED ||
	    (temp && channel->sync_events_state == SYNC_EVENTS_QUIESCENT))
		return 0;
	if (channel->sync_events_state == SYNC_EVENTS_QUIESCENT) {
		channel->sync_events_state = SYNC_EVENTS_DISABLED;
		return 0;
	}
	channel->sync_events_state = temp ? SYNC_EVENTS_QUIESCENT :
					    SYNC_EVENTS_DISABLED;

	MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_TIME_EVENT_UNSUBSCRIBE);
	MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0);
	MCDI_SET_DWORD(inbuf, PTP_IN_TIME_EVENT_UNSUBSCRIBE_CONTROL,
		       MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_SINGLE);
	MCDI_SET_DWORD(inbuf, PTP_IN_TIME_EVENT_UNSUBSCRIBE_QUEUE,
		       channel->channel);

	rc = efx_mcdi_rpc(channel->efx, MC_CMD_PTP,
			  inbuf, sizeof(inbuf), NULL, 0, NULL);

	return rc;
}
static int efx_ef10_ptp_set_ts_sync_events(struct efx_nic *efx, bool en,
					   bool temp)
{
	int (*set)(struct efx_channel *channel, bool temp);
	struct efx_channel *channel;

	set = en ?
	      efx_ef10_rx_enable_timestamping :
	      efx_ef10_rx_disable_timestamping;

	efx_for_each_channel(channel, efx) {
		int rc = set(channel, temp);
		if (en && rc != 0) {
			efx_ef10_ptp_set_ts_sync_events(efx, false, temp);
			return rc;
		}
	}

	return 0;
}
static int efx_ef10_ptp_set_ts_config(struct efx_nic *efx,
				      struct hwtstamp_config *init)
{
	int rc;

	switch (init->rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		efx_ef10_ptp_set_ts_sync_events(efx, false, false);
		/* if TX timestamping is still requested then leave PTP on */
		return efx_ptp_change_mode(efx,
					   init->tx_type != HWTSTAMP_TX_OFF, 0);
	case HWTSTAMP_FILTER_ALL:
	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
		init->rx_filter = HWTSTAMP_FILTER_ALL;
		rc = efx_ptp_change_mode(efx, true, 0);
		if (!rc)
			rc = efx_ef10_ptp_set_ts_sync_events(efx, true, false);
		if (rc)
			efx_ptp_change_mode(efx, false, 0);
		return rc;
	default:
		return -ERANGE;
	}
}
const struct efx_nic_type efx_hunt_a0_nic_type = {
	.mem_map_size = efx_ef10_mem_map_size,
	.probe = efx_ef10_probe,
	.remove = efx_ef10_remove,
	.dimension_resources = efx_ef10_dimension_resources,
	.init = efx_ef10_init_nic,
	.fini = efx_port_dummy_op_void,
	.map_reset_reason = efx_mcdi_map_reset_reason,
	.map_reset_flags = efx_ef10_map_reset_flags,
	.reset = efx_ef10_reset,
	.probe_port = efx_mcdi_port_probe,
	.remove_port = efx_mcdi_port_remove,
	.fini_dmaq = efx_ef10_fini_dmaq,
	.describe_stats = efx_ef10_describe_stats,
	.update_stats = efx_ef10_update_stats,
	.start_stats = efx_mcdi_mac_start_stats,
	.pull_stats = efx_mcdi_mac_pull_stats,
	.stop_stats = efx_mcdi_mac_stop_stats,
	.set_id_led = efx_mcdi_set_id_led,
	.push_irq_moderation = efx_ef10_push_irq_moderation,
	.reconfigure_mac = efx_ef10_mac_reconfigure,
	.check_mac_fault = efx_mcdi_mac_check_fault,
	.reconfigure_port = efx_mcdi_port_reconfigure,
	.get_wol = efx_ef10_get_wol,
	.set_wol = efx_ef10_set_wol,
	.resume_wol = efx_port_dummy_op_void,
	.test_chip = efx_ef10_test_chip,
	.test_nvram = efx_mcdi_nvram_test_all,
	.mcdi_request = efx_ef10_mcdi_request,
	.mcdi_poll_response = efx_ef10_mcdi_poll_response,
	.mcdi_read_response = efx_ef10_mcdi_read_response,
	.mcdi_poll_reboot = efx_ef10_mcdi_poll_reboot,
	.irq_enable_master = efx_port_dummy_op_void,
	.irq_test_generate = efx_ef10_irq_test_generate,
	.irq_disable_non_ev = efx_port_dummy_op_void,
	.irq_handle_msi = efx_ef10_msi_interrupt,
	.irq_handle_legacy = efx_ef10_legacy_interrupt,
	.tx_probe = efx_ef10_tx_probe,
	.tx_init = efx_ef10_tx_init,
	.tx_remove = efx_ef10_tx_remove,
	.tx_write = efx_ef10_tx_write,
	.rx_push_rss_config = efx_ef10_rx_push_rss_config,
	.rx_probe = efx_ef10_rx_probe,
	.rx_init = efx_ef10_rx_init,
	.rx_remove = efx_ef10_rx_remove,
	.rx_write = efx_ef10_rx_write,
	.rx_defer_refill = efx_ef10_rx_defer_refill,
	.ev_probe = efx_ef10_ev_probe,
	.ev_init = efx_ef10_ev_init,
	.ev_fini = efx_ef10_ev_fini,
	.ev_remove = efx_ef10_ev_remove,
	.ev_process = efx_ef10_ev_process,
	.ev_read_ack = efx_ef10_ev_read_ack,
	.ev_test_generate = efx_ef10_ev_test_generate,
	.filter_table_probe = efx_ef10_filter_table_probe,
	.filter_table_restore = efx_ef10_filter_table_restore,
	.filter_table_remove = efx_ef10_filter_table_remove,
	.filter_update_rx_scatter = efx_ef10_filter_update_rx_scatter,
	.filter_insert = efx_ef10_filter_insert,
	.filter_remove_safe = efx_ef10_filter_remove_safe,
	.filter_get_safe = efx_ef10_filter_get_safe,
	.filter_clear_rx = efx_ef10_filter_clear_rx,
	.filter_count_rx_used = efx_ef10_filter_count_rx_used,
	.filter_get_rx_id_limit = efx_ef10_filter_get_rx_id_limit,
	.filter_get_rx_ids = efx_ef10_filter_get_rx_ids,
#ifdef CONFIG_RFS_ACCEL
	.filter_rfs_insert = efx_ef10_filter_rfs_insert,
	.filter_rfs_expire_one = efx_ef10_filter_rfs_expire_one,
#endif
#ifdef CONFIG_SFC_MTD
	.mtd_probe = efx_ef10_mtd_probe,
	.mtd_rename = efx_mcdi_mtd_rename,
	.mtd_read = efx_mcdi_mtd_read,
	.mtd_erase = efx_mcdi_mtd_erase,
	.mtd_write = efx_mcdi_mtd_write,
	.mtd_sync = efx_mcdi_mtd_sync,
#endif
	.ptp_write_host_time = efx_ef10_ptp_write_host_time,
	.ptp_set_ts_sync_events = efx_ef10_ptp_set_ts_sync_events,
	.ptp_set_ts_config = efx_ef10_ptp_set_ts_config,

	.revision = EFX_REV_HUNT_A0,
	.max_dma_mask = DMA_BIT_MASK(ESF_DZ_TX_KER_BUF_ADDR_WIDTH),
	.rx_prefix_size = ES_DZ_RX_PREFIX_SIZE,
	.rx_hash_offset = ES_DZ_RX_PREFIX_HASH_OFST,
	.rx_ts_offset = ES_DZ_RX_PREFIX_TSTAMP_OFST,
	.can_rx_scatter = true,
	.always_rx_scatter = true,
	.max_interrupt_mode = EFX_INT_MODE_MSIX,
	.timer_period_max = 1 << ERF_DD_EVQ_IND_TIMER_VAL_WIDTH,
	.offload_features = (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
			     NETIF_F_RXHASH | NETIF_F_NTUPLE),
	.mcdi_max_ver = 2,
	.max_rx_ip_filters = HUNT_FILTER_TBL_ROWS,
	.hwtstamp_filters = 1 << HWTSTAMP_FILTER_NONE |
			    1 << HWTSTAMP_FILTER_ALL,
};