/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2006-2013 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/crc32.h>
#include "net_driver.h"
#include "bitfield.h"
#include "efx.h"
#include "nic.h"
#include "farch_regs.h"
#include "io.h"
#include "workarounds.h"
/* Falcon-architecture (SFC4000 and SFC9000-family) support */

/**************************************************************************
 *
 * Configurable values
 *
 **************************************************************************
 */

/* This is set to 16 for a good reason. In summary, if larger than
 * 16, the descriptor cache holds more than a default socket
 * buffer's worth of packets (for UDP we can only have at most one
 * socket buffer's worth outstanding). This combined with the fact
 * that we only get 1 TX event per descriptor cache means the NIC
 * goes idle.
 */
#define TX_DC_ENTRIES 16
#define TX_DC_ENTRIES_ORDER 1

#define RX_DC_ENTRIES 64
#define RX_DC_ENTRIES_ORDER 3
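
/* Note: the *_ORDER values encode each cache size as (8 << ORDER); the
 * BUILD_BUG_ON()s in efx_farch_init_common() below check that
 * 16 == 8 << 1 and 64 == 8 << 3, so both values must change together.
 */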
/* If EFX_MAX_INT_ERRORS internal errors occur within
 * EFX_INT_ERROR_EXPIRE seconds, we consider the NIC broken and
 * disable it.
 */
#define EFX_INT_ERROR_EXPIRE 3600
#define EFX_MAX_INT_ERRORS 5

/* Depth of RX flush request fifo */
#define EFX_RX_FLUSH_COUNT 4

/* Driver generated events */
#define _EFX_CHANNEL_MAGIC_TEST		0x000101
#define _EFX_CHANNEL_MAGIC_FILL		0x000102
#define _EFX_CHANNEL_MAGIC_RX_DRAIN	0x000103
#define _EFX_CHANNEL_MAGIC_TX_DRAIN	0x000104

#define _EFX_CHANNEL_MAGIC(_code, _data)	((_code) << 8 | (_data))
#define _EFX_CHANNEL_MAGIC_CODE(_magic)		((_magic) >> 8)

#define EFX_CHANNEL_MAGIC_TEST(_channel)				\
	_EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_TEST, (_channel)->channel)
#define EFX_CHANNEL_MAGIC_FILL(_rx_queue)				\
	_EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_FILL,			\
			   efx_rx_queue_index(_rx_queue))
#define EFX_CHANNEL_MAGIC_RX_DRAIN(_rx_queue)				\
	_EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_RX_DRAIN,			\
			   efx_rx_queue_index(_rx_queue))
#define EFX_CHANNEL_MAGIC_TX_DRAIN(_tx_queue)				\
	_EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_TX_DRAIN,			\
			   (_tx_queue)->queue)
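
/* Worked example of the encoding above: EFX_CHANNEL_MAGIC_TEST() on
 * channel 3 gives _EFX_CHANNEL_MAGIC(0x000101, 3) == (0x101 << 8) | 3
 * == 0x10103, and _EFX_CHANNEL_MAGIC_CODE(0x10103) == 0x101 recovers
 * the code. The low byte limits queue/channel numbers to 255 here.
 */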
static void efx_farch_magic_event(struct efx_channel *channel, u32 magic);

/**************************************************************************
 *
 * Hardware access
 *
 **************************************************************************/

static inline void efx_write_buf_tbl(struct efx_nic *efx, efx_qword_t *value,
				     unsigned int index)
{
	efx_sram_writeq(efx, efx->membase + efx->type->buf_tbl_base,
			value, index);
}

static bool efx_masked_compare_oword(const efx_oword_t *a, const efx_oword_t *b,
				     const efx_oword_t *mask)
{
	return ((a->u64[0] ^ b->u64[0]) & mask->u64[0]) ||
		((a->u64[1] ^ b->u64[1]) & mask->u64[1]);
}
int efx_farch_test_registers(struct efx_nic *efx,
			     const struct efx_farch_register_test *regs,
			     size_t n_regs)
{
	unsigned address = 0, i, j;
	efx_oword_t mask, imask, original, reg, buf;

	for (i = 0; i < n_regs; ++i) {
		address = regs[i].address;
		mask = imask = regs[i].mask;
		EFX_INVERT_OWORD(imask);

		efx_reado(efx, &original, address);

		/* bit sweep on and off */
		for (j = 0; j < 128; j++) {
			if (!EFX_EXTRACT_OWORD32(mask, j, j))
				continue;

			/* Test this testable bit can be set in isolation */
			EFX_AND_OWORD(reg, original, mask);
			EFX_SET_OWORD32(reg, j, j, 1);

			efx_writeo(efx, &reg, address);
			efx_reado(efx, &buf, address);

			if (efx_masked_compare_oword(&reg, &buf, &mask))
				goto fail;

			/* Test this testable bit can be cleared in isolation */
			EFX_OR_OWORD(reg, original, mask);
			EFX_SET_OWORD32(reg, j, j, 0);

			efx_writeo(efx, &reg, address);
			efx_reado(efx, &buf, address);

			if (efx_masked_compare_oword(&reg, &buf, &mask))
				goto fail;
		}

		efx_writeo(efx, &original, address);
	}

	return 0;

fail:
	netif_err(efx, hw, efx->net_dev,
		  "wrote "EFX_OWORD_FMT" read "EFX_OWORD_FMT
		  " at address 0x%x mask "EFX_OWORD_FMT"\n", EFX_OWORD_VAL(reg),
		  EFX_OWORD_VAL(buf), address, EFX_OWORD_VAL(mask));
	return -EIO;
}
/**************************************************************************
 *
 * Special buffer handling
 * Special buffers are used for event queues and the TX and RX
 * descriptor rings.
 *
 *************************************************************************/

/*
 * Initialise a special buffer
 *
 * This will define a buffer (previously allocated via
 * efx_alloc_special_buffer()) in the buffer table, allowing
 * it to be used for event queues, descriptor rings etc.
 */
static void
efx_init_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
{
	efx_qword_t buf_desc;
	unsigned int index;
	dma_addr_t dma_addr;
	int i;

	EFX_BUG_ON_PARANOID(!buffer->buf.addr);

	/* Write buffer descriptors to NIC */
	for (i = 0; i < buffer->entries; i++) {
		index = buffer->index + i;
		dma_addr = buffer->buf.dma_addr + (i * EFX_BUF_SIZE);
		netif_dbg(efx, probe, efx->net_dev,
			  "mapping special buffer %d at %llx\n",
			  index, (unsigned long long)dma_addr);
		EFX_POPULATE_QWORD_3(buf_desc,
				     FRF_AZ_BUF_ADR_REGION, 0,
				     FRF_AZ_BUF_ADR_FBUF, dma_addr >> 12,
				     FRF_AZ_BUF_OWNER_ID_FBUF, 0);
		efx_write_buf_tbl(efx, &buf_desc, index);
	}
}
/* Unmaps a buffer and clears the buffer table entries */
static void
efx_fini_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
{
	efx_oword_t buf_tbl_upd;
	unsigned int start = buffer->index;
	unsigned int end = (buffer->index + buffer->entries - 1);

	if (!buffer->entries)
		return;

	netif_dbg(efx, hw, efx->net_dev, "unmapping special buffers %d-%d\n",
		  buffer->index, buffer->index + buffer->entries - 1);

	EFX_POPULATE_OWORD_4(buf_tbl_upd,
			     FRF_AZ_BUF_UPD_CMD, 0,
			     FRF_AZ_BUF_CLR_CMD, 1,
			     FRF_AZ_BUF_CLR_END_ID, end,
			     FRF_AZ_BUF_CLR_START_ID, start);
	efx_writeo(efx, &buf_tbl_upd, FR_AZ_BUF_TBL_UPD);
}
/*
 * Allocate a new special buffer
 *
 * This allocates memory for a new buffer, clears it and allocates a
 * new buffer ID range. It does not write into the buffer table.
 *
 * This call will allocate 4KB buffers, since 8KB buffers can't be
 * used for event queues and descriptor rings.
 */
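/* For example, a 4096-entry descriptor ring of 8-byte (efx_qword_t)
 * descriptors needs 32KB, i.e. eight 4KB buffers backed by eight
 * consecutive buffer table entries.
 */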
static int efx_alloc_special_buffer(struct efx_nic *efx,
				    struct efx_special_buffer *buffer,
				    unsigned int len)
{
	len = ALIGN(len, EFX_BUF_SIZE);

	if (efx_nic_alloc_buffer(efx, &buffer->buf, len, GFP_KERNEL))
		return -ENOMEM;
	buffer->entries = len / EFX_BUF_SIZE;
	BUG_ON(buffer->buf.dma_addr & (EFX_BUF_SIZE - 1));

	/* Select new buffer ID */
	buffer->index = efx->next_buffer_table;
	efx->next_buffer_table += buffer->entries;
#ifdef CONFIG_SFC_SRIOV
	BUG_ON(efx_sriov_enabled(efx) &&
	       efx->vf_buftbl_base < efx->next_buffer_table);
#endif

	netif_dbg(efx, probe, efx->net_dev,
		  "allocating special buffers %d-%d at %llx+%x "
		  "(virt %p phys %llx)\n", buffer->index,
		  buffer->index + buffer->entries - 1,
		  (u64)buffer->buf.dma_addr, len,
		  buffer->buf.addr, (u64)virt_to_phys(buffer->buf.addr));

	return 0;
}
static void
efx_free_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
{
	if (!buffer->buf.addr)
		return;

	netif_dbg(efx, hw, efx->net_dev,
		  "deallocating special buffers %d-%d at %llx+%x "
		  "(virt %p phys %llx)\n", buffer->index,
		  buffer->index + buffer->entries - 1,
		  (u64)buffer->buf.dma_addr, buffer->buf.len,
		  buffer->buf.addr, (u64)virt_to_phys(buffer->buf.addr));

	efx_nic_free_buffer(efx, &buffer->buf);
	buffer->entries = 0;
}
/**************************************************************************
 *
 * TX path
 *
 **************************************************************************/

/* This writes to the TX_DESC_WPTR; write pointer for TX descriptor ring */
static inline void efx_farch_notify_tx_desc(struct efx_tx_queue *tx_queue)
{
	unsigned write_ptr;
	efx_dword_t reg;

	write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
	EFX_POPULATE_DWORD_1(reg, FRF_AZ_TX_DESC_WPTR_DWORD, write_ptr);
	efx_writed_page(tx_queue->efx, &reg,
			FR_AZ_TX_DESC_UPD_DWORD_P0, tx_queue->queue);
}
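
/* Ring arithmetic example: with a 512-entry ring, ptr_mask == 511, so a
 * free-running write_count of 515 maps to hardware write pointer
 * 515 & 511 == 3; the counters themselves are never wrapped explicitly.
 */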
/* Write pointer and first descriptor for TX descriptor ring */
static inline void efx_farch_push_tx_desc(struct efx_tx_queue *tx_queue,
					  const efx_qword_t *txd)
{
	unsigned write_ptr;
	efx_oword_t reg;

	BUILD_BUG_ON(FRF_AZ_TX_DESC_LBN != 0);
	BUILD_BUG_ON(FR_AA_TX_DESC_UPD_KER != FR_BZ_TX_DESC_UPD_P0);

	write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
	EFX_POPULATE_OWORD_2(reg, FRF_AZ_TX_DESC_PUSH_CMD, true,
			     FRF_AZ_TX_DESC_WPTR, write_ptr);
	reg.qword[0] = *txd;
	efx_writeo_page(tx_queue->efx, &reg,
			FR_BZ_TX_DESC_UPD_P0, tx_queue->queue);
}

/* For each entry inserted into the software descriptor ring, create a
 * descriptor in the hardware TX descriptor ring (in host memory), and
 * write a doorbell.
 */
void efx_farch_tx_write(struct efx_tx_queue *tx_queue)
{
	struct efx_tx_buffer *buffer;
	efx_qword_t *txd;
	unsigned write_ptr;
	unsigned old_write_count = tx_queue->write_count;

	BUG_ON(tx_queue->write_count == tx_queue->insert_count);

	do {
		write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
		buffer = &tx_queue->buffer[write_ptr];
		txd = efx_tx_desc(tx_queue, write_ptr);
		++tx_queue->write_count;

		EFX_BUG_ON_PARANOID(buffer->flags & EFX_TX_BUF_OPTION);

		/* Create TX descriptor ring entry */
		BUILD_BUG_ON(EFX_TX_BUF_CONT != 1);
		EFX_POPULATE_QWORD_4(*txd,
				     FSF_AZ_TX_KER_CONT,
				     buffer->flags & EFX_TX_BUF_CONT,
				     FSF_AZ_TX_KER_BYTE_COUNT, buffer->len,
				     FSF_AZ_TX_KER_BUF_REGION, 0,
				     FSF_AZ_TX_KER_BUF_ADDR, buffer->dma_addr);
	} while (tx_queue->write_count != tx_queue->insert_count);

	wmb(); /* Ensure descriptors are written before they are fetched */

	if (efx_nic_may_push_tx_desc(tx_queue, old_write_count)) {
		txd = efx_tx_desc(tx_queue,
				  old_write_count & tx_queue->ptr_mask);
		efx_farch_push_tx_desc(tx_queue, txd);
	} else {
		efx_farch_notify_tx_desc(tx_queue);
	}
}
/* Allocate hardware resources for a TX queue */
int efx_farch_tx_probe(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	unsigned entries;

	entries = tx_queue->ptr_mask + 1;
	return efx_alloc_special_buffer(efx, &tx_queue->txd,
					entries * sizeof(efx_qword_t));
}

void efx_farch_tx_init(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	efx_oword_t reg;

	/* Pin TX descriptor ring */
	efx_init_special_buffer(efx, &tx_queue->txd);

	/* Push TX descriptor ring to card */
	EFX_POPULATE_OWORD_10(reg,
			      FRF_AZ_TX_DESCQ_EN, 1,
			      FRF_AZ_TX_ISCSI_DDIG_EN, 0,
			      FRF_AZ_TX_ISCSI_HDIG_EN, 0,
			      FRF_AZ_TX_DESCQ_BUF_BASE_ID, tx_queue->txd.index,
			      FRF_AZ_TX_DESCQ_EVQ_ID,
			      tx_queue->channel->channel,
			      FRF_AZ_TX_DESCQ_OWNER_ID, 0,
			      FRF_AZ_TX_DESCQ_LABEL, tx_queue->queue,
			      FRF_AZ_TX_DESCQ_SIZE,
			      __ffs(tx_queue->txd.entries),
			      FRF_AZ_TX_DESCQ_TYPE, 0,
			      FRF_BZ_TX_NON_IP_DROP_DIS, 1);

	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
		int csum = tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD;
		EFX_SET_OWORD_FIELD(reg, FRF_BZ_TX_IP_CHKSM_DIS, !csum);
		EFX_SET_OWORD_FIELD(reg, FRF_BZ_TX_TCP_CHKSM_DIS,
				    !csum);
	}

	efx_writeo_table(efx, &reg, efx->type->txd_ptr_tbl_base,
			 tx_queue->queue);

	if (efx_nic_rev(efx) < EFX_REV_FALCON_B0) {
		/* Only 128 bits in this register */
		BUILD_BUG_ON(EFX_MAX_TX_QUEUES > 128);

		efx_reado(efx, &reg, FR_AA_TX_CHKSM_CFG);
		if (tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD)
			__clear_bit_le(tx_queue->queue, &reg);
		else
			__set_bit_le(tx_queue->queue, &reg);
		efx_writeo(efx, &reg, FR_AA_TX_CHKSM_CFG);
	}
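
	/* On Falcon A1 each TX queue thus owns one bit in TX_CHKSM_CFG:
	 * a clear bit enables checksum generation for that queue and a
	 * set bit disables it, mirroring the EFX_TXQ_TYPE_OFFLOAD test
	 * above.
	 */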
	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
		EFX_POPULATE_OWORD_1(reg,
				     FRF_BZ_TX_PACE,
				     (tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI) ?
				     FFE_BZ_TX_PACE_OFF :
				     FFE_BZ_TX_PACE_RESERVED);
		efx_writeo_table(efx, &reg, FR_BZ_TX_PACE_TBL,
				 tx_queue->queue);
	}
}
static void efx_farch_flush_tx_queue(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	efx_oword_t tx_flush_descq;

	WARN_ON(atomic_read(&tx_queue->flush_outstanding));
	atomic_set(&tx_queue->flush_outstanding, 1);

	EFX_POPULATE_OWORD_2(tx_flush_descq,
			     FRF_AZ_TX_FLUSH_DESCQ_CMD, 1,
			     FRF_AZ_TX_FLUSH_DESCQ, tx_queue->queue);
	efx_writeo(efx, &tx_flush_descq, FR_AZ_TX_FLUSH_DESCQ);
}

void efx_farch_tx_fini(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	efx_oword_t tx_desc_ptr;

	/* Remove TX descriptor ring from card */
	EFX_ZERO_OWORD(tx_desc_ptr);
	efx_writeo_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base,
			 tx_queue->queue);

	/* Unpin TX descriptor ring */
	efx_fini_special_buffer(efx, &tx_queue->txd);
}

/* Free buffers backing TX queue */
void efx_farch_tx_remove(struct efx_tx_queue *tx_queue)
{
	efx_free_special_buffer(tx_queue->efx, &tx_queue->txd);
}
/**************************************************************************
 *
 * RX path
 *
 **************************************************************************/

/* This creates an entry in the RX descriptor queue */
static inline void
efx_farch_build_rx_desc(struct efx_rx_queue *rx_queue, unsigned index)
{
	struct efx_rx_buffer *rx_buf;
	efx_qword_t *rxd;

	rxd = efx_rx_desc(rx_queue, index);
	rx_buf = efx_rx_buffer(rx_queue, index);
	EFX_POPULATE_QWORD_3(*rxd,
			     FSF_AZ_RX_KER_BUF_SIZE,
			     rx_buf->len -
			     rx_queue->efx->type->rx_buffer_padding,
			     FSF_AZ_RX_KER_BUF_REGION, 0,
			     FSF_AZ_RX_KER_BUF_ADDR, rx_buf->dma_addr);
}

/* This writes to the RX_DESC_WPTR register for the specified receive
 * descriptor ring.
 */
void efx_farch_rx_write(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	efx_dword_t reg;
	unsigned write_ptr;

	while (rx_queue->notified_count != rx_queue->added_count) {
		efx_farch_build_rx_desc(
			rx_queue,
			rx_queue->notified_count & rx_queue->ptr_mask);
		++rx_queue->notified_count;
	}

	wmb();
	write_ptr = rx_queue->added_count & rx_queue->ptr_mask;
	EFX_POPULATE_DWORD_1(reg, FRF_AZ_RX_DESC_WPTR_DWORD, write_ptr);
	efx_writed_page(efx, &reg, FR_AZ_RX_DESC_UPD_DWORD_P0,
			efx_rx_queue_index(rx_queue));
}

int efx_farch_rx_probe(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	unsigned entries;

	entries = rx_queue->ptr_mask + 1;
	return efx_alloc_special_buffer(efx, &rx_queue->rxd,
					entries * sizeof(efx_qword_t));
}
void efx_farch_rx_init(struct efx_rx_queue *rx_queue)
{
	efx_oword_t rx_desc_ptr;
	struct efx_nic *efx = rx_queue->efx;
	bool is_b0 = efx_nic_rev(efx) >= EFX_REV_FALCON_B0;
	bool iscsi_digest_en = is_b0;
	bool jumbo_en;

	/* For kernel-mode queues in Falcon A1, the JUMBO flag enables
	 * DMA to continue after a PCIe page boundary (and scattering
	 * is not possible). In Falcon B0 and Siena, it enables
	 * scatter.
	 */
	jumbo_en = !is_b0 || efx->rx_scatter;

	netif_dbg(efx, hw, efx->net_dev,
		  "RX queue %d ring in special buffers %d-%d\n",
		  efx_rx_queue_index(rx_queue), rx_queue->rxd.index,
		  rx_queue->rxd.index + rx_queue->rxd.entries - 1);

	rx_queue->scatter_n = 0;

	/* Pin RX descriptor ring */
	efx_init_special_buffer(efx, &rx_queue->rxd);

	/* Push RX descriptor ring to card */
	EFX_POPULATE_OWORD_10(rx_desc_ptr,
			      FRF_AZ_RX_ISCSI_DDIG_EN, iscsi_digest_en,
			      FRF_AZ_RX_ISCSI_HDIG_EN, iscsi_digest_en,
			      FRF_AZ_RX_DESCQ_BUF_BASE_ID, rx_queue->rxd.index,
			      FRF_AZ_RX_DESCQ_EVQ_ID,
			      efx_rx_queue_channel(rx_queue)->channel,
			      FRF_AZ_RX_DESCQ_OWNER_ID, 0,
			      FRF_AZ_RX_DESCQ_LABEL,
			      efx_rx_queue_index(rx_queue),
			      FRF_AZ_RX_DESCQ_SIZE,
			      __ffs(rx_queue->rxd.entries),
			      FRF_AZ_RX_DESCQ_TYPE, 0 /* kernel queue */ ,
			      FRF_AZ_RX_DESCQ_JUMBO, jumbo_en,
			      FRF_AZ_RX_DESCQ_EN, 1);
	efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
			 efx_rx_queue_index(rx_queue));
}
static void efx_farch_flush_rx_queue(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	efx_oword_t rx_flush_descq;

	EFX_POPULATE_OWORD_2(rx_flush_descq,
			     FRF_AZ_RX_FLUSH_DESCQ_CMD, 1,
			     FRF_AZ_RX_FLUSH_DESCQ,
			     efx_rx_queue_index(rx_queue));
	efx_writeo(efx, &rx_flush_descq, FR_AZ_RX_FLUSH_DESCQ);
}

void efx_farch_rx_fini(struct efx_rx_queue *rx_queue)
{
	efx_oword_t rx_desc_ptr;
	struct efx_nic *efx = rx_queue->efx;

	/* Remove RX descriptor ring from card */
	EFX_ZERO_OWORD(rx_desc_ptr);
	efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
			 efx_rx_queue_index(rx_queue));

	/* Unpin RX descriptor ring */
	efx_fini_special_buffer(efx, &rx_queue->rxd);
}

/* Free buffers backing RX queue */
void efx_farch_rx_remove(struct efx_rx_queue *rx_queue)
{
	efx_free_special_buffer(rx_queue->efx, &rx_queue->rxd);
}
/**************************************************************************
 *
 * Flush handling
 *
 **************************************************************************/

/* efx_farch_flush_queues() must be woken up when all flushes are completed,
 * or more RX flushes can be kicked off.
 */
static bool efx_farch_flush_wake(struct efx_nic *efx)
{
	/* Ensure that all updates are visible to efx_farch_flush_queues() */
	smp_mb();

	return (atomic_read(&efx->active_queues) == 0 ||
		(atomic_read(&efx->rxq_flush_outstanding) < EFX_RX_FLUSH_COUNT
		 && atomic_read(&efx->rxq_flush_pending) > 0));
}
static bool efx_check_tx_flush_complete(struct efx_nic *efx)
{
	bool i = true;
	efx_oword_t txd_ptr_tbl;
	struct efx_channel *channel;
	struct efx_tx_queue *tx_queue;

	efx_for_each_channel(channel, efx) {
		efx_for_each_channel_tx_queue(tx_queue, channel) {
			efx_reado_table(efx, &txd_ptr_tbl,
					FR_BZ_TX_DESC_PTR_TBL, tx_queue->queue);
			if (EFX_OWORD_FIELD(txd_ptr_tbl,
					    FRF_AZ_TX_DESCQ_FLUSH) ||
			    EFX_OWORD_FIELD(txd_ptr_tbl,
					    FRF_AZ_TX_DESCQ_EN)) {
				netif_dbg(efx, hw, efx->net_dev,
					  "flush did not complete on TXQ %d\n",
					  tx_queue->queue);
				i = false;
			} else if (atomic_cmpxchg(&tx_queue->flush_outstanding,
						  1, 0)) {
				/* The flush is complete, but we didn't
				 * receive a flush completion event
				 */
				netif_dbg(efx, hw, efx->net_dev,
					  "flush complete on TXQ %d, so drain "
					  "the queue\n", tx_queue->queue);
				/* Don't need to increment active_queues as it
				 * has already been incremented for the queues
				 * which did not drain
				 */
				efx_farch_magic_event(channel,
						      EFX_CHANNEL_MAGIC_TX_DRAIN(
							      tx_queue));
			}
		}
	}

	return i;
}
/* Flush all the transmit queues, and continue flushing receive queues until
 * they're all flushed. Wait for the DRAIN events to be received so that there
 * are no more RX and TX events left on any channel. */
static int efx_farch_do_flush(struct efx_nic *efx)
{
	unsigned timeout = msecs_to_jiffies(5000); /* 5s for all flushes and drains */
	struct efx_channel *channel;
	struct efx_rx_queue *rx_queue;
	struct efx_tx_queue *tx_queue;
	int rc = 0;

	efx_for_each_channel(channel, efx) {
		efx_for_each_channel_tx_queue(tx_queue, channel) {
			efx_farch_flush_tx_queue(tx_queue);
		}
		efx_for_each_channel_rx_queue(rx_queue, channel) {
			rx_queue->flush_pending = true;
			atomic_inc(&efx->rxq_flush_pending);
		}
	}

	while (timeout && atomic_read(&efx->active_queues) > 0) {
		/* If SRIOV is enabled, then offload receive queue flushing to
		 * the firmware (though we will still have to poll for
		 * completion). If that fails, fall back to the old scheme.
		 */
		if (efx_sriov_enabled(efx)) {
			rc = efx_mcdi_flush_rxqs(efx);
			if (!rc)
				goto wait;
		}

		/* The hardware supports four concurrent rx flushes, each of
		 * which may need to be retried if there is an outstanding
		 * descriptor fetch
		 */
		efx_for_each_channel(channel, efx) {
			efx_for_each_channel_rx_queue(rx_queue, channel) {
				if (atomic_read(&efx->rxq_flush_outstanding) >=
				    EFX_RX_FLUSH_COUNT)
					break;

				if (rx_queue->flush_pending) {
					rx_queue->flush_pending = false;
					atomic_dec(&efx->rxq_flush_pending);
					atomic_inc(&efx->rxq_flush_outstanding);
					efx_farch_flush_rx_queue(rx_queue);
				}
			}
		}

wait:
		timeout = wait_event_timeout(efx->flush_wq,
					     efx_farch_flush_wake(efx),
					     timeout);
	}

	if (atomic_read(&efx->active_queues) &&
	    !efx_check_tx_flush_complete(efx)) {
		netif_err(efx, hw, efx->net_dev, "failed to flush %d queues "
			  "(rx %d+%d)\n", atomic_read(&efx->active_queues),
			  atomic_read(&efx->rxq_flush_outstanding),
			  atomic_read(&efx->rxq_flush_pending));
		rc = -ETIMEDOUT;

		atomic_set(&efx->active_queues, 0);
		atomic_set(&efx->rxq_flush_pending, 0);
		atomic_set(&efx->rxq_flush_outstanding, 0);
	}

	return rc;
}
int efx_farch_fini_dmaq(struct efx_nic *efx)
{
	struct efx_channel *channel;
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;
	int rc = 0;

	/* Do not attempt to write to the NIC during EEH recovery */
	if (efx->state != STATE_RECOVERY) {
		/* Only perform flush if DMA is enabled */
		if (efx->pci_dev->is_busmaster) {
			efx->type->prepare_flush(efx);
			rc = efx_farch_do_flush(efx);
			efx->type->finish_flush(efx);
		}

		efx_for_each_channel(channel, efx) {
			efx_for_each_channel_rx_queue(rx_queue, channel)
				efx_farch_rx_fini(rx_queue);
			efx_for_each_channel_tx_queue(tx_queue, channel)
				efx_farch_tx_fini(tx_queue);
		}
	}

	return rc;
}
/**************************************************************************
 *
 * Event queue processing
 * Event queues are processed by per-channel tasklets.
 *
 **************************************************************************/

/* Update a channel's event queue's read pointer (RPTR) register
 *
 * This writes the EVQ_RPTR_REG register for the specified channel's
 * event queue.
 */
void efx_farch_ev_read_ack(struct efx_channel *channel)
{
	efx_dword_t reg;
	struct efx_nic *efx = channel->efx;

	EFX_POPULATE_DWORD_1(reg, FRF_AZ_EVQ_RPTR,
			     channel->eventq_read_ptr & channel->eventq_mask);

	/* For Falcon A1, EVQ_RPTR_KER is documented as having a step size
	 * of 4 bytes, but it is really 16 bytes just like later revisions.
	 */
	efx_writed(efx, &reg,
		   efx->type->evq_rptr_tbl_base +
		   FR_BZ_EVQ_RPTR_STEP * channel->channel);
}
/* Use HW to insert a SW defined event */
void efx_farch_generate_event(struct efx_nic *efx, unsigned int evq,
			      efx_qword_t *event)
{
	efx_oword_t drv_ev_reg;

	BUILD_BUG_ON(FRF_AZ_DRV_EV_DATA_LBN != 0 ||
		     FRF_AZ_DRV_EV_DATA_WIDTH != 64);
	drv_ev_reg.u32[0] = event->u32[0];
	drv_ev_reg.u32[1] = event->u32[1];
	drv_ev_reg.u32[2] = 0;
	drv_ev_reg.u32[3] = 0;
	EFX_SET_OWORD_FIELD(drv_ev_reg, FRF_AZ_DRV_EV_QID, evq);
	efx_writeo(efx, &drv_ev_reg, FR_AZ_DRV_EV);
}

static void efx_farch_magic_event(struct efx_channel *channel, u32 magic)
{
	efx_qword_t event;

	EFX_POPULATE_QWORD_2(event, FSF_AZ_EV_CODE,
			     FSE_AZ_EV_CODE_DRV_GEN_EV,
			     FSF_AZ_DRV_GEN_EV_MAGIC, magic);
	efx_farch_generate_event(channel->efx, channel->channel, &event);
}
/* Handle a transmit completion event
 *
 * The NIC batches TX completion events; the message we receive is of
 * the form "complete all TX events up to this index".
 */
static int
efx_farch_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
{
	unsigned int tx_ev_desc_ptr;
	unsigned int tx_ev_q_label;
	struct efx_tx_queue *tx_queue;
	struct efx_nic *efx = channel->efx;
	int tx_packets = 0;

	if (unlikely(ACCESS_ONCE(efx->reset_pending)))
		return 0;

	if (likely(EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_COMP))) {
		/* Transmit completion */
		tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_DESC_PTR);
		tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
		tx_queue = efx_channel_get_tx_queue(
			channel, tx_ev_q_label % EFX_TXQ_TYPES);
		tx_packets = ((tx_ev_desc_ptr - tx_queue->read_count) &
			      tx_queue->ptr_mask);
		efx_xmit_done(tx_queue, tx_ev_desc_ptr);
	} else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_WQ_FF_FULL)) {
		/* Rewrite the FIFO write pointer */
		tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
		tx_queue = efx_channel_get_tx_queue(
			channel, tx_ev_q_label % EFX_TXQ_TYPES);

		netif_tx_lock(efx->net_dev);
		efx_farch_notify_tx_desc(tx_queue);
		netif_tx_unlock(efx->net_dev);
	} else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_PKT_ERR)) {
		efx_schedule_reset(efx, RESET_TYPE_DMA_ERROR);
	} else {
		netif_err(efx, tx_err, efx->net_dev,
			  "channel %d unexpected TX event "
			  EFX_QWORD_FMT"\n", channel->channel,
			  EFX_QWORD_VAL(*event));
	}

	return tx_packets;
}
/* Detect errors included in the rx_evt_pkt_ok bit. */
static u16 efx_farch_handle_rx_not_ok(struct efx_rx_queue *rx_queue,
				      const efx_qword_t *event)
{
	struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
	struct efx_nic *efx = rx_queue->efx;
	bool rx_ev_buf_owner_id_err, rx_ev_ip_hdr_chksum_err;
	bool rx_ev_tcp_udp_chksum_err, rx_ev_eth_crc_err;
	bool rx_ev_frm_trunc, rx_ev_drib_nib, rx_ev_tobe_disc;
	bool rx_ev_other_err, rx_ev_pause_frm;
	bool rx_ev_hdr_type, rx_ev_mcast_pkt;
	unsigned rx_ev_pkt_type;

	rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE);
	rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT);
	rx_ev_tobe_disc = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_TOBE_DISC);
	rx_ev_pkt_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_TYPE);
	rx_ev_buf_owner_id_err = EFX_QWORD_FIELD(*event,
						 FSF_AZ_RX_EV_BUF_OWNER_ID_ERR);
	rx_ev_ip_hdr_chksum_err = EFX_QWORD_FIELD(*event,
						  FSF_AZ_RX_EV_IP_HDR_CHKSUM_ERR);
	rx_ev_tcp_udp_chksum_err = EFX_QWORD_FIELD(*event,
						   FSF_AZ_RX_EV_TCP_UDP_CHKSUM_ERR);
	rx_ev_eth_crc_err = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_ETH_CRC_ERR);
	rx_ev_frm_trunc = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_FRM_TRUNC);
	rx_ev_drib_nib = ((efx_nic_rev(efx) >= EFX_REV_FALCON_B0) ?
			  0 : EFX_QWORD_FIELD(*event, FSF_AA_RX_EV_DRIB_NIB));
	rx_ev_pause_frm = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PAUSE_FRM_ERR);

	/* Every error apart from tobe_disc and pause_frm */
	rx_ev_other_err = (rx_ev_drib_nib | rx_ev_tcp_udp_chksum_err |
			   rx_ev_buf_owner_id_err | rx_ev_eth_crc_err |
			   rx_ev_frm_trunc | rx_ev_ip_hdr_chksum_err);

	/* Count errors that are not in MAC stats. Ignore expected
	 * checksum errors during self-test. */
	if (rx_ev_frm_trunc)
		++channel->n_rx_frm_trunc;
	else if (rx_ev_tobe_disc)
		++channel->n_rx_tobe_disc;
	else if (!efx->loopback_selftest) {
		if (rx_ev_ip_hdr_chksum_err)
			++channel->n_rx_ip_hdr_chksum_err;
		else if (rx_ev_tcp_udp_chksum_err)
			++channel->n_rx_tcp_udp_chksum_err;
	}

	/* TOBE_DISC is expected on unicast mismatches; don't print out an
	 * error message. FRM_TRUNC indicates RXDP dropped the packet due
	 * to a FIFO overflow.
	 */
#ifdef DEBUG
	if (rx_ev_other_err && net_ratelimit()) {
		netif_dbg(efx, rx_err, efx->net_dev,
			  " RX queue %d unexpected RX event "
			  EFX_QWORD_FMT "%s%s%s%s%s%s%s%s\n",
			  efx_rx_queue_index(rx_queue), EFX_QWORD_VAL(*event),
			  rx_ev_buf_owner_id_err ? " [OWNER_ID_ERR]" : "",
			  rx_ev_ip_hdr_chksum_err ?
			  " [IP_HDR_CHKSUM_ERR]" : "",
			  rx_ev_tcp_udp_chksum_err ?
			  " [TCP_UDP_CHKSUM_ERR]" : "",
			  rx_ev_eth_crc_err ? " [ETH_CRC_ERR]" : "",
			  rx_ev_frm_trunc ? " [FRM_TRUNC]" : "",
			  rx_ev_drib_nib ? " [DRIB_NIB]" : "",
			  rx_ev_tobe_disc ? " [TOBE_DISC]" : "",
			  rx_ev_pause_frm ? " [PAUSE]" : "");
	}
#endif

	/* The frame must be discarded if any of these are true. */
	return (rx_ev_eth_crc_err | rx_ev_frm_trunc | rx_ev_drib_nib |
		rx_ev_tobe_disc | rx_ev_pause_frm) ?
		EFX_RX_PKT_DISCARD : 0;
}
/* Handle receive events that are not in-order. Return true if this
 * can be handled as a partial packet discard, false if it's more
 * serious.
 */
static bool
efx_farch_handle_rx_bad_index(struct efx_rx_queue *rx_queue, unsigned index)
{
	struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
	struct efx_nic *efx = rx_queue->efx;
	unsigned expected, dropped;

	if (rx_queue->scatter_n &&
	    index == ((rx_queue->removed_count + rx_queue->scatter_n - 1) &
		      rx_queue->ptr_mask)) {
		++channel->n_rx_nodesc_trunc;
		return true;
	}

	expected = rx_queue->removed_count & rx_queue->ptr_mask;
	dropped = (index - expected) & rx_queue->ptr_mask;
	netif_info(efx, rx_err, efx->net_dev,
		   "dropped %d events (index=%d expected=%d)\n",
		   dropped, index, expected);

	efx_schedule_reset(efx, EFX_WORKAROUND_5676(efx) ?
			   RESET_TYPE_RX_RECOVERY : RESET_TYPE_DISABLE);
	return false;
}
/* Handle a packet received event
 *
 * The NIC gives a "discard" flag if it's a unicast packet with the
 * wrong destination address
 * Also "is multicast" and "matches multicast filter" flags can be used to
 * discard non-matching multicast packets.
 */
static void
efx_farch_handle_rx_event(struct efx_channel *channel, const efx_qword_t *event)
{
	unsigned int rx_ev_desc_ptr, rx_ev_byte_cnt;
	unsigned int rx_ev_hdr_type, rx_ev_mcast_pkt;
	unsigned expected_ptr;
	bool rx_ev_pkt_ok, rx_ev_sop, rx_ev_cont;
	u16 flags;
	struct efx_rx_queue *rx_queue;
	struct efx_nic *efx = channel->efx;

	if (unlikely(ACCESS_ONCE(efx->reset_pending)))
		return;

	rx_ev_cont = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_JUMBO_CONT);
	rx_ev_sop = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_SOP);
	WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_Q_LABEL) !=
		channel->channel);

	rx_queue = efx_channel_get_rx_queue(channel);

	rx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_DESC_PTR);
	expected_ptr = ((rx_queue->removed_count + rx_queue->scatter_n) &
			rx_queue->ptr_mask);

	/* Check for partial drops and other errors */
	if (unlikely(rx_ev_desc_ptr != expected_ptr) ||
	    unlikely(rx_ev_sop != (rx_queue->scatter_n == 0))) {
		if (rx_ev_desc_ptr != expected_ptr &&
		    !efx_farch_handle_rx_bad_index(rx_queue, rx_ev_desc_ptr))
			return;

		/* Discard all pending fragments */
		if (rx_queue->scatter_n) {
			efx_rx_packet(
				rx_queue,
				rx_queue->removed_count & rx_queue->ptr_mask,
				rx_queue->scatter_n, 0, EFX_RX_PKT_DISCARD);
			rx_queue->removed_count += rx_queue->scatter_n;
			rx_queue->scatter_n = 0;
		}

		/* Return if there is no new fragment */
		if (rx_ev_desc_ptr != expected_ptr)
			return;

		/* Discard new fragment if not SOP */
		if (!rx_ev_sop) {
			efx_rx_packet(
				rx_queue,
				rx_queue->removed_count & rx_queue->ptr_mask,
				1, 0, EFX_RX_PKT_DISCARD);
			++rx_queue->removed_count;
			return;
		}
	}

	++rx_queue->scatter_n;
	if (rx_ev_cont)
		return;

	rx_ev_byte_cnt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_BYTE_CNT);
	rx_ev_pkt_ok = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_OK);
	rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE);

	if (likely(rx_ev_pkt_ok)) {
		/* If packet is marked as OK then we can rely on the
		 * hardware checksum and classification.
		 */
		flags = 0;
		switch (rx_ev_hdr_type) {
		case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_TCP:
			flags |= EFX_RX_PKT_TCP;
			/* fall through */
		case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_UDP:
			flags |= EFX_RX_PKT_CSUMMED;
			/* fall through */
		case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_OTHER:
		case FSE_AZ_RX_EV_HDR_TYPE_OTHER:
			break;
		}
	} else {
		flags = efx_farch_handle_rx_not_ok(rx_queue, event);
	}

	/* Detect multicast packets that didn't match the filter */
	rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT);
	if (rx_ev_mcast_pkt) {
		unsigned int rx_ev_mcast_hash_match =
			EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_HASH_MATCH);

		if (unlikely(!rx_ev_mcast_hash_match)) {
			++channel->n_rx_mcast_mismatch;
			flags |= EFX_RX_PKT_DISCARD;
		}
	}

	channel->irq_mod_score += 2;

	/* Handle received packet */
	efx_rx_packet(rx_queue,
		      rx_queue->removed_count & rx_queue->ptr_mask,
		      rx_queue->scatter_n, rx_ev_byte_cnt, flags);
	rx_queue->removed_count += rx_queue->scatter_n;
	rx_queue->scatter_n = 0;
}
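
/* Scatter accounting example: a frame spanning three descriptors arrives
 * as two events with JUMBO_CONT set (each one only incrementing
 * scatter_n) followed by a final event that hands all three fragments to
 * efx_rx_packet() in a single call.
 */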
/* If this flush done event corresponds to a &struct efx_tx_queue, then
 * send an %EFX_CHANNEL_MAGIC_TX_DRAIN event to drain the event queue
 * of all transmit completions.
 */
static void
efx_farch_handle_tx_flush_done(struct efx_nic *efx, efx_qword_t *event)
{
	struct efx_tx_queue *tx_queue;
	int qid;

	qid = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA);
	if (qid < EFX_TXQ_TYPES * efx->n_tx_channels) {
		tx_queue = efx_get_tx_queue(efx, qid / EFX_TXQ_TYPES,
					    qid % EFX_TXQ_TYPES);
		if (atomic_cmpxchg(&tx_queue->flush_outstanding, 1, 0)) {
			efx_farch_magic_event(tx_queue->channel,
					      EFX_CHANNEL_MAGIC_TX_DRAIN(tx_queue));
		}
	}
}

/* If this flush done event corresponds to a &struct efx_rx_queue: If the flush
 * was successful then send an %EFX_CHANNEL_MAGIC_RX_DRAIN, otherwise add
 * the RX queue back to the mask of RX queues in need of flushing.
 */
static void
efx_farch_handle_rx_flush_done(struct efx_nic *efx, efx_qword_t *event)
{
	struct efx_channel *channel;
	struct efx_rx_queue *rx_queue;
	int qid;
	bool failed;

	qid = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_RX_DESCQ_ID);
	failed = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL);
	if (qid >= efx->n_channels)
		return;
	channel = efx_get_channel(efx, qid);
	if (!efx_channel_has_rx_queue(channel))
		return;
	rx_queue = efx_channel_get_rx_queue(channel);

	if (failed) {
		netif_info(efx, hw, efx->net_dev,
			   "RXQ %d flush retry\n", qid);
		rx_queue->flush_pending = true;
		atomic_inc(&efx->rxq_flush_pending);
	} else {
		efx_farch_magic_event(efx_rx_queue_channel(rx_queue),
				      EFX_CHANNEL_MAGIC_RX_DRAIN(rx_queue));
	}
	atomic_dec(&efx->rxq_flush_outstanding);
	if (efx_farch_flush_wake(efx))
		wake_up(&efx->flush_wq);
}
static void
efx_farch_handle_drain_event(struct efx_channel *channel)
{
	struct efx_nic *efx = channel->efx;

	WARN_ON(atomic_read(&efx->active_queues) == 0);
	atomic_dec(&efx->active_queues);
	if (efx_farch_flush_wake(efx))
		wake_up(&efx->flush_wq);
}

static void efx_farch_handle_generated_event(struct efx_channel *channel,
					     efx_qword_t *event)
{
	struct efx_nic *efx = channel->efx;
	struct efx_rx_queue *rx_queue =
		efx_channel_has_rx_queue(channel) ?
		efx_channel_get_rx_queue(channel) : NULL;
	unsigned magic, code;

	magic = EFX_QWORD_FIELD(*event, FSF_AZ_DRV_GEN_EV_MAGIC);
	code = _EFX_CHANNEL_MAGIC_CODE(magic);

	if (magic == EFX_CHANNEL_MAGIC_TEST(channel)) {
		channel->event_test_cpu = raw_smp_processor_id();
	} else if (rx_queue && magic == EFX_CHANNEL_MAGIC_FILL(rx_queue)) {
		/* The queue must be empty, so we won't receive any rx
		 * events, so efx_process_channel() won't refill the
		 * queue. Refill it here */
		efx_fast_push_rx_descriptors(rx_queue);
	} else if (rx_queue && magic == EFX_CHANNEL_MAGIC_RX_DRAIN(rx_queue)) {
		efx_farch_handle_drain_event(channel);
	} else if (code == _EFX_CHANNEL_MAGIC_TX_DRAIN) {
		efx_farch_handle_drain_event(channel);
	} else {
		netif_dbg(efx, hw, efx->net_dev, "channel %d received "
			  "generated event "EFX_QWORD_FMT"\n",
			  channel->channel, EFX_QWORD_VAL(*event));
	}
}
static void
efx_farch_handle_driver_event(struct efx_channel *channel, efx_qword_t *event)
{
	struct efx_nic *efx = channel->efx;
	unsigned int ev_sub_code;
	unsigned int ev_sub_data;

	ev_sub_code = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBCODE);
	ev_sub_data = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA);

	switch (ev_sub_code) {
	case FSE_AZ_TX_DESCQ_FLS_DONE_EV:
		netif_vdbg(efx, hw, efx->net_dev, "channel %d TXQ %d flushed\n",
			   channel->channel, ev_sub_data);
		efx_farch_handle_tx_flush_done(efx, event);
		efx_sriov_tx_flush_done(efx, event);
		break;
	case FSE_AZ_RX_DESCQ_FLS_DONE_EV:
		netif_vdbg(efx, hw, efx->net_dev, "channel %d RXQ %d flushed\n",
			   channel->channel, ev_sub_data);
		efx_farch_handle_rx_flush_done(efx, event);
		efx_sriov_rx_flush_done(efx, event);
		break;
	case FSE_AZ_EVQ_INIT_DONE_EV:
		netif_dbg(efx, hw, efx->net_dev,
			  "channel %d EVQ %d initialised\n",
			  channel->channel, ev_sub_data);
		break;
	case FSE_AZ_SRM_UPD_DONE_EV:
		netif_vdbg(efx, hw, efx->net_dev,
			   "channel %d SRAM update done\n", channel->channel);
		break;
	case FSE_AZ_WAKE_UP_EV:
		netif_vdbg(efx, hw, efx->net_dev,
			   "channel %d RXQ %d wakeup event\n",
			   channel->channel, ev_sub_data);
		break;
	case FSE_AZ_TIMER_EV:
		netif_vdbg(efx, hw, efx->net_dev,
			   "channel %d RX queue %d timer expired\n",
			   channel->channel, ev_sub_data);
		break;
	case FSE_AA_RX_RECOVER_EV:
		netif_err(efx, rx_err, efx->net_dev,
			  "channel %d seen DRIVER RX_RESET event. "
			  "Resetting.\n", channel->channel);
		atomic_inc(&efx->rx_reset);
		efx_schedule_reset(efx,
				   EFX_WORKAROUND_6555(efx) ?
				   RESET_TYPE_RX_RECOVERY :
				   RESET_TYPE_DISABLE);
		break;
	case FSE_BZ_RX_DSC_ERROR_EV:
		if (ev_sub_data < EFX_VI_BASE) {
			netif_err(efx, rx_err, efx->net_dev,
				  "RX DMA Q %d reports descriptor fetch error."
				  " RX Q %d is disabled.\n", ev_sub_data,
				  ev_sub_data);
			efx_schedule_reset(efx, RESET_TYPE_DMA_ERROR);
		} else
			efx_sriov_desc_fetch_err(efx, ev_sub_data);
		break;
	case FSE_BZ_TX_DSC_ERROR_EV:
		if (ev_sub_data < EFX_VI_BASE) {
			netif_err(efx, tx_err, efx->net_dev,
				  "TX DMA Q %d reports descriptor fetch error."
				  " TX Q %d is disabled.\n", ev_sub_data,
				  ev_sub_data);
			efx_schedule_reset(efx, RESET_TYPE_DMA_ERROR);
		} else
			efx_sriov_desc_fetch_err(efx, ev_sub_data);
		break;
	default:
		netif_vdbg(efx, hw, efx->net_dev,
			   "channel %d unknown driver event code %d "
			   "data %04x\n", channel->channel, ev_sub_code,
			   ev_sub_data);
		break;
	}
}
int efx_farch_ev_process(struct efx_channel *channel, int budget)
{
	struct efx_nic *efx = channel->efx;
	unsigned int read_ptr;
	efx_qword_t event, *p_event;
	int ev_code;
	int tx_packets = 0;
	int spent = 0;

	read_ptr = channel->eventq_read_ptr;

	for (;;) {
		p_event = efx_event(channel, read_ptr);
		event = *p_event;

		if (!efx_event_present(&event))
			/* End of events */
			break;

		netif_vdbg(channel->efx, intr, channel->efx->net_dev,
			   "channel %d event is "EFX_QWORD_FMT"\n",
			   channel->channel, EFX_QWORD_VAL(event));

		/* Clear this event by marking it all ones */
		EFX_SET_QWORD(*p_event);

		++read_ptr;

		ev_code = EFX_QWORD_FIELD(event, FSF_AZ_EV_CODE);

		switch (ev_code) {
		case FSE_AZ_EV_CODE_RX_EV:
			efx_farch_handle_rx_event(channel, &event);
			if (++spent == budget)
				goto out;
			break;
		case FSE_AZ_EV_CODE_TX_EV:
			tx_packets += efx_farch_handle_tx_event(channel,
								&event);
			if (tx_packets > efx->txq_entries) {
				spent = budget;
				goto out;
			}
			break;
		case FSE_AZ_EV_CODE_DRV_GEN_EV:
			efx_farch_handle_generated_event(channel, &event);
			break;
		case FSE_AZ_EV_CODE_DRIVER_EV:
			efx_farch_handle_driver_event(channel, &event);
			break;
		case FSE_CZ_EV_CODE_USER_EV:
			efx_sriov_event(channel, &event);
			break;
		case FSE_CZ_EV_CODE_MCDI_EV:
			efx_mcdi_process_event(channel, &event);
			break;
		case FSE_AZ_EV_CODE_GLOBAL_EV:
			if (efx->type->handle_global_event &&
			    efx->type->handle_global_event(channel, &event))
				break;
			/* else fall through */
		default:
			netif_err(channel->efx, hw, channel->efx->net_dev,
				  "channel %d unknown event type %d (data "
				  EFX_QWORD_FMT ")\n", channel->channel,
				  ev_code, EFX_QWORD_VAL(event));
		}
	}

out:
	channel->eventq_read_ptr = read_ptr;
	return spent;
}
/* Allocate buffer table entries for event queue */
int efx_farch_ev_probe(struct efx_channel *channel)
{
	struct efx_nic *efx = channel->efx;
	unsigned entries;

	entries = channel->eventq_mask + 1;
	return efx_alloc_special_buffer(efx, &channel->eventq,
					entries * sizeof(efx_qword_t));
}

int efx_farch_ev_init(struct efx_channel *channel)
{
	efx_oword_t reg;
	struct efx_nic *efx = channel->efx;

	netif_dbg(efx, hw, efx->net_dev,
		  "channel %d event queue in special buffers %d-%d\n",
		  channel->channel, channel->eventq.index,
		  channel->eventq.index + channel->eventq.entries - 1);

	if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) {
		EFX_POPULATE_OWORD_3(reg,
				     FRF_CZ_TIMER_Q_EN, 1,
				     FRF_CZ_HOST_NOTIFY_MODE, 0,
				     FRF_CZ_TIMER_MODE, FFE_CZ_TIMER_MODE_DIS);
		efx_writeo_table(efx, &reg, FR_BZ_TIMER_TBL, channel->channel);
	}

	/* Pin event queue buffer */
	efx_init_special_buffer(efx, &channel->eventq);

	/* Fill event queue with all ones (i.e. empty events) */
	memset(channel->eventq.buf.addr, 0xff, channel->eventq.buf.len);

	/* Push event queue to card */
	EFX_POPULATE_OWORD_3(reg,
			     FRF_AZ_EVQ_EN, 1,
			     FRF_AZ_EVQ_SIZE, __ffs(channel->eventq.entries),
			     FRF_AZ_EVQ_BUF_BASE_ID, channel->eventq.index);
	efx_writeo_table(efx, &reg, efx->type->evq_ptr_tbl_base,
			 channel->channel);

	return 0;
}

void efx_farch_ev_fini(struct efx_channel *channel)
{
	efx_oword_t reg;
	struct efx_nic *efx = channel->efx;

	/* Remove event queue from card */
	EFX_ZERO_OWORD(reg);
	efx_writeo_table(efx, &reg, efx->type->evq_ptr_tbl_base,
			 channel->channel);
	if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0)
		efx_writeo_table(efx, &reg, FR_BZ_TIMER_TBL, channel->channel);

	/* Unpin event queue */
	efx_fini_special_buffer(efx, &channel->eventq);
}

/* Free buffers backing event queue */
void efx_farch_ev_remove(struct efx_channel *channel)
{
	efx_free_special_buffer(channel->efx, &channel->eventq);
}

void efx_farch_ev_test_generate(struct efx_channel *channel)
{
	efx_farch_magic_event(channel, EFX_CHANNEL_MAGIC_TEST(channel));
}

void efx_farch_rx_defer_refill(struct efx_rx_queue *rx_queue)
{
	efx_farch_magic_event(efx_rx_queue_channel(rx_queue),
			      EFX_CHANNEL_MAGIC_FILL(rx_queue));
}
/**************************************************************************
 *
 * Hardware interrupts
 * The hardware interrupt handler does very little work; all the event
 * queue processing is carried out by per-channel tasklets.
 *
 **************************************************************************/

/* Enable/disable/generate interrupts */
static inline void efx_farch_interrupts(struct efx_nic *efx,
					bool enabled, bool force)
{
	efx_oword_t int_en_reg_ker;

	EFX_POPULATE_OWORD_3(int_en_reg_ker,
			     FRF_AZ_KER_INT_LEVE_SEL, efx->irq_level,
			     FRF_AZ_KER_INT_KER, force,
			     FRF_AZ_DRV_INT_EN_KER, enabled);
	efx_writeo(efx, &int_en_reg_ker, FR_AZ_INT_EN_KER);
}

void efx_farch_irq_enable_master(struct efx_nic *efx)
{
	EFX_ZERO_OWORD(*((efx_oword_t *) efx->irq_status.addr));
	wmb(); /* Ensure interrupt vector is clear before interrupts enabled */

	efx_farch_interrupts(efx, true, false);
}

void efx_farch_irq_disable_master(struct efx_nic *efx)
{
	/* Disable interrupts */
	efx_farch_interrupts(efx, false, false);
}

/* Generate a test interrupt
 * Interrupt must already have been enabled, otherwise nasty things
 * may happen.
 */
void efx_farch_irq_test_generate(struct efx_nic *efx)
{
	efx_farch_interrupts(efx, true, true);
}
/* Process a fatal interrupt
 * Disable bus mastering ASAP and schedule a reset
 */
irqreturn_t efx_farch_fatal_interrupt(struct efx_nic *efx)
{
	struct falcon_nic_data *nic_data = efx->nic_data;
	efx_oword_t *int_ker = efx->irq_status.addr;
	efx_oword_t fatal_intr;
	int error, mem_perr;

	efx_reado(efx, &fatal_intr, FR_AZ_FATAL_INTR_KER);
	error = EFX_OWORD_FIELD(fatal_intr, FRF_AZ_FATAL_INTR);

	netif_err(efx, hw, efx->net_dev, "SYSTEM ERROR "EFX_OWORD_FMT" status "
		  EFX_OWORD_FMT ": %s\n", EFX_OWORD_VAL(*int_ker),
		  EFX_OWORD_VAL(fatal_intr),
		  error ? "disabling bus mastering" : "no recognised error");

	/* If this is a memory parity error dump which blocks are offending */
	mem_perr = (EFX_OWORD_FIELD(fatal_intr, FRF_AZ_MEM_PERR_INT_KER) ||
		    EFX_OWORD_FIELD(fatal_intr, FRF_AZ_SRM_PERR_INT_KER));
	if (mem_perr) {
		efx_oword_t reg;
		efx_reado(efx, &reg, FR_AZ_MEM_STAT);
		netif_err(efx, hw, efx->net_dev,
			  "SYSTEM ERROR: memory parity error "EFX_OWORD_FMT"\n",
			  EFX_OWORD_VAL(reg));
	}

	/* Disable both devices */
	pci_clear_master(efx->pci_dev);
	if (efx_nic_is_dual_func(efx))
		pci_clear_master(nic_data->pci_dev2);
	efx_farch_irq_disable_master(efx);

	/* Count errors and reset or disable the NIC accordingly */
	if (efx->int_error_count == 0 ||
	    time_after(jiffies, efx->int_error_expire)) {
		efx->int_error_count = 0;
		efx->int_error_expire =
			jiffies + EFX_INT_ERROR_EXPIRE * HZ;
	}
	if (++efx->int_error_count < EFX_MAX_INT_ERRORS) {
		netif_err(efx, hw, efx->net_dev,
			  "SYSTEM ERROR - reset scheduled\n");
		efx_schedule_reset(efx, RESET_TYPE_INT_ERROR);
	} else {
		netif_err(efx, hw, efx->net_dev,
			  "SYSTEM ERROR - max number of errors seen."
			  "NIC will be disabled\n");
		efx_schedule_reset(efx, RESET_TYPE_DISABLE);
	}

	return IRQ_HANDLED;
}
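
/* With the defaults above (EFX_INT_ERROR_EXPIRE = 3600,
 * EFX_MAX_INT_ERRORS = 5), the fifth fatal interrupt within an hour
 * escalates from repeated resets to disabling the NIC outright.
 */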
/* Handle a legacy interrupt
 * Acknowledges the interrupt and schedule event queue processing.
 */
irqreturn_t efx_farch_legacy_interrupt(int irq, void *dev_id)
{
	struct efx_nic *efx = dev_id;
	bool soft_enabled = ACCESS_ONCE(efx->irq_soft_enabled);
	efx_oword_t *int_ker = efx->irq_status.addr;
	irqreturn_t result = IRQ_NONE;
	struct efx_channel *channel;
	efx_dword_t reg;
	u32 queues;
	int syserr;

	/* Read the ISR which also ACKs the interrupts */
	efx_readd(efx, &reg, FR_BZ_INT_ISR0);
	queues = EFX_EXTRACT_DWORD(reg, 0, 31);

	/* Legacy interrupts are disabled too late by the EEH kernel
	 * code. Disable them earlier.
	 * If an EEH error occurred, the read will have returned all ones.
	 */
	if (EFX_DWORD_IS_ALL_ONES(reg) && efx_try_recovery(efx) &&
	    !efx->eeh_disabled_legacy_irq) {
		disable_irq_nosync(efx->legacy_irq);
		efx->eeh_disabled_legacy_irq = true;
	}

	/* Handle non-event-queue sources */
	if (queues & (1U << efx->irq_level) && soft_enabled) {
		syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
		if (unlikely(syserr))
			return efx_farch_fatal_interrupt(efx);
		efx->last_irq_cpu = raw_smp_processor_id();
	}

	if (queues != 0) {
		efx->irq_zero_count = 0;

		/* Schedule processing of any interrupting queues */
		if (likely(soft_enabled)) {
			efx_for_each_channel(channel, efx) {
				if (queues & 1)
					efx_schedule_channel_irq(channel);
				queues >>= 1;
			}
		}
		result = IRQ_HANDLED;
	} else {
		efx_qword_t *event;

		/* Legacy ISR read can return zero once (SF bug 15783) */

		/* We can't return IRQ_HANDLED more than once on seeing ISR=0
		 * because this might be a shared interrupt. */
		if (efx->irq_zero_count++ == 0)
			result = IRQ_HANDLED;

		/* Ensure we schedule or rearm all event queues */
		if (likely(soft_enabled)) {
			efx_for_each_channel(channel, efx) {
				event = efx_event(channel,
						  channel->eventq_read_ptr);
				if (efx_event_present(event))
					efx_schedule_channel_irq(channel);
				else
					efx_farch_ev_read_ack(channel);
			}
		}
	}

	if (result == IRQ_HANDLED)
		netif_vdbg(efx, intr, efx->net_dev,
			   "IRQ %d on CPU %d status " EFX_DWORD_FMT "\n",
			   irq, raw_smp_processor_id(), EFX_DWORD_VAL(reg));

	return result;
}
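
/* Note on the ISR bitmap read above: bit n of INT_ISR0 corresponds to
 * channel n, which is why the scheduling loop shifts 'queues' right once
 * per channel; bit efx->irq_level additionally flags non-event-queue
 * (e.g. fatal) interrupt sources.
 */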
/* Handle an MSI interrupt
 *
 * Handle an MSI hardware interrupt. This routine schedules event
 * queue processing. No interrupt acknowledgement cycle is necessary.
 * Also, we never need to check that the interrupt is for us, since
 * MSI interrupts cannot be shared.
 */
irqreturn_t efx_farch_msi_interrupt(int irq, void *dev_id)
{
	struct efx_msi_context *context = dev_id;
	struct efx_nic *efx = context->efx;
	efx_oword_t *int_ker = efx->irq_status.addr;
	int syserr;

	netif_vdbg(efx, intr, efx->net_dev,
		   "IRQ %d on CPU %d status " EFX_OWORD_FMT "\n",
		   irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker));

	if (!likely(ACCESS_ONCE(efx->irq_soft_enabled)))
		return IRQ_HANDLED;

	/* Handle non-event-queue sources */
	if (context->index == efx->irq_level) {
		syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
		if (unlikely(syserr))
			return efx_farch_fatal_interrupt(efx);
		efx->last_irq_cpu = raw_smp_processor_id();
	}

	/* Schedule processing of the channel */
	efx_schedule_channel_irq(efx->channel[context->index]);

	return IRQ_HANDLED;
}
/* Setup RSS indirection table.
 * This maps from the hash value of the packet to RXQ
 */
void efx_farch_rx_push_indir_table(struct efx_nic *efx)
{
	size_t i = 0;
	efx_dword_t dword;

	if (efx_nic_rev(efx) < EFX_REV_FALCON_B0)
		return;

	BUILD_BUG_ON(ARRAY_SIZE(efx->rx_indir_table) !=
		     FR_BZ_RX_INDIRECTION_TBL_ROWS);

	for (i = 0; i < FR_BZ_RX_INDIRECTION_TBL_ROWS; i++) {
		EFX_POPULATE_DWORD_1(dword, FRF_BZ_IT_QUEUE,
				     efx->rx_indir_table[i]);
		efx_writed(efx, &dword,
			   FR_BZ_RX_INDIRECTION_TBL +
			   FR_BZ_RX_INDIRECTION_TBL_STEP * i);
	}
}
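
/* Example: with four RX queues, filling rx_indir_table with the repeating
 * pattern 0,1,2,3 spreads flows evenly; the hardware indexes the table
 * with low-order bits of the packet's RSS hash to pick the destination
 * queue.
 */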
/* Looks at available SRAM resources and works out how many queues we
 * can support, and where things like descriptor caches should live.
 *
 * SRAM is split up as follows:
 * 0                          buftbl entries for channels
 * efx->vf_buftbl_base        buftbl entries for SR-IOV
 * efx->rx_dc_base            RX descriptor caches
 * efx->tx_dc_base            TX descriptor caches
 */
void efx_farch_dimension_resources(struct efx_nic *efx, unsigned sram_lim_qw)
{
	unsigned vi_count, buftbl_min;

	/* Account for the buffer table entries backing the datapath channels
	 * and the descriptor caches for those channels.
	 */
	buftbl_min = ((efx->n_rx_channels * EFX_MAX_DMAQ_SIZE +
		       efx->n_tx_channels * EFX_TXQ_TYPES * EFX_MAX_DMAQ_SIZE +
		       efx->n_channels * EFX_MAX_EVQ_SIZE)
		      * sizeof(efx_qword_t) / EFX_BUF_SIZE);
	vi_count = max(efx->n_channels, efx->n_tx_channels * EFX_TXQ_TYPES);

#ifdef CONFIG_SFC_SRIOV
	if (efx_sriov_wanted(efx)) {
		unsigned vi_dc_entries, buftbl_free, entries_per_vf, vf_limit;

		efx->vf_buftbl_base = buftbl_min;

		vi_dc_entries = RX_DC_ENTRIES + TX_DC_ENTRIES;
		vi_count = max(vi_count, EFX_VI_BASE);
		buftbl_free = (sram_lim_qw - buftbl_min -
			       vi_count * vi_dc_entries);

		entries_per_vf = ((vi_dc_entries + EFX_VF_BUFTBL_PER_VI) *
				  efx_vf_size(efx));
		vf_limit = min(buftbl_free / entries_per_vf,
			       (1024U - EFX_VI_BASE) >> efx->vi_scale);

		if (efx->vf_count > vf_limit) {
			netif_err(efx, probe, efx->net_dev,
				  "Reducing VF count from %d to %d\n",
				  efx->vf_count, vf_limit);
			efx->vf_count = vf_limit;
		}
		vi_count += efx->vf_count * efx_vf_size(efx);
	}
#endif

	efx->tx_dc_base = sram_lim_qw - vi_count * TX_DC_ENTRIES;
	efx->rx_dc_base = efx->tx_dc_base - vi_count * RX_DC_ENTRIES;
}
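
/* Worked example of the carve-up above, ignoring SR-IOV: for vi_count
 * VIs, the top vi_count * TX_DC_ENTRIES (16) qwords of SRAM hold the TX
 * descriptor caches, the vi_count * RX_DC_ENTRIES (64) qwords below them
 * hold the RX caches, and buffer table entries grow up from address 0.
 */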
u32 efx_farch_fpga_ver(struct efx_nic *efx)
{
	efx_oword_t altera_build;
	efx_reado(efx, &altera_build, FR_AZ_ALTERA_BUILD);
	return EFX_OWORD_FIELD(altera_build, FRF_AZ_ALTERA_BUILD_VER);
}
void efx_farch_init_common(struct efx_nic *efx)
{
	efx_oword_t temp;

	/* Set positions of descriptor caches in SRAM. */
	EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_TX_DC_BASE_ADR, efx->tx_dc_base);
	efx_writeo(efx, &temp, FR_AZ_SRM_TX_DC_CFG);
	EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_RX_DC_BASE_ADR, efx->rx_dc_base);
	efx_writeo(efx, &temp, FR_AZ_SRM_RX_DC_CFG);

	/* Set TX descriptor cache size. */
	BUILD_BUG_ON(TX_DC_ENTRIES != (8 << TX_DC_ENTRIES_ORDER));
	EFX_POPULATE_OWORD_1(temp, FRF_AZ_TX_DC_SIZE, TX_DC_ENTRIES_ORDER);
	efx_writeo(efx, &temp, FR_AZ_TX_DC_CFG);

	/* Set RX descriptor cache size. Set low watermark to size-8, as
	 * this allows most efficient prefetching.
	 */
	BUILD_BUG_ON(RX_DC_ENTRIES != (8 << RX_DC_ENTRIES_ORDER));
	EFX_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_SIZE, RX_DC_ENTRIES_ORDER);
	efx_writeo(efx, &temp, FR_AZ_RX_DC_CFG);
	EFX_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_PF_LWM, RX_DC_ENTRIES - 8);
	efx_writeo(efx, &temp, FR_AZ_RX_DC_PF_WM);

	/* Program INT_KER address */
	EFX_POPULATE_OWORD_2(temp,
			     FRF_AZ_NORM_INT_VEC_DIS_KER,
			     EFX_INT_MODE_USE_MSI(efx),
			     FRF_AZ_INT_ADR_KER, efx->irq_status.dma_addr);
	efx_writeo(efx, &temp, FR_AZ_INT_ADR_KER);

	if (EFX_WORKAROUND_17213(efx) && !EFX_INT_MODE_USE_MSI(efx))
		/* Use an interrupt level unused by event queues */
		efx->irq_level = 0x1f;
	else
		/* Use a valid MSI-X vector */
		efx->irq_level = 0;

	/* Enable all the genuinely fatal interrupts. (They are still
	 * masked by the overall interrupt mask, controlled by
	 * falcon_interrupts()).
	 *
	 * Note: All other fatal interrupts are enabled
	 */
	EFX_POPULATE_OWORD_3(temp,
			     FRF_AZ_ILL_ADR_INT_KER_EN, 1,
			     FRF_AZ_RBUF_OWN_INT_KER_EN, 1,
			     FRF_AZ_TBUF_OWN_INT_KER_EN, 1);
	if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0)
		EFX_SET_OWORD_FIELD(temp, FRF_CZ_SRAM_PERR_INT_P_KER_EN, 1);
	EFX_INVERT_OWORD(temp);
	efx_writeo(efx, &temp, FR_AZ_FATAL_INTR_KER);

	efx_farch_rx_push_indir_table(efx);

	/* Disable the ugly timer-based TX DMA backoff and allow TX DMA to be
	 * controlled by the RX FIFO fill level. Set arbitration to one pkt/Q.
	 */
	efx_reado(efx, &temp, FR_AZ_TX_RESERVED);
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER, 0xfe);
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER_EN, 1);
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_ONE_PKT_PER_Q, 1);
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PUSH_EN, 1);
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_DIS_NON_IP_EV, 1);
	/* Enable SW_EV to inherit in char driver - assume harmless here */
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_SOFT_EVT_EN, 1);
	/* Prefetch threshold 2 => fetch when descriptor cache half empty */
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_THRESHOLD, 2);
	/* Disable hardware watchdog which can misfire */
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_WD_TMR, 0x3fffff);
	/* Squash TX of packets of 16 bytes or less */
	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
		EFX_SET_OWORD_FIELD(temp, FRF_BZ_TX_FLUSH_MIN_LEN_EN, 1);
	efx_writeo(efx, &temp, FR_AZ_TX_RESERVED);

	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
		EFX_POPULATE_OWORD_4(temp,
				     /* Default values */
				     FRF_BZ_TX_PACE_SB_NOT_AF, 0x15,
				     FRF_BZ_TX_PACE_SB_AF, 0xb,
				     FRF_BZ_TX_PACE_FB_BASE, 0,
				     /* Allow large pace values in the
				      * fast bin. */
				     FRF_BZ_TX_PACE_BIN_TH,
				     FFE_BZ_TX_PACE_RESERVED);
		efx_writeo(efx, &temp, FR_BZ_TX_PACE);
	}
}
/**************************************************************************
 *
 * Filter tables
 *
 **************************************************************************
 */

/* "Fudge factors" - difference between programmed value and actual depth.
 * Due to pipelined implementation we need to program H/W with a value that
 * is larger than the hop limit we want.
 */
#define EFX_FARCH_FILTER_CTL_SRCH_FUDGE_WILD 3
#define EFX_FARCH_FILTER_CTL_SRCH_FUDGE_FULL 1

/* Hard maximum search limit. Hardware will time-out beyond 200-something.
 * We also need to avoid infinite loops in efx_farch_filter_search() when the
 * table is full.
 */
#define EFX_FARCH_FILTER_CTL_SRCH_MAX 200

/* Don't try very hard to find space for performance hints, as this is
 * counter-productive. */
#define EFX_FARCH_FILTER_CTL_SRCH_HINT_MAX 5

enum efx_farch_filter_type {
	EFX_FARCH_FILTER_TCP_FULL = 0,
	EFX_FARCH_FILTER_TCP_WILD,
	EFX_FARCH_FILTER_UDP_FULL,
	EFX_FARCH_FILTER_UDP_WILD,
	EFX_FARCH_FILTER_MAC_FULL = 4,
	EFX_FARCH_FILTER_MAC_WILD,
	EFX_FARCH_FILTER_UC_DEF = 8,
	EFX_FARCH_FILTER_MC_DEF,
	EFX_FARCH_FILTER_TYPE_COUNT,	/* number of specific types */
};

enum efx_farch_filter_table_id {
	EFX_FARCH_FILTER_TABLE_RX_IP = 0,
	EFX_FARCH_FILTER_TABLE_RX_MAC,
	EFX_FARCH_FILTER_TABLE_RX_DEF,
	EFX_FARCH_FILTER_TABLE_TX_MAC,
	EFX_FARCH_FILTER_TABLE_COUNT,
};

enum efx_farch_filter_index {
	EFX_FARCH_FILTER_INDEX_UC_DEF,
	EFX_FARCH_FILTER_INDEX_MC_DEF,
	EFX_FARCH_FILTER_SIZE_RX_DEF,
};

struct efx_farch_filter_spec {
	u8	type:4;
	u8	priority:4;
	u8	flags;
	u16	dmaq_id;
	u32	data[3];
};

struct efx_farch_filter_table {
	enum efx_farch_filter_table_id id;
	u32		offset;		/* address of table relative to BAR */
	unsigned	size;		/* number of entries */
	unsigned	step;		/* step between entries */
	unsigned	used;		/* number currently used */
	unsigned long	*used_bitmap;
	struct efx_farch_filter_spec *spec;
	unsigned	search_limit[EFX_FARCH_FILTER_TYPE_COUNT];
};

struct efx_farch_filter_state {
	struct efx_farch_filter_table table[EFX_FARCH_FILTER_TABLE_COUNT];
};

static void
efx_farch_filter_table_clear_entry(struct efx_nic *efx,
				   struct efx_farch_filter_table *table,
				   unsigned int filter_idx);

/* The filter hash function is LFSR polynomial x^16 + x^3 + 1 of a 32-bit
 * key derived from the n-tuple. The initial LFSR state is 0xffff. */
static u16 efx_farch_filter_hash(u32 key)
{
	u16 tmp;

	/* First 16 rounds */
	tmp = 0x1fff ^ key >> 16;
	tmp = tmp ^ tmp >> 3 ^ tmp >> 6;
	tmp = tmp ^ tmp >> 9;
	/* Last 16 rounds */
	tmp = tmp ^ tmp << 13 ^ key;
	tmp = tmp ^ tmp >> 3 ^ tmp >> 6;
	return tmp ^ tmp >> 9;
}
/* To allow for hash collisions, filter search continues at these
 * increments from the first possible entry selected by the hash. */
static u16 efx_farch_filter_increment(u32 key)
{
	return key * 2 - 1;
}
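
/* A rough sketch of how the hash and increment combine for open
 * addressing; the real search loop lives in efx_farch_filter_search(),
 * which is outside this extract, so the names here are illustrative:
 *
 *	hash = efx_farch_filter_hash(key);
 *	incr = efx_farch_filter_increment(key);
 *	for (depth = 1; depth <= search_limit; depth++) {
 *		i = (hash + incr * depth) & (table->size - 1);
 *		// probe table->spec[i], stop on match or free slot
 *	}
 */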
static enum efx_farch_filter_table_id
efx_farch_filter_spec_table_id(const struct efx_farch_filter_spec *spec)
{
	BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_RX_IP !=
		     (EFX_FARCH_FILTER_TCP_FULL >> 2));
	BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_RX_IP !=
		     (EFX_FARCH_FILTER_TCP_WILD >> 2));
	BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_RX_IP !=
		     (EFX_FARCH_FILTER_UDP_FULL >> 2));
	BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_RX_IP !=
		     (EFX_FARCH_FILTER_UDP_WILD >> 2));
	BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_RX_MAC !=
		     (EFX_FARCH_FILTER_MAC_FULL >> 2));
	BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_RX_MAC !=
		     (EFX_FARCH_FILTER_MAC_WILD >> 2));
	BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_TX_MAC !=
		     EFX_FARCH_FILTER_TABLE_RX_MAC + 2);
	return (spec->type >> 2) + ((spec->flags & EFX_FILTER_FLAG_TX) ? 2 : 0);
}
1905 static void efx_farch_filter_push_rx_config(struct efx_nic *efx)
1907 struct efx_farch_filter_state *state = efx->filter_state;
1908 struct efx_farch_filter_table *table;
1909 efx_oword_t filter_ctl;
1911 efx_reado(efx, &filter_ctl, FR_BZ_RX_FILTER_CTL);
1913 table = &state->table[EFX_FARCH_FILTER_TABLE_RX_IP];
1914 EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_TCP_FULL_SRCH_LIMIT,
1915 table->search_limit[EFX_FARCH_FILTER_TCP_FULL] +
1916 EFX_FARCH_FILTER_CTL_SRCH_FUDGE_FULL);
1917 EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_TCP_WILD_SRCH_LIMIT,
1918 table->search_limit[EFX_FARCH_FILTER_TCP_WILD] +
1919 EFX_FARCH_FILTER_CTL_SRCH_FUDGE_WILD);
1920 EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_UDP_FULL_SRCH_LIMIT,
1921 table->search_limit[EFX_FARCH_FILTER_UDP_FULL] +
1922 EFX_FARCH_FILTER_CTL_SRCH_FUDGE_FULL);
1923 EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_UDP_WILD_SRCH_LIMIT,
1924 table->search_limit[EFX_FARCH_FILTER_UDP_WILD] +
1925 EFX_FARCH_FILTER_CTL_SRCH_FUDGE_WILD);
1927 table = &state->table[EFX_FARCH_FILTER_TABLE_RX_MAC];
1928 if (table->size) {
1929 EFX_SET_OWORD_FIELD(
1930 filter_ctl, FRF_CZ_ETHERNET_FULL_SEARCH_LIMIT,
1931 table->search_limit[EFX_FARCH_FILTER_MAC_FULL] +
1932 EFX_FARCH_FILTER_CTL_SRCH_FUDGE_FULL);
1933 EFX_SET_OWORD_FIELD(
1934 filter_ctl, FRF_CZ_ETHERNET_WILDCARD_SEARCH_LIMIT,
1935 table->search_limit[EFX_FARCH_FILTER_MAC_WILD] +
1936 EFX_FARCH_FILTER_CTL_SRCH_FUDGE_WILD);
1937 }
1939 table = &state->table[EFX_FARCH_FILTER_TABLE_RX_DEF];
1940 if (table->size) {
1941 EFX_SET_OWORD_FIELD(
1942 filter_ctl, FRF_CZ_UNICAST_NOMATCH_Q_ID,
1943 table->spec[EFX_FARCH_FILTER_INDEX_UC_DEF].dmaq_id);
1944 EFX_SET_OWORD_FIELD(
1945 filter_ctl, FRF_CZ_UNICAST_NOMATCH_RSS_ENABLED,
1946 !!(table->spec[EFX_FARCH_FILTER_INDEX_UC_DEF].flags &
1947 EFX_FILTER_FLAG_RX_RSS));
1948 EFX_SET_OWORD_FIELD(
1949 filter_ctl, FRF_CZ_MULTICAST_NOMATCH_Q_ID,
1950 table->spec[EFX_FARCH_FILTER_INDEX_MC_DEF].dmaq_id);
1951 EFX_SET_OWORD_FIELD(
1952 filter_ctl, FRF_CZ_MULTICAST_NOMATCH_RSS_ENABLED,
1953 !!(table->spec[EFX_FARCH_FILTER_INDEX_MC_DEF].flags &
1954 EFX_FILTER_FLAG_RX_RSS));
1956 /* There is a single bit to enable RX scatter for all
1957 * unmatched packets. Only set it if scatter is
1958 * enabled in both filter specs.
1959 */
1960 EFX_SET_OWORD_FIELD(
1961 filter_ctl, FRF_BZ_SCATTER_ENBL_NO_MATCH_Q,
1962 !!(table->spec[EFX_FARCH_FILTER_INDEX_UC_DEF].flags &
1963 table->spec[EFX_FARCH_FILTER_INDEX_MC_DEF].flags &
1964 EFX_FILTER_FLAG_RX_SCATTER));
1965 } else if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
1966 /* We don't expose 'default' filters because unmatched
1967 * packets always go to the queue number found in the
1968 * RSS table. But we still need to set the RX scatter
1969 * bit here.
1970 */
1971 EFX_SET_OWORD_FIELD(
1972 filter_ctl, FRF_BZ_SCATTER_ENBL_NO_MATCH_Q,
1973 efx->rx_scatter);
1974 }
1976 efx_writeo(efx, &filter_ctl, FR_BZ_RX_FILTER_CTL);
1979 static void efx_farch_filter_push_tx_limits(struct efx_nic *efx)
1981 struct efx_farch_filter_state *state = efx->filter_state;
1982 struct efx_farch_filter_table *table;
1983 efx_oword_t tx_cfg;
1985 efx_reado(efx, &tx_cfg, FR_AZ_TX_CFG);
1987 table = &state->table[EFX_FARCH_FILTER_TABLE_TX_MAC];
1988 if (table->size) {
1989 EFX_SET_OWORD_FIELD(
1990 tx_cfg, FRF_CZ_TX_ETH_FILTER_FULL_SEARCH_RANGE,
1991 table->search_limit[EFX_FARCH_FILTER_MAC_FULL] +
1992 EFX_FARCH_FILTER_CTL_SRCH_FUDGE_FULL);
1993 EFX_SET_OWORD_FIELD(
1994 tx_cfg, FRF_CZ_TX_ETH_FILTER_WILD_SEARCH_RANGE,
1995 table->search_limit[EFX_FARCH_FILTER_MAC_WILD] +
1996 EFX_FARCH_FILTER_CTL_SRCH_FUDGE_WILD);
1997 }
1999 efx_writeo(efx, &tx_cfg, FR_AZ_TX_CFG);
2000 }

2002 static int
2003 efx_farch_filter_from_gen_spec(struct efx_farch_filter_spec *spec,
2004 const struct efx_filter_spec *gen_spec)
2006 bool is_full = false;
2008 if ((gen_spec->flags & EFX_FILTER_FLAG_RX_RSS) &&
2009 gen_spec->rss_context != EFX_FILTER_RSS_CONTEXT_DEFAULT)
2010 return -EINVAL;
2012 spec->priority = gen_spec->priority;
2013 spec->flags = gen_spec->flags;
2014 spec->dmaq_id = gen_spec->dmaq_id;
2016 switch (gen_spec->match_flags) {
2017 case (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO |
2018 EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT |
2019 EFX_FILTER_MATCH_REM_HOST | EFX_FILTER_MATCH_REM_PORT):
2020 is_full = true;
2022 case (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO |
2023 EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT): {
2024 __be32 rhost, host1, host2;
2025 __be16 rport, port1, port2;
2027 EFX_BUG_ON_PARANOID(!(gen_spec->flags & EFX_FILTER_FLAG_RX));
2029 if (gen_spec->ether_type != htons(ETH_P_IP))
2030 return -EPROTONOSUPPORT;
2031 if (gen_spec->loc_port == 0 ||
2032 (is_full && gen_spec->rem_port == 0))
2033 return -EADDRNOTAVAIL;
2034 switch (gen_spec->ip_proto) {
2035 case IPPROTO_TCP:
2036 spec->type = (is_full ? EFX_FARCH_FILTER_TCP_FULL :
2037 EFX_FARCH_FILTER_TCP_WILD);
2038 break;
2039 case IPPROTO_UDP:
2040 spec->type = (is_full ? EFX_FARCH_FILTER_UDP_FULL :
2041 EFX_FARCH_FILTER_UDP_WILD);
2042 break;
2043 default:
2044 return -EPROTONOSUPPORT;
2045 }
2047 /* Filter is constructed in terms of source and destination,
2048 * with the odd wrinkle that the ports are swapped in a UDP
2049 * wildcard filter. We need to convert from local and remote
2050 * (= zero for wildcard) addresses.
2051 */
2052 rhost = is_full ? gen_spec->rem_host[0] : 0;
2053 rport = is_full ? gen_spec->rem_port : 0;
2054 host1 = rhost;
2055 host2 = gen_spec->loc_host[0];
2056 if (!is_full && gen_spec->ip_proto == IPPROTO_UDP) {
2057 port1 = gen_spec->loc_port;
2058 port2 = rport;
2059 } else {
2060 port1 = rport;
2061 port2 = gen_spec->loc_port;
2062 }
2063 spec->data[0] = ntohl(host1) << 16 | ntohs(port1);
2064 spec->data[1] = ntohs(port2) << 16 | ntohl(host1) >> 16;
2065 spec->data[2] = ntohl(host2);
2066 break;
2067 }
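/* Editor's worked example: a wildcard UDP filter for local port 53 on
 * local address 192.0.2.1 has is_full == false, so host1 == 0,
 * port1 == 53, port2 == 0 and host2 is the local address, giving
 * data[0] == 53, data[1] == 0 and data[2] == 0xc0000201.
 */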
2070 case EFX_FILTER_MATCH_LOC_MAC | EFX_FILTER_MATCH_OUTER_VID:
2071 is_full = true;
2073 case EFX_FILTER_MATCH_LOC_MAC:
2074 spec->type = (is_full ? EFX_FARCH_FILTER_MAC_FULL :
2075 EFX_FARCH_FILTER_MAC_WILD);
2076 spec->data[0] = is_full ? ntohs(gen_spec->outer_vid) : 0;
2077 spec->data[1] = (gen_spec->loc_mac[2] << 24 |
2078 gen_spec->loc_mac[3] << 16 |
2079 gen_spec->loc_mac[4] << 8 |
2080 gen_spec->loc_mac[5]);
2081 spec->data[2] = (gen_spec->loc_mac[0] << 8 |
2082 gen_spec->loc_mac[1]);
2083 break;
2085 case EFX_FILTER_MATCH_LOC_MAC_IG:
2086 spec->type = (is_multicast_ether_addr(gen_spec->loc_mac) ?
2087 EFX_FARCH_FILTER_MC_DEF :
2088 EFX_FARCH_FILTER_UC_DEF);
2089 memset(spec->data, 0, sizeof(spec->data)); /* ensure equality */
2090 break;
2092 default:
2093 return -EPROTONOSUPPORT;
2094 }
2096 return 0;
2097 }

2099 static void
2100 efx_farch_filter_to_gen_spec(struct efx_filter_spec *gen_spec,
2101 const struct efx_farch_filter_spec *spec)
2103 bool is_full = false;
2105 /* *gen_spec should be completely initialised, to be consistent
2106 * with efx_filter_init_{rx,tx}() and in case we want to copy
2107 * it back to userland.
2108 */
2109 memset(gen_spec, 0, sizeof(*gen_spec));
2111 gen_spec->priority = spec->priority;
2112 gen_spec->flags = spec->flags;
2113 gen_spec->dmaq_id = spec->dmaq_id;
2115 switch (spec->type) {
2116 case EFX_FARCH_FILTER_TCP_FULL:
2117 case EFX_FARCH_FILTER_UDP_FULL:
2118 is_full = true;
2120 case EFX_FARCH_FILTER_TCP_WILD:
2121 case EFX_FARCH_FILTER_UDP_WILD: {
2122 __be32 host1, host2;
2123 __be16 port1, port2;
2125 gen_spec->match_flags =
2126 EFX_FILTER_MATCH_ETHER_TYPE |
2127 EFX_FILTER_MATCH_IP_PROTO |
2128 EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT;
2129 if (is_full)
2130 gen_spec->match_flags |= (EFX_FILTER_MATCH_REM_HOST |
2131 EFX_FILTER_MATCH_REM_PORT);
2132 gen_spec->ether_type = htons(ETH_P_IP);
2133 gen_spec->ip_proto =
2134 (spec->type == EFX_FARCH_FILTER_TCP_FULL ||
2135 spec->type == EFX_FARCH_FILTER_TCP_WILD) ?
2136 IPPROTO_TCP : IPPROTO_UDP;
2138 host1 = htonl(spec->data[0] >> 16 | spec->data[1] << 16);
2139 port1 = htons(spec->data[0]);
2140 host2 = htonl(spec->data[2]);
2141 port2 = htons(spec->data[1] >> 16);
2142 if (spec->flags & EFX_FILTER_FLAG_TX) {
2143 gen_spec->loc_host[0] = host1;
2144 gen_spec->rem_host[0] = host2;
2145 } else {
2146 gen_spec->loc_host[0] = host2;
2147 gen_spec->rem_host[0] = host1;
2148 }
2149 if (!!(gen_spec->flags & EFX_FILTER_FLAG_TX) ^
2150 (!is_full && gen_spec->ip_proto == IPPROTO_UDP)) {
2151 gen_spec->loc_port = port1;
2152 gen_spec->rem_port = port2;
2153 } else {
2154 gen_spec->loc_port = port2;
2155 gen_spec->rem_port = port1;
2156 }
2158 break;
2159 }
2161 case EFX_FARCH_FILTER_MAC_FULL:
2162 is_full = true;
2164 case EFX_FARCH_FILTER_MAC_WILD:
2165 gen_spec->match_flags = EFX_FILTER_MATCH_LOC_MAC;
2166 if (is_full)
2167 gen_spec->match_flags |= EFX_FILTER_MATCH_OUTER_VID;
2168 gen_spec->loc_mac[0] = spec->data[2] >> 8;
2169 gen_spec->loc_mac[1] = spec->data[2];
2170 gen_spec->loc_mac[2] = spec->data[1] >> 24;
2171 gen_spec->loc_mac[3] = spec->data[1] >> 16;
2172 gen_spec->loc_mac[4] = spec->data[1] >> 8;
2173 gen_spec->loc_mac[5] = spec->data[1];
2174 gen_spec->outer_vid = htons(spec->data[0]);
2175 break;
2177 case EFX_FARCH_FILTER_UC_DEF:
2178 case EFX_FARCH_FILTER_MC_DEF:
2179 gen_spec->match_flags = EFX_FILTER_MATCH_LOC_MAC_IG;
2180 gen_spec->loc_mac[0] = spec->type == EFX_FARCH_FILTER_MC_DEF;
2181 break;
2183 default:
2184 WARN_ON(1);
2185 break;
2186 }
2187 }

2189 static void
2190 efx_farch_filter_init_rx_for_stack(struct efx_nic *efx,
2191 struct efx_farch_filter_spec *spec)
2193 /* If there's only one channel then disable RSS for non VF
2194 * traffic, thereby allowing VFs to use RSS when the PF can't.
2195 */
2196 spec->priority = EFX_FILTER_PRI_REQUIRED;
2197 spec->flags = (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_RX_STACK |
2198 (efx->n_rx_channels > 1 ? EFX_FILTER_FLAG_RX_RSS : 0) |
2199 (efx->rx_scatter ? EFX_FILTER_FLAG_RX_SCATTER : 0));
2200 }
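/* Editor's note: with several RX channels and scatter enabled the flags
 * above evaluate to EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_RX_STACK |
 * EFX_FILTER_FLAG_RX_RSS | EFX_FILTER_FLAG_RX_SCATTER; with a single RX
 * channel the RSS flag is omitted, as the comment above explains.
 */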
2203 /* Build a filter entry and return its n-tuple key. */
2204 static u32 efx_farch_filter_build(efx_oword_t *filter,
2205 struct efx_farch_filter_spec *spec)
2206 {
2207 u32 data3;
2209 switch (efx_farch_filter_spec_table_id(spec)) {
2210 case EFX_FARCH_FILTER_TABLE_RX_IP: {
2211 bool is_udp = (spec->type == EFX_FARCH_FILTER_UDP_FULL ||
2212 spec->type == EFX_FARCH_FILTER_UDP_WILD);
2213 EFX_POPULATE_OWORD_7(
2214 *filter,
2215 FRF_BZ_RSS_EN,
2216 !!(spec->flags & EFX_FILTER_FLAG_RX_RSS),
2217 FRF_BZ_SCATTER_EN,
2218 !!(spec->flags & EFX_FILTER_FLAG_RX_SCATTER),
2219 FRF_BZ_TCP_UDP, is_udp,
2220 FRF_BZ_RXQ_ID, spec->dmaq_id,
2221 EFX_DWORD_2, spec->data[2],
2222 EFX_DWORD_1, spec->data[1],
2223 EFX_DWORD_0, spec->data[0]);
2224 data3 = is_udp;
2225 break;
2226 }
2228 case EFX_FARCH_FILTER_TABLE_RX_MAC: {
2229 bool is_wild = spec->type == EFX_FARCH_FILTER_MAC_WILD;
2230 EFX_POPULATE_OWORD_7(
2231 *filter,
2232 FRF_CZ_RMFT_RSS_EN,
2233 !!(spec->flags & EFX_FILTER_FLAG_RX_RSS),
2234 FRF_CZ_RMFT_SCATTER_EN,
2235 !!(spec->flags & EFX_FILTER_FLAG_RX_SCATTER),
2236 FRF_CZ_RMFT_RXQ_ID, spec->dmaq_id,
2237 FRF_CZ_RMFT_WILDCARD_MATCH, is_wild,
2238 FRF_CZ_RMFT_DEST_MAC_HI, spec->data[2],
2239 FRF_CZ_RMFT_DEST_MAC_LO, spec->data[1],
2240 FRF_CZ_RMFT_VLAN_ID, spec->data[0]);
2241 data3 = is_wild;
2242 break;
2243 }
2245 case EFX_FARCH_FILTER_TABLE_TX_MAC: {
2246 bool is_wild = spec->type == EFX_FARCH_FILTER_MAC_WILD;
2247 EFX_POPULATE_OWORD_5(*filter,
2248 FRF_CZ_TMFT_TXQ_ID, spec->dmaq_id,
2249 FRF_CZ_TMFT_WILDCARD_MATCH, is_wild,
2250 FRF_CZ_TMFT_SRC_MAC_HI, spec->data[2],
2251 FRF_CZ_TMFT_SRC_MAC_LO, spec->data[1],
2252 FRF_CZ_TMFT_VLAN_ID, spec->data[0]);
2253 data3 = is_wild | spec->dmaq_id << 1;
2254 break;
2255 }
2257 default:
2258 BUG();
2259 }
2261 return spec->data[0] ^ spec->data[1] ^ spec->data[2] ^ data3;
2262 }
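/* Editor's note: the returned key is simply the XOR of the words that
 * define the filter's identity, so two specs matching the same traffic
 * produce the same key and hence the same hash/increment probe sequence
 * in efx_farch_filter_insert() below.
 */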
2264 static bool efx_farch_filter_equal(const struct efx_farch_filter_spec *left,
2265 const struct efx_farch_filter_spec *right)
2267 if (left->type != right->type ||
2268 memcmp(left->data, right->data, sizeof(left->data)))
2269 return false;
2271 if (left->flags & EFX_FILTER_FLAG_TX &&
2272 left->dmaq_id != right->dmaq_id)
2273 return false;
2275 return true;
2276 }

2278 /*
2279 * Construct/deconstruct external filter IDs. At least the RX filter
2280 * IDs must be ordered by matching priority, for RX NFC semantics.
2282 * Deconstruction needs to be robust against invalid IDs so that
2283 * efx_filter_remove_id_safe() and efx_filter_get_filter_safe() can
2284 * accept user-provided IDs.
2285 */
2287 #define EFX_FARCH_FILTER_MATCH_PRI_COUNT 5
2289 static const u8 efx_farch_filter_type_match_pri[EFX_FARCH_FILTER_TYPE_COUNT] = {
2290 [EFX_FARCH_FILTER_TCP_FULL] = 0,
2291 [EFX_FARCH_FILTER_UDP_FULL] = 0,
2292 [EFX_FARCH_FILTER_TCP_WILD] = 1,
2293 [EFX_FARCH_FILTER_UDP_WILD] = 1,
2294 [EFX_FARCH_FILTER_MAC_FULL] = 2,
2295 [EFX_FARCH_FILTER_MAC_WILD] = 3,
2296 [EFX_FARCH_FILTER_UC_DEF] = 4,
2297 [EFX_FARCH_FILTER_MC_DEF] = 4,
2298 };
2300 static const enum efx_farch_filter_table_id efx_farch_filter_range_table[] = {
2301 EFX_FARCH_FILTER_TABLE_RX_IP, /* RX match pri 0 */
2302 EFX_FARCH_FILTER_TABLE_RX_IP,
2303 EFX_FARCH_FILTER_TABLE_RX_MAC,
2304 EFX_FARCH_FILTER_TABLE_RX_MAC,
2305 EFX_FARCH_FILTER_TABLE_RX_DEF, /* RX match pri 4 */
2306 EFX_FARCH_FILTER_TABLE_TX_MAC, /* TX match pri 0 */
2307 EFX_FARCH_FILTER_TABLE_TX_MAC, /* TX match pri 1 */
2308 };
2310 #define EFX_FARCH_FILTER_INDEX_WIDTH 13
2311 #define EFX_FARCH_FILTER_INDEX_MASK ((1 << EFX_FARCH_FILTER_INDEX_WIDTH) - 1)
2313 static inline u32
2314 efx_farch_filter_make_id(const struct efx_farch_filter_spec *spec,
2315 unsigned int index)
2316 {
2317 unsigned int range;
2319 range = efx_farch_filter_type_match_pri[spec->type];
2320 if (!(spec->flags & EFX_FILTER_FLAG_RX))
2321 range += EFX_FARCH_FILTER_MATCH_PRI_COUNT;
2323 return range << EFX_FARCH_FILTER_INDEX_WIDTH | index;
2324 }
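/* Editor's worked example: a TCP full-match RX filter at table index 5
 * has match priority 0, so its ID is (0 << 13) | 5 == 5; the unicast
 * default filter has match priority 4, so its ID is
 * (4 << 13) | EFX_FARCH_FILTER_INDEX_UC_DEF. Stronger RX matches thus
 * always get numerically lower IDs, as RX NFC semantics require.
 */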
2326 static inline enum efx_farch_filter_table_id
2327 efx_farch_filter_id_table_id(u32 id)
2329 unsigned int range = id >> EFX_FARCH_FILTER_INDEX_WIDTH;
2331 if (range < ARRAY_SIZE(efx_farch_filter_range_table))
2332 return efx_farch_filter_range_table[range];
2334 return EFX_FARCH_FILTER_TABLE_COUNT; /* invalid */
2337 static inline unsigned int efx_farch_filter_id_index(u32 id)
2339 return id & EFX_FARCH_FILTER_INDEX_MASK;
2342 u32 efx_farch_filter_get_rx_id_limit(struct efx_nic *efx)
2344 struct efx_farch_filter_state *state = efx->filter_state;
2345 unsigned int range = EFX_FARCH_FILTER_MATCH_PRI_COUNT - 1;
2346 enum efx_farch_filter_table_id table_id;
2348 do {
2349 table_id = efx_farch_filter_range_table[range];
2350 if (state->table[table_id].size != 0)
2351 return range << EFX_FARCH_FILTER_INDEX_WIDTH |
2352 state->table[table_id].size;
2353 } while (range--);

2355 return 0;
2356 }
2358 s32 efx_farch_filter_insert(struct efx_nic *efx,
2359 struct efx_filter_spec *gen_spec,
2360 bool replace_equal)
2361 {
2362 struct efx_farch_filter_state *state = efx->filter_state;
2363 struct efx_farch_filter_table *table;
2364 struct efx_farch_filter_spec spec;
2365 efx_oword_t filter;
2366 int rep_index, ins_index;
2367 unsigned int depth = 0;
2368 int rc;
2370 rc = efx_farch_filter_from_gen_spec(&spec, gen_spec);
2371 if (rc)
2372 return rc;
2374 table = &state->table[efx_farch_filter_spec_table_id(&spec)];
2375 if (table->size == 0)
2376 return -EINVAL;
2378 netif_vdbg(efx, hw, efx->net_dev,
2379 "%s: type %d search_limit=%d", __func__, spec.type,
2380 table->search_limit[spec.type]);
2382 if (table->id == EFX_FARCH_FILTER_TABLE_RX_DEF) {
2383 /* One filter spec per type */
2384 BUILD_BUG_ON(EFX_FARCH_FILTER_INDEX_UC_DEF != 0);
2385 BUILD_BUG_ON(EFX_FARCH_FILTER_INDEX_MC_DEF !=
2386 EFX_FARCH_FILTER_MC_DEF - EFX_FARCH_FILTER_UC_DEF);
2387 rep_index = spec.type - EFX_FARCH_FILTER_UC_DEF;
2388 ins_index = rep_index;
2390 spin_lock_bh(&efx->filter_lock);
2391 } else {
2392 /* Search concurrently for
2393 * (1) a filter to be replaced (rep_index): any filter
2394 * with the same match values, up to the current
2395 * search depth for this type, and
2396 * (2) the insertion point (ins_index): (1) or any
2397 * free slot before it or up to the maximum search
2398 * depth for this priority
2399 * We fail if we cannot find (2).
2401 * We can stop once either
2402 * (a) we find (1), in which case we have definitely
2403 * found (2) as well; or
2404 * (b) we have searched exhaustively for (1), and have
2405 * either found (2) or searched exhaustively for it
2406 */
2407 u32 key = efx_farch_filter_build(&filter, &spec);
2408 unsigned int hash = efx_farch_filter_hash(key);
2409 unsigned int incr = efx_farch_filter_increment(key);
2410 unsigned int max_rep_depth = table->search_limit[spec.type];
2411 unsigned int max_ins_depth =
2412 spec.priority <= EFX_FILTER_PRI_HINT ?
2413 EFX_FARCH_FILTER_CTL_SRCH_HINT_MAX :
2414 EFX_FARCH_FILTER_CTL_SRCH_MAX;
2415 unsigned int i = hash & (table->size - 1);
2417 ins_index = -1;
2418 depth = 1;
2420 spin_lock_bh(&efx->filter_lock);
2422 for (;;) {
2423 if (!test_bit(i, table->used_bitmap)) {
2424 if (ins_index < 0)
2425 ins_index = i;
2426 } else if (efx_farch_filter_equal(&spec,
2427 &table->spec[i])) {
2428 /* Case (a) */
2429 if (ins_index < 0)
2430 ins_index = i;
2431 rep_index = i;
2432 break;
2433 }
2435 if (depth >= max_rep_depth &&
2436 (ins_index >= 0 || depth >= max_ins_depth)) {
2437 /* Case (b) */
2438 if (ins_index < 0) {
2439 rc = -EBUSY;
2440 goto out;
2441 }
2442 rep_index = -1;
2443 break;
2444 }
2446 i = (i + incr) & (table->size - 1);
2447 ++depth;
2448 }
2449 }
2451 /* If we found a filter to be replaced, check whether we
2452 * should replace it
2453 */
2454 if (rep_index >= 0) {
2455 struct efx_farch_filter_spec *saved_spec =
2456 &table->spec[rep_index];
2458 if (spec.priority == saved_spec->priority && !replace_equal) {
2459 rc = -EEXIST;
2460 goto out;
2461 }
2462 if (spec.priority < saved_spec->priority &&
2463 !(saved_spec->priority == EFX_FILTER_PRI_REQUIRED &&
2464 saved_spec->flags & EFX_FILTER_FLAG_RX_STACK)) {
2465 rc = -EPERM;
2466 goto out;
2467 }
2468 if (spec.flags & EFX_FILTER_FLAG_RX_STACK) {
2469 /* Just make sure it won't be removed */
2470 saved_spec->flags |= EFX_FILTER_FLAG_RX_STACK;
2471 rc = 0;
2472 goto out;
2473 }
2474 /* Retain the RX_STACK flag */
2475 spec.flags |= saved_spec->flags & EFX_FILTER_FLAG_RX_STACK;
2476 }
2478 /* Insert the filter */
2479 if (ins_index != rep_index) {
2480 __set_bit(ins_index, table->used_bitmap);
2481 ++table->used;
2482 }
2483 table->spec[ins_index] = spec;
2485 if (table->id == EFX_FARCH_FILTER_TABLE_RX_DEF) {
2486 efx_farch_filter_push_rx_config(efx);
2487 } else {
2488 if (table->search_limit[spec.type] < depth) {
2489 table->search_limit[spec.type] = depth;
2490 if (spec.flags & EFX_FILTER_FLAG_TX)
2491 efx_farch_filter_push_tx_limits(efx);
2492 else
2493 efx_farch_filter_push_rx_config(efx);
2494 }
2496 efx_writeo(efx, &filter,
2497 table->offset + table->step * ins_index);
2499 /* If we were able to replace a filter by inserting
2500 * at a lower depth, clear the replaced filter
2501 */
2502 if (ins_index != rep_index && rep_index >= 0)
2503 efx_farch_filter_table_clear_entry(efx, table,
2504 rep_index);
2505 }
2507 netif_vdbg(efx, hw, efx->net_dev,
2508 "%s: filter type %d index %d rxq %u set",
2509 __func__, spec.type, ins_index, spec.dmaq_id);
2510 rc = efx_farch_filter_make_id(&spec, ins_index);
2512 out:
2513 spin_unlock_bh(&efx->filter_lock);
2514 return rc;
2515 }

2517 static void
2518 efx_farch_filter_table_clear_entry(struct efx_nic *efx,
2519 struct efx_farch_filter_table *table,
2520 unsigned int filter_idx)
2522 static efx_oword_t filter;
2524 EFX_WARN_ON_PARANOID(!test_bit(filter_idx, table->used_bitmap));
2525 BUG_ON(table->offset == 0); /* can't clear MAC default filters */
2527 __clear_bit(filter_idx, table->used_bitmap);
2528 --table->used;
2529 memset(&table->spec[filter_idx], 0, sizeof(table->spec[0]));
2531 efx_writeo(efx, &filter, table->offset + table->step * filter_idx);
2533 /* If this filter required a greater search depth than
2534 * any other, the search limit for its type can now be
2535 * decreased. However, it is hard to determine that
2536 * unless the table has become completely empty - in
2537 * which case, all its search limits can be set to 0.
2538 */
2539 if (unlikely(table->used == 0)) {
2540 memset(table->search_limit, 0, sizeof(table->search_limit));
2541 if (table->id == EFX_FARCH_FILTER_TABLE_TX_MAC)
2542 efx_farch_filter_push_tx_limits(efx);
2543 else
2544 efx_farch_filter_push_rx_config(efx);
2545 }
2546 }
2548 static int efx_farch_filter_remove(struct efx_nic *efx,
2549 struct efx_farch_filter_table *table,
2550 unsigned int filter_idx,
2551 enum efx_filter_priority priority)
2553 struct efx_farch_filter_spec *spec = &table->spec[filter_idx];
2555 if (!test_bit(filter_idx, table->used_bitmap) ||
2556 spec->priority > priority)
2557 return -ENOENT;
2559 if (spec->flags & EFX_FILTER_FLAG_RX_STACK) {
2560 efx_farch_filter_init_rx_for_stack(efx, spec);
2561 efx_farch_filter_push_rx_config(efx);
2562 } else {
2563 efx_farch_filter_table_clear_entry(efx, table, filter_idx);
2564 }

2566 return 0;
2567 }
2569 int efx_farch_filter_remove_safe(struct efx_nic *efx,
2570 enum efx_filter_priority priority,
2571 u32 filter_id)
2572 {
2573 struct efx_farch_filter_state *state = efx->filter_state;
2574 enum efx_farch_filter_table_id table_id;
2575 struct efx_farch_filter_table *table;
2576 unsigned int filter_idx;
2577 struct efx_farch_filter_spec *spec;
2578 int rc;
2580 table_id = efx_farch_filter_id_table_id(filter_id);
2581 if ((unsigned int)table_id >= EFX_FARCH_FILTER_TABLE_COUNT)
2582 return -ENOENT;
2583 table = &state->table[table_id];
2585 filter_idx = efx_farch_filter_id_index(filter_id);
2586 if (filter_idx >= table->size)
2587 return -ENOENT;
2588 spec = &table->spec[filter_idx];
2590 spin_lock_bh(&efx->filter_lock);
2591 rc = efx_farch_filter_remove(efx, table, filter_idx, priority);
2592 spin_unlock_bh(&efx->filter_lock);
2594 return rc;
2595 }
2597 int efx_farch_filter_get_safe(struct efx_nic *efx,
2598 enum efx_filter_priority priority,
2599 u32 filter_id, struct efx_filter_spec *spec_buf)
2601 struct efx_farch_filter_state *state = efx->filter_state;
2602 enum efx_farch_filter_table_id table_id;
2603 struct efx_farch_filter_table *table;
2604 struct efx_farch_filter_spec *spec;
2605 unsigned int filter_idx;
2606 int rc;
2608 table_id = efx_farch_filter_id_table_id(filter_id);
2609 if ((unsigned int)table_id >= EFX_FARCH_FILTER_TABLE_COUNT)
2610 return -ENOENT;
2611 table = &state->table[table_id];
2613 filter_idx = efx_farch_filter_id_index(filter_id);
2614 if (filter_idx >= table->size)
2615 return -ENOENT;
2616 spec = &table->spec[filter_idx];
2618 spin_lock_bh(&efx->filter_lock);
2620 if (test_bit(filter_idx, table->used_bitmap) &&
2621 spec->priority == priority) {
2622 efx_farch_filter_to_gen_spec(spec_buf, spec);
2623 rc = 0;
2624 } else {
2625 rc = -ENOENT;
2626 }
2628 spin_unlock_bh(&efx->filter_lock);
2630 return rc;
2631 }

2633 static void
2634 efx_farch_filter_table_clear(struct efx_nic *efx,
2635 enum efx_farch_filter_table_id table_id,
2636 enum efx_filter_priority priority)
2638 struct efx_farch_filter_state *state = efx->filter_state;
2639 struct efx_farch_filter_table *table = &state->table[table_id];
2640 unsigned int filter_idx;
2642 spin_lock_bh(&efx->filter_lock);
2643 for (filter_idx = 0; filter_idx < table->size; ++filter_idx)
2644 efx_farch_filter_remove(efx, table, filter_idx, priority);
2645 spin_unlock_bh(&efx->filter_lock);
2648 void efx_farch_filter_clear_rx(struct efx_nic *efx,
2649 enum efx_filter_priority priority)
2651 efx_farch_filter_table_clear(efx, EFX_FARCH_FILTER_TABLE_RX_IP,
2652 priority);
2653 efx_farch_filter_table_clear(efx, EFX_FARCH_FILTER_TABLE_RX_MAC,
2654 priority);
2655 efx_farch_filter_table_clear(efx, EFX_FARCH_FILTER_TABLE_RX_DEF,
2656 priority);
2657 }
2659 u32 efx_farch_filter_count_rx_used(struct efx_nic *efx,
2660 enum efx_filter_priority priority)
2662 struct efx_farch_filter_state *state = efx->filter_state;
2663 enum efx_farch_filter_table_id table_id;
2664 struct efx_farch_filter_table *table;
2665 unsigned int filter_idx;
2666 u32 count = 0;
2668 spin_lock_bh(&efx->filter_lock);
2670 for (table_id = EFX_FARCH_FILTER_TABLE_RX_IP;
2671 table_id <= EFX_FARCH_FILTER_TABLE_RX_DEF;
2672 table_id++) {
2673 table = &state->table[table_id];
2674 for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
2675 if (test_bit(filter_idx, table->used_bitmap) &&
2676 table->spec[filter_idx].priority == priority)
2677 ++count;
2678 }
2679 }
2681 spin_unlock_bh(&efx->filter_lock);
2683 return count;
2684 }
2686 s32 efx_farch_filter_get_rx_ids(struct efx_nic *efx,
2687 enum efx_filter_priority priority,
2688 u32 *buf, u32 size)
2689 {
2690 struct efx_farch_filter_state *state = efx->filter_state;
2691 enum efx_farch_filter_table_id table_id;
2692 struct efx_farch_filter_table *table;
2693 unsigned int filter_idx;
2694 s32 count = 0;
2696 spin_lock_bh(&efx->filter_lock);
2698 for (table_id = EFX_FARCH_FILTER_TABLE_RX_IP;
2699 table_id <= EFX_FARCH_FILTER_TABLE_RX_DEF;
2700 table_id++) {
2701 table = &state->table[table_id];
2702 for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
2703 if (test_bit(filter_idx, table->used_bitmap) &&
2704 table->spec[filter_idx].priority == priority) {
2705 if (count == size) {
2706 count = -EMSGSIZE;
2707 goto out;
2708 }
2709 buf[count++] = efx_farch_filter_make_id(
2710 &table->spec[filter_idx], filter_idx);
2711 }
2712 }
2713 }
2714 out:
2715 spin_unlock_bh(&efx->filter_lock);
2717 return count;
2718 }
2720 /* Restore filter state after reset */
2721 void efx_farch_filter_table_restore(struct efx_nic *efx)
2723 struct efx_farch_filter_state *state = efx->filter_state;
2724 enum efx_farch_filter_table_id table_id;
2725 struct efx_farch_filter_table *table;
2726 efx_oword_t filter;
2727 unsigned int filter_idx;
2729 spin_lock_bh(&efx->filter_lock);
2731 for (table_id = 0; table_id < EFX_FARCH_FILTER_TABLE_COUNT; table_id++) {
2732 table = &state->table[table_id];
2734 /* Check whether this is a regular register table */
2735 if (table->step == 0)
2736 continue;
2738 for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
2739 if (!test_bit(filter_idx, table->used_bitmap))
2740 continue;
2741 efx_farch_filter_build(&filter, &table->spec[filter_idx]);
2742 efx_writeo(efx, &filter,
2743 table->offset + table->step * filter_idx);
2747 efx_farch_filter_push_rx_config(efx);
2748 efx_farch_filter_push_tx_limits(efx);
2750 spin_unlock_bh(&efx->filter_lock);
2753 void efx_farch_filter_table_remove(struct efx_nic *efx)
2755 struct efx_farch_filter_state *state = efx->filter_state;
2756 enum efx_farch_filter_table_id table_id;
2758 for (table_id = 0; table_id < EFX_FARCH_FILTER_TABLE_COUNT; table_id++) {
2759 kfree(state->table[table_id].used_bitmap);
2760 vfree(state->table[table_id].spec);
2761 }
2762 kfree(state);
2763 }
2765 int efx_farch_filter_table_probe(struct efx_nic *efx)
2767 struct efx_farch_filter_state *state;
2768 struct efx_farch_filter_table *table;
2769 unsigned table_id;
2771 state = kzalloc(sizeof(struct efx_farch_filter_state), GFP_KERNEL);
2772 if (!state)
2773 return -ENOMEM;
2774 efx->filter_state = state;
2776 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
2777 table = &state->table[EFX_FARCH_FILTER_TABLE_RX_IP];
2778 table->id = EFX_FARCH_FILTER_TABLE_RX_IP;
2779 table->offset = FR_BZ_RX_FILTER_TBL0;
2780 table->size = FR_BZ_RX_FILTER_TBL0_ROWS;
2781 table->step = FR_BZ_RX_FILTER_TBL0_STEP;
2784 if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) {
2785 table = &state->table[EFX_FARCH_FILTER_TABLE_RX_MAC];
2786 table->id = EFX_FARCH_FILTER_TABLE_RX_MAC;
2787 table->offset = FR_CZ_RX_MAC_FILTER_TBL0;
2788 table->size = FR_CZ_RX_MAC_FILTER_TBL0_ROWS;
2789 table->step = FR_CZ_RX_MAC_FILTER_TBL0_STEP;
2791 table = &state->table[EFX_FARCH_FILTER_TABLE_RX_DEF];
2792 table->id = EFX_FARCH_FILTER_TABLE_RX_DEF;
2793 table->size = EFX_FARCH_FILTER_SIZE_RX_DEF;
2795 table = &state->table[EFX_FARCH_FILTER_TABLE_TX_MAC];
2796 table->id = EFX_FARCH_FILTER_TABLE_TX_MAC;
2797 table->offset = FR_CZ_TX_MAC_FILTER_TBL0;
2798 table->size = FR_CZ_TX_MAC_FILTER_TBL0_ROWS;
2799 table->step = FR_CZ_TX_MAC_FILTER_TBL0_STEP;
2800 }
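/* Editor's note: the RX IP table is the largest of these (8192 rows on
 * Falcon B0 and later, per the farch register definitions), which is why
 * EFX_FARCH_FILTER_INDEX_WIDTH is 13: 1 << 13 == 8192 indexes any entry.
 */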
2802 for (table_id = 0; table_id < EFX_FARCH_FILTER_TABLE_COUNT; table_id++) {
2803 table = &state->table[table_id];
2804 if (table->size == 0)
2805 continue;
2806 table->used_bitmap = kcalloc(BITS_TO_LONGS(table->size),
2807 sizeof(unsigned long),
2808 GFP_KERNEL);
2809 if (!table->used_bitmap)
2810 goto fail;
2811 table->spec = vzalloc(table->size * sizeof(*table->spec));
2812 if (!table->spec)
2813 goto fail;
2814 }
2816 table = &state->table[EFX_FARCH_FILTER_TABLE_RX_DEF];
2817 if (table->size) {
2818 /* RX default filters must always exist */
2819 struct efx_farch_filter_spec *spec;
2820 unsigned i;
2822 for (i = 0; i < EFX_FARCH_FILTER_SIZE_RX_DEF; i++) {
2823 spec = &table->spec[i];
2824 spec->type = EFX_FARCH_FILTER_UC_DEF + i;
2825 efx_farch_filter_init_rx_for_stack(efx, spec);
2826 __set_bit(i, table->used_bitmap);
2827 }
2828 }
2830 efx_farch_filter_push_rx_config(efx);
2832 return 0;

2834 fail:
2835 efx_farch_filter_table_remove(efx);
2836 return -ENOMEM;
2837 }
2839 /* Update scatter enable flags for filters pointing to our own RX queues */
2840 void efx_farch_filter_update_rx_scatter(struct efx_nic *efx)
2842 struct efx_farch_filter_state *state = efx->filter_state;
2843 enum efx_farch_filter_table_id table_id;
2844 struct efx_farch_filter_table *table;
2845 efx_oword_t filter;
2846 unsigned int filter_idx;
2848 spin_lock_bh(&efx->filter_lock);
2850 for (table_id = EFX_FARCH_FILTER_TABLE_RX_IP;
2851 table_id <= EFX_FARCH_FILTER_TABLE_RX_DEF;
2852 table_id++) {
2853 table = &state->table[table_id];
2855 for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
2856 if (!test_bit(filter_idx, table->used_bitmap) ||
2857 table->spec[filter_idx].dmaq_id >=
2858 efx->n_rx_channels)
2859 continue;
2861 if (efx->rx_scatter)
2862 table->spec[filter_idx].flags |=
2863 EFX_FILTER_FLAG_RX_SCATTER;
2864 else
2865 table->spec[filter_idx].flags &=
2866 ~EFX_FILTER_FLAG_RX_SCATTER;
2868 if (table_id == EFX_FARCH_FILTER_TABLE_RX_DEF)
2869 /* Pushed by efx_farch_filter_push_rx_config() */
2870 continue;
2872 efx_farch_filter_build(&filter, &table->spec[filter_idx]);
2873 efx_writeo(efx, &filter,
2874 table->offset + table->step * filter_idx);
2878 efx_farch_filter_push_rx_config(efx);
2880 spin_unlock_bh(&efx->filter_lock);
2883 #ifdef CONFIG_RFS_ACCEL
2885 s32 efx_farch_filter_rfs_insert(struct efx_nic *efx,
2886 struct efx_filter_spec *gen_spec)
2888 return efx_farch_filter_insert(efx, gen_spec, true);
2889 }

2891 bool efx_farch_filter_rfs_expire_one(struct efx_nic *efx, u32 flow_id,
2892 unsigned int index)
2893 {
2894 struct efx_farch_filter_state *state = efx->filter_state;
2895 struct efx_farch_filter_table *table =
2896 &state->table[EFX_FARCH_FILTER_TABLE_RX_IP];
2898 if (test_bit(index, table->used_bitmap) &&
2899 table->spec[index].priority == EFX_FILTER_PRI_HINT &&
2900 rps_may_expire_flow(efx->net_dev, table->spec[index].dmaq_id,
2901 flow_id, index)) {
2902 efx_farch_filter_table_clear_entry(efx, table, index);
2903 return true;
2904 }

2906 return false;
2907 }
2909 #endif /* CONFIG_RFS_ACCEL */
2911 void efx_farch_filter_sync_rx_mode(struct efx_nic *efx)
2913 struct net_device *net_dev = efx->net_dev;
2914 struct netdev_hw_addr *ha;
2915 union efx_multicast_hash *mc_hash = &efx->multicast_hash;
2916 u32 crc;
2917 int bit;
2919 netif_addr_lock_bh(net_dev);
2921 efx->unicast_filter = !(net_dev->flags & IFF_PROMISC);
2923 /* Build multicast hash table */
2924 if (net_dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) {
2925 memset(mc_hash, 0xff, sizeof(*mc_hash));
2926 } else {
2927 memset(mc_hash, 0x00, sizeof(*mc_hash));
2928 netdev_for_each_mc_addr(ha, net_dev) {
2929 crc = ether_crc_le(ETH_ALEN, ha->addr);
2930 bit = crc & (EFX_MCAST_HASH_ENTRIES - 1);
2931 __set_bit_le(bit, mc_hash);
2932 }
2934 /* Broadcast packets go through the multicast hash filter.
2935 * ether_crc_le() of the broadcast address is 0xbe2612ff
2936 * so we always add bit 0xff to the mask.
2937 */
2938 __set_bit_le(0xff, mc_hash);
2940 }
2941 netif_addr_unlock_bh(net_dev);
2942 }
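/* Editor's sketch (not part of the original driver): the bucket selection
 * used in the loop above, written as a hypothetical helper. For the
 * broadcast address ff:ff:ff:ff:ff:ff, ether_crc_le() yields 0xbe2612ff,
 * so the bucket is 0xff, the bit forced on above.
 */
static inline unsigned int efx_farch_mcast_hash_bucket(const u8 *addr)
{
	/* the low-order CRC bits select one of EFX_MCAST_HASH_ENTRIES bits */
	return ether_crc_le(ETH_ALEN, addr) & (EFX_MCAST_HASH_ENTRIES - 1);
}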