/****************************************************************************
 * Driver for Solarflare Solarstorm network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2006-2009 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include "net_driver.h"
#include "bitfield.h"
#include "efx.h"
#include "nic.h"
#include "regs.h"
#include "io.h"
#include "workarounds.h"

/**************************************************************************
 *
 * Configurable values
 *
 **************************************************************************/

/* This is set to 16 for a good reason.  In summary, if larger than
 * 16, the descriptor cache holds more than a default socket
 * buffer's worth of packets (for UDP we can only have at most one
 * socket buffer's worth outstanding).  This combined with the fact
 * that we only get 1 TX event per descriptor cache means the NIC
 * goes idle.
 */
#define TX_DC_ENTRIES 16
#define TX_DC_ENTRIES_ORDER 1

#define RX_DC_ENTRIES 64
#define RX_DC_ENTRIES_ORDER 3

/* RX FIFO XOFF watermark
 *
 * When the amount of the RX FIFO used increases past this
 * watermark, send XOFF. Only used if RX flow control is enabled (ethtool -A).
 * This also has an effect on RX/TX arbitration.
 */
int efx_nic_rx_xoff_thresh = -1;
module_param_named(rx_xoff_thresh_bytes, efx_nic_rx_xoff_thresh, int, 0644);
MODULE_PARM_DESC(rx_xoff_thresh_bytes, "RX fifo XOFF threshold");

/* RX FIFO XON watermark
 *
 * When the amount of the RX FIFO used decreases below this
 * watermark, send XON. Only used if RX flow control is enabled (ethtool -A).
 * This also has an effect on RX/TX arbitration.
 */
int efx_nic_rx_xon_thresh = -1;
module_param_named(rx_xon_thresh_bytes, efx_nic_rx_xon_thresh, int, 0644);
MODULE_PARM_DESC(rx_xon_thresh_bytes, "RX fifo XON threshold");

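/* Example (illustrative, not part of the driver): with pause frames
 * enabled via "ethtool -A <if> rx on", the defaults could be overridden
 * at module load time with something like
 *
 *	modprobe sfc rx_xoff_thresh_bytes=196608 rx_xon_thresh_bytes=131072
 *
 * assuming the module is named "sfc"; the special value -1 keeps the
 * driver's default watermark.
 */
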
/* If EFX_MAX_INT_ERRORS internal errors occur within
 * EFX_INT_ERROR_EXPIRE seconds, we consider the NIC broken and
 * disable it.
 */
#define EFX_INT_ERROR_EXPIRE 3600
#define EFX_MAX_INT_ERRORS 5

/* We poll for events every FLUSH_INTERVAL ms, and check FLUSH_POLL_COUNT times
 */
#define EFX_FLUSH_INTERVAL 10
#define EFX_FLUSH_POLL_COUNT 100

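/* Worked example (illustrative): the flush logic below therefore waits
 * at most EFX_FLUSH_INTERVAL * EFX_FLUSH_POLL_COUNT = 10 ms * 100 =
 * 1 second before efx_nic_flush_queues() gives up and reports a timeout.
 */
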
/* Size and alignment of special buffers (4KB) */
#define EFX_BUF_SIZE 4096

/* Depth of RX flush request fifo */
#define EFX_RX_FLUSH_COUNT 4

/* Generated event code for efx_nic_generate_test_event() */
#define EFX_CHANNEL_MAGIC_TEST(_channel)	\
	(0x00010100 + (_channel)->channel)

/* Generated event code for efx_nic_generate_fill_event() */
#define EFX_CHANNEL_MAGIC_FILL(_channel)	\
	(0x00010200 + (_channel)->channel)

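/* Worked example (illustrative): on channel 2 the test magic is
 * 0x00010100 + 2 = 0x00010102 and the fill magic is 0x00010202;
 * efx_handle_generated_event() compares the event payload against these
 * exact values to tell the two apart.
 */
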
/**************************************************************************
 *
 * Solarstorm hardware access
 *
 **************************************************************************/

static inline void efx_write_buf_tbl(struct efx_nic *efx, efx_qword_t *value,
				     unsigned int index)
{
	efx_sram_writeq(efx, efx->membase + efx->type->buf_tbl_base,
			value, index);
}

/* Read the current event from the event queue */
static inline efx_qword_t *efx_event(struct efx_channel *channel,
				     unsigned int index)
{
	return ((efx_qword_t *) (channel->eventq.addr)) + index;
}

/* See if an event is present
 *
 * We check both the high and low dword of the event for all ones.  We
 * wrote all ones when we cleared the event, and no valid event can
 * have all ones in either its high or low dwords.  This approach is
 * robust against reordering.
 *
 * Note that using a single 64-bit comparison is incorrect; even
 * though the CPU read will be atomic, the DMA write may not be.
 */
static inline int efx_event_present(efx_qword_t *event)
{
	return !(EFX_DWORD_IS_ALL_ONES(event->dword[0]) |
		 EFX_DWORD_IS_ALL_ONES(event->dword[1]));
}

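/* Example (illustrative sketch): a just-cleared event slot reads back as
 * all ones and is never "present":
 *
 *	efx_qword_t ev;
 *	EFX_SET_QWORD(ev);			// all ones, as written on clear
 *	WARN_ON(efx_event_present(&ev));	// not present
 *
 * A partially DMA-written event still has one all-ones dword, so it is
 * seen as absent now and complete on a later poll.
 */
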
static bool efx_masked_compare_oword(const efx_oword_t *a, const efx_oword_t *b,
				     const efx_oword_t *mask)
{
	return ((a->u64[0] ^ b->u64[0]) & mask->u64[0]) ||
		((a->u64[1] ^ b->u64[1]) & mask->u64[1]);
}

int efx_nic_test_registers(struct efx_nic *efx,
			   const struct efx_nic_register_test *regs,
			   size_t n_regs)
{
	unsigned address = 0, i, j;
	efx_oword_t mask, imask, original, reg, buf;

	/* Falcon should be in loopback to isolate the XMAC from the PHY */
	WARN_ON(!LOOPBACK_INTERNAL(efx));

	for (i = 0; i < n_regs; ++i) {
		address = regs[i].address;
		mask = imask = regs[i].mask;
		EFX_INVERT_OWORD(imask);

		efx_reado(efx, &original, address);

		/* bit sweep on and off */
		for (j = 0; j < 128; j++) {
			if (!EFX_EXTRACT_OWORD32(mask, j, j))
				continue;

			/* Test this testable bit can be set in isolation */
			EFX_AND_OWORD(reg, original, mask);
			EFX_SET_OWORD32(reg, j, j, 1);

			efx_writeo(efx, &reg, address);
			efx_reado(efx, &buf, address);

			if (efx_masked_compare_oword(&reg, &buf, &mask))
				goto fail;

			/* Test this testable bit can be cleared in isolation */
			EFX_OR_OWORD(reg, original, mask);
			EFX_SET_OWORD32(reg, j, j, 0);

			efx_writeo(efx, &reg, address);
			efx_reado(efx, &buf, address);

			if (efx_masked_compare_oword(&reg, &buf, &mask))
				goto fail;
		}

		efx_writeo(efx, &original, address);
	}

	return 0;

fail:
	netif_err(efx, hw, efx->net_dev,
		  "wrote "EFX_OWORD_FMT" read "EFX_OWORD_FMT
		  " at address 0x%x mask "EFX_OWORD_FMT"\n", EFX_OWORD_VAL(reg),
		  EFX_OWORD_VAL(buf), address, EFX_OWORD_VAL(mask));
	return -EIO;
}

/**************************************************************************
 *
 * Special buffer handling
 * Special buffers are used for event queues and the TX and RX
 * descriptor rings.
 *
 *************************************************************************/

/*
 * Initialise a special buffer
 *
 * This will define a buffer (previously allocated via
 * efx_alloc_special_buffer()) in the buffer table, allowing
 * it to be used for event queues, descriptor rings etc.
 */
static void
efx_init_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
{
	efx_qword_t buf_desc;
	int index;
	dma_addr_t dma_addr;
	int i;

	EFX_BUG_ON_PARANOID(!buffer->addr);

	/* Write buffer descriptors to NIC */
	for (i = 0; i < buffer->entries; i++) {
		index = buffer->index + i;
		dma_addr = buffer->dma_addr + (i * 4096);
		netif_dbg(efx, probe, efx->net_dev,
			  "mapping special buffer %d at %llx\n",
			  index, (unsigned long long)dma_addr);
		EFX_POPULATE_QWORD_3(buf_desc,
				     FRF_AZ_BUF_ADR_REGION, 0,
				     FRF_AZ_BUF_ADR_FBUF, dma_addr >> 12,
				     FRF_AZ_BUF_OWNER_ID_FBUF, 0);
		efx_write_buf_tbl(efx, &buf_desc, index);
	}
}

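/* Worked example (illustrative): a page of the buffer at DMA address
 * 0x12345000 lies in 4KB page 0x12345000 >> 12 = 0x12345, which is the
 * value programmed into FRF_AZ_BUF_ADR_FBUF above.
 */
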
/* Unmaps a buffer and clears the buffer table entries */
static void
efx_fini_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
{
	efx_oword_t buf_tbl_upd;
	unsigned int start = buffer->index;
	unsigned int end = (buffer->index + buffer->entries - 1);

	if (!buffer->entries)
		return;

	netif_dbg(efx, hw, efx->net_dev, "unmapping special buffers %d-%d\n",
		  buffer->index, buffer->index + buffer->entries - 1);

	EFX_POPULATE_OWORD_4(buf_tbl_upd,
			     FRF_AZ_BUF_UPD_CMD, 0,
			     FRF_AZ_BUF_CLR_CMD, 1,
			     FRF_AZ_BUF_CLR_END_ID, end,
			     FRF_AZ_BUF_CLR_START_ID, start);
	efx_writeo(efx, &buf_tbl_upd, FR_AZ_BUF_TBL_UPD);
}

/*
 * Allocate a new special buffer
 *
 * This allocates memory for a new buffer, clears it and allocates a
 * new buffer ID range.  It does not write into the buffer table.
 *
 * This call will allocate 4KB buffers, since 8KB buffers can't be
 * used for event queues and descriptor rings.
 */
static int efx_alloc_special_buffer(struct efx_nic *efx,
				    struct efx_special_buffer *buffer,
				    unsigned int len)
{
	len = ALIGN(len, EFX_BUF_SIZE);

	buffer->addr = dma_alloc_coherent(&efx->pci_dev->dev, len,
					  &buffer->dma_addr, GFP_KERNEL);
	if (!buffer->addr)
		return -ENOMEM;
	buffer->len = len;
	buffer->entries = len / EFX_BUF_SIZE;
	BUG_ON(buffer->dma_addr & (EFX_BUF_SIZE - 1));

	/* All zeros is a potentially valid event so memset to 0xff */
	memset(buffer->addr, 0xff, len);

	/* Select new buffer ID */
	buffer->index = efx->next_buffer_table;
	efx->next_buffer_table += buffer->entries;

	netif_dbg(efx, probe, efx->net_dev,
		  "allocating special buffers %d-%d at %llx+%x "
		  "(virt %p phys %llx)\n", buffer->index,
		  buffer->index + buffer->entries - 1,
		  (u64)buffer->dma_addr, len,
		  buffer->addr, (u64)virt_to_phys(buffer->addr));

	return 0;
}

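/* Worked example (illustrative): a 4096-entry descriptor ring needs
 * 4096 * sizeof(efx_qword_t) = 32768 bytes; ALIGN() leaves this at 32KB,
 * which consumes 32768 / EFX_BUF_SIZE = 8 buffer table entries.
 */
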
static void
efx_free_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
{
	if (!buffer->addr)
		return;

	netif_dbg(efx, hw, efx->net_dev,
		  "deallocating special buffers %d-%d at %llx+%x "
		  "(virt %p phys %llx)\n", buffer->index,
		  buffer->index + buffer->entries - 1,
		  (u64)buffer->dma_addr, buffer->len,
		  buffer->addr, (u64)virt_to_phys(buffer->addr));

	dma_free_coherent(&efx->pci_dev->dev, buffer->len, buffer->addr,
			  buffer->dma_addr);
	buffer->addr = NULL;
	buffer->entries = 0;
}

/**************************************************************************
 *
 * Generic buffer handling
 * These buffers are used for interrupt status and MAC stats
 *
 **************************************************************************/

int efx_nic_alloc_buffer(struct efx_nic *efx, struct efx_buffer *buffer,
			 unsigned int len)
{
	buffer->addr = pci_alloc_consistent(efx->pci_dev, len,
					    &buffer->dma_addr);
	if (!buffer->addr)
		return -ENOMEM;
	buffer->len = len;
	memset(buffer->addr, 0, len);
	return 0;
}

void efx_nic_free_buffer(struct efx_nic *efx, struct efx_buffer *buffer)
{
	if (buffer->addr) {
		pci_free_consistent(efx->pci_dev, buffer->len,
				    buffer->addr, buffer->dma_addr);
		buffer->addr = NULL;
	}
}

/**************************************************************************
 *
 * TX path
 *
 **************************************************************************/

/* Returns a pointer to the specified transmit descriptor in the TX
 * descriptor queue belonging to the specified channel.
 */
static inline efx_qword_t *
efx_tx_desc(struct efx_tx_queue *tx_queue, unsigned int index)
{
	return ((efx_qword_t *) (tx_queue->txd.addr)) + index;
}

/* This writes to the TX_DESC_WPTR; write pointer for TX descriptor ring */
static inline void efx_notify_tx_desc(struct efx_tx_queue *tx_queue)
{
	unsigned write_ptr;
	efx_dword_t reg;

	write_ptr = tx_queue->write_count & EFX_TXQ_MASK;
	EFX_POPULATE_DWORD_1(reg, FRF_AZ_TX_DESC_WPTR_DWORD, write_ptr);
	efx_writed_page(tx_queue->efx, &reg,
			FR_AZ_TX_DESC_UPD_DWORD_P0, tx_queue->queue);
}

/* For each entry inserted into the software descriptor ring, create a
 * descriptor in the hardware TX descriptor ring (in host memory), and
 * then write the doorbell.
 */
void efx_nic_push_buffers(struct efx_tx_queue *tx_queue)
{
	struct efx_tx_buffer *buffer;
	efx_qword_t *txd;
	unsigned write_ptr;

	BUG_ON(tx_queue->write_count == tx_queue->insert_count);

	do {
		write_ptr = tx_queue->write_count & EFX_TXQ_MASK;
		buffer = &tx_queue->buffer[write_ptr];
		txd = efx_tx_desc(tx_queue, write_ptr);
		++tx_queue->write_count;

		/* Create TX descriptor ring entry */
		EFX_POPULATE_QWORD_4(*txd,
				     FSF_AZ_TX_KER_CONT, buffer->continuation,
				     FSF_AZ_TX_KER_BYTE_COUNT, buffer->len,
				     FSF_AZ_TX_KER_BUF_REGION, 0,
				     FSF_AZ_TX_KER_BUF_ADDR, buffer->dma_addr);
	} while (tx_queue->write_count != tx_queue->insert_count);

	wmb(); /* Ensure descriptors are written before they are fetched */
	efx_notify_tx_desc(tx_queue);
}

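/* Example (illustrative): write_count increments without bound and is
 * reduced modulo the ring size only when used as an index, so on a
 * 4096-entry ring a write_count of 4097 maps to descriptor
 * 4097 & EFX_TXQ_MASK = 1.
 */
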
/* Allocate hardware resources for a TX queue */
int efx_nic_probe_tx(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;

	BUILD_BUG_ON(EFX_TXQ_SIZE < 512 || EFX_TXQ_SIZE > 4096 ||
		     EFX_TXQ_SIZE & EFX_TXQ_MASK);
	return efx_alloc_special_buffer(efx, &tx_queue->txd,
					EFX_TXQ_SIZE * sizeof(efx_qword_t));
}

void efx_nic_init_tx(struct efx_tx_queue *tx_queue)
{
	efx_oword_t tx_desc_ptr;
	struct efx_nic *efx = tx_queue->efx;

	tx_queue->flushed = FLUSH_NONE;

	/* Pin TX descriptor ring */
	efx_init_special_buffer(efx, &tx_queue->txd);

	/* Push TX descriptor ring to card */
	EFX_POPULATE_OWORD_10(tx_desc_ptr,
			      FRF_AZ_TX_DESCQ_EN, 1,
			      FRF_AZ_TX_ISCSI_DDIG_EN, 0,
			      FRF_AZ_TX_ISCSI_HDIG_EN, 0,
			      FRF_AZ_TX_DESCQ_BUF_BASE_ID, tx_queue->txd.index,
			      FRF_AZ_TX_DESCQ_EVQ_ID,
			      tx_queue->channel->channel,
			      FRF_AZ_TX_DESCQ_OWNER_ID, 0,
			      FRF_AZ_TX_DESCQ_LABEL, tx_queue->queue,
			      FRF_AZ_TX_DESCQ_SIZE,
			      __ffs(tx_queue->txd.entries),
			      FRF_AZ_TX_DESCQ_TYPE, 0,
			      FRF_BZ_TX_NON_IP_DROP_DIS, 1);

	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
		int csum = tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD;
		EFX_SET_OWORD_FIELD(tx_desc_ptr, FRF_BZ_TX_IP_CHKSM_DIS, !csum);
		EFX_SET_OWORD_FIELD(tx_desc_ptr, FRF_BZ_TX_TCP_CHKSM_DIS,
				    !csum);
	}

	efx_writeo_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base,
			 tx_queue->queue);

	if (efx_nic_rev(efx) < EFX_REV_FALCON_B0) {
		efx_oword_t reg;

		/* Only 128 bits in this register */
		BUILD_BUG_ON(EFX_MAX_TX_QUEUES > 128);

		efx_reado(efx, &reg, FR_AA_TX_CHKSM_CFG);
		if (tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD)
			clear_bit_le(tx_queue->queue, (void *)&reg);
		else
			set_bit_le(tx_queue->queue, (void *)&reg);
		efx_writeo(efx, &reg, FR_AA_TX_CHKSM_CFG);
	}
}

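/* Worked example (illustrative): FRF_AZ_TX_DESCQ_SIZE is programmed as
 * __ffs() of the number of 4KB special buffers backing the ring.  A
 * 4096-descriptor ring occupies 8 buffers, so the field is __ffs(8) = 3,
 * which the hardware reads as a 4096-entry queue (sizes being encoded
 * as 512 << n).
 */
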
static void efx_flush_tx_queue(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	efx_oword_t tx_flush_descq;

	tx_queue->flushed = FLUSH_PENDING;

	/* Post a flush command */
	EFX_POPULATE_OWORD_2(tx_flush_descq,
			     FRF_AZ_TX_FLUSH_DESCQ_CMD, 1,
			     FRF_AZ_TX_FLUSH_DESCQ, tx_queue->queue);
	efx_writeo(efx, &tx_flush_descq, FR_AZ_TX_FLUSH_DESCQ);
}

void efx_nic_fini_tx(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	efx_oword_t tx_desc_ptr;

	/* The queue should have been flushed */
	WARN_ON(tx_queue->flushed != FLUSH_DONE);

	/* Remove TX descriptor ring from card */
	EFX_ZERO_OWORD(tx_desc_ptr);
	efx_writeo_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base,
			 tx_queue->queue);

	/* Unpin TX descriptor ring */
	efx_fini_special_buffer(efx, &tx_queue->txd);
}

/* Free buffers backing TX queue */
void efx_nic_remove_tx(struct efx_tx_queue *tx_queue)
{
	efx_free_special_buffer(tx_queue->efx, &tx_queue->txd);
}

/**************************************************************************
 *
 * RX path
 *
 **************************************************************************/

/* Returns a pointer to the specified descriptor in the RX descriptor queue */
static inline efx_qword_t *
efx_rx_desc(struct efx_rx_queue *rx_queue, unsigned int index)
{
	return ((efx_qword_t *) (rx_queue->rxd.addr)) + index;
}

/* This creates an entry in the RX descriptor queue */
static inline void
efx_build_rx_desc(struct efx_rx_queue *rx_queue, unsigned index)
{
	struct efx_rx_buffer *rx_buf;
	efx_qword_t *rxd;

	rxd = efx_rx_desc(rx_queue, index);
	rx_buf = efx_rx_buffer(rx_queue, index);
	EFX_POPULATE_QWORD_3(*rxd,
			     FSF_AZ_RX_KER_BUF_SIZE,
			     rx_buf->len -
			     rx_queue->efx->type->rx_buffer_padding,
			     FSF_AZ_RX_KER_BUF_REGION, 0,
			     FSF_AZ_RX_KER_BUF_ADDR, rx_buf->dma_addr);
}

/* This writes to the RX_DESC_WPTR register for the specified receive
 * descriptor queue.
 */
void efx_nic_notify_rx_desc(struct efx_rx_queue *rx_queue)
{
	efx_dword_t reg;
	unsigned write_ptr;

	while (rx_queue->notified_count != rx_queue->added_count) {
		efx_build_rx_desc(rx_queue,
				  rx_queue->notified_count &
				  EFX_RXQ_MASK);
		++rx_queue->notified_count;
	}

	wmb();
	write_ptr = rx_queue->added_count & EFX_RXQ_MASK;
	EFX_POPULATE_DWORD_1(reg, FRF_AZ_RX_DESC_WPTR_DWORD, write_ptr);
	efx_writed_page(rx_queue->efx, &reg, FR_AZ_RX_DESC_UPD_DWORD_P0,
			efx_rx_queue_index(rx_queue));
}

int efx_nic_probe_rx(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;

	BUILD_BUG_ON(EFX_RXQ_SIZE < 512 || EFX_RXQ_SIZE > 4096 ||
		     EFX_RXQ_SIZE & EFX_RXQ_MASK);
	return efx_alloc_special_buffer(efx, &rx_queue->rxd,
					EFX_RXQ_SIZE * sizeof(efx_qword_t));
}

void efx_nic_init_rx(struct efx_rx_queue *rx_queue)
{
	efx_oword_t rx_desc_ptr;
	struct efx_nic *efx = rx_queue->efx;
	bool is_b0 = efx_nic_rev(efx) >= EFX_REV_FALCON_B0;
	bool iscsi_digest_en = is_b0;

	netif_dbg(efx, hw, efx->net_dev,
		  "RX queue %d ring in special buffers %d-%d\n",
		  efx_rx_queue_index(rx_queue), rx_queue->rxd.index,
		  rx_queue->rxd.index + rx_queue->rxd.entries - 1);

	rx_queue->flushed = FLUSH_NONE;

	/* Pin RX descriptor ring */
	efx_init_special_buffer(efx, &rx_queue->rxd);

	/* Push RX descriptor ring to card */
	EFX_POPULATE_OWORD_10(rx_desc_ptr,
			      FRF_AZ_RX_ISCSI_DDIG_EN, iscsi_digest_en,
			      FRF_AZ_RX_ISCSI_HDIG_EN, iscsi_digest_en,
			      FRF_AZ_RX_DESCQ_BUF_BASE_ID, rx_queue->rxd.index,
			      FRF_AZ_RX_DESCQ_EVQ_ID,
			      efx_rx_queue_channel(rx_queue)->channel,
			      FRF_AZ_RX_DESCQ_OWNER_ID, 0,
			      FRF_AZ_RX_DESCQ_LABEL,
			      efx_rx_queue_index(rx_queue),
			      FRF_AZ_RX_DESCQ_SIZE,
			      __ffs(rx_queue->rxd.entries),
			      FRF_AZ_RX_DESCQ_TYPE, 0 /* kernel queue */,
			      /* For >=B0 this is scatter so disable */
			      FRF_AZ_RX_DESCQ_JUMBO, !is_b0,
			      FRF_AZ_RX_DESCQ_EN, 1);
	efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
			 efx_rx_queue_index(rx_queue));
}

static void efx_flush_rx_queue(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	efx_oword_t rx_flush_descq;

	rx_queue->flushed = FLUSH_PENDING;

	/* Post a flush command */
	EFX_POPULATE_OWORD_2(rx_flush_descq,
			     FRF_AZ_RX_FLUSH_DESCQ_CMD, 1,
			     FRF_AZ_RX_FLUSH_DESCQ,
			     efx_rx_queue_index(rx_queue));
	efx_writeo(efx, &rx_flush_descq, FR_AZ_RX_FLUSH_DESCQ);
}

void efx_nic_fini_rx(struct efx_rx_queue *rx_queue)
{
	efx_oword_t rx_desc_ptr;
	struct efx_nic *efx = rx_queue->efx;

	/* The queue should already have been flushed */
	WARN_ON(rx_queue->flushed != FLUSH_DONE);

	/* Remove RX descriptor ring from card */
	EFX_ZERO_OWORD(rx_desc_ptr);
	efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
			 efx_rx_queue_index(rx_queue));

	/* Unpin RX descriptor ring */
	efx_fini_special_buffer(efx, &rx_queue->rxd);
}

/* Free buffers backing RX queue */
void efx_nic_remove_rx(struct efx_rx_queue *rx_queue)
{
	efx_free_special_buffer(rx_queue->efx, &rx_queue->rxd);
}

/**************************************************************************
 *
 * Event queue processing
 * Event queues are processed by per-channel tasklets.
 *
 **************************************************************************/

/* Update a channel's event queue's read pointer (RPTR) register
 *
 * This writes the EVQ_RPTR_REG register for the specified channel's
 * event queue.
 */
void efx_nic_eventq_read_ack(struct efx_channel *channel)
{
	efx_dword_t reg;
	struct efx_nic *efx = channel->efx;

	EFX_POPULATE_DWORD_1(reg, FRF_AZ_EVQ_RPTR, channel->eventq_read_ptr);
	efx_writed_table(efx, &reg, efx->type->evq_rptr_tbl_base,
			 channel->channel);
}

/* Use HW to insert a SW defined event */
void efx_generate_event(struct efx_channel *channel, efx_qword_t *event)
{
	efx_oword_t drv_ev_reg;

	BUILD_BUG_ON(FRF_AZ_DRV_EV_DATA_LBN != 0 ||
		     FRF_AZ_DRV_EV_DATA_WIDTH != 64);
	drv_ev_reg.u32[0] = event->u32[0];
	drv_ev_reg.u32[1] = event->u32[1];
	drv_ev_reg.u32[2] = 0;
	drv_ev_reg.u32[3] = 0;
	EFX_SET_OWORD_FIELD(drv_ev_reg, FRF_AZ_DRV_EV_QID, channel->channel);
	efx_writeo(channel->efx, &drv_ev_reg, FR_AZ_DRV_EV);
}

/* Handle a transmit completion event
 *
 * The NIC batches TX completion events; the message we receive is of
 * the form "complete all TX events up to this index".
 */
static int
efx_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
{
	unsigned int tx_ev_desc_ptr;
	unsigned int tx_ev_q_label;
	struct efx_tx_queue *tx_queue;
	struct efx_nic *efx = channel->efx;
	int tx_packets = 0;

	if (likely(EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_COMP))) {
		/* Transmit completion */
		tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_DESC_PTR);
		tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
		tx_queue = efx_channel_get_tx_queue(
			channel, tx_ev_q_label % EFX_TXQ_TYPES);
		tx_packets = ((tx_ev_desc_ptr - tx_queue->read_count) &
			      EFX_TXQ_MASK);
		channel->irq_mod_score += tx_packets;
		efx_xmit_done(tx_queue, tx_ev_desc_ptr);
	} else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_WQ_FF_FULL)) {
		/* Rewrite the FIFO write pointer */
		tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
		tx_queue = efx_channel_get_tx_queue(
			channel, tx_ev_q_label % EFX_TXQ_TYPES);

		if (efx_dev_registered(efx))
			netif_tx_lock(efx->net_dev);
		efx_notify_tx_desc(tx_queue);
		if (efx_dev_registered(efx))
			netif_tx_unlock(efx->net_dev);
	} else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_PKT_ERR) &&
		   EFX_WORKAROUND_10727(efx)) {
		efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH);
	} else {
		netif_err(efx, tx_err, efx->net_dev,
			  "channel %d unexpected TX event "
			  EFX_QWORD_FMT"\n", channel->channel,
			  EFX_QWORD_VAL(*event));
	}

	return tx_packets;
}

/* Detect errors included in the rx_ev_pkt_ok bit. */
static void efx_handle_rx_not_ok(struct efx_rx_queue *rx_queue,
				 const efx_qword_t *event,
				 bool *rx_ev_pkt_ok,
				 bool *discard)
{
	struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
	struct efx_nic *efx = rx_queue->efx;
	bool rx_ev_buf_owner_id_err, rx_ev_ip_hdr_chksum_err;
	bool rx_ev_tcp_udp_chksum_err, rx_ev_eth_crc_err;
	bool rx_ev_frm_trunc, rx_ev_drib_nib, rx_ev_tobe_disc;
	bool rx_ev_other_err, rx_ev_pause_frm;
	bool rx_ev_hdr_type, rx_ev_mcast_pkt;
	unsigned rx_ev_pkt_type;

	rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE);
	rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT);
	rx_ev_tobe_disc = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_TOBE_DISC);
	rx_ev_pkt_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_TYPE);
	rx_ev_buf_owner_id_err = EFX_QWORD_FIELD(*event,
						 FSF_AZ_RX_EV_BUF_OWNER_ID_ERR);
	rx_ev_ip_hdr_chksum_err = EFX_QWORD_FIELD(*event,
						  FSF_AZ_RX_EV_IP_HDR_CHKSUM_ERR);
	rx_ev_tcp_udp_chksum_err = EFX_QWORD_FIELD(*event,
						   FSF_AZ_RX_EV_TCP_UDP_CHKSUM_ERR);
	rx_ev_eth_crc_err = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_ETH_CRC_ERR);
	rx_ev_frm_trunc = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_FRM_TRUNC);
	rx_ev_drib_nib = ((efx_nic_rev(efx) >= EFX_REV_FALCON_B0) ?
			  0 : EFX_QWORD_FIELD(*event, FSF_AA_RX_EV_DRIB_NIB));
	rx_ev_pause_frm = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PAUSE_FRM_ERR);

	/* Every error apart from tobe_disc and pause_frm */
	rx_ev_other_err = (rx_ev_drib_nib | rx_ev_tcp_udp_chksum_err |
			   rx_ev_buf_owner_id_err | rx_ev_eth_crc_err |
			   rx_ev_frm_trunc | rx_ev_ip_hdr_chksum_err);

	/* Count errors that are not in MAC stats.  Ignore expected
	 * checksum errors during self-test. */
	if (rx_ev_frm_trunc)
		++channel->n_rx_frm_trunc;
	else if (rx_ev_tobe_disc)
		++channel->n_rx_tobe_disc;
	else if (!efx->loopback_selftest) {
		if (rx_ev_ip_hdr_chksum_err)
			++channel->n_rx_ip_hdr_chksum_err;
		else if (rx_ev_tcp_udp_chksum_err)
			++channel->n_rx_tcp_udp_chksum_err;
	}

	/* The frame must be discarded if any of these are true. */
	*discard = (rx_ev_eth_crc_err | rx_ev_frm_trunc | rx_ev_drib_nib |
		    rx_ev_tobe_disc | rx_ev_pause_frm);

	/* TOBE_DISC is expected on unicast mismatches; don't print out an
	 * error message.  FRM_TRUNC indicates RXDP dropped the packet due
	 * to a FIFO overflow.
	 */
#ifdef EFX_ENABLE_DEBUG
	if (rx_ev_other_err && net_ratelimit()) {
		netif_dbg(efx, rx_err, efx->net_dev,
			  " RX queue %d unexpected RX event "
			  EFX_QWORD_FMT "%s%s%s%s%s%s%s%s\n",
			  efx_rx_queue_index(rx_queue), EFX_QWORD_VAL(*event),
			  rx_ev_buf_owner_id_err ? " [OWNER_ID_ERR]" : "",
			  rx_ev_ip_hdr_chksum_err ?
			  " [IP_HDR_CHKSUM_ERR]" : "",
			  rx_ev_tcp_udp_chksum_err ?
			  " [TCP_UDP_CHKSUM_ERR]" : "",
			  rx_ev_eth_crc_err ? " [ETH_CRC_ERR]" : "",
			  rx_ev_frm_trunc ? " [FRM_TRUNC]" : "",
			  rx_ev_drib_nib ? " [DRIB_NIB]" : "",
			  rx_ev_tobe_disc ? " [TOBE_DISC]" : "",
			  rx_ev_pause_frm ? " [PAUSE]" : "");
	}
#endif
}

/* Handle receive events that are not in-order. */
static void
efx_handle_rx_bad_index(struct efx_rx_queue *rx_queue, unsigned index)
{
	struct efx_nic *efx = rx_queue->efx;
	unsigned expected, dropped;

	expected = rx_queue->removed_count & EFX_RXQ_MASK;
	dropped = (index - expected) & EFX_RXQ_MASK;
	netif_info(efx, rx_err, efx->net_dev,
		   "dropped %d events (index=%d expected=%d)\n",
		   dropped, index, expected);

	efx_schedule_reset(efx, EFX_WORKAROUND_5676(efx) ?
			   RESET_TYPE_RX_RECOVERY : RESET_TYPE_DISABLE);
}

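/* Worked example (illustrative): with expected=10 and index=14 the NIC
 * skipped (14 - 10) & EFX_RXQ_MASK = 4 descriptors; the modular
 * subtraction also handles wrap, e.g. expected=4094 and index=2 gives 4
 * on a 4096-entry ring.
 */
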
/* Handle a packet received event
 *
 * The NIC gives a "discard" flag if it's a unicast packet with the
 * wrong destination address.
 * Also "is multicast" and "matches multicast filter" flags can be used to
 * discard non-matching multicast packets.
 */
static void
efx_handle_rx_event(struct efx_channel *channel, const efx_qword_t *event)
{
	unsigned int rx_ev_desc_ptr, rx_ev_byte_cnt;
	unsigned int rx_ev_hdr_type, rx_ev_mcast_pkt;
	unsigned expected_ptr;
	bool rx_ev_pkt_ok, discard = false, checksummed;
	struct efx_rx_queue *rx_queue;
	struct efx_nic *efx = channel->efx;

	/* Basic packet information */
	rx_ev_byte_cnt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_BYTE_CNT);
	rx_ev_pkt_ok = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_OK);
	rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE);
	WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_JUMBO_CONT));
	WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_SOP) != 1);
	WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_Q_LABEL) !=
		channel->channel);

	rx_queue = efx_channel_get_rx_queue(channel);

	rx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_DESC_PTR);
	expected_ptr = rx_queue->removed_count & EFX_RXQ_MASK;
	if (unlikely(rx_ev_desc_ptr != expected_ptr))
		efx_handle_rx_bad_index(rx_queue, rx_ev_desc_ptr);

	if (likely(rx_ev_pkt_ok)) {
		/* If packet is marked as OK and packet type is TCP/IP or
		 * UDP/IP, then we can rely on the hardware checksum.
		 */
		checksummed =
			likely(efx->rx_checksum_enabled) &&
			(rx_ev_hdr_type == FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_TCP ||
			 rx_ev_hdr_type == FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_UDP);
	} else {
		efx_handle_rx_not_ok(rx_queue, event, &rx_ev_pkt_ok, &discard);
		checksummed = false;
	}

	/* Detect multicast packets that didn't match the filter */
	rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT);
	if (rx_ev_mcast_pkt) {
		unsigned int rx_ev_mcast_hash_match =
			EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_HASH_MATCH);

		if (unlikely(!rx_ev_mcast_hash_match)) {
			++channel->n_rx_mcast_mismatch;
			discard = true;
		}
	}

	channel->irq_mod_score += 2;

	/* Handle received packet */
	efx_rx_packet(rx_queue, rx_ev_desc_ptr, rx_ev_byte_cnt,
		      checksummed, discard);
}

static void
efx_handle_generated_event(struct efx_channel *channel, efx_qword_t *event)
{
	struct efx_nic *efx = channel->efx;
	unsigned code;

	code = EFX_QWORD_FIELD(*event, FSF_AZ_DRV_GEN_EV_MAGIC);
	if (code == EFX_CHANNEL_MAGIC_TEST(channel))
		++channel->magic_count;
	else if (code == EFX_CHANNEL_MAGIC_FILL(channel))
		/* The queue must be empty, so we won't receive any rx
		 * events, so efx_process_channel() won't refill the
		 * queue.  Refill it here */
		efx_fast_push_rx_descriptors(efx_channel_get_rx_queue(channel));
	else
		netif_dbg(efx, hw, efx->net_dev, "channel %d received "
			  "generated event "EFX_QWORD_FMT"\n",
			  channel->channel, EFX_QWORD_VAL(*event));
}

/* Global events are basically PHY events */
static void
efx_handle_global_event(struct efx_channel *channel, efx_qword_t *event)
{
	struct efx_nic *efx = channel->efx;
	bool handled = false;

	if (EFX_QWORD_FIELD(*event, FSF_AB_GLB_EV_G_PHY0_INTR) ||
	    EFX_QWORD_FIELD(*event, FSF_AB_GLB_EV_XG_PHY0_INTR) ||
	    EFX_QWORD_FIELD(*event, FSF_AB_GLB_EV_XFP_PHY0_INTR)) {
		/* Ignored */
		handled = true;
	}

	if ((efx_nic_rev(efx) >= EFX_REV_FALCON_B0) &&
	    EFX_QWORD_FIELD(*event, FSF_BB_GLB_EV_XG_MGT_INTR)) {
		efx->xmac_poll_required = true;
		handled = true;
	}

	if (efx_nic_rev(efx) <= EFX_REV_FALCON_A1 ?
	    EFX_QWORD_FIELD(*event, FSF_AA_GLB_EV_RX_RECOVERY) :
	    EFX_QWORD_FIELD(*event, FSF_BB_GLB_EV_RX_RECOVERY)) {
		netif_err(efx, rx_err, efx->net_dev,
			  "channel %d seen global RX_RESET event. Resetting.\n",
			  channel->channel);

		atomic_inc(&efx->rx_reset);
		efx_schedule_reset(efx, EFX_WORKAROUND_6555(efx) ?
				   RESET_TYPE_RX_RECOVERY : RESET_TYPE_DISABLE);
		handled = true;
	}

	if (!handled)
		netif_err(efx, hw, efx->net_dev,
			  "channel %d unknown global event "
			  EFX_QWORD_FMT "\n", channel->channel,
			  EFX_QWORD_VAL(*event));
}

static void
efx_handle_driver_event(struct efx_channel *channel, efx_qword_t *event)
{
	struct efx_nic *efx = channel->efx;
	unsigned int ev_sub_code;
	unsigned int ev_sub_data;

	ev_sub_code = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBCODE);
	ev_sub_data = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA);

	switch (ev_sub_code) {
	case FSE_AZ_TX_DESCQ_FLS_DONE_EV:
		netif_vdbg(efx, hw, efx->net_dev, "channel %d TXQ %d flushed\n",
			   channel->channel, ev_sub_data);
		break;
	case FSE_AZ_RX_DESCQ_FLS_DONE_EV:
		netif_vdbg(efx, hw, efx->net_dev, "channel %d RXQ %d flushed\n",
			   channel->channel, ev_sub_data);
		break;
	case FSE_AZ_EVQ_INIT_DONE_EV:
		netif_dbg(efx, hw, efx->net_dev,
			  "channel %d EVQ %d initialised\n",
			  channel->channel, ev_sub_data);
		break;
	case FSE_AZ_SRM_UPD_DONE_EV:
		netif_vdbg(efx, hw, efx->net_dev,
			   "channel %d SRAM update done\n", channel->channel);
		break;
	case FSE_AZ_WAKE_UP_EV:
		netif_vdbg(efx, hw, efx->net_dev,
			   "channel %d RXQ %d wakeup event\n",
			   channel->channel, ev_sub_data);
		break;
	case FSE_AZ_TIMER_EV:
		netif_vdbg(efx, hw, efx->net_dev,
			   "channel %d RX queue %d timer expired\n",
			   channel->channel, ev_sub_data);
		break;
	case FSE_AA_RX_RECOVER_EV:
		netif_err(efx, rx_err, efx->net_dev,
			  "channel %d seen DRIVER RX_RESET event. "
			  "Resetting.\n", channel->channel);
		atomic_inc(&efx->rx_reset);
		efx_schedule_reset(efx,
				   EFX_WORKAROUND_6555(efx) ?
				   RESET_TYPE_RX_RECOVERY :
				   RESET_TYPE_DISABLE);
		break;
	case FSE_BZ_RX_DSC_ERROR_EV:
		netif_err(efx, rx_err, efx->net_dev,
			  "RX DMA Q %d reports descriptor fetch error."
			  " RX Q %d is disabled.\n", ev_sub_data, ev_sub_data);
		efx_schedule_reset(efx, RESET_TYPE_RX_DESC_FETCH);
		break;
	case FSE_BZ_TX_DSC_ERROR_EV:
		netif_err(efx, tx_err, efx->net_dev,
			  "TX DMA Q %d reports descriptor fetch error."
			  " TX Q %d is disabled.\n", ev_sub_data, ev_sub_data);
		efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH);
		break;
	default:
		netif_vdbg(efx, hw, efx->net_dev,
			   "channel %d unknown driver event code %d "
			   "data %04x\n", channel->channel, ev_sub_code,
			   ev_sub_data);
		break;
	}
}

int efx_nic_process_eventq(struct efx_channel *channel, int budget)
{
	unsigned int read_ptr;
	efx_qword_t event, *p_event;
	int ev_code;
	int tx_packets = 0;
	int spent = 0;

	read_ptr = channel->eventq_read_ptr;

	for (;;) {
		p_event = efx_event(channel, read_ptr);
		event = *p_event;

		if (!efx_event_present(&event))
			/* End of events */
			break;

		netif_vdbg(channel->efx, intr, channel->efx->net_dev,
			   "channel %d event is "EFX_QWORD_FMT"\n",
			   channel->channel, EFX_QWORD_VAL(event));

		/* Clear this event by marking it all ones */
		EFX_SET_QWORD(*p_event);

		/* Increment read pointer */
		read_ptr = (read_ptr + 1) & EFX_EVQ_MASK;

		ev_code = EFX_QWORD_FIELD(event, FSF_AZ_EV_CODE);

		switch (ev_code) {
		case FSE_AZ_EV_CODE_RX_EV:
			efx_handle_rx_event(channel, &event);
			if (++spent == budget)
				goto out;
			break;
		case FSE_AZ_EV_CODE_TX_EV:
			tx_packets += efx_handle_tx_event(channel, &event);
			if (tx_packets >= EFX_TXQ_SIZE) {
				spent = budget;
				goto out;
			}
			break;
		case FSE_AZ_EV_CODE_DRV_GEN_EV:
			efx_handle_generated_event(channel, &event);
			break;
		case FSE_AZ_EV_CODE_GLOBAL_EV:
			efx_handle_global_event(channel, &event);
			break;
		case FSE_AZ_EV_CODE_DRIVER_EV:
			efx_handle_driver_event(channel, &event);
			break;
		case FSE_CZ_EV_CODE_MCDI_EV:
			efx_mcdi_process_event(channel, &event);
			break;
		default:
			netif_err(channel->efx, hw, channel->efx->net_dev,
				  "channel %d unknown event type %d (data "
				  EFX_QWORD_FMT ")\n", channel->channel,
				  ev_code, EFX_QWORD_VAL(event));
		}
	}

out:
	channel->eventq_read_ptr = read_ptr;
	return spent;
}

/* Allocate buffer table entries for event queue */
int efx_nic_probe_eventq(struct efx_channel *channel)
{
	struct efx_nic *efx = channel->efx;

	BUILD_BUG_ON(EFX_EVQ_SIZE < 512 || EFX_EVQ_SIZE > 32768 ||
		     EFX_EVQ_SIZE & EFX_EVQ_MASK);
	return efx_alloc_special_buffer(efx, &channel->eventq,
					EFX_EVQ_SIZE * sizeof(efx_qword_t));
}

void efx_nic_init_eventq(struct efx_channel *channel)
{
	efx_oword_t reg;
	struct efx_nic *efx = channel->efx;

	netif_dbg(efx, hw, efx->net_dev,
		  "channel %d event queue in special buffers %d-%d\n",
		  channel->channel, channel->eventq.index,
		  channel->eventq.index + channel->eventq.entries - 1);

	if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) {
		EFX_POPULATE_OWORD_3(reg,
				     FRF_CZ_TIMER_Q_EN, 1,
				     FRF_CZ_HOST_NOTIFY_MODE, 0,
				     FRF_CZ_TIMER_MODE, FFE_CZ_TIMER_MODE_DIS);
		efx_writeo_table(efx, &reg, FR_BZ_TIMER_TBL, channel->channel);
	}

	/* Pin event queue buffer */
	efx_init_special_buffer(efx, &channel->eventq);

	/* Fill event queue with all ones (i.e. empty events) */
	memset(channel->eventq.addr, 0xff, channel->eventq.len);

	/* Push event queue to card */
	EFX_POPULATE_OWORD_3(reg,
			     FRF_AZ_EVQ_EN, 1,
			     FRF_AZ_EVQ_SIZE, __ffs(channel->eventq.entries),
			     FRF_AZ_EVQ_BUF_BASE_ID, channel->eventq.index);
	efx_writeo_table(efx, &reg, efx->type->evq_ptr_tbl_base,
			 channel->channel);

	efx->type->push_irq_moderation(channel);
}

void efx_nic_fini_eventq(struct efx_channel *channel)
{
	efx_oword_t reg;
	struct efx_nic *efx = channel->efx;

	/* Remove event queue from card */
	EFX_ZERO_OWORD(reg);
	efx_writeo_table(efx, &reg, efx->type->evq_ptr_tbl_base,
			 channel->channel);
	if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0)
		efx_writeo_table(efx, &reg, FR_BZ_TIMER_TBL, channel->channel);

	/* Unpin event queue */
	efx_fini_special_buffer(efx, &channel->eventq);
}

/* Free buffers backing event queue */
void efx_nic_remove_eventq(struct efx_channel *channel)
{
	efx_free_special_buffer(channel->efx, &channel->eventq);
}

void efx_nic_generate_test_event(struct efx_channel *channel)
{
	unsigned int magic = EFX_CHANNEL_MAGIC_TEST(channel);
	efx_qword_t test_event;

	EFX_POPULATE_QWORD_2(test_event, FSF_AZ_EV_CODE,
			     FSE_AZ_EV_CODE_DRV_GEN_EV,
			     FSF_AZ_DRV_GEN_EV_MAGIC, magic);
	efx_generate_event(channel, &test_event);
}

void efx_nic_generate_fill_event(struct efx_channel *channel)
{
	unsigned int magic = EFX_CHANNEL_MAGIC_FILL(channel);
	efx_qword_t test_event;

	EFX_POPULATE_QWORD_2(test_event, FSF_AZ_EV_CODE,
			     FSE_AZ_EV_CODE_DRV_GEN_EV,
			     FSF_AZ_DRV_GEN_EV_MAGIC, magic);
	efx_generate_event(channel, &test_event);
}

/**************************************************************************
 *
 * Flush handling
 *
 **************************************************************************/

static void efx_poll_flush_events(struct efx_nic *efx)
{
	struct efx_channel *channel = efx_get_channel(efx, 0);
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;
	unsigned int read_ptr = channel->eventq_read_ptr;
	unsigned int end_ptr = (read_ptr - 1) & EFX_EVQ_MASK;

	do {
		efx_qword_t *event = efx_event(channel, read_ptr);
		int ev_code, ev_sub_code, ev_queue;
		bool ev_failed;

		if (!efx_event_present(event))
			break;

		ev_code = EFX_QWORD_FIELD(*event, FSF_AZ_EV_CODE);
		ev_sub_code = EFX_QWORD_FIELD(*event,
					      FSF_AZ_DRIVER_EV_SUBCODE);
		if (ev_code == FSE_AZ_EV_CODE_DRIVER_EV &&
		    ev_sub_code == FSE_AZ_TX_DESCQ_FLS_DONE_EV) {
			ev_queue = EFX_QWORD_FIELD(*event,
						   FSF_AZ_DRIVER_EV_SUBDATA);
			if (ev_queue < EFX_TXQ_TYPES * efx->n_tx_channels) {
				tx_queue = efx_get_tx_queue(
					efx, ev_queue / EFX_TXQ_TYPES,
					ev_queue % EFX_TXQ_TYPES);
				tx_queue->flushed = FLUSH_DONE;
			}
		} else if (ev_code == FSE_AZ_EV_CODE_DRIVER_EV &&
			   ev_sub_code == FSE_AZ_RX_DESCQ_FLS_DONE_EV) {
			ev_queue = EFX_QWORD_FIELD(
				*event, FSF_AZ_DRIVER_EV_RX_DESCQ_ID);
			ev_failed = EFX_QWORD_FIELD(
				*event, FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL);
			if (ev_queue < efx->n_rx_channels) {
				rx_queue = efx_get_rx_queue(efx, ev_queue);
				rx_queue->flushed =
					ev_failed ? FLUSH_FAILED : FLUSH_DONE;
			}
		}

		/* We're about to destroy the queue anyway, so
		 * it's ok to throw away every non-flush event */
		EFX_SET_QWORD(*event);

		read_ptr = (read_ptr + 1) & EFX_EVQ_MASK;
	} while (read_ptr != end_ptr);

	channel->eventq_read_ptr = read_ptr;
}

/* Handle tx and rx flushes at the same time, since they run in
 * parallel in the hardware and there's no reason for us to
 * serialise them */
int efx_nic_flush_queues(struct efx_nic *efx)
{
	struct efx_channel *channel;
	struct efx_rx_queue *rx_queue;
	struct efx_tx_queue *tx_queue;
	int i, tx_pending, rx_pending;

	/* If necessary prepare the hardware for flushing */
	efx->type->prepare_flush(efx);

	/* Flush all tx queues in parallel */
	efx_for_each_channel(channel, efx) {
		efx_for_each_channel_tx_queue(tx_queue, channel)
			efx_flush_tx_queue(tx_queue);
	}

	/* The hardware supports four concurrent rx flushes, each of which may
	 * need to be retried if there is an outstanding descriptor fetch */
	for (i = 0; i < EFX_FLUSH_POLL_COUNT; ++i) {
		rx_pending = tx_pending = 0;
		efx_for_each_channel(channel, efx) {
			efx_for_each_channel_rx_queue(rx_queue, channel) {
				if (rx_queue->flushed == FLUSH_PENDING)
					++rx_pending;
			}
		}
		efx_for_each_channel(channel, efx) {
			efx_for_each_channel_rx_queue(rx_queue, channel) {
				if (rx_pending == EFX_RX_FLUSH_COUNT)
					break;
				if (rx_queue->flushed == FLUSH_FAILED ||
				    rx_queue->flushed == FLUSH_NONE) {
					efx_flush_rx_queue(rx_queue);
					++rx_pending;
				}
			}
			efx_for_each_channel_tx_queue(tx_queue, channel) {
				if (tx_queue->flushed != FLUSH_DONE)
					++tx_pending;
			}
		}

		if (rx_pending == 0 && tx_pending == 0)
			return 0;

		msleep(EFX_FLUSH_INTERVAL);
		efx_poll_flush_events(efx);
	}

	/* Mark the queues as all flushed.  We're going to return failure
	 * leading to a reset, or fake up success anyway */
	efx_for_each_channel(channel, efx) {
		efx_for_each_channel_tx_queue(tx_queue, channel) {
			if (tx_queue->flushed != FLUSH_DONE)
				netif_err(efx, hw, efx->net_dev,
					  "tx queue %d flush command timed out\n",
					  tx_queue->queue);
			tx_queue->flushed = FLUSH_DONE;
		}
		efx_for_each_channel_rx_queue(rx_queue, channel) {
			if (rx_queue->flushed != FLUSH_DONE)
				netif_err(efx, hw, efx->net_dev,
					  "rx queue %d flush command timed out\n",
					  efx_rx_queue_index(rx_queue));
			rx_queue->flushed = FLUSH_DONE;
		}
	}

	return -ETIMEDOUT;
}

/**************************************************************************
 *
 * Hardware interrupts
 * The hardware interrupt handler does very little work; all the event
 * queue processing is carried out by per-channel tasklets.
 *
 **************************************************************************/

/* Enable/disable/generate interrupts */
static inline void efx_nic_interrupts(struct efx_nic *efx,
				      bool enabled, bool force)
{
	efx_oword_t int_en_reg_ker;

	EFX_POPULATE_OWORD_3(int_en_reg_ker,
			     FRF_AZ_KER_INT_LEVE_SEL, efx->fatal_irq_level,
			     FRF_AZ_KER_INT_KER, force,
			     FRF_AZ_DRV_INT_EN_KER, enabled);
	efx_writeo(efx, &int_en_reg_ker, FR_AZ_INT_EN_KER);
}

void efx_nic_enable_interrupts(struct efx_nic *efx)
{
	struct efx_channel *channel;

	EFX_ZERO_OWORD(*((efx_oword_t *) efx->irq_status.addr));
	wmb(); /* Ensure interrupt vector is clear before interrupts enabled */

	/* Enable interrupts */
	efx_nic_interrupts(efx, true, false);

	/* Force processing of all the channels to get the EVQ RPTRs up to
	   date */
	efx_for_each_channel(channel, efx)
		efx_schedule_channel(channel);
}

void efx_nic_disable_interrupts(struct efx_nic *efx)
{
	/* Disable interrupts */
	efx_nic_interrupts(efx, false, false);
}

/* Generate a test interrupt
 * Interrupt must already have been enabled, otherwise nasty things
 * may happen.
 */
void efx_nic_generate_interrupt(struct efx_nic *efx)
{
	efx_nic_interrupts(efx, true, true);
}

/* Process a fatal interrupt
 * Disable bus mastering ASAP and schedule a reset
 */
irqreturn_t efx_nic_fatal_interrupt(struct efx_nic *efx)
{
	struct falcon_nic_data *nic_data = efx->nic_data;
	efx_oword_t *int_ker = efx->irq_status.addr;
	efx_oword_t fatal_intr;
	int error, mem_perr;

	efx_reado(efx, &fatal_intr, FR_AZ_FATAL_INTR_KER);
	error = EFX_OWORD_FIELD(fatal_intr, FRF_AZ_FATAL_INTR);

	netif_err(efx, hw, efx->net_dev, "SYSTEM ERROR "EFX_OWORD_FMT" status "
		  EFX_OWORD_FMT ": %s\n", EFX_OWORD_VAL(*int_ker),
		  EFX_OWORD_VAL(fatal_intr),
		  error ? "disabling bus mastering" : "no recognised error");

	/* If this is a memory parity error dump which blocks are offending */
	mem_perr = (EFX_OWORD_FIELD(fatal_intr, FRF_AZ_MEM_PERR_INT_KER) ||
		    EFX_OWORD_FIELD(fatal_intr, FRF_AZ_SRM_PERR_INT_KER));
	if (mem_perr) {
		efx_oword_t reg;
		efx_reado(efx, &reg, FR_AZ_MEM_STAT);
		netif_err(efx, hw, efx->net_dev,
			  "SYSTEM ERROR: memory parity error "EFX_OWORD_FMT"\n",
			  EFX_OWORD_VAL(reg));
	}

	/* Disable both devices */
	pci_clear_master(efx->pci_dev);
	if (efx_nic_is_dual_func(efx))
		pci_clear_master(nic_data->pci_dev2);
	efx_nic_disable_interrupts(efx);

	/* Count errors and reset or disable the NIC accordingly */
	if (efx->int_error_count == 0 ||
	    time_after(jiffies, efx->int_error_expire)) {
		efx->int_error_count = 0;
		efx->int_error_expire =
			jiffies + EFX_INT_ERROR_EXPIRE * HZ;
	}
	if (++efx->int_error_count < EFX_MAX_INT_ERRORS) {
		netif_err(efx, hw, efx->net_dev,
			  "SYSTEM ERROR - reset scheduled\n");
		efx_schedule_reset(efx, RESET_TYPE_INT_ERROR);
	} else {
		netif_err(efx, hw, efx->net_dev,
			  "SYSTEM ERROR - max number of errors seen. "
			  "NIC will be disabled\n");
		efx_schedule_reset(efx, RESET_TYPE_DISABLE);
	}

	return IRQ_HANDLED;
}

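/* Worked example (illustrative): with EFX_INT_ERROR_EXPIRE = 3600 and
 * EFX_MAX_INT_ERRORS = 5, the fifth fatal interrupt within an hour of
 * the first schedules RESET_TYPE_DISABLE rather than another recovery
 * reset.
 */
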
/* Handle a legacy interrupt
 * Acknowledges the interrupt, and schedules event queue processing.
 */
static irqreturn_t efx_legacy_interrupt(int irq, void *dev_id)
{
	struct efx_nic *efx = dev_id;
	efx_oword_t *int_ker = efx->irq_status.addr;
	irqreturn_t result = IRQ_NONE;
	struct efx_channel *channel;
	efx_dword_t reg;
	u32 queues;
	int syserr;

	/* Read the ISR which also ACKs the interrupts */
	efx_readd(efx, &reg, FR_BZ_INT_ISR0);
	queues = EFX_EXTRACT_DWORD(reg, 0, 31);

	/* Check to see if we have a serious error condition */
	if (queues & (1U << efx->fatal_irq_level)) {
		syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
		if (unlikely(syserr))
			return efx_nic_fatal_interrupt(efx);
	}

	if (queues != 0) {
		if (EFX_WORKAROUND_15783(efx))
			efx->irq_zero_count = 0;

		/* Schedule processing of any interrupting queues */
		efx_for_each_channel(channel, efx) {
			if (queues & 1)
				efx_schedule_channel(channel);
			queues >>= 1;
		}
		result = IRQ_HANDLED;

	} else if (EFX_WORKAROUND_15783(efx)) {
		efx_qword_t *event;

		/* We can't return IRQ_HANDLED more than once on seeing ISR=0
		 * because this might be a shared interrupt. */
		if (efx->irq_zero_count++ == 0)
			result = IRQ_HANDLED;

		/* Ensure we schedule or rearm all event queues */
		efx_for_each_channel(channel, efx) {
			event = efx_event(channel, channel->eventq_read_ptr);
			if (efx_event_present(event))
				efx_schedule_channel(channel);
			else
				efx_nic_eventq_read_ack(channel);
		}
	}

	if (result == IRQ_HANDLED) {
		efx->last_irq_cpu = raw_smp_processor_id();
		netif_vdbg(efx, intr, efx->net_dev,
			   "IRQ %d on CPU %d status " EFX_DWORD_FMT "\n",
			   irq, raw_smp_processor_id(), EFX_DWORD_VAL(reg));
	}

	return result;
}

/* Handle an MSI interrupt
 *
 * Handle an MSI hardware interrupt.  This routine schedules event
 * queue processing.  No interrupt acknowledgement cycle is necessary.
 * Also, we never need to check that the interrupt is for us, since
 * MSI interrupts cannot be shared.
 */
static irqreturn_t efx_msi_interrupt(int irq, void *dev_id)
{
	struct efx_channel *channel = dev_id;
	struct efx_nic *efx = channel->efx;
	efx_oword_t *int_ker = efx->irq_status.addr;
	int syserr;

	efx->last_irq_cpu = raw_smp_processor_id();
	netif_vdbg(efx, intr, efx->net_dev,
		   "IRQ %d on CPU %d status " EFX_OWORD_FMT "\n",
		   irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker));

	/* Check to see if we have a serious error condition */
	if (channel->channel == efx->fatal_irq_level) {
		syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
		if (unlikely(syserr))
			return efx_nic_fatal_interrupt(efx);
	}

	/* Schedule processing of the channel */
	efx_schedule_channel(channel);

	return IRQ_HANDLED;
}

/* Setup RSS indirection table.
 * This maps from the hash value of the packet to RXQ
 */
void efx_nic_push_rx_indir_table(struct efx_nic *efx)
{
	size_t i = 0;
	efx_dword_t dword;

	if (efx_nic_rev(efx) < EFX_REV_FALCON_B0)
		return;

	BUILD_BUG_ON(ARRAY_SIZE(efx->rx_indir_table) !=
		     FR_BZ_RX_INDIRECTION_TBL_ROWS);

	for (i = 0; i < FR_BZ_RX_INDIRECTION_TBL_ROWS; i++) {
		EFX_POPULATE_DWORD_1(dword, FRF_BZ_IT_QUEUE,
				     efx->rx_indir_table[i]);
		efx_writed_table(efx, &dword, FR_BZ_RX_INDIRECTION_TBL, i);
	}
}

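/* Example (illustrative sketch): conceptually the hardware picks an RX
 * queue as
 *
 *	queue = efx->rx_indir_table[rx_hash % FR_BZ_RX_INDIRECTION_TBL_ROWS];
 *
 * so filling the table rows with an even spread of queue numbers
 * balances flows across channels (the table is indexed by the NIC, not
 * by this driver).
 */
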
/* Hook interrupt handler(s)
 * Try MSI and then legacy interrupts.
 */
int efx_nic_init_interrupt(struct efx_nic *efx)
{
	struct efx_channel *channel;
	int rc;

	if (!EFX_INT_MODE_USE_MSI(efx)) {
		irq_handler_t handler;
		if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
			handler = efx_legacy_interrupt;
		else
			handler = falcon_legacy_interrupt_a1;

		rc = request_irq(efx->legacy_irq, handler, IRQF_SHARED,
				 efx->name, efx);
		if (rc) {
			netif_err(efx, drv, efx->net_dev,
				  "failed to hook legacy IRQ %d\n",
				  efx->pci_dev->irq);
			goto fail1;
		}
		return 0;
	}

	/* Hook MSI or MSI-X interrupt */
	efx_for_each_channel(channel, efx) {
		rc = request_irq(channel->irq, efx_msi_interrupt,
				 IRQF_PROBE_SHARED, /* Not shared */
				 channel->name, channel);
		if (rc) {
			netif_err(efx, drv, efx->net_dev,
				  "failed to hook IRQ %d\n", channel->irq);
			goto fail2;
		}
	}

	return 0;

 fail2:
	efx_for_each_channel(channel, efx)
		free_irq(channel->irq, channel);
 fail1:
	return rc;
}

void efx_nic_fini_interrupt(struct efx_nic *efx)
{
	struct efx_channel *channel;
	efx_oword_t reg;

	/* Disable MSI/MSI-X interrupts */
	efx_for_each_channel(channel, efx) {
		if (channel->irq)
			free_irq(channel->irq, channel);
	}

	/* ACK legacy interrupt */
	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
		efx_reado(efx, &reg, FR_BZ_INT_ISR0);
	else
		falcon_irq_ack_a1(efx);

	/* Disable legacy interrupt */
	if (efx->legacy_irq)
		free_irq(efx->legacy_irq, efx);
}

u32 efx_nic_fpga_ver(struct efx_nic *efx)
{
	efx_oword_t altera_build;

	efx_reado(efx, &altera_build, FR_AZ_ALTERA_BUILD);
	return EFX_OWORD_FIELD(altera_build, FRF_AZ_ALTERA_BUILD_VER);
}

void efx_nic_init_common(struct efx_nic *efx)
{
	efx_oword_t temp;

	/* Set positions of descriptor caches in SRAM. */
	EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_TX_DC_BASE_ADR,
			     efx->type->tx_dc_base / 8);
	efx_writeo(efx, &temp, FR_AZ_SRM_TX_DC_CFG);
	EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_RX_DC_BASE_ADR,
			     efx->type->rx_dc_base / 8);
	efx_writeo(efx, &temp, FR_AZ_SRM_RX_DC_CFG);

	/* Set TX descriptor cache size. */
	BUILD_BUG_ON(TX_DC_ENTRIES != (8 << TX_DC_ENTRIES_ORDER));
	EFX_POPULATE_OWORD_1(temp, FRF_AZ_TX_DC_SIZE, TX_DC_ENTRIES_ORDER);
	efx_writeo(efx, &temp, FR_AZ_TX_DC_CFG);

	/* Set RX descriptor cache size.  Set low watermark to size-8, as
	 * this allows most efficient prefetching.
	 */
	BUILD_BUG_ON(RX_DC_ENTRIES != (8 << RX_DC_ENTRIES_ORDER));
	EFX_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_SIZE, RX_DC_ENTRIES_ORDER);
	efx_writeo(efx, &temp, FR_AZ_RX_DC_CFG);
	EFX_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_PF_LWM, RX_DC_ENTRIES - 8);
	efx_writeo(efx, &temp, FR_AZ_RX_DC_PF_WM);

	/* Program INT_KER address */
	EFX_POPULATE_OWORD_2(temp,
			     FRF_AZ_NORM_INT_VEC_DIS_KER,
			     EFX_INT_MODE_USE_MSI(efx),
			     FRF_AZ_INT_ADR_KER, efx->irq_status.dma_addr);
	efx_writeo(efx, &temp, FR_AZ_INT_ADR_KER);

	if (EFX_WORKAROUND_17213(efx) && !EFX_INT_MODE_USE_MSI(efx))
		/* Use an interrupt level unused by event queues */
		efx->fatal_irq_level = 0x1f;
	else
		/* Use a valid MSI-X vector */
		efx->fatal_irq_level = 0;

	/* Enable all the genuinely fatal interrupts.  (They are still
	 * masked by the overall interrupt mask, controlled by
	 * efx_nic_interrupts()).
	 *
	 * Note: All other fatal interrupts are enabled
	 */
	EFX_POPULATE_OWORD_3(temp,
			     FRF_AZ_ILL_ADR_INT_KER_EN, 1,
			     FRF_AZ_RBUF_OWN_INT_KER_EN, 1,
			     FRF_AZ_TBUF_OWN_INT_KER_EN, 1);
	if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0)
		EFX_SET_OWORD_FIELD(temp, FRF_CZ_SRAM_PERR_INT_P_KER_EN, 1);
	EFX_INVERT_OWORD(temp);
	efx_writeo(efx, &temp, FR_AZ_FATAL_INTR_KER);

	efx_nic_push_rx_indir_table(efx);

	/* Disable the ugly timer-based TX DMA backoff and allow TX DMA to be
	 * controlled by the RX FIFO fill level.  Set arbitration to one pkt/Q.
	 */
	efx_reado(efx, &temp, FR_AZ_TX_RESERVED);
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER, 0xfe);
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER_EN, 1);
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_ONE_PKT_PER_Q, 1);
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PUSH_EN, 0);
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_DIS_NON_IP_EV, 1);
	/* Enable SW_EV to inherit in char driver - assume harmless here */
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_SOFT_EVT_EN, 1);
	/* Prefetch threshold 2 => fetch when descriptor cache half empty */
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_THRESHOLD, 2);
	/* Disable hardware watchdog which can misfire */
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_WD_TMR, 0x3fffff);
	/* Squash TX of packets of 16 bytes or less */
	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
		EFX_SET_OWORD_FIELD(temp, FRF_BZ_TX_FLUSH_MIN_LEN_EN, 1);
	efx_writeo(efx, &temp, FR_AZ_TX_RESERVED);
}

#define REGISTER_REVISION_A	1
#define REGISTER_REVISION_B	2
#define REGISTER_REVISION_C	3
#define REGISTER_REVISION_Z	3	/* latest revision */

struct efx_nic_reg {
	u32 offset:24;
	u32 min_revision:2, max_revision:2;
};

#define REGISTER(name, min_rev, max_rev) {				\
	FR_ ## min_rev ## max_rev ## _ ## name,				\
	REGISTER_REVISION_ ## min_rev, REGISTER_REVISION_ ## max_rev	\
}
#define REGISTER_AA(name) REGISTER(name, A, A)
#define REGISTER_AB(name) REGISTER(name, A, B)
#define REGISTER_AZ(name) REGISTER(name, A, Z)
#define REGISTER_BB(name) REGISTER(name, B, B)
#define REGISTER_BZ(name) REGISTER(name, B, Z)
#define REGISTER_CZ(name) REGISTER(name, C, Z)

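/* Example (illustrative): REGISTER_BZ(RX_CFG) expands to
 *
 *	{ FR_BZ_RX_CFG, REGISTER_REVISION_B, REGISTER_REVISION_Z }
 *
 * so the register dump code can test a chip's revision against each
 * entry's [min_revision, max_revision] range.
 */
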
static const struct efx_nic_reg efx_nic_regs[] = {
	REGISTER_AZ(ADR_REGION),
	REGISTER_AZ(INT_EN_KER),
	REGISTER_BZ(INT_EN_CHAR),
	REGISTER_AZ(INT_ADR_KER),
	REGISTER_BZ(INT_ADR_CHAR),
	/* INT_ACK_KER is WO */
	/* INT_ISR0 is RC */
	REGISTER_AZ(HW_INIT),
	REGISTER_CZ(USR_EV_CFG),
	REGISTER_AB(EE_SPI_HCMD),
	REGISTER_AB(EE_SPI_HADR),
	REGISTER_AB(EE_SPI_HDATA),
	REGISTER_AB(EE_BASE_PAGE),
	REGISTER_AB(EE_VPD_CFG0),
	/* EE_VPD_SW_CNTL and EE_VPD_SW_DATA are not used */
	/* PMBX_DBG_IADDR and PBMX_DBG_IDATA are indirect */
	/* PCIE_CORE_INDIRECT is indirect */
	REGISTER_AB(NIC_STAT),
	REGISTER_AB(GPIO_CTL),
	REGISTER_AB(GLB_CTL),
	/* FATAL_INTR_KER and FATAL_INTR_CHAR are partly RC */
	REGISTER_BZ(DP_CTRL),
	REGISTER_AZ(MEM_STAT),
	REGISTER_AZ(CS_DEBUG),
	REGISTER_AZ(ALTERA_BUILD),
	REGISTER_AZ(CSR_SPARE),
	REGISTER_AB(PCIE_SD_CTL0123),
	REGISTER_AB(PCIE_SD_CTL45),
	REGISTER_AB(PCIE_PCS_CTL_STAT),
	/* DEBUG_DATA_OUT is not used */
	/* DRV_EV is WO */
	REGISTER_AZ(EVQ_CTL),
	REGISTER_AZ(EVQ_CNT1),
	REGISTER_AZ(EVQ_CNT2),
	REGISTER_AZ(BUF_TBL_CFG),
	REGISTER_AZ(SRM_RX_DC_CFG),
	REGISTER_AZ(SRM_TX_DC_CFG),
	REGISTER_AZ(SRM_CFG),
	/* BUF_TBL_UPD is WO */
	REGISTER_AZ(SRM_UPD_EVQ),
	REGISTER_AZ(SRAM_PARITY),
	REGISTER_AZ(RX_CFG),
	REGISTER_BZ(RX_FILTER_CTL),
	/* RX_FLUSH_DESCQ is WO */
	REGISTER_AZ(RX_DC_CFG),
	REGISTER_AZ(RX_DC_PF_WM),
	REGISTER_BZ(RX_RSS_TKEY),
	/* RX_NODESC_DROP is RC */
	REGISTER_AA(RX_SELF_RST),
	/* RX_DEBUG, RX_PUSH_DROP are not used */
	REGISTER_CZ(RX_RSS_IPV6_REG1),
	REGISTER_CZ(RX_RSS_IPV6_REG2),
	REGISTER_CZ(RX_RSS_IPV6_REG3),
	/* TX_FLUSH_DESCQ is WO */
	REGISTER_AZ(TX_DC_CFG),
	REGISTER_AA(TX_CHKSM_CFG),
	REGISTER_AZ(TX_CFG),
	/* TX_PUSH_DROP is not used */
	REGISTER_AZ(TX_RESERVED),
	REGISTER_BZ(TX_PACE),
	/* TX_PACE_DROP_QID is RC */
	REGISTER_BB(TX_VLAN),
	REGISTER_BZ(TX_IPFIL_PORTEN),
	REGISTER_AB(MD_TXD),
	REGISTER_AB(MD_RXD),
	REGISTER_AB(MD_CS),
	REGISTER_AB(MD_PHY_ADR),
	REGISTER_AB(MD_ID),
	/* MD_STAT is RC */
	REGISTER_AB(MAC_STAT_DMA),
	REGISTER_AB(MAC_CTRL),
	REGISTER_BB(GEN_MODE),
	REGISTER_AB(MAC_MC_HASH_REG0),
	REGISTER_AB(MAC_MC_HASH_REG1),
	REGISTER_AB(GM_CFG1),
	REGISTER_AB(GM_CFG2),
	/* GM_IPG and GM_HD are not used */
	REGISTER_AB(GM_MAX_FLEN),
	/* GM_TEST is not used */
	REGISTER_AB(GM_ADR1),
	REGISTER_AB(GM_ADR2),
	REGISTER_AB(GMF_CFG0),
	REGISTER_AB(GMF_CFG1),
	REGISTER_AB(GMF_CFG2),
	REGISTER_AB(GMF_CFG3),
	REGISTER_AB(GMF_CFG4),
	REGISTER_AB(GMF_CFG5),
	REGISTER_BB(TX_SRC_MAC_CTL),
	REGISTER_AB(XM_ADR_LO),
	REGISTER_AB(XM_ADR_HI),
	REGISTER_AB(XM_GLB_CFG),
	REGISTER_AB(XM_TX_CFG),
	REGISTER_AB(XM_RX_CFG),
	REGISTER_AB(XM_MGT_INT_MASK),
	REGISTER_AB(XM_FC),
	REGISTER_AB(XM_PAUSE_TIME),
	REGISTER_AB(XM_TX_PARAM),
	REGISTER_AB(XM_RX_PARAM),
	/* XM_MGT_INT_MSK (note no 'A') is RC */
	REGISTER_AB(XX_PWR_RST),
	REGISTER_AB(XX_SD_CTL),
	REGISTER_AB(XX_TXDRV_CTL),
	/* XX_PRBS_CTL, XX_PRBS_CHK and XX_PRBS_ERR are not used */
	/* XX_CORE_STAT is partly RC */
};

struct efx_nic_reg_table {
	u32 offset:24;
	u32 min_revision:2, max_revision:2;
	u32 step:6, rows:21;
};

#define REGISTER_TABLE_DIMENSIONS(_, offset, min_rev, max_rev, step, rows) { \
	offset,								\
	REGISTER_REVISION_ ## min_rev, REGISTER_REVISION_ ## max_rev,	\
	step, rows							\
}
#define REGISTER_TABLE(name, min_rev, max_rev)				\
	REGISTER_TABLE_DIMENSIONS(					\
		name, FR_ ## min_rev ## max_rev ## _ ## name,		\
		min_rev, max_rev,					\
		FR_ ## min_rev ## max_rev ## _ ## name ## _STEP,	\
		FR_ ## min_rev ## max_rev ## _ ## name ## _ROWS)
#define REGISTER_TABLE_AA(name) REGISTER_TABLE(name, A, A)
#define REGISTER_TABLE_AZ(name) REGISTER_TABLE(name, A, Z)
#define REGISTER_TABLE_BB(name) REGISTER_TABLE(name, B, B)
#define REGISTER_TABLE_BZ(name) REGISTER_TABLE(name, B, Z)
#define REGISTER_TABLE_BB_CZ(name)					\
	REGISTER_TABLE_DIMENSIONS(name, FR_BZ_ ## name, B, B,		\
				  FR_BZ_ ## name ## _STEP,		\
				  FR_BB_ ## name ## _ROWS),		\
	REGISTER_TABLE_DIMENSIONS(name, FR_BZ_ ## name, C, Z,		\
				  FR_BZ_ ## name ## _STEP,		\
				  FR_CZ_ ## name ## _ROWS)
#define REGISTER_TABLE_CZ(name) REGISTER_TABLE(name, C, Z)

static const struct efx_nic_reg_table efx_nic_reg_tables[] = {
	/* DRIVER is not used */
	/* EVQ_RPTR, TIMER_COMMAND, USR_EV and {RX,TX}_DESC_UPD are WO */
	REGISTER_TABLE_BB(TX_IPFIL_TBL),
	REGISTER_TABLE_BB(TX_SRC_MAC_TBL),
	REGISTER_TABLE_AA(RX_DESC_PTR_TBL_KER),
	REGISTER_TABLE_BB_CZ(RX_DESC_PTR_TBL),
	REGISTER_TABLE_AA(TX_DESC_PTR_TBL_KER),
	REGISTER_TABLE_BB_CZ(TX_DESC_PTR_TBL),
	REGISTER_TABLE_AA(EVQ_PTR_TBL_KER),
	REGISTER_TABLE_BB_CZ(EVQ_PTR_TBL),
	/* The register buffer is allocated with slab, so we can't
	 * reasonably read all of the buffer table (up to 8MB!).
	 * However this driver will only use a few entries.  Reading
	 * 1K entries allows for some expansion of queue count and
	 * size before we need to change the version. */
	REGISTER_TABLE_DIMENSIONS(BUF_FULL_TBL_KER, FR_AA_BUF_FULL_TBL_KER,
				  A, A, 8, 1024),
	REGISTER_TABLE_DIMENSIONS(BUF_FULL_TBL, FR_BZ_BUF_FULL_TBL,
				  B, Z, 8, 1024),
	/* RX_FILTER_TBL{0,1} is huge and not used by this driver */
	REGISTER_TABLE_CZ(RX_MAC_FILTER_TBL0),
	REGISTER_TABLE_BB_CZ(TIMER_TBL),
	REGISTER_TABLE_BB_CZ(TX_PACE_TBL),
	REGISTER_TABLE_BZ(RX_INDIRECTION_TBL),
	/* TX_FILTER_TBL0 is huge and not used by this driver */
	REGISTER_TABLE_CZ(TX_MAC_FILTER_TBL0),
	REGISTER_TABLE_CZ(MC_TREG_SMEM),
	/* MSIX_PBA_TABLE is not mapped */
	/* SRM_DBG is not mapped (and is redundant with BUF_FULL_TBL) */
};

size_t efx_nic_get_regs_len(struct efx_nic *efx)
{
	const struct efx_nic_reg *reg;
	const struct efx_nic_reg_table *table;
	size_t len = 0;

	for (reg = efx_nic_regs;
	     reg < efx_nic_regs + ARRAY_SIZE(efx_nic_regs);
	     reg++)
		if (efx->type->revision >= reg->min_revision &&
		    efx->type->revision <= reg->max_revision)
			len += sizeof(efx_oword_t);

	for (table = efx_nic_reg_tables;
	     table < efx_nic_reg_tables + ARRAY_SIZE(efx_nic_reg_tables);
	     table++)
		if (efx->type->revision >= table->min_revision &&
		    efx->type->revision <= table->max_revision)
			len += table->rows * min_t(size_t, table->step, 16);

	return len;
}

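/* Worked example (illustrative): each matching single register adds
 * sizeof(efx_oword_t) = 16 bytes, and a 1024-row table with step 8 adds
 * 1024 * min(8, 16) = 8192 bytes, matching the read loop in
 * efx_nic_get_regs() below.
 */
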
void efx_nic_get_regs(struct efx_nic *efx, void *buf)
{
	const struct efx_nic_reg *reg;
	const struct efx_nic_reg_table *table;

	for (reg = efx_nic_regs;
	     reg < efx_nic_regs + ARRAY_SIZE(efx_nic_regs);
	     reg++) {
		if (efx->type->revision >= reg->min_revision &&
		    efx->type->revision <= reg->max_revision) {
			efx_reado(efx, (efx_oword_t *)buf, reg->offset);
			buf += sizeof(efx_oword_t);
		}
	}

	for (table = efx_nic_reg_tables;
	     table < efx_nic_reg_tables + ARRAY_SIZE(efx_nic_reg_tables);
	     table++) {
		size_t size, i;

		if (!(efx->type->revision >= table->min_revision &&
		      efx->type->revision <= table->max_revision))
			continue;

		size = min_t(size_t, table->step, 16);

		for (i = 0; i < table->rows; i++) {
			switch (table->step) {
			case 4: /* 32-bit register or SRAM */
				efx_readd_table(efx, buf, table->offset, i);
				break;
			case 8: /* 64-bit SRAM */
				efx_sram_readq(efx,
					       efx->membase + table->offset,
					       buf, i);
				break;
			case 16: /* 128-bit register */
				efx_reado_table(efx, buf, table->offset, i);
				break;
			case 32: /* 128-bit register, interleaved */
				efx_reado_table(efx, buf, table->offset, 2 * i);