2 * This file is part of the Chelsio T4 Ethernet driver for Linux.
4 * Copyright (c) 2003-2016 Chelsio Communications, Inc. All rights reserved.
6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenIB.org BSD license below:
12 * Redistribution and use in source and binary forms, with or
13 * without modification, are permitted provided that the following
16 * - Redistributions of source code must retain the above
17 * copyright notice, this list of conditions and the following
20 * - Redistributions in binary form must reproduce the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer in the documentation and/or other materials
23 * provided with the distribution.
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
35 #include <linux/delay.h>
38 #include "t4_values.h"
40 #include "t4fw_version.h"
43 * t4_wait_op_done_val - wait until an operation is completed
44 * @adapter: the adapter performing the operation
45 * @reg: the register to check for completion
46 * @mask: a single-bit field within @reg that indicates completion
47 * @polarity: the value of the field when the operation is completed
48 * @attempts: number of check iterations
49 * @delay: delay in usecs between iterations
50 * @valp: where to store the value of the register at completion time
52 * Wait until an operation is completed by checking a bit in a register
53 * up to @attempts times. If @valp is not NULL the value of the register
54 * at the time it indicated completion is stored there. Returns 0 if the
55 * operation completes and -EAGAIN otherwise.
57 static int t4_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
58 int polarity, int attempts, int delay, u32 *valp)
61 u32 val = t4_read_reg(adapter, reg);
/* Completion is signalled by the masked bit matching @polarity. */
63 if (!!(val & mask) == polarity) {
/* NOTE(review): the remainder of the polling loop (store to @valp,
 * delay/retry, -EAGAIN return) is absent from this excerpt -- original
 * line numbering jumps from 63 to 75; verify against the full source.
 */
/* Convenience wrapper around t4_wait_op_done_val() for callers that do
 * not need the final register value (NULL @valp).
 */
75 static inline int t4_wait_op_done(struct adapter *adapter, int reg, u32 mask,
76 int polarity, int attempts, int delay)
78 return t4_wait_op_done_val(adapter, reg, mask, polarity, attempts,
/* NOTE(review): continuation of the call (line 79 in the original) is
 * missing from this excerpt.
 */
83 * t4_set_reg_field - set a register field to a value
84 * @adapter: the adapter to program
85 * @addr: the register address
86 * @mask: specifies the portion of the register to modify
87 * @val: the new value for the register field
89 * Sets a register field specified by the supplied mask to the
92 void t4_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask,
/* NOTE(review): final parameter of the signature (original line ~93) is
 * missing from this excerpt.
 */
/* Read-modify-write: clear the masked field, then OR in the new value. */
95 u32 v = t4_read_reg(adapter, addr) & ~mask;
97 t4_write_reg(adapter, addr, v | val);
/* Read-back forces the posted write to reach the device before return. */
98 (void) t4_read_reg(adapter, addr); /* flush */
102 * t4_read_indirect - read indirectly addressed registers
104 * @addr_reg: register holding the indirect address
105 * @data_reg: register holding the value of the indirect register
106 * @vals: where the read register values are stored
107 * @nregs: how many indirect registers to read
108 * @start_idx: index of first indirect register to read
110 * Reads registers that are accessed indirectly through an address/data
113 void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
114 unsigned int data_reg, u32 *vals,
115 unsigned int nregs, unsigned int start_idx)
/* Select the indirect register, then read its value via the data reg.
 * NOTE(review): the enclosing loop over @nregs (original lines 116-117,
 * 120-121) is missing from this excerpt.
 */
118 t4_write_reg(adap, addr_reg, start_idx);
119 *vals++ = t4_read_reg(adap, data_reg);
125 * t4_write_indirect - write indirectly addressed registers
127 * @addr_reg: register holding the indirect addresses
128 * @data_reg: register holding the value for the indirect registers
129 * @vals: values to write
130 * @nregs: how many indirect registers to write
131 * @start_idx: address of first indirect register to write
133 * Writes a sequential block of registers that are accessed indirectly
134 * through an address/data register pair.
136 void t4_write_indirect(struct adapter *adap, unsigned int addr_reg,
137 unsigned int data_reg, const u32 *vals,
138 unsigned int nregs, unsigned int start_idx)
/* Select the target register, then write the value via the data reg.
 * NOTE(review): the enclosing loop over @nregs (original lines 139-140)
 * is missing from this excerpt.
 */
141 t4_write_reg(adap, addr_reg, start_idx++);
142 t4_write_reg(adap, data_reg, *vals++);
147 * Read a 32-bit PCI Configuration Space register via the PCI-E backdoor
148 * mechanism. This guarantees that we get the real value even if we're
149 * operating within a Virtual Machine and the Hypervisor is trapping our
150 * Configuration Space accesses.
152 void t4_hw_pci_read_cfg4(struct adapter *adap, int reg, u32 *val)
/* Build the backdoor request: this PF's function number + target reg. */
154 u32 req = FUNCTION_V(adap->pf) | REGISTER_V(reg);
/* Chip-revision-specific request flags.
 * NOTE(review): the bodies of both conditionals (original lines 157-163)
 * are missing from this excerpt -- presumably they OR enable/localcfg
 * flags into req; confirm against the full source.
 */
156 if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
161 if (is_t4(adap->params.chip))
/* Issue the request and fetch the returned config-space value. */
164 t4_write_reg(adap, PCIE_CFG_SPACE_REQ_A, req)
165 *val = t4_read_reg(adap, PCIE_CFG_SPACE_DATA_A);
167 /* Reset ENABLE to 0 so reads of PCIE_CFG_SPACE_DATA won't cause a
168 * Configuration Space read. (None of the other fields matter when
169 * ENABLE is 0 so a simple register write is easier than a
170 * read-modify-write via t4_set_reg_field().)
172 t4_write_reg(adap, PCIE_CFG_SPACE_REQ_A, 0);
176 * t4_report_fw_error - report firmware error
179 * The adapter firmware can indicate error conditions to the host.
180 * If the firmware has indicated an error, print out the reason for
181 * the firmware error.
183 static void t4_report_fw_error(struct adapter *adap)
/* Human-readable strings indexed by the PCIE_FW_EVAL field value. */
185 static const char *const reason[] = {
186 "Crash", /* PCIE_FW_EVAL_CRASH */
187 "During Device Preparation", /* PCIE_FW_EVAL_PREP */
188 "During Device Configuration", /* PCIE_FW_EVAL_CONF */
189 "During Device Initialization", /* PCIE_FW_EVAL_INIT */
190 "Unexpected Event", /* PCIE_FW_EVAL_UNEXPECTEDEVENT */
191 "Insufficient Airflow", /* PCIE_FW_EVAL_OVERHEAT */
192 "Device Shutdown", /* PCIE_FW_EVAL_DEVICESHUTDOWN */
193 "Reserved", /* reserved */
197 pcie_fw = t4_read_reg(adap, PCIE_FW_A);
/* Only report when the firmware has latched the error bit. */
198 if (pcie_fw & PCIE_FW_ERR_F) {
199 dev_err(adap->pdev_dev, "Firmware reports adapter error: %s\n",
200 reason[PCIE_FW_EVAL_G(pcie_fw)]);
/* Mark the firmware as no longer usable so callers stop issuing cmds. */
201 adap->flags &= ~CXGB4_FW_OK;
206 * Get the reply to a mailbox command and store it in @rpl in big-endian order.
208 static void get_mbox_rpl(struct adapter *adap, __be64 *rpl, int nflit,
/* Copy @nflit 64-bit flits from the mailbox registers, advancing the
 * mailbox address by 8 bytes per flit; values are stored big-endian.
 */
211 for ( ; nflit; nflit--, mbox_addr += 8)
212 *rpl++ = cpu_to_be64(t4_read_reg64(adap, mbox_addr));
216 * Handle a FW assertion reported in a mailbox.
218 static void fw_asrt(struct adapter *adap, u32 mbox_addr)
220 struct fw_debug_cmd asrt;
/* Pull the FW_DEBUG_CMD payload out of the mailbox and log the
 * assertion's source location and the two diagnostic values.
 */
222 get_mbox_rpl(adap, (__be64 *)&asrt, sizeof(asrt) / 8, mbox_addr);
223 dev_alert(adap->pdev_dev,
224 "FW assertion at %.16s:%u, val0 %#x, val1 %#x\n",
225 asrt.u.assert.filename_0_7, be32_to_cpu(asrt.u.assert.line),
226 be32_to_cpu(asrt.u.assert.x), be32_to_cpu(asrt.u.assert.y));
230 * t4_record_mbox - record a Firmware Mailbox Command/Reply in the log
231 * @adapter: the adapter
232 * @cmd: the Firmware Mailbox Command or Reply
233 * @size: command length in bytes
234 * @access: the time (ms) needed to access the Firmware Mailbox
235 * @execute: the time (ms) the command spent being executed
237 static void t4_record_mbox(struct adapter *adapter,
238 const __be64 *cmd, unsigned int size,
239 int access, int execute)
241 struct mbox_cmd_log *log = adapter->mbox_log;
242 struct mbox_cmd *entry;
/* Claim the next slot in the circular log; cursor wraps at log->size.
 * NOTE(review): the wrap assignment (original line ~247) is missing
 * from this excerpt.
 */
245 entry = mbox_cmd_log_entry(log, log->cursor++);
246 if (log->cursor == log->size)
/* Copy the command flits as host-endian u64s; remaining slots up to
 * MBOX_LEN are presumably zero-filled (body of the while is absent here).
 */
249 for (i = 0; i < size / 8; i++)
250 entry->cmd[i] = be64_to_cpu(cmd[i]);
251 while (i < MBOX_LEN / 8)
253 entry->timestamp = jiffies;
254 entry->seqno = log->seqno++;
255 entry->access = access;
256 entry->execute = execute;
260 * t4_wr_mbox_meat_timeout - send a command to FW through the given mailbox
262 * @mbox: index of the mailbox to use
263 * @cmd: the command to write
264 * @size: command length in bytes
265 * @rpl: where to optionally store the reply
266 * @sleep_ok: if true we may sleep while awaiting command completion
267 * @timeout: time to wait for command to finish before timing out
269 * Sends the given command to FW through the selected mailbox and waits
270 * for the FW to execute the command. If @rpl is not %NULL it is used to
271 * store the FW's reply to the command. The command and its optional
272 * reply are of the same length. FW can take up to %FW_CMD_MAX_TIMEOUT ms
273 * to respond. @sleep_ok determines whether we may sleep while awaiting
274 * the response. If sleeping is allowed we use progressive backoff
277 * The return value is 0 on success or a negative errno on failure. A
278 * failure can happen either because we are not able to execute the
279 * command or FW executes it but signals an error. In the latter case
280 * the return value is the error code indicated by FW (negated).
 *
 * NOTE(review): this excerpt is missing many interior lines (original
 * numbering jumps, e.g. 341->345, 384->390, 426->430); do not modify
 * without the complete source.
282 int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd,
283 int size, void *rpl, bool sleep_ok, int timeout)
/* Progressive backoff schedule (ms); the last entry repeats. */
285 static const int delay[] = {
286 1, 1, 3, 5, 10, 10, 20, 50, 100, 200
289 struct mbox_list entry;
294 int i, ms, delay_idx, ret;
295 const __be64 *p = cmd;
/* Per-mailbox data/control register addresses for this PF. */
296 u32 data_reg = PF_REG(mbox, CIM_PF_MAILBOX_DATA_A);
297 u32 ctl_reg = PF_REG(mbox, CIM_PF_MAILBOX_CTRL_A);
298 __be64 cmd_rpl[MBOX_LEN / 8];
/* Commands must be a multiple of 16 bytes and fit in the mailbox. */
301 if ((size & 15) || size > MBOX_LEN)
305 * If the device is off-line, as in EEH, commands will time out.
306 * Fail them early so we don't waste time waiting.
308 if (adap->pdev->error_state != pci_channel_io_normal)
311 /* If we have a negative timeout, that implies that we can't sleep. */
317 /* Queue ourselves onto the mailbox access list. When our entry is at
318 * the front of the list, we have rights to access the mailbox. So we
319 * wait [for a while] till we're at the front [or bail out with an
322 spin_lock_bh(&adap->mbox_lock);
323 list_add_tail(&entry.list, &adap->mlist.list);
324 spin_unlock_bh(&adap->mbox_lock);
329 for (i = 0; ; i += ms) {
330 /* If we've waited too long, return a busy indication. This
331 * really ought to be based on our initial position in the
332 * mailbox access list but this is a start. We very rarely
333 * contend on access to the mailbox ...
335 pcie_fw = t4_read_reg(adap, PCIE_FW_A);
336 if (i > FW_CMD_MAX_TIMEOUT || (pcie_fw & PCIE_FW_ERR_F)) {
/* Dequeue ourselves before bailing out. */
337 spin_lock_bh(&adap->mbox_lock);
338 list_del(&entry.list);
339 spin_unlock_bh(&adap->mbox_lock);
340 ret = (pcie_fw & PCIE_FW_ERR_F) ? -ENXIO : -EBUSY;
341 t4_record_mbox(adap, cmd, size, access, ret);
345 /* If we're at the head, break out and start the mailbox
348 if (list_first_entry(&adap->mlist.list, struct mbox_list,
352 /* Delay for a bit before checking again ... */
354 ms = delay[delay_idx]; /* last element may repeat */
355 if (delay_idx < ARRAY_SIZE(delay) - 1)
363 /* Loop trying to get ownership of the mailbox. Return an error
364 * if we can't gain ownership.
366 v = MBOWNER_G(t4_read_reg(adap, ctl_reg));
367 for (i = 0; v == MBOX_OWNER_NONE && i < 3; i++)
368 v = MBOWNER_G(t4_read_reg(adap, ctl_reg));
369 if (v != MBOX_OWNER_DRV) {
370 spin_lock_bh(&adap->mbox_lock);
371 list_del(&entry.list);
372 spin_unlock_bh(&adap->mbox_lock);
373 ret = (v == MBOX_OWNER_FW) ? -EBUSY : -ETIMEDOUT;
374 t4_record_mbox(adap, cmd, size, access, ret);
378 /* Copy in the new mailbox command and send it on its way ... */
379 t4_record_mbox(adap, cmd, size, access, 0);
380 for (i = 0; i < size; i += 8)
381 t4_write_reg64(adap, data_reg + i, be64_to_cpu(*p++));
/* Hand the mailbox to the firmware and flush the posted write. */
383 t4_write_reg(adap, ctl_reg, MBMSGVALID_F | MBOWNER_V(MBOX_OWNER_FW));
384 t4_read_reg(adap, ctl_reg); /* flush write */
/* Wait for the reply, backing off between polls; abort early on a
 * latched firmware error.
 */
390 !((pcie_fw = t4_read_reg(adap, PCIE_FW_A)) & PCIE_FW_ERR_F) &&
394 ms = delay[delay_idx]; /* last element may repeat */
395 if (delay_idx < ARRAY_SIZE(delay) - 1)
401 v = t4_read_reg(adap, ctl_reg);
402 if (MBOWNER_G(v) == MBOX_OWNER_DRV) {
403 if (!(v & MBMSGVALID_F)) {
404 t4_write_reg(adap, ctl_reg, 0);
/* Snapshot the whole reply, then decode the header flit. */
408 get_mbox_rpl(adap, cmd_rpl, MBOX_LEN / 8, data_reg);
409 res = be64_to_cpu(cmd_rpl[0]);
/* A FW_DEBUG_CMD in the reply slot is a firmware assertion, not a
 * normal reply; log it and treat the command as failed with EIO.
 */
411 if (FW_CMD_OP_G(res >> 32) == FW_DEBUG_CMD) {
412 fw_asrt(adap, data_reg);
413 res = FW_CMD_RETVAL_V(EIO);
415 memcpy(rpl, cmd_rpl, size);
418 t4_write_reg(adap, ctl_reg, 0);
421 t4_record_mbox(adap, cmd_rpl,
422 MBOX_LEN, access, execute);
423 spin_lock_bh(&adap->mbox_lock);
424 list_del(&entry.list);
425 spin_unlock_bh(&adap->mbox_lock);
/* Negate the firmware's return value to the kernel's -errno style. */
426 return -FW_CMD_RETVAL_G((int)res);
/* Timeout / firmware-error fallout path. */
430 ret = (pcie_fw & PCIE_FW_ERR_F) ? -ENXIO : -ETIMEDOUT;
431 t4_record_mbox(adap, cmd, size, access, ret);
432 dev_err(adap->pdev_dev, "command %#x in mailbox %d timed out\n",
433 *(const u8 *)cmd, mbox);
434 t4_report_fw_error(adap);
435 spin_lock_bh(&adap->mbox_lock);
436 list_del(&entry.list);
437 spin_unlock_bh(&adap->mbox_lock);
/* Wrapper around t4_wr_mbox_meat_timeout() using the default timeout
 * (continuation with the timeout argument is absent from this excerpt).
 */
442 int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
443 void *rpl, bool sleep_ok)
445 return t4_wr_mbox_meat_timeout(adap, mbox, cmd, size, rpl, sleep_ok,
/* Dump EDC ECC error address and BIST status data for EDC controller
 * @idx (0 or 1). Not supported on T4 hardware.
 */
449 static int t4_edc_err_read(struct adapter *adap, int idx)
451 u32 edc_ecc_err_addr_reg;
/* T4 lacks the T5-style EDC registers used below; bail out. */
454 if (is_t4(adap->params.chip)) {
455 CH_WARN(adap, "%s: T4 NOT supported.\n", __func__);
/* Only two EDC controllers exist. */
458 if (idx != 0 && idx != 1) {
459 CH_WARN(adap, "%s: idx %d NOT supported.\n", __func__, idx);
463 edc_ecc_err_addr_reg = EDC_T5_REG(EDC_H_ECC_ERR_ADDR_A, idx);
464 rdata_reg = EDC_T5_REG(EDC_H_BIST_STATUS_RDATA_A, idx);
467 "edc%d err addr 0x%x: 0x%x.\n",
468 idx, edc_ecc_err_addr_reg,
469 t4_read_reg(adap, edc_ecc_err_addr_reg));
/* Dump the nine consecutive 64-bit BIST status data words. */
471 "bist: 0x%x, status %llx %llx %llx %llx %llx %llx %llx %llx %llx.\n",
473 (unsigned long long)t4_read_reg64(adap, rdata_reg),
474 (unsigned long long)t4_read_reg64(adap, rdata_reg + 8),
475 (unsigned long long)t4_read_reg64(adap, rdata_reg + 16),
476 (unsigned long long)t4_read_reg64(adap, rdata_reg + 24),
477 (unsigned long long)t4_read_reg64(adap, rdata_reg + 32),
478 (unsigned long long)t4_read_reg64(adap, rdata_reg + 40),
479 (unsigned long long)t4_read_reg64(adap, rdata_reg + 48),
480 (unsigned long long)t4_read_reg64(adap, rdata_reg + 56),
481 (unsigned long long)t4_read_reg64(adap, rdata_reg + 64));
487 * t4_memory_rw_init - Get memory window relative offset, base, and size.
489 * @win: PCI-E Memory Window to use
490 * @mtype: memory type: MEM_EDC0, MEM_EDC1, MEM_HMA or MEM_MC
491 * @mem_off: memory relative offset with respect to @mtype.
492 * @mem_base: configured memory base address.
493 * @mem_aperture: configured memory window aperture.
495 * Get the configured memory window's relative offset, base, and size.
497 int t4_memory_rw_init(struct adapter *adap, int win, int mtype, u32 *mem_off,
498 u32 *mem_base, u32 *mem_aperture)
500 u32 edc_size, mc_size, mem_reg;
502 /* Offset into the region of memory which is being accessed
505 * MEM_MC = 2 -- MEM_MC for chips with only 1 memory controller
506 * MEM_MC1 = 3 -- for chips with 2 memory controllers (e.g. T5)
/* EDC0 size (in MB units per the BAR field) determines where each
 * region starts in the flat adapter memory map.
 */
509 edc_size = EDRAM0_SIZE_G(t4_read_reg(adap, MA_EDRAM0_BAR_A));
510 if (mtype == MEM_HMA) {
511 *mem_off = 2 * (edc_size * 1024 * 1024);
512 } else if (mtype != MEM_MC1) {
513 *mem_off = (mtype * (edc_size * 1024 * 1024));
/* MEM_MC1 additionally skips past the first MC region. */
515 mc_size = EXT_MEM0_SIZE_G(t4_read_reg(adap,
516 MA_EXT_MEMORY0_BAR_A));
517 *mem_off = (MEM_MC0 * edc_size + mc_size) * 1024 * 1024;
520 /* Each PCI-E Memory Window is programmed with a window size -- or
521 * "aperture" -- which controls the granularity of its mapping onto
522 * adapter memory. We need to grab that aperture in order to know
523 * how to use the specified window. The window is also programmed
524 * with the base address of the Memory Window in BAR0's address
525 * space. For T4 this is an absolute PCI-E Bus Address. For T5
526 * the address is relative to BAR0.
528 mem_reg = t4_read_reg(adap,
529 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A,
531 /* a dead adapter will return 0xffffffff for PIO reads */
532 if (mem_reg == 0xffffffff)
/* Decode aperture size and window base from the register fields. */
535 *mem_aperture = 1 << (WINDOW_G(mem_reg) + WINDOW_SHIFT_X);
536 *mem_base = PCIEOFST_G(mem_reg) << PCIEOFST_SHIFT_X;
/* T4 programmed an absolute bus address; convert to BAR0-relative. */
537 if (is_t4(adap->params.chip))
538 *mem_base -= adap->t4_bar0;
544 * t4_memory_update_win - Move memory window to specified address.
546 * @win: PCI-E Memory Window to use
547 * @addr: location to move.
549 * Move memory window to specified address.
551 void t4_memory_update_win(struct adapter *adap, int win, u32 addr)
/* Reprogram the window's OFFSET register to point at @addr.
 * NOTE(review): the t4_write_reg()/t4_read_reg() call lines themselves
 * (original 552-553, 558-559) are missing from this excerpt; only the
 * register-argument lines remain.
 */
554 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A, win),
556 /* Read it back to ensure that changes propagate before we
557 * attempt to use the new value.
560 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A, win));
564 * t4_memory_rw_residual - Read/Write residual data.
566 * @off: relative offset within residual to start read/write.
567 * @addr: address within indicated memory type.
568 * @buf: host memory buffer
569 * @dir: direction of transfer T4_MEMORY_READ (1) or T4_MEMORY_WRITE (0)
571 * Read/Write residual data less than 32-bits.
573 void t4_memory_rw_residual(struct adapter *adap, u32 off, u32 addr, u8 *buf,
/* Transfer the trailing 1-3 bytes through a word/byte union ("last"),
 * so the hardware access is still a full 32-bit register operation.
 * NOTE(review): the union declaration (original lines ~574-582) is
 * missing from this excerpt.
 */
583 if (dir == T4_MEMORY_READ) {
584 last.word = le32_to_cpu((__force __le32)
585 t4_read_reg(adap, addr));
586 for (bp = (unsigned char *)buf, i = off; i < 4; i++)
587 bp[i] = last.byte[i];
/* Write path: populate the union's bytes, then store the whole word. */
590 for (i = off; i < 4; i++)
592 t4_write_reg(adap, addr,
593 (__force u32)cpu_to_le32(last.word));
598 * t4_memory_rw - read/write EDC 0, EDC 1 or MC via PCIE memory window
600 * @win: PCI-E Memory Window to use
601 * @mtype: memory type: MEM_EDC0, MEM_EDC1 or MEM_MC
602 * @addr: address within indicated memory type
603 * @len: amount of memory to transfer
604 * @hbuf: host memory buffer
605 * @dir: direction of transfer T4_MEMORY_READ (1) or T4_MEMORY_WRITE (0)
607 * Reads/writes an [almost] arbitrary memory region in the firmware: the
608 * firmware memory address and host buffer must be aligned on 32-bit
609 * boundaries; the length may be arbitrary. The memory is transferred as
610 * a raw byte sequence from/to the firmware's memory. If this memory
611 * contains data structures which contain multi-byte integers, it's the
612 * caller's responsibility to perform appropriate byte order conversions.
 *
 * NOTE(review): interior lines are missing throughout this excerpt
 * (original numbering gaps); verify against the complete source before
 * making changes.
614 int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr,
615 u32 len, void *hbuf, int dir)
617 u32 pos, offset, resid, memoffset;
618 u32 win_pf, mem_aperture, mem_base;
622 /* Argument sanity checks ...
/* Both the adapter address and the host buffer must be 32-bit aligned. */
624 if (addr & 0x3 || (uintptr_t)hbuf & 0x3)
628 /* It's convenient to be able to handle lengths which aren't a
629 * multiple of 32-bits because we often end up transferring files to
630 * the firmware. So we'll handle that by normalizing the length here
631 * and then handling any residual transfer at the end.
/* Resolve window geometry and the region offset for @mtype. */
636 ret = t4_memory_rw_init(adap, win, mtype, &memoffset, &mem_base,
641 /* Determine the PCIE_MEM_ACCESS_OFFSET */
642 addr = addr + memoffset;
/* T5+ windows are qualified by the PF number; T4 windows are not. */
644 win_pf = is_t4(adap->params.chip) ? 0 : PFNUM_V(adap->pf);
646 /* Calculate our initial PCI-E Memory Window Position and Offset into
/* Align the window position down to an aperture boundary. */
649 pos = addr & ~(mem_aperture - 1);
652 /* Set up initial PCI-E Memory Window to cover the start of our
655 t4_memory_update_win(adap, win, pos | win_pf);
657 /* Transfer data to/from the adapter as long as there's an integral
658 * number of 32-bit transfers to complete.
660 * A note on Endianness issues:
662 * The "register" reads and writes below from/to the PCI-E Memory
663 * Window invoke the standard adapter Big-Endian to PCI-E Link
664 * Little-Endian "swizzel." As a result, if we have the following
665 * data in adapter memory:
667 * Memory: ... | b0 | b1 | b2 | b3 | ...
668 * Address: i+0 i+1 i+2 i+3
670 * Then a read of the adapter memory via the PCI-E Memory Window
675 * [ b3 | b2 | b1 | b0 ]
677 * If this value is stored into local memory on a Little-Endian system
678 * it will show up correctly in local memory as:
680 * ( ..., b0, b1, b2, b3, ... )
682 * But on a Big-Endian system, the store will show up in memory
683 * incorrectly swizzled as:
685 * ( ..., b3, b2, b1, b0, ... )
687 * So we need to account for this in the reads and writes to the
688 * PCI-E Memory Window below by undoing the register read/write
692 if (dir == T4_MEMORY_READ)
693 *buf++ = le32_to_cpu((__force __le32)t4_read_reg(adap,
696 t4_write_reg(adap, mem_base + offset,
697 (__force u32)cpu_to_le32(*buf++));
698 offset += sizeof(__be32);
699 len -= sizeof(__be32);
701 /* If we've reached the end of our current window aperture,
702 * move the PCI-E Memory Window on to the next. Note that
703 * doing this here after "len" may be 0 allows us to set up
704 * the PCI-E Memory Window for a possible final residual
707 if (offset == mem_aperture) {
710 t4_memory_update_win(adap, win, pos | win_pf);
714 /* If the original transfer had a length which wasn't a multiple of
715 * 32-bits, now's where we need to finish off the transfer of the
716 * residual amount. The PCI-E Memory Window has already been moved
717 * above (if necessary) to cover this final transfer.
720 t4_memory_rw_residual(adap, resid, mem_base + offset,
726 /* Return the specified PCI-E Configuration Space register from our Physical
727 * Function. We try first via a Firmware LDST Command since we prefer to let
728 * the firmware own all of these registers, but if that fails we go for it
729 * directly ourselves.
731 u32 t4_read_pcie_cfg4(struct adapter *adap, int reg)
733 u32 val, ldst_addrspace;
735 /* If fw_attach != 0, construct and send the Firmware LDST Command to
736 * retrieve the specified PCI-E Configuration Space register.
738 struct fw_ldst_cmd ldst_cmd;
/* Build the LDST command: FUNC_PCIE address space, one access, this
 * PF's function number, target register @reg.
 */
741 memset(&ldst_cmd, 0, sizeof(ldst_cmd));
742 ldst_addrspace = FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_FUNC_PCIE);
743 ldst_cmd.op_to_addrspace = cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) |
747 ldst_cmd.cycles_to_len16 = cpu_to_be32(FW_LEN16(ldst_cmd));
748 ldst_cmd.u.pcie.select_naccess = FW_LDST_CMD_NACCESS_V(1);
749 ldst_cmd.u.pcie.ctrl_to_fn =
750 (FW_LDST_CMD_LC_F | FW_LDST_CMD_FN_V(adap->pf));
751 ldst_cmd.u.pcie.r = reg;
753 /* If the LDST Command succeeds, return the result, otherwise
754 * fall through to reading it directly ourselves ...
756 ret = t4_wr_mbox(adap, adap->mbox, &ldst_cmd, sizeof(ldst_cmd),
759 val = be32_to_cpu(ldst_cmd.u.pcie.data[0]);
761 /* Read the desired Configuration Space register via the PCI-E
762 * Backdoor mechanism.
764 t4_hw_pci_read_cfg4(adap, reg, &val);
768 /* Get the window based on base passed to it.
769 * Window aperture is currently unhandled, but there is no use case for it
772 static u32 t4_get_window(struct adapter *adap, u32 pci_base, u64 pci_mask,
/* T4: the memory window decoder needs the absolute PCI-E bus address,
 * so read BAR0 via the hardware backdoor and add the window base.
 */
777 if (is_t4(adap->params.chip)) {
780 /* Truncation intentional: we only read the bottom 32-bits of
781 * the 64-bit BAR0/BAR1 ... We use the hardware backdoor
782 * mechanism to read BAR0 instead of using
783 * pci_resource_start() because we could be operating from
784 * within a Virtual Machine which is trapping our accesses to
785 * our Configuration Space and we need to set up the PCI-E
786 * Memory Window decoders with the actual addresses which will
787 * be coming across the PCI-E link.
789 bar0 = t4_read_pcie_cfg4(adap, pci_base);
/* Cache BAR0 so t4_memory_rw_init() can convert back to relative. */
791 adap->t4_bar0 = bar0;
793 ret = bar0 + memwin_base;
795 /* For T5, only relative offset inside the PCIe BAR is passed */
/* NOTE(review): the T5+ branch body and the return (original lines
 * 796-799) are missing from this excerpt.
 */
801 /* Get the default utility window (win0) used by everyone */
802 u32 t4_get_util_window(struct adapter *adap)
804 return t4_get_window(adap, PCI_BASE_ADDRESS_0,
805 PCI_BASE_ADDRESS_MEM_MASK, MEMWIN0_BASE);
808 /* Set up memory window for accessing adapter memory ranges. (Read
809 * back MA register to ensure that changes propagate before we attempt
810 * to use the new values.)
812 void t4_setup_memwin(struct adapter *adap, u32 memwin_base, u32 window)
/* Program the window's BASE_WIN register: base address, BIR 0, and the
 * log2 aperture size. NOTE(review): the t4_write_reg()/t4_read_reg()
 * call lines themselves (original 813-814, 818) are missing from this
 * excerpt.
 */
815 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, window),
816 memwin_base | BIR_V(0) |
817 WINDOW_V(ilog2(MEMWIN0_APERTURE) - WINDOW_SHIFT_X));
819 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, window));
823 * t4_get_regs_len - return the size of the chips register set
824 * @adapter: the adapter
826 * Returns the size of the chip's BAR0 register space.
828 unsigned int t4_get_regs_len(struct adapter *adapter)
830 unsigned int chip_version = CHELSIO_CHIP_VERSION(adapter->params.chip);
/* Per-generation register map sizes; case labels (original lines 833,
 * 835-837) are missing from this excerpt -- presumably T4 vs T5/T6.
 */
832 switch (chip_version) {
834 return T4_REGMAP_SIZE;
838 return T5_REGMAP_SIZE;
841 dev_err(adapter->pdev_dev,
842 "Unsupported chip version %d\n", chip_version);
847 * t4_get_regs - read chip registers into provided buffer
849 * @buf: register buffer
850 * @buf_size: size (in bytes) of register buffer
852 * If the provided register buffer isn't large enough for the chip's
853 * full register range, the register dump will be truncated to the
854 * register buffer's size.
856 void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
/* Dump tables are [start, end] register-address pairs per chip
 * generation. NOTE(review): the table contents are elided in this
 * excerpt (original numbering jumps 858->1317->2081->2640).
 */
858 static const unsigned int t4_reg_ranges[] = {
1317 static const unsigned int t5_reg_ranges[] = {
2081 static const unsigned int t6_reg_ranges[] = {
2640 u32 *buf_end = (u32 *)((char *)buf + buf_size);
2641 const unsigned int *reg_ranges;
2642 int reg_ranges_size, range;
2643 unsigned int chip_version = CHELSIO_CHIP_VERSION(adap->params.chip);
2645 /* Select the right set of register ranges to dump depending on the
2646 * adapter chip type.
2648 switch (chip_version) {
2650 reg_ranges = t4_reg_ranges;
2651 reg_ranges_size = ARRAY_SIZE(t4_reg_ranges);
2655 reg_ranges = t5_reg_ranges;
2656 reg_ranges_size = ARRAY_SIZE(t5_reg_ranges);
2660 reg_ranges = t6_reg_ranges;
2661 reg_ranges_size = ARRAY_SIZE(t6_reg_ranges);
2665 dev_err(adap->pdev_dev,
2666 "Unsupported chip version %d\n", chip_version);
2670 /* Clear the register buffer and insert the appropriate register
2671 * values selected by the above register ranges.
2673 memset(buf, 0, buf_size);
2674 for (range = 0; range < reg_ranges_size; range += 2) {
2675 unsigned int reg = reg_ranges[range];
2676 unsigned int last_reg = reg_ranges[range + 1];
/* Each register lands at its own address-offset within the buffer. */
2677 u32 *bufp = (u32 *)((char *)buf + reg);
2679 /* Iterate across the register range filling in the register
2680 * buffer but don't write past the end of the register buffer.
2682 while (reg <= last_reg && bufp < buf_end) {
2683 *bufp++ = t4_read_reg(adap, reg);
/* Serial EEPROM / VPD layout constants used by the VPD helpers below:
 * write-protect status word address, VPD start offsets (current and
 * legacy cards), and the amount of VPD data read.
 */
2689 #define EEPROM_STAT_ADDR 0x7bfc
2690 #define VPD_BASE 0x400
2691 #define VPD_BASE_OLD 0
2692 #define VPD_LEN 1024
2695 * t4_eeprom_ptov - translate a physical EEPROM address to virtual
2696 * @phys_addr: the physical EEPROM address
2697 * @fn: the PCI function number
2698 * @sz: size of function-specific area
2700 * Translate a physical EEPROM address to virtual. The first 1K is
2701 * accessed through virtual addresses starting at 31K, the rest is
2702 * accessed through virtual addresses starting at 0.
2704 * The mapping is as follows:
2705 * [0..1K) -> [31K..32K)
2706 * [1K..1K+A) -> [31K-A..31K)
2707 * [1K+A..ES) -> [0..ES-A-1K)
2709 * where A = @fn * @sz, and ES = EEPROM size.
2711 int t4_eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz)
/* NOTE(review): the line computing fn *= sz (original ~2713) is missing
 * from this excerpt; below, "fn" is used as the already-scaled A value.
 */
2714 if (phys_addr < 1024)
2715 return phys_addr + (31 << 10);
2716 if (phys_addr < 1024 + fn)
2717 return 31744 - fn + phys_addr - 1024;
2718 if (phys_addr < EEPROMSIZE)
2719 return phys_addr - 1024 - fn;
2724 * t4_seeprom_wp - enable/disable EEPROM write protection
2725 * @adapter: the adapter
2726 * @enable: whether to enable or disable write protection
2728 * Enables or disables write protection on the serial EEPROM.
2730 int t4_seeprom_wp(struct adapter *adapter, bool enable)
/* 0xc sets the write-protect bits in the EEPROM status word; 0 clears
 * them. Written through the PCI VPD interface.
 */
2732 unsigned int v = enable ? 0xc : 0;
2733 int ret = pci_write_vpd(adapter->pdev, EEPROM_STAT_ADDR, 4, &v);
/* pci_write_vpd() returns bytes written on success; normalize to 0. */
2734 return ret < 0 ? ret : 0;
2738 * t4_get_raw_vpd_params - read VPD parameters from VPD EEPROM
2739 * @adapter: adapter to read
2740 * @p: where to store the parameters
2742 * Reads card parameters stored in VPD EEPROM.
2744 int t4_get_raw_vpd_params(struct adapter *adapter, struct vpd_params *p)
2746 unsigned int id_len, pn_len, sn_len, na_len;
2747 int id, sn, pn, na, addr, ret = 0;
2748 u8 *vpd, base_val = 0;
2750 vpd = vmalloc(VPD_LEN);
2754 /* Card information normally starts at VPD_BASE but early cards had
/* Probe the first byte at VPD_BASE to distinguish new-layout cards
 * (ID-string tag present) from legacy ones starting at VPD_BASE_OLD.
 */
2757 ret = pci_read_vpd(adapter->pdev, VPD_BASE, 1, &base_val);
2761 addr = base_val == PCI_VPD_LRDT_ID_STRING ? VPD_BASE : VPD_BASE_OLD;
2763 ret = pci_read_vpd(adapter->pdev, addr, VPD_LEN, vpd);
/* Locate the ID string, validate the checksum, then find the standard
 * read-only keywords: serial number, part number, and "NA" (MAC addr).
 */
2767 ret = pci_vpd_find_id_string(vpd, VPD_LEN, &id_len);
2772 ret = pci_vpd_check_csum(vpd, VPD_LEN);
2774 dev_err(adapter->pdev_dev, "VPD checksum incorrect or missing\n");
2779 ret = pci_vpd_find_ro_info_keyword(vpd, VPD_LEN,
2780 PCI_VPD_RO_KEYWORD_SERIALNO, &sn_len);
2785 ret = pci_vpd_find_ro_info_keyword(vpd, VPD_LEN,
2786 PCI_VPD_RO_KEYWORD_PARTNO, &pn_len);
2791 ret = pci_vpd_find_ro_info_keyword(vpd, VPD_LEN, "NA", &na_len);
/* Copy each field into @p, truncating to the destination sizes. */
2796 memcpy(p->id, vpd + id, min_t(unsigned int, id_len, ID_LEN));
2798 memcpy(p->sn, vpd + sn, min_t(unsigned int, sn_len, SERNUM_LEN));
2800 memcpy(p->pn, vpd + pn, min_t(unsigned int, pn_len, PN_LEN));
2802 memcpy(p->na, vpd + na, min_t(unsigned int, na_len, MACADDR_LEN));
/* Common error exit: report and fall through to cleanup (the vfree and
 * return lines are missing from this excerpt).
 */
2808 dev_err(adapter->pdev_dev, "error reading VPD\n");
2816 * t4_get_vpd_params - read VPD parameters & retrieve Core Clock
2817 * @adapter: adapter to read
2818 * @p: where to store the parameters
2820 * Reads card parameters stored in VPD EEPROM and retrieves the Core
2821 * Clock. This can only be called after a connection to the firmware
2824 int t4_get_vpd_params(struct adapter *adapter, struct vpd_params *p)
2826 u32 cclk_param, cclk_val;
2829 /* Grab the raw VPD parameters.
2831 ret = t4_get_raw_vpd_params(adapter, p);
2835 /* Ask firmware for the Core Clock since it knows how to translate the
2836 * Reference Clock ('V2') VPD field into a Core Clock value ...
2838 cclk_param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
2839 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_CCLK));
2840 ret = t4_query_params(adapter, adapter->mbox, adapter->pf, 0,
2841 1, &cclk_param, &cclk_val);
/* NOTE(review): the tail of this function (storing cclk_val into @p and
 * the return, original lines 2842-2847) is missing from this excerpt.
 */
2851 * t4_get_pfres - retrieve VF resource limits
2852 * @adapter: the adapter
2854 * Retrieves configured resource limits and capabilities for a physical
2855 * function. The results are stored in @adapter->pfres.
2857 int t4_get_pfres(struct adapter *adapter)
2859 struct pf_resources *pfres = &adapter->params.pfres;
2860 struct fw_pfvf_cmd cmd, rpl;
2864 /* Execute PFVF Read command to get VF resource limits; bail out early
2865 * with error on command failure.
2867 memset(&cmd, 0, sizeof(cmd));
2868 cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_PFVF_CMD) |
2871 FW_PFVF_CMD_PFN_V(adapter->pf) |
2872 FW_PFVF_CMD_VFN_V(0))
2873 cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
2874 v = t4_wr_mbox(adapter, adapter->mbox, &cmd, sizeof(cmd), &rpl);
2875 if (v != FW_SUCCESS)
2878 /* Extract PF resource limits and return success.
/* Unpack each 32-bit reply word into its component resource fields. */
2880 word = be32_to_cpu(rpl.niqflint_niq);
2881 pfres->niqflint = FW_PFVF_CMD_NIQFLINT_G(word);
2882 pfres->niq = FW_PFVF_CMD_NIQ_G(word);
2884 word = be32_to_cpu(rpl.type_to_neq);
2885 pfres->neq = FW_PFVF_CMD_NEQ_G(word);
2886 pfres->pmask = FW_PFVF_CMD_PMASK_G(word);
2888 word = be32_to_cpu(rpl.tc_to_nexactf);
2889 pfres->tc = FW_PFVF_CMD_TC_G(word);
2890 pfres->nvi = FW_PFVF_CMD_NVI_G(word);
2891 pfres->nexactf = FW_PFVF_CMD_NEXACTF_G(word);
2893 word = be32_to_cpu(rpl.r_caps_to_nethctrl);
2894 pfres->r_caps = FW_PFVF_CMD_R_CAPS_G(word);
2895 pfres->wx_caps = FW_PFVF_CMD_WX_CAPS_G(word);
2896 pfres->nethctrl = FW_PFVF_CMD_NETHCTRL_G(word);
2901 /* serial flash and firmware constants */
/* NOTE(review): the enum opener/closer lines are missing from this
 * excerpt; these are standard SPI-NOR opcodes plus a retry count used
 * by sf1_read()/sf1_write()/flash_wait_op() below.
 */
2903 SF_ATTEMPTS = 10, /* max retries for SF operations */
2905 /* flash command opcodes */
2906 SF_PROG_PAGE = 2, /* program page */
2907 SF_WR_DISABLE = 4, /* disable writes */
2908 SF_RD_STATUS = 5, /* read status register */
2909 SF_WR_ENABLE = 6, /* enable writes */
2910 SF_RD_DATA_FAST = 0xb, /* read flash */
2911 SF_RD_ID = 0x9f, /* read ID */
2912 SF_ERASE_SECTOR = 0xd8, /* erase sector */
2916 * sf1_read - read data from the serial flash
2917 * @adapter: the adapter
2918 * @byte_cnt: number of bytes to read
2919 * @cont: whether another operation will be chained
2920 * @lock: whether to lock SF for PL access only
2921 * @valp: where to store the read data
2923 * Reads up to 4 bytes of data from the serial flash. The location of
2924 * the read needs to be specified prior to calling this by issuing the
2925 * appropriate commands to the serial flash.
2927 static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
2928 int lock, u32 *valp)
2932 if (!byte_cnt || byte_cnt > 4)
/* Refuse to start a new operation while the SF engine is busy. */
2934 if (t4_read_reg(adapter, SF_OP_A) & SF_BUSY_F)
/* Kick off the read (BYTECNT field is count - 1), then poll for done. */
2936 t4_write_reg(adapter, SF_OP_A, SF_LOCK_V(lock) |
2937 SF_CONT_V(cont) | BYTECNT_V(byte_cnt - 1));
2938 ret = t4_wait_op_done(adapter, SF_OP_A, SF_BUSY_F, 0, SF_ATTEMPTS, 5);
2940 *valp = t4_read_reg(adapter, SF_DATA_A);
2945 * sf1_write - write data to the serial flash
2946 * @adapter: the adapter
2947 * @byte_cnt: number of bytes to write
2948 * @cont: whether another operation will be chained
2949 * @lock: whether to lock SF for PL access only
2950 * @val: value to write
2952 * Writes up to 4 bytes of data to the serial flash. The location of
2953 * the write needs to be specified prior to calling this by issuing the
2954 * appropriate commands to the serial flash.
2956 static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont,
/* Only 1..4 bytes fit in the 32-bit SF_DATA register. */
2959 if (!byte_cnt || byte_cnt > 4)
/* A previous flash operation is still running -- don't overwrite SF_DATA. */
2961 if (t4_read_reg(adapter, SF_OP_A) & SF_BUSY_F)
/* Stage the data, then start the transfer; OP_V(1) marks it as a write
 * (sf1_read above omits that bit). */
2963 t4_write_reg(adapter, SF_DATA_A, val);
2964 t4_write_reg(adapter, SF_OP_A, SF_LOCK_V(lock) |
2965 SF_CONT_V(cont) | BYTECNT_V(byte_cnt - 1) | OP_V(1));
/* Completion is simply SF_BUSY deasserting. */
2966 return t4_wait_op_done(adapter, SF_OP_A, SF_BUSY_F, 0, SF_ATTEMPTS, 5);
2970 * flash_wait_op - wait for a flash operation to complete
2971 * @adapter: the adapter
2972 * @attempts: max number of polls of the status register
2973 * @delay: delay between polls in ms
2975 * Wait for a flash operation to complete by polling the status register.
2977 static int flash_wait_op(struct adapter *adapter, int attempts, int delay)
/* Each iteration issues RD_STATUS (chained, locked) and reads one status
 * byte back.  The loop construct itself is elided in this listing. */
2983 if ((ret = sf1_write(adapter, 1, 1, 1, SF_RD_STATUS)) != 0 ||
2984 (ret = sf1_read(adapter, 1, 0, 1, &status)) != 0)
/* presumably returns a timeout error once attempts are exhausted; the
 * success test on `status` and the return statements are elided here --
 * TODO confirm against the full source. */
2988 if (--attempts == 0)
2996 * t4_read_flash - read words from serial flash
2997 * @adapter: the adapter
2998 * @addr: the start address for the read
2999 * @nwords: how many 32-bit words to read
3000 * @data: where to store the read data
3001 * @byte_oriented: whether to store data as bytes or as words
3003 * Read the specified number of 32-bit words from the serial flash.
3004 * If @byte_oriented is set the read data is stored as a byte array
3005 * (i.e., big-endian), otherwise as 32-bit words in the platform's
3006 * natural endianness.
3008 int t4_read_flash(struct adapter *adapter, unsigned int addr,
3009 unsigned int nwords, u32 *data, int byte_oriented)
/* Reject reads past the end of the flash part and unaligned addresses. */
3013 if (addr + nwords * sizeof(u32) > adapter->params.sf_size || (addr & 3))
/* Build the FAST_READ command word: opcode in the low byte, the 24-bit
 * address byte-swapped above it so it goes out MSB-first on the wire. */
3016 addr = swab32(addr) | SF_RD_DATA_FAST;
/* Send opcode+address, then clock through the FAST_READ dummy byte. */
3018 if ((ret = sf1_write(adapter, 4, 1, 0, addr)) != 0 ||
3019 (ret = sf1_read(adapter, 1, 1, 0, data)) != 0)
/* cont stays set while more words remain; lock is asserted only on the
 * final word -- presumably to release the SF interface at the end, TODO
 * confirm the lock semantics against sf1_read()'s contract. */
3022 for ( ; nwords; nwords--, data++) {
3023 ret = sf1_read(adapter, 4, nwords > 1, nwords == 1, data);
/* Error path: always release the SF lock before returning. */
3025 t4_write_reg(adapter, SF_OP_A, 0); /* unlock SF */
/* byte_oriented: preserve the flash's big-endian byte order in memory. */
3029 *data = (__force __u32)(cpu_to_be32(*data));
3035 * t4_write_flash - write up to a page of data to the serial flash
3036 * @adapter: the adapter
3037 * @addr: the start address to write
3038 * @n: length of data to write in bytes
3039 * @data: the data to write
3040 * @byte_oriented: whether to store data as bytes or as words
3042 * Writes up to a page of data (256 bytes) to the serial flash starting
3043 * at the given address. All the data must be written to the same page.
3044 * If @byte_oriented is set the write data is stored as byte stream
3045 * (i.e. matches what on disk), otherwise in big-endian.
3047 static int t4_write_flash(struct adapter *adapter, unsigned int addr,
3048 unsigned int n, const u8 *data, bool byte_oriented)
3050 unsigned int i, c, left, val, offset = addr & 0xff;
/* All n bytes must land within one 256-byte flash page. */
3054 if (addr >= adapter->params.sf_size || offset + n > SF_PAGE_SIZE)
/* PAGE_PROGRAM opcode + byte-swapped 24-bit address (same command-word
 * layout as t4_read_flash()). */
3057 val = swab32(addr) | SF_PROG_PAGE;
/* Writes must be preceded by WRITE_ENABLE. */
3059 if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
3060 (ret = sf1_write(adapter, 4, 1, 1, val)) != 0)
/* Feed the page in chunks of up to 4 bytes (SF_DATA is 32 bits wide). */
3063 for (left = n; left; left -= c, data += c) {
/* Pack c bytes into one word; byte order depends on byte_oriented (the
 * branch selecting between the two accumulations is elided here). */
3065 for (val = 0, i = 0; i < c; ++i) {
3067 val = (val << 8) + data[i];
3069 val = (val << 8) + data[c - i - 1];
/* cont while more chunks remain (c != left only on the last chunk). */
3072 ret = sf1_write(adapter, c, c != left, 1, val);
/* Page programs take a few ms; poll the status register until done. */
3076 ret = flash_wait_op(adapter, 8, 1);
3080 t4_write_reg(adapter, SF_OP_A, 0); /* unlock SF */
3082 /* Read the page to verify the write succeeded */
3083 ret = t4_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf,
/* `data` was advanced by n in the loop above, so data - n is the start
 * of the caller's buffer; compare against the read-back page contents. */
3088 if (memcmp(data - n, (u8 *)buf + offset, n)) {
3089 dev_err(adapter->pdev_dev,
3090 "failed to correctly write the flash page at %#x\n",
/* Error path: always release the SF lock. */
3097 t4_write_reg(adapter, SF_OP_A, 0); /* unlock SF */
3102 * t4_get_fw_version - read the firmware version
3103 * @adapter: the adapter
3104 * @vers: where to place the version
3106 * Reads the FW version from flash.
3108 int t4_get_fw_version(struct adapter *adapter, u32 *vers)
/* Single 32-bit read of the fw_ver field of the fw_hdr stored at the
 * firmware's flash location -- no mailbox/firmware interaction needed. */
3110 return t4_read_flash(adapter, FLASH_FW_START +
3111 offsetof(struct fw_hdr, fw_ver), 1,
3116 * t4_get_bs_version - read the firmware bootstrap version
3117 * @adapter: the adapter
3118 * @vers: where to place the version
3120 * Reads the FW Bootstrap version from flash.
3122 int t4_get_bs_version(struct adapter *adapter, u32 *vers)
/* Same single-word fw_hdr.fw_ver read as t4_get_fw_version(), but from
 * the bootstrap firmware's flash region. */
3124 return t4_read_flash(adapter, FLASH_FWBOOTSTRAP_START +
3125 offsetof(struct fw_hdr, fw_ver), 1,
3130 * t4_get_tp_version - read the TP microcode version
3131 * @adapter: the adapter
3132 * @vers: where to place the version
3134 * Reads the TP microcode version from flash.
3136 int t4_get_tp_version(struct adapter *adapter, u32 *vers)
/* The TP microcode version lives in the firmware header too (field
 * tp_microcode_ver), so this also reads from the FW flash region. */
3138 return t4_read_flash(adapter, FLASH_FW_START +
3139 offsetof(struct fw_hdr, tp_microcode_ver),
3144 * t4_get_exprom_version - return the Expansion ROM version (if any)
3145 * @adap: the adapter
3146 * @vers: where to place the version
3148 * Reads the Expansion ROM header from FLASH and returns the version
3149 * number (if present) through the @vers return value pointer. We return
3150 * this in the Firmware Version Format since it's convenient. Return
3151 * 0 on success, -ENOENT if no Expansion ROM is present.
3153 int t4_get_exprom_version(struct adapter *adap, u32 *vers)
/* Local view of the start of an expansion ROM image. */
3155 struct exprom_header {
3156 unsigned char hdr_arr[16]; /* must start with 0x55aa */
3157 unsigned char hdr_ver[4]; /* Expansion ROM version */
/* Word-sized buffer so t4_read_flash() can fill it 32 bits at a time. */
3159 u32 exprom_header_buf[DIV_ROUND_UP(sizeof(struct exprom_header),
3163 ret = t4_read_flash(adap, FLASH_EXP_ROM_START,
3164 ARRAY_SIZE(exprom_header_buf), exprom_header_buf,
3169 hdr = (struct exprom_header *)exprom_header_buf;
/* Standard PCI expansion-ROM signature check: first two bytes 0x55 0xAA. */
3170 if (hdr->hdr_arr[0] != 0x55 || hdr->hdr_arr[1] != 0xaa)
/* Repackage the four raw version bytes into Firmware Version Format. */
3173 *vers = (FW_HDR_FW_VER_MAJOR_V(hdr->hdr_ver[0]) |
3174 FW_HDR_FW_VER_MINOR_V(hdr->hdr_ver[1]) |
3175 FW_HDR_FW_VER_MICRO_V(hdr->hdr_ver[2]) |
3176 FW_HDR_FW_VER_BUILD_V(hdr->hdr_ver[3]));
3181 * t4_get_vpd_version - return the VPD version
3182 * @adapter: the adapter
3183 * @vers: where to place the version
3185 * Reads the VPD via the Firmware interface (thus this can only be called
3186 * once we're ready to issue Firmware commands). The format of the
3187 * VPD version is adapter specific. Returns 0 on success, an error on
3190 * Note that early versions of the Firmware didn't include the ability
3191 * to retrieve the VPD version, so we zero-out the return-value parameter
3192 * in that case to avoid leaving it with garbage in it.
3194 * Also note that the Firmware will return its cached copy of the VPD
3195 * Revision ID, not the actual Revision ID as written in the Serial
3196 * EEPROM. This is only an issue if a new VPD has been written and the
3197 * Firmware/Chip haven't yet gone through a RESET sequence. So it's best
3198 * to defer calling this routine till after a FW_RESET_CMD has been issued
3199 * if the Host Driver will be performing a full adapter initialization.
3201 int t4_get_vpd_version(struct adapter *adapter, u32 *vers)
/* Query the DEV_VPDREV firmware parameter; on success the firmware
 * writes the version straight into *vers.  (The zeroing of *vers for
 * old firmwares described above is elided in this listing.) */
3206 vpdrev_param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
3207 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_VPDREV));
3208 ret = t4_query_params(adapter, adapter->mbox, adapter->pf, 0,
3209 1, &vpdrev_param, vers);
3216 * t4_get_scfg_version - return the Serial Configuration version
3217 * @adapter: the adapter
3218 * @vers: where to place the version
3220 * Reads the Serial Configuration Version via the Firmware interface
3221 * (thus this can only be called once we're ready to issue Firmware
3222 * commands). The format of the Serial Configuration version is
3223 * adapter specific. Returns 0 on success, an error on failure.
3225 * Note that early versions of the Firmware didn't include the ability
3226 * to retrieve the Serial Configuration version, so we zero-out the
3227 * return-value parameter in that case to avoid leaving it with
3230 * Also note that the Firmware will return its cached copy of the Serial
3231 * Initialization Revision ID, not the actual Revision ID as written in
3232 * the Serial EEPROM. This is only an issue if a new VPD has been written
3233 * and the Firmware/Chip haven't yet gone through a RESET sequence. So
3234 * it's best to defer calling this routine till after a FW_RESET_CMD has
3235 * been issued if the Host Driver will be performing a full adapter
3238 int t4_get_scfg_version(struct adapter *adapter, u32 *vers)
/* Identical pattern to t4_get_vpd_version(), but for the DEV_SCFGREV
 * firmware parameter. */
3243 scfgrev_param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
3244 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_SCFGREV));
3245 ret = t4_query_params(adapter, adapter->mbox, adapter->pf, 0,
3246 1, &scfgrev_param, vers);
3253 * t4_get_version_info - extract various chip/firmware version information
3254 * @adapter: the adapter
3256 * Reads various chip/firmware version numbers and stores them into the
3257 * adapter Adapter Parameters structure. If any of the efforts fails
3258 * the first failure will be returned, but all of the version numbers
3261 int t4_get_version_info(struct adapter *adapter)
/* FIRST_RET runs each getter unconditionally but records only the first
 * failure, so every version that *can* be read still gets stored. */
3265 #define FIRST_RET(__getvinfo) \
3267 int __ret = __getvinfo; \
3268 if (__ret && !ret) \
3272 FIRST_RET(t4_get_fw_version(adapter, &adapter->params.fw_vers));
3273 FIRST_RET(t4_get_bs_version(adapter, &adapter->params.bs_vers));
3274 FIRST_RET(t4_get_tp_version(adapter, &adapter->params.tp_vers));
3275 FIRST_RET(t4_get_exprom_version(adapter, &adapter->params.er_vers));
3276 FIRST_RET(t4_get_scfg_version(adapter, &adapter->params.scfg_vers));
3277 FIRST_RET(t4_get_vpd_version(adapter, &adapter->params.vpd_vers));
3284 * t4_dump_version_info - dump all of the adapter configuration IDs
3285 * @adapter: the adapter
3287 * Dumps all of the various bits of adapter configuration version/revision
3288 * IDs information. This is typically called at some point after
3289 * t4_get_version_info() has been called.
3291 void t4_dump_version_info(struct adapter *adapter)
3293 /* Device information */
3294 dev_info(adapter->pdev_dev, "Chelsio %s rev %d\n",
3295 adapter->params.vpd.id,
3296 CHELSIO_CHIP_RELEASE(adapter->params.chip))
3297 dev_info(adapter->pdev_dev, "S/N: %s, P/N: %s\n",
3298 adapter->params.vpd.sn, adapter->params.vpd.pn);
3300 /* Firmware Version */
/* A zero version is treated as "nothing loaded" for FW and TP below
 * (dev_warn), but as merely informational for bootstrap/exprom. */
3301 if (!adapter->params.fw_vers)
3302 dev_warn(adapter->pdev_dev, "No firmware loaded\n");
3304 dev_info(adapter->pdev_dev, "Firmware version: %u.%u.%u.%u\n",
3305 FW_HDR_FW_VER_MAJOR_G(adapter->params.fw_vers),
3306 FW_HDR_FW_VER_MINOR_G(adapter->params.fw_vers),
3307 FW_HDR_FW_VER_MICRO_G(adapter->params.fw_vers),
3308 FW_HDR_FW_VER_BUILD_G(adapter->params.fw_vers));
3310 /* Bootstrap Firmware Version. (Some adapters don't have Bootstrap
3311 * Firmware, so dev_info() is more appropriate here.)
3313 if (!adapter->params.bs_vers)
3314 dev_info(adapter->pdev_dev, "No bootstrap loaded\n");
3316 dev_info(adapter->pdev_dev, "Bootstrap version: %u.%u.%u.%u\n",
3317 FW_HDR_FW_VER_MAJOR_G(adapter->params.bs_vers),
3318 FW_HDR_FW_VER_MINOR_G(adapter->params.bs_vers),
3319 FW_HDR_FW_VER_MICRO_G(adapter->params.bs_vers),
3320 FW_HDR_FW_VER_BUILD_G(adapter->params.bs_vers));
3322 /* TP Microcode Version */
3323 if (!adapter->params.tp_vers)
3324 dev_warn(adapter->pdev_dev, "No TP Microcode loaded\n")
3326 dev_info(adapter->pdev_dev,
3327 "TP Microcode version: %u.%u.%u.%u\n",
3328 FW_HDR_FW_VER_MAJOR_G(adapter->params.tp_vers),
3329 FW_HDR_FW_VER_MINOR_G(adapter->params.tp_vers),
3330 FW_HDR_FW_VER_MICRO_G(adapter->params.tp_vers),
3331 FW_HDR_FW_VER_BUILD_G(adapter->params.tp_vers));
3333 /* Expansion ROM version */
3334 if (!adapter->params.er_vers)
3335 dev_info(adapter->pdev_dev, "No Expansion ROM loaded\n");
3337 dev_info(adapter->pdev_dev,
3338 "Expansion ROM version: %u.%u.%u.%u\n",
3339 FW_HDR_FW_VER_MAJOR_G(adapter->params.er_vers),
3340 FW_HDR_FW_VER_MINOR_G(adapter->params.er_vers),
3341 FW_HDR_FW_VER_MICRO_G(adapter->params.er_vers),
3342 FW_HDR_FW_VER_BUILD_G(adapter->params.er_vers));
3344 /* Serial Configuration version */
/* SCFG/VPD versions are adapter-specific opaque values; print raw hex. */
3345 dev_info(adapter->pdev_dev, "Serial Configuration version: %#x\n",
3346 adapter->params.scfg_vers);
3349 dev_info(adapter->pdev_dev, "VPD version: %#x\n",
3350 adapter->params.vpd_vers);
3354 * t4_check_fw_version - check if the FW is supported with this driver
3355 * @adap: the adapter
3357 * Checks if an adapter's FW is compatible with the driver. Returns 0
3358 * if there's exact match, a negative error if the version could not be
3359 * read or there's a major version mismatch
3361 int t4_check_fw_version(struct adapter *adap)
3363 int i, ret, major, minor, micro;
3364 int exp_major, exp_minor, exp_micro;
3365 unsigned int chip_version = CHELSIO_CHIP_VERSION(adap->params.chip);
3367 ret = t4_get_fw_version(adap, &adap->params.fw_vers);
3368 /* Try multiple times before returning error */
/* Retry up to 3 times, but only on transient mailbox errors. */
3369 for (i = 0; (ret == -EBUSY || ret == -EAGAIN) && i < 3; i++)
3370 ret = t4_get_fw_version(adap, &adap->params.fw_vers);
3375 major = FW_HDR_FW_VER_MAJOR_G(adap->params.fw_vers);
3376 minor = FW_HDR_FW_VER_MINOR_G(adap->params.fw_vers);
3377 micro = FW_HDR_FW_VER_MICRO_G(adap->params.fw_vers);
/* Pick the per-chip minimum supported firmware version (constants come
 * from t4fw_version.h). */
3379 switch (chip_version) {
3381 exp_major = T4FW_MIN_VERSION_MAJOR;
3382 exp_minor = T4FW_MIN_VERSION_MINOR;
3383 exp_micro = T4FW_MIN_VERSION_MICRO;
3386 exp_major = T5FW_MIN_VERSION_MAJOR;
3387 exp_minor = T5FW_MIN_VERSION_MINOR;
3388 exp_micro = T5FW_MIN_VERSION_MICRO;
3391 exp_major = T6FW_MIN_VERSION_MAJOR;
3392 exp_minor = T6FW_MIN_VERSION_MINOR;
3393 exp_micro = T6FW_MIN_VERSION_MICRO;
3396 dev_err(adap->pdev_dev, "Unsupported chip type, %x\n",
/* Lexicographic (major, minor, micro) comparison against the minimum. */
3401 if (major < exp_major || (major == exp_major && minor < exp_minor) ||
3402 (major == exp_major && minor == exp_minor && micro < exp_micro)) {
3403 dev_err(adap->pdev_dev,
3404 "Card has firmware version %u.%u.%u, minimum "
3405 "supported firmware is %u.%u.%u.\n", major, minor,
3406 micro, exp_major, exp_minor, exp_micro);
3412 /* Is the given firmware API compatible with the one the driver was compiled
3415 static int fw_compatible(const struct fw_hdr *hdr1, const struct fw_hdr *hdr2)
3418 /* short circuit if it's the exact same firmware version */
3419 if (hdr1->chip == hdr2->chip && hdr1->fw_ver == hdr2->fw_ver)
/* Otherwise compatible iff same chip and every per-ULD interface version
 * (NIC, vNIC, RDMA, iSCSI, FCoE) matches. */
3422 #define SAME_INTF(x) (hdr1->intfver_##x == hdr2->intfver_##x)
3423 if (hdr1->chip == hdr2->chip && SAME_INTF(nic) && SAME_INTF(vnic) &&
3424 SAME_INTF(ri) && SAME_INTF(iscsi) && SAME_INTF(fcoe))
3431 /* The firmware in the filesystem is usable, but should it be installed?
3432 * This routine explains itself in detail if it indicates the filesystem
3433 * firmware should be installed.
3435 static int should_install_fs_fw(struct adapter *adap, int card_fw_usable,
/* Install if the on-card firmware is unusable, or older than the
 * filesystem image.  (The elided parameters appear to be the filesystem
 * and card firmware versions, `k` and `c` -- see the caller t4_prep_fw
 * and the log message below; TODO confirm against the full signature.) */
3440 if (!card_fw_usable) {
3441 reason = "incompatible or unusable";
3446 reason = "older than the version supported with this driver";
/* Reaching here means we decided to install: say why, loudly. */
3453 dev_err(adap->pdev_dev, "firmware on card (%u.%u.%u.%u) is %s, "
3454 "installing firmware %u.%u.%u.%u on card.\n",
3455 FW_HDR_FW_VER_MAJOR_G(c), FW_HDR_FW_VER_MINOR_G(c),
3456 FW_HDR_FW_VER_MICRO_G(c), FW_HDR_FW_VER_BUILD_G(c), reason,
3457 FW_HDR_FW_VER_MAJOR_G(k), FW_HDR_FW_VER_MINOR_G(k),
3458 FW_HDR_FW_VER_MICRO_G(k), FW_HDR_FW_VER_BUILD_G(k));
/* Decide which firmware to run: the one already on the card, or the one
 * supplied from the filesystem (fw_data); upgrades the card if needed. */
3463 int t4_prep_fw(struct adapter *adap, struct fw_info *fw_info,
3464 const u8 *fw_data, unsigned int fw_size,
3465 struct fw_hdr *card_fw, enum dev_state state,
3468 int ret, card_fw_usable, fs_fw_usable;
3469 const struct fw_hdr *fs_fw;
3470 const struct fw_hdr *drv_fw;
/* drv_fw = the firmware header this driver was compiled against. */
3472 drv_fw = &fw_info->fw_hdr;
3474 /* Read the header of the firmware on the card */
3475 ret = t4_read_flash(adap, FLASH_FW_START,
3476 sizeof(*card_fw) / sizeof(uint32_t),
3477 (uint32_t *)card_fw, 1);
3479 card_fw_usable = fw_compatible(drv_fw, (const void *)card_fw);
3481 dev_err(adap->pdev_dev,
3482 "Unable to read card's firmware header: %d\n", ret);
/* A filesystem image is optional; only check compatibility if given. */
3486 if (fw_data != NULL) {
3487 fs_fw = (const void *)fw_data;
3488 fs_fw_usable = fw_compatible(drv_fw, fs_fw);
3494 if (card_fw_usable && card_fw->fw_ver == drv_fw->fw_ver &&
3495 (!fs_fw_usable || fs_fw->fw_ver == drv_fw->fw_ver)) {
3496 /* Common case: the firmware on the card is an exact match and
3497 * the filesystem one is an exact match too, or the filesystem
3498 * one is absent/incompatible.
/* Only upgrade when the device is still uninitialized -- flashing under
 * a live configuration is not attempted. */
3500 } else if (fs_fw_usable && state == DEV_STATE_UNINIT &&
3501 should_install_fs_fw(adap, card_fw_usable,
3502 be32_to_cpu(fs_fw->fw_ver),
3503 be32_to_cpu(card_fw->fw_ver))) {
3504 ret = t4_fw_upgrade(adap, adap->mbox, fw_data,
3507 dev_err(adap->pdev_dev,
3508 "failed to install firmware: %d\n", ret);
3512 /* Installed successfully, update the cached header too. */
3515 *reset = 0; /* already reset as part of load_fw */
/* Neither the card's firmware nor any upgrade path worked: report all
 * three versions (driver/card/filesystem) to aid debugging. */
3518 if (!card_fw_usable) {
3521 d = be32_to_cpu(drv_fw->fw_ver);
3522 c = be32_to_cpu(card_fw->fw_ver);
3523 k = fs_fw ? be32_to_cpu(fs_fw->fw_ver) : 0;
3525 dev_err(adap->pdev_dev, "Cannot find a usable firmware: "
3527 "driver compiled with %d.%d.%d.%d, "
3528 "card has %d.%d.%d.%d, filesystem has %d.%d.%d.%d\n",
3530 FW_HDR_FW_VER_MAJOR_G(d), FW_HDR_FW_VER_MINOR_G(d),
3531 FW_HDR_FW_VER_MICRO_G(d), FW_HDR_FW_VER_BUILD_G(d),
3532 FW_HDR_FW_VER_MAJOR_G(c), FW_HDR_FW_VER_MINOR_G(c),
3533 FW_HDR_FW_VER_MICRO_G(c), FW_HDR_FW_VER_BUILD_G(c),
3534 FW_HDR_FW_VER_MAJOR_G(k), FW_HDR_FW_VER_MINOR_G(k),
3535 FW_HDR_FW_VER_MICRO_G(k), FW_HDR_FW_VER_BUILD_G(k));
3540 /* We're using whatever's on the card and it's known to be good. */
3541 adap->params.fw_vers = be32_to_cpu(card_fw->fw_ver);
3542 adap->params.tp_vers = be32_to_cpu(card_fw->tp_microcode_ver);
3549 * t4_flash_erase_sectors - erase a range of flash sectors
3550 * @adapter: the adapter
3551 * @start: the first sector to erase
3552 * @end: the last sector to erase
3554 * Erases the sectors in the given inclusive range.
3556 static int t4_flash_erase_sectors(struct adapter *adapter, int start, int end)
/* Range must lie entirely within the part's sector count. */
3560 if (end >= adapter->params.sf_nsec)
3563 while (start <= end) {
/* Per sector: WRITE_ENABLE, then SECTOR_ERASE with the sector index
 * encoded above the opcode byte (presumably forming the 24-bit sector
 * address -- TODO confirm encoding), then a long poll: erases are slow,
 * hence 14 attempts x 500ms in flash_wait_op(). */
3564 if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
3565 (ret = sf1_write(adapter, 4, 0, 1,
3566 SF_ERASE_SECTOR | (start << 8))) != 0 ||
3567 (ret = flash_wait_op(adapter, 14, 500)) != 0) {
3568 dev_err(adapter->pdev_dev,
3569 "erase of flash sector %d failed, error %d\n",
3575 t4_write_reg(adapter, SF_OP_A, 0); /* unlock SF */
3580 * t4_flash_cfg_addr - return the address of the flash configuration file
3581 * @adapter: the adapter
3583 * Return the address within the flash where the Firmware Configuration
3586 unsigned int t4_flash_cfg_addr(struct adapter *adapter)
/* 1MB flash parts use the (smaller) FPGA layout; everything else uses
 * the standard layout. */
3588 if (adapter->params.sf_size == 0x100000)
3589 return FLASH_FPGA_CFG_START;
3591 return FLASH_CFG_START;
3594 /* Return TRUE if the specified firmware matches the adapter. I.e. T4
3595 * firmware for T4 adapters, T5 firmware for T5 adapters, etc. We go ahead
3596 * and emit an error message for mismatched firmware to save our caller the
3599 static bool t4_fw_matches_chip(const struct adapter *adap,
3600 const struct fw_hdr *hdr)
3602 /* The expression below will return FALSE for any unsupported adapter
3603 * which will keep us "honest" in the future ...
/* Match the image's chip tag against the adapter generation. */
3605 if ((is_t4(adap->params.chip) && hdr->chip == FW_HDR_CHIP_T4) ||
3606 (is_t5(adap->params.chip) && hdr->chip == FW_HDR_CHIP_T5) ||
3607 (is_t6(adap->params.chip) && hdr->chip == FW_HDR_CHIP_T6))
3610 dev_err(adap->pdev_dev,
3611 "FW image (%d) is not suitable for this adapter (%d)\n",
3612 hdr->chip, CHELSIO_CHIP_VERSION(adap->params.chip));
3617 * t4_load_fw - download firmware
3618 * @adap: the adapter
3619 * @fw_data: the firmware image to write
3622 * Write the supplied firmware image to the card's serial flash.
3624 int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
3629 u8 first_page[SF_PAGE_SIZE];
3630 const __be32 *p = (const __be32 *)fw_data;
3631 const struct fw_hdr *hdr = (const struct fw_hdr *)fw_data;
3632 unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
3633 unsigned int fw_start_sec = FLASH_FW_START_SEC;
3634 unsigned int fw_size = FLASH_FW_MAX_SIZE;
3635 unsigned int fw_start = FLASH_FW_START;
/* Sanity checks: non-empty, 512-byte granular, header-consistent size,
 * fits in the flash firmware region, and built for this chip. */
3638 dev_err(adap->pdev_dev, "FW image has no data\n");
3642 dev_err(adap->pdev_dev,
3643 "FW image size not multiple of 512 bytes\n");
3646 if ((unsigned int)be16_to_cpu(hdr->len512) * 512 != size) {
3647 dev_err(adap->pdev_dev,
3648 "FW image size differs from size in FW header\n");
3651 if (size > fw_size) {
3652 dev_err(adap->pdev_dev, "FW image too large, max is %u bytes\n",
3656 if (!t4_fw_matches_chip(adap, hdr))
/* The image's 32-bit-word sum must come out to 0xffffffff. */
3659 for (csum = 0, i = 0; i < size / sizeof(csum); i++)
3660 csum += be32_to_cpu(p[i]);
3662 if (csum != 0xffffffff) {
3663 dev_err(adap->pdev_dev,
3664 "corrupted firmware image, checksum %#x\n", csum);
3668 i = DIV_ROUND_UP(size, sf_sec_size); /* # of sectors spanned */
3669 ret = t4_flash_erase_sectors(adap, fw_start_sec, fw_start_sec + i - 1);
3674 * We write the correct version at the end so the driver can see a bad
3675 * version if the FW write fails. Start by writing a copy of the
3676 * first page with a bad version.
3678 memcpy(first_page, fw_data, SF_PAGE_SIZE);
3679 ((struct fw_hdr *)first_page)->fw_ver = cpu_to_be32(0xffffffff);
3680 ret = t4_write_flash(adap, fw_start, SF_PAGE_SIZE, first_page, true);
/* Write the remaining pages in order. */
3685 for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
3686 addr += SF_PAGE_SIZE;
3687 fw_data += SF_PAGE_SIZE;
3688 ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, fw_data, true);
/* Finally patch in the real version word, marking the image valid. */
3693 ret = t4_write_flash(adap, fw_start + offsetof(struct fw_hdr, fw_ver),
3694 sizeof(hdr->fw_ver), (const u8 *)&hdr->fw_ver,
3698 dev_err(adap->pdev_dev, "firmware download failed, error %d\n",
/* Refresh the cached version from what actually landed in flash. */
3701 ret = t4_get_fw_version(adap, &adap->params.fw_vers);
3706 * t4_phy_fw_ver - return current PHY firmware version
3707 * @adap: the adapter
3708 * @phy_fw_ver: return value buffer for PHY firmware version
3710 * Returns the current version of external PHY firmware on the
3713 int t4_phy_fw_ver(struct adapter *adap, int *phy_fw_ver)
/* Query the DEV_PHYFW/VERSION parameter; Y carries the port vector so
 * the firmware knows which PHY(s) are being asked about. */
3718 param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
3719 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PHYFW) |
3720 FW_PARAMS_PARAM_Y_V(adap->params.portvec) |
3721 FW_PARAMS_PARAM_Z_V(FW_PARAMS_PARAM_DEV_PHYFW_VERSION));
3722 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1,
3731 * t4_load_phy_fw - download port PHY firmware
3732 * @adap: the adapter
3733 * @win: the PCI-E Memory Window index to use for t4_memory_rw()
3734 * @phy_fw_version: function to check PHY firmware versions
3735 * @phy_fw_data: the PHY firmware image to write
3736 * @phy_fw_size: image size
3738 * Transfer the specified PHY firmware to the adapter. If a non-NULL
3739 * @phy_fw_version is supplied, then it will be used to determine if
3740 * it's necessary to perform the transfer by comparing the version
3741 * of any existing adapter PHY firmware with that of the passed in
3742 * PHY firmware image.
3744 * A negative error number will be returned if an error occurs. If
3745 * version number support is available and there's no need to upgrade
3746 * the firmware, 0 will be returned. If firmware is successfully
3747 * transferred to the adapter, 1 will be returned.
3749 * NOTE: some adapters only have local RAM to store the PHY firmware. As
3750 * a result, a RESET of the adapter would cause that RAM to lose its
3751 * contents. Thus, loading PHY firmware on such adapters must happen
3752 * after any FW_RESET_CMDs ...
3754 int t4_load_phy_fw(struct adapter *adap, int win,
3755 int (*phy_fw_version)(const u8 *, size_t),
3756 const u8 *phy_fw_data, size_t phy_fw_size)
3758 int cur_phy_fw_ver = 0, new_phy_fw_vers = 0;
3759 unsigned long mtype = 0, maddr = 0;
3763 /* If we have version number support, then check to see if the adapter
3764 * already has up-to-date PHY firmware loaded.
3766 if (phy_fw_version) {
3767 new_phy_fw_vers = phy_fw_version(phy_fw_data, phy_fw_size);
3768 ret = t4_phy_fw_ver(adap, &cur_phy_fw_ver);
/* Skip the whole transfer if the adapter is already current. */
3772 if (cur_phy_fw_ver >= new_phy_fw_vers) {
3773 CH_WARN(adap, "PHY Firmware already up-to-date, "
3774 "version %#x\n", cur_phy_fw_ver);
3779 /* Ask the firmware where it wants us to copy the PHY firmware image.
3780 * The size of the file requires a special version of the READ command
3781 * which will pass the file size via the values field in PARAMS_CMD and
3782 * retrieve the return value from firmware and place it in the same
3785 param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
3786 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PHYFW) |
3787 FW_PARAMS_PARAM_Y_V(adap->params.portvec) |
3788 FW_PARAMS_PARAM_Z_V(FW_PARAMS_PARAM_DEV_PHYFW_DOWNLOAD));
3790 ret = t4_query_params_rw(adap, adap->mbox, adap->pf, 0, 1,
3791 &param, &val, 1, true);
/* val encodes the destination: low byte appears to select the 64KB-
 * aligned offset (<<16); the mtype extraction is elided here -- TODO
 * confirm the exact encoding against the full source. */
3795 maddr = (val & 0xff) << 16;
3797 /* Copy the supplied PHY Firmware image to the adapter memory location
3798 * allocated by the adapter firmware.
/* win0_lock serializes use of the shared PCI-E memory window. */
3800 spin_lock_bh(&adap->win0_lock);
3801 ret = t4_memory_rw(adap, win, mtype, maddr,
3802 phy_fw_size, (__be32 *)phy_fw_data,
3804 spin_unlock_bh(&adap->win0_lock);
3808 /* Tell the firmware that the PHY firmware image has been written to
3809 * RAM and it can now start copying it over to the PHYs. The chip
3810 * firmware will RESET the affected PHYs as part of this operation
3811 * leaving them running the new PHY firmware image.
3813 param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
3814 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PHYFW) |
3815 FW_PARAMS_PARAM_Y_V(adap->params.portvec) |
3816 FW_PARAMS_PARAM_Z_V(FW_PARAMS_PARAM_DEV_PHYFW_DOWNLOAD));
/* PHY flashing is slow: allow a 30 second mailbox timeout. */
3817 ret = t4_set_params_timeout(adap, adap->mbox, adap->pf, 0, 1,
3818 &param, &val, 30000);
3822 /* If we have version number support, then check to see that the new
3823 * firmware got loaded properly.
3825 if (phy_fw_version) {
3826 ret = t4_phy_fw_ver(adap, &cur_phy_fw_ver);
/* Warn (but per the contract above, don't necessarily fail) if the
 * adapter doesn't report the version we just flashed. */
3830 if (cur_phy_fw_ver != new_phy_fw_vers) {
3831 CH_WARN(adap, "PHY Firmware did not update: "
3832 "version on adapter %#x, "
3833 "version flashed %#x\n",
3834 cur_phy_fw_ver, new_phy_fw_vers);
3843 * t4_fwcache - firmware cache operation
3844 * @adap: the adapter
3845 * @op : the operation (flush or flush and invalidate)
3847 int t4_fwcache(struct adapter *adap, enum fw_params_param_dev_fwcache op)
3849 struct fw_params_cmd c;
/* Build a single-parameter PARAMS write: DEV_FWCACHE = op. */
3851 memset(&c, 0, sizeof(c));
3853 cpu_to_be32(FW_CMD_OP_V(FW_PARAMS_CMD) |
3854 FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
3855 FW_PARAMS_CMD_PFN_V(adap->pf) |
3856 FW_PARAMS_CMD_VFN_V(0));
3857 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
3859 cpu_to_be32(FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
3860 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_FWCACHE));
3861 c.param[0].val = cpu_to_be32(op);
/* No reply payload expected (NULL rpl). */
3863 return t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), NULL);
/* Read the CIM PIF (PO/PI) logic-analyzer capture buffers.
 * @pif_req/@pif_rsp: destination arrays for request/response LA data.
 * @pif_req_wrptr/@pif_rsp_wrptr: optionally receive the write pointers. */
3866 void t4_cim_read_pif_la(struct adapter *adap, u32 *pif_req, u32 *pif_rsp,
3867 unsigned int *pif_req_wrptr,
3868 unsigned int *pif_rsp_wrptr)
3871 u32 cfg, val, req, rsp;
/* Save the debug config and clear LADBGEN while we read -- the LA must
 * not be capturing while its buffers are dumped; restored at the end. */
3873 cfg = t4_read_reg(adap, CIM_DEBUGCFG_A);
3874 if (cfg & LADBGEN_F)
3875 t4_write_reg(adap, CIM_DEBUGCFG_A, cfg ^ LADBGEN_F);
3877 val = t4_read_reg(adap, CIM_DEBUGSTS_A);
3878 req = POLADBGWRPTR_G(val);
3879 rsp = PILADBGWRPTR_G(val);
3881 *pif_req_wrptr = req;
3883 *pif_rsp_wrptr = rsp;
/* Walk both capture rings, setting the read pointers then pulling the
 * PO (request) and PI (response) data words. */
3885 for (i = 0; i < CIM_PIFLA_SIZE; i++) {
3886 for (j = 0; j < 6; j++) {
3887 t4_write_reg(adap, CIM_DEBUGCFG_A, POLADBGRDPTR_V(req) |
3888 PILADBGRDPTR_V(rsp));
3889 *pif_req++ = t4_read_reg(adap, CIM_PO_LA_DEBUGDATA_A);
3890 *pif_rsp++ = t4_read_reg(adap, CIM_PI_LA_DEBUGDATA_A);
/* Advance by 2 with wraparound (ring pointers are masked). */
3894 req = (req + 2) & POLADBGRDPTR_M;
3895 rsp = (rsp + 2) & PILADBGRDPTR_M;
/* Restore the original debug configuration (re-enables capture if set). */
3897 t4_write_reg(adap, CIM_DEBUGCFG_A, cfg);
/* Read the CIM MA logic-analyzer capture buffers into @ma_req/@ma_rsp.
 * Same disable-capture / dump / restore pattern as t4_cim_read_pif_la(). */
3900 void t4_cim_read_ma_la(struct adapter *adap, u32 *ma_req, u32 *ma_rsp)
3905 cfg = t4_read_reg(adap, CIM_DEBUGCFG_A);
3906 if (cfg & LADBGEN_F)
3907 t4_write_reg(adap, CIM_DEBUGCFG_A, cfg ^ LADBGEN_F);
3909 for (i = 0; i < CIM_MALA_SIZE; i++) {
3910 for (j = 0; j < 5; j++) {
/* idx is computed from i/j (its assignment is elided in this listing). */
3912 t4_write_reg(adap, CIM_DEBUGCFG_A, POLADBGRDPTR_V(idx) |
3913 PILADBGRDPTR_V(idx));
3914 *ma_req++ = t4_read_reg(adap, CIM_PO_LA_MADEBUGDATA_A);
3915 *ma_rsp++ = t4_read_reg(adap, CIM_PI_LA_MADEBUGDATA_A);
/* Restore the saved debug configuration. */
3918 t4_write_reg(adap, CIM_DEBUGCFG_A, cfg);
/* Dump the ULP-RX logic analyzer into @la_buf, interleaved so that the
 * 8 per-entry words of one LA row end up 8 apart in the output buffer. */
3921 void t4_ulprx_read_la(struct adapter *adap, u32 *la_buf)
3925 for (i = 0; i < 8; i++) {
3926 u32 *p = la_buf + i;
/* Select bank i, rewind the read pointer to the write pointer, then
 * stream out ULPRX_LA_SIZE words (p strides by 8 to interleave banks). */
3928 t4_write_reg(adap, ULP_RX_LA_CTL_A, i);
3929 j = t4_read_reg(adap, ULP_RX_LA_WRPTR_A);
3930 t4_write_reg(adap, ULP_RX_LA_RDPTR_A, j);
3931 for (j = 0; j < ULPRX_LA_SIZE; j++, p += 8)
3932 *p = t4_read_reg(adap, ULP_RX_LA_RDDATA_A);
/* NOTE(review): the remaining OR-terms of this macro (beyond the speed
 * field) are elided in this listing. */
3936 /* The ADVERT_MASK is used to mask out all of the Advertised Firmware Port
3937 * Capabilities which we control with separate controls -- see, for instance,
3938 * Pause Frames and Forward Error Correction. In order to determine what the
3939 * full set of Advertised Port Capabilities are, the base Advertised Port
3940 * Capabilities (masked by ADVERT_MASK) must be combined with the Advertised
3941 * Port Capabilities associated with those other controls. See
3942 * t4_link_acaps() for how this is done.
3944 #define ADVERT_MASK (FW_PORT_CAP32_SPEED_V(FW_PORT_CAP32_SPEED_M) | \
3948 * fwcaps16_to_caps32 - convert 16-bit Port Capabilities to 32-bits
3949 * @caps16: a 16-bit Port Capabilities value
3951 * Returns the equivalent 32-bit Port Capabilities value.
3953 static fw_port_cap32_t fwcaps16_to_caps32(fw_port_cap16_t caps16)
3955 fw_port_cap32_t caps32 = 0;
/* For each capability, copy the FW_PORT_CAP_* bit (16-bit namespace)
 * into the corresponding FW_PORT_CAP32_* bit. */
3957 #define CAP16_TO_CAP32(__cap) \
3959 if (caps16 & FW_PORT_CAP_##__cap) \
3960 caps32 |= FW_PORT_CAP32_##__cap; \
3963 CAP16_TO_CAP32(SPEED_100M);
3964 CAP16_TO_CAP32(SPEED_1G);
3965 CAP16_TO_CAP32(SPEED_25G);
3966 CAP16_TO_CAP32(SPEED_10G);
3967 CAP16_TO_CAP32(SPEED_40G);
3968 CAP16_TO_CAP32(SPEED_100G);
3969 CAP16_TO_CAP32(FC_RX);
3970 CAP16_TO_CAP32(FC_TX);
3971 CAP16_TO_CAP32(ANEG);
3972 CAP16_TO_CAP32(FORCE_PAUSE);
3973 CAP16_TO_CAP32(MDIAUTO);
3974 CAP16_TO_CAP32(MDISTRAIGHT);
3975 CAP16_TO_CAP32(FEC_RS);
3976 CAP16_TO_CAP32(FEC_BASER_RS);
3977 CAP16_TO_CAP32(802_3_PAUSE);
3978 CAP16_TO_CAP32(802_3_ASM_DIR);
3980 #undef CAP16_TO_CAP32
3986 * fwcaps32_to_caps16 - convert 32-bit Port Capabilities to 16-bits
3987 * @caps32: a 32-bit Port Capabilities value
3989 * Returns the equivalent 16-bit Port Capabilities value. Note that
3990 * not all 32-bit Port Capabilities can be represented in the 16-bit
3991 * Port Capabilities and some fields/values may not make it.
3993 static fw_port_cap16_t fwcaps32_to_caps16(fw_port_cap32_t caps32)
3995 fw_port_cap16_t caps16 = 0;
/* Inverse of fwcaps16_to_caps32(): only capabilities that exist in the
 * 16-bit namespace are translated; anything else is silently dropped. */
3997 #define CAP32_TO_CAP16(__cap) \
3999 if (caps32 & FW_PORT_CAP32_##__cap) \
4000 caps16 |= FW_PORT_CAP_##__cap; \
4003 CAP32_TO_CAP16(SPEED_100M);
4004 CAP32_TO_CAP16(SPEED_1G);
4005 CAP32_TO_CAP16(SPEED_10G);
4006 CAP32_TO_CAP16(SPEED_25G);
4007 CAP32_TO_CAP16(SPEED_40G);
4008 CAP32_TO_CAP16(SPEED_100G);
4009 CAP32_TO_CAP16(FC_RX);
4010 CAP32_TO_CAP16(FC_TX);
4011 CAP32_TO_CAP16(802_3_PAUSE);
4012 CAP32_TO_CAP16(802_3_ASM_DIR);
4013 CAP32_TO_CAP16(ANEG);
4014 CAP32_TO_CAP16(FORCE_PAUSE);
4015 CAP32_TO_CAP16(MDIAUTO);
4016 CAP32_TO_CAP16(MDISTRAIGHT);
4017 CAP32_TO_CAP16(FEC_RS);
4018 CAP32_TO_CAP16(FEC_BASER_RS);
4020 #undef CAP32_TO_CAP16
4025 /* Translate Firmware Port Capabilities Pause specification to Common Code */
4026 static inline enum cc_pause fwcap_to_cc_pause(fw_port_cap32_t fw_pause)
4028 enum cc_pause cc_pause = 0;
/* Map the two firmware flow-control bits onto the common-code flags. */
4030 if (fw_pause & FW_PORT_CAP32_FC_RX)
4031 cc_pause |= PAUSE_RX;
4032 if (fw_pause & FW_PORT_CAP32_FC_TX)
4033 cc_pause |= PAUSE_TX;
4038 /* Translate Common Code Pause specification into Firmware Port Capabilities */
4039 static inline fw_port_cap32_t cc_to_fwcap_pause(enum cc_pause cc_pause)
4041 /* Translate orthogonal RX/TX Pause Controls for L1 Configure
4044 fw_port_cap32_t fw_pause = 0;
4046 if (cc_pause & PAUSE_RX)
4047 fw_pause |= FW_PORT_CAP32_FC_RX;
4048 if (cc_pause & PAUSE_TX)
4049 fw_pause |= FW_PORT_CAP32_FC_TX;
/* Without PAUSE_AUTONEG, force the pause settings rather than letting
 * autonegotiation decide them. */
4050 if (!(cc_pause & PAUSE_AUTONEG))
4051 fw_pause |= FW_PORT_CAP32_FORCE_PAUSE;
4053 /* Translate orthogonal Pause controls into IEEE 802.3 Pause,
4054 * Asymmetrical Pause for use in reporting to upper layer OS code, etc.
4055 * Note that these bits are ignored in L1 Configure commands.
4057 if (cc_pause & PAUSE_RX) {
4058 if (cc_pause & PAUSE_TX)
4059 fw_pause |= FW_PORT_CAP32_802_3_PAUSE;
4061 fw_pause |= FW_PORT_CAP32_802_3_ASM_DIR |
4062 FW_PORT_CAP32_802_3_PAUSE;
/* TX-only pause: asymmetric direction bit alone. */
4063 } else if (cc_pause & PAUSE_TX) {
4064 fw_pause |= FW_PORT_CAP32_802_3_ASM_DIR;
4070 /* Translate Firmware Forward Error Correction specification to Common Code */
4071 static inline enum cc_fec fwcap_to_cc_fec(fw_port_cap32_t fw_fec)
4073 enum cc_fec cc_fec = 0;
4075 if (fw_fec & FW_PORT_CAP32_FEC_RS)
4077 if (fw_fec & FW_PORT_CAP32_FEC_BASER_RS)
4078 cc_fec |= FEC_BASER_RS;
4083 /* Translate Common Code Forward Error Correction specification to Firmware */
4084 static inline fw_port_cap32_t cc_to_fwcap_fec(enum cc_fec cc_fec)
4086 fw_port_cap32_t fw_fec = 0;
4088 if (cc_fec & FEC_RS)
4089 fw_fec |= FW_PORT_CAP32_FEC_RS;
4090 if (cc_fec & FEC_BASER_RS)
4091 fw_fec |= FW_PORT_CAP32_FEC_BASER_RS;
4097 * t4_link_acaps - compute Link Advertised Port Capabilities
4098 * @adapter: the adapter
4099 * @port: the Port ID
4100 * @lc: the Port's Link Configuration
4102 * Synthesize the Advertised Port Capabilities we'll be using based on
4103 * the base Advertised Port Capabilities (which have been filtered by
4104 * ADVERT_MASK) plus the individual controls for things like Pause
4105 * Frames, Forward Error Correction, MDI, etc.
4107 fw_port_cap32_t t4_link_acaps(struct adapter *adapter, unsigned int port,
4108 struct link_config *lc)
4110 fw_port_cap32_t fw_fc, fw_fec, acaps;
4111 unsigned int fw_mdi;
4114 fw_mdi = (FW_PORT_CAP32_MDI_V(FW_PORT_CAP32_MDI_AUTO) & lc->pcaps);
4116 /* Convert driver coding of Pause Frame Flow Control settings into the
4119 fw_fc = cc_to_fwcap_pause(lc->requested_fc);
4121 /* Convert Common Code Forward Error Control settings into the
4122 * Firmware's API. If the current Requested FEC has "Automatic"
4123 * (IEEE 802.3) specified, then we use whatever the Firmware
4124 * sent us as part of its IEEE 802.3-based interpretation of
4125 * the Transceiver Module EPROM FEC parameters. Otherwise we
4126 * use whatever is in the current Requested FEC settings.
4128 if (lc->requested_fec & FEC_AUTO)
4129 cc_fec = fwcap_to_cc_fec(lc->def_acaps);
4131 cc_fec = lc->requested_fec;
4132 fw_fec = cc_to_fwcap_fec(cc_fec);
4134 /* Figure out what our Requested Port Capabilities are going to be.
4135 * Note parallel structure in t4_handle_get_port_info() and
4136 * init_link_config().
4138 if (!(lc->pcaps & FW_PORT_CAP32_ANEG)) {
4139 acaps = lc->acaps | fw_fc | fw_fec;
4140 lc->fc = lc->requested_fc & ~PAUSE_AUTONEG;
4142 } else if (lc->autoneg == AUTONEG_DISABLE) {
4143 acaps = lc->speed_caps | fw_fc | fw_fec | fw_mdi;
4144 lc->fc = lc->requested_fc & ~PAUSE_AUTONEG;
4147 acaps = lc->acaps | fw_fc | fw_fec | fw_mdi;
4150 /* Some Requested Port Capabilities are trivially wrong if they exceed
4151 * the Physical Port Capabilities. We can check that here and provide
4152 * moderately useful feedback in the system log.
4154 * Note that older Firmware doesn't have FW_PORT_CAP32_FORCE_PAUSE, so
4155 * we need to exclude this from this check in order to maintain
4158 if ((acaps & ~lc->pcaps) & ~FW_PORT_CAP32_FORCE_PAUSE) {
4159 dev_err(adapter->pdev_dev, "Requested Port Capabilities %#x exceed Physical Port Capabilities %#x\n",
4168 * t4_link_l1cfg_core - apply link configuration to MAC/PHY
4169 * @adapter: the adapter
4170 * @mbox: the Firmware Mailbox to use
4171 * @port: the Port ID
4172 * @lc: the Port's Link Configuration
4173 * @sleep_ok: if true we may sleep while awaiting command completion
4174 * @timeout: time to wait for command to finish before timing out
4175 * (negative implies @sleep_ok=false)
4177 * Set up a port's MAC and PHY according to a desired link configuration.
4178 * - If the PHY can auto-negotiate first decide what to advertise, then
4179 * enable/disable auto-negotiation as desired, and reset.
4180 * - If the PHY does not auto-negotiate just reset it.
4181 * - If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
4182 * otherwise do it later based on the outcome of auto-negotiation.
4184 int t4_link_l1cfg_core(struct adapter *adapter, unsigned int mbox,
4185 unsigned int port, struct link_config *lc,
4186 u8 sleep_ok, int timeout)
4188 unsigned int fw_caps = adapter->params.fw_caps_support;
4189 struct fw_port_cmd cmd;
4190 fw_port_cap32_t rcap;
4193 if (!(lc->pcaps & FW_PORT_CAP32_ANEG) &&
4194 lc->autoneg == AUTONEG_ENABLE) {
4198 /* Compute our Requested Port Capabilities and send that on to the
4201 rcap = t4_link_acaps(adapter, port, lc);
4202 memset(&cmd, 0, sizeof(cmd));
4203 cmd.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PORT_CMD) |
4204 FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
4205 FW_PORT_CMD_PORTID_V(port));
4206 cmd.action_to_len16 =
4207 cpu_to_be32(FW_PORT_CMD_ACTION_V(fw_caps == FW_CAPS16
4208 ? FW_PORT_ACTION_L1_CFG
4209 : FW_PORT_ACTION_L1_CFG32) |
4211 if (fw_caps == FW_CAPS16)
4212 cmd.u.l1cfg.rcap = cpu_to_be32(fwcaps32_to_caps16(rcap));
4214 cmd.u.l1cfg32.rcap32 = cpu_to_be32(rcap);
4216 ret = t4_wr_mbox_meat_timeout(adapter, mbox, &cmd, sizeof(cmd), NULL,
4219 /* Unfortunately, even if the Requested Port Capabilities "fit" within
4220 * the Physical Port Capabilities, some combinations of features may
4221 * still not be legal. For example, 40Gb/s and Reed-Solomon Forward
4222 * Error Correction. So if the Firmware rejects the L1 Configure
4223 * request, flag that here.
4226 dev_err(adapter->pdev_dev,
4227 "Requested Port Capabilities %#x rejected, error %d\n",
4235 * t4_restart_aneg - restart autonegotiation
4236 * @adap: the adapter
4237 * @mbox: mbox to use for the FW command
4238 * @port: the port id
4240 * Restarts autonegotiation for the selected port.
4242 int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port)
4244 unsigned int fw_caps = adap->params.fw_caps_support;
4245 struct fw_port_cmd c;
4247 memset(&c, 0, sizeof(c));
4248 c.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PORT_CMD) |
4249 FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
4250 FW_PORT_CMD_PORTID_V(port));
4252 cpu_to_be32(FW_PORT_CMD_ACTION_V(fw_caps == FW_CAPS16
4253 ? FW_PORT_ACTION_L1_CFG
4254 : FW_PORT_ACTION_L1_CFG32) |
4256 if (fw_caps == FW_CAPS16)
4257 c.u.l1cfg.rcap = cpu_to_be32(FW_PORT_CAP_ANEG);
4259 c.u.l1cfg32.rcap32 = cpu_to_be32(FW_PORT_CAP32_ANEG);
4260 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
typedef void (*int_handler_t)(struct adapter *adap);

/* One entry in an interrupt-cause table consumed by
 * t4_handle_intr_status(); a table is terminated by an entry with mask 0.
 */
struct intr_info {
	unsigned int mask;       /* bits to check in interrupt status */
	const char *msg;         /* message to print or NULL */
	short stat_idx;          /* stat counter to increment or -1 */
	unsigned short fatal;    /* whether the condition reported is fatal */
	int_handler_t int_handler; /* platform-specific int handler */
};
4274 * t4_handle_intr_status - table driven interrupt handler
4275 * @adapter: the adapter that generated the interrupt
4276 * @reg: the interrupt status register to process
4277 * @acts: table of interrupt actions
4279 * A table driven interrupt handler that applies a set of masks to an
4280 * interrupt status word and performs the corresponding actions if the
4281 * interrupts described by the mask have occurred. The actions include
4282 * optionally emitting a warning or alert message. The table is terminated
4283 * by an entry specifying mask 0. Returns the number of fatal interrupt
4286 static int t4_handle_intr_status(struct adapter *adapter, unsigned int reg,
4287 const struct intr_info *acts)
4290 unsigned int mask = 0;
4291 unsigned int status = t4_read_reg(adapter, reg);
4293 for ( ; acts->mask; ++acts) {
4294 if (!(status & acts->mask))
4298 dev_alert(adapter->pdev_dev, "%s (0x%x)\n", acts->msg,
4299 status & acts->mask);
4300 } else if (acts->msg && printk_ratelimit())
4301 dev_warn(adapter->pdev_dev, "%s (0x%x)\n", acts->msg,
4302 status & acts->mask);
4303 if (acts->int_handler)
4304 acts->int_handler(adapter);
4308 if (status) /* clear processed interrupts */
4309 t4_write_reg(adapter, reg, status);
4314 * Interrupt handler for the PCIE module.
4316 static void pcie_intr_handler(struct adapter *adapter)
4318 static const struct intr_info sysbus_intr_info[] = {
4319 { RNPP_F, "RXNP array parity error", -1, 1 },
4320 { RPCP_F, "RXPC array parity error", -1, 1 },
4321 { RCIP_F, "RXCIF array parity error", -1, 1 },
4322 { RCCP_F, "Rx completions control array parity error", -1, 1 },
4323 { RFTP_F, "RXFT array parity error", -1, 1 },
4326 static const struct intr_info pcie_port_intr_info[] = {
4327 { TPCP_F, "TXPC array parity error", -1, 1 },
4328 { TNPP_F, "TXNP array parity error", -1, 1 },
4329 { TFTP_F, "TXFT array parity error", -1, 1 },
4330 { TCAP_F, "TXCA array parity error", -1, 1 },
4331 { TCIP_F, "TXCIF array parity error", -1, 1 },
4332 { RCAP_F, "RXCA array parity error", -1, 1 },
4333 { OTDD_F, "outbound request TLP discarded", -1, 1 },
4334 { RDPE_F, "Rx data parity error", -1, 1 },
4335 { TDUE_F, "Tx uncorrectable data error", -1, 1 },
4338 static const struct intr_info pcie_intr_info[] = {
4339 { MSIADDRLPERR_F, "MSI AddrL parity error", -1, 1 },
4340 { MSIADDRHPERR_F, "MSI AddrH parity error", -1, 1 },
4341 { MSIDATAPERR_F, "MSI data parity error", -1, 1 },
4342 { MSIXADDRLPERR_F, "MSI-X AddrL parity error", -1, 1 },
4343 { MSIXADDRHPERR_F, "MSI-X AddrH parity error", -1, 1 },
4344 { MSIXDATAPERR_F, "MSI-X data parity error", -1, 1 },
4345 { MSIXDIPERR_F, "MSI-X DI parity error", -1, 1 },
4346 { PIOCPLPERR_F, "PCI PIO completion FIFO parity error", -1, 1 },
4347 { PIOREQPERR_F, "PCI PIO request FIFO parity error", -1, 1 },
4348 { TARTAGPERR_F, "PCI PCI target tag FIFO parity error", -1, 1 },
4349 { CCNTPERR_F, "PCI CMD channel count parity error", -1, 1 },
4350 { CREQPERR_F, "PCI CMD channel request parity error", -1, 1 },
4351 { CRSPPERR_F, "PCI CMD channel response parity error", -1, 1 },
4352 { DCNTPERR_F, "PCI DMA channel count parity error", -1, 1 },
4353 { DREQPERR_F, "PCI DMA channel request parity error", -1, 1 },
4354 { DRSPPERR_F, "PCI DMA channel response parity error", -1, 1 },
4355 { HCNTPERR_F, "PCI HMA channel count parity error", -1, 1 },
4356 { HREQPERR_F, "PCI HMA channel request parity error", -1, 1 },
4357 { HRSPPERR_F, "PCI HMA channel response parity error", -1, 1 },
4358 { CFGSNPPERR_F, "PCI config snoop FIFO parity error", -1, 1 },
4359 { FIDPERR_F, "PCI FID parity error", -1, 1 },
4360 { INTXCLRPERR_F, "PCI INTx clear parity error", -1, 1 },
4361 { MATAGPERR_F, "PCI MA tag parity error", -1, 1 },
4362 { PIOTAGPERR_F, "PCI PIO tag parity error", -1, 1 },
4363 { RXCPLPERR_F, "PCI Rx completion parity error", -1, 1 },
4364 { RXWRPERR_F, "PCI Rx write parity error", -1, 1 },
4365 { RPLPERR_F, "PCI replay buffer parity error", -1, 1 },
4366 { PCIESINT_F, "PCI core secondary fault", -1, 1 },
4367 { PCIEPINT_F, "PCI core primary fault", -1, 1 },
4368 { UNXSPLCPLERR_F, "PCI unexpected split completion error",
4373 static struct intr_info t5_pcie_intr_info[] = {
4374 { MSTGRPPERR_F, "Master Response Read Queue parity error",
4376 { MSTTIMEOUTPERR_F, "Master Timeout FIFO parity error", -1, 1 },
4377 { MSIXSTIPERR_F, "MSI-X STI SRAM parity error", -1, 1 },
4378 { MSIXADDRLPERR_F, "MSI-X AddrL parity error", -1, 1 },
4379 { MSIXADDRHPERR_F, "MSI-X AddrH parity error", -1, 1 },
4380 { MSIXDATAPERR_F, "MSI-X data parity error", -1, 1 },
4381 { MSIXDIPERR_F, "MSI-X DI parity error", -1, 1 },
4382 { PIOCPLGRPPERR_F, "PCI PIO completion Group FIFO parity error",
4384 { PIOREQGRPPERR_F, "PCI PIO request Group FIFO parity error",
4386 { TARTAGPERR_F, "PCI PCI target tag FIFO parity error", -1, 1 },
4387 { MSTTAGQPERR_F, "PCI master tag queue parity error", -1, 1 },
4388 { CREQPERR_F, "PCI CMD channel request parity error", -1, 1 },
4389 { CRSPPERR_F, "PCI CMD channel response parity error", -1, 1 },
4390 { DREQWRPERR_F, "PCI DMA channel write request parity error",
4392 { DREQPERR_F, "PCI DMA channel request parity error", -1, 1 },
4393 { DRSPPERR_F, "PCI DMA channel response parity error", -1, 1 },
4394 { HREQWRPERR_F, "PCI HMA channel count parity error", -1, 1 },
4395 { HREQPERR_F, "PCI HMA channel request parity error", -1, 1 },
4396 { HRSPPERR_F, "PCI HMA channel response parity error", -1, 1 },
4397 { CFGSNPPERR_F, "PCI config snoop FIFO parity error", -1, 1 },
4398 { FIDPERR_F, "PCI FID parity error", -1, 1 },
4399 { VFIDPERR_F, "PCI INTx clear parity error", -1, 1 },
4400 { MAGRPPERR_F, "PCI MA group FIFO parity error", -1, 1 },
4401 { PIOTAGPERR_F, "PCI PIO tag parity error", -1, 1 },
4402 { IPRXHDRGRPPERR_F, "PCI IP Rx header group parity error",
4404 { IPRXDATAGRPPERR_F, "PCI IP Rx data group parity error",
4406 { RPLPERR_F, "PCI IP replay buffer parity error", -1, 1 },
4407 { IPSOTPERR_F, "PCI IP SOT buffer parity error", -1, 1 },
4408 { TRGT1GRPPERR_F, "PCI TRGT1 group FIFOs parity error", -1, 1 },
4409 { READRSPERR_F, "Outbound read error", -1, 0 },
4415 if (is_t4(adapter->params.chip))
4416 fat = t4_handle_intr_status(adapter,
4417 PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS_A,
4419 t4_handle_intr_status(adapter,
4420 PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS_A,
4421 pcie_port_intr_info) +
4422 t4_handle_intr_status(adapter, PCIE_INT_CAUSE_A,
4425 fat = t4_handle_intr_status(adapter, PCIE_INT_CAUSE_A,
4429 t4_fatal_err(adapter);
4433 * TP interrupt handler.
4435 static void tp_intr_handler(struct adapter *adapter)
4437 static const struct intr_info tp_intr_info[] = {
4438 { 0x3fffffff, "TP parity error", -1, 1 },
4439 { FLMTXFLSTEMPTY_F, "TP out of Tx pages", -1, 1 },
4443 if (t4_handle_intr_status(adapter, TP_INT_CAUSE_A, tp_intr_info))
4444 t4_fatal_err(adapter);
4448 * SGE interrupt handler.
4450 static void sge_intr_handler(struct adapter *adapter)
4455 static const struct intr_info sge_intr_info[] = {
4456 { ERR_CPL_EXCEED_IQE_SIZE_F,
4457 "SGE received CPL exceeding IQE size", -1, 1 },
4458 { ERR_INVALID_CIDX_INC_F,
4459 "SGE GTS CIDX increment too large", -1, 0 },
4460 { ERR_CPL_OPCODE_0_F, "SGE received 0-length CPL", -1, 0 },
4461 { DBFIFO_LP_INT_F, NULL, -1, 0, t4_db_full },
4462 { ERR_DATA_CPL_ON_HIGH_QID1_F | ERR_DATA_CPL_ON_HIGH_QID0_F,
4463 "SGE IQID > 1023 received CPL for FL", -1, 0 },
4464 { ERR_BAD_DB_PIDX3_F, "SGE DBP 3 pidx increment too large", -1,
4466 { ERR_BAD_DB_PIDX2_F, "SGE DBP 2 pidx increment too large", -1,
4468 { ERR_BAD_DB_PIDX1_F, "SGE DBP 1 pidx increment too large", -1,
4470 { ERR_BAD_DB_PIDX0_F, "SGE DBP 0 pidx increment too large", -1,
4472 { ERR_ING_CTXT_PRIO_F,
4473 "SGE too many priority ingress contexts", -1, 0 },
4474 { INGRESS_SIZE_ERR_F, "SGE illegal ingress QID", -1, 0 },
4475 { EGRESS_SIZE_ERR_F, "SGE illegal egress QID", -1, 0 },
4479 static struct intr_info t4t5_sge_intr_info[] = {
4480 { ERR_DROPPED_DB_F, NULL, -1, 0, t4_db_dropped },
4481 { DBFIFO_HP_INT_F, NULL, -1, 0, t4_db_full },
4482 { ERR_EGR_CTXT_PRIO_F,
4483 "SGE too many priority egress contexts", -1, 0 },
4487 perr = t4_read_reg(adapter, SGE_INT_CAUSE1_A);
4490 dev_alert(adapter->pdev_dev, "SGE Cause1 Parity Error %#x\n",
4494 perr = t4_read_reg(adapter, SGE_INT_CAUSE2_A);
4497 dev_alert(adapter->pdev_dev, "SGE Cause2 Parity Error %#x\n",
4501 if (CHELSIO_CHIP_VERSION(adapter->params.chip) >= CHELSIO_T5) {
4502 perr = t4_read_reg(adapter, SGE_INT_CAUSE5_A);
4503 /* Parity error (CRC) for err_T_RxCRC is trivial, ignore it */
4504 perr &= ~ERR_T_RXCRC_F;
4507 dev_alert(adapter->pdev_dev,
4508 "SGE Cause5 Parity Error %#x\n", perr);
4512 v |= t4_handle_intr_status(adapter, SGE_INT_CAUSE3_A, sge_intr_info);
4513 if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5)
4514 v |= t4_handle_intr_status(adapter, SGE_INT_CAUSE3_A,
4515 t4t5_sge_intr_info);
4517 err = t4_read_reg(adapter, SGE_ERROR_STATS_A);
4518 if (err & ERROR_QID_VALID_F) {
4519 dev_err(adapter->pdev_dev, "SGE error for queue %u\n",
4521 if (err & UNCAPTURED_ERROR_F)
4522 dev_err(adapter->pdev_dev,
4523 "SGE UNCAPTURED_ERROR set (clearing)\n");
4524 t4_write_reg(adapter, SGE_ERROR_STATS_A, ERROR_QID_VALID_F |
4525 UNCAPTURED_ERROR_F);
4529 t4_fatal_err(adapter);
/* Aggregate masks for CIM outbound/inbound queue parity errors. */
#define CIM_OBQ_INTR (OBQULP0PARERR_F | OBQULP1PARERR_F | OBQULP2PARERR_F |\
		      OBQULP3PARERR_F | OBQSGEPARERR_F | OBQNCSIPARERR_F)
#define CIM_IBQ_INTR (IBQTP0PARERR_F | IBQTP1PARERR_F | IBQULPPARERR_F |\
		      IBQSGEHIPARERR_F | IBQSGELOPARERR_F | IBQNCSIPARERR_F)
4538 * CIM interrupt handler.
4540 static void cim_intr_handler(struct adapter *adapter)
4542 static const struct intr_info cim_intr_info[] = {
4543 { PREFDROPINT_F, "CIM control register prefetch drop", -1, 1 },
4544 { CIM_OBQ_INTR, "CIM OBQ parity error", -1, 1 },
4545 { CIM_IBQ_INTR, "CIM IBQ parity error", -1, 1 },
4546 { MBUPPARERR_F, "CIM mailbox uP parity error", -1, 1 },
4547 { MBHOSTPARERR_F, "CIM mailbox host parity error", -1, 1 },
4548 { TIEQINPARERRINT_F, "CIM TIEQ outgoing parity error", -1, 1 },
4549 { TIEQOUTPARERRINT_F, "CIM TIEQ incoming parity error", -1, 1 },
4550 { TIMER0INT_F, "CIM TIMER0 interrupt", -1, 1 },
4553 static const struct intr_info cim_upintr_info[] = {
4554 { RSVDSPACEINT_F, "CIM reserved space access", -1, 1 },
4555 { ILLTRANSINT_F, "CIM illegal transaction", -1, 1 },
4556 { ILLWRINT_F, "CIM illegal write", -1, 1 },
4557 { ILLRDINT_F, "CIM illegal read", -1, 1 },
4558 { ILLRDBEINT_F, "CIM illegal read BE", -1, 1 },
4559 { ILLWRBEINT_F, "CIM illegal write BE", -1, 1 },
4560 { SGLRDBOOTINT_F, "CIM single read from boot space", -1, 1 },
4561 { SGLWRBOOTINT_F, "CIM single write to boot space", -1, 1 },
4562 { BLKWRBOOTINT_F, "CIM block write to boot space", -1, 1 },
4563 { SGLRDFLASHINT_F, "CIM single read from flash space", -1, 1 },
4564 { SGLWRFLASHINT_F, "CIM single write to flash space", -1, 1 },
4565 { BLKWRFLASHINT_F, "CIM block write to flash space", -1, 1 },
4566 { SGLRDEEPROMINT_F, "CIM single EEPROM read", -1, 1 },
4567 { SGLWREEPROMINT_F, "CIM single EEPROM write", -1, 1 },
4568 { BLKRDEEPROMINT_F, "CIM block EEPROM read", -1, 1 },
4569 { BLKWREEPROMINT_F, "CIM block EEPROM write", -1, 1 },
4570 { SGLRDCTLINT_F, "CIM single read from CTL space", -1, 1 },
4571 { SGLWRCTLINT_F, "CIM single write to CTL space", -1, 1 },
4572 { BLKRDCTLINT_F, "CIM block read from CTL space", -1, 1 },
4573 { BLKWRCTLINT_F, "CIM block write to CTL space", -1, 1 },
4574 { SGLRDPLINT_F, "CIM single read from PL space", -1, 1 },
4575 { SGLWRPLINT_F, "CIM single write to PL space", -1, 1 },
4576 { BLKRDPLINT_F, "CIM block read from PL space", -1, 1 },
4577 { BLKWRPLINT_F, "CIM block write to PL space", -1, 1 },
4578 { REQOVRLOOKUPINT_F, "CIM request FIFO overwrite", -1, 1 },
4579 { RSPOVRLOOKUPINT_F, "CIM response FIFO overwrite", -1, 1 },
4580 { TIMEOUTINT_F, "CIM PIF timeout", -1, 1 },
4581 { TIMEOUTMAINT_F, "CIM PIF MA timeout", -1, 1 },
4588 fw_err = t4_read_reg(adapter, PCIE_FW_A);
4589 if (fw_err & PCIE_FW_ERR_F)
4590 t4_report_fw_error(adapter);
4592 /* When the Firmware detects an internal error which normally
4593 * wouldn't raise a Host Interrupt, it forces a CIM Timer0 interrupt
4594 * in order to make sure the Host sees the Firmware Crash. So
4595 * if we have a Timer0 interrupt and don't see a Firmware Crash,
4596 * ignore the Timer0 interrupt.
4599 val = t4_read_reg(adapter, CIM_HOST_INT_CAUSE_A);
4600 if (val & TIMER0INT_F)
4601 if (!(fw_err & PCIE_FW_ERR_F) ||
4602 (PCIE_FW_EVAL_G(fw_err) != PCIE_FW_EVAL_CRASH))
4603 t4_write_reg(adapter, CIM_HOST_INT_CAUSE_A,
4606 fat = t4_handle_intr_status(adapter, CIM_HOST_INT_CAUSE_A,
4608 t4_handle_intr_status(adapter, CIM_HOST_UPACC_INT_CAUSE_A,
4611 t4_fatal_err(adapter);
4615 * ULP RX interrupt handler.
4617 static void ulprx_intr_handler(struct adapter *adapter)
4619 static const struct intr_info ulprx_intr_info[] = {
4620 { 0x1800000, "ULPRX context error", -1, 1 },
4621 { 0x7fffff, "ULPRX parity error", -1, 1 },
4625 if (t4_handle_intr_status(adapter, ULP_RX_INT_CAUSE_A, ulprx_intr_info))
4626 t4_fatal_err(adapter);
4630 * ULP TX interrupt handler.
4632 static void ulptx_intr_handler(struct adapter *adapter)
4634 static const struct intr_info ulptx_intr_info[] = {
4635 { PBL_BOUND_ERR_CH3_F, "ULPTX channel 3 PBL out of bounds", -1,
4637 { PBL_BOUND_ERR_CH2_F, "ULPTX channel 2 PBL out of bounds", -1,
4639 { PBL_BOUND_ERR_CH1_F, "ULPTX channel 1 PBL out of bounds", -1,
4641 { PBL_BOUND_ERR_CH0_F, "ULPTX channel 0 PBL out of bounds", -1,
4643 { 0xfffffff, "ULPTX parity error", -1, 1 },
4647 if (t4_handle_intr_status(adapter, ULP_TX_INT_CAUSE_A, ulptx_intr_info))
4648 t4_fatal_err(adapter);
4652 * PM TX interrupt handler.
4654 static void pmtx_intr_handler(struct adapter *adapter)
4656 static const struct intr_info pmtx_intr_info[] = {
4657 { PCMD_LEN_OVFL0_F, "PMTX channel 0 pcmd too large", -1, 1 },
4658 { PCMD_LEN_OVFL1_F, "PMTX channel 1 pcmd too large", -1, 1 },
4659 { PCMD_LEN_OVFL2_F, "PMTX channel 2 pcmd too large", -1, 1 },
4660 { ZERO_C_CMD_ERROR_F, "PMTX 0-length pcmd", -1, 1 },
4661 { PMTX_FRAMING_ERROR_F, "PMTX framing error", -1, 1 },
4662 { OESPI_PAR_ERROR_F, "PMTX oespi parity error", -1, 1 },
4663 { DB_OPTIONS_PAR_ERROR_F, "PMTX db_options parity error",
4665 { ICSPI_PAR_ERROR_F, "PMTX icspi parity error", -1, 1 },
4666 { PMTX_C_PCMD_PAR_ERROR_F, "PMTX c_pcmd parity error", -1, 1},
4670 if (t4_handle_intr_status(adapter, PM_TX_INT_CAUSE_A, pmtx_intr_info))
4671 t4_fatal_err(adapter);
4675 * PM RX interrupt handler.
4677 static void pmrx_intr_handler(struct adapter *adapter)
4679 static const struct intr_info pmrx_intr_info[] = {
4680 { ZERO_E_CMD_ERROR_F, "PMRX 0-length pcmd", -1, 1 },
4681 { PMRX_FRAMING_ERROR_F, "PMRX framing error", -1, 1 },
4682 { OCSPI_PAR_ERROR_F, "PMRX ocspi parity error", -1, 1 },
4683 { DB_OPTIONS_PAR_ERROR_F, "PMRX db_options parity error",
4685 { IESPI_PAR_ERROR_F, "PMRX iespi parity error", -1, 1 },
4686 { PMRX_E_PCMD_PAR_ERROR_F, "PMRX e_pcmd parity error", -1, 1},
4690 if (t4_handle_intr_status(adapter, PM_RX_INT_CAUSE_A, pmrx_intr_info))
4691 t4_fatal_err(adapter);
4695 * CPL switch interrupt handler.
4697 static void cplsw_intr_handler(struct adapter *adapter)
4699 static const struct intr_info cplsw_intr_info[] = {
4700 { CIM_OP_MAP_PERR_F, "CPLSW CIM op_map parity error", -1, 1 },
4701 { CIM_OVFL_ERROR_F, "CPLSW CIM overflow", -1, 1 },
4702 { TP_FRAMING_ERROR_F, "CPLSW TP framing error", -1, 1 },
4703 { SGE_FRAMING_ERROR_F, "CPLSW SGE framing error", -1, 1 },
4704 { CIM_FRAMING_ERROR_F, "CPLSW CIM framing error", -1, 1 },
4705 { ZERO_SWITCH_ERROR_F, "CPLSW no-switch error", -1, 1 },
4709 if (t4_handle_intr_status(adapter, CPL_INTR_CAUSE_A, cplsw_intr_info))
4710 t4_fatal_err(adapter);
4714 * LE interrupt handler.
4716 static void le_intr_handler(struct adapter *adap)
4718 enum chip_type chip = CHELSIO_CHIP_VERSION(adap->params.chip);
4719 static const struct intr_info le_intr_info[] = {
4720 { LIPMISS_F, "LE LIP miss", -1, 0 },
4721 { LIP0_F, "LE 0 LIP error", -1, 0 },
4722 { PARITYERR_F, "LE parity error", -1, 1 },
4723 { UNKNOWNCMD_F, "LE unknown command", -1, 1 },
4724 { REQQPARERR_F, "LE request queue parity error", -1, 1 },
4728 static struct intr_info t6_le_intr_info[] = {
4729 { T6_LIPMISS_F, "LE LIP miss", -1, 0 },
4730 { T6_LIP0_F, "LE 0 LIP error", -1, 0 },
4731 { CMDTIDERR_F, "LE cmd tid error", -1, 1 },
4732 { TCAMINTPERR_F, "LE parity error", -1, 1 },
4733 { T6_UNKNOWNCMD_F, "LE unknown command", -1, 1 },
4734 { SSRAMINTPERR_F, "LE request queue parity error", -1, 1 },
4735 { HASHTBLMEMCRCERR_F, "LE hash table mem crc error", -1, 0 },
4739 if (t4_handle_intr_status(adap, LE_DB_INT_CAUSE_A,
4740 (chip <= CHELSIO_T5) ?
4741 le_intr_info : t6_le_intr_info))
4746 * MPS interrupt handler.
4748 static void mps_intr_handler(struct adapter *adapter)
4750 static const struct intr_info mps_rx_intr_info[] = {
4751 { 0xffffff, "MPS Rx parity error", -1, 1 },
4754 static const struct intr_info mps_tx_intr_info[] = {
4755 { TPFIFO_V(TPFIFO_M), "MPS Tx TP FIFO parity error", -1, 1 },
4756 { NCSIFIFO_F, "MPS Tx NC-SI FIFO parity error", -1, 1 },
4757 { TXDATAFIFO_V(TXDATAFIFO_M), "MPS Tx data FIFO parity error",
4759 { TXDESCFIFO_V(TXDESCFIFO_M), "MPS Tx desc FIFO parity error",
4761 { BUBBLE_F, "MPS Tx underflow", -1, 1 },
4762 { SECNTERR_F, "MPS Tx SOP/EOP error", -1, 1 },
4763 { FRMERR_F, "MPS Tx framing error", -1, 1 },
4766 static const struct intr_info t6_mps_tx_intr_info[] = {
4767 { TPFIFO_V(TPFIFO_M), "MPS Tx TP FIFO parity error", -1, 1 },
4768 { NCSIFIFO_F, "MPS Tx NC-SI FIFO parity error", -1, 1 },
4769 { TXDATAFIFO_V(TXDATAFIFO_M), "MPS Tx data FIFO parity error",
4771 { TXDESCFIFO_V(TXDESCFIFO_M), "MPS Tx desc FIFO parity error",
4773 /* MPS Tx Bubble is normal for T6 */
4774 { SECNTERR_F, "MPS Tx SOP/EOP error", -1, 1 },
4775 { FRMERR_F, "MPS Tx framing error", -1, 1 },
4778 static const struct intr_info mps_trc_intr_info[] = {
4779 { FILTMEM_V(FILTMEM_M), "MPS TRC filter parity error", -1, 1 },
4780 { PKTFIFO_V(PKTFIFO_M), "MPS TRC packet FIFO parity error",
4782 { MISCPERR_F, "MPS TRC misc parity error", -1, 1 },
4785 static const struct intr_info mps_stat_sram_intr_info[] = {
4786 { 0x1fffff, "MPS statistics SRAM parity error", -1, 1 },
4789 static const struct intr_info mps_stat_tx_intr_info[] = {
4790 { 0xfffff, "MPS statistics Tx FIFO parity error", -1, 1 },
4793 static const struct intr_info mps_stat_rx_intr_info[] = {
4794 { 0xffffff, "MPS statistics Rx FIFO parity error", -1, 1 },
4797 static const struct intr_info mps_cls_intr_info[] = {
4798 { MATCHSRAM_F, "MPS match SRAM parity error", -1, 1 },
4799 { MATCHTCAM_F, "MPS match TCAM parity error", -1, 1 },
4800 { HASHSRAM_F, "MPS hash SRAM parity error", -1, 1 },
4806 fat = t4_handle_intr_status(adapter, MPS_RX_PERR_INT_CAUSE_A,
4808 t4_handle_intr_status(adapter, MPS_TX_INT_CAUSE_A,
4809 is_t6(adapter->params.chip)
4810 ? t6_mps_tx_intr_info
4811 : mps_tx_intr_info) +
4812 t4_handle_intr_status(adapter, MPS_TRC_INT_CAUSE_A,
4813 mps_trc_intr_info) +
4814 t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_SRAM_A,
4815 mps_stat_sram_intr_info) +
4816 t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_TX_FIFO_A,
4817 mps_stat_tx_intr_info) +
4818 t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_RX_FIFO_A,
4819 mps_stat_rx_intr_info) +
4820 t4_handle_intr_status(adapter, MPS_CLS_INT_CAUSE_A,
4823 t4_write_reg(adapter, MPS_INT_CAUSE_A, 0);
4824 t4_read_reg(adapter, MPS_INT_CAUSE_A); /* flush */
4826 t4_fatal_err(adapter);
4829 #define MEM_INT_MASK (PERR_INT_CAUSE_F | ECC_CE_INT_CAUSE_F | \
4833 * EDC/MC interrupt handler.
4835 static void mem_intr_handler(struct adapter *adapter, int idx)
4837 static const char name[4][7] = { "EDC0", "EDC1", "MC/MC0", "MC1" };
4839 unsigned int addr, cnt_addr, v;
4841 if (idx <= MEM_EDC1) {
4842 addr = EDC_REG(EDC_INT_CAUSE_A, idx);
4843 cnt_addr = EDC_REG(EDC_ECC_STATUS_A, idx);
4844 } else if (idx == MEM_MC) {
4845 if (is_t4(adapter->params.chip)) {
4846 addr = MC_INT_CAUSE_A;
4847 cnt_addr = MC_ECC_STATUS_A;
4849 addr = MC_P_INT_CAUSE_A;
4850 cnt_addr = MC_P_ECC_STATUS_A;
4853 addr = MC_REG(MC_P_INT_CAUSE_A, 1);
4854 cnt_addr = MC_REG(MC_P_ECC_STATUS_A, 1);
4857 v = t4_read_reg(adapter, addr) & MEM_INT_MASK;
4858 if (v & PERR_INT_CAUSE_F)
4859 dev_alert(adapter->pdev_dev, "%s FIFO parity error\n",
4861 if (v & ECC_CE_INT_CAUSE_F) {
4862 u32 cnt = ECC_CECNT_G(t4_read_reg(adapter, cnt_addr));
4864 t4_edc_err_read(adapter, idx);
4866 t4_write_reg(adapter, cnt_addr, ECC_CECNT_V(ECC_CECNT_M));
4867 if (printk_ratelimit())
4868 dev_warn(adapter->pdev_dev,
4869 "%u %s correctable ECC data error%s\n",
4870 cnt, name[idx], cnt > 1 ? "s" : "");
4872 if (v & ECC_UE_INT_CAUSE_F)
4873 dev_alert(adapter->pdev_dev,
4874 "%s uncorrectable ECC data error\n", name[idx]);
4876 t4_write_reg(adapter, addr, v);
4877 if (v & (PERR_INT_CAUSE_F | ECC_UE_INT_CAUSE_F))
4878 t4_fatal_err(adapter);
4882 * MA interrupt handler.
4884 static void ma_intr_handler(struct adapter *adap)
4886 u32 v, status = t4_read_reg(adap, MA_INT_CAUSE_A);
4888 if (status & MEM_PERR_INT_CAUSE_F) {
4889 dev_alert(adap->pdev_dev,
4890 "MA parity error, parity status %#x\n",
4891 t4_read_reg(adap, MA_PARITY_ERROR_STATUS1_A));
4892 if (is_t5(adap->params.chip))
4893 dev_alert(adap->pdev_dev,
4894 "MA parity error, parity status %#x\n",
4896 MA_PARITY_ERROR_STATUS2_A));
4898 if (status & MEM_WRAP_INT_CAUSE_F) {
4899 v = t4_read_reg(adap, MA_INT_WRAP_STATUS_A);
4900 dev_alert(adap->pdev_dev, "MA address wrap-around error by "
4901 "client %u to address %#x\n",
4902 MEM_WRAP_CLIENT_NUM_G(v),
4903 MEM_WRAP_ADDRESS_G(v) << 4);
4905 t4_write_reg(adap, MA_INT_CAUSE_A, status);
4910 * SMB interrupt handler.
4912 static void smb_intr_handler(struct adapter *adap)
4914 static const struct intr_info smb_intr_info[] = {
4915 { MSTTXFIFOPARINT_F, "SMB master Tx FIFO parity error", -1, 1 },
4916 { MSTRXFIFOPARINT_F, "SMB master Rx FIFO parity error", -1, 1 },
4917 { SLVFIFOPARINT_F, "SMB slave FIFO parity error", -1, 1 },
4921 if (t4_handle_intr_status(adap, SMB_INT_CAUSE_A, smb_intr_info))
4926 * NC-SI interrupt handler.
4928 static void ncsi_intr_handler(struct adapter *adap)
4930 static const struct intr_info ncsi_intr_info[] = {
4931 { CIM_DM_PRTY_ERR_F, "NC-SI CIM parity error", -1, 1 },
4932 { MPS_DM_PRTY_ERR_F, "NC-SI MPS parity error", -1, 1 },
4933 { TXFIFO_PRTY_ERR_F, "NC-SI Tx FIFO parity error", -1, 1 },
4934 { RXFIFO_PRTY_ERR_F, "NC-SI Rx FIFO parity error", -1, 1 },
4938 if (t4_handle_intr_status(adap, NCSI_INT_CAUSE_A, ncsi_intr_info))
4943 * XGMAC interrupt handler.
4945 static void xgmac_intr_handler(struct adapter *adap, int port)
4947 u32 v, int_cause_reg;
4949 if (is_t4(adap->params.chip))
4950 int_cause_reg = PORT_REG(port, XGMAC_PORT_INT_CAUSE_A);
4952 int_cause_reg = T5_PORT_REG(port, MAC_PORT_INT_CAUSE_A);
4954 v = t4_read_reg(adap, int_cause_reg);
4956 v &= TXFIFO_PRTY_ERR_F | RXFIFO_PRTY_ERR_F;
4960 if (v & TXFIFO_PRTY_ERR_F)
4961 dev_alert(adap->pdev_dev, "XGMAC %d Tx FIFO parity error\n",
4963 if (v & RXFIFO_PRTY_ERR_F)
4964 dev_alert(adap->pdev_dev, "XGMAC %d Rx FIFO parity error\n",
4966 t4_write_reg(adap, PORT_REG(port, XGMAC_PORT_INT_CAUSE_A), v);
4971 * PL interrupt handler.
4973 static void pl_intr_handler(struct adapter *adap)
4975 static const struct intr_info pl_intr_info[] = {
4976 { FATALPERR_F, "T4 fatal parity error", -1, 1 },
4977 { PERRVFID_F, "PL VFID_MAP parity error", -1, 1 },
4981 if (t4_handle_intr_status(adap, PL_PL_INT_CAUSE_A, pl_intr_info))
/* Top-level PL interrupt masks: per-PF cause bits and the global module
 * cause bits handled by t4_slow_intr_handler().
 */
#define PF_INTR_MASK (PFSW_F)
#define GLBL_INTR_MASK (CIM_F | MPS_F | PL_F | PCIE_F | MC_F | EDC0_F | \
		EDC1_F | LE_F | TP_F | MA_F | PM_TX_F | PM_RX_F | ULP_RX_F | \
		CPL_SWITCH_F | SGE_F | ULP_TX_F | SF_F)
4991 * t4_slow_intr_handler - control path interrupt handler
4992 * @adapter: the adapter
4994 * T4 interrupt handler for non-data global interrupt events, e.g., errors.
4995 * The designation 'slow' is because it involves register reads, while
4996 * data interrupts typically don't involve any MMIOs.
4998 int t4_slow_intr_handler(struct adapter *adapter)
5000 /* There are rare cases where a PL_INT_CAUSE bit may end up getting
5001 * set when the corresponding PL_INT_ENABLE bit isn't set. It's
5002 * easiest just to mask that case here.
5004 u32 raw_cause = t4_read_reg(adapter, PL_INT_CAUSE_A);
5005 u32 enable = t4_read_reg(adapter, PL_INT_ENABLE_A);
5006 u32 cause = raw_cause & enable;
5008 if (!(cause & GLBL_INTR_MASK))
5011 cim_intr_handler(adapter);
5013 mps_intr_handler(adapter);
5015 ncsi_intr_handler(adapter);
5017 pl_intr_handler(adapter);
5019 smb_intr_handler(adapter);
5020 if (cause & XGMAC0_F)
5021 xgmac_intr_handler(adapter, 0);
5022 if (cause & XGMAC1_F)
5023 xgmac_intr_handler(adapter, 1);
5024 if (cause & XGMAC_KR0_F)
5025 xgmac_intr_handler(adapter, 2);
5026 if (cause & XGMAC_KR1_F)
5027 xgmac_intr_handler(adapter, 3);
5029 pcie_intr_handler(adapter);
5031 mem_intr_handler(adapter, MEM_MC);
5032 if (is_t5(adapter->params.chip) && (cause & MC1_F))
5033 mem_intr_handler(adapter, MEM_MC1);
5035 mem_intr_handler(adapter, MEM_EDC0);
5037 mem_intr_handler(adapter, MEM_EDC1);
5039 le_intr_handler(adapter);
5041 tp_intr_handler(adapter);
5043 ma_intr_handler(adapter);
5044 if (cause & PM_TX_F)
5045 pmtx_intr_handler(adapter);
5046 if (cause & PM_RX_F)
5047 pmrx_intr_handler(adapter);
5048 if (cause & ULP_RX_F)
5049 ulprx_intr_handler(adapter);
5050 if (cause & CPL_SWITCH_F)
5051 cplsw_intr_handler(adapter);
5053 sge_intr_handler(adapter);
5054 if (cause & ULP_TX_F)
5055 ulptx_intr_handler(adapter);
5057 /* Clear the interrupts just processed for which we are the master. */
5058 t4_write_reg(adapter, PL_INT_CAUSE_A, raw_cause & GLBL_INTR_MASK);
5059 (void)t4_read_reg(adapter, PL_INT_CAUSE_A); /* flush */
5064 * t4_intr_enable - enable interrupts
5065 * @adapter: the adapter whose interrupts should be enabled
5067 * Enable PF-specific interrupts for the calling function and the top-level
5068 * interrupt concentrator for global interrupts. Interrupts are already
5069 * enabled at each module, here we just enable the roots of the interrupt
5072 * Note: this function should be called only when the driver manages
5073 * non PF-specific interrupts from the various HW modules. Only one PCI
5074 * function at a time should be doing this.
5076 void t4_intr_enable(struct adapter *adapter)
5079 u32 whoami = t4_read_reg(adapter, PL_WHOAMI_A);
5080 u32 pf = CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5 ?
5081 SOURCEPF_G(whoami) : T6_SOURCEPF_G(whoami);
5083 if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5)
5084 val = ERR_DROPPED_DB_F | ERR_EGR_CTXT_PRIO_F | DBFIFO_HP_INT_F;
5085 t4_write_reg(adapter, SGE_INT_ENABLE3_A, ERR_CPL_EXCEED_IQE_SIZE_F |
5086 ERR_INVALID_CIDX_INC_F | ERR_CPL_OPCODE_0_F |
5087 ERR_DATA_CPL_ON_HIGH_QID1_F | INGRESS_SIZE_ERR_F |
5088 ERR_DATA_CPL_ON_HIGH_QID0_F | ERR_BAD_DB_PIDX3_F |
5089 ERR_BAD_DB_PIDX2_F | ERR_BAD_DB_PIDX1_F |
5090 ERR_BAD_DB_PIDX0_F | ERR_ING_CTXT_PRIO_F |
5091 DBFIFO_LP_INT_F | EGRESS_SIZE_ERR_F | val);
5092 t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE_A), PF_INTR_MASK);
5093 t4_set_reg_field(adapter, PL_INT_MAP0_A, 0, 1 << pf);
5097 * t4_intr_disable - disable interrupts
5098 * @adapter: the adapter whose interrupts should be disabled
5100 * Disable interrupts. We only disable the top-level interrupt
5101 * concentrators. The caller must be a PCI function managing global
5104 void t4_intr_disable(struct adapter *adapter)
5108 if (pci_channel_offline(adapter->pdev))
5111 whoami = t4_read_reg(adapter, PL_WHOAMI_A);
5112 pf = CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5 ?
5113 SOURCEPF_G(whoami) : T6_SOURCEPF_G(whoami);
5115 t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE_A), 0);
5116 t4_set_reg_field(adapter, PL_INT_MAP0_A, 1 << pf, 0);
5119 unsigned int t4_chip_rss_size(struct adapter *adap)
5121 if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
5122 return RSS_NENTRIES;
5124 return T6_RSS_NENTRIES;
5128 * t4_config_rss_range - configure a portion of the RSS mapping table
5129 * @adapter: the adapter
5130 * @mbox: mbox to use for the FW command
5131 * @viid: virtual interface whose RSS subtable is to be written
5132 * @start: start entry in the table to write
5133 * @n: how many table entries to write
5134 * @rspq: values for the response queue lookup table
5135 * @nrspq: number of values in @rspq
5137 * Programs the selected part of the VI's RSS mapping table with the
5138 * provided values. If @nrspq < @n the supplied values are used repeatedly
5139 * until the full table range is populated.
5141 * The caller must ensure the values in @rspq are in the range allowed for
5144 int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
5145 int start, int n, const u16 *rspq, unsigned int nrspq)
5148 const u16 *rsp = rspq;
5149 const u16 *rsp_end = rspq + nrspq;
5150 struct fw_rss_ind_tbl_cmd cmd;
5152 memset(&cmd, 0, sizeof(cmd));
5153 cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_RSS_IND_TBL_CMD) |
5154 FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
5155 FW_RSS_IND_TBL_CMD_VIID_V(viid));
5156 cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
5158 /* each fw_rss_ind_tbl_cmd takes up to 32 entries */
5160 int nq = min(n, 32);
5161 __be32 *qp = &cmd.iq0_to_iq2;
5163 cmd.niqid = cpu_to_be16(nq);
5164 cmd.startidx = cpu_to_be16(start);
5172 v = FW_RSS_IND_TBL_CMD_IQ0_V(*rsp);
5173 if (++rsp >= rsp_end)
5175 v |= FW_RSS_IND_TBL_CMD_IQ1_V(*rsp);
5176 if (++rsp >= rsp_end)
5178 v |= FW_RSS_IND_TBL_CMD_IQ2_V(*rsp);
5179 if (++rsp >= rsp_end)
5182 *qp++ = cpu_to_be32(v);
5186 ret = t4_wr_mbox(adapter, mbox, &cmd, sizeof(cmd), NULL);
5194 * t4_config_glbl_rss - configure the global RSS mode
5195 * @adapter: the adapter
5196 * @mbox: mbox to use for the FW command
5197 * @mode: global RSS mode
5198 * @flags: mode-specific flags
5200 * Sets the global RSS mode.
5202 int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode,
5205 struct fw_rss_glb_config_cmd c;
5207 memset(&c, 0, sizeof(c));
5208 c.op_to_write = cpu_to_be32(FW_CMD_OP_V(FW_RSS_GLB_CONFIG_CMD) |
5209 FW_CMD_REQUEST_F | FW_CMD_WRITE_F);
5210 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
5211 if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_MANUAL) {
5212 c.u.manual.mode_pkd =
5213 cpu_to_be32(FW_RSS_GLB_CONFIG_CMD_MODE_V(mode));
5214 } else if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL) {
5215 c.u.basicvirtual.mode_pkd =
5216 cpu_to_be32(FW_RSS_GLB_CONFIG_CMD_MODE_V(mode));
5217 c.u.basicvirtual.synmapen_to_hashtoeplitz = cpu_to_be32(flags);
5220 return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
5224 * t4_config_vi_rss - configure per VI RSS settings
5225 * @adapter: the adapter
5226 * @mbox: mbox to use for the FW command
5229 * @defq: id of the default RSS queue for the VI.
5231 * Configures VI-specific RSS properties.
5233 int t4_config_vi_rss(struct adapter *adapter, int mbox, unsigned int viid,
5234 unsigned int flags, unsigned int defq)
5236 struct fw_rss_vi_config_cmd c;
5238 memset(&c, 0, sizeof(c));
5239 c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_RSS_VI_CONFIG_CMD) |
5240 FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
5241 FW_RSS_VI_CONFIG_CMD_VIID_V(viid));
5242 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
5243 c.u.basicvirtual.defaultq_to_udpen = cpu_to_be32(flags |
5244 FW_RSS_VI_CONFIG_CMD_DEFAULTQ_V(defq));
5245 return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
5248 /* Read an RSS table row */
5249 static int rd_rss_row(struct adapter *adap, int row, u32 *val)
5251 t4_write_reg(adap, TP_RSS_LKP_TABLE_A, 0xfff00000 | row);
5252 return t4_wait_op_done_val(adap, TP_RSS_LKP_TABLE_A, LKPTBLROWVLD_F, 1,
5257 * t4_read_rss - read the contents of the RSS mapping table
5258 * @adapter: the adapter
5259 * @map: holds the contents of the RSS mapping table
5261 * Reads the contents of the RSS hash->queue mapping table.
5263 int t4_read_rss(struct adapter *adapter, u16 *map)
5265 int i, ret, nentries;
5268 nentries = t4_chip_rss_size(adapter);
5269 for (i = 0; i < nentries / 2; ++i) {
5270 ret = rd_rss_row(adapter, i, &val);
5273 *map++ = LKPTBLQUEUE0_G(val);
5274 *map++ = LKPTBLQUEUE1_G(val);
5279 static unsigned int t4_use_ldst(struct adapter *adap)
5281 return (adap->flags & CXGB4_FW_OK) && !adap->use_bd;
5285 * t4_tp_fw_ldst_rw - Access TP indirect register through LDST
5286 * @adap: the adapter
5287 * @cmd: TP fw ldst address space type
5288 * @vals: where the indirect register values are stored/written
5289 * @nregs: how many indirect registers to read/write
5290 * @start_index: index of first indirect register to read/write
5291 * @rw: Read (1) or Write (0)
5292 * @sleep_ok: if true we may sleep while awaiting command completion
5294 * Access TP indirect registers through LDST
5296 static int t4_tp_fw_ldst_rw(struct adapter *adap, int cmd, u32 *vals,
5297 unsigned int nregs, unsigned int start_index,
5298 unsigned int rw, bool sleep_ok)
5302 struct fw_ldst_cmd c;
5304 for (i = 0; i < nregs; i++) {
5305 memset(&c, 0, sizeof(c));
5306 c.op_to_addrspace = cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) |
5308 (rw ? FW_CMD_READ_F :
5310 FW_LDST_CMD_ADDRSPACE_V(cmd));
5311 c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
5313 c.u.addrval.addr = cpu_to_be32(start_index + i);
5314 c.u.addrval.val = rw ? 0 : cpu_to_be32(vals[i]);
5315 ret = t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c,
5321 vals[i] = be32_to_cpu(c.u.addrval.val);
5327 * t4_tp_indirect_rw - Read/Write TP indirect register through LDST or backdoor
5328 * @adap: the adapter
5329 * @reg_addr: Address Register
5330 * @reg_data: Data register
5331 * @buff: where the indirect register values are stored/written
5332 * @nregs: how many indirect registers to read/write
5333 * @start_index: index of first indirect register to read/write
5334 * @rw: READ(1) or WRITE(0)
5335 * @sleep_ok: if true we may sleep while awaiting command completion
5337 * Read/Write TP indirect registers through LDST if possible.
5338 * Else, use backdoor access
5340 static void t4_tp_indirect_rw(struct adapter *adap, u32 reg_addr, u32 reg_data,
5341 u32 *buff, u32 nregs, u32 start_index, int rw,
5349 cmd = FW_LDST_ADDRSPC_TP_PIO;
5351 case TP_TM_PIO_ADDR_A:
5352 cmd = FW_LDST_ADDRSPC_TP_TM_PIO;
5354 case TP_MIB_INDEX_A:
5355 cmd = FW_LDST_ADDRSPC_TP_MIB;
5358 goto indirect_access;
5361 if (t4_use_ldst(adap))
5362 rc = t4_tp_fw_ldst_rw(adap, cmd, buff, nregs, start_index, rw,
5369 t4_read_indirect(adap, reg_addr, reg_data, buff, nregs,
5372 t4_write_indirect(adap, reg_addr, reg_data, buff, nregs,
5378 * t4_tp_pio_read - Read TP PIO registers
5379 * @adap: the adapter
5380 * @buff: where the indirect register values are written
5381 * @nregs: how many indirect registers to read
5382 * @start_index: index of first indirect register to read
5383 * @sleep_ok: if true we may sleep while awaiting command completion
5385 * Read TP PIO Registers
5387 void t4_tp_pio_read(struct adapter *adap, u32 *buff, u32 nregs,
5388 u32 start_index, bool sleep_ok)
5390 t4_tp_indirect_rw(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A, buff, nregs,
5391 start_index, 1, sleep_ok);
5395 * t4_tp_pio_write - Write TP PIO registers
5396 * @adap: the adapter
5397 * @buff: where the indirect register values are stored
5398 * @nregs: how many indirect registers to write
5399 * @start_index: index of first indirect register to write
5400 * @sleep_ok: if true we may sleep while awaiting command completion
5402 * Write TP PIO Registers
5404 static void t4_tp_pio_write(struct adapter *adap, u32 *buff, u32 nregs,
5405 u32 start_index, bool sleep_ok)
5407 t4_tp_indirect_rw(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A, buff, nregs,
5408 start_index, 0, sleep_ok);
5412 * t4_tp_tm_pio_read - Read TP TM PIO registers
5413 * @adap: the adapter
5414 * @buff: where the indirect register values are written
5415 * @nregs: how many indirect registers to read
5416 * @start_index: index of first indirect register to read
5417 * @sleep_ok: if true we may sleep while awaiting command completion
5419 * Read TP TM PIO Registers
5421 void t4_tp_tm_pio_read(struct adapter *adap, u32 *buff, u32 nregs,
5422 u32 start_index, bool sleep_ok)
5424 t4_tp_indirect_rw(adap, TP_TM_PIO_ADDR_A, TP_TM_PIO_DATA_A, buff,
5425 nregs, start_index, 1, sleep_ok);
5429 * t4_tp_mib_read - Read TP MIB registers
5430 * @adap: the adapter
5431 * @buff: where the indirect register values are written
5432 * @nregs: how many indirect registers to read
5433 * @start_index: index of first indirect register to read
5434 * @sleep_ok: if true we may sleep while awaiting command completion
5436 * Read TP MIB Registers
5438 void t4_tp_mib_read(struct adapter *adap, u32 *buff, u32 nregs, u32 start_index,
5441 t4_tp_indirect_rw(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, buff, nregs,
5442 start_index, 1, sleep_ok);
5446 * t4_read_rss_key - read the global RSS key
5447 * @adap: the adapter
5448 * @key: 10-entry array holding the 320-bit RSS key
5449 * @sleep_ok: if true we may sleep while awaiting command completion
5451 * Reads the global 320-bit RSS key.
5453 void t4_read_rss_key(struct adapter *adap, u32 *key, bool sleep_ok)
5455 t4_tp_pio_read(adap, key, 10, TP_RSS_SECRET_KEY0_A, sleep_ok);
5459 * t4_write_rss_key - program one of the RSS keys
5460 * @adap: the adapter
5461 * @key: 10-entry array holding the 320-bit RSS key
5462 * @idx: which RSS key to write
5463 * @sleep_ok: if true we may sleep while awaiting command completion
5465 * Writes one of the RSS keys with the given 320-bit value. If @idx is
5466 * 0..15 the corresponding entry in the RSS key table is written,
5467 * otherwise the global RSS key is written.
5469 void t4_write_rss_key(struct adapter *adap, const u32 *key, int idx,
5472 u8 rss_key_addr_cnt = 16;
5473 u32 vrt = t4_read_reg(adap, TP_RSS_CONFIG_VRT_A);
5475 /* T6 and later: for KeyMode 3 (per-vf and per-vf scramble),
5476 * allows access to key addresses 16-63 by using KeyWrAddrX
5477 * as index[5:4](upper 2) into key table
5479 if ((CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5) &&
5480 (vrt & KEYEXTEND_F) && (KEYMODE_G(vrt) == 3))
5481 rss_key_addr_cnt = 32;
5483 t4_tp_pio_write(adap, (void *)key, 10, TP_RSS_SECRET_KEY0_A, sleep_ok);
5485 if (idx >= 0 && idx < rss_key_addr_cnt) {
5486 if (rss_key_addr_cnt > 16)
5487 t4_write_reg(adap, TP_RSS_CONFIG_VRT_A,
5488 KEYWRADDRX_V(idx >> 4) |
5489 T6_VFWRADDR_V(idx) | KEYWREN_F);
5491 t4_write_reg(adap, TP_RSS_CONFIG_VRT_A,
5492 KEYWRADDR_V(idx) | KEYWREN_F);
5497 * t4_read_rss_pf_config - read PF RSS Configuration Table
5498 * @adapter: the adapter
5499 * @index: the entry in the PF RSS table to read
5500 * @valp: where to store the returned value
5501 * @sleep_ok: if true we may sleep while awaiting command completion
5503 * Reads the PF RSS Configuration Table at the specified index and returns
5504 * the value found there.
5506 void t4_read_rss_pf_config(struct adapter *adapter, unsigned int index,
5507 u32 *valp, bool sleep_ok)
5509 t4_tp_pio_read(adapter, valp, 1, TP_RSS_PF0_CONFIG_A + index, sleep_ok);
5513 * t4_read_rss_vf_config - read VF RSS Configuration Table
5514 * @adapter: the adapter
5515 * @index: the entry in the VF RSS table to read
5516 * @vfl: where to store the returned VFL
5517 * @vfh: where to store the returned VFH
5518 * @sleep_ok: if true we may sleep while awaiting command completion
5520 * Reads the VF RSS Configuration Table at the specified index and returns
5521 * the (VFL, VFH) values found there.
5523 void t4_read_rss_vf_config(struct adapter *adapter, unsigned int index,
5524 u32 *vfl, u32 *vfh, bool sleep_ok)
5526 u32 vrt, mask, data;
5528 if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5) {
5529 mask = VFWRADDR_V(VFWRADDR_M);
5530 data = VFWRADDR_V(index);
5532 mask = T6_VFWRADDR_V(T6_VFWRADDR_M);
5533 data = T6_VFWRADDR_V(index);
5536 /* Request that the index'th VF Table values be read into VFL/VFH.
5538 vrt = t4_read_reg(adapter, TP_RSS_CONFIG_VRT_A);
5539 vrt &= ~(VFRDRG_F | VFWREN_F | KEYWREN_F | mask);
5540 vrt |= data | VFRDEN_F;
5541 t4_write_reg(adapter, TP_RSS_CONFIG_VRT_A, vrt);
5543 /* Grab the VFL/VFH values ...
5545 t4_tp_pio_read(adapter, vfl, 1, TP_RSS_VFL_CONFIG_A, sleep_ok);
5546 t4_tp_pio_read(adapter, vfh, 1, TP_RSS_VFH_CONFIG_A, sleep_ok);
5550 * t4_read_rss_pf_map - read PF RSS Map
5551 * @adapter: the adapter
5552 * @sleep_ok: if true we may sleep while awaiting command completion
5554 * Reads the PF RSS Map register and returns its value.
5556 u32 t4_read_rss_pf_map(struct adapter *adapter, bool sleep_ok)
5560 t4_tp_pio_read(adapter, &pfmap, 1, TP_RSS_PF_MAP_A, sleep_ok);
5565 * t4_read_rss_pf_mask - read PF RSS Mask
5566 * @adapter: the adapter
5567 * @sleep_ok: if true we may sleep while awaiting command completion
5569 * Reads the PF RSS Mask register and returns its value.
5571 u32 t4_read_rss_pf_mask(struct adapter *adapter, bool sleep_ok)
5575 t4_tp_pio_read(adapter, &pfmask, 1, TP_RSS_PF_MSK_A, sleep_ok);
5580 * t4_tp_get_tcp_stats - read TP's TCP MIB counters
5581 * @adap: the adapter
5582 * @v4: holds the TCP/IP counter values
5583 * @v6: holds the TCP/IPv6 counter values
5584 * @sleep_ok: if true we may sleep while awaiting command completion
5586 * Returns the values of TP's TCP/IP and TCP/IPv6 MIB counters.
5587 * Either @v4 or @v6 may be %NULL to skip the corresponding stats.
5589 void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4,
5590 struct tp_tcp_stats *v6, bool sleep_ok)
5592 u32 val[TP_MIB_TCP_RXT_SEG_LO_A - TP_MIB_TCP_OUT_RST_A + 1];
5594 #define STAT_IDX(x) ((TP_MIB_TCP_##x##_A) - TP_MIB_TCP_OUT_RST_A)
5595 #define STAT(x) val[STAT_IDX(x)]
5596 #define STAT64(x) (((u64)STAT(x##_HI) << 32) | STAT(x##_LO))
5599 t4_tp_mib_read(adap, val, ARRAY_SIZE(val),
5600 TP_MIB_TCP_OUT_RST_A, sleep_ok);
5601 v4->tcp_out_rsts = STAT(OUT_RST);
5602 v4->tcp_in_segs = STAT64(IN_SEG);
5603 v4->tcp_out_segs = STAT64(OUT_SEG);
5604 v4->tcp_retrans_segs = STAT64(RXT_SEG);
5607 t4_tp_mib_read(adap, val, ARRAY_SIZE(val),
5608 TP_MIB_TCP_V6OUT_RST_A, sleep_ok);
5609 v6->tcp_out_rsts = STAT(OUT_RST);
5610 v6->tcp_in_segs = STAT64(IN_SEG);
5611 v6->tcp_out_segs = STAT64(OUT_SEG);
5612 v6->tcp_retrans_segs = STAT64(RXT_SEG);
5620 * t4_tp_get_err_stats - read TP's error MIB counters
5621 * @adap: the adapter
5622 * @st: holds the counter values
5623 * @sleep_ok: if true we may sleep while awaiting command completion
5625 * Returns the values of TP's error counters.
5627 void t4_tp_get_err_stats(struct adapter *adap, struct tp_err_stats *st,
5630 int nchan = adap->params.arch.nchan;
5632 t4_tp_mib_read(adap, st->mac_in_errs, nchan, TP_MIB_MAC_IN_ERR_0_A,
5634 t4_tp_mib_read(adap, st->hdr_in_errs, nchan, TP_MIB_HDR_IN_ERR_0_A,
5636 t4_tp_mib_read(adap, st->tcp_in_errs, nchan, TP_MIB_TCP_IN_ERR_0_A,
5638 t4_tp_mib_read(adap, st->tnl_cong_drops, nchan,
5639 TP_MIB_TNL_CNG_DROP_0_A, sleep_ok);
5640 t4_tp_mib_read(adap, st->ofld_chan_drops, nchan,
5641 TP_MIB_OFD_CHN_DROP_0_A, sleep_ok);
5642 t4_tp_mib_read(adap, st->tnl_tx_drops, nchan, TP_MIB_TNL_DROP_0_A,
5644 t4_tp_mib_read(adap, st->ofld_vlan_drops, nchan,
5645 TP_MIB_OFD_VLN_DROP_0_A, sleep_ok);
5646 t4_tp_mib_read(adap, st->tcp6_in_errs, nchan,
5647 TP_MIB_TCP_V6IN_ERR_0_A, sleep_ok);
5648 t4_tp_mib_read(adap, &st->ofld_no_neigh, 2, TP_MIB_OFD_ARP_DROP_A,
5653 * t4_tp_get_cpl_stats - read TP's CPL MIB counters
5654 * @adap: the adapter
5655 * @st: holds the counter values
5656 * @sleep_ok: if true we may sleep while awaiting command completion
5658 * Returns the values of TP's CPL counters.
5660 void t4_tp_get_cpl_stats(struct adapter *adap, struct tp_cpl_stats *st,
5663 int nchan = adap->params.arch.nchan;
5665 t4_tp_mib_read(adap, st->req, nchan, TP_MIB_CPL_IN_REQ_0_A, sleep_ok);
5667 t4_tp_mib_read(adap, st->rsp, nchan, TP_MIB_CPL_OUT_RSP_0_A, sleep_ok);
5671 * t4_tp_get_rdma_stats - read TP's RDMA MIB counters
5672 * @adap: the adapter
5673 * @st: holds the counter values
5674 * @sleep_ok: if true we may sleep while awaiting command completion
5676 * Returns the values of TP's RDMA counters.
5678 void t4_tp_get_rdma_stats(struct adapter *adap, struct tp_rdma_stats *st,
5681 t4_tp_mib_read(adap, &st->rqe_dfr_pkt, 2, TP_MIB_RQE_DFR_PKT_A,
5686 * t4_get_fcoe_stats - read TP's FCoE MIB counters for a port
5687 * @adap: the adapter
5688 * @idx: the port index
5689 * @st: holds the counter values
5690 * @sleep_ok: if true we may sleep while awaiting command completion
5692 * Returns the values of TP's FCoE counters for the selected port.
5694 void t4_get_fcoe_stats(struct adapter *adap, unsigned int idx,
5695 struct tp_fcoe_stats *st, bool sleep_ok)
5699 t4_tp_mib_read(adap, &st->frames_ddp, 1, TP_MIB_FCOE_DDP_0_A + idx,
5702 t4_tp_mib_read(adap, &st->frames_drop, 1,
5703 TP_MIB_FCOE_DROP_0_A + idx, sleep_ok);
5705 t4_tp_mib_read(adap, val, 2, TP_MIB_FCOE_BYTE_0_HI_A + 2 * idx,
5708 st->octets_ddp = ((u64)val[0] << 32) | val[1];
5712 * t4_get_usm_stats - read TP's non-TCP DDP MIB counters
5713 * @adap: the adapter
5714 * @st: holds the counter values
5715 * @sleep_ok: if true we may sleep while awaiting command completion
5717 * Returns the values of TP's counters for non-TCP directly-placed packets.
5719 void t4_get_usm_stats(struct adapter *adap, struct tp_usm_stats *st,
5724 t4_tp_mib_read(adap, val, 4, TP_MIB_USM_PKTS_A, sleep_ok);
5725 st->frames = val[0];
5727 st->octets = ((u64)val[2] << 32) | val[3];
5731 * t4_read_mtu_tbl - returns the values in the HW path MTU table
5732 * @adap: the adapter
5733 * @mtus: where to store the MTU values
5734 * @mtu_log: where to store the MTU base-2 log (may be %NULL)
5736 * Reads the HW path MTU table.
5738 void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log)
5743 for (i = 0; i < NMTUS; ++i) {
5744 t4_write_reg(adap, TP_MTU_TABLE_A,
5745 MTUINDEX_V(0xff) | MTUVALUE_V(i));
5746 v = t4_read_reg(adap, TP_MTU_TABLE_A);
5747 mtus[i] = MTUVALUE_G(v);
5749 mtu_log[i] = MTUWIDTH_G(v);
5754 * t4_read_cong_tbl - reads the congestion control table
5755 * @adap: the adapter
5756 * @incr: where to store the alpha values
5758 * Reads the additive increments programmed into the HW congestion
5761 void t4_read_cong_tbl(struct adapter *adap, u16 incr[NMTUS][NCCTRL_WIN])
5763 unsigned int mtu, w;
5765 for (mtu = 0; mtu < NMTUS; ++mtu)
5766 for (w = 0; w < NCCTRL_WIN; ++w) {
5767 t4_write_reg(adap, TP_CCTRL_TABLE_A,
5768 ROWINDEX_V(0xffff) | (mtu << 5) | w);
5769 incr[mtu][w] = (u16)t4_read_reg(adap,
5770 TP_CCTRL_TABLE_A) & 0x1fff;
5775 * t4_tp_wr_bits_indirect - set/clear bits in an indirect TP register
5776 * @adap: the adapter
5777 * @addr: the indirect TP register address
5778 * @mask: specifies the field within the register to modify
5779 * @val: new value for the field
5781 * Sets a field of an indirect TP register to the given value.
5783 void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr,
5784 unsigned int mask, unsigned int val)
5786 t4_write_reg(adap, TP_PIO_ADDR_A, addr);
5787 val |= t4_read_reg(adap, TP_PIO_DATA_A) & ~mask;
5788 t4_write_reg(adap, TP_PIO_DATA_A, val);
/**
 *	init_cong_ctrl - initialize congestion control parameters
 *	@a: the alpha values for congestion control
 *	@b: the beta values for congestion control
 *
 *	Initialize the congestion control parameters.
 *
 *	NOTE(review): the middle of the alpha table (a[9]..a[31]) was lost in
 *	the source listing; the values below follow the standard Chelsio
 *	additive-increment table used by this driver family — confirm against
 *	the upstream t4_hw.c before relying on them.
 */
static void init_cong_ctrl(unsigned short *a, unsigned short *b)
{
	a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1;
	a[9] = 2;
	a[10] = 3;
	a[11] = 4;
	a[12] = 5;
	a[13] = 6;
	a[14] = 7;
	a[15] = 8;
	a[16] = 9;
	a[17] = 10;
	a[18] = 14;
	a[19] = 17;
	a[20] = 21;
	a[21] = 25;
	a[22] = 30;
	a[23] = 35;
	a[24] = 45;
	a[25] = 60;
	a[26] = 80;
	a[27] = 100;
	a[28] = 200;
	a[29] = 300;
	a[30] = 400;
	a[31] = 500;

	b[0] = b[1] = b[2] = b[3] = b[4] = b[5] = b[6] = b[7] = b[8] = 0;
	b[9] = b[10] = 1;
	b[11] = b[12] = 2;
	b[13] = b[14] = b[15] = b[16] = 3;
	b[17] = b[18] = b[19] = b[20] = b[21] = 4;
	b[22] = b[23] = b[24] = b[25] = b[26] = b[27] = 5;
	b[28] = b[29] = b[30] = b[31] = 6;
}
5835 /* The minimum additive increment value for the congestion control table */
5836 #define CC_MIN_INCR 2U
5839 * t4_load_mtus - write the MTU and congestion control HW tables
5840 * @adap: the adapter
5841 * @mtus: the values for the MTU table
5842 * @alpha: the values for the congestion control alpha parameter
5843 * @beta: the values for the congestion control beta parameter
5845 * Write the HW MTU table with the supplied MTUs and the high-speed
5846 * congestion control table with the supplied alpha, beta, and MTUs.
5847 * We write the two tables together because the additive increments
5848 * depend on the MTUs.
5850 void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
5851 const unsigned short *alpha, const unsigned short *beta)
5853 static const unsigned int avg_pkts[NCCTRL_WIN] = {
5854 2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
5855 896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
5856 28672, 40960, 57344, 81920, 114688, 163840, 229376
5861 for (i = 0; i < NMTUS; ++i) {
5862 unsigned int mtu = mtus[i];
5863 unsigned int log2 = fls(mtu);
5865 if (!(mtu & ((1 << log2) >> 2))) /* round */
5867 t4_write_reg(adap, TP_MTU_TABLE_A, MTUINDEX_V(i) |
5868 MTUWIDTH_V(log2) | MTUVALUE_V(mtu));
5870 for (w = 0; w < NCCTRL_WIN; ++w) {
5873 inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
5876 t4_write_reg(adap, TP_CCTRL_TABLE_A, (i << 21) |
5877 (w << 16) | (beta[w] << 13) | inc);
5882 /* Calculates a rate in bytes/s given the number of 256-byte units per 4K core
5883 * clocks. The formula is
5885 * bytes/s = bytes256 * 256 * ClkFreq / 4096
5887 * which is equivalent to
5889 * bytes/s = 62.5 * bytes256 * ClkFreq_ms
5891 static u64 chan_rate(struct adapter *adap, unsigned int bytes256)
5893 u64 v = bytes256 * adap->params.vpd.cclk;
5895 return v * 62 + v / 2;
5899 * t4_get_chan_txrate - get the current per channel Tx rates
5900 * @adap: the adapter
5901 * @nic_rate: rates for NIC traffic
5902 * @ofld_rate: rates for offloaded traffic
5904 * Return the current Tx rates in bytes/s for NIC and offloaded traffic
5907 void t4_get_chan_txrate(struct adapter *adap, u64 *nic_rate, u64 *ofld_rate)
5911 v = t4_read_reg(adap, TP_TX_TRATE_A);
5912 nic_rate[0] = chan_rate(adap, TNLRATE0_G(v));
5913 nic_rate[1] = chan_rate(adap, TNLRATE1_G(v));
5914 if (adap->params.arch.nchan == NCHAN) {
5915 nic_rate[2] = chan_rate(adap, TNLRATE2_G(v));
5916 nic_rate[3] = chan_rate(adap, TNLRATE3_G(v));
5919 v = t4_read_reg(adap, TP_TX_ORATE_A);
5920 ofld_rate[0] = chan_rate(adap, OFDRATE0_G(v));
5921 ofld_rate[1] = chan_rate(adap, OFDRATE1_G(v));
5922 if (adap->params.arch.nchan == NCHAN) {
5923 ofld_rate[2] = chan_rate(adap, OFDRATE2_G(v));
5924 ofld_rate[3] = chan_rate(adap, OFDRATE3_G(v));
5929 * t4_set_trace_filter - configure one of the tracing filters
5930 * @adap: the adapter
5931 * @tp: the desired trace filter parameters
5932 * @idx: which filter to configure
5933 * @enable: whether to enable or disable the filter
5935 * Configures one of the tracing filters available in HW. If @enable is
5936 * %0 @tp is not examined and may be %NULL. The user is responsible to
5937 * set the single/multiple trace mode by writing to MPS_TRC_CFG_A register
5939 int t4_set_trace_filter(struct adapter *adap, const struct trace_params *tp,
5940 int idx, int enable)
5942 int i, ofst = idx * 4;
5943 u32 data_reg, mask_reg, cfg;
5946 t4_write_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A_A + ofst, 0);
5950 cfg = t4_read_reg(adap, MPS_TRC_CFG_A);
5951 if (cfg & TRCMULTIFILTER_F) {
5952 /* If multiple tracers are enabled, then maximum
5953 * capture size is 2.5KB (FIFO size of a single channel)
5954 * minus 2 flits for CPL_TRACE_PKT header.
5956 if (tp->snap_len > ((10 * 1024 / 4) - (2 * 8)))
5959 /* If multiple tracers are disabled, to avoid deadlocks
5960 * maximum packet capture size of 9600 bytes is recommended.
5961 * Also in this mode, only trace0 can be enabled and running.
5963 if (tp->snap_len > 9600 || idx)
5967 if (tp->port > (is_t4(adap->params.chip) ? 11 : 19) || tp->invert > 1 ||
5968 tp->skip_len > TFLENGTH_M || tp->skip_ofst > TFOFFSET_M ||
5969 tp->min_len > TFMINPKTSIZE_M)
5972 /* stop the tracer we'll be changing */
5973 t4_write_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A_A + ofst, 0);
5975 idx *= (MPS_TRC_FILTER1_MATCH_A - MPS_TRC_FILTER0_MATCH_A);
5976 data_reg = MPS_TRC_FILTER0_MATCH_A + idx;
5977 mask_reg = MPS_TRC_FILTER0_DONT_CARE_A + idx;
5979 for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) {
5980 t4_write_reg(adap, data_reg, tp->data[i]);
5981 t4_write_reg(adap, mask_reg, ~tp->mask[i]);
5983 t4_write_reg(adap, MPS_TRC_FILTER_MATCH_CTL_B_A + ofst,
5984 TFCAPTUREMAX_V(tp->snap_len) |
5985 TFMINPKTSIZE_V(tp->min_len));
5986 t4_write_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A_A + ofst,
5987 TFOFFSET_V(tp->skip_ofst) | TFLENGTH_V(tp->skip_len) |
5988 (is_t4(adap->params.chip) ?
5989 TFPORT_V(tp->port) | TFEN_F | TFINVERTMATCH_V(tp->invert) :
5990 T5_TFPORT_V(tp->port) | T5_TFEN_F |
5991 T5_TFINVERTMATCH_V(tp->invert)));
5997 * t4_get_trace_filter - query one of the tracing filters
5998 * @adap: the adapter
5999 * @tp: the current trace filter parameters
6000 * @idx: which trace filter to query
6001 * @enabled: non-zero if the filter is enabled
6003 * Returns the current settings of one of the HW tracing filters.
6005 void t4_get_trace_filter(struct adapter *adap, struct trace_params *tp, int idx,
6009 int i, ofst = idx * 4;
6010 u32 data_reg, mask_reg;
6012 ctla = t4_read_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A_A + ofst);
6013 ctlb = t4_read_reg(adap, MPS_TRC_FILTER_MATCH_CTL_B_A + ofst);
6015 if (is_t4(adap->params.chip)) {
6016 *enabled = !!(ctla & TFEN_F);
6017 tp->port = TFPORT_G(ctla);
6018 tp->invert = !!(ctla & TFINVERTMATCH_F);
6020 *enabled = !!(ctla & T5_TFEN_F);
6021 tp->port = T5_TFPORT_G(ctla);
6022 tp->invert = !!(ctla & T5_TFINVERTMATCH_F);
6024 tp->snap_len = TFCAPTUREMAX_G(ctlb);
6025 tp->min_len = TFMINPKTSIZE_G(ctlb);
6026 tp->skip_ofst = TFOFFSET_G(ctla);
6027 tp->skip_len = TFLENGTH_G(ctla);
6029 ofst = (MPS_TRC_FILTER1_MATCH_A - MPS_TRC_FILTER0_MATCH_A) * idx;
6030 data_reg = MPS_TRC_FILTER0_MATCH_A + ofst;
6031 mask_reg = MPS_TRC_FILTER0_DONT_CARE_A + ofst;
6033 for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) {
6034 tp->mask[i] = ~t4_read_reg(adap, mask_reg);
6035 tp->data[i] = t4_read_reg(adap, data_reg) & tp->mask[i];
6040 * t4_pmtx_get_stats - returns the HW stats from PMTX
6041 * @adap: the adapter
6042 * @cnt: where to store the count statistics
6043 * @cycles: where to store the cycle statistics
6045 * Returns performance statistics from PMTX.
6047 void t4_pmtx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[])
6052 for (i = 0; i < adap->params.arch.pm_stats_cnt; i++) {
6053 t4_write_reg(adap, PM_TX_STAT_CONFIG_A, i + 1);
6054 cnt[i] = t4_read_reg(adap, PM_TX_STAT_COUNT_A);
6055 if (is_t4(adap->params.chip)) {
6056 cycles[i] = t4_read_reg64(adap, PM_TX_STAT_LSB_A);
6058 t4_read_indirect(adap, PM_TX_DBG_CTRL_A,
6059 PM_TX_DBG_DATA_A, data, 2,
6060 PM_TX_DBG_STAT_MSB_A);
6061 cycles[i] = (((u64)data[0] << 32) | data[1]);
6067 * t4_pmrx_get_stats - returns the HW stats from PMRX
6068 * @adap: the adapter
6069 * @cnt: where to store the count statistics
6070 * @cycles: where to store the cycle statistics
6072 * Returns performance statistics from PMRX.
6074 void t4_pmrx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[])
6079 for (i = 0; i < adap->params.arch.pm_stats_cnt; i++) {
6080 t4_write_reg(adap, PM_RX_STAT_CONFIG_A, i + 1);
6081 cnt[i] = t4_read_reg(adap, PM_RX_STAT_COUNT_A);
6082 if (is_t4(adap->params.chip)) {
6083 cycles[i] = t4_read_reg64(adap, PM_RX_STAT_LSB_A);
6085 t4_read_indirect(adap, PM_RX_DBG_CTRL_A,
6086 PM_RX_DBG_DATA_A, data, 2,
6087 PM_RX_DBG_STAT_MSB_A);
6088 cycles[i] = (((u64)data[0] << 32) | data[1]);
6094 * compute_mps_bg_map - compute the MPS Buffer Group Map for a Port
6095 * @adapter: the adapter
6096 * @pidx: the port index
6098 * Computes and returns a bitmap indicating which MPS buffer groups are
6099 * associated with the given Port. Bit i is set if buffer group i is
6102 static inline unsigned int compute_mps_bg_map(struct adapter *adapter,
6105 unsigned int chip_version, nports;
6107 chip_version = CHELSIO_CHIP_VERSION(adapter->params.chip);
6108 nports = 1 << NUMPORTS_G(t4_read_reg(adapter, MPS_CMN_CTL_A));
6110 switch (chip_version) {
6115 case 2: return 3 << (2 * pidx);
6116 case 4: return 1 << pidx;
6122 case 2: return 1 << (2 * pidx);
6127 dev_err(adapter->pdev_dev, "Need MPS Buffer Group Map for Chip %0x, Nports %d\n",
6128 chip_version, nports);
6134 * t4_get_mps_bg_map - return the buffer groups associated with a port
6135 * @adapter: the adapter
6136 * @pidx: the port index
6138 * Returns a bitmap indicating which MPS buffer groups are associated
6139 * with the given Port. Bit i is set if buffer group i is used by the
6142 unsigned int t4_get_mps_bg_map(struct adapter *adapter, int pidx)
6145 unsigned int nports;
6147 nports = 1 << NUMPORTS_G(t4_read_reg(adapter, MPS_CMN_CTL_A));
6148 if (pidx >= nports) {
6149 CH_WARN(adapter, "MPS Port Index %d >= Nports %d\n",
6154 /* If we've already retrieved/computed this, just return the result.
6156 mps_bg_map = adapter->params.mps_bg_map;
6157 if (mps_bg_map[pidx])
6158 return mps_bg_map[pidx];
6160 /* Newer Firmware can tell us what the MPS Buffer Group Map is.
6161 * If we're talking to such Firmware, let it tell us. If the new
6162 * API isn't supported, revert back to old hardcoded way. The value
6163 * obtained from Firmware is encoded in below format:
6165 * val = (( MPSBGMAP[Port 3] << 24 ) |
6166 * ( MPSBGMAP[Port 2] << 16 ) |
6167 * ( MPSBGMAP[Port 1] << 8 ) |
6168 * ( MPSBGMAP[Port 0] << 0 ))
6170 if (adapter->flags & CXGB4_FW_OK) {
6174 param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
6175 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_MPSBGMAP));
6176 ret = t4_query_params_ns(adapter, adapter->mbox, adapter->pf,
6177 0, 1, ¶m, &val);
6181 /* Store the BG Map for all of the Ports in order to
6182 * avoid more calls to the Firmware in the future.
6184 for (p = 0; p < MAX_NPORTS; p++, val >>= 8)
6185 mps_bg_map[p] = val & 0xff;
6187 return mps_bg_map[pidx];
6191 /* Either we're not talking to the Firmware or we're dealing with
6192 * older Firmware which doesn't support the new API to get the MPS
6193 * Buffer Group Map. Fall back to computing it ourselves.
6195 mps_bg_map[pidx] = compute_mps_bg_map(adapter, pidx);
6196 return mps_bg_map[pidx];
6200 * t4_get_tp_e2c_map - return the E2C channel map associated with a port
6201 * @adapter: the adapter
6202 * @pidx: the port index
6204 static unsigned int t4_get_tp_e2c_map(struct adapter *adapter, int pidx)
6206 unsigned int nports;
6210 nports = 1 << NUMPORTS_G(t4_read_reg(adapter, MPS_CMN_CTL_A));
6211 if (pidx >= nports) {
6212 CH_WARN(adapter, "TP E2C Channel Port Index %d >= Nports %d\n",
6217 /* FW version >= 1.16.44.0 can determine E2C channel map using
6218 * FW_PARAMS_PARAM_DEV_TPCHMAP API.
6220 param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
6221 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_TPCHMAP));
6222 ret = t4_query_params_ns(adapter, adapter->mbox, adapter->pf,
6223 0, 1, ¶m, &val);
6225 return (val >> (8 * pidx)) & 0xff;
6231 * t4_get_tp_ch_map - return TP ingress channels associated with a port
6232 * @adap: the adapter
6233 * @pidx: the port index
6235 * Returns a bitmap indicating which TP Ingress Channels are associated
6236 * with a given Port. Bit i is set if TP Ingress Channel i is used by
6239 unsigned int t4_get_tp_ch_map(struct adapter *adap, int pidx)
6241 	unsigned int chip_version = CHELSIO_CHIP_VERSION(adap->params.chip);
/* Number of ports is encoded as a power-of-two exponent in MPS_CMN_CTL. */
6242 	unsigned int nports = 1 << NUMPORTS_G(t4_read_reg(adap, MPS_CMN_CTL_A));
6244 	if (pidx >= nports) {
6245 		dev_warn(adap->pdev_dev, "TP Port Index %d >= Nports %d\n",
/* NOTE(review): the chip-version case labels of this switch (and the
 * inner switch on nports) fall in lines not visible in this chunk.
 */
6250 	switch (chip_version) {
6253 		/* Note that this happens to be the same values as the MPS
6254 		 * Buffer Group Map for these Chips.  But we replicate the code
6255 		 * here because they're really separate concepts.
/* 2 ports: two adjacent ingress channels per port; 4 ports: one each. */
6259 		case 2: return 3 << (2 * pidx);
6260 		case 4: return 1 << pidx;
/* T6 with 2 ports: one ingress channel per port. */
6267 		case 2: return 1 << pidx;
/* No mapping known for this chip/port-count combination: log it loudly. */
6272 	dev_err(adap->pdev_dev, "Need TP Channel Map for Chip %0x, Nports %d\n",
6273 		chip_version, nports);
6278 * t4_get_port_type_description - return Port Type string description
6279 * @port_type: firmware Port Type enumeration
6281 const char *t4_get_port_type_description(enum fw_port_type port_type)
/* Lookup table indexed directly by the firmware port-type enumeration.
 * NOTE(review): the table entries themselves fall in lines not visible
 * in this chunk.
 */
6283 	static const char *const port_type_description[] = {
/* Bounds-check before indexing; the fallback return for unknown types
 * is below the visible lines.
 */
6309 	if (port_type < ARRAY_SIZE(port_type_description))
6310 		return port_type_description[port_type];
6315 * t4_get_port_stats_offset - collect port stats relative to a previous
6317 * @adap: The adapter
6319 * @stats: Current stats to fill
6320 * @offset: Previous stats snapshot
6322 void t4_get_port_stats_offset(struct adapter *adap, int idx,
6323 			      struct port_stats *stats,
6324 			      struct port_stats *offset)
/* Take a fresh hardware snapshot into @stats, then walk both structures
 * as flat arrays of u64 counters to apply @offset as a baseline.
 * NOTE(review): the loop body (presumably "*s -= *o" per field) falls in
 * lines not visible in this chunk — confirm against the full file.
 */
6329 	t4_get_port_stats(adap, idx, stats);
6330 	for (i = 0, s = (u64 *)stats, o = (u64 *)offset;
6331 	     i < (sizeof(struct port_stats) / sizeof(u64));
6337 * t4_get_port_stats - collect port statistics
6338 * @adap: the adapter
6339 * @idx: the port index
6340 * @p: the stats structure to fill
6342 * Collect statistics related to the given port from HW.
6344 void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p)
/* Which MPS buffer groups belong to this port (gates the overflow/trunc
 * counters at the bottom) and the current MPS statistics control settings
 * (gates the pause-frame adjustments below).
 */
6346 	u32 bgmap = t4_get_mps_bg_map(adap, idx);
6347 	u32 stat_ctl = t4_read_reg(adap, MPS_STAT_CTL_A);
/* GET_STAT reads one 64-bit per-port MPS counter; T4 and T5+ lay the
 * per-port statistics registers out differently, hence the two macros
 * PORT_REG vs T5_PORT_REG.  GET_STAT_COM reads a common (non-per-port)
 * MPS counter.
 */
6349 #define GET_STAT(name) \
6350 	t4_read_reg64(adap, \
6351 	(is_t4(adap->params.chip) ? PORT_REG(idx, MPS_PORT_STAT_##name##_L) : \
6352 	T5_PORT_REG(idx, MPS_PORT_STAT_##name##_L)))
6353 #define GET_STAT_COM(name) t4_read_reg64(adap, MPS_STAT_##name##_L)
6355 	p->tx_octets = GET_STAT(TX_PORT_BYTES);
6356 	p->tx_frames = GET_STAT(TX_PORT_FRAMES);
6357 	p->tx_bcast_frames = GET_STAT(TX_PORT_BCAST);
6358 	p->tx_mcast_frames = GET_STAT(TX_PORT_MCAST);
6359 	p->tx_ucast_frames = GET_STAT(TX_PORT_UCAST);
6360 	p->tx_error_frames = GET_STAT(TX_PORT_ERROR);
6361 	p->tx_frames_64 = GET_STAT(TX_PORT_64B);
6362 	p->tx_frames_65_127 = GET_STAT(TX_PORT_65B_127B);
6363 	p->tx_frames_128_255 = GET_STAT(TX_PORT_128B_255B);
6364 	p->tx_frames_256_511 = GET_STAT(TX_PORT_256B_511B);
6365 	p->tx_frames_512_1023 = GET_STAT(TX_PORT_512B_1023B);
6366 	p->tx_frames_1024_1518 = GET_STAT(TX_PORT_1024B_1518B);
6367 	p->tx_frames_1519_max = GET_STAT(TX_PORT_1519B_MAX);
6368 	p->tx_drop = GET_STAT(TX_PORT_DROP);
6369 	p->tx_pause = GET_STAT(TX_PORT_PAUSE);
6370 	p->tx_ppp0 = GET_STAT(TX_PORT_PPP0);
6371 	p->tx_ppp1 = GET_STAT(TX_PORT_PPP1);
6372 	p->tx_ppp2 = GET_STAT(TX_PORT_PPP2);
6373 	p->tx_ppp3 = GET_STAT(TX_PORT_PPP3);
6374 	p->tx_ppp4 = GET_STAT(TX_PORT_PPP4);
6375 	p->tx_ppp5 = GET_STAT(TX_PORT_PPP5);
6376 	p->tx_ppp6 = GET_STAT(TX_PORT_PPP6);
6377 	p->tx_ppp7 = GET_STAT(TX_PORT_PPP7);
/* On T5+ the hardware may (per MPS_STAT_CTL) have counted TX pause
 * frames in the 64B-frame and multicast counters; back them out so the
 * buckets reflect data frames only.
 */
6379 	if (CHELSIO_CHIP_VERSION(adap->params.chip) >= CHELSIO_T5) {
6380 		if (stat_ctl & COUNTPAUSESTATTX_F)
6381 			p->tx_frames_64 -= p->tx_pause;
6382 		if (stat_ctl & COUNTPAUSEMCTX_F)
6383 			p->tx_mcast_frames -= p->tx_pause;
6385 	p->rx_octets = GET_STAT(RX_PORT_BYTES);
6386 	p->rx_frames = GET_STAT(RX_PORT_FRAMES);
6387 	p->rx_bcast_frames = GET_STAT(RX_PORT_BCAST);
6388 	p->rx_mcast_frames = GET_STAT(RX_PORT_MCAST);
6389 	p->rx_ucast_frames = GET_STAT(RX_PORT_UCAST);
6390 	p->rx_too_long = GET_STAT(RX_PORT_MTU_ERROR);
6391 	p->rx_jabber = GET_STAT(RX_PORT_MTU_CRC_ERROR);
6392 	p->rx_fcs_err = GET_STAT(RX_PORT_CRC_ERROR);
6393 	p->rx_len_err = GET_STAT(RX_PORT_LEN_ERROR);
6394 	p->rx_symbol_err = GET_STAT(RX_PORT_SYM_ERROR);
6395 	p->rx_runt = GET_STAT(RX_PORT_LESS_64B);
6396 	p->rx_frames_64 = GET_STAT(RX_PORT_64B);
6397 	p->rx_frames_65_127 = GET_STAT(RX_PORT_65B_127B);
6398 	p->rx_frames_128_255 = GET_STAT(RX_PORT_128B_255B);
6399 	p->rx_frames_256_511 = GET_STAT(RX_PORT_256B_511B);
6400 	p->rx_frames_512_1023 = GET_STAT(RX_PORT_512B_1023B);
6401 	p->rx_frames_1024_1518 = GET_STAT(RX_PORT_1024B_1518B);
6402 	p->rx_frames_1519_max = GET_STAT(RX_PORT_1519B_MAX);
6403 	p->rx_pause = GET_STAT(RX_PORT_PAUSE);
6404 	p->rx_ppp0 = GET_STAT(RX_PORT_PPP0);
6405 	p->rx_ppp1 = GET_STAT(RX_PORT_PPP1);
6406 	p->rx_ppp2 = GET_STAT(RX_PORT_PPP2);
6407 	p->rx_ppp3 = GET_STAT(RX_PORT_PPP3);
6408 	p->rx_ppp4 = GET_STAT(RX_PORT_PPP4);
6409 	p->rx_ppp5 = GET_STAT(RX_PORT_PPP5);
6410 	p->rx_ppp6 = GET_STAT(RX_PORT_PPP6);
6411 	p->rx_ppp7 = GET_STAT(RX_PORT_PPP7);
/* Same pause-frame back-out as above, for the RX direction. */
6413 	if (CHELSIO_CHIP_VERSION(adap->params.chip) >= CHELSIO_T5) {
6414 		if (stat_ctl & COUNTPAUSESTATRX_F)
6415 			p->rx_frames_64 -= p->rx_pause;
6416 		if (stat_ctl & COUNTPAUSEMCRX_F)
6417 			p->rx_mcast_frames -= p->rx_pause;
/* Overflow/truncation counters exist per buffer group; only report the
 * groups this port actually owns (per bgmap), otherwise return 0.
 */
6420 	p->rx_ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_DROP_FRAME) : 0;
6421 	p->rx_ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_DROP_FRAME) : 0;
6422 	p->rx_ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_DROP_FRAME) : 0;
6423 	p->rx_ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_DROP_FRAME) : 0;
6424 	p->rx_trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_TRUNC_FRAME) : 0;
6425 	p->rx_trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_TRUNC_FRAME) : 0;
6426 	p->rx_trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_TRUNC_FRAME) : 0;
6427 	p->rx_trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_TRUNC_FRAME) : 0;
6434 * t4_get_lb_stats - collect loopback port statistics
6435 * @adap: the adapter
6436 * @idx: the loopback port index
6437 * @p: the stats structure to fill
6439 * Return HW statistics for the given loopback port.
6441 void t4_get_lb_stats(struct adapter *adap, int idx, struct lb_port_stats *p)
/* Buffer groups owned by this port; gates the drop/trunc counters below. */
6443 	u32 bgmap = t4_get_mps_bg_map(adap, idx);
/* Loopback-port flavors of the per-port MPS counters; register layout
 * differs between T4 and T5+ (PORT_REG vs T5_PORT_REG).
 */
6445 #define GET_STAT(name) \
6446 	t4_read_reg64(adap, \
6447 	(is_t4(adap->params.chip) ? \
6448 	PORT_REG(idx, MPS_PORT_STAT_LB_PORT_##name##_L) : \
6449 	T5_PORT_REG(idx, MPS_PORT_STAT_LB_PORT_##name##_L)))
6450 #define GET_STAT_COM(name) t4_read_reg64(adap, MPS_STAT_##name##_L)
6452 	p->octets = GET_STAT(BYTES);
6453 	p->frames = GET_STAT(FRAMES);
6454 	p->bcast_frames = GET_STAT(BCAST);
6455 	p->mcast_frames = GET_STAT(MCAST);
6456 	p->ucast_frames = GET_STAT(UCAST);
6457 	p->error_frames = GET_STAT(ERROR);
6459 	p->frames_64 = GET_STAT(64B);
6460 	p->frames_65_127 = GET_STAT(65B_127B);
6461 	p->frames_128_255 = GET_STAT(128B_255B);
6462 	p->frames_256_511 = GET_STAT(256B_511B);
6463 	p->frames_512_1023 = GET_STAT(512B_1023B);
6464 	p->frames_1024_1518 = GET_STAT(1024B_1518B);
6465 	p->frames_1519_max = GET_STAT(1519B_MAX);
6466 	p->drop = GET_STAT(DROP_FRAMES);
/* Per-buffer-group loopback drop/trunc counters; only the groups this
 * port owns are meaningful, others report 0.
 */
6468 	p->ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_DROP_FRAME) : 0;
6469 	p->ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_DROP_FRAME) : 0;
6470 	p->ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_DROP_FRAME) : 0;
6471 	p->ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_LB_DROP_FRAME) : 0;
6472 	p->trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_TRUNC_FRAME) : 0;
6473 	p->trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_TRUNC_FRAME) : 0;
6474 	p->trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_TRUNC_FRAME) : 0;
6475 	p->trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_LB_TRUNC_FRAME) : 0;
6481 /* t4_mk_filtdelwr - create a delete filter WR
6482 * @ftid: the filter ID
6483 * @wr: the filter work request to populate
6484 * @qid: ingress queue to receive the delete notification
6486 * Creates a filter work request to delete the supplied filter. If @qid is
6487 * negative the delete notification is suppressed.
6489 void t4_mk_filtdelwr(unsigned int ftid, struct fw_filter_wr *wr, int qid)
6491 	memset(wr, 0, sizeof(*wr));
6492 	wr->op_pkd = cpu_to_be32(FW_WR_OP_V(FW_FILTER_WR));
/* Work-request length is expressed in 16-byte units. */
6493 	wr->len16_pkd = cpu_to_be32(FW_WR_LEN16_V(sizeof(*wr) / 16));
/* A negative @qid suppresses the delete-completion notification. */
6494 	wr->tid_to_iq = cpu_to_be32(FW_FILTER_WR_TID_V(ftid) |
6495 				    FW_FILTER_WR_NOREPLY_V(qid < 0));
6496 	wr->del_filter_to_l2tix = cpu_to_be32(FW_FILTER_WR_DEL_FILTER_F);
/* NOTE(review): the "if (qid >= 0)" guard for the assignment below is
 * in a line not visible in this chunk.
 */
6498 	wr->rx_chan_rx_rpl_iq =
6499 		cpu_to_be16(FW_FILTER_WR_RX_RPL_IQ_V(qid));
/* INIT_CMD - fill the common header of firmware command structure @var:
 * opcode FW_##cmd##_CMD, the REQUEST flag, the READ or WRITE flag, and
 * the command length in 16-byte units.  (No comments inside the macro:
 * they would break the backslash continuations.  NOTE(review): the
 * closing "} while (0)" line is not visible in this chunk.)
 */
6502 #define INIT_CMD(var, cmd, rd_wr) do { \
6503 	(var).op_to_write = cpu_to_be32(FW_CMD_OP_V(FW_##cmd##_CMD) | \
6504 					FW_CMD_REQUEST_F | \
6505 					FW_CMD_##rd_wr##_F); \
6506 	(var).retval_len16 = cpu_to_be32(FW_LEN16(var)); \
6509 int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox,
6513 	struct fw_ldst_cmd c;
6515 	memset(&c, 0, sizeof(c));
/* Build a load/store command addressed to the firmware address space. */
6516 	ldst_addrspace = FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_FIRMWARE);
6517 	c.op_to_addrspace = cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) |
6521 	c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
/* Write @val to firmware-space address @addr; no reply payload needed. */
6522 	c.u.addrval.addr = cpu_to_be32(addr);
6523 	c.u.addrval.val = cpu_to_be32(val);
6525 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
6529 * t4_mdio_rd - read a PHY register through MDIO
6530 * @adap: the adapter
6531 * @mbox: mailbox to use for the FW command
6532 * @phy_addr: the PHY address
6533 * @mmd: the PHY MMD to access (0 for clause 22 PHYs)
6534 * @reg: the register to read
6535 * @valp: where to store the value
6537 * Issues a FW command through the given mailbox to read a PHY register.
6539 int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
6540 	       unsigned int mmd, unsigned int reg, u16 *valp)
6544 	struct fw_ldst_cmd c;
6546 	memset(&c, 0, sizeof(c));
/* Build a READ load/store command addressed to the MDIO address space. */
6547 	ldst_addrspace = FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_MDIO);
6548 	c.op_to_addrspace = cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) |
6549 					FW_CMD_REQUEST_F | FW_CMD_READ_F |
6551 	c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
/* PHY address + MMD select the device; @mmd is 0 for Clause-22 PHYs. */
6552 	c.u.mdio.paddr_mmd = cpu_to_be16(FW_LDST_CMD_PADDR_V(phy_addr) |
6553 					 FW_LDST_CMD_MMD_V(mmd));
6554 	c.u.mdio.raddr = cpu_to_be16(reg);
/* The reply is written back into the same command buffer; extract the
 * register value only on success.
 */
6556 	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
6558 		*valp = be16_to_cpu(c.u.mdio.rval);
6563 * t4_mdio_wr - write a PHY register through MDIO
6564 * @adap: the adapter
6565 * @mbox: mailbox to use for the FW command
6566 * @phy_addr: the PHY address
6567 * @mmd: the PHY MMD to access (0 for clause 22 PHYs)
6568 * @reg: the register to write
6569 * @val: value to write
6571 * Issues a FW command through the given mailbox to write a PHY register.
6573 int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
6574 	       unsigned int mmd, unsigned int reg, u16 val)
6577 	struct fw_ldst_cmd c;
6579 	memset(&c, 0, sizeof(c));
/* Build a WRITE load/store command addressed to the MDIO address space. */
6580 	ldst_addrspace = FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_MDIO);
6581 	c.op_to_addrspace = cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) |
6582 					FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
6584 	c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
/* PHY address + MMD select the device; @mmd is 0 for Clause-22 PHYs. */
6585 	c.u.mdio.paddr_mmd = cpu_to_be16(FW_LDST_CMD_PADDR_V(phy_addr) |
6586 					 FW_LDST_CMD_MMD_V(mmd));
6587 	c.u.mdio.raddr = cpu_to_be16(reg);
6588 	c.u.mdio.rval = cpu_to_be16(val);
/* No reply payload is needed for a write. */
6590 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
6594 * t4_sge_decode_idma_state - decode the idma state
6595 * @adapter: the adapter
6596 * @state: the state idma is stuck in
6598 void t4_sge_decode_idma_state(struct adapter *adapter, int state)
/* Per-chip tables mapping a stuck SGE IDMA state number to a symbolic
 * name for diagnostics.  NOTE(review): several table entries and parts
 * of the control flow fall in lines not visible in this chunk.
 */
6600 	static const char * const t4_decode[] = {
6602 		"IDMA_PUSH_MORE_CPL_FIFO",
6603 		"IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO",
6605 		"IDMA_PHYSADDR_SEND_PCIEHDR",
6606 		"IDMA_PHYSADDR_SEND_PAYLOAD_FIRST",
6607 		"IDMA_PHYSADDR_SEND_PAYLOAD",
6608 		"IDMA_SEND_FIFO_TO_IMSG",
6609 		"IDMA_FL_REQ_DATA_FL_PREP",
6610 		"IDMA_FL_REQ_DATA_FL",
6612 		"IDMA_FL_H_REQ_HEADER_FL",
6613 		"IDMA_FL_H_SEND_PCIEHDR",
6614 		"IDMA_FL_H_PUSH_CPL_FIFO",
6615 		"IDMA_FL_H_SEND_CPL",
6616 		"IDMA_FL_H_SEND_IP_HDR_FIRST",
6617 		"IDMA_FL_H_SEND_IP_HDR",
6618 		"IDMA_FL_H_REQ_NEXT_HEADER_FL",
6619 		"IDMA_FL_H_SEND_NEXT_PCIEHDR",
6620 		"IDMA_FL_H_SEND_IP_HDR_PADDING",
6621 		"IDMA_FL_D_SEND_PCIEHDR",
6622 		"IDMA_FL_D_SEND_CPL_AND_IP_HDR",
6623 		"IDMA_FL_D_REQ_NEXT_DATA_FL",
6624 		"IDMA_FL_SEND_PCIEHDR",
6625 		"IDMA_FL_PUSH_CPL_FIFO",
6627 		"IDMA_FL_SEND_PAYLOAD_FIRST",
6628 		"IDMA_FL_SEND_PAYLOAD",
6629 		"IDMA_FL_REQ_NEXT_DATA_FL",
6630 		"IDMA_FL_SEND_NEXT_PCIEHDR",
6631 		"IDMA_FL_SEND_PADDING",
6632 		"IDMA_FL_SEND_COMPLETION_TO_IMSG",
6633 		"IDMA_FL_SEND_FIFO_TO_IMSG",
6634 		"IDMA_FL_REQ_DATAFL_DONE",
6635 		"IDMA_FL_REQ_HEADERFL_DONE",
6637 	static const char * const t5_decode[] = {
6640 		"IDMA_PUSH_MORE_CPL_FIFO",
6641 		"IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO",
6642 		"IDMA_SGEFLRFLUSH_SEND_PCIEHDR",
6643 		"IDMA_PHYSADDR_SEND_PCIEHDR",
6644 		"IDMA_PHYSADDR_SEND_PAYLOAD_FIRST",
6645 		"IDMA_PHYSADDR_SEND_PAYLOAD",
6646 		"IDMA_SEND_FIFO_TO_IMSG",
6647 		"IDMA_FL_REQ_DATA_FL",
6649 		"IDMA_FL_DROP_SEND_INC",
6650 		"IDMA_FL_H_REQ_HEADER_FL",
6651 		"IDMA_FL_H_SEND_PCIEHDR",
6652 		"IDMA_FL_H_PUSH_CPL_FIFO",
6653 		"IDMA_FL_H_SEND_CPL",
6654 		"IDMA_FL_H_SEND_IP_HDR_FIRST",
6655 		"IDMA_FL_H_SEND_IP_HDR",
6656 		"IDMA_FL_H_REQ_NEXT_HEADER_FL",
6657 		"IDMA_FL_H_SEND_NEXT_PCIEHDR",
6658 		"IDMA_FL_H_SEND_IP_HDR_PADDING",
6659 		"IDMA_FL_D_SEND_PCIEHDR",
6660 		"IDMA_FL_D_SEND_CPL_AND_IP_HDR",
6661 		"IDMA_FL_D_REQ_NEXT_DATA_FL",
6662 		"IDMA_FL_SEND_PCIEHDR",
6663 		"IDMA_FL_PUSH_CPL_FIFO",
6665 		"IDMA_FL_SEND_PAYLOAD_FIRST",
6666 		"IDMA_FL_SEND_PAYLOAD",
6667 		"IDMA_FL_REQ_NEXT_DATA_FL",
6668 		"IDMA_FL_SEND_NEXT_PCIEHDR",
6669 		"IDMA_FL_SEND_PADDING",
6670 		"IDMA_FL_SEND_COMPLETION_TO_IMSG",
6672 	static const char * const t6_decode[] = {
6674 		"IDMA_PUSH_MORE_CPL_FIFO",
6675 		"IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO",
6676 		"IDMA_SGEFLRFLUSH_SEND_PCIEHDR",
6677 		"IDMA_PHYSADDR_SEND_PCIEHDR",
6678 		"IDMA_PHYSADDR_SEND_PAYLOAD_FIRST",
6679 		"IDMA_PHYSADDR_SEND_PAYLOAD",
6680 		"IDMA_FL_REQ_DATA_FL",
6682 		"IDMA_FL_DROP_SEND_INC",
6683 		"IDMA_FL_H_REQ_HEADER_FL",
6684 		"IDMA_FL_H_SEND_PCIEHDR",
6685 		"IDMA_FL_H_PUSH_CPL_FIFO",
6686 		"IDMA_FL_H_SEND_CPL",
6687 		"IDMA_FL_H_SEND_IP_HDR_FIRST",
6688 		"IDMA_FL_H_SEND_IP_HDR",
6689 		"IDMA_FL_H_REQ_NEXT_HEADER_FL",
6690 		"IDMA_FL_H_SEND_NEXT_PCIEHDR",
6691 		"IDMA_FL_H_SEND_IP_HDR_PADDING",
6692 		"IDMA_FL_D_SEND_PCIEHDR",
6693 		"IDMA_FL_D_SEND_CPL_AND_IP_HDR",
6694 		"IDMA_FL_D_REQ_NEXT_DATA_FL",
6695 		"IDMA_FL_SEND_PCIEHDR",
6696 		"IDMA_FL_PUSH_CPL_FIFO",
6698 		"IDMA_FL_SEND_PAYLOAD_FIRST",
6699 		"IDMA_FL_SEND_PAYLOAD",
6700 		"IDMA_FL_REQ_NEXT_DATA_FL",
6701 		"IDMA_FL_SEND_NEXT_PCIEHDR",
6702 		"IDMA_FL_SEND_PADDING",
6703 		"IDMA_FL_SEND_COMPLETION_TO_IMSG",
/* SGE debug registers dumped alongside the decoded state, to give
 * support engineers raw context for the hang.
 */
6705 	static const u32 sge_regs[] = {
6706 		SGE_DEBUG_DATA_LOW_INDEX_2_A,
6707 		SGE_DEBUG_DATA_LOW_INDEX_3_A,
6708 		SGE_DEBUG_DATA_HIGH_INDEX_10_A,
6710 	const char **sge_idma_decode;
6711 	int sge_idma_decode_nstates;
6713 	unsigned int chip_version = CHELSIO_CHIP_VERSION(adapter->params.chip);
6715 	/* Select the right set of decode strings to dump depending on the
6716 	 * adapter chip type.
6718 	switch (chip_version) {
6720 		sge_idma_decode = (const char **)t4_decode;
6721 		sge_idma_decode_nstates = ARRAY_SIZE(t4_decode);
6725 		sge_idma_decode = (const char **)t5_decode;
6726 		sge_idma_decode_nstates = ARRAY_SIZE(t5_decode);
6730 		sge_idma_decode = (const char **)t6_decode;
6731 		sge_idma_decode_nstates = ARRAY_SIZE(t6_decode);
6735 		dev_err(adapter->pdev_dev,
6736 			"Unsupported chip version %d\n", chip_version);
/* NOTE(review): the switch above already selects a table per chip, yet
 * this is/is-not-T4 re-selection appears as well; whether both paths
 * coexist (and which one wins) cannot be determined from the lines
 * visible in this chunk — confirm against the full file.
 */
6740 	if (is_t4(adapter->params.chip)) {
6741 		sge_idma_decode = (const char **)t4_decode;
6742 		sge_idma_decode_nstates = ARRAY_SIZE(t4_decode);
6744 		sge_idma_decode = (const char **)t5_decode;
6745 		sge_idma_decode_nstates = ARRAY_SIZE(t5_decode);
/* Print the symbolic name when known, otherwise the raw state number. */
6748 	if (state < sge_idma_decode_nstates)
6749 		CH_WARN(adapter, "idma state %s\n", sge_idma_decode[state]);
6751 		CH_WARN(adapter, "idma state %d unknown\n", state);
6753 	for (i = 0; i < ARRAY_SIZE(sge_regs); i++)
6754 		CH_WARN(adapter, "SGE register %#x value %#x\n",
6755 			sge_regs[i], t4_read_reg(adapter, sge_regs[i]));
6759 * t4_sge_ctxt_flush - flush the SGE context cache
6760 * @adap: the adapter
6761 * @mbox: mailbox to use for the FW command
6762 * @ctxt_type: Egress or Ingress
6764 * Issues a FW command through the given mailbox to flush the
6765 * SGE context cache.
6767 int t4_sge_ctxt_flush(struct adapter *adap, unsigned int mbox, int ctxt_type)
6771 	struct fw_ldst_cmd c;
6773 	memset(&c, 0, sizeof(c));
/* Address either the egress or ingress SGE context space, depending on
 * which cache the caller wants flushed.
 */
6774 	ldst_addrspace = FW_LDST_CMD_ADDRSPACE_V(ctxt_type == CTXT_EGRESS ?
6775 						 FW_LDST_ADDRSPC_SGE_EGRC :
6776 						 FW_LDST_ADDRSPC_SGE_INGC);
6777 	c.op_to_addrspace = cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) |
6778 					FW_CMD_REQUEST_F | FW_CMD_READ_F |
6780 	c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
/* The CTXTFLUSH flag in the message field requests the flush itself. */
6781 	c.u.idctxt.msg_ctxtflush = cpu_to_be32(FW_LDST_CMD_CTXTFLUSH_F);
6783 	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
6788 * t4_read_sge_dbqtimers - read SGE Doorbell Queue Timer values
6789 * @adap: the adapter
6790 * @ndbqtimers: size of the provided SGE Doorbell Queue Timer table
6791 * @dbqtimers: SGE Doorbell Queue Timer table
6793 * Reads the SGE Doorbell Queue Timer values into the provided table.
6794 * Returns 0 on success (Firmware and Hardware support this feature),
6795 * an error on failure.
6797 int t4_read_sge_dbqtimers(struct adapter *adap, unsigned int ndbqtimers,
6800 	int ret, dbqtimerix;
/* Query the timers in batches of up to 7 (the FW_PARAMS mailbox limit
 * implied by the params[7]/vals[7] buffers below).
 */
6804 	while (dbqtimerix < ndbqtimers) {
6806 		u32 params[7], vals[7];
6808 		nparams = ndbqtimers - dbqtimerix;
6809 		if (nparams > ARRAY_SIZE(params))
6810 			nparams = ARRAY_SIZE(params);
/* Each parameter selects one Doorbell Queue Timer index (Y field). */
6812 		for (param = 0; param < nparams; param++)
6814 			(FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
6815 			 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_DBQ_TIMER) |
6816 			 FW_PARAMS_PARAM_Y_V(dbqtimerix + param));
6817 		ret = t4_query_params(adap, adap->mbox, adap->pf, 0,
6818 				      nparams, params, vals);
/* Copy this batch of timer values out before advancing the index. */
6822 		for (param = 0; param < nparams; param++)
6823 			dbqtimers[dbqtimerix++] = vals[param];
6829 * t4_fw_hello - establish communication with FW
6830 * @adap: the adapter
6831 * @mbox: mailbox to use for the FW command
6832 * @evt_mbox: mailbox to receive async FW events
6833 * @master: specifies the caller's willingness to be the device master
6834 * @state: returns the current device state (if non-NULL)
6836 * Issues a command to establish communication with FW. Returns either
6837 * an error (negative integer) or the mailbox of the Master PF.
6839 int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox,
6840 		enum dev_master master, enum dev_state *state)
6843 	struct fw_hello_cmd c;
6845 	unsigned int master_mbox;
6846 	int retries = FW_CMD_HELLO_RETRIES;
6849 	memset(&c, 0, sizeof(c));
6850 	INIT_CMD(c, HELLO, WRITE);
/* Encode our mastership preference: MASTER_CANT disables mastership,
 * MASTER_MUST forces it (naming our own mailbox as master), otherwise
 * the firmware picks.  Also register @evt_mbox for async notifications
 * and ask the firmware to clear any stale "device initialized" state.
 */
6851 	c.err_to_clearinit = cpu_to_be32(
6852 		FW_HELLO_CMD_MASTERDIS_V(master == MASTER_CANT) |
6853 		FW_HELLO_CMD_MASTERFORCE_V(master == MASTER_MUST) |
6854 		FW_HELLO_CMD_MBMASTER_V(master == MASTER_MUST ?
6855 					mbox : FW_HELLO_CMD_MBMASTER_M) |
6856 		FW_HELLO_CMD_MBASYNCNOT_V(evt_mbox) |
6857 		FW_HELLO_CMD_STAGE_V(fw_hello_cmd_stage_os) |
6858 		FW_HELLO_CMD_CLEARINIT_F);
6861 	 * Issue the HELLO command to the firmware.  If it's not successful
6862 	 * but indicates that we got a "busy" or "timeout" condition, retry
6863 	 * the HELLO until we exhaust our retry limit.  If we do exceed our
6864 	 * retry limit, check to see if the firmware left us any error
6865 	 * information and report that if so.
6867 	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
6869 		if ((ret == -EBUSY || ret == -ETIMEDOUT) && retries-- > 0)
6871 		if (t4_read_reg(adap, PCIE_FW_A) & PCIE_FW_ERR_F)
6872 			t4_report_fw_error(adap);
/* On success, decode the firmware's reply: who the Master PF is, and
 * the current device state for the caller (if requested).
 */
6876 	v = be32_to_cpu(c.err_to_clearinit);
6877 	master_mbox = FW_HELLO_CMD_MBMASTER_G(v);
6879 		if (v & FW_HELLO_CMD_ERR_F)
6880 			*state = DEV_STATE_ERR;
6881 		else if (v & FW_HELLO_CMD_INIT_F)
6882 			*state = DEV_STATE_INIT;
6884 			*state = DEV_STATE_UNINIT;
6888 	 * If we're not the Master PF then we need to wait around for the
6889 	 * Master PF Driver to finish setting up the adapter.
6891 	 * Note that we also do this wait if we're a non-Master-capable PF and
6892 	 * there is no current Master PF; a Master PF may show up momentarily
6893 	 * and we wouldn't want to fail pointlessly.  (This can happen when an
6894 	 * OS loads lots of different drivers rapidly at the same time).  In
6895 	 * this case, the Master PF returned by the firmware will be
6896 	 * PCIE_FW_MASTER_M so the test below will work ...
6898 	if ((v & (FW_HELLO_CMD_ERR_F|FW_HELLO_CMD_INIT_F)) == 0 &&
6899 	    master_mbox != mbox) {
6900 		int waiting = FW_CMD_HELLO_TIMEOUT;
6903 		 * Wait for the firmware to either indicate an error or
6904 		 * initialized state.  If we see either of these we bail out
6905 		 * and report the issue to the caller.  If we exhaust the
6906 		 * "hello timeout" and we haven't exhausted our retries, try
6907 		 * again.  Otherwise bail with a timeout error.
6916 			 * If neither Error nor Initialized are indicated
6917 			 * by the firmware keep waiting till we exhaust our
6918 			 * timeout ... and then retry if we haven't exhausted
6921 			pcie_fw = t4_read_reg(adap, PCIE_FW_A);
6922 			if (!(pcie_fw & (PCIE_FW_ERR_F|PCIE_FW_INIT_F))) {
6933 			 * We either have an Error or Initialized condition
6934 			 * report errors preferentially.
6937 				if (pcie_fw & PCIE_FW_ERR_F)
6938 					*state = DEV_STATE_ERR;
6939 				else if (pcie_fw & PCIE_FW_INIT_F)
6940 					*state = DEV_STATE_INIT;
6944 			 * If we arrived before a Master PF was selected and
6945 			 * there's not a valid Master PF, grab its identity
6948 			if (master_mbox == PCIE_FW_MASTER_M &&
6949 			    (pcie_fw & PCIE_FW_MASTER_VLD_F))
6950 				master_mbox = PCIE_FW_MASTER_G(pcie_fw);
6959 * t4_fw_bye - end communication with FW
6960 * @adap: the adapter
6961 * @mbox: mailbox to use for the FW command
6963 * Issues a command to terminate communication with FW.
6965 int t4_fw_bye(struct adapter *adap, unsigned int mbox)
6967 	struct fw_bye_cmd c;
/* A BYE is a bare command header; no payload and no reply expected. */
6969 	memset(&c, 0, sizeof(c));
6970 	INIT_CMD(c, BYE, WRITE);
6971 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
6975 * t4_early_init - ask FW to initialize the device
6976 * @adap: the adapter
6977 * @mbox: mailbox to use for the FW command
6979 * Issues a command to FW to partially initialize the device. This
6980 * performs initialization that generally doesn't depend on user input.
6982 int t4_early_init(struct adapter *adap, unsigned int mbox)
6984 	struct fw_initialize_cmd c;
/* INITIALIZE is a bare command header; no payload and no reply needed. */
6986 	memset(&c, 0, sizeof(c));
6987 	INIT_CMD(c, INITIALIZE, WRITE);
6988 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
6992 * t4_fw_reset - issue a reset to FW
6993 * @adap: the adapter
6994 * @mbox: mailbox to use for the FW command
6995 * @reset: specifies the type of reset to perform
6997 * Issues a reset command of the specified type to FW.
6999 int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset)
7001 	struct fw_reset_cmd c;
7003 	memset(&c, 0, sizeof(c));
7004 	INIT_CMD(c, RESET, WRITE);
/* @reset carries the reset-type bits (e.g. PIORST/PIORSTMODE) verbatim. */
7005 	c.val = cpu_to_be32(reset);
7006 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
7010 * t4_fw_halt - issue a reset/halt to FW and put uP into RESET
7011 * @adap: the adapter
7012 * @mbox: mailbox to use for the FW RESET command (if desired)
7013 * @force: force uP into RESET even if FW RESET command fails
7015 * Issues a RESET command to firmware (if desired) with a HALT indication
7016 * and then puts the microprocessor into RESET state. The RESET command
7017 * will only be issued if a legitimate mailbox is provided (mbox <=
7018 * PCIE_FW_MASTER_M).
7020 * This is generally used in order for the host to safely manipulate the
7021 * adapter without fear of conflicting with whatever the firmware might
7022 * be doing. The only way out of this state is to RESTART the firmware
7025 static int t4_fw_halt(struct adapter *adap, unsigned int mbox, int force)
7030 	 * If a legitimate mailbox is provided, issue a RESET command
7031 	 * with a HALT indication.
7033 	if (mbox <= PCIE_FW_MASTER_M) {
7034 		struct fw_reset_cmd c;
7036 		memset(&c, 0, sizeof(c));
7037 		INIT_CMD(c, RESET, WRITE);
/* PIO reset with the HALT flag: firmware stops instead of restarting. */
7038 		c.val = cpu_to_be32(PIORST_F | PIORSTMODE_F);
7039 		c.halt_pkd = cpu_to_be32(FW_RESET_CMD_HALT_F);
7040 		ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
7044 	 * Normally we won't complete the operation if the firmware RESET
7045 	 * command fails but if our caller insists we'll go ahead and put the
7046 	 * uP into RESET.  This can be useful if the firmware is hung or even
7047 	 * missing ... We'll have to take the risk of putting the uP into
7048 	 * RESET without the cooperation of firmware in that case.
7050 	 * We also force the firmware's HALT flag to be on in case we bypassed
7051 	 * the firmware RESET command above or we're dealing with old firmware
7052 	 * which doesn't have the HALT capability.  This will serve as a flag
7053 	 * for the incoming firmware to know that it's coming out of a HALT
7054 	 * rather than a RESET ... if it's new enough to understand that ...
7056 	if (ret == 0 || force) {
/* Assert uP reset in CIM and latch the HALT flag in PCIE_FW. */
7057 		t4_set_reg_field(adap, CIM_BOOT_CFG_A, UPCRST_F, UPCRST_F);
7058 		t4_set_reg_field(adap, PCIE_FW_A, PCIE_FW_HALT_F,
7063 	 * And we always return the result of the firmware RESET command
7064 	 * even when we force the uP into RESET ...
7070 * t4_fw_restart - restart the firmware by taking the uP out of RESET
7071 * @adap: the adapter
7072 * @mbox: mailbox to use for the FW command
7073 * @reset: if we want to do a RESET to restart things
7075 * Restart firmware previously halted by t4_fw_halt(). On successful
7076 * return the previous PF Master remains as the new PF Master and there
7077 * is no need to issue a new HELLO command, etc.
7079 * We do this in two ways:
7081 * 1. If we're dealing with newer firmware we'll simply want to take
7082 * the chip's microprocessor out of RESET. This will cause the
7083 * firmware to start up from its start vector. And then we'll loop
7084 * until the firmware indicates it's started again (PCIE_FW.HALT
7085 * reset to 0) or we timeout.
7087 * 2. If we're dealing with older firmware then we'll need to RESET
7088 * the chip since older firmware won't recognize the PCIE_FW.HALT
7089 * flag and automatically RESET itself on startup.
7091 static int t4_fw_restart(struct adapter *adap, unsigned int mbox, int reset)
7095 	 * Since we're directing the RESET instead of the firmware
7096 	 * doing it automatically, we need to clear the PCIE_FW.HALT
7099 	t4_set_reg_field(adap, PCIE_FW_A, PCIE_FW_HALT_F, 0);
7102 	 * If we've been given a valid mailbox, first try to get the
7103 	 * firmware to do the RESET.  If that works, great and we can
7104 	 * return success.  Otherwise, if we haven't been given a
7105 	 * valid mailbox or the RESET command failed, fall back to
7106 	 * hitting the chip with a hammer.
7108 	if (mbox <= PCIE_FW_MASTER_M) {
/* Release the uP from reset first so the firmware can service the
 * RESET command we are about to send it.
 */
7109 		t4_set_reg_field(adap, CIM_BOOT_CFG_A, UPCRST_F, 0);
7111 		if (t4_fw_reset(adap, mbox,
7112 				PIORST_F | PIORSTMODE_F) == 0)
/* Hammer path: hit the global PL reset register directly. */
7116 	t4_write_reg(adap, PL_RST_A, PIORST_F | PIORSTMODE_F);
/* Release the uP and poll PCIE_FW.HALT until the firmware reports it
 * has come back up, or we hit the command timeout.
 */
7121 	t4_set_reg_field(adap, CIM_BOOT_CFG_A, UPCRST_F, 0);
7122 	for (ms = 0; ms < FW_CMD_MAX_TIMEOUT; ) {
7123 		if (!(t4_read_reg(adap, PCIE_FW_A) & PCIE_FW_HALT_F))
7134 * t4_fw_upgrade - perform all of the steps necessary to upgrade FW
7135 * @adap: the adapter
7136 * @mbox: mailbox to use for the FW RESET command (if desired)
7137 * @fw_data: the firmware image to write
7139 * @force: force upgrade even if firmware doesn't cooperate
7141 * Perform all of the steps necessary for upgrading an adapter's
7142 * firmware image. Normally this requires the cooperation of the
7143 * existing firmware in order to halt all existing activities
7144 * but if an invalid mailbox token is passed in we skip that step
7145 * (though we'll still put the adapter microprocessor into RESET in
7148 * On successful return the new firmware will have been loaded and
7149 * the adapter will have been fully RESET losing all previous setup
7150 * state. On unsuccessful return the adapter may be completely hosed ...
7151 * positive errno indicates that the adapter is ~probably~ intact, a
7152 * negative errno indicates that things are looking bad ...
7154 int t4_fw_upgrade(struct adapter *adap, unsigned int mbox,
7155 		  const u8 *fw_data, unsigned int size, int force)
7157 	const struct fw_hdr *fw_hdr = (const struct fw_hdr *)fw_data;
/* Refuse images built for a different chip before touching anything. */
7160 	if (!t4_fw_matches_chip(adap, fw_hdr))
7163 	/* Disable CXGB4_FW_OK flag so that mbox commands with CXGB4_FW_OK flag
7164 	 * set wont be sent when we are flashing FW.
7166 	adap->flags &= ~CXGB4_FW_OK;
/* Halt the running firmware (RESET+HALT); @force continues even if the
 * halt command itself fails.
 */
7168 	ret = t4_fw_halt(adap, mbox, force);
7169 	if (ret < 0 && !force)
7172 	ret = t4_load_fw(adap, fw_data, size);
7177 	 * If there was a Firmware Configuration File stored in FLASH,
7178 	 * there's a good chance that it won't be compatible with the new
7179 	 * Firmware.  In order to prevent difficult to diagnose adapter
7180 	 * initialization issues, we clear out the Firmware Configuration File
7181 	 * portion of the FLASH .  The user will need to re-FLASH a new
7182 	 * Firmware Configuration File which is compatible with the new
7183 	 * Firmware if that's desired.
7185 	(void)t4_load_cfg(adap, NULL, 0);
7188 	 * Older versions of the firmware don't understand the new
7189 	 * PCIE_FW.HALT flag and so won't know to perform a RESET when they
7190 	 * restart.  So for newly loaded older firmware we'll have to do the
7191 	 * RESET for it so it starts up on a clean slate.  We can tell if
7192 	 * the newly loaded firmware will handle this right by checking
7193 	 * its header flags to see if it advertises the capability.
7195 	reset = ((be32_to_cpu(fw_hdr->flags) & FW_HDR_FLAGS_RESET_HALT) == 0);
7196 	ret = t4_fw_restart(adap, mbox, reset);
7198 	/* Grab potentially new Firmware Device Log parameters so we can see
7199 	 * how healthy the new Firmware is.  It's okay to contact the new
7200 	 * Firmware for these parameters even though, as far as it's
7201 	 * concerned, we've never said "HELLO" to it ...
7203 	(void)t4_init_devlog_params(adap);
/* Re-enable normal mailbox traffic now that flashing is complete. */
7205 	adap->flags |= CXGB4_FW_OK;
7210 * t4_fl_pkt_align - return the fl packet alignment
7211 * @adap: the adapter
7213 * T4 has a single field to specify the packing and padding boundary.
7214 * T5 onwards has separate fields for this and hence the alignment for
7215 * next packet offset is maximum of these two.
7218 int t4_fl_pkt_align(struct adapter *adap)
7220 u32 sge_control, sge_control2;
7221 unsigned int ingpadboundary, ingpackboundary, fl_align, ingpad_shift;
7223 sge_control = t4_read_reg(adap, SGE_CONTROL_A);
7225 /* T4 uses a single control field to specify both the PCIe Padding and
7226 * Packing Boundary. T5 introduced the ability to specify these
7227 * separately. The actual Ingress Packet Data alignment boundary
7228 * within Packed Buffer Mode is the maximum of these two
7229 * specifications. (Note that it makes no real practical sense to
7230 * have the Padding Boundary be larger than the Packing Boundary but you
7231 * could set the chip up that way and, in fact, legacy T4 code would
7232 * end doing this because it would initialize the Padding Boundary and
7233 * leave the Packing Boundary initialized to 0 (16 bytes).)
7234 * Padding Boundary values in T6 starts from 8B,
7235 * where as it is 32B for T4 and T5.
7237 if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
7238 ingpad_shift = INGPADBOUNDARY_SHIFT_X;
7240 ingpad_shift = T6_INGPADBOUNDARY_SHIFT_X;
7242 ingpadboundary = 1 << (INGPADBOUNDARY_G(sge_control) + ingpad_shift);
7244 fl_align = ingpadboundary;
7245 if (!is_t4(adap->params.chip)) {
7246 /* T5 has a weird interpretation of one of the PCIe Packing
7247 * Boundary values. No idea why ...
7249 sge_control2 = t4_read_reg(adap, SGE_CONTROL2_A);
7250 ingpackboundary = INGPACKBOUNDARY_G(sge_control2);
7251 if (ingpackboundary == INGPACKBOUNDARY_16B_X)
7252 ingpackboundary = 16;
7254 ingpackboundary = 1 << (ingpackboundary +
7255 INGPACKBOUNDARY_SHIFT_X);
7257 fl_align = max(ingpadboundary, ingpackboundary);
7263 * t4_fixup_host_params - fix up host-dependent parameters
7264 * @adap: the adapter
7265 * @page_size: the host's Base Page Size
7266 * @cache_line_size: the host's Cache Line Size
7268 * Various registers in T4 contain values which are dependent on the
7269 * host's Base Page and Cache Line Sizes. This function will fix all of
7270 * those registers with the appropriate values as passed in ...
7272 int t4_fixup_host_params(struct adapter *adap, unsigned int page_size,
7273 unsigned int cache_line_size)
7275 unsigned int page_shift = fls(page_size) - 1;
7276 unsigned int sge_hps = page_shift - 10;
7277 unsigned int stat_len = cache_line_size > 64 ? 128 : 64;
7278 unsigned int fl_align = cache_line_size < 32 ? 32 : cache_line_size;
7279 unsigned int fl_align_log = fls(fl_align) - 1;
7281 t4_write_reg(adap, SGE_HOST_PAGE_SIZE_A,
7282 HOSTPAGESIZEPF0_V(sge_hps) |
7283 HOSTPAGESIZEPF1_V(sge_hps) |
7284 HOSTPAGESIZEPF2_V(sge_hps) |
7285 HOSTPAGESIZEPF3_V(sge_hps) |
7286 HOSTPAGESIZEPF4_V(sge_hps) |
7287 HOSTPAGESIZEPF5_V(sge_hps) |
7288 HOSTPAGESIZEPF6_V(sge_hps) |
7289 HOSTPAGESIZEPF7_V(sge_hps));
7291 if (is_t4(adap->params.chip)) {
7292 t4_set_reg_field(adap, SGE_CONTROL_A,
7293 INGPADBOUNDARY_V(INGPADBOUNDARY_M) |
7294 EGRSTATUSPAGESIZE_F,
7295 INGPADBOUNDARY_V(fl_align_log -
7296 INGPADBOUNDARY_SHIFT_X) |
7297 EGRSTATUSPAGESIZE_V(stat_len != 64));
7299 unsigned int pack_align;
7300 unsigned int ingpad, ingpack;
7302 /* T5 introduced the separation of the Free List Padding and
7303 * Packing Boundaries. Thus, we can select a smaller Padding
7304 * Boundary to avoid uselessly chewing up PCIe Link and Memory
7305 * Bandwidth, and use a Packing Boundary which is large enough
7306 * to avoid false sharing between CPUs, etc.
7308 * For the PCI Link, the smaller the Padding Boundary the
7309 * better. For the Memory Controller, a smaller Padding
7310 * Boundary is better until we cross under the Memory Line
7311 * Size (the minimum unit of transfer to/from Memory). If we
7312 * have a Padding Boundary which is smaller than the Memory
7313 * Line Size, that'll involve a Read-Modify-Write cycle on the
7314 * Memory Controller which is never good.
7317 /* We want the Packing Boundary to be based on the Cache Line
7318 * Size in order to help avoid False Sharing performance
7319 * issues between CPUs, etc. We also want the Packing
7320 * Boundary to incorporate the PCI-E Maximum Payload Size. We
7321 * get best performance when the Packing Boundary is a
7322 * multiple of the Maximum Payload Size.
7324 pack_align = fl_align;
7325 if (pci_is_pcie(adap->pdev)) {
7326 unsigned int mps, mps_log;
7329 /* The PCIe Device Control Maximum Payload Size field
7330 * [bits 7:5] encodes sizes as powers of 2 starting at
7333 pcie_capability_read_word(adap->pdev, PCI_EXP_DEVCTL,
7335 mps_log = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5) + 7;
7337 if (mps > pack_align)
7341 /* N.B. T5/T6 have a crazy special interpretation of the "0"
7342 * value for the Packing Boundary. This corresponds to 16
7343 * bytes instead of the expected 32 bytes. So if we want 32
7344 * bytes, the best we can really do is 64 bytes ...
7346 if (pack_align <= 16) {
7347 ingpack = INGPACKBOUNDARY_16B_X;
7349 } else if (pack_align == 32) {
7350 ingpack = INGPACKBOUNDARY_64B_X;
7353 unsigned int pack_align_log = fls(pack_align) - 1;
7355 ingpack = pack_align_log - INGPACKBOUNDARY_SHIFT_X;
7356 fl_align = pack_align;
7359 /* Use the smallest Ingress Padding which isn't smaller than
7360 * the Memory Controller Read/Write Size. We'll take that as
7361 * being 8 bytes since we don't know of any system with a
7362 * wider Memory Controller Bus Width.
7364 if (is_t5(adap->params.chip))
7365 ingpad = INGPADBOUNDARY_32B_X;
7367 ingpad = T6_INGPADBOUNDARY_8B_X;
7369 t4_set_reg_field(adap, SGE_CONTROL_A,
7370 INGPADBOUNDARY_V(INGPADBOUNDARY_M) |
7371 EGRSTATUSPAGESIZE_F,
7372 INGPADBOUNDARY_V(ingpad) |
7373 EGRSTATUSPAGESIZE_V(stat_len != 64));
7374 t4_set_reg_field(adap, SGE_CONTROL2_A,
7375 INGPACKBOUNDARY_V(INGPACKBOUNDARY_M),
7376 INGPACKBOUNDARY_V(ingpack));
7379 * Adjust various SGE Free List Host Buffer Sizes.
7381 * This is something of a crock since we're using fixed indices into
7382 * the array which are also known by the sge.c code and the T4
7383 * Firmware Configuration File. We need to come up with a much better
7384 * approach to managing this array. For now, the first four entries
7389 * 2: Buffer size corresponding to 1500 byte MTU (unpacked mode)
7390 * 3: Buffer size corresponding to 9000 byte MTU (unpacked mode)
7392 * For the single-MTU buffers in unpacked mode we need to include
7393 * space for the SGE Control Packet Shift, 14 byte Ethernet header,
7394 * possible 4 byte VLAN tag, all rounded up to the next Ingress Packet
7395 * Padding boundary. All of these are accommodated in the Factory
7396 * Default Firmware Configuration File but we need to adjust it for
7397 * this host's cache line size.
7399 t4_write_reg(adap, SGE_FL_BUFFER_SIZE0_A, page_size);
7400 t4_write_reg(adap, SGE_FL_BUFFER_SIZE2_A,
7401 (t4_read_reg(adap, SGE_FL_BUFFER_SIZE2_A) + fl_align-1)
7403 t4_write_reg(adap, SGE_FL_BUFFER_SIZE3_A,
7404 (t4_read_reg(adap, SGE_FL_BUFFER_SIZE3_A) + fl_align-1)
7407 t4_write_reg(adap, ULP_RX_TDDP_PSZ_A, HPZ0_V(page_shift - 12));
7413 * t4_fw_initialize - ask FW to initialize the device
7414 * @adap: the adapter
7415 * @mbox: mailbox to use for the FW command
7417 * Issues a command to FW to partially initialize the device. This
7418 * performs initialization that generally doesn't depend on user input.
7420 int t4_fw_initialize(struct adapter *adap, unsigned int mbox)
7422 struct fw_initialize_cmd c;
7424 memset(&c, 0, sizeof(c));
7425 INIT_CMD(c, INITIALIZE, WRITE);
7426 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
7430 * t4_query_params_rw - query FW or device parameters
7431 * @adap: the adapter
7432 * @mbox: mailbox to use for the FW command
7435 * @nparams: the number of parameters
7436 * @params: the parameter names
7437 * @val: the parameter values
7438 * @rw: Write and read flag
7439 * @sleep_ok: if true, we may sleep awaiting mbox cmd completion
7441 * Reads the value of FW or device parameters. Up to 7 parameters can be
7444 int t4_query_params_rw(struct adapter *adap, unsigned int mbox, unsigned int pf,
7445 unsigned int vf, unsigned int nparams, const u32 *params,
7446 u32 *val, int rw, bool sleep_ok)
7449 struct fw_params_cmd c;
7450 __be32 *p = &c.param[0].mnem;
7455 memset(&c, 0, sizeof(c));
7456 c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_PARAMS_CMD) |
7457 FW_CMD_REQUEST_F | FW_CMD_READ_F |
7458 FW_PARAMS_CMD_PFN_V(pf) |
7459 FW_PARAMS_CMD_VFN_V(vf));
7460 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
7462 for (i = 0; i < nparams; i++) {
7463 *p++ = cpu_to_be32(*params++);
7465 *p = cpu_to_be32(*(val + i));
7469 ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok);
7471 for (i = 0, p = &c.param[0].val; i < nparams; i++, p += 2)
7472 *val++ = be32_to_cpu(*p);
7476 int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
7477 unsigned int vf, unsigned int nparams, const u32 *params,
7480 return t4_query_params_rw(adap, mbox, pf, vf, nparams, params, val, 0,
7484 int t4_query_params_ns(struct adapter *adap, unsigned int mbox, unsigned int pf,
7485 unsigned int vf, unsigned int nparams, const u32 *params,
7488 return t4_query_params_rw(adap, mbox, pf, vf, nparams, params, val, 0,
7493 * t4_set_params_timeout - sets FW or device parameters
7494 * @adap: the adapter
7495 * @mbox: mailbox to use for the FW command
7498 * @nparams: the number of parameters
7499 * @params: the parameter names
7500 * @val: the parameter values
7501 * @timeout: the timeout time
7503 * Sets the value of FW or device parameters. Up to 7 parameters can be
7504 * specified at once.
7506 int t4_set_params_timeout(struct adapter *adap, unsigned int mbox,
7507 unsigned int pf, unsigned int vf,
7508 unsigned int nparams, const u32 *params,
7509 const u32 *val, int timeout)
7511 struct fw_params_cmd c;
7512 __be32 *p = &c.param[0].mnem;
7517 memset(&c, 0, sizeof(c));
7518 c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_PARAMS_CMD) |
7519 FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
7520 FW_PARAMS_CMD_PFN_V(pf) |
7521 FW_PARAMS_CMD_VFN_V(vf));
7522 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
7525 *p++ = cpu_to_be32(*params++);
7526 *p++ = cpu_to_be32(*val++);
7529 return t4_wr_mbox_timeout(adap, mbox, &c, sizeof(c), NULL, timeout);
7533 * t4_set_params - sets FW or device parameters
7534 * @adap: the adapter
7535 * @mbox: mailbox to use for the FW command
7538 * @nparams: the number of parameters
7539 * @params: the parameter names
7540 * @val: the parameter values
7542 * Sets the value of FW or device parameters. Up to 7 parameters can be
7543 * specified at once.
7545 int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
7546 unsigned int vf, unsigned int nparams, const u32 *params,
7549 return t4_set_params_timeout(adap, mbox, pf, vf, nparams, params, val,
7550 FW_CMD_MAX_TIMEOUT);
7554 * t4_cfg_pfvf - configure PF/VF resource limits
7555 * @adap: the adapter
7556 * @mbox: mailbox to use for the FW command
7557 * @pf: the PF being configured
7558 * @vf: the VF being configured
7559 * @txq: the max number of egress queues
7560 * @txq_eth_ctrl: the max number of egress Ethernet or control queues
7561 * @rxqi: the max number of interrupt-capable ingress queues
7562 * @rxq: the max number of interruptless ingress queues
7563 * @tc: the PCI traffic class
7564 * @vi: the max number of virtual interfaces
7565 * @cmask: the channel access rights mask for the PF/VF
7566 * @pmask: the port access rights mask for the PF/VF
7567 * @nexact: the maximum number of exact MPS filters
7568 * @rcaps: read capabilities
7569 * @wxcaps: write/execute capabilities
7571 * Configures resource limits and capabilities for a physical or virtual
7574 int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf,
7575 unsigned int vf, unsigned int txq, unsigned int txq_eth_ctrl,
7576 unsigned int rxqi, unsigned int rxq, unsigned int tc,
7577 unsigned int vi, unsigned int cmask, unsigned int pmask,
7578 unsigned int nexact, unsigned int rcaps, unsigned int wxcaps)
7580 struct fw_pfvf_cmd c;
7582 memset(&c, 0, sizeof(c));
7583 c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_PFVF_CMD) | FW_CMD_REQUEST_F |
7584 FW_CMD_WRITE_F | FW_PFVF_CMD_PFN_V(pf) |
7585 FW_PFVF_CMD_VFN_V(vf));
7586 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
7587 c.niqflint_niq = cpu_to_be32(FW_PFVF_CMD_NIQFLINT_V(rxqi) |
7588 FW_PFVF_CMD_NIQ_V(rxq));
7589 c.type_to_neq = cpu_to_be32(FW_PFVF_CMD_CMASK_V(cmask) |
7590 FW_PFVF_CMD_PMASK_V(pmask) |
7591 FW_PFVF_CMD_NEQ_V(txq));
7592 c.tc_to_nexactf = cpu_to_be32(FW_PFVF_CMD_TC_V(tc) |
7593 FW_PFVF_CMD_NVI_V(vi) |
7594 FW_PFVF_CMD_NEXACTF_V(nexact));
7595 c.r_caps_to_nethctrl = cpu_to_be32(FW_PFVF_CMD_R_CAPS_V(rcaps) |
7596 FW_PFVF_CMD_WX_CAPS_V(wxcaps) |
7597 FW_PFVF_CMD_NETHCTRL_V(txq_eth_ctrl));
7598 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
7602 * t4_alloc_vi - allocate a virtual interface
7603 * @adap: the adapter
7604 * @mbox: mailbox to use for the FW command
7605 * @port: physical port associated with the VI
7606 * @pf: the PF owning the VI
7607 * @vf: the VF owning the VI
7608 * @nmac: number of MAC addresses needed (1 to 5)
7609 * @mac: the MAC addresses of the VI
7610 * @rss_size: size of RSS table slice associated with this VI
7611 * @vivld: the destination to store the VI Valid value.
7612 * @vin: the destination to store the VIN value.
7614 * Allocates a virtual interface for the given physical port. If @mac is
7615 * not %NULL it contains the MAC addresses of the VI as assigned by FW.
7616 * @mac should be large enough to hold @nmac Ethernet addresses, they are
7617 * stored consecutively so the space needed is @nmac * 6 bytes.
7618 * Returns a negative error number or the non-negative VI id.
7620 int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
7621 unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac,
7622 unsigned int *rss_size, u8 *vivld, u8 *vin)
7627 memset(&c, 0, sizeof(c));
7628 c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_VI_CMD) | FW_CMD_REQUEST_F |
7629 FW_CMD_WRITE_F | FW_CMD_EXEC_F |
7630 FW_VI_CMD_PFN_V(pf) | FW_VI_CMD_VFN_V(vf));
7631 c.alloc_to_len16 = cpu_to_be32(FW_VI_CMD_ALLOC_F | FW_LEN16(c));
7632 c.portid_pkd = FW_VI_CMD_PORTID_V(port);
7635 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
7640 memcpy(mac, c.mac, sizeof(c.mac));
7643 memcpy(mac + 24, c.nmac3, sizeof(c.nmac3));
7646 memcpy(mac + 18, c.nmac2, sizeof(c.nmac2));
7649 memcpy(mac + 12, c.nmac1, sizeof(c.nmac1));
7652 memcpy(mac + 6, c.nmac0, sizeof(c.nmac0));
7656 *rss_size = FW_VI_CMD_RSSSIZE_G(be16_to_cpu(c.rsssize_pkd));
7659 *vivld = FW_VI_CMD_VFVLD_G(be32_to_cpu(c.alloc_to_len16));
7662 *vin = FW_VI_CMD_VIN_G(be32_to_cpu(c.alloc_to_len16));
7664 return FW_VI_CMD_VIID_G(be16_to_cpu(c.type_viid));
7668 * t4_free_vi - free a virtual interface
7669 * @adap: the adapter
7670 * @mbox: mailbox to use for the FW command
7671 * @pf: the PF owning the VI
7672 * @vf: the VF owning the VI
7673 * @viid: virtual interface identifiler
7675 * Free a previously allocated virtual interface.
7677 int t4_free_vi(struct adapter *adap, unsigned int mbox, unsigned int pf,
7678 unsigned int vf, unsigned int viid)
7682 memset(&c, 0, sizeof(c));
7683 c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_VI_CMD) |
7686 FW_VI_CMD_PFN_V(pf) |
7687 FW_VI_CMD_VFN_V(vf));
7688 c.alloc_to_len16 = cpu_to_be32(FW_VI_CMD_FREE_F | FW_LEN16(c));
7689 c.type_viid = cpu_to_be16(FW_VI_CMD_VIID_V(viid));
7691 return t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
7695 * t4_set_rxmode - set Rx properties of a virtual interface
7696 * @adap: the adapter
7697 * @mbox: mailbox to use for the FW command
7699 * @viid_mirror: the mirror VI id
7700 * @mtu: the new MTU or -1
7701 * @promisc: 1 to enable promiscuous mode, 0 to disable it, -1 no change
7702 * @all_multi: 1 to enable all-multi mode, 0 to disable it, -1 no change
7703 * @bcast: 1 to enable broadcast Rx, 0 to disable it, -1 no change
7704 * @vlanex: 1 to enable HW VLAN extraction, 0 to disable it, -1 no change
7705 * @sleep_ok: if true we may sleep while awaiting command completion
7707 * Sets Rx properties of a virtual interface.
7709 int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
7710 unsigned int viid_mirror, int mtu, int promisc, int all_multi,
7711 int bcast, int vlanex, bool sleep_ok)
7713 struct fw_vi_rxmode_cmd c, c_mirror;
7716 /* convert to FW values */
7718 mtu = FW_RXMODE_MTU_NO_CHG;
7720 promisc = FW_VI_RXMODE_CMD_PROMISCEN_M;
7722 all_multi = FW_VI_RXMODE_CMD_ALLMULTIEN_M;
7724 bcast = FW_VI_RXMODE_CMD_BROADCASTEN_M;
7726 vlanex = FW_VI_RXMODE_CMD_VLANEXEN_M;
7728 memset(&c, 0, sizeof(c));
7729 c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_RXMODE_CMD) |
7730 FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
7731 FW_VI_RXMODE_CMD_VIID_V(viid));
7732 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
7734 cpu_to_be32(FW_VI_RXMODE_CMD_MTU_V(mtu) |
7735 FW_VI_RXMODE_CMD_PROMISCEN_V(promisc) |
7736 FW_VI_RXMODE_CMD_ALLMULTIEN_V(all_multi) |
7737 FW_VI_RXMODE_CMD_BROADCASTEN_V(bcast) |
7738 FW_VI_RXMODE_CMD_VLANEXEN_V(vlanex));
7741 memcpy(&c_mirror, &c, sizeof(c_mirror));
7742 c_mirror.op_to_viid =
7743 cpu_to_be32(FW_CMD_OP_V(FW_VI_RXMODE_CMD) |
7744 FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
7745 FW_VI_RXMODE_CMD_VIID_V(viid_mirror));
7748 ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
7753 ret = t4_wr_mbox_meat(adap, mbox, &c_mirror, sizeof(c_mirror),
7760 * t4_free_encap_mac_filt - frees MPS entry at given index
7761 * @adap: the adapter
7763 * @idx: index of MPS entry to be freed
7764 * @sleep_ok: call is allowed to sleep
7766 * Frees the MPS entry at supplied index
7768 * Returns a negative error number or zero on success
7770 int t4_free_encap_mac_filt(struct adapter *adap, unsigned int viid,
7771 int idx, bool sleep_ok)
7773 struct fw_vi_mac_exact *p;
7774 struct fw_vi_mac_cmd c;
7778 memset(&c, 0, sizeof(c));
7779 c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
7780 FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
7782 FW_VI_MAC_CMD_VIID_V(viid));
7783 exact = FW_VI_MAC_CMD_ENTRY_TYPE_V(FW_VI_MAC_TYPE_EXACTMAC);
7784 c.freemacs_to_len16 = cpu_to_be32(FW_VI_MAC_CMD_FREEMACS_V(0) |
7788 p->valid_to_idx = cpu_to_be16(FW_VI_MAC_CMD_VALID_F |
7789 FW_VI_MAC_CMD_IDX_V(idx));
7790 eth_zero_addr(p->macaddr);
7791 ret = t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, sleep_ok);
7796 * t4_free_raw_mac_filt - Frees a raw mac entry in mps tcam
7797 * @adap: the adapter
7799 * @addr: the MAC address
7801 * @idx: index of the entry in mps tcam
7802 * @lookup_type: MAC address for inner (1) or outer (0) header
7803 * @port_id: the port index
7804 * @sleep_ok: call is allowed to sleep
7806 * Removes the mac entry at the specified index using raw mac interface.
7808 * Returns a negative error number on failure.
7810 int t4_free_raw_mac_filt(struct adapter *adap, unsigned int viid,
7811 const u8 *addr, const u8 *mask, unsigned int idx,
7812 u8 lookup_type, u8 port_id, bool sleep_ok)
7814 struct fw_vi_mac_cmd c;
7815 struct fw_vi_mac_raw *p = &c.u.raw;
7818 memset(&c, 0, sizeof(c));
7819 c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
7820 FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
7822 FW_VI_MAC_CMD_VIID_V(viid));
7823 val = FW_CMD_LEN16_V(1) |
7824 FW_VI_MAC_CMD_ENTRY_TYPE_V(FW_VI_MAC_TYPE_RAW);
7825 c.freemacs_to_len16 = cpu_to_be32(FW_VI_MAC_CMD_FREEMACS_V(0) |
7826 FW_CMD_LEN16_V(val));
7828 p->raw_idx_pkd = cpu_to_be32(FW_VI_MAC_CMD_RAW_IDX_V(idx) |
7829 FW_VI_MAC_ID_BASED_FREE);
7831 /* Lookup Type. Outer header: 0, Inner header: 1 */
7832 p->data0_pkd = cpu_to_be32(DATALKPTYPE_V(lookup_type) |
7833 DATAPORTNUM_V(port_id));
7834 /* Lookup mask and port mask */
7835 p->data0m_pkd = cpu_to_be64(DATALKPTYPE_V(DATALKPTYPE_M) |
7836 DATAPORTNUM_V(DATAPORTNUM_M));
7838 /* Copy the address and the mask */
7839 memcpy((u8 *)&p->data1[0] + 2, addr, ETH_ALEN);
7840 memcpy((u8 *)&p->data1m[0] + 2, mask, ETH_ALEN);
7842 return t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, sleep_ok);
7846 * t4_alloc_encap_mac_filt - Adds a mac entry in mps tcam with VNI support
7847 * @adap: the adapter
7849 * @addr: the MAC address
7851 * @vni: the VNI id for the tunnel protocol
7852 * @vni_mask: mask for the VNI id
7853 * @dip_hit: to enable DIP match for the MPS entry
7854 * @lookup_type: MAC address for inner (1) or outer (0) header
7855 * @sleep_ok: call is allowed to sleep
7857 * Allocates an MPS entry with specified MAC address and VNI value.
7859 * Returns a negative error number or the allocated index for this mac.
7861 int t4_alloc_encap_mac_filt(struct adapter *adap, unsigned int viid,
7862 const u8 *addr, const u8 *mask, unsigned int vni,
7863 unsigned int vni_mask, u8 dip_hit, u8 lookup_type,
7866 struct fw_vi_mac_cmd c;
7867 struct fw_vi_mac_vni *p = c.u.exact_vni;
7871 memset(&c, 0, sizeof(c));
7872 c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
7873 FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
7874 FW_VI_MAC_CMD_VIID_V(viid));
7875 val = FW_CMD_LEN16_V(1) |
7876 FW_VI_MAC_CMD_ENTRY_TYPE_V(FW_VI_MAC_TYPE_EXACTMAC_VNI);
7877 c.freemacs_to_len16 = cpu_to_be32(val);
7878 p->valid_to_idx = cpu_to_be16(FW_VI_MAC_CMD_VALID_F |
7879 FW_VI_MAC_CMD_IDX_V(FW_VI_MAC_ADD_MAC));
7880 memcpy(p->macaddr, addr, sizeof(p->macaddr));
7881 memcpy(p->macaddr_mask, mask, sizeof(p->macaddr_mask));
7883 p->lookup_type_to_vni =
7884 cpu_to_be32(FW_VI_MAC_CMD_VNI_V(vni) |
7885 FW_VI_MAC_CMD_DIP_HIT_V(dip_hit) |
7886 FW_VI_MAC_CMD_LOOKUP_TYPE_V(lookup_type));
7887 p->vni_mask_pkd = cpu_to_be32(FW_VI_MAC_CMD_VNI_MASK_V(vni_mask));
7888 ret = t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, sleep_ok);
7890 ret = FW_VI_MAC_CMD_IDX_G(be16_to_cpu(p->valid_to_idx));
7895 * t4_alloc_raw_mac_filt - Adds a mac entry in mps tcam
7896 * @adap: the adapter
7898 * @addr: the MAC address
7900 * @idx: index at which to add this entry
7901 * @lookup_type: MAC address for inner (1) or outer (0) header
7902 * @port_id: the port index
7903 * @sleep_ok: call is allowed to sleep
7905 * Adds the mac entry at the specified index using raw mac interface.
7907 * Returns a negative error number or the allocated index for this mac.
7909 int t4_alloc_raw_mac_filt(struct adapter *adap, unsigned int viid,
7910 const u8 *addr, const u8 *mask, unsigned int idx,
7911 u8 lookup_type, u8 port_id, bool sleep_ok)
7914 struct fw_vi_mac_cmd c;
7915 struct fw_vi_mac_raw *p = &c.u.raw;
7918 memset(&c, 0, sizeof(c));
7919 c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
7920 FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
7921 FW_VI_MAC_CMD_VIID_V(viid));
7922 val = FW_CMD_LEN16_V(1) |
7923 FW_VI_MAC_CMD_ENTRY_TYPE_V(FW_VI_MAC_TYPE_RAW);
7924 c.freemacs_to_len16 = cpu_to_be32(val);
7926 /* Specify that this is an inner mac address */
7927 p->raw_idx_pkd = cpu_to_be32(FW_VI_MAC_CMD_RAW_IDX_V(idx));
7929 /* Lookup Type. Outer header: 0, Inner header: 1 */
7930 p->data0_pkd = cpu_to_be32(DATALKPTYPE_V(lookup_type) |
7931 DATAPORTNUM_V(port_id));
7932 /* Lookup mask and port mask */
7933 p->data0m_pkd = cpu_to_be64(DATALKPTYPE_V(DATALKPTYPE_M) |
7934 DATAPORTNUM_V(DATAPORTNUM_M));
7936 /* Copy the address and the mask */
7937 memcpy((u8 *)&p->data1[0] + 2, addr, ETH_ALEN);
7938 memcpy((u8 *)&p->data1m[0] + 2, mask, ETH_ALEN);
7940 ret = t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, sleep_ok);
7942 ret = FW_VI_MAC_CMD_RAW_IDX_G(be32_to_cpu(p->raw_idx_pkd));
7951 * t4_alloc_mac_filt - allocates exact-match filters for MAC addresses
7952 * @adap: the adapter
7953 * @mbox: mailbox to use for the FW command
7955 * @free: if true any existing filters for this VI id are first removed
7956 * @naddr: the number of MAC addresses to allocate filters for (up to 7)
7957 * @addr: the MAC address(es)
7958 * @idx: where to store the index of each allocated filter
7959 * @hash: pointer to hash address filter bitmap
7960 * @sleep_ok: call is allowed to sleep
7962 * Allocates an exact-match filter for each of the supplied addresses and
7963 * sets it to the corresponding address. If @idx is not %NULL it should
7964 * have at least @naddr entries, each of which will be set to the index of
7965 * the filter allocated for the corresponding MAC address. If a filter
7966 * could not be allocated for an address its index is set to 0xffff.
7967 * If @hash is not %NULL addresses that fail to allocate an exact filter
7968 * are hashed and update the hash filter bitmap pointed at by @hash.
7970 * Returns a negative error number or the number of filters allocated.
7972 int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox,
7973 unsigned int viid, bool free, unsigned int naddr,
7974 const u8 **addr, u16 *idx, u64 *hash, bool sleep_ok)
7976 int offset, ret = 0;
7977 struct fw_vi_mac_cmd c;
7978 unsigned int nfilters = 0;
7979 unsigned int max_naddr = adap->params.arch.mps_tcam_size;
7980 unsigned int rem = naddr;
7982 if (naddr > max_naddr)
7985 for (offset = 0; offset < naddr ; /**/) {
7986 unsigned int fw_naddr = (rem < ARRAY_SIZE(c.u.exact) ?
7987 rem : ARRAY_SIZE(c.u.exact));
7988 size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
7989 u.exact[fw_naddr]), 16);
7990 struct fw_vi_mac_exact *p;
7993 memset(&c, 0, sizeof(c));
7994 c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
7997 FW_CMD_EXEC_V(free) |
7998 FW_VI_MAC_CMD_VIID_V(viid));
7999 c.freemacs_to_len16 =
8000 cpu_to_be32(FW_VI_MAC_CMD_FREEMACS_V(free) |
8001 FW_CMD_LEN16_V(len16));
8003 for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) {
8005 cpu_to_be16(FW_VI_MAC_CMD_VALID_F |
8006 FW_VI_MAC_CMD_IDX_V(
8007 FW_VI_MAC_ADD_MAC));
8008 memcpy(p->macaddr, addr[offset + i],
8009 sizeof(p->macaddr));
8012 /* It's okay if we run out of space in our MAC address arena.
8013 * Some of the addresses we submit may get stored so we need
8014 * to run through the reply to see what the results were ...
8016 ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok);
8017 if (ret && ret != -FW_ENOMEM)
8020 for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) {
8021 u16 index = FW_VI_MAC_CMD_IDX_G(
8022 be16_to_cpu(p->valid_to_idx));
8025 idx[offset + i] = (index >= max_naddr ?
8027 if (index < max_naddr)
8031 hash_mac_addr(addr[offset + i]));
8039 if (ret == 0 || ret == -FW_ENOMEM)
8045 * t4_free_mac_filt - frees exact-match filters of given MAC addresses
8046 * @adap: the adapter
8047 * @mbox: mailbox to use for the FW command
8049 * @naddr: the number of MAC addresses to allocate filters for (up to 7)
8050 * @addr: the MAC address(es)
8051 * @sleep_ok: call is allowed to sleep
8053 * Frees the exact-match filter for each of the supplied addresses
8055 * Returns a negative error number or the number of filters freed.
8057 int t4_free_mac_filt(struct adapter *adap, unsigned int mbox,
8058 unsigned int viid, unsigned int naddr,
8059 const u8 **addr, bool sleep_ok)
8061 int offset, ret = 0;
8062 struct fw_vi_mac_cmd c;
8063 unsigned int nfilters = 0;
8064 unsigned int max_naddr = is_t4(adap->params.chip) ?
8065 NUM_MPS_CLS_SRAM_L_INSTANCES :
8066 NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
8067 unsigned int rem = naddr;
8069 if (naddr > max_naddr)
8072 for (offset = 0; offset < (int)naddr ; /**/) {
8073 unsigned int fw_naddr = (rem < ARRAY_SIZE(c.u.exact)
8075 : ARRAY_SIZE(c.u.exact));
8076 size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
8077 u.exact[fw_naddr]), 16);
8078 struct fw_vi_mac_exact *p;
8081 memset(&c, 0, sizeof(c));
8082 c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
8086 FW_VI_MAC_CMD_VIID_V(viid));
8087 c.freemacs_to_len16 =
8088 cpu_to_be32(FW_VI_MAC_CMD_FREEMACS_V(0) |
8089 FW_CMD_LEN16_V(len16));
8091 for (i = 0, p = c.u.exact; i < (int)fw_naddr; i++, p++) {
8092 p->valid_to_idx = cpu_to_be16(
8093 FW_VI_MAC_CMD_VALID_F |
8094 FW_VI_MAC_CMD_IDX_V(FW_VI_MAC_MAC_BASED_FREE));
8095 memcpy(p->macaddr, addr[offset+i], sizeof(p->macaddr));
8098 ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok);
8102 for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) {
8103 u16 index = FW_VI_MAC_CMD_IDX_G(
8104 be16_to_cpu(p->valid_to_idx));
8106 if (index < max_naddr)
8120 * t4_change_mac - modifies the exact-match filter for a MAC address
8121 * @adap: the adapter
8122 * @mbox: mailbox to use for the FW command
8124 * @idx: index of existing filter for old value of MAC address, or -1
8125 * @addr: the new MAC address value
8126 * @persist: whether a new MAC allocation should be persistent
8127 * @smt_idx: the destination to store the new SMT index.
8129 * Modifies an exact-match filter and sets it to the new MAC address.
8130 * Note that in general it is not possible to modify the value of a given
8131 * filter so the generic way to modify an address filter is to free the one
8132 * being used by the old address value and allocate a new filter for the
8133 * new address value. @idx can be -1 if the address is a new addition.
8135 * Returns a negative error number or the index of the filter with the new
8138 int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
8139 int idx, const u8 *addr, bool persist, u8 *smt_idx)
8142 struct fw_vi_mac_cmd c;
8143 struct fw_vi_mac_exact *p = c.u.exact;
8144 unsigned int max_mac_addr = adap->params.arch.mps_tcam_size;
8146 if (idx < 0) /* new allocation */
8147 idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC;
8148 mode = smt_idx ? FW_VI_MAC_SMT_AND_MPSTCAM : FW_VI_MAC_MPS_TCAM_ENTRY;
8150 memset(&c, 0, sizeof(c));
8151 c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
8152 FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
8153 FW_VI_MAC_CMD_VIID_V(viid));
8154 c.freemacs_to_len16 = cpu_to_be32(FW_CMD_LEN16_V(1));
8155 p->valid_to_idx = cpu_to_be16(FW_VI_MAC_CMD_VALID_F |
8156 FW_VI_MAC_CMD_SMAC_RESULT_V(mode) |
8157 FW_VI_MAC_CMD_IDX_V(idx));
8158 memcpy(p->macaddr, addr, sizeof(p->macaddr));
8160 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
8162 ret = FW_VI_MAC_CMD_IDX_G(be16_to_cpu(p->valid_to_idx));
8163 if (ret >= max_mac_addr)
8166 if (adap->params.viid_smt_extn_support) {
8167 *smt_idx = FW_VI_MAC_CMD_SMTID_G
8168 (be32_to_cpu(c.op_to_viid));
8170 /* In T4/T5, SMT contains 256 SMAC entries
8171 * organized in 128 rows of 2 entries each.
8172 * In T6, SMT contains 256 SMAC entries in
8175 if (CHELSIO_CHIP_VERSION(adap->params.chip) <=
8177 *smt_idx = (viid & FW_VIID_VIN_M) << 1;
8179 *smt_idx = (viid & FW_VIID_VIN_M);
8187 * t4_set_addr_hash - program the MAC inexact-match hash filter
8188 * @adap: the adapter
8189 * @mbox: mailbox to use for the FW command
8191 * @ucast: whether the hash filter should also match unicast addresses
8192 * @vec: the value to be written to the hash filter
8193 * @sleep_ok: call is allowed to sleep
8195 * Sets the 64-bit inexact-match hash filter for a virtual interface.
8197 int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid,
8198 bool ucast, u64 vec, bool sleep_ok)
8200 struct fw_vi_mac_cmd c;
8202 memset(&c, 0, sizeof(c));
8203 c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
8204 FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
8205 FW_VI_ENABLE_CMD_VIID_V(viid));
8206 c.freemacs_to_len16 = cpu_to_be32(FW_VI_MAC_CMD_HASHVECEN_F |
8207 FW_VI_MAC_CMD_HASHUNIEN_V(ucast) |
8209 c.u.hash.hashvec = cpu_to_be64(vec);
8210 return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
8214 * t4_enable_vi_params - enable/disable a virtual interface
8215 * @adap: the adapter
8216 * @mbox: mailbox to use for the FW command
8218 * @rx_en: 1=enable Rx, 0=disable Rx
8219 * @tx_en: 1=enable Tx, 0=disable Tx
8220 * @dcb_en: 1=enable delivery of Data Center Bridging messages.
8222 * Enables/disables a virtual interface. Note that setting DCB Enable
8223 * only makes sense when enabling a Virtual Interface ...
8225 int t4_enable_vi_params(struct adapter *adap, unsigned int mbox,
8226 unsigned int viid, bool rx_en, bool tx_en, bool dcb_en)
8228 struct fw_vi_enable_cmd c;
8230 memset(&c, 0, sizeof(c));
8231 c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_ENABLE_CMD) |
8232 FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
8233 FW_VI_ENABLE_CMD_VIID_V(viid));
8234 c.ien_to_len16 = cpu_to_be32(FW_VI_ENABLE_CMD_IEN_V(rx_en) |
8235 FW_VI_ENABLE_CMD_EEN_V(tx_en) |
8236 FW_VI_ENABLE_CMD_DCB_INFO_V(dcb_en) |
8238 return t4_wr_mbox_ns(adap, mbox, &c, sizeof(c), NULL);
8242 * t4_enable_vi - enable/disable a virtual interface
8243 * @adap: the adapter
8244 * @mbox: mailbox to use for the FW command
8246 * @rx_en: 1=enable Rx, 0=disable Rx
8247 * @tx_en: 1=enable Tx, 0=disable Tx
8249 * Enables/disables a virtual interface.
8251 int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid,
8252 bool rx_en, bool tx_en)
8254 return t4_enable_vi_params(adap, mbox, viid, rx_en, tx_en, 0);
8258 * t4_enable_pi_params - enable/disable a Port's Virtual Interface
8259 * @adap: the adapter
8260 * @mbox: mailbox to use for the FW command
8261 * @pi: the Port Information structure
8262 * @rx_en: 1=enable Rx, 0=disable Rx
8263 * @tx_en: 1=enable Tx, 0=disable Tx
8264 * @dcb_en: 1=enable delivery of Data Center Bridging messages.
8266 * Enables/disables a Port's Virtual Interface. Note that setting DCB
8267 * Enable only makes sense when enabling a Virtual Interface ...
8268 * If the Virtual Interface enable/disable operation is successful,
8269 * we notify the OS-specific code of a potential Link Status change
8270 * via the OS Contract API t4_os_link_changed().
8272 int t4_enable_pi_params(struct adapter *adap, unsigned int mbox,
8273 struct port_info *pi,
8274 bool rx_en, bool tx_en, bool dcb_en)
8276 int ret = t4_enable_vi_params(adap, mbox, pi->viid,
8277 rx_en, tx_en, dcb_en);
8280 t4_os_link_changed(adap, pi->port_id,
8281 rx_en && tx_en && pi->link_cfg.link_ok);
8286 * t4_identify_port - identify a VI's port by blinking its LED
8287 * @adap: the adapter
8288 * @mbox: mailbox to use for the FW command
8290 * @nblinks: how many times to blink LED at 2.5 Hz
8292 * Identifies a VI's port by blinking its LED.
8294 int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid,
8295 unsigned int nblinks)
8297 struct fw_vi_enable_cmd c;
8299 memset(&c, 0, sizeof(c));
8300 c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_ENABLE_CMD) |
8301 FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
8302 FW_VI_ENABLE_CMD_VIID_V(viid));
8303 c.ien_to_len16 = cpu_to_be32(FW_VI_ENABLE_CMD_LED_F | FW_LEN16(c));
8304 c.blinkdur = cpu_to_be16(nblinks);
8305 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
8309 * t4_iq_stop - stop an ingress queue and its FLs
8310 * @adap: the adapter
8311 * @mbox: mailbox to use for the FW command
8312 * @pf: the PF owning the queues
8313 * @vf: the VF owning the queues
8314 * @iqtype: the ingress queue type (FW_IQ_TYPE_FL_INT_CAP, etc.)
8315 * @iqid: ingress queue id
8316 * @fl0id: FL0 queue id or 0xffff if no attached FL0
8317 * @fl1id: FL1 queue id or 0xffff if no attached FL1
8319 * Stops an ingress queue and its associated FLs, if any. This causes
8320 * any current or future data/messages destined for these queues to be
8323 int t4_iq_stop(struct adapter *adap, unsigned int mbox, unsigned int pf,
8324 unsigned int vf, unsigned int iqtype, unsigned int iqid,
8325 unsigned int fl0id, unsigned int fl1id)
8329 memset(&c, 0, sizeof(c));
8330 c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_IQ_CMD) | FW_CMD_REQUEST_F |
8331 FW_CMD_EXEC_F | FW_IQ_CMD_PFN_V(pf) |
8332 FW_IQ_CMD_VFN_V(vf));
8333 c.alloc_to_len16 = cpu_to_be32(FW_IQ_CMD_IQSTOP_F | FW_LEN16(c));
8334 c.type_to_iqandstindex = cpu_to_be32(FW_IQ_CMD_TYPE_V(iqtype));
8335 c.iqid = cpu_to_be16(iqid);
8336 c.fl0id = cpu_to_be16(fl0id);
8337 c.fl1id = cpu_to_be16(fl1id);
8338 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
8342 * t4_iq_free - free an ingress queue and its FLs
8343 * @adap: the adapter
8344 * @mbox: mailbox to use for the FW command
8345 * @pf: the PF owning the queues
8346 * @vf: the VF owning the queues
8347 * @iqtype: the ingress queue type
8348 * @iqid: ingress queue id
8349 * @fl0id: FL0 queue id or 0xffff if no attached FL0
8350 * @fl1id: FL1 queue id or 0xffff if no attached FL1
8352 * Frees an ingress queue and its associated FLs, if any.
8354 int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
8355 unsigned int vf, unsigned int iqtype, unsigned int iqid,
8356 unsigned int fl0id, unsigned int fl1id)
8360 memset(&c, 0, sizeof(c));
8361 c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_IQ_CMD) | FW_CMD_REQUEST_F |
8362 FW_CMD_EXEC_F | FW_IQ_CMD_PFN_V(pf) |
8363 FW_IQ_CMD_VFN_V(vf));
8364 c.alloc_to_len16 = cpu_to_be32(FW_IQ_CMD_FREE_F | FW_LEN16(c));
8365 c.type_to_iqandstindex = cpu_to_be32(FW_IQ_CMD_TYPE_V(iqtype));
8366 c.iqid = cpu_to_be16(iqid);
8367 c.fl0id = cpu_to_be16(fl0id);
8368 c.fl1id = cpu_to_be16(fl1id);
8369 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
8373 * t4_eth_eq_free - free an Ethernet egress queue
8374 * @adap: the adapter
8375 * @mbox: mailbox to use for the FW command
8376 * @pf: the PF owning the queue
8377 * @vf: the VF owning the queue
8378 * @eqid: egress queue id
8380 * Frees an Ethernet egress queue.
8382 int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
8383 unsigned int vf, unsigned int eqid)
8385 struct fw_eq_eth_cmd c;
8387 memset(&c, 0, sizeof(c));
8388 c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_EQ_ETH_CMD) |
8389 FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
8390 FW_EQ_ETH_CMD_PFN_V(pf) |
8391 FW_EQ_ETH_CMD_VFN_V(vf));
8392 c.alloc_to_len16 = cpu_to_be32(FW_EQ_ETH_CMD_FREE_F | FW_LEN16(c));
8393 c.eqid_pkd = cpu_to_be32(FW_EQ_ETH_CMD_EQID_V(eqid));
8394 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
8398 * t4_ctrl_eq_free - free a control egress queue
8399 * @adap: the adapter
8400 * @mbox: mailbox to use for the FW command
8401 * @pf: the PF owning the queue
8402 * @vf: the VF owning the queue
8403 * @eqid: egress queue id
8405 * Frees a control egress queue.
8407 int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
8408 unsigned int vf, unsigned int eqid)
8410 struct fw_eq_ctrl_cmd c;
8412 memset(&c, 0, sizeof(c));
8413 c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_EQ_CTRL_CMD) |
8414 FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
8415 FW_EQ_CTRL_CMD_PFN_V(pf) |
8416 FW_EQ_CTRL_CMD_VFN_V(vf));
8417 c.alloc_to_len16 = cpu_to_be32(FW_EQ_CTRL_CMD_FREE_F | FW_LEN16(c));
8418 c.cmpliqid_eqid = cpu_to_be32(FW_EQ_CTRL_CMD_EQID_V(eqid));
8419 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
8423 * t4_ofld_eq_free - free an offload egress queue
8424 * @adap: the adapter
8425 * @mbox: mailbox to use for the FW command
8426 * @pf: the PF owning the queue
8427 * @vf: the VF owning the queue
8428 * @eqid: egress queue id
8430 * Frees a control egress queue.
8432 int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
8433 unsigned int vf, unsigned int eqid)
8435 struct fw_eq_ofld_cmd c;
8437 memset(&c, 0, sizeof(c));
8438 c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_EQ_OFLD_CMD) |
8439 FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
8440 FW_EQ_OFLD_CMD_PFN_V(pf) |
8441 FW_EQ_OFLD_CMD_VFN_V(vf));
8442 c.alloc_to_len16 = cpu_to_be32(FW_EQ_OFLD_CMD_FREE_F | FW_LEN16(c));
8443 c.eqid_pkd = cpu_to_be32(FW_EQ_OFLD_CMD_EQID_V(eqid));
8444 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
/**
 *	t4_link_down_rc_str - return a string for a Link Down Reason Code
 *	@link_down_rc: Link Down Reason Code
 *
 *	Returns a string representation of the Link Down Reason Code.
 */
static const char *t4_link_down_rc_str(unsigned char link_down_rc)
{
	/* Indexed directly by the firmware's Link Down Reason Code, so each
	 * string's position must match its code value.
	 * NOTE(review): table restored to the full firmware code list --
	 * confirm ordering against the firmware API header.
	 */
	static const char * const reason[] = {
		"Link Down",
		"Remote Fault",
		"Auto-negotiation Failure",
		"Reserved",
		"Insufficient Airflow",
		"Unable To Determine Reason",
		"No RX Signal Detected",
		"Reserved",
	};

	/* Guard against codes newer than this table. */
	if (link_down_rc >= ARRAY_SIZE(reason))
		return "Bad Reason Code";

	return reason[link_down_rc];
}
8472 /* Return the highest speed set in the port capabilities, in Mb/s. */
8473 static unsigned int fwcap_to_speed(fw_port_cap32_t caps)
8475 #define TEST_SPEED_RETURN(__caps_speed, __speed) \
8477 if (caps & FW_PORT_CAP32_SPEED_##__caps_speed) \
8481 TEST_SPEED_RETURN(400G, 400000);
8482 TEST_SPEED_RETURN(200G, 200000);
8483 TEST_SPEED_RETURN(100G, 100000);
8484 TEST_SPEED_RETURN(50G, 50000);
8485 TEST_SPEED_RETURN(40G, 40000);
8486 TEST_SPEED_RETURN(25G, 25000);
8487 TEST_SPEED_RETURN(10G, 10000);
8488 TEST_SPEED_RETURN(1G, 1000);
8489 TEST_SPEED_RETURN(100M, 100);
8491 #undef TEST_SPEED_RETURN
8497 * fwcap_to_fwspeed - return highest speed in Port Capabilities
8498 * @acaps: advertised Port Capabilities
8500 * Get the highest speed for the port from the advertised Port
8501 * Capabilities. It will be either the highest speed from the list of
8502 * speeds or whatever user has set using ethtool.
8504 static fw_port_cap32_t fwcap_to_fwspeed(fw_port_cap32_t acaps)
8506 #define TEST_SPEED_RETURN(__caps_speed) \
8508 if (acaps & FW_PORT_CAP32_SPEED_##__caps_speed) \
8509 return FW_PORT_CAP32_SPEED_##__caps_speed; \
8512 TEST_SPEED_RETURN(400G);
8513 TEST_SPEED_RETURN(200G);
8514 TEST_SPEED_RETURN(100G);
8515 TEST_SPEED_RETURN(50G);
8516 TEST_SPEED_RETURN(40G);
8517 TEST_SPEED_RETURN(25G);
8518 TEST_SPEED_RETURN(10G);
8519 TEST_SPEED_RETURN(1G);
8520 TEST_SPEED_RETURN(100M);
8522 #undef TEST_SPEED_RETURN
8528 * lstatus_to_fwcap - translate old lstatus to 32-bit Port Capabilities
8529 * @lstatus: old FW_PORT_ACTION_GET_PORT_INFO lstatus value
8531 * Translates old FW_PORT_ACTION_GET_PORT_INFO lstatus field into new
8532 * 32-bit Port Capabilities value.
8534 static fw_port_cap32_t lstatus_to_fwcap(u32 lstatus)
8536 fw_port_cap32_t linkattr = 0;
8538 /* Unfortunately the format of the Link Status in the old
8539 * 16-bit Port Information message isn't the same as the
8540 * 16-bit Port Capabilities bitfield used everywhere else ...
8542 if (lstatus & FW_PORT_CMD_RXPAUSE_F)
8543 linkattr |= FW_PORT_CAP32_FC_RX;
8544 if (lstatus & FW_PORT_CMD_TXPAUSE_F)
8545 linkattr |= FW_PORT_CAP32_FC_TX;
8546 if (lstatus & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_100M))
8547 linkattr |= FW_PORT_CAP32_SPEED_100M;
8548 if (lstatus & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_1G))
8549 linkattr |= FW_PORT_CAP32_SPEED_1G;
8550 if (lstatus & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_10G))
8551 linkattr |= FW_PORT_CAP32_SPEED_10G;
8552 if (lstatus & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_25G))
8553 linkattr |= FW_PORT_CAP32_SPEED_25G;
8554 if (lstatus & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_40G))
8555 linkattr |= FW_PORT_CAP32_SPEED_40G;
8556 if (lstatus & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_100G))
8557 linkattr |= FW_PORT_CAP32_SPEED_100G;
/**
 * t4_handle_get_port_info - process a FW reply message
 * @pi: the port info
 * @rpl: start of the FW message
 *
 * Processes a GET_PORT_INFO FW reply message.
 */
void t4_handle_get_port_info(struct port_info *pi, const __be64 *rpl)
	const struct fw_port_cmd *cmd = (const void *)rpl;
	fw_port_cap32_t pcaps, acaps, lpacaps, linkattr;
	struct link_config *lc = &pi->link_cfg;
	struct adapter *adapter = pi->adapter;
	unsigned int speed, fc, fec, adv_fc;
	enum fw_port_module_type mod_type;
	int action, link_ok, linkdnrc;
	enum fw_port_type port_type;

	/* Extract the various fields from the Port Information message. */
	action = FW_PORT_CMD_ACTION_G(be32_to_cpu(cmd->action_to_len16));
	/* Old 16-bit Port Information message: 16-bit capability fields are
	 * widened into the 32-bit Port Capabilities format.
	 */
	case FW_PORT_ACTION_GET_PORT_INFO: {
		u32 lstatus = be32_to_cpu(cmd->u.info.lstatus_to_modtype);

		link_ok = (lstatus & FW_PORT_CMD_LSTATUS_F) != 0;
		linkdnrc = FW_PORT_CMD_LINKDNRC_G(lstatus);
		port_type = FW_PORT_CMD_PTYPE_G(lstatus);
		mod_type = FW_PORT_CMD_MODTYPE_G(lstatus);
		pcaps = fwcaps16_to_caps32(be16_to_cpu(cmd->u.info.pcap));
		acaps = fwcaps16_to_caps32(be16_to_cpu(cmd->u.info.acap));
		lpacaps = fwcaps16_to_caps32(be16_to_cpu(cmd->u.info.lpacap));
		linkattr = lstatus_to_fwcap(lstatus);

	/* New 32-bit Port Information message: fields already arrive in the
	 * 32-bit Port Capabilities format, no translation needed.
	 */
	case FW_PORT_ACTION_GET_PORT_INFO32: {

		lstatus32 = be32_to_cpu(cmd->u.info32.lstatus32_to_cbllen32);
		link_ok = (lstatus32 & FW_PORT_CMD_LSTATUS32_F) != 0;
		linkdnrc = FW_PORT_CMD_LINKDNRC32_G(lstatus32);
		port_type = FW_PORT_CMD_PORTTYPE32_G(lstatus32);
		mod_type = FW_PORT_CMD_MODTYPE32_G(lstatus32);
		pcaps = be32_to_cpu(cmd->u.info32.pcaps32);
		acaps = be32_to_cpu(cmd->u.info32.acaps32);
		lpacaps = be32_to_cpu(cmd->u.info32.lpacaps32);
		linkattr = be32_to_cpu(cmd->u.info32.linkattr32);

	/* Unrecognized action: log and bail out. */
	dev_err(adapter->pdev_dev, "Handle Port Information: Bad Command/Action %#x\n",
		be32_to_cpu(cmd->action_to_len16));

	/* Derive driver-level FEC / pause / speed values from the
	 * advertised capabilities and current link attributes.
	 */
	fec = fwcap_to_cc_fec(acaps);
	adv_fc = fwcap_to_cc_pause(acaps);
	fc = fwcap_to_cc_pause(linkattr);
	speed = fwcap_to_speed(linkattr);

	/* Reset state for communicating new Transceiver Module status and
	 * whether the OS-dependent layer wants us to redo the current
	 * "sticky" L1 Configure Link Parameters.
	 */
	lc->new_module = false;
	lc->redo_l1cfg = false;

	if (mod_type != pi->mod_type) {
		/* With the newer SFP28 and QSFP28 Transceiver Module Types,
		 * various fundamental Port Capabilities which used to be
		 * immutable can now change radically.  We can now have
		 * Speeds, Auto-Negotiation, Forward Error Correction, etc.
		 * all change based on what Transceiver Module is inserted.
		 * So we need to record the Physical "Port" Capabilities on
		 * every Transceiver Module change.
		 */

		/* When a new Transceiver Module is inserted, the Firmware
		 * will examine its i2c EPROM to determine its type and
		 * general operating parameters including things like Forward
		 * Error Control, etc.  Various IEEE 802.3 standards dictate
		 * how to interpret these i2c values to determine default
		 * "automatic" settings.  We record these for future use when
		 * the user explicitly requests these standards-based values.
		 */
		lc->def_acaps = acaps;

		/* Some versions of the early T6 Firmware "cheated" when
		 * handling different Transceiver Modules by changing the
		 * underlaying Port Type reported to the Host Drivers.  As
		 * such we need to capture whatever Port Type the Firmware
		 * sends us and record it in case it's different from what we
		 * were told earlier.  Unfortunately, since Firmware is
		 * forever, we'll need to keep this code here forever, but in
		 * later T6 Firmware it should just be an assignment of the
		 * same value already recorded.
		 */
		pi->port_type = port_type;

		/* Record new Module Type information. */
		pi->mod_type = mod_type;

		/* Let the OS-dependent layer know if we have a new
		 * Transceiver Module inserted.
		 */
		lc->new_module = t4_is_inserted_mod_type(mod_type);

		t4_os_portmod_changed(adapter, pi->port_id);

	if (link_ok != lc->link_ok || speed != lc->speed ||
	    fc != lc->fc || adv_fc != lc->advertised_fc ||
		/* something changed */
		if (!link_ok && lc->link_ok) {
			/* Link just went down: record why and warn (rate
			 * limited, since the firmware may repeat this).
			 */
			lc->link_down_rc = linkdnrc;
			dev_warn_ratelimited(adapter->pdev_dev,
					     "Port %d link down, reason: %s\n",
					     t4_link_down_rc_str(linkdnrc));
		lc->link_ok = link_ok;
		lc->advertised_fc = adv_fc;
		lc->lpacaps = lpacaps;
		lc->acaps = acaps & ADVERT_MASK;

		/* If we're not physically capable of Auto-Negotiation, note
		 * this as Auto-Negotiation disabled.  Otherwise, we track
		 * what Auto-Negotiation settings we have.  Note parallel
		 * structure in t4_link_l1cfg_core() and init_link_config().
		 */
		if (!(lc->acaps & FW_PORT_CAP32_ANEG)) {
			lc->autoneg = AUTONEG_DISABLE;
		} else if (lc->acaps & FW_PORT_CAP32_ANEG) {
			lc->autoneg = AUTONEG_ENABLE;
			/* When Autoneg is disabled, user needs to set
			 * single speed.
			 * Similar to cxgb4_ethtool.c: set_link_ksettings
			 */
			lc->speed_caps = fwcap_to_fwspeed(acaps);
			lc->autoneg = AUTONEG_DISABLE;

		t4_os_link_changed(adapter, pi->port_id, link_ok);

	/* If we have a new Transceiver Module and the OS-dependent code has
	 * told us that it wants us to redo whatever "sticky" L1 Configuration
	 * Link Parameters are set, do that now.
	 */
	if (lc->new_module && lc->redo_l1cfg) {
		struct link_config old_lc;

		/* Save the current L1 Configuration and restore it if an
		 * error occurs.  We probably should fix the l1_cfg*()
		 * routines not to change the link_config when an error
		 * occurs ...
		 */
		ret = t4_link_l1cfg_ns(adapter, adapter->mbox, pi->lport, lc);
			dev_warn(adapter->pdev_dev,
				 "Attempt to update new Transceiver Module settings failed\n");
	lc->new_module = false;
	lc->redo_l1cfg = false;
8744 * t4_update_port_info - retrieve and update port information if changed
8745 * @pi: the port_info
8747 * We issue a Get Port Information Command to the Firmware and, if
8748 * successful, we check to see if anything is different from what we
8749 * last recorded and update things accordingly.
8751 int t4_update_port_info(struct port_info *pi)
8753 unsigned int fw_caps = pi->adapter->params.fw_caps_support;
8754 struct fw_port_cmd port_cmd;
8757 memset(&port_cmd, 0, sizeof(port_cmd));
8758 port_cmd.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PORT_CMD) |
8759 FW_CMD_REQUEST_F | FW_CMD_READ_F |
8760 FW_PORT_CMD_PORTID_V(pi->tx_chan));
8761 port_cmd.action_to_len16 = cpu_to_be32(
8762 FW_PORT_CMD_ACTION_V(fw_caps == FW_CAPS16
8763 ? FW_PORT_ACTION_GET_PORT_INFO
8764 : FW_PORT_ACTION_GET_PORT_INFO32) |
8765 FW_LEN16(port_cmd));
8766 ret = t4_wr_mbox(pi->adapter, pi->adapter->mbox,
8767 &port_cmd, sizeof(port_cmd), &port_cmd);
8771 t4_handle_get_port_info(pi, (__be64 *)&port_cmd);
8776 * t4_get_link_params - retrieve basic link parameters for given port
8778 * @link_okp: value return pointer for link up/down
8779 * @speedp: value return pointer for speed (Mb/s)
8780 * @mtup: value return pointer for mtu
8782 * Retrieves basic link parameters for a port: link up/down, speed (Mb/s),
8783 * and MTU for a specified port. A negative error is returned on
8784 * failure; 0 on success.
8786 int t4_get_link_params(struct port_info *pi, unsigned int *link_okp,
8787 unsigned int *speedp, unsigned int *mtup)
8789 unsigned int fw_caps = pi->adapter->params.fw_caps_support;
8790 unsigned int action, link_ok, mtu;
8791 struct fw_port_cmd port_cmd;
8792 fw_port_cap32_t linkattr;
8795 memset(&port_cmd, 0, sizeof(port_cmd));
8796 port_cmd.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PORT_CMD) |
8797 FW_CMD_REQUEST_F | FW_CMD_READ_F |
8798 FW_PORT_CMD_PORTID_V(pi->tx_chan));
8799 action = (fw_caps == FW_CAPS16
8800 ? FW_PORT_ACTION_GET_PORT_INFO
8801 : FW_PORT_ACTION_GET_PORT_INFO32);
8802 port_cmd.action_to_len16 = cpu_to_be32(
8803 FW_PORT_CMD_ACTION_V(action) |
8804 FW_LEN16(port_cmd));
8805 ret = t4_wr_mbox(pi->adapter, pi->adapter->mbox,
8806 &port_cmd, sizeof(port_cmd), &port_cmd);
8810 if (action == FW_PORT_ACTION_GET_PORT_INFO) {
8811 u32 lstatus = be32_to_cpu(port_cmd.u.info.lstatus_to_modtype);
8813 link_ok = !!(lstatus & FW_PORT_CMD_LSTATUS_F);
8814 linkattr = lstatus_to_fwcap(lstatus);
8815 mtu = be16_to_cpu(port_cmd.u.info.mtu);
8818 be32_to_cpu(port_cmd.u.info32.lstatus32_to_cbllen32);
8820 link_ok = !!(lstatus32 & FW_PORT_CMD_LSTATUS32_F);
8821 linkattr = be32_to_cpu(port_cmd.u.info32.linkattr32);
8822 mtu = FW_PORT_CMD_MTU32_G(
8823 be32_to_cpu(port_cmd.u.info32.auxlinfo32_mtu32));
8827 *link_okp = link_ok;
8829 *speedp = fwcap_to_speed(linkattr);
/**
 * t4_handle_fw_rpl - process a FW reply message
 * @adap: the adapter
 * @rpl: start of the FW message
 *
 * Processes a FW message, such as link state change messages.
 */
int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl)
	u8 opcode = *(const u8 *)rpl;

	/* This might be a port command ... this simplifies the following
	 * conditionals ...  We can get away with pre-dereferencing
	 * action_to_len16 because it's in the first 16 bytes and all messages
	 * will be at least that long.
	 */
	const struct fw_port_cmd *p = (const void *)rpl;
	unsigned int action =
		FW_PORT_CMD_ACTION_G(be32_to_cpu(p->action_to_len16));

	/* Only GET_PORT_INFO / GET_PORT_INFO32 replies are handled here. */
	if (opcode == FW_PORT_CMD &&
	    (action == FW_PORT_ACTION_GET_PORT_INFO ||
	     action == FW_PORT_ACTION_GET_PORT_INFO32)) {
		int chan = FW_PORT_CMD_PORTID_G(be32_to_cpu(p->op_to_portid));
		struct port_info *pi = NULL;

		/* Locate the port_info whose Tx channel matches the channel
		 * in the firmware reply.
		 */
		for_each_port(adap, i) {
			pi = adap2pinfo(adap, i);
			if (pi->tx_chan == chan)
		t4_handle_get_port_info(pi, rpl);
		/* Anything else is unexpected at this layer. */
		dev_warn(adap->pdev_dev, "Unknown firmware reply %d\n",
8878 static void get_pci_mode(struct adapter *adapter, struct pci_params *p)
8882 if (pci_is_pcie(adapter->pdev)) {
8883 pcie_capability_read_word(adapter->pdev, PCI_EXP_LNKSTA, &val);
8884 p->speed = val & PCI_EXP_LNKSTA_CLS;
8885 p->width = (val & PCI_EXP_LNKSTA_NLW) >> 4;
8890 * init_link_config - initialize a link's SW state
8891 * @lc: pointer to structure holding the link state
8892 * @pcaps: link Port Capabilities
8893 * @acaps: link current Advertised Port Capabilities
8895 * Initializes the SW state maintained for each link, including the link's
8896 * capabilities and default speed/flow-control/autonegotiation settings.
8898 static void init_link_config(struct link_config *lc, fw_port_cap32_t pcaps,
8899 fw_port_cap32_t acaps)
8902 lc->def_acaps = acaps;
8906 lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
8908 /* For Forward Error Control, we default to whatever the Firmware
8909 * tells us the Link is currently advertising.
8911 lc->requested_fec = FEC_AUTO;
8912 lc->fec = fwcap_to_cc_fec(lc->def_acaps);
8914 /* If the Port is capable of Auto-Negtotiation, initialize it as
8915 * "enabled" and copy over all of the Physical Port Capabilities
8916 * to the Advertised Port Capabilities. Otherwise mark it as
8917 * Auto-Negotiate disabled and select the highest supported speed
8918 * for the link. Note parallel structure in t4_link_l1cfg_core()
8919 * and t4_handle_get_port_info().
8921 if (lc->pcaps & FW_PORT_CAP32_ANEG) {
8922 lc->acaps = lc->pcaps & ADVERT_MASK;
8923 lc->autoneg = AUTONEG_ENABLE;
8924 lc->requested_fc |= PAUSE_AUTONEG;
8927 lc->autoneg = AUTONEG_DISABLE;
8928 lc->speed_caps = fwcap_to_fwspeed(acaps);
8932 #define CIM_PF_NOACCESS 0xeeeeeeee
8934 int t4_wait_dev_ready(void __iomem *regs)
8938 whoami = readl(regs + PL_WHOAMI_A);
8939 if (whoami != 0xffffffff && whoami != CIM_PF_NOACCESS)
8943 whoami = readl(regs + PL_WHOAMI_A);
8944 return (whoami != 0xffffffff && whoami != CIM_PF_NOACCESS ? 0 : -EIO);
8948 u32 vendor_and_model_id;
/* Probe the serial flash part via a Read ID command and record its size and
 * sector count in the adapter parameters.  Returns 0 or a negative errno
 * from the serial flash primitives.
 */
static int t4_get_flash_params(struct adapter *adap)
	/* Table for non-Numonix supported flash parts.  Numonix parts are left
	 * to the preexisting code.  All flash parts have 64KB sectors.
	 */
	static struct flash_desc supported_flash[] = {
		{ 0x150201, 4 << 20 },       /* Spansion 4MB S25FL032P */
	unsigned int part, manufacturer;
	unsigned int density, size = 0;

	/* Issue a Read ID Command to the Flash part.  We decode supported
	 * Flash parts and their sizes from this.  There's a newer Query
	 * Command which can retrieve detailed geometry information but many
	 * Flash parts don't support it.
	 */
	ret = sf1_write(adap, 1, 1, 0, SF_RD_ID);
	ret = sf1_read(adap, 3, 0, 1, &flashid);
	t4_write_reg(adap, SF_OP_A, 0);                    /* unlock SF */
	/* Check to see if it's one of our non-standard supported Flash parts.
	 */
	for (part = 0; part < ARRAY_SIZE(supported_flash); part++)
		if (supported_flash[part].vendor_and_model_id == flashid) {
			adap->params.sf_size = supported_flash[part].size_mb;
			adap->params.sf_nsec =
				adap->params.sf_size / SF_SEC_SIZE;
	/* Decode Flash part size.  The code below looks repetitive with
	 * common encodings, but that's not guaranteed in the JEDEC
	 * specification for the Read JEDEC ID command.  The only thing that
	 * we're guaranteed by the JEDEC specification is where the
	 * Manufacturer ID is in the returned result.  After that each
	 * Manufacturer ~could~ encode things completely differently.
	 * Note, all Flash parts must have 64KB sectors.
	 */
	manufacturer = flashid & 0xff;
	switch (manufacturer) {
	case 0x20: { /* Micron/Numonix */
		/* This Density -> Size decoding table is taken from Micron
		 * data sheets.
		 */
		density = (flashid >> 16) & 0xff;
		case 0x14: /* 1MB */
		case 0x15: /* 2MB */
		case 0x16: /* 4MB */
		case 0x17: /* 8MB */
		case 0x18: /* 16MB */
		case 0x19: /* 32MB */
		case 0x20: /* 64MB */
		case 0x21: /* 128MB */
		case 0x22: /* 256MB */
	case 0x9d: { /* ISSI -- Integrated Silicon Solution, Inc. */
		/* This Density -> Size decoding table is taken from ISSI
		 * data sheets.
		 */
		density = (flashid >> 16) & 0xff;
		case 0x16: /* 32 MB */
		case 0x17: /* 64MB */
	case 0xc2: { /* Macronix */
		/* This Density -> Size decoding table is taken from Macronix
		 * data sheets.
		 */
		density = (flashid >> 16) & 0xff;
		case 0x17: /* 8MB */
		case 0x18: /* 16MB */
	case 0xef: { /* Winbond */
		/* This Density -> Size decoding table is taken from Winbond
		 * data sheets.
		 */
		density = (flashid >> 16) & 0xff;
		case 0x17: /* 8MB */
		case 0x18: /* 16MB */
	/* If we didn't recognize the FLASH part, that's no real issue: the
	 * Hardware/Software contract says that Hardware will _*ALWAYS*_
	 * use a FLASH part which is at least 4MB in size and has 64KB
	 * sectors.  The unrecognized FLASH part is likely to be much larger
	 * than 4MB, but that's all we really need.
	 */
	dev_warn(adap->pdev_dev, "Unknown Flash Part, ID = %#x, assuming 4MB\n",

	/* Store decoded Flash size and fall through into vetting code. */
	adap->params.sf_size = size;
	adap->params.sf_nsec = size / SF_SEC_SIZE;

	/* Sanity-check: warn if the part is smaller than the driver's
	 * minimum expected flash size.
	 */
	if (adap->params.sf_size < FLASH_MIN_SIZE)
		dev_warn(adap->pdev_dev, "WARNING: Flash Part ID %#x, size %#x < %#x\n",
			 flashid, adap->params.sf_size, FLASH_MIN_SIZE);
/**
 * t4_prep_adapter - prepare SW and HW for operation
 * @adapter: the adapter
 *
 * Initialize adapter SW state for the various HW modules, set initial
 * values for some adapter tunables, take PHYs out of reset, and
 * initialize the MDIO interface.
 */
int t4_prep_adapter(struct adapter *adapter)
	/* Record negotiated PCIe link speed/width for later reporting. */
	get_pci_mode(adapter, &adapter->params.pci);

	/* Chip revision from the PL_REV register. */
	pl_rev = REV_G(t4_read_reg(adapter, PL_REV_A));

	ret = t4_get_flash_params(adapter);
		dev_err(adapter->pdev_dev, "error %d identifying flash\n", ret);

	/* Retrieve adapter's device ID; the top nibble encodes the chip
	 * generation (T4/T5/T6).
	 */
	pci_read_config_word(adapter->pdev, PCI_DEVICE_ID, &device_id);
	ver = device_id >> 12;
	adapter->params.chip = 0;
		/* T4: per-architecture parameters. */
		adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T4, pl_rev);
		adapter->params.arch.sge_fl_db = DBPRIO_F;
		adapter->params.arch.mps_tcam_size =
				 NUM_MPS_CLS_SRAM_L_INSTANCES;
		adapter->params.arch.mps_rplc_size = 128;
		adapter->params.arch.nchan = NCHAN;
		adapter->params.arch.pm_stats_cnt = PM_NSTATS;
		adapter->params.arch.vfcount = 128;
		/* Congestion map is for 4 channels so that
		 * MPS can have 4 priority per port.
		 */
		adapter->params.arch.cng_ch_bits_log = 2;
		/* T5: per-architecture parameters. */
		adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T5, pl_rev);
		adapter->params.arch.sge_fl_db = DBPRIO_F | DBTYPE_F;
		adapter->params.arch.mps_tcam_size =
				 NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
		adapter->params.arch.mps_rplc_size = 128;
		adapter->params.arch.nchan = NCHAN;
		adapter->params.arch.pm_stats_cnt = PM_NSTATS;
		adapter->params.arch.vfcount = 128;
		adapter->params.arch.cng_ch_bits_log = 2;
		/* T6: per-architecture parameters. */
		adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T6, pl_rev);
		adapter->params.arch.sge_fl_db = 0;
		adapter->params.arch.mps_tcam_size =
				 NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
		adapter->params.arch.mps_rplc_size = 256;
		adapter->params.arch.nchan = 2;
		adapter->params.arch.pm_stats_cnt = T6_PM_NSTATS;
		adapter->params.arch.vfcount = 256;
		/* Congestion map will be for 2 channels so that
		 * MPS can have 8 priority per port.
		 */
		adapter->params.arch.cng_ch_bits_log = 3;
		dev_err(adapter->pdev_dev, "Device %d is not supported\n",

	adapter->params.cim_la_size = CIMLA_SIZE;
	init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);

	/*
	 * Default port for debugging in case we can't reach FW.
	 */
	adapter->params.nports = 1;
	adapter->params.portvec = 1;
	adapter->params.vpd.cclk = 50000;

	/* Set PCIe completion timeout to 4 seconds. */
	pcie_capability_clear_and_set_word(adapter->pdev, PCI_EXP_DEVCTL2,
					   PCI_EXP_DEVCTL2_COMP_TIMEOUT, 0xd);
9196 * t4_shutdown_adapter - shut down adapter, host & wire
9197 * @adapter: the adapter
9199 * Perform an emergency shutdown of the adapter and stop it from
9200 * continuing any further communication on the ports or DMA to the
9201 * host. This is typically used when the adapter and/or firmware
9202 * have crashed and we want to prevent any further accidental
9203 * communication with the rest of the world. This will also force
9204 * the port Link Status to go down -- if register writes work --
9205 * which should help our peers figure out that we're down.
9207 int t4_shutdown_adapter(struct adapter *adapter)
9211 t4_intr_disable(adapter);
9212 t4_write_reg(adapter, DBG_GPIO_EN_A, 0);
9213 for_each_port(adapter, port) {
9214 u32 a_port_cfg = is_t4(adapter->params.chip) ?
9215 PORT_REG(port, XGMAC_PORT_CFG_A) :
9216 T5_PORT_REG(port, MAC_PORT_CFG_A);
9218 t4_write_reg(adapter, a_port_cfg,
9219 t4_read_reg(adapter, a_port_cfg)
9220 & ~SIGNAL_DET_V(1));
9222 t4_set_reg_field(adapter, SGE_CONTROL_A, GLOBALENABLE_F, 0);
/**
 * t4_bar2_sge_qregs - return BAR2 SGE Queue register information
 * @adapter: the adapter
 * @qid: the Queue ID
 * @qtype: the Ingress or Egress type for @qid
 * @user: true if this request is for a user mode queue
 * @pbar2_qoffset: BAR2 Queue Offset
 * @pbar2_qid: BAR2 Queue ID or 0 for Queue ID inferred SGE Queues
 *
 * Returns the BAR2 SGE Queue Registers information associated with the
 * indicated Absolute Queue ID.  These are passed back in return value
 * pointers.  @qtype should be T4_BAR2_QTYPE_EGRESS for Egress Queue
 * and T4_BAR2_QTYPE_INGRESS for Ingress Queues.
 *
 * This may return an error which indicates that BAR2 SGE Queue
 * registers aren't available.  If an error is not returned, then the
 * following values are returned:
 *
 *   *@pbar2_qoffset: the BAR2 Offset of the @qid Registers
 *   *@pbar2_qid: the BAR2 SGE Queue ID or 0 of @qid
 *
 * If the returned BAR2 Queue ID is 0, then BAR2 SGE registers which
 * require the "Inferred Queue ID" ability may be used.  E.g. the
 * Write Combining Doorbell Buffer.  If the BAR2 Queue ID is not 0,
 * then these "Inferred Queue ID" register may not be used.
 */
int t4_bar2_sge_qregs(struct adapter *adapter,
		      enum t4_bar2_qtype qtype,
		      unsigned int *pbar2_qid)
	unsigned int page_shift, page_size, qpp_shift, qpp_mask;
	u64 bar2_page_offset, bar2_qoffset;
	unsigned int bar2_qid, bar2_qid_offset, bar2_qinferred;

	/* T4 doesn't support BAR2 SGE Queue registers for kernel mode queues */
	if (!user && is_t4(adapter->params.chip))

	/* Get our SGE Page Size parameters.
	 */
	page_shift = adapter->params.sge.hps + 10;
	page_size = 1 << page_shift;

	/* Get the right Queues per Page parameters for our Queue.
	 */
	qpp_shift = (qtype == T4_BAR2_QTYPE_EGRESS
		     ? adapter->params.sge.eq_qpp
		     : adapter->params.sge.iq_qpp);
	qpp_mask = (1 << qpp_shift) - 1;

	/* Calculate the basics of the BAR2 SGE Queue register area:
	 *  o The BAR2 page the Queue registers will be in.
	 *  o The BAR2 Queue ID.
	 *  o The BAR2 Queue ID Offset into the BAR2 page.
	 */
	bar2_page_offset = ((u64)(qid >> qpp_shift) << page_shift);
	bar2_qid = qid & qpp_mask;
	bar2_qid_offset = bar2_qid * SGE_UDB_SIZE;

	/* If the BAR2 Queue ID Offset is less than the Page Size, then the
	 * hardware will infer the Absolute Queue ID simply from the writes to
	 * the BAR2 Queue ID Offset within the BAR2 Page (and we need to use a
	 * BAR2 Queue ID of 0 for those writes).  Otherwise, we'll simply
	 * write to the first BAR2 SGE Queue Area within the BAR2 Page with
	 * the BAR2 Queue ID and the hardware will infer the Absolute Queue ID
	 * from the BAR2 Page and BAR2 Queue ID.
	 *
	 * One important consequence of this is that some BAR2 SGE registers
	 * have a "Queue ID" field and we can write the BAR2 SGE Queue ID
	 * there.  But other registers synthesize the SGE Queue ID purely
	 * from the writes to the registers -- the Write Combined Doorbell
	 * Buffer is a good example.  These BAR2 SGE Registers are only
	 * available for those BAR2 SGE Register areas where the SGE Absolute
	 * Queue ID can be inferred from simple writes.
	 */
	bar2_qoffset = bar2_page_offset;
	bar2_qinferred = (bar2_qid_offset < page_size);
	if (bar2_qinferred) {
		bar2_qoffset += bar2_qid_offset;

	*pbar2_qoffset = bar2_qoffset;
	*pbar2_qid = bar2_qid;
9318 * t4_init_devlog_params - initialize adapter->params.devlog
9319 * @adap: the adapter
9321 * Initialize various fields of the adapter's Firmware Device Log
9322 * Parameters structure.
9324 int t4_init_devlog_params(struct adapter *adap)
9326 struct devlog_params *dparams = &adap->params.devlog;
9328 unsigned int devlog_meminfo;
9329 struct fw_devlog_cmd devlog_cmd;
9332 /* If we're dealing with newer firmware, the Device Log Parameters
9333 * are stored in a designated register which allows us to access the
9334 * Device Log even if we can't talk to the firmware.
9337 t4_read_reg(adap, PCIE_FW_REG(PCIE_FW_PF_A, PCIE_FW_PF_DEVLOG));
9339 unsigned int nentries, nentries128;
9341 dparams->memtype = PCIE_FW_PF_DEVLOG_MEMTYPE_G(pf_dparams);
9342 dparams->start = PCIE_FW_PF_DEVLOG_ADDR16_G(pf_dparams) << 4;
9344 nentries128 = PCIE_FW_PF_DEVLOG_NENTRIES128_G(pf_dparams);
9345 nentries = (nentries128 + 1) * 128;
9346 dparams->size = nentries * sizeof(struct fw_devlog_e);
9351 /* Otherwise, ask the firmware for it's Device Log Parameters.
9353 memset(&devlog_cmd, 0, sizeof(devlog_cmd));
9354 devlog_cmd.op_to_write = cpu_to_be32(FW_CMD_OP_V(FW_DEVLOG_CMD) |
9355 FW_CMD_REQUEST_F | FW_CMD_READ_F);
9356 devlog_cmd.retval_len16 = cpu_to_be32(FW_LEN16(devlog_cmd));
9357 ret = t4_wr_mbox(adap, adap->mbox, &devlog_cmd, sizeof(devlog_cmd),
9363 be32_to_cpu(devlog_cmd.memtype_devlog_memaddr16_devlog);
9364 dparams->memtype = FW_DEVLOG_CMD_MEMTYPE_DEVLOG_G(devlog_meminfo);
9365 dparams->start = FW_DEVLOG_CMD_MEMADDR16_DEVLOG_G(devlog_meminfo) << 4;
9366 dparams->size = be32_to_cpu(devlog_cmd.memsize_devlog);
9372 * t4_init_sge_params - initialize adap->params.sge
9373 * @adapter: the adapter
9375 * Initialize various fields of the adapter's SGE Parameters structure.
9377 int t4_init_sge_params(struct adapter *adapter)
9379 struct sge_params *sge_params = &adapter->params.sge;
9381 unsigned int s_hps, s_qpp;
9383 /* Extract the SGE Page Size for our PF.
9385 hps = t4_read_reg(adapter, SGE_HOST_PAGE_SIZE_A);
9386 s_hps = (HOSTPAGESIZEPF0_S +
9387 (HOSTPAGESIZEPF1_S - HOSTPAGESIZEPF0_S) * adapter->pf);
9388 sge_params->hps = ((hps >> s_hps) & HOSTPAGESIZEPF0_M);
9390 /* Extract the SGE Egress and Ingess Queues Per Page for our PF.
9392 s_qpp = (QUEUESPERPAGEPF0_S +
9393 (QUEUESPERPAGEPF1_S - QUEUESPERPAGEPF0_S) * adapter->pf);
9394 qpp = t4_read_reg(adapter, SGE_EGRESS_QUEUES_PER_PAGE_PF_A);
9395 sge_params->eq_qpp = ((qpp >> s_qpp) & QUEUESPERPAGEPF0_M);
9396 qpp = t4_read_reg(adapter, SGE_INGRESS_QUEUES_PER_PAGE_PF_A);
9397 sge_params->iq_qpp = ((qpp >> s_qpp) & QUEUESPERPAGEPF0_M);
9403 * t4_init_tp_params - initialize adap->params.tp
9404 * @adap: the adapter
9405 * @sleep_ok: if true we may sleep while awaiting command completion
9407 * Initialize various fields of the adapter's TP Parameters structure.
9409 int t4_init_tp_params(struct adapter *adap, bool sleep_ok)
9415 v = t4_read_reg(adap, TP_TIMER_RESOLUTION_A);
9416 adap->params.tp.tre = TIMERRESOLUTION_G(v);
9417 adap->params.tp.dack_re = DELAYEDACKRESOLUTION_G(v);
9419 /* MODQ_REQ_MAP defaults to setting queues 0-3 to chan 0-3 */
9420 for (chan = 0; chan < NCHAN; chan++)
9421 adap->params.tp.tx_modq[chan] = chan;
9423 /* Cache the adapter's Compressed Filter Mode/Mask and global Ingress
9426 param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
9427 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_FILTER) |
9428 FW_PARAMS_PARAM_Y_V(FW_PARAM_DEV_FILTER_MODE_MASK));
9430 /* Read current value */
9431 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1,
9434 dev_info(adap->pdev_dev,
9435 "Current filter mode/mask 0x%x:0x%x\n",
9436 FW_PARAMS_PARAM_FILTER_MODE_G(val),
9437 FW_PARAMS_PARAM_FILTER_MASK_G(val));
9438 adap->params.tp.vlan_pri_map =
9439 FW_PARAMS_PARAM_FILTER_MODE_G(val);
9440 adap->params.tp.filter_mask =
9441 FW_PARAMS_PARAM_FILTER_MASK_G(val);
9443 dev_info(adap->pdev_dev,
9444 "Failed to read filter mode/mask via fw api, using indirect-reg-read\n");
9446 /* Incase of older-fw (which doesn't expose the api
9447 * FW_PARAM_DEV_FILTER_MODE_MASK) and newer-driver (which uses
9448 * the fw api) combination, fall-back to older method of reading
9449 * the filter mode from indirect-register
9451 t4_tp_pio_read(adap, &adap->params.tp.vlan_pri_map, 1,
9452 TP_VLAN_PRI_MAP_A, sleep_ok);
9454 /* With the older-fw and newer-driver combination we might run
9455 * into an issue when user wants to use hash filter region but
9456 * the filter_mask is zero, in this case filter_mask validation
9457 * is tough. To avoid that we set the filter_mask same as filter
9458 * mode, which will behave exactly as the older way of ignoring
9459 * the filter mask validation.
9461 adap->params.tp.filter_mask = adap->params.tp.vlan_pri_map;
9464 t4_tp_pio_read(adap, &adap->params.tp.ingress_config, 1,
9465 TP_INGRESS_CONFIG_A, sleep_ok);
9467 /* For T6, cache the adapter's compressed error vector
9468 * and passing outer header info for encapsulated packets.
9470 if (CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5) {
9471 v = t4_read_reg(adap, TP_OUT_CONFIG_A);
9472 adap->params.tp.rx_pkt_encap = (v & CRXPKTENC_F) ? 1 : 0;
9475 /* Now that we have TP_VLAN_PRI_MAP cached, we can calculate the field
9476 * shift positions of several elements of the Compressed Filter Tuple
9477 * for this adapter which we need frequently ...
9479 adap->params.tp.fcoe_shift = t4_filter_field_shift(adap, FCOE_F);
9480 adap->params.tp.port_shift = t4_filter_field_shift(adap, PORT_F);
9481 adap->params.tp.vnic_shift = t4_filter_field_shift(adap, VNIC_ID_F);
9482 adap->params.tp.vlan_shift = t4_filter_field_shift(adap, VLAN_F);
9483 adap->params.tp.tos_shift = t4_filter_field_shift(adap, TOS_F);
9484 adap->params.tp.protocol_shift = t4_filter_field_shift(adap,
9486 adap->params.tp.ethertype_shift = t4_filter_field_shift(adap,
9488 adap->params.tp.macmatch_shift = t4_filter_field_shift(adap,
9490 adap->params.tp.matchtype_shift = t4_filter_field_shift(adap,
9492 adap->params.tp.frag_shift = t4_filter_field_shift(adap,
9495 /* If TP_INGRESS_CONFIG.VNID == 0, then TP_VLAN_PRI_MAP.VNIC_ID
9496 * represents the presence of an Outer VLAN instead of a VNIC ID.
9498 if ((adap->params.tp.ingress_config & VNIC_F) == 0)
9499 adap->params.tp.vnic_shift = -1;
9501 v = t4_read_reg(adap, LE_3_DB_HASH_MASK_GEN_IPV4_T6_A);
9502 adap->params.tp.hash_filter_mask = v;
9503 v = t4_read_reg(adap, LE_4_DB_HASH_MASK_GEN_IPV4_T6_A);
9504 adap->params.tp.hash_filter_mask |= ((u64)v << 32);
9509 * t4_filter_field_shift - calculate filter field shift
9510 * @adap: the adapter
9511 * @filter_sel: the desired field (from TP_VLAN_PRI_MAP bits)
9513 * Return the shift position of a filter field within the Compressed
9514 * Filter Tuple. The filter field is specified via its selection bit
9515 * within TP_VLAN_PRI_MAL (filter mode). E.g. F_VLAN.
9517 int t4_filter_field_shift(const struct adapter *adap, int filter_sel)
9519 unsigned int filter_mode = adap->params.tp.vlan_pri_map;
9523 if ((filter_mode & filter_sel) == 0)
9526 for (sel = 1, field_shift = 0; sel < filter_sel; sel <<= 1) {
9527 switch (filter_mode & sel) {
9529 field_shift += FT_FCOE_W;
9532 field_shift += FT_PORT_W;
9535 field_shift += FT_VNIC_ID_W;
9538 field_shift += FT_VLAN_W;
9541 field_shift += FT_TOS_W;
9544 field_shift += FT_PROTOCOL_W;
9547 field_shift += FT_ETHERTYPE_W;
9550 field_shift += FT_MACMATCH_W;
9553 field_shift += FT_MPSHITTYPE_W;
9555 case FRAGMENTATION_F:
9556 field_shift += FT_FRAGMENTATION_W;
9563 int t4_init_rss_mode(struct adapter *adap, int mbox)
9566 struct fw_rss_vi_config_cmd rvc;
9568 memset(&rvc, 0, sizeof(rvc));
9570 for_each_port(adap, i) {
9571 struct port_info *p = adap2pinfo(adap, i);
9574 cpu_to_be32(FW_CMD_OP_V(FW_RSS_VI_CONFIG_CMD) |
9575 FW_CMD_REQUEST_F | FW_CMD_READ_F |
9576 FW_RSS_VI_CONFIG_CMD_VIID_V(p->viid));
9577 rvc.retval_len16 = cpu_to_be32(FW_LEN16(rvc));
9578 ret = t4_wr_mbox(adap, mbox, &rvc, sizeof(rvc), &rvc);
9581 p->rss_mode = be32_to_cpu(rvc.u.basicvirtual.defaultq_to_udpen);
9587 * t4_init_portinfo - allocate a virtual interface and initialize port_info
9588 * @pi: the port_info
9589 * @mbox: mailbox to use for the FW command
9590 * @port: physical port associated with the VI
9591 * @pf: the PF owning the VI
9592 * @vf: the VF owning the VI
9593 * @mac: the MAC address of the VI
9595 * Allocates a virtual interface for the given physical port. If @mac is
9596 * not %NULL it contains the MAC address of the VI as assigned by FW.
9597 * @mac should be large enough to hold an Ethernet address.
9598 * Returns < 0 on error.
9600 int t4_init_portinfo(struct port_info *pi, int mbox,
9601 int port, int pf, int vf, u8 mac[])
9603 struct adapter *adapter = pi->adapter;
9604 unsigned int fw_caps = adapter->params.fw_caps_support;
9605 struct fw_port_cmd cmd;
9606 unsigned int rss_size;
9607 enum fw_port_type port_type;
9609 fw_port_cap32_t pcaps, acaps;
9610 u8 vivld = 0, vin = 0;
9613 /* If we haven't yet determined whether we're talking to Firmware
9614 * which knows the new 32-bit Port Capabilities, it's time to find
9615 * out now. This will also tell new Firmware to send us Port Status
9616 * Updates using the new 32-bit Port Capabilities version of the
9617 * Port Information message.
9619 if (fw_caps == FW_CAPS_UNKNOWN) {
9622 param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_PFVF) |
9623 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_PFVF_PORT_CAPS32));
9625 ret = t4_set_params(adapter, mbox, pf, vf, 1, ¶m, &val);
9626 fw_caps = (ret == 0 ? FW_CAPS32 : FW_CAPS16);
9627 adapter->params.fw_caps_support = fw_caps;
9630 memset(&cmd, 0, sizeof(cmd));
9631 cmd.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PORT_CMD) |
9632 FW_CMD_REQUEST_F | FW_CMD_READ_F |
9633 FW_PORT_CMD_PORTID_V(port));
9634 cmd.action_to_len16 = cpu_to_be32(
9635 FW_PORT_CMD_ACTION_V(fw_caps == FW_CAPS16
9636 ? FW_PORT_ACTION_GET_PORT_INFO
9637 : FW_PORT_ACTION_GET_PORT_INFO32) |
9639 ret = t4_wr_mbox(pi->adapter, mbox, &cmd, sizeof(cmd), &cmd);
9643 /* Extract the various fields from the Port Information message.
9645 if (fw_caps == FW_CAPS16) {
9646 u32 lstatus = be32_to_cpu(cmd.u.info.lstatus_to_modtype);
9648 port_type = FW_PORT_CMD_PTYPE_G(lstatus);
9649 mdio_addr = ((lstatus & FW_PORT_CMD_MDIOCAP_F)
9650 ? FW_PORT_CMD_MDIOADDR_G(lstatus)
9652 pcaps = fwcaps16_to_caps32(be16_to_cpu(cmd.u.info.pcap));
9653 acaps = fwcaps16_to_caps32(be16_to_cpu(cmd.u.info.acap));
9655 u32 lstatus32 = be32_to_cpu(cmd.u.info32.lstatus32_to_cbllen32);
9657 port_type = FW_PORT_CMD_PORTTYPE32_G(lstatus32);
9658 mdio_addr = ((lstatus32 & FW_PORT_CMD_MDIOCAP32_F)
9659 ? FW_PORT_CMD_MDIOADDR32_G(lstatus32)
9661 pcaps = be32_to_cpu(cmd.u.info32.pcaps32);
9662 acaps = be32_to_cpu(cmd.u.info32.acaps32);
9665 ret = t4_alloc_vi(pi->adapter, mbox, port, pf, vf, 1, mac, &rss_size,
9673 pi->rss_size = rss_size;
9674 pi->rx_cchan = t4_get_tp_e2c_map(pi->adapter, port);
9676 /* If fw supports returning the VIN as part of FW_VI_CMD,
9677 * save the returned values.
9679 if (adapter->params.viid_smt_extn_support) {
9683 /* Retrieve the values from VIID */
9684 pi->vivld = FW_VIID_VIVLD_G(pi->viid);
9685 pi->vin = FW_VIID_VIN_G(pi->viid);
9688 pi->port_type = port_type;
9689 pi->mdio_addr = mdio_addr;
9690 pi->mod_type = FW_PORT_MOD_TYPE_NA;
9692 init_link_config(&pi->link_cfg, pcaps, acaps);
9696 int t4_port_init(struct adapter *adap, int mbox, int pf, int vf)
9701 for_each_port(adap, i) {
9702 struct port_info *pi = adap2pinfo(adap, i);
9704 while ((adap->params.portvec & (1 << j)) == 0)
9707 ret = t4_init_portinfo(pi, mbox, j, pf, vf, addr);
9711 eth_hw_addr_set(adap->port[i], addr);
9717 int t4_init_port_mirror(struct port_info *pi, u8 mbox, u8 port, u8 pf, u8 vf,
9722 ret = t4_alloc_vi(pi->adapter, mbox, port, pf, vf, 1, NULL, NULL,
9734 * t4_read_cimq_cfg - read CIM queue configuration
9735 * @adap: the adapter
9736 * @base: holds the queue base addresses in bytes
9737 * @size: holds the queue sizes in bytes
9738 * @thres: holds the queue full thresholds in bytes
9740 * Returns the current configuration of the CIM queues, starting with
9741 * the IBQs, then the OBQs.
9743 void t4_read_cimq_cfg(struct adapter *adap, u16 *base, u16 *size, u16 *thres)
9746 int cim_num_obq = is_t4(adap->params.chip) ?
9747 CIM_NUM_OBQ : CIM_NUM_OBQ_T5;
9749 for (i = 0; i < CIM_NUM_IBQ; i++) {
9750 t4_write_reg(adap, CIM_QUEUE_CONFIG_REF_A, IBQSELECT_F |
9752 v = t4_read_reg(adap, CIM_QUEUE_CONFIG_CTRL_A);
9753 /* value is in 256-byte units */
9754 *base++ = CIMQBASE_G(v) * 256;
9755 *size++ = CIMQSIZE_G(v) * 256;
9756 *thres++ = QUEFULLTHRSH_G(v) * 8; /* 8-byte unit */
9758 for (i = 0; i < cim_num_obq; i++) {
9759 t4_write_reg(adap, CIM_QUEUE_CONFIG_REF_A, OBQSELECT_F |
9761 v = t4_read_reg(adap, CIM_QUEUE_CONFIG_CTRL_A);
9762 /* value is in 256-byte units */
9763 *base++ = CIMQBASE_G(v) * 256;
9764 *size++ = CIMQSIZE_G(v) * 256;
9769 * t4_read_cim_ibq - read the contents of a CIM inbound queue
9770 * @adap: the adapter
9771 * @qid: the queue index
9772 * @data: where to store the queue contents
9773 * @n: capacity of @data in 32-bit words
9775 * Reads the contents of the selected CIM queue starting at address 0 up
9776 * to the capacity of @data. @n must be a multiple of 4. Returns < 0 on
9777 * error and the number of 32-bit words actually read on success.
9779 int t4_read_cim_ibq(struct adapter *adap, unsigned int qid, u32 *data, size_t n)
9781 int i, err, attempts;
9783 const unsigned int nwords = CIM_IBQ_SIZE * 4;
9785 if (qid > 5 || (n & 3))
9788 addr = qid * nwords;
9792 /* It might take 3-10ms before the IBQ debug read access is allowed.
9793 * Wait for 1 Sec with a delay of 1 usec.
9797 for (i = 0; i < n; i++, addr++) {
9798 t4_write_reg(adap, CIM_IBQ_DBG_CFG_A, IBQDBGADDR_V(addr) |
9800 err = t4_wait_op_done(adap, CIM_IBQ_DBG_CFG_A, IBQDBGBUSY_F, 0,
9804 *data++ = t4_read_reg(adap, CIM_IBQ_DBG_DATA_A);
9806 t4_write_reg(adap, CIM_IBQ_DBG_CFG_A, 0);
9811 * t4_read_cim_obq - read the contents of a CIM outbound queue
9812 * @adap: the adapter
9813 * @qid: the queue index
9814 * @data: where to store the queue contents
9815 * @n: capacity of @data in 32-bit words
9817 * Reads the contents of the selected CIM queue starting at address 0 up
9818 * to the capacity of @data. @n must be a multiple of 4. Returns < 0 on
9819 * error and the number of 32-bit words actually read on success.
9821 int t4_read_cim_obq(struct adapter *adap, unsigned int qid, u32 *data, size_t n)
9824 unsigned int addr, v, nwords;
9825 int cim_num_obq = is_t4(adap->params.chip) ?
9826 CIM_NUM_OBQ : CIM_NUM_OBQ_T5;
9828 if ((qid > (cim_num_obq - 1)) || (n & 3))
9831 t4_write_reg(adap, CIM_QUEUE_CONFIG_REF_A, OBQSELECT_F |
9832 QUENUMSELECT_V(qid));
9833 v = t4_read_reg(adap, CIM_QUEUE_CONFIG_CTRL_A);
9835 addr = CIMQBASE_G(v) * 64; /* muliple of 256 -> muliple of 4 */
9836 nwords = CIMQSIZE_G(v) * 64; /* same */
9840 for (i = 0; i < n; i++, addr++) {
9841 t4_write_reg(adap, CIM_OBQ_DBG_CFG_A, OBQDBGADDR_V(addr) |
9843 err = t4_wait_op_done(adap, CIM_OBQ_DBG_CFG_A, OBQDBGBUSY_F, 0,
9847 *data++ = t4_read_reg(adap, CIM_OBQ_DBG_DATA_A);
9849 t4_write_reg(adap, CIM_OBQ_DBG_CFG_A, 0);
9854 * t4_cim_read - read a block from CIM internal address space
9855 * @adap: the adapter
9856 * @addr: the start address within the CIM address space
9857 * @n: number of words to read
9858 * @valp: where to store the result
9860 * Reads a block of 4-byte words from the CIM intenal address space.
9862 int t4_cim_read(struct adapter *adap, unsigned int addr, unsigned int n,
9867 if (t4_read_reg(adap, CIM_HOST_ACC_CTRL_A) & HOSTBUSY_F)
9870 for ( ; !ret && n--; addr += 4) {
9871 t4_write_reg(adap, CIM_HOST_ACC_CTRL_A, addr);
9872 ret = t4_wait_op_done(adap, CIM_HOST_ACC_CTRL_A, HOSTBUSY_F,
9875 *valp++ = t4_read_reg(adap, CIM_HOST_ACC_DATA_A);
9881 * t4_cim_write - write a block into CIM internal address space
9882 * @adap: the adapter
9883 * @addr: the start address within the CIM address space
9884 * @n: number of words to write
9885 * @valp: set of values to write
9887 * Writes a block of 4-byte words into the CIM intenal address space.
9889 int t4_cim_write(struct adapter *adap, unsigned int addr, unsigned int n,
9890 const unsigned int *valp)
9894 if (t4_read_reg(adap, CIM_HOST_ACC_CTRL_A) & HOSTBUSY_F)
9897 for ( ; !ret && n--; addr += 4) {
9898 t4_write_reg(adap, CIM_HOST_ACC_DATA_A, *valp++);
9899 t4_write_reg(adap, CIM_HOST_ACC_CTRL_A, addr | HOSTWRITE_F);
9900 ret = t4_wait_op_done(adap, CIM_HOST_ACC_CTRL_A, HOSTBUSY_F,
/* Convenience wrapper: write a single 4-byte word into CIM address space. */
static int t4_cim_write1(struct adapter *adap, unsigned int addr,
			 unsigned int val)
{
	return t4_cim_write(adap, addr, 1, &val);
}
9913 * t4_cim_read_la - read CIM LA capture buffer
9914 * @adap: the adapter
9915 * @la_buf: where to store the LA data
9916 * @wrptr: the HW write pointer within the capture buffer
9918 * Reads the contents of the CIM LA buffer with the most recent entry at
9919 * the end of the returned data and with the entry at @wrptr first.
9920 * We try to leave the LA in the running state we find it in.
9922 int t4_cim_read_la(struct adapter *adap, u32 *la_buf, unsigned int *wrptr)
9925 unsigned int cfg, val, idx;
9927 ret = t4_cim_read(adap, UP_UP_DBG_LA_CFG_A, 1, &cfg);
9931 if (cfg & UPDBGLAEN_F) { /* LA is running, freeze it */
9932 ret = t4_cim_write1(adap, UP_UP_DBG_LA_CFG_A, 0);
9937 ret = t4_cim_read(adap, UP_UP_DBG_LA_CFG_A, 1, &val);
9941 idx = UPDBGLAWRPTR_G(val);
9945 for (i = 0; i < adap->params.cim_la_size; i++) {
9946 ret = t4_cim_write1(adap, UP_UP_DBG_LA_CFG_A,
9947 UPDBGLARDPTR_V(idx) | UPDBGLARDEN_F);
9950 ret = t4_cim_read(adap, UP_UP_DBG_LA_CFG_A, 1, &val);
9953 if (val & UPDBGLARDEN_F) {
9957 ret = t4_cim_read(adap, UP_UP_DBG_LA_DATA_A, 1, &la_buf[i]);
9961 /* Bits 0-3 of UpDbgLaRdPtr can be between 0000 to 1001 to
9962 * identify the 32-bit portion of the full 312-bit data
9964 if (is_t6(adap->params.chip) && (idx & 0xf) >= 9)
9965 idx = (idx & 0xff0) + 0x10;
9968 /* address can't exceed 0xfff */
9969 idx &= UPDBGLARDPTR_M;
9972 if (cfg & UPDBGLAEN_F) {
9973 int r = t4_cim_write1(adap, UP_UP_DBG_LA_CFG_A,
9974 cfg & ~UPDBGLARDEN_F);
9982 * t4_tp_read_la - read TP LA capture buffer
9983 * @adap: the adapter
9984 * @la_buf: where to store the LA data
9985 * @wrptr: the HW write pointer within the capture buffer
9987 * Reads the contents of the TP LA buffer with the most recent entry at
9988 * the end of the returned data and with the entry at @wrptr first.
9989 * We leave the LA in the running state we find it in.
9991 void t4_tp_read_la(struct adapter *adap, u64 *la_buf, unsigned int *wrptr)
9993 bool last_incomplete;
9994 unsigned int i, cfg, val, idx;
9996 cfg = t4_read_reg(adap, TP_DBG_LA_CONFIG_A) & 0xffff;
9997 if (cfg & DBGLAENABLE_F) /* freeze LA */
9998 t4_write_reg(adap, TP_DBG_LA_CONFIG_A,
9999 adap->params.tp.la_mask | (cfg ^ DBGLAENABLE_F));
10001 val = t4_read_reg(adap, TP_DBG_LA_CONFIG_A);
10002 idx = DBGLAWPTR_G(val);
10003 last_incomplete = DBGLAMODE_G(val) >= 2 && (val & DBGLAWHLF_F) == 0;
10004 if (last_incomplete)
10005 idx = (idx + 1) & DBGLARPTR_M;
10010 val &= ~DBGLARPTR_V(DBGLARPTR_M);
10011 val |= adap->params.tp.la_mask;
10013 for (i = 0; i < TPLA_SIZE; i++) {
10014 t4_write_reg(adap, TP_DBG_LA_CONFIG_A, DBGLARPTR_V(idx) | val);
10015 la_buf[i] = t4_read_reg64(adap, TP_DBG_LA_DATAL_A);
10016 idx = (idx + 1) & DBGLARPTR_M;
10019 /* Wipe out last entry if it isn't valid */
10020 if (last_incomplete)
10021 la_buf[TPLA_SIZE - 1] = ~0ULL;
10023 if (cfg & DBGLAENABLE_F) /* restore running state */
10024 t4_write_reg(adap, TP_DBG_LA_CONFIG_A,
10025 cfg | adap->params.tp.la_mask);
10028 /* SGE Hung Ingress DMA Warning Threshold time and Warning Repeat Rate (in
10029 * seconds). If we find one of the SGE Ingress DMA State Machines in the same
10030 * state for more than the Warning Threshold then we'll issue a warning about
10031 * a potential hang. We'll repeat the warning as the SGE Ingress DMA Channel
10032 * appears to be hung every Warning Repeat second till the situation clears.
10033 * If the situation clears, we'll note that as well.
10035 #define SGE_IDMA_WARN_THRESH 1
10036 #define SGE_IDMA_WARN_REPEAT 300
10039 * t4_idma_monitor_init - initialize SGE Ingress DMA Monitor
10040 * @adapter: the adapter
10041 * @idma: the adapter IDMA Monitor state
10043 * Initialize the state of an SGE Ingress DMA Monitor.
10045 void t4_idma_monitor_init(struct adapter *adapter,
10046 struct sge_idma_monitor_state *idma)
10048 /* Initialize the state variables for detecting an SGE Ingress DMA
10049 * hang. The SGE has internal counters which count up on each clock
10050 * tick whenever the SGE finds its Ingress DMA State Engines in the
10051 * same state they were on the previous clock tick. The clock used is
10052 * the Core Clock so we have a limit on the maximum "time" they can
10053 * record; typically a very small number of seconds. For instance,
10054 * with a 600MHz Core Clock, we can only count up to a bit more than
10055 * 7s. So we'll synthesize a larger counter in order to not run the
10056 * risk of having the "timers" overflow and give us the flexibility to
10057 * maintain a Hung SGE State Machine of our own which operates across
10058 * a longer time frame.
10060 idma->idma_1s_thresh = core_ticks_per_usec(adapter) * 1000000; /* 1s */
10061 idma->idma_stalled[0] = 0;
10062 idma->idma_stalled[1] = 0;
10066 * t4_idma_monitor - monitor SGE Ingress DMA state
10067 * @adapter: the adapter
10068 * @idma: the adapter IDMA Monitor state
10069 * @hz: number of ticks/second
10070 * @ticks: number of ticks since the last IDMA Monitor call
10072 void t4_idma_monitor(struct adapter *adapter,
10073 struct sge_idma_monitor_state *idma,
10076 int i, idma_same_state_cnt[2];
10078 /* Read the SGE Debug Ingress DMA Same State Count registers. These
10079 * are counters inside the SGE which count up on each clock when the
10080 * SGE finds its Ingress DMA State Engines in the same states they
10081 * were in the previous clock. The counters will peg out at
10082 * 0xffffffff without wrapping around so once they pass the 1s
10083 * threshold they'll stay above that till the IDMA state changes.
10085 t4_write_reg(adapter, SGE_DEBUG_INDEX_A, 13);
10086 idma_same_state_cnt[0] = t4_read_reg(adapter, SGE_DEBUG_DATA_HIGH_A);
10087 idma_same_state_cnt[1] = t4_read_reg(adapter, SGE_DEBUG_DATA_LOW_A);
10089 for (i = 0; i < 2; i++) {
10090 u32 debug0, debug11;
10092 /* If the Ingress DMA Same State Counter ("timer") is less
10093 * than 1s, then we can reset our synthesized Stall Timer and
10094 * continue. If we have previously emitted warnings about a
10095 * potential stalled Ingress Queue, issue a note indicating
10096 * that the Ingress Queue has resumed forward progress.
10098 if (idma_same_state_cnt[i] < idma->idma_1s_thresh) {
10099 if (idma->idma_stalled[i] >= SGE_IDMA_WARN_THRESH * hz)
10100 dev_warn(adapter->pdev_dev, "SGE idma%d, queue %u, "
10101 "resumed after %d seconds\n",
10102 i, idma->idma_qid[i],
10103 idma->idma_stalled[i] / hz);
10104 idma->idma_stalled[i] = 0;
10108 /* Synthesize an SGE Ingress DMA Same State Timer in the Hz
10109 * domain. The first time we get here it'll be because we
10110 * passed the 1s Threshold; each additional time it'll be
10111 * because the RX Timer Callback is being fired on its regular
10114 * If the stall is below our Potential Hung Ingress Queue
10115 * Warning Threshold, continue.
10117 if (idma->idma_stalled[i] == 0) {
10118 idma->idma_stalled[i] = hz;
10119 idma->idma_warn[i] = 0;
10121 idma->idma_stalled[i] += ticks;
10122 idma->idma_warn[i] -= ticks;
10125 if (idma->idma_stalled[i] < SGE_IDMA_WARN_THRESH * hz)
10128 /* We'll issue a warning every SGE_IDMA_WARN_REPEAT seconds.
10130 if (idma->idma_warn[i] > 0)
10132 idma->idma_warn[i] = SGE_IDMA_WARN_REPEAT * hz;
10134 /* Read and save the SGE IDMA State and Queue ID information.
10135 * We do this every time in case it changes across time ...
10136 * can't be too careful ...
10138 t4_write_reg(adapter, SGE_DEBUG_INDEX_A, 0);
10139 debug0 = t4_read_reg(adapter, SGE_DEBUG_DATA_LOW_A);
10140 idma->idma_state[i] = (debug0 >> (i * 9)) & 0x3f;
10142 t4_write_reg(adapter, SGE_DEBUG_INDEX_A, 11);
10143 debug11 = t4_read_reg(adapter, SGE_DEBUG_DATA_LOW_A);
10144 idma->idma_qid[i] = (debug11 >> (i * 16)) & 0xffff;
10146 dev_warn(adapter->pdev_dev, "SGE idma%u, queue %u, potentially stuck in "
10147 "state %u for %d seconds (debug0=%#x, debug11=%#x)\n",
10148 i, idma->idma_qid[i], idma->idma_state[i],
10149 idma->idma_stalled[i] / hz,
10151 t4_sge_decode_idma_state(adapter, idma->idma_state[i]);
10156 * t4_load_cfg - download config file
10157 * @adap: the adapter
10158 * @cfg_data: the cfg text file to write
10159 * @size: text file size
10161 * Write the supplied config text file to the card's serial flash.
10163 int t4_load_cfg(struct adapter *adap, const u8 *cfg_data, unsigned int size)
10165 int ret, i, n, cfg_addr;
10167 unsigned int flash_cfg_start_sec;
10168 unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
10170 cfg_addr = t4_flash_cfg_addr(adap);
10175 flash_cfg_start_sec = addr / SF_SEC_SIZE;
10177 if (size > FLASH_CFG_MAX_SIZE) {
10178 dev_err(adap->pdev_dev, "cfg file too large, max is %u bytes\n",
10179 FLASH_CFG_MAX_SIZE);
10183 i = DIV_ROUND_UP(FLASH_CFG_MAX_SIZE, /* # of sectors spanned */
10185 ret = t4_flash_erase_sectors(adap, flash_cfg_start_sec,
10186 flash_cfg_start_sec + i - 1);
10187 /* If size == 0 then we're simply erasing the FLASH sectors associated
10188 * with the on-adapter Firmware Configuration File.
10190 if (ret || size == 0)
10193 /* this will write to the flash up to SF_PAGE_SIZE at a time */
10194 for (i = 0; i < size; i += SF_PAGE_SIZE) {
10195 if ((size - i) < SF_PAGE_SIZE)
10199 ret = t4_write_flash(adap, addr, n, cfg_data, true);
10203 addr += SF_PAGE_SIZE;
10204 cfg_data += SF_PAGE_SIZE;
10209 dev_err(adap->pdev_dev, "config file %s failed %d\n",
10210 (size == 0 ? "clear" : "download"), ret);
10215 * t4_set_vf_mac_acl - Set MAC address for the specified VF
10216 * @adapter: The adapter
10217 * @vf: one of the VFs instantiated by the specified PF
10218 * @naddr: the number of MAC addresses
10219 * @addr: the MAC address(es) to be set to the specified VF
10221 int t4_set_vf_mac_acl(struct adapter *adapter, unsigned int vf,
10222 unsigned int naddr, u8 *addr)
10224 struct fw_acl_mac_cmd cmd;
10226 memset(&cmd, 0, sizeof(cmd));
10227 cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_ACL_MAC_CMD) |
10230 FW_ACL_MAC_CMD_PFN_V(adapter->pf) |
10231 FW_ACL_MAC_CMD_VFN_V(vf));
10233 /* Note: Do not enable the ACL */
10234 cmd.en_to_len16 = cpu_to_be32((unsigned int)FW_LEN16(cmd));
10237 switch (adapter->pf) {
10239 memcpy(cmd.macaddr3, addr, sizeof(cmd.macaddr3));
10242 memcpy(cmd.macaddr2, addr, sizeof(cmd.macaddr2));
10245 memcpy(cmd.macaddr1, addr, sizeof(cmd.macaddr1));
10248 memcpy(cmd.macaddr0, addr, sizeof(cmd.macaddr0));
10252 return t4_wr_mbox(adapter, adapter->mbox, &cmd, sizeof(cmd), &cmd);
10256 * t4_read_pace_tbl - read the pace table
10257 * @adap: the adapter
10258 * @pace_vals: holds the returned values
10260 * Returns the values of TP's pace table in microseconds.
10262 void t4_read_pace_tbl(struct adapter *adap, unsigned int pace_vals[NTX_SCHED])
10266 for (i = 0; i < NTX_SCHED; i++) {
10267 t4_write_reg(adap, TP_PACE_TABLE_A, 0xffff0000 + i);
10268 v = t4_read_reg(adap, TP_PACE_TABLE_A);
10269 pace_vals[i] = dack_ticks_to_usec(adap, v);
10274 * t4_get_tx_sched - get the configuration of a Tx HW traffic scheduler
10275 * @adap: the adapter
10276 * @sched: the scheduler index
10277 * @kbps: the byte rate in Kbps
10278 * @ipg: the interpacket delay in tenths of nanoseconds
10279 * @sleep_ok: if true we may sleep while awaiting command completion
10281 * Return the current configuration of a HW Tx scheduler.
10283 void t4_get_tx_sched(struct adapter *adap, unsigned int sched,
10284 unsigned int *kbps, unsigned int *ipg, bool sleep_ok)
10286 unsigned int v, addr, bpt, cpt;
10289 addr = TP_TX_MOD_Q1_Q0_RATE_LIMIT_A - sched / 2;
10290 t4_tp_tm_pio_read(adap, &v, 1, addr, sleep_ok);
10293 bpt = (v >> 8) & 0xff;
10296 *kbps = 0; /* scheduler disabled */
10298 v = (adap->params.vpd.cclk * 1000) / cpt; /* ticks/s */
10299 *kbps = (v * bpt) / 125;
10303 addr = TP_TX_MOD_Q1_Q0_TIMER_SEPARATOR_A - sched / 2;
10304 t4_tp_tm_pio_read(adap, &v, 1, addr, sleep_ok);
10308 *ipg = (10000 * v) / core_ticks_per_usec(adap);
10312 /* t4_sge_ctxt_rd - read an SGE context through FW
10313 * @adap: the adapter
10314 * @mbox: mailbox to use for the FW command
10315 * @cid: the context id
10316 * @ctype: the context type
10317 * @data: where to store the context data
10319 * Issues a FW command through the given mailbox to read an SGE context.
10321 int t4_sge_ctxt_rd(struct adapter *adap, unsigned int mbox, unsigned int cid,
10322 enum ctxt_type ctype, u32 *data)
10324 struct fw_ldst_cmd c;
10327 if (ctype == CTXT_FLM)
10328 ret = FW_LDST_ADDRSPC_SGE_FLMC;
10330 ret = FW_LDST_ADDRSPC_SGE_CONMC;
10332 memset(&c, 0, sizeof(c));
10333 c.op_to_addrspace = cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) |
10334 FW_CMD_REQUEST_F | FW_CMD_READ_F |
10335 FW_LDST_CMD_ADDRSPACE_V(ret));
10336 c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
10337 c.u.idctxt.physid = cpu_to_be32(cid);
10339 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
10341 data[0] = be32_to_cpu(c.u.idctxt.ctxt_data0);
10342 data[1] = be32_to_cpu(c.u.idctxt.ctxt_data1);
10343 data[2] = be32_to_cpu(c.u.idctxt.ctxt_data2);
10344 data[3] = be32_to_cpu(c.u.idctxt.ctxt_data3);
10345 data[4] = be32_to_cpu(c.u.idctxt.ctxt_data4);
10346 data[5] = be32_to_cpu(c.u.idctxt.ctxt_data5);
10352 * t4_sge_ctxt_rd_bd - read an SGE context bypassing FW
10353 * @adap: the adapter
10354 * @cid: the context id
10355 * @ctype: the context type
10356 * @data: where to store the context data
10358 * Reads an SGE context directly, bypassing FW. This is only for
10359 * debugging when FW is unavailable.
10361 int t4_sge_ctxt_rd_bd(struct adapter *adap, unsigned int cid,
10362 enum ctxt_type ctype, u32 *data)
10366 t4_write_reg(adap, SGE_CTXT_CMD_A, CTXTQID_V(cid) | CTXTTYPE_V(ctype));
10367 ret = t4_wait_op_done(adap, SGE_CTXT_CMD_A, BUSY_F, 0, 3, 1);
10369 for (i = SGE_CTXT_DATA0_A; i <= SGE_CTXT_DATA5_A; i += 4)
10370 *data++ = t4_read_reg(adap, i);
10374 int t4_sched_params(struct adapter *adapter, u8 type, u8 level, u8 mode,
10375 u8 rateunit, u8 ratemode, u8 channel, u8 class,
10376 u32 minrate, u32 maxrate, u16 weight, u16 pktsize,
10379 struct fw_sched_cmd cmd;
10381 memset(&cmd, 0, sizeof(cmd));
10382 cmd.op_to_write = cpu_to_be32(FW_CMD_OP_V(FW_SCHED_CMD) |
10385 cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
10387 cmd.u.params.sc = FW_SCHED_SC_PARAMS;
10388 cmd.u.params.type = type;
10389 cmd.u.params.level = level;
10390 cmd.u.params.mode = mode;
10391 cmd.u.params.ch = channel;
10392 cmd.u.params.cl = class;
10393 cmd.u.params.unit = rateunit;
10394 cmd.u.params.rate = ratemode;
10395 cmd.u.params.min = cpu_to_be32(minrate);
10396 cmd.u.params.max = cpu_to_be32(maxrate);
10397 cmd.u.params.weight = cpu_to_be16(weight);
10398 cmd.u.params.pktsize = cpu_to_be16(pktsize);
10399 cmd.u.params.burstsize = cpu_to_be16(burstsize);
10401 return t4_wr_mbox_meat(adapter, adapter->mbox, &cmd, sizeof(cmd),
10406 * t4_i2c_rd - read I2C data from adapter
10407 * @adap: the adapter
10408 * @mbox: mailbox to use for the FW command
10409 * @port: Port number if per-port device; <0 if not
10410 * @devid: per-port device ID or absolute device ID
10411 * @offset: byte offset into device I2C space
10412 * @len: byte length of I2C space data
10413 * @buf: buffer in which to return I2C data
10415 * Reads the I2C data from the indicated device and location.
10417 int t4_i2c_rd(struct adapter *adap, unsigned int mbox, int port,
10418 unsigned int devid, unsigned int offset,
10419 unsigned int len, u8 *buf)
10421 struct fw_ldst_cmd ldst_cmd, ldst_rpl;
10422 unsigned int i2c_max = sizeof(ldst_cmd.u.i2c.data);
10425 if (len > I2C_PAGE_SIZE)
10428 /* Dont allow reads that spans multiple pages */
10429 if (offset < I2C_PAGE_SIZE && offset + len > I2C_PAGE_SIZE)
10432 memset(&ldst_cmd, 0, sizeof(ldst_cmd));
10433 ldst_cmd.op_to_addrspace =
10434 cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) |
10437 FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_I2C));
10438 ldst_cmd.cycles_to_len16 = cpu_to_be32(FW_LEN16(ldst_cmd));
10439 ldst_cmd.u.i2c.pid = (port < 0 ? 0xff : port);
10440 ldst_cmd.u.i2c.did = devid;
10443 unsigned int i2c_len = (len < i2c_max) ? len : i2c_max;
10445 ldst_cmd.u.i2c.boffset = offset;
10446 ldst_cmd.u.i2c.blen = i2c_len;
10448 ret = t4_wr_mbox(adap, mbox, &ldst_cmd, sizeof(ldst_cmd),
10453 memcpy(buf, ldst_rpl.u.i2c.data, i2c_len);
10463 * t4_set_vlan_acl - Set a VLAN id for the specified VF
10464 * @adap: the adapter
10465 * @mbox: mailbox to use for the FW command
10466 * @vf: one of the VFs instantiated by the specified PF
10467 * @vlan: The vlanid to be set
10469 int t4_set_vlan_acl(struct adapter *adap, unsigned int mbox, unsigned int vf,
10472 struct fw_acl_vlan_cmd vlan_cmd;
10473 unsigned int enable;
10475 enable = (vlan ? FW_ACL_VLAN_CMD_EN_F : 0);
10476 memset(&vlan_cmd, 0, sizeof(vlan_cmd));
10477 vlan_cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_ACL_VLAN_CMD) |
10481 FW_ACL_VLAN_CMD_PFN_V(adap->pf) |
10482 FW_ACL_VLAN_CMD_VFN_V(vf));
10483 vlan_cmd.en_to_len16 = cpu_to_be32(enable | FW_LEN16(vlan_cmd));
10484 /* Drop all packets that donot match vlan id */
10485 vlan_cmd.dropnovlan_fm = (enable
10486 ? (FW_ACL_VLAN_CMD_DROPNOVLAN_F |
10487 FW_ACL_VLAN_CMD_FM_F) : 0);
10489 vlan_cmd.nvlan = 1;
10490 vlan_cmd.vlanid[0] = cpu_to_be16(vlan);
10493 return t4_wr_mbox(adap, adap->mbox, &vlan_cmd, sizeof(vlan_cmd), NULL);
10497 * modify_device_id - Modifies the device ID of the Boot BIOS image
10498 * @device_id: the device ID to write.
10499 * @boot_data: the boot image to modify.
10501 * Write the supplied device ID to the boot BIOS image.
10503 static void modify_device_id(int device_id, u8 *boot_data)
10505 struct cxgb4_pcir_data *pcir_header;
10506 struct legacy_pci_rom_hdr *header;
10507 u8 *cur_header = boot_data;
10510 /* Loop through all chained images and change the device ID's */
10512 header = (struct legacy_pci_rom_hdr *)cur_header;
10513 pcir_offset = le16_to_cpu(header->pcir_offset);
10514 pcir_header = (struct cxgb4_pcir_data *)(cur_header +
10518 * Only modify the Device ID if code type is Legacy or HP.
10519 * 0x00: Okay to modify
10520 * 0x01: FCODE. Do not modify
10521 * 0x03: Okay to modify
10522 * 0x04-0xFF: Do not modify
10524 if (pcir_header->code_type == CXGB4_HDR_CODE1) {
10529 * Modify Device ID to match current adatper
10531 pcir_header->device_id = cpu_to_le16(device_id);
10534 * Set checksum temporarily to 0.
10535 * We will recalculate it later.
10537 header->cksum = 0x0;
10540 * Calculate and update checksum
10542 for (i = 0; i < (header->size512 * 512); i++)
10543 csum += cur_header[i];
10546 * Invert summed value to create the checksum
10547 * Writing new checksum value directly to the boot data
10549 cur_header[7] = -csum;
10551 } else if (pcir_header->code_type == CXGB4_HDR_CODE2) {
10553 * Modify Device ID to match current adatper
10555 pcir_header->device_id = cpu_to_le16(device_id);
10559 * Move header pointer up to the next image in the ROM.
10561 cur_header += header->size512 * 512;
10562 } while (!(pcir_header->indicator & CXGB4_HDR_INDI));
10566 * t4_load_boot - download boot flash
10567 * @adap: the adapter
10568 * @boot_data: the boot image to write
10569 * @boot_addr: offset in flash to write boot_data
10570 * @size: image size
10572 * Write the supplied boot image to the card's serial flash.
10573 * The boot image has the following sections: a 28-byte header and the
10576 int t4_load_boot(struct adapter *adap, u8 *boot_data,
10577 unsigned int boot_addr, unsigned int size)
10579 unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
10580 unsigned int boot_sector = (boot_addr * 1024);
10581 struct cxgb4_pci_exp_rom_header *header;
10582 struct cxgb4_pcir_data *pcir_header;
10589 * Make sure the boot image does not encroach on the firmware region
10591 if ((boot_sector + size) >> 16 > FLASH_FW_START_SEC) {
10592 dev_err(adap->pdev_dev, "boot image encroaching on firmware region\n");
10596 /* Get boot header */
10597 header = (struct cxgb4_pci_exp_rom_header *)boot_data;
10598 pcir_offset = le16_to_cpu(header->pcir_offset);
10599 /* PCIR Data Structure */
10600 pcir_header = (struct cxgb4_pcir_data *)&boot_data[pcir_offset];
10603 * Perform some primitive sanity testing to avoid accidentally
10604 * writing garbage over the boot sectors. We ought to check for
10605 * more but it's not worth it for now ...
10607 if (size < BOOT_MIN_SIZE || size > BOOT_MAX_SIZE) {
10608 dev_err(adap->pdev_dev, "boot image too small/large\n");
10612 if (le16_to_cpu(header->signature) != BOOT_SIGNATURE) {
10613 dev_err(adap->pdev_dev, "Boot image missing signature\n");
10617 /* Check PCI header signature */
10618 if (le32_to_cpu(pcir_header->signature) != PCIR_SIGNATURE) {
10619 dev_err(adap->pdev_dev, "PCI header missing signature\n");
10623 /* Check Vendor ID matches Chelsio ID*/
10624 if (le16_to_cpu(pcir_header->vendor_id) != PCI_VENDOR_ID_CHELSIO) {
10625 dev_err(adap->pdev_dev, "Vendor ID missing signature\n");
10630 * The boot sector is comprised of the Expansion-ROM boot, iSCSI boot,
10631 * and Boot configuration data sections. These 3 boot sections span
10632 * sectors 0 to 7 in flash and live right before the FW image location.
10634 i = DIV_ROUND_UP(size ? size : FLASH_FW_START, sf_sec_size);
10635 ret = t4_flash_erase_sectors(adap, boot_sector >> 16,
10636 (boot_sector >> 16) + i - 1);
10639 * If size == 0 then we're simply erasing the FLASH sectors associated
10640 * with the on-adapter option ROM file
10642 if (ret || size == 0)
10644 /* Retrieve adapter's device ID */
10645 pci_read_config_word(adap->pdev, PCI_DEVICE_ID, &device_id);
10646 /* Want to deal with PF 0 so I strip off PF 4 indicator */
10647 device_id = device_id & 0xf0ff;
10649 /* Check PCIE Device ID */
10650 if (le16_to_cpu(pcir_header->device_id) != device_id) {
10652 * Change the device ID in the Boot BIOS image to match
10653 * the Device ID of the current adapter.
10655 modify_device_id(device_id, boot_data);
10659 * Skip over the first SF_PAGE_SIZE worth of data and write it after
10660 * we finish copying the rest of the boot image. This will ensure
10661 * that the BIOS boot header will only be written if the boot image
10662 * was written in full.
10664 addr = boot_sector;
10665 for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
10666 addr += SF_PAGE_SIZE;
10667 boot_data += SF_PAGE_SIZE;
10668 ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, boot_data,
10674 ret = t4_write_flash(adap, boot_sector, SF_PAGE_SIZE,
10675 (const u8 *)header, false);
10679 dev_err(adap->pdev_dev, "boot image load failed, error %d\n",
10685 * t4_flash_bootcfg_addr - return the address of the flash
10686 * optionrom configuration
10687 * @adapter: the adapter
10689 * Return the address within the flash where the OptionROM Configuration
10690 * is stored, or an error if the device FLASH is too small to contain
10691 * a OptionROM Configuration.
10693 static int t4_flash_bootcfg_addr(struct adapter *adapter)
10696 * If the device FLASH isn't large enough to hold a Firmware
10697 * Configuration File, return an error.
10699 if (adapter->params.sf_size <
10700 FLASH_BOOTCFG_START + FLASH_BOOTCFG_MAX_SIZE)
10703 return FLASH_BOOTCFG_START;
10706 int t4_load_bootcfg(struct adapter *adap, const u8 *cfg_data, unsigned int size)
10708 unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
10709 struct cxgb4_bootcfg_data *header;
10710 unsigned int flash_cfg_start_sec;
10711 unsigned int addr, npad;
10712 int ret, i, n, cfg_addr;
10714 cfg_addr = t4_flash_bootcfg_addr(adap);
10719 flash_cfg_start_sec = addr / SF_SEC_SIZE;
10721 if (size > FLASH_BOOTCFG_MAX_SIZE) {
10722 dev_err(adap->pdev_dev, "bootcfg file too large, max is %u bytes\n",
10723 FLASH_BOOTCFG_MAX_SIZE);
10727 header = (struct cxgb4_bootcfg_data *)cfg_data;
10728 if (le16_to_cpu(header->signature) != BOOT_CFG_SIG) {
10729 dev_err(adap->pdev_dev, "Wrong bootcfg signature\n");
10734 i = DIV_ROUND_UP(FLASH_BOOTCFG_MAX_SIZE,
10736 ret = t4_flash_erase_sectors(adap, flash_cfg_start_sec,
10737 flash_cfg_start_sec + i - 1);
10740 * If size == 0 then we're simply erasing the FLASH sectors associated
10741 * with the on-adapter OptionROM Configuration File.
10743 if (ret || size == 0)
10746 /* this will write to the flash up to SF_PAGE_SIZE at a time */
10747 for (i = 0; i < size; i += SF_PAGE_SIZE) {
10748 n = min_t(u32, size - i, SF_PAGE_SIZE);
10750 ret = t4_write_flash(adap, addr, n, cfg_data, false);
10754 addr += SF_PAGE_SIZE;
10755 cfg_data += SF_PAGE_SIZE;
10758 npad = ((size + 4 - 1) & ~3) - size;
10759 for (i = 0; i < npad; i++) {
10762 ret = t4_write_flash(adap, cfg_addr + size + i, 1, &data,
10770 dev_err(adap->pdev_dev, "boot config data %s failed %d\n",
10771 (size == 0 ? "clear" : "download"), ret);