cxgb4: Don't sleep when mbox cmd is issued from interrupt context

drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
/*
 * This file is part of the Chelsio T4 Ethernet driver for Linux.
 *
 * Copyright (c) 2003-2014 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/delay.h>
#include "cxgb4.h"
#include "t4_regs.h"
#include "t4_values.h"
#include "t4fw_api.h"
#include "t4fw_version.h"

/**
 *      t4_wait_op_done_val - wait until an operation is completed
 *      @adapter: the adapter performing the operation
 *      @reg: the register to check for completion
 *      @mask: a single-bit field within @reg that indicates completion
 *      @polarity: the value of the field when the operation is completed
 *      @attempts: number of check iterations
 *      @delay: delay in usecs between iterations
 *      @valp: where to store the value of the register at completion time
 *
 *      Wait until an operation is completed by checking a bit in a register
 *      up to @attempts times.  If @valp is not NULL the value of the register
 *      at the time it indicated completion is stored there.  Returns 0 if the
 *      operation completes and -EAGAIN otherwise.
 */
static int t4_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
                               int polarity, int attempts, int delay, u32 *valp)
{
        while (1) {
                u32 val = t4_read_reg(adapter, reg);

                if (!!(val & mask) == polarity) {
                        if (valp)
                                *valp = val;
                        return 0;
                }
                if (--attempts == 0)
                        return -EAGAIN;
                if (delay)
                        udelay(delay);
        }
}

static inline int t4_wait_op_done(struct adapter *adapter, int reg, u32 mask,
                                  int polarity, int attempts, int delay)
{
        return t4_wait_op_done_val(adapter, reg, mask, polarity, attempts,
                                   delay, NULL);
}
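
/* Illustrative sketch, not part of the original file: how a caller would
 * use the polling helper above to wait for a busy bit to clear.  The
 * register and bit are taken as parameters here because the concrete
 * names vary per caller; polarity 0 means "wait for the bit to read as 0",
 * and the budget below allows up to 5 attempts spaced 10 usecs apart.
 */
static inline int example_wait_bit_clear(struct adapter *adap, int reg,
                                         u32 busy_bit)
{
        /* returns 0 once the bit is clear, -EAGAIN if it never clears */
        return t4_wait_op_done(adap, reg, busy_bit, 0, 5, 10);
}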

/**
 *      t4_set_reg_field - set a register field to a value
 *      @adapter: the adapter to program
 *      @addr: the register address
 *      @mask: specifies the portion of the register to modify
 *      @val: the new value for the register field
 *
 *      Sets a register field specified by the supplied mask to the
 *      given value.
 */
void t4_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask,
                      u32 val)
{
        u32 v = t4_read_reg(adapter, addr) & ~mask;

        t4_write_reg(adapter, addr, v | val);
        (void) t4_read_reg(adapter, addr);      /* flush */
}

/**
 *      t4_read_indirect - read indirectly addressed registers
 *      @adap: the adapter
 *      @addr_reg: register holding the indirect address
 *      @data_reg: register holding the value of the indirect register
 *      @vals: where the read register values are stored
 *      @nregs: how many indirect registers to read
 *      @start_idx: index of first indirect register to read
 *
 *      Reads registers that are accessed indirectly through an address/data
 *      register pair.
 */
void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
                      unsigned int data_reg, u32 *vals,
                      unsigned int nregs, unsigned int start_idx)
{
        while (nregs--) {
                t4_write_reg(adap, addr_reg, start_idx);
                *vals++ = t4_read_reg(adap, data_reg);
                start_idx++;
        }
}

/**
 *      t4_write_indirect - write indirectly addressed registers
 *      @adap: the adapter
 *      @addr_reg: register holding the indirect addresses
 *      @data_reg: register holding the value for the indirect registers
 *      @vals: values to write
 *      @nregs: how many indirect registers to write
 *      @start_idx: address of first indirect register to write
 *
 *      Writes a sequential block of registers that are accessed indirectly
 *      through an address/data register pair.
 */
void t4_write_indirect(struct adapter *adap, unsigned int addr_reg,
                       unsigned int data_reg, const u32 *vals,
                       unsigned int nregs, unsigned int start_idx)
{
        while (nregs--) {
                t4_write_reg(adap, addr_reg, start_idx++);
                t4_write_reg(adap, data_reg, *vals++);
        }
}
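
/* Illustrative sketch, not part of the original file: round-tripping a
 * small block of registers through the address/data pair helpers above.
 * The caller supplies whichever indirect address/data register pair it is
 * working with (the driver has several such pairs); nothing here is
 * specific to one register block.
 */
static void example_indirect_roundtrip(struct adapter *adap,
                                       unsigned int addr_reg,
                                       unsigned int data_reg,
                                       unsigned int start_idx)
{
        u32 vals[4];

        /* latch four consecutive indirect registers into vals[] ... */
        t4_read_indirect(adap, addr_reg, data_reg, vals, 4, start_idx);

        /* ... and write the same values straight back (shown for symmetry) */
        t4_write_indirect(adap, addr_reg, data_reg, vals, 4, start_idx);
}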

/*
 * Read a 32-bit PCI Configuration Space register via the PCI-E backdoor
 * mechanism.  This guarantees that we get the real value even if we're
 * operating within a Virtual Machine and the Hypervisor is trapping our
 * Configuration Space accesses.
 */
void t4_hw_pci_read_cfg4(struct adapter *adap, int reg, u32 *val)
{
        u32 req = FUNCTION_V(adap->pf) | REGISTER_V(reg);

        if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
                req |= ENABLE_F;
        else
                req |= T6_ENABLE_F;

        if (is_t4(adap->params.chip))
                req |= LOCALCFG_F;

        t4_write_reg(adap, PCIE_CFG_SPACE_REQ_A, req);
        *val = t4_read_reg(adap, PCIE_CFG_SPACE_DATA_A);

        /* Reset ENABLE to 0 so reads of PCIE_CFG_SPACE_DATA won't cause a
         * Configuration Space read.  (None of the other fields matter when
         * ENABLE is 0 so a simple register write is easier than a
         * read-modify-write via t4_set_reg_field().)
         */
        t4_write_reg(adap, PCIE_CFG_SPACE_REQ_A, 0);
}

/*
 * t4_report_fw_error - report firmware error
 * @adap: the adapter
 *
 * The adapter firmware can indicate error conditions to the host.
 * If the firmware has indicated an error, print out the reason for
 * the firmware error.
 */
static void t4_report_fw_error(struct adapter *adap)
{
        static const char *const reason[] = {
                "Crash",                        /* PCIE_FW_EVAL_CRASH */
                "During Device Preparation",    /* PCIE_FW_EVAL_PREP */
                "During Device Configuration",  /* PCIE_FW_EVAL_CONF */
                "During Device Initialization", /* PCIE_FW_EVAL_INIT */
                "Unexpected Event",             /* PCIE_FW_EVAL_UNEXPECTEDEVENT */
                "Insufficient Airflow",         /* PCIE_FW_EVAL_OVERHEAT */
                "Device Shutdown",              /* PCIE_FW_EVAL_DEVICESHUTDOWN */
                "Reserved",                     /* reserved */
        };
        u32 pcie_fw;

        pcie_fw = t4_read_reg(adap, PCIE_FW_A);
        if (pcie_fw & PCIE_FW_ERR_F)
                dev_err(adap->pdev_dev, "Firmware reports adapter error: %s\n",
                        reason[PCIE_FW_EVAL_G(pcie_fw)]);
}

/*
 * Get the reply to a mailbox command and store it in @rpl in big-endian order.
 */
static void get_mbox_rpl(struct adapter *adap, __be64 *rpl, int nflit,
                         u32 mbox_addr)
{
        for ( ; nflit; nflit--, mbox_addr += 8)
                *rpl++ = cpu_to_be64(t4_read_reg64(adap, mbox_addr));
}

/*
 * Handle a FW assertion reported in a mailbox.
 */
static void fw_asrt(struct adapter *adap, u32 mbox_addr)
{
        struct fw_debug_cmd asrt;

        get_mbox_rpl(adap, (__be64 *)&asrt, sizeof(asrt) / 8, mbox_addr);
        dev_alert(adap->pdev_dev,
                  "FW assertion at %.16s:%u, val0 %#x, val1 %#x\n",
                  asrt.u.assert.filename_0_7, be32_to_cpu(asrt.u.assert.line),
                  be32_to_cpu(asrt.u.assert.x), be32_to_cpu(asrt.u.assert.y));
}

/**
 *      t4_record_mbox - record a Firmware Mailbox Command/Reply in the log
 *      @adapter: the adapter
 *      @cmd: the Firmware Mailbox Command or Reply
 *      @size: command length in bytes
 *      @access: the time (ms) needed to access the Firmware Mailbox
 *      @execute: the time (ms) the command spent being executed
 */
static void t4_record_mbox(struct adapter *adapter,
                           const __be64 *cmd, unsigned int size,
                           int access, int execute)
{
        struct mbox_cmd_log *log = adapter->mbox_log;
        struct mbox_cmd *entry;
        int i;

        entry = mbox_cmd_log_entry(log, log->cursor++);
        if (log->cursor == log->size)
                log->cursor = 0;

        for (i = 0; i < size / 8; i++)
                entry->cmd[i] = be64_to_cpu(cmd[i]);
        while (i < MBOX_LEN / 8)
                entry->cmd[i++] = 0;
        entry->timestamp = jiffies;
        entry->seqno = log->seqno++;
        entry->access = access;
        entry->execute = execute;
}
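
/* Illustrative sketch, not part of the original file: one way to walk the
 * circular mailbox command log that t4_record_mbox() fills in.  Because
 * log->cursor always points at the next slot to be overwritten, the oldest
 * entry sits at the cursor and the newest just before it.  The zero-timestamp
 * check assumes the log was zero-initialized; the opcode extraction assumes
 * the firmware opcode lives in the top byte of the first command flit, as the
 * timeout message in t4_wr_mbox_meat_timeout() below also relies on.
 */
static void example_dump_mbox_log(struct adapter *adap)
{
        struct mbox_cmd_log *log = adap->mbox_log;
        unsigned int k;

        for (k = 0; k < log->size; k++) {
                struct mbox_cmd *entry =
                        mbox_cmd_log_entry(log, (log->cursor + k) % log->size);

                if (!entry->timestamp)
                        continue;       /* slot never used */
                dev_info(adap->pdev_dev,
                         "mbox seq %u: opcode %#llx, access %d, execute %d\n",
                         (unsigned int)entry->seqno,
                         (unsigned long long)(entry->cmd[0] >> 56),
                         (int)entry->access, (int)entry->execute);
        }
}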

/**
 *      t4_wr_mbox_meat_timeout - send a command to FW through the given mailbox
 *      @adap: the adapter
 *      @mbox: index of the mailbox to use
 *      @cmd: the command to write
 *      @size: command length in bytes
 *      @rpl: where to optionally store the reply
 *      @sleep_ok: if true we may sleep while awaiting command completion
 *      @timeout: time to wait for command to finish before timing out
 *
 *      Sends the given command to FW through the selected mailbox and waits
 *      for the FW to execute the command.  If @rpl is not %NULL it is used to
 *      store the FW's reply to the command.  The command and its optional
 *      reply are of the same length.  FW can take up to %FW_CMD_MAX_TIMEOUT ms
 *      to respond.  @sleep_ok determines whether we may sleep while awaiting
 *      the response.  If sleeping is allowed we use progressive backoff
 *      otherwise we spin.
 *
 *      The return value is 0 on success or a negative errno on failure.  A
 *      failure can happen either because we are not able to execute the
 *      command or FW executes it but signals an error.  In the latter case
 *      the return value is the error code indicated by FW (negated).
 */
int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd,
                            int size, void *rpl, bool sleep_ok, int timeout)
{
        static const int delay[] = {
                1, 1, 3, 5, 10, 10, 20, 50, 100, 200
        };

        u16 access = 0;
        u16 execute = 0;
        u32 v;
        u64 res;
        int i, ms, delay_idx, ret;
        const __be64 *p = cmd;
        u32 data_reg = PF_REG(mbox, CIM_PF_MAILBOX_DATA_A);
        u32 ctl_reg = PF_REG(mbox, CIM_PF_MAILBOX_CTRL_A);
        __be64 cmd_rpl[MBOX_LEN / 8];

        if ((size & 15) || size > MBOX_LEN)
                return -EINVAL;

        /*
         * If the device is off-line, as in EEH, commands will time out.
         * Fail them early so we don't waste time waiting.
         */
        if (adap->pdev->error_state != pci_channel_io_normal)
                return -EIO;

        /* If we have a negative timeout, that implies that we can't sleep. */
        if (timeout < 0) {
                sleep_ok = false;
                timeout = -timeout;
        }

        v = MBOWNER_G(t4_read_reg(adap, ctl_reg));
        for (i = 0; v == MBOX_OWNER_NONE && i < 3; i++)
                v = MBOWNER_G(t4_read_reg(adap, ctl_reg));

        if (v != MBOX_OWNER_DRV) {
                ret = (v == MBOX_OWNER_FW) ? -EBUSY : -ETIMEDOUT;
                t4_record_mbox(adap, cmd, MBOX_LEN, access, ret);
                return ret;
        }

        /* Copy in the new mailbox command and send it on its way ... */
        t4_record_mbox(adap, cmd, MBOX_LEN, access, 0);
        for (i = 0; i < size; i += 8)
                t4_write_reg64(adap, data_reg + i, be64_to_cpu(*p++));

        t4_write_reg(adap, ctl_reg, MBMSGVALID_F | MBOWNER_V(MBOX_OWNER_FW));
        t4_read_reg(adap, ctl_reg);          /* flush write */

        delay_idx = 0;
        ms = delay[0];

        for (i = 0; i < timeout; i += ms) {
                if (sleep_ok) {
                        ms = delay[delay_idx];  /* last element may repeat */
                        if (delay_idx < ARRAY_SIZE(delay) - 1)
                                delay_idx++;
                        msleep(ms);
                } else
                        mdelay(ms);

                v = t4_read_reg(adap, ctl_reg);
                if (MBOWNER_G(v) == MBOX_OWNER_DRV) {
                        if (!(v & MBMSGVALID_F)) {
                                t4_write_reg(adap, ctl_reg, 0);
                                continue;
                        }

                        get_mbox_rpl(adap, cmd_rpl, MBOX_LEN / 8, data_reg);
                        res = be64_to_cpu(cmd_rpl[0]);

                        if (FW_CMD_OP_G(res >> 32) == FW_DEBUG_CMD) {
                                fw_asrt(adap, data_reg);
                                res = FW_CMD_RETVAL_V(EIO);
                        } else if (rpl) {
                                memcpy(rpl, cmd_rpl, size);
                        }

                        t4_write_reg(adap, ctl_reg, 0);

                        execute = i + ms;
                        t4_record_mbox(adap, cmd_rpl,
                                       MBOX_LEN, access, execute);
                        return -FW_CMD_RETVAL_G((int)res);
                }
        }

        ret = -ETIMEDOUT;
        t4_record_mbox(adap, cmd, MBOX_LEN, access, ret);
        dev_err(adap->pdev_dev, "command %#x in mailbox %d timed out\n",
                *(const u8 *)cmd, mbox);
        t4_report_fw_error(adap);
        return ret;
}

int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
                    void *rpl, bool sleep_ok)
{
        return t4_wr_mbox_meat_timeout(adap, mbox, cmd, size, rpl, sleep_ok,
                                       FW_CMD_MAX_TIMEOUT);
}
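
/* Illustrative sketch, not part of the original file: issuing a mailbox
 * command from a context that must not sleep, e.g. an interrupt handler,
 * which is what the commit in the page title is about.  Either pass
 * sleep_ok = false explicitly, as below, or use the convention handled at
 * the top of t4_wr_mbox_meat_timeout() and pass a negative timeout, which
 * forces sleep_ok off.  In both cases the wait loop spins with mdelay()
 * instead of calling msleep().  "cmd"/"rpl" are assumed to be a fully built
 * firmware command and a reply buffer of the same, 16-byte-multiple size.
 */
static int example_wr_mbox_atomic(struct adapter *adap, int mbox,
                                  const void *cmd, int size, void *rpl)
{
        /* spin (never sleep) for at most FW_CMD_MAX_TIMEOUT ms */
        return t4_wr_mbox_meat_timeout(adap, mbox, cmd, size, rpl,
                                       false, FW_CMD_MAX_TIMEOUT);
}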

static int t4_edc_err_read(struct adapter *adap, int idx)
{
        u32 edc_ecc_err_addr_reg;
        u32 rdata_reg;

        if (is_t4(adap->params.chip)) {
                CH_WARN(adap, "%s: T4 NOT supported.\n", __func__);
                return 0;
        }
        if (idx != 0 && idx != 1) {
                CH_WARN(adap, "%s: idx %d NOT supported.\n", __func__, idx);
                return 0;
        }

        edc_ecc_err_addr_reg = EDC_T5_REG(EDC_H_ECC_ERR_ADDR_A, idx);
        rdata_reg = EDC_T5_REG(EDC_H_BIST_STATUS_RDATA_A, idx);

        CH_WARN(adap,
                "edc%d err addr 0x%x: 0x%x.\n",
                idx, edc_ecc_err_addr_reg,
                t4_read_reg(adap, edc_ecc_err_addr_reg));
        CH_WARN(adap,
                "bist: 0x%x, status %llx %llx %llx %llx %llx %llx %llx %llx %llx.\n",
                rdata_reg,
                (unsigned long long)t4_read_reg64(adap, rdata_reg),
                (unsigned long long)t4_read_reg64(adap, rdata_reg + 8),
                (unsigned long long)t4_read_reg64(adap, rdata_reg + 16),
                (unsigned long long)t4_read_reg64(adap, rdata_reg + 24),
                (unsigned long long)t4_read_reg64(adap, rdata_reg + 32),
                (unsigned long long)t4_read_reg64(adap, rdata_reg + 40),
                (unsigned long long)t4_read_reg64(adap, rdata_reg + 48),
                (unsigned long long)t4_read_reg64(adap, rdata_reg + 56),
                (unsigned long long)t4_read_reg64(adap, rdata_reg + 64));

        return 0;
}

/**
 *      t4_memory_rw - read/write EDC 0, EDC 1 or MC via PCIE memory window
 *      @adap: the adapter
 *      @win: PCI-E Memory Window to use
 *      @mtype: memory type: MEM_EDC0, MEM_EDC1 or MEM_MC
 *      @addr: address within indicated memory type
 *      @len: amount of memory to transfer
 *      @hbuf: host memory buffer
 *      @dir: direction of transfer T4_MEMORY_READ (1) or T4_MEMORY_WRITE (0)
 *
 *      Reads/writes an [almost] arbitrary memory region in the firmware: the
 *      firmware memory address and host buffer must be aligned on 32-bit
 *      boundaries; the length may be arbitrary.  The memory is transferred as
 *      a raw byte sequence from/to the firmware's memory.  If this memory
 *      contains data structures which contain multi-byte integers, it's the
 *      caller's responsibility to perform appropriate byte order conversions.
 */
int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr,
                 u32 len, void *hbuf, int dir)
{
        u32 pos, offset, resid, memoffset;
        u32 edc_size, mc_size, win_pf, mem_reg, mem_aperture, mem_base;
        u32 *buf;

        /* Argument sanity checks ...
         */
        if (addr & 0x3 || (uintptr_t)hbuf & 0x3)
                return -EINVAL;
        buf = (u32 *)hbuf;

        /* It's convenient to be able to handle lengths which aren't a
         * multiple of 32-bits because we often end up transferring files to
         * the firmware.  So we'll handle that by normalizing the length here
         * and then handling any residual transfer at the end.
         */
        resid = len & 0x3;
        len -= resid;

        /* Offset into the region of memory which is being accessed
         * MEM_EDC0 = 0
         * MEM_EDC1 = 1
         * MEM_MC   = 2 -- MEM_MC for chips with only 1 memory controller
         * MEM_MC1  = 3 -- for chips with 2 memory controllers (e.g. T5)
         */
        edc_size = EDRAM0_SIZE_G(t4_read_reg(adap, MA_EDRAM0_BAR_A));
        if (mtype != MEM_MC1)
                memoffset = (mtype * (edc_size * 1024 * 1024));
        else {
                mc_size = EXT_MEM0_SIZE_G(t4_read_reg(adap,
                                                      MA_EXT_MEMORY0_BAR_A));
                memoffset = (MEM_MC0 * edc_size + mc_size) * 1024 * 1024;
        }

        /* Determine the PCIE_MEM_ACCESS_OFFSET */
        addr = addr + memoffset;

        /* Each PCI-E Memory Window is programmed with a window size -- or
         * "aperture" -- which controls the granularity of its mapping onto
         * adapter memory.  We need to grab that aperture in order to know
         * how to use the specified window.  The window is also programmed
         * with the base address of the Memory Window in BAR0's address
         * space.  For T4 this is an absolute PCI-E Bus Address.  For T5
         * the address is relative to BAR0.
         */
        mem_reg = t4_read_reg(adap,
                              PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A,
                                                  win));
        mem_aperture = 1 << (WINDOW_G(mem_reg) + WINDOW_SHIFT_X);
        mem_base = PCIEOFST_G(mem_reg) << PCIEOFST_SHIFT_X;
        if (is_t4(adap->params.chip))
                mem_base -= adap->t4_bar0;
        win_pf = is_t4(adap->params.chip) ? 0 : PFNUM_V(adap->pf);

        /* Calculate our initial PCI-E Memory Window Position and Offset into
         * that Window.
         */
        pos = addr & ~(mem_aperture-1);
        offset = addr - pos;

        /* Set up initial PCI-E Memory Window to cover the start of our
         * transfer.  (Read it back to ensure that changes propagate before we
         * attempt to use the new value.)
         */
        t4_write_reg(adap,
                     PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A, win),
                     pos | win_pf);
        t4_read_reg(adap,
                    PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A, win));

        /* Transfer data to/from the adapter as long as there's an integral
         * number of 32-bit transfers to complete.
         *
         * A note on Endianness issues:
         *
         * The "register" reads and writes below from/to the PCI-E Memory
         * Window invoke the standard adapter Big-Endian to PCI-E Link
         * Little-Endian "swizzle."  As a result, if we have the following
         * data in adapter memory:
         *
         *     Memory:  ... | b0 | b1 | b2 | b3 | ...
         *     Address:      i+0  i+1  i+2  i+3
         *
         * Then a read of the adapter memory via the PCI-E Memory Window
         * will yield:
         *
         *     x = readl(i)
         *         31                  0
         *         [ b3 | b2 | b1 | b0 ]
         *
         * If this value is stored into local memory on a Little-Endian system
         * it will show up correctly in local memory as:
         *
         *     ( ..., b0, b1, b2, b3, ... )
         *
         * But on a Big-Endian system, the store will show up in memory
         * incorrectly swizzled as:
         *
         *     ( ..., b3, b2, b1, b0, ... )
         *
         * So we need to account for this in the reads and writes to the
         * PCI-E Memory Window below by undoing the register read/write
         * swizzles.
         */
        while (len > 0) {
                if (dir == T4_MEMORY_READ)
                        *buf++ = le32_to_cpu((__force __le32)t4_read_reg(adap,
                                                mem_base + offset));
                else
                        t4_write_reg(adap, mem_base + offset,
                                     (__force u32)cpu_to_le32(*buf++));
                offset += sizeof(__be32);
                len -= sizeof(__be32);

                /* If we've reached the end of our current window aperture,
                 * move the PCI-E Memory Window on to the next.  Note that
                 * doing this here after "len" may be 0 allows us to set up
                 * the PCI-E Memory Window for a possible final residual
                 * transfer below ...
                 */
                if (offset == mem_aperture) {
                        pos += mem_aperture;
                        offset = 0;
                        t4_write_reg(adap,
                                PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A,
                                                    win), pos | win_pf);
                        t4_read_reg(adap,
                                PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A,
                                                    win));
                }
        }

        /* If the original transfer had a length which wasn't a multiple of
         * 32-bits, now's where we need to finish off the transfer of the
         * residual amount.  The PCI-E Memory Window has already been moved
         * above (if necessary) to cover this final transfer.
         */
        if (resid) {
                union {
                        u32 word;
                        char byte[4];
                } last;
                unsigned char *bp;
                int i;

                if (dir == T4_MEMORY_READ) {
                        last.word = le32_to_cpu(
                                        (__force __le32)t4_read_reg(adap,
                                                mem_base + offset));
                        for (bp = (unsigned char *)buf, i = resid; i < 4; i++)
                                bp[i] = last.byte[i];
                } else {
                        last.word = *buf;
                        for (i = resid; i < 4; i++)
                                last.byte[i] = 0;
                        t4_write_reg(adap, mem_base + offset,
                                     (__force u32)cpu_to_le32(last.word));
                }
        }

        return 0;
}
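
/* Illustrative sketch, not part of the original file: reading a small,
 * 32-bit-aligned blob out of EDC0 through memory window 0 with the routine
 * above.  MEM_EDC0 and T4_MEMORY_READ are the same symbols t4_memory_rw()
 * documents; window index 0 is used here on the assumption that it is the
 * utility window set up by t4_setup_memwin() further down.  The data
 * arrives as a raw byte stream, so any multi-byte integers inside it still
 * need explicit byte-order conversion by the caller.
 */
static int example_read_edc0(struct adapter *adap, u32 addr,
                             void *dst, u32 len)
{
        /* addr and dst must be 32-bit aligned; len may be arbitrary */
        return t4_memory_rw(adap, 0, MEM_EDC0, addr, len, dst,
                            T4_MEMORY_READ);
}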

/* Return the specified PCI-E Configuration Space register from our Physical
 * Function.  We try first via a Firmware LDST Command since we prefer to let
 * the firmware own all of these registers, but if that fails we go for it
 * directly ourselves.
 */
u32 t4_read_pcie_cfg4(struct adapter *adap, int reg)
{
        u32 val, ldst_addrspace;

        /* If fw_attach != 0, construct and send the Firmware LDST Command to
         * retrieve the specified PCI-E Configuration Space register.
         */
        struct fw_ldst_cmd ldst_cmd;
        int ret;

        memset(&ldst_cmd, 0, sizeof(ldst_cmd));
        ldst_addrspace = FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_FUNC_PCIE);
        ldst_cmd.op_to_addrspace = cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) |
                                               FW_CMD_REQUEST_F |
                                               FW_CMD_READ_F |
                                               ldst_addrspace);
        ldst_cmd.cycles_to_len16 = cpu_to_be32(FW_LEN16(ldst_cmd));
        ldst_cmd.u.pcie.select_naccess = FW_LDST_CMD_NACCESS_V(1);
        ldst_cmd.u.pcie.ctrl_to_fn =
                (FW_LDST_CMD_LC_F | FW_LDST_CMD_FN_V(adap->pf));
        ldst_cmd.u.pcie.r = reg;

        /* If the LDST Command succeeds, return the result, otherwise
         * fall through to reading it directly ourselves ...
         */
        ret = t4_wr_mbox(adap, adap->mbox, &ldst_cmd, sizeof(ldst_cmd),
                         &ldst_cmd);
        if (ret == 0)
                val = be32_to_cpu(ldst_cmd.u.pcie.data[0]);
        else
                /* Read the desired Configuration Space register via the PCI-E
                 * Backdoor mechanism.
                 */
                t4_hw_pci_read_cfg4(adap, reg, &val);
        return val;
}

/* Get the window based on base passed to it.
 * Window aperture is currently unhandled, but there is no use case for it
 * right now
 */
static u32 t4_get_window(struct adapter *adap, u32 pci_base, u64 pci_mask,
                         u32 memwin_base)
{
        u32 ret;

        if (is_t4(adap->params.chip)) {
                u32 bar0;

                /* Truncation intentional: we only read the bottom 32-bits of
                 * the 64-bit BAR0/BAR1 ... We use the hardware backdoor
                 * mechanism to read BAR0 instead of using
                 * pci_resource_start() because we could be operating from
                 * within a Virtual Machine which is trapping our accesses to
                 * our Configuration Space and we need to set up the PCI-E
                 * Memory Window decoders with the actual addresses which will
                 * be coming across the PCI-E link.
                 */
                bar0 = t4_read_pcie_cfg4(adap, pci_base);
                bar0 &= pci_mask;
                adap->t4_bar0 = bar0;

                ret = bar0 + memwin_base;
        } else {
                /* For T5, only relative offset inside the PCIe BAR is passed */
                ret = memwin_base;
        }
        return ret;
}

/* Get the default utility window (win0) used by everyone */
u32 t4_get_util_window(struct adapter *adap)
{
        return t4_get_window(adap, PCI_BASE_ADDRESS_0,
                             PCI_BASE_ADDRESS_MEM_MASK, MEMWIN0_BASE);
}

/* Set up memory window for accessing adapter memory ranges.  (Read
 * back MA register to ensure that changes propagate before we attempt
 * to use the new values.)
 */
void t4_setup_memwin(struct adapter *adap, u32 memwin_base, u32 window)
{
        t4_write_reg(adap,
                     PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, window),
                     memwin_base | BIR_V(0) |
                     WINDOW_V(ilog2(MEMWIN0_APERTURE) - WINDOW_SHIFT_X));
        t4_read_reg(adap,
                    PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, window));
}
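
/* Illustrative sketch, not part of the original file: how the two helpers
 * above fit together during setup.  t4_get_util_window() resolves the base
 * for window 0 (absolute bus address on T4, BAR0-relative offset on T5+),
 * and t4_setup_memwin() then programs that base and the MEMWIN0_APERTURE
 * size into the window's PCIE_MEM_ACCESS_BASE_WIN register, reading it back
 * so the write is posted before the window is used.
 */
static void example_setup_util_memwin(struct adapter *adap)
{
        u32 memwin_base = t4_get_util_window(adap);

        t4_setup_memwin(adap, memwin_base, 0);
}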

/**
 *      t4_get_regs_len - return the size of the chip's register set
 *      @adapter: the adapter
 *
 *      Returns the size of the chip's BAR0 register space.
 */
unsigned int t4_get_regs_len(struct adapter *adapter)
{
        unsigned int chip_version = CHELSIO_CHIP_VERSION(adapter->params.chip);

        switch (chip_version) {
        case CHELSIO_T4:
                return T4_REGMAP_SIZE;

        case CHELSIO_T5:
        case CHELSIO_T6:
                return T5_REGMAP_SIZE;
        }

        dev_err(adapter->pdev_dev,
                "Unsupported chip version %d\n", chip_version);
        return 0;
}

/**
 *      t4_get_regs - read chip registers into provided buffer
 *      @adap: the adapter
 *      @buf: register buffer
 *      @buf_size: size (in bytes) of register buffer
 *
 *      If the provided register buffer isn't large enough for the chip's
 *      full register range, the register dump will be truncated to the
 *      register buffer's size.
 */
void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
{
        static const unsigned int t4_reg_ranges[] = {
736 0x1008, 0x1108,
8119c018
HS
737 0x1180, 0x1184,
738 0x1190, 0x1194,
739 0x11a0, 0x11a4,
740 0x11b0, 0x11b4,
812034f1
HS
741 0x11fc, 0x123c,
742 0x1300, 0x173c,
743 0x1800, 0x18fc,
8119c018
HS
744 0x3000, 0x30d8,
745 0x30e0, 0x30e4,
746 0x30ec, 0x5910,
747 0x5920, 0x5924,
748 0x5960, 0x5960,
749 0x5968, 0x5968,
750 0x5970, 0x5970,
751 0x5978, 0x5978,
752 0x5980, 0x5980,
753 0x5988, 0x5988,
754 0x5990, 0x5990,
755 0x5998, 0x5998,
756 0x59a0, 0x59d4,
757 0x5a00, 0x5ae0,
758 0x5ae8, 0x5ae8,
759 0x5af0, 0x5af0,
760 0x5af8, 0x5af8,
812034f1
HS
761 0x6000, 0x6098,
762 0x6100, 0x6150,
763 0x6200, 0x6208,
764 0x6240, 0x6248,
8119c018
HS
765 0x6280, 0x62b0,
766 0x62c0, 0x6338,
812034f1
HS
767 0x6370, 0x638c,
768 0x6400, 0x643c,
769 0x6500, 0x6524,
8119c018
HS
770 0x6a00, 0x6a04,
771 0x6a14, 0x6a38,
772 0x6a60, 0x6a70,
773 0x6a78, 0x6a78,
774 0x6b00, 0x6b0c,
775 0x6b1c, 0x6b84,
776 0x6bf0, 0x6bf8,
777 0x6c00, 0x6c0c,
778 0x6c1c, 0x6c84,
779 0x6cf0, 0x6cf8,
780 0x6d00, 0x6d0c,
781 0x6d1c, 0x6d84,
782 0x6df0, 0x6df8,
783 0x6e00, 0x6e0c,
784 0x6e1c, 0x6e84,
785 0x6ef0, 0x6ef8,
786 0x6f00, 0x6f0c,
787 0x6f1c, 0x6f84,
788 0x6ff0, 0x6ff8,
789 0x7000, 0x700c,
790 0x701c, 0x7084,
791 0x70f0, 0x70f8,
792 0x7100, 0x710c,
793 0x711c, 0x7184,
794 0x71f0, 0x71f8,
795 0x7200, 0x720c,
796 0x721c, 0x7284,
797 0x72f0, 0x72f8,
798 0x7300, 0x730c,
799 0x731c, 0x7384,
800 0x73f0, 0x73f8,
801 0x7400, 0x7450,
812034f1 802 0x7500, 0x7530,
8119c018
HS
803 0x7600, 0x760c,
804 0x7614, 0x761c,
812034f1
HS
805 0x7680, 0x76cc,
806 0x7700, 0x7798,
807 0x77c0, 0x77fc,
808 0x7900, 0x79fc,
8119c018
HS
809 0x7b00, 0x7b58,
810 0x7b60, 0x7b84,
811 0x7b8c, 0x7c38,
812 0x7d00, 0x7d38,
813 0x7d40, 0x7d80,
814 0x7d8c, 0x7ddc,
815 0x7de4, 0x7e04,
816 0x7e10, 0x7e1c,
817 0x7e24, 0x7e38,
818 0x7e40, 0x7e44,
819 0x7e4c, 0x7e78,
820 0x7e80, 0x7ea4,
821 0x7eac, 0x7edc,
822 0x7ee8, 0x7efc,
823 0x8dc0, 0x8e04,
824 0x8e10, 0x8e1c,
812034f1 825 0x8e30, 0x8e78,
8119c018
HS
826 0x8ea0, 0x8eb8,
827 0x8ec0, 0x8f6c,
828 0x8fc0, 0x9008,
829 0x9010, 0x9058,
830 0x9060, 0x9060,
831 0x9068, 0x9074,
812034f1 832 0x90fc, 0x90fc,
8119c018
HS
833 0x9400, 0x9408,
834 0x9410, 0x9458,
835 0x9600, 0x9600,
836 0x9608, 0x9638,
837 0x9640, 0x96bc,
812034f1
HS
838 0x9800, 0x9808,
839 0x9820, 0x983c,
840 0x9850, 0x9864,
841 0x9c00, 0x9c6c,
842 0x9c80, 0x9cec,
843 0x9d00, 0x9d6c,
844 0x9d80, 0x9dec,
845 0x9e00, 0x9e6c,
846 0x9e80, 0x9eec,
847 0x9f00, 0x9f6c,
848 0x9f80, 0x9fec,
8119c018
HS
849 0xd004, 0xd004,
850 0xd010, 0xd03c,
812034f1
HS
851 0xdfc0, 0xdfe0,
852 0xe000, 0xea7c,
8119c018 853 0xf000, 0x11190,
812034f1
HS
854 0x19040, 0x1906c,
855 0x19078, 0x19080,
8119c018
HS
856 0x1908c, 0x190e4,
857 0x190f0, 0x190f8,
858 0x19100, 0x19110,
859 0x19120, 0x19124,
860 0x19150, 0x19194,
861 0x1919c, 0x191b0,
812034f1
HS
862 0x191d0, 0x191e8,
863 0x19238, 0x1924c,
8119c018
HS
864 0x193f8, 0x1943c,
865 0x1944c, 0x19474,
866 0x19490, 0x194e0,
867 0x194f0, 0x194f8,
868 0x19800, 0x19c08,
869 0x19c10, 0x19c90,
870 0x19ca0, 0x19ce4,
871 0x19cf0, 0x19d40,
872 0x19d50, 0x19d94,
873 0x19da0, 0x19de8,
874 0x19df0, 0x19e40,
875 0x19e50, 0x19e90,
876 0x19ea0, 0x19f4c,
877 0x1a000, 0x1a004,
878 0x1a010, 0x1a06c,
879 0x1a0b0, 0x1a0e4,
880 0x1a0ec, 0x1a0f4,
881 0x1a100, 0x1a108,
882 0x1a114, 0x1a120,
883 0x1a128, 0x1a130,
884 0x1a138, 0x1a138,
812034f1
HS
885 0x1a190, 0x1a1c4,
886 0x1a1fc, 0x1a1fc,
887 0x1e040, 0x1e04c,
888 0x1e284, 0x1e28c,
889 0x1e2c0, 0x1e2c0,
890 0x1e2e0, 0x1e2e0,
891 0x1e300, 0x1e384,
892 0x1e3c0, 0x1e3c8,
893 0x1e440, 0x1e44c,
894 0x1e684, 0x1e68c,
895 0x1e6c0, 0x1e6c0,
896 0x1e6e0, 0x1e6e0,
897 0x1e700, 0x1e784,
898 0x1e7c0, 0x1e7c8,
899 0x1e840, 0x1e84c,
900 0x1ea84, 0x1ea8c,
901 0x1eac0, 0x1eac0,
902 0x1eae0, 0x1eae0,
903 0x1eb00, 0x1eb84,
904 0x1ebc0, 0x1ebc8,
905 0x1ec40, 0x1ec4c,
906 0x1ee84, 0x1ee8c,
907 0x1eec0, 0x1eec0,
908 0x1eee0, 0x1eee0,
909 0x1ef00, 0x1ef84,
910 0x1efc0, 0x1efc8,
911 0x1f040, 0x1f04c,
912 0x1f284, 0x1f28c,
913 0x1f2c0, 0x1f2c0,
914 0x1f2e0, 0x1f2e0,
915 0x1f300, 0x1f384,
916 0x1f3c0, 0x1f3c8,
917 0x1f440, 0x1f44c,
918 0x1f684, 0x1f68c,
919 0x1f6c0, 0x1f6c0,
920 0x1f6e0, 0x1f6e0,
921 0x1f700, 0x1f784,
922 0x1f7c0, 0x1f7c8,
923 0x1f840, 0x1f84c,
924 0x1fa84, 0x1fa8c,
925 0x1fac0, 0x1fac0,
926 0x1fae0, 0x1fae0,
927 0x1fb00, 0x1fb84,
928 0x1fbc0, 0x1fbc8,
929 0x1fc40, 0x1fc4c,
930 0x1fe84, 0x1fe8c,
931 0x1fec0, 0x1fec0,
932 0x1fee0, 0x1fee0,
933 0x1ff00, 0x1ff84,
934 0x1ffc0, 0x1ffc8,
935 0x20000, 0x2002c,
936 0x20100, 0x2013c,
8119c018
HS
937 0x20190, 0x201a0,
938 0x201a8, 0x201b8,
939 0x201c4, 0x201c8,
812034f1 940 0x20200, 0x20318,
8119c018
HS
941 0x20400, 0x204b4,
942 0x204c0, 0x20528,
812034f1
HS
943 0x20540, 0x20614,
944 0x21000, 0x21040,
945 0x2104c, 0x21060,
946 0x210c0, 0x210ec,
947 0x21200, 0x21268,
948 0x21270, 0x21284,
949 0x212fc, 0x21388,
950 0x21400, 0x21404,
8119c018
HS
951 0x21500, 0x21500,
952 0x21510, 0x21518,
953 0x2152c, 0x21530,
954 0x2153c, 0x2153c,
812034f1
HS
955 0x21550, 0x21554,
956 0x21600, 0x21600,
8119c018
HS
957 0x21608, 0x2161c,
958 0x21624, 0x21628,
959 0x21630, 0x21634,
960 0x2163c, 0x2163c,
812034f1
HS
961 0x21700, 0x2171c,
962 0x21780, 0x2178c,
8119c018
HS
963 0x21800, 0x21818,
964 0x21820, 0x21828,
965 0x21830, 0x21848,
966 0x21850, 0x21854,
967 0x21860, 0x21868,
968 0x21870, 0x21870,
969 0x21878, 0x21898,
970 0x218a0, 0x218a8,
971 0x218b0, 0x218c8,
972 0x218d0, 0x218d4,
973 0x218e0, 0x218e8,
974 0x218f0, 0x218f0,
975 0x218f8, 0x21a18,
976 0x21a20, 0x21a28,
977 0x21a30, 0x21a48,
978 0x21a50, 0x21a54,
979 0x21a60, 0x21a68,
980 0x21a70, 0x21a70,
981 0x21a78, 0x21a98,
982 0x21aa0, 0x21aa8,
983 0x21ab0, 0x21ac8,
984 0x21ad0, 0x21ad4,
985 0x21ae0, 0x21ae8,
986 0x21af0, 0x21af0,
987 0x21af8, 0x21c18,
988 0x21c20, 0x21c20,
989 0x21c28, 0x21c30,
990 0x21c38, 0x21c38,
991 0x21c80, 0x21c98,
992 0x21ca0, 0x21ca8,
993 0x21cb0, 0x21cc8,
994 0x21cd0, 0x21cd4,
995 0x21ce0, 0x21ce8,
996 0x21cf0, 0x21cf0,
997 0x21cf8, 0x21d7c,
812034f1
HS
998 0x21e00, 0x21e04,
999 0x22000, 0x2202c,
1000 0x22100, 0x2213c,
8119c018
HS
1001 0x22190, 0x221a0,
1002 0x221a8, 0x221b8,
1003 0x221c4, 0x221c8,
812034f1 1004 0x22200, 0x22318,
8119c018
HS
1005 0x22400, 0x224b4,
1006 0x224c0, 0x22528,
812034f1
HS
1007 0x22540, 0x22614,
1008 0x23000, 0x23040,
1009 0x2304c, 0x23060,
1010 0x230c0, 0x230ec,
1011 0x23200, 0x23268,
1012 0x23270, 0x23284,
1013 0x232fc, 0x23388,
1014 0x23400, 0x23404,
8119c018
HS
1015 0x23500, 0x23500,
1016 0x23510, 0x23518,
1017 0x2352c, 0x23530,
1018 0x2353c, 0x2353c,
812034f1
HS
1019 0x23550, 0x23554,
1020 0x23600, 0x23600,
8119c018
HS
1021 0x23608, 0x2361c,
1022 0x23624, 0x23628,
1023 0x23630, 0x23634,
1024 0x2363c, 0x2363c,
812034f1
HS
1025 0x23700, 0x2371c,
1026 0x23780, 0x2378c,
8119c018
HS
1027 0x23800, 0x23818,
1028 0x23820, 0x23828,
1029 0x23830, 0x23848,
1030 0x23850, 0x23854,
1031 0x23860, 0x23868,
1032 0x23870, 0x23870,
1033 0x23878, 0x23898,
1034 0x238a0, 0x238a8,
1035 0x238b0, 0x238c8,
1036 0x238d0, 0x238d4,
1037 0x238e0, 0x238e8,
1038 0x238f0, 0x238f0,
1039 0x238f8, 0x23a18,
1040 0x23a20, 0x23a28,
1041 0x23a30, 0x23a48,
1042 0x23a50, 0x23a54,
1043 0x23a60, 0x23a68,
1044 0x23a70, 0x23a70,
1045 0x23a78, 0x23a98,
1046 0x23aa0, 0x23aa8,
1047 0x23ab0, 0x23ac8,
1048 0x23ad0, 0x23ad4,
1049 0x23ae0, 0x23ae8,
1050 0x23af0, 0x23af0,
1051 0x23af8, 0x23c18,
1052 0x23c20, 0x23c20,
1053 0x23c28, 0x23c30,
1054 0x23c38, 0x23c38,
1055 0x23c80, 0x23c98,
1056 0x23ca0, 0x23ca8,
1057 0x23cb0, 0x23cc8,
1058 0x23cd0, 0x23cd4,
1059 0x23ce0, 0x23ce8,
1060 0x23cf0, 0x23cf0,
1061 0x23cf8, 0x23d7c,
812034f1
HS
1062 0x23e00, 0x23e04,
1063 0x24000, 0x2402c,
1064 0x24100, 0x2413c,
8119c018
HS
1065 0x24190, 0x241a0,
1066 0x241a8, 0x241b8,
1067 0x241c4, 0x241c8,
812034f1 1068 0x24200, 0x24318,
8119c018
HS
1069 0x24400, 0x244b4,
1070 0x244c0, 0x24528,
812034f1
HS
1071 0x24540, 0x24614,
1072 0x25000, 0x25040,
1073 0x2504c, 0x25060,
1074 0x250c0, 0x250ec,
1075 0x25200, 0x25268,
1076 0x25270, 0x25284,
1077 0x252fc, 0x25388,
1078 0x25400, 0x25404,
8119c018
HS
1079 0x25500, 0x25500,
1080 0x25510, 0x25518,
1081 0x2552c, 0x25530,
1082 0x2553c, 0x2553c,
812034f1
HS
1083 0x25550, 0x25554,
1084 0x25600, 0x25600,
8119c018
HS
1085 0x25608, 0x2561c,
1086 0x25624, 0x25628,
1087 0x25630, 0x25634,
1088 0x2563c, 0x2563c,
812034f1
HS
1089 0x25700, 0x2571c,
1090 0x25780, 0x2578c,
8119c018
HS
1091 0x25800, 0x25818,
1092 0x25820, 0x25828,
1093 0x25830, 0x25848,
1094 0x25850, 0x25854,
1095 0x25860, 0x25868,
1096 0x25870, 0x25870,
1097 0x25878, 0x25898,
1098 0x258a0, 0x258a8,
1099 0x258b0, 0x258c8,
1100 0x258d0, 0x258d4,
1101 0x258e0, 0x258e8,
1102 0x258f0, 0x258f0,
1103 0x258f8, 0x25a18,
1104 0x25a20, 0x25a28,
1105 0x25a30, 0x25a48,
1106 0x25a50, 0x25a54,
1107 0x25a60, 0x25a68,
1108 0x25a70, 0x25a70,
1109 0x25a78, 0x25a98,
1110 0x25aa0, 0x25aa8,
1111 0x25ab0, 0x25ac8,
1112 0x25ad0, 0x25ad4,
1113 0x25ae0, 0x25ae8,
1114 0x25af0, 0x25af0,
1115 0x25af8, 0x25c18,
1116 0x25c20, 0x25c20,
1117 0x25c28, 0x25c30,
1118 0x25c38, 0x25c38,
1119 0x25c80, 0x25c98,
1120 0x25ca0, 0x25ca8,
1121 0x25cb0, 0x25cc8,
1122 0x25cd0, 0x25cd4,
1123 0x25ce0, 0x25ce8,
1124 0x25cf0, 0x25cf0,
1125 0x25cf8, 0x25d7c,
812034f1
HS
1126 0x25e00, 0x25e04,
1127 0x26000, 0x2602c,
1128 0x26100, 0x2613c,
8119c018
HS
1129 0x26190, 0x261a0,
1130 0x261a8, 0x261b8,
1131 0x261c4, 0x261c8,
812034f1 1132 0x26200, 0x26318,
8119c018
HS
1133 0x26400, 0x264b4,
1134 0x264c0, 0x26528,
812034f1
HS
1135 0x26540, 0x26614,
1136 0x27000, 0x27040,
1137 0x2704c, 0x27060,
1138 0x270c0, 0x270ec,
1139 0x27200, 0x27268,
1140 0x27270, 0x27284,
1141 0x272fc, 0x27388,
1142 0x27400, 0x27404,
8119c018
HS
1143 0x27500, 0x27500,
1144 0x27510, 0x27518,
1145 0x2752c, 0x27530,
1146 0x2753c, 0x2753c,
812034f1
HS
1147 0x27550, 0x27554,
1148 0x27600, 0x27600,
8119c018
HS
1149 0x27608, 0x2761c,
1150 0x27624, 0x27628,
1151 0x27630, 0x27634,
1152 0x2763c, 0x2763c,
812034f1
HS
1153 0x27700, 0x2771c,
1154 0x27780, 0x2778c,
8119c018
HS
1155 0x27800, 0x27818,
1156 0x27820, 0x27828,
1157 0x27830, 0x27848,
1158 0x27850, 0x27854,
1159 0x27860, 0x27868,
1160 0x27870, 0x27870,
1161 0x27878, 0x27898,
1162 0x278a0, 0x278a8,
1163 0x278b0, 0x278c8,
1164 0x278d0, 0x278d4,
1165 0x278e0, 0x278e8,
1166 0x278f0, 0x278f0,
1167 0x278f8, 0x27a18,
1168 0x27a20, 0x27a28,
1169 0x27a30, 0x27a48,
1170 0x27a50, 0x27a54,
1171 0x27a60, 0x27a68,
1172 0x27a70, 0x27a70,
1173 0x27a78, 0x27a98,
1174 0x27aa0, 0x27aa8,
1175 0x27ab0, 0x27ac8,
1176 0x27ad0, 0x27ad4,
1177 0x27ae0, 0x27ae8,
1178 0x27af0, 0x27af0,
1179 0x27af8, 0x27c18,
1180 0x27c20, 0x27c20,
1181 0x27c28, 0x27c30,
1182 0x27c38, 0x27c38,
1183 0x27c80, 0x27c98,
1184 0x27ca0, 0x27ca8,
1185 0x27cb0, 0x27cc8,
1186 0x27cd0, 0x27cd4,
1187 0x27ce0, 0x27ce8,
1188 0x27cf0, 0x27cf0,
1189 0x27cf8, 0x27d7c,
9f5ac48d 1190 0x27e00, 0x27e04,
812034f1
HS
1191 };
1192
1193 static const unsigned int t5_reg_ranges[] = {
8119c018
HS
1194 0x1008, 0x10c0,
1195 0x10cc, 0x10f8,
1196 0x1100, 0x1100,
1197 0x110c, 0x1148,
1198 0x1180, 0x1184,
1199 0x1190, 0x1194,
1200 0x11a0, 0x11a4,
1201 0x11b0, 0x11b4,
812034f1
HS
1202 0x11fc, 0x123c,
1203 0x1280, 0x173c,
1204 0x1800, 0x18fc,
1205 0x3000, 0x3028,
8119c018
HS
1206 0x3060, 0x30b0,
1207 0x30b8, 0x30d8,
812034f1
HS
1208 0x30e0, 0x30fc,
1209 0x3140, 0x357c,
1210 0x35a8, 0x35cc,
1211 0x35ec, 0x35ec,
1212 0x3600, 0x5624,
8119c018
HS
1213 0x56cc, 0x56ec,
1214 0x56f4, 0x5720,
1215 0x5728, 0x575c,
812034f1 1216 0x580c, 0x5814,
8119c018
HS
1217 0x5890, 0x589c,
1218 0x58a4, 0x58ac,
1219 0x58b8, 0x58bc,
1220 0x5940, 0x59c8,
1221 0x59d0, 0x59dc,
812034f1 1222 0x59fc, 0x5a18,
8119c018
HS
1223 0x5a60, 0x5a70,
1224 0x5a80, 0x5a9c,
9f5ac48d 1225 0x5b94, 0x5bfc,
8119c018
HS
1226 0x6000, 0x6020,
1227 0x6028, 0x6040,
1228 0x6058, 0x609c,
1229 0x60a8, 0x614c,
812034f1
HS
1230 0x7700, 0x7798,
1231 0x77c0, 0x78fc,
8119c018
HS
1232 0x7b00, 0x7b58,
1233 0x7b60, 0x7b84,
1234 0x7b8c, 0x7c54,
1235 0x7d00, 0x7d38,
1236 0x7d40, 0x7d80,
1237 0x7d8c, 0x7ddc,
1238 0x7de4, 0x7e04,
1239 0x7e10, 0x7e1c,
1240 0x7e24, 0x7e38,
1241 0x7e40, 0x7e44,
1242 0x7e4c, 0x7e78,
1243 0x7e80, 0x7edc,
1244 0x7ee8, 0x7efc,
812034f1 1245 0x8dc0, 0x8de0,
8119c018
HS
1246 0x8df8, 0x8e04,
1247 0x8e10, 0x8e84,
812034f1 1248 0x8ea0, 0x8f84,
8119c018
HS
1249 0x8fc0, 0x9058,
1250 0x9060, 0x9060,
1251 0x9068, 0x90f8,
1252 0x9400, 0x9408,
1253 0x9410, 0x9470,
1254 0x9600, 0x9600,
1255 0x9608, 0x9638,
1256 0x9640, 0x96f4,
812034f1
HS
1257 0x9800, 0x9808,
1258 0x9820, 0x983c,
1259 0x9850, 0x9864,
1260 0x9c00, 0x9c6c,
1261 0x9c80, 0x9cec,
1262 0x9d00, 0x9d6c,
1263 0x9d80, 0x9dec,
1264 0x9e00, 0x9e6c,
1265 0x9e80, 0x9eec,
1266 0x9f00, 0x9f6c,
1267 0x9f80, 0xa020,
8119c018
HS
1268 0xd004, 0xd004,
1269 0xd010, 0xd03c,
812034f1 1270 0xdfc0, 0xdfe0,
8119c018
HS
1271 0xe000, 0x1106c,
1272 0x11074, 0x11088,
1273 0x1109c, 0x1117c,
812034f1
HS
1274 0x11190, 0x11204,
1275 0x19040, 0x1906c,
1276 0x19078, 0x19080,
8119c018
HS
1277 0x1908c, 0x190e8,
1278 0x190f0, 0x190f8,
1279 0x19100, 0x19110,
1280 0x19120, 0x19124,
1281 0x19150, 0x19194,
1282 0x1919c, 0x191b0,
812034f1
HS
1283 0x191d0, 0x191e8,
1284 0x19238, 0x19290,
8119c018
HS
1285 0x193f8, 0x19428,
1286 0x19430, 0x19444,
1287 0x1944c, 0x1946c,
1288 0x19474, 0x19474,
812034f1
HS
1289 0x19490, 0x194cc,
1290 0x194f0, 0x194f8,
8119c018
HS
1291 0x19c00, 0x19c08,
1292 0x19c10, 0x19c60,
1293 0x19c94, 0x19ce4,
1294 0x19cf0, 0x19d40,
1295 0x19d50, 0x19d94,
1296 0x19da0, 0x19de8,
1297 0x19df0, 0x19e10,
1298 0x19e50, 0x19e90,
1299 0x19ea0, 0x19f24,
1300 0x19f34, 0x19f34,
812034f1 1301 0x19f40, 0x19f50,
8119c018
HS
1302 0x19f90, 0x19fb4,
1303 0x19fc4, 0x19fe4,
1304 0x1a000, 0x1a004,
1305 0x1a010, 0x1a06c,
1306 0x1a0b0, 0x1a0e4,
1307 0x1a0ec, 0x1a0f8,
1308 0x1a100, 0x1a108,
1309 0x1a114, 0x1a120,
1310 0x1a128, 0x1a130,
1311 0x1a138, 0x1a138,
812034f1
HS
1312 0x1a190, 0x1a1c4,
1313 0x1a1fc, 0x1a1fc,
1314 0x1e008, 0x1e00c,
8119c018
HS
1315 0x1e040, 0x1e044,
1316 0x1e04c, 0x1e04c,
812034f1
HS
1317 0x1e284, 0x1e290,
1318 0x1e2c0, 0x1e2c0,
1319 0x1e2e0, 0x1e2e0,
1320 0x1e300, 0x1e384,
1321 0x1e3c0, 0x1e3c8,
1322 0x1e408, 0x1e40c,
8119c018
HS
1323 0x1e440, 0x1e444,
1324 0x1e44c, 0x1e44c,
812034f1
HS
1325 0x1e684, 0x1e690,
1326 0x1e6c0, 0x1e6c0,
1327 0x1e6e0, 0x1e6e0,
1328 0x1e700, 0x1e784,
1329 0x1e7c0, 0x1e7c8,
1330 0x1e808, 0x1e80c,
8119c018
HS
1331 0x1e840, 0x1e844,
1332 0x1e84c, 0x1e84c,
812034f1
HS
1333 0x1ea84, 0x1ea90,
1334 0x1eac0, 0x1eac0,
1335 0x1eae0, 0x1eae0,
1336 0x1eb00, 0x1eb84,
1337 0x1ebc0, 0x1ebc8,
1338 0x1ec08, 0x1ec0c,
8119c018
HS
1339 0x1ec40, 0x1ec44,
1340 0x1ec4c, 0x1ec4c,
812034f1
HS
1341 0x1ee84, 0x1ee90,
1342 0x1eec0, 0x1eec0,
1343 0x1eee0, 0x1eee0,
1344 0x1ef00, 0x1ef84,
1345 0x1efc0, 0x1efc8,
1346 0x1f008, 0x1f00c,
8119c018
HS
1347 0x1f040, 0x1f044,
1348 0x1f04c, 0x1f04c,
812034f1
HS
1349 0x1f284, 0x1f290,
1350 0x1f2c0, 0x1f2c0,
1351 0x1f2e0, 0x1f2e0,
1352 0x1f300, 0x1f384,
1353 0x1f3c0, 0x1f3c8,
1354 0x1f408, 0x1f40c,
8119c018
HS
1355 0x1f440, 0x1f444,
1356 0x1f44c, 0x1f44c,
812034f1
HS
1357 0x1f684, 0x1f690,
1358 0x1f6c0, 0x1f6c0,
1359 0x1f6e0, 0x1f6e0,
1360 0x1f700, 0x1f784,
1361 0x1f7c0, 0x1f7c8,
1362 0x1f808, 0x1f80c,
8119c018
HS
1363 0x1f840, 0x1f844,
1364 0x1f84c, 0x1f84c,
812034f1
HS
1365 0x1fa84, 0x1fa90,
1366 0x1fac0, 0x1fac0,
1367 0x1fae0, 0x1fae0,
1368 0x1fb00, 0x1fb84,
1369 0x1fbc0, 0x1fbc8,
1370 0x1fc08, 0x1fc0c,
8119c018
HS
1371 0x1fc40, 0x1fc44,
1372 0x1fc4c, 0x1fc4c,
812034f1
HS
1373 0x1fe84, 0x1fe90,
1374 0x1fec0, 0x1fec0,
1375 0x1fee0, 0x1fee0,
1376 0x1ff00, 0x1ff84,
1377 0x1ffc0, 0x1ffc8,
1378 0x30000, 0x30030,
8119c018
HS
1379 0x30038, 0x30038,
1380 0x30040, 0x30040,
812034f1 1381 0x30100, 0x30144,
8119c018
HS
1382 0x30190, 0x301a0,
1383 0x301a8, 0x301b8,
1384 0x301c4, 0x301c8,
1385 0x301d0, 0x301d0,
812034f1 1386 0x30200, 0x30318,
8119c018
HS
1387 0x30400, 0x304b4,
1388 0x304c0, 0x3052c,
812034f1 1389 0x30540, 0x3061c,
8119c018
HS
1390 0x30800, 0x30828,
1391 0x30834, 0x30834,
812034f1
HS
1392 0x308c0, 0x30908,
1393 0x30910, 0x309ac,
8119c018
HS
1394 0x30a00, 0x30a14,
1395 0x30a1c, 0x30a2c,
812034f1 1396 0x30a44, 0x30a50,
8119c018
HS
1397 0x30a74, 0x30a74,
1398 0x30a7c, 0x30afc,
1399 0x30b08, 0x30c24,
9f5ac48d 1400 0x30d00, 0x30d00,
812034f1
HS
1401 0x30d08, 0x30d14,
1402 0x30d1c, 0x30d20,
8119c018
HS
1403 0x30d3c, 0x30d3c,
1404 0x30d48, 0x30d50,
812034f1
HS
1405 0x31200, 0x3120c,
1406 0x31220, 0x31220,
1407 0x31240, 0x31240,
9f5ac48d 1408 0x31600, 0x3160c,
812034f1 1409 0x31a00, 0x31a1c,
9f5ac48d 1410 0x31e00, 0x31e20,
812034f1
HS
1411 0x31e38, 0x31e3c,
1412 0x31e80, 0x31e80,
1413 0x31e88, 0x31ea8,
1414 0x31eb0, 0x31eb4,
1415 0x31ec8, 0x31ed4,
1416 0x31fb8, 0x32004,
9f5ac48d
HS
1417 0x32200, 0x32200,
1418 0x32208, 0x32240,
1419 0x32248, 0x32280,
1420 0x32288, 0x322c0,
1421 0x322c8, 0x322fc,
812034f1
HS
1422 0x32600, 0x32630,
1423 0x32a00, 0x32abc,
8119c018
HS
1424 0x32b00, 0x32b10,
1425 0x32b20, 0x32b30,
1426 0x32b40, 0x32b50,
1427 0x32b60, 0x32b70,
1428 0x33000, 0x33028,
1429 0x33030, 0x33048,
1430 0x33060, 0x33068,
1431 0x33070, 0x3309c,
1432 0x330f0, 0x33128,
1433 0x33130, 0x33148,
1434 0x33160, 0x33168,
1435 0x33170, 0x3319c,
1436 0x331f0, 0x33238,
1437 0x33240, 0x33240,
1438 0x33248, 0x33250,
1439 0x3325c, 0x33264,
1440 0x33270, 0x332b8,
1441 0x332c0, 0x332e4,
1442 0x332f8, 0x33338,
1443 0x33340, 0x33340,
1444 0x33348, 0x33350,
1445 0x3335c, 0x33364,
1446 0x33370, 0x333b8,
1447 0x333c0, 0x333e4,
1448 0x333f8, 0x33428,
1449 0x33430, 0x33448,
1450 0x33460, 0x33468,
1451 0x33470, 0x3349c,
1452 0x334f0, 0x33528,
1453 0x33530, 0x33548,
1454 0x33560, 0x33568,
1455 0x33570, 0x3359c,
1456 0x335f0, 0x33638,
1457 0x33640, 0x33640,
1458 0x33648, 0x33650,
1459 0x3365c, 0x33664,
1460 0x33670, 0x336b8,
1461 0x336c0, 0x336e4,
1462 0x336f8, 0x33738,
1463 0x33740, 0x33740,
1464 0x33748, 0x33750,
1465 0x3375c, 0x33764,
1466 0x33770, 0x337b8,
1467 0x337c0, 0x337e4,
812034f1
HS
1468 0x337f8, 0x337fc,
1469 0x33814, 0x33814,
1470 0x3382c, 0x3382c,
1471 0x33880, 0x3388c,
1472 0x338e8, 0x338ec,
8119c018
HS
1473 0x33900, 0x33928,
1474 0x33930, 0x33948,
1475 0x33960, 0x33968,
1476 0x33970, 0x3399c,
1477 0x339f0, 0x33a38,
1478 0x33a40, 0x33a40,
1479 0x33a48, 0x33a50,
1480 0x33a5c, 0x33a64,
1481 0x33a70, 0x33ab8,
1482 0x33ac0, 0x33ae4,
812034f1
HS
1483 0x33af8, 0x33b10,
1484 0x33b28, 0x33b28,
1485 0x33b3c, 0x33b50,
1486 0x33bf0, 0x33c10,
1487 0x33c28, 0x33c28,
1488 0x33c3c, 0x33c50,
1489 0x33cf0, 0x33cfc,
1490 0x34000, 0x34030,
8119c018
HS
1491 0x34038, 0x34038,
1492 0x34040, 0x34040,
812034f1 1493 0x34100, 0x34144,
8119c018
HS
1494 0x34190, 0x341a0,
1495 0x341a8, 0x341b8,
1496 0x341c4, 0x341c8,
1497 0x341d0, 0x341d0,
812034f1 1498 0x34200, 0x34318,
8119c018
HS
1499 0x34400, 0x344b4,
1500 0x344c0, 0x3452c,
812034f1 1501 0x34540, 0x3461c,
8119c018
HS
1502 0x34800, 0x34828,
1503 0x34834, 0x34834,
812034f1
HS
1504 0x348c0, 0x34908,
1505 0x34910, 0x349ac,
8119c018
HS
1506 0x34a00, 0x34a14,
1507 0x34a1c, 0x34a2c,
812034f1 1508 0x34a44, 0x34a50,
8119c018
HS
1509 0x34a74, 0x34a74,
1510 0x34a7c, 0x34afc,
1511 0x34b08, 0x34c24,
9f5ac48d 1512 0x34d00, 0x34d00,
812034f1
HS
1513 0x34d08, 0x34d14,
1514 0x34d1c, 0x34d20,
8119c018
HS
1515 0x34d3c, 0x34d3c,
1516 0x34d48, 0x34d50,
812034f1
HS
1517 0x35200, 0x3520c,
1518 0x35220, 0x35220,
1519 0x35240, 0x35240,
9f5ac48d 1520 0x35600, 0x3560c,
812034f1 1521 0x35a00, 0x35a1c,
9f5ac48d 1522 0x35e00, 0x35e20,
812034f1
HS
1523 0x35e38, 0x35e3c,
1524 0x35e80, 0x35e80,
1525 0x35e88, 0x35ea8,
1526 0x35eb0, 0x35eb4,
1527 0x35ec8, 0x35ed4,
1528 0x35fb8, 0x36004,
9f5ac48d
HS
1529 0x36200, 0x36200,
1530 0x36208, 0x36240,
1531 0x36248, 0x36280,
1532 0x36288, 0x362c0,
1533 0x362c8, 0x362fc,
812034f1
HS
1534 0x36600, 0x36630,
1535 0x36a00, 0x36abc,
8119c018
HS
1536 0x36b00, 0x36b10,
1537 0x36b20, 0x36b30,
1538 0x36b40, 0x36b50,
1539 0x36b60, 0x36b70,
1540 0x37000, 0x37028,
1541 0x37030, 0x37048,
1542 0x37060, 0x37068,
1543 0x37070, 0x3709c,
1544 0x370f0, 0x37128,
1545 0x37130, 0x37148,
1546 0x37160, 0x37168,
1547 0x37170, 0x3719c,
1548 0x371f0, 0x37238,
1549 0x37240, 0x37240,
1550 0x37248, 0x37250,
1551 0x3725c, 0x37264,
1552 0x37270, 0x372b8,
1553 0x372c0, 0x372e4,
1554 0x372f8, 0x37338,
1555 0x37340, 0x37340,
1556 0x37348, 0x37350,
1557 0x3735c, 0x37364,
1558 0x37370, 0x373b8,
1559 0x373c0, 0x373e4,
1560 0x373f8, 0x37428,
1561 0x37430, 0x37448,
1562 0x37460, 0x37468,
1563 0x37470, 0x3749c,
1564 0x374f0, 0x37528,
1565 0x37530, 0x37548,
1566 0x37560, 0x37568,
1567 0x37570, 0x3759c,
1568 0x375f0, 0x37638,
1569 0x37640, 0x37640,
1570 0x37648, 0x37650,
1571 0x3765c, 0x37664,
1572 0x37670, 0x376b8,
1573 0x376c0, 0x376e4,
1574 0x376f8, 0x37738,
1575 0x37740, 0x37740,
1576 0x37748, 0x37750,
1577 0x3775c, 0x37764,
1578 0x37770, 0x377b8,
1579 0x377c0, 0x377e4,
812034f1
HS
1580 0x377f8, 0x377fc,
1581 0x37814, 0x37814,
1582 0x3782c, 0x3782c,
1583 0x37880, 0x3788c,
1584 0x378e8, 0x378ec,
8119c018
HS
1585 0x37900, 0x37928,
1586 0x37930, 0x37948,
1587 0x37960, 0x37968,
1588 0x37970, 0x3799c,
1589 0x379f0, 0x37a38,
1590 0x37a40, 0x37a40,
1591 0x37a48, 0x37a50,
1592 0x37a5c, 0x37a64,
1593 0x37a70, 0x37ab8,
1594 0x37ac0, 0x37ae4,
812034f1
HS
1595 0x37af8, 0x37b10,
1596 0x37b28, 0x37b28,
1597 0x37b3c, 0x37b50,
1598 0x37bf0, 0x37c10,
1599 0x37c28, 0x37c28,
1600 0x37c3c, 0x37c50,
1601 0x37cf0, 0x37cfc,
1602 0x38000, 0x38030,
8119c018
HS
1603 0x38038, 0x38038,
1604 0x38040, 0x38040,
812034f1 1605 0x38100, 0x38144,
8119c018
HS
1606 0x38190, 0x381a0,
1607 0x381a8, 0x381b8,
1608 0x381c4, 0x381c8,
1609 0x381d0, 0x381d0,
812034f1 1610 0x38200, 0x38318,
8119c018
HS
1611 0x38400, 0x384b4,
1612 0x384c0, 0x3852c,
812034f1 1613 0x38540, 0x3861c,
8119c018
HS
1614 0x38800, 0x38828,
1615 0x38834, 0x38834,
812034f1
HS
1616 0x388c0, 0x38908,
1617 0x38910, 0x389ac,
8119c018
HS
1618 0x38a00, 0x38a14,
1619 0x38a1c, 0x38a2c,
812034f1 1620 0x38a44, 0x38a50,
8119c018
HS
1621 0x38a74, 0x38a74,
1622 0x38a7c, 0x38afc,
1623 0x38b08, 0x38c24,
9f5ac48d 1624 0x38d00, 0x38d00,
812034f1
HS
1625 0x38d08, 0x38d14,
1626 0x38d1c, 0x38d20,
8119c018
HS
1627 0x38d3c, 0x38d3c,
1628 0x38d48, 0x38d50,
812034f1
HS
1629 0x39200, 0x3920c,
1630 0x39220, 0x39220,
1631 0x39240, 0x39240,
9f5ac48d 1632 0x39600, 0x3960c,
812034f1 1633 0x39a00, 0x39a1c,
9f5ac48d 1634 0x39e00, 0x39e20,
812034f1
HS
1635 0x39e38, 0x39e3c,
1636 0x39e80, 0x39e80,
1637 0x39e88, 0x39ea8,
1638 0x39eb0, 0x39eb4,
1639 0x39ec8, 0x39ed4,
1640 0x39fb8, 0x3a004,
9f5ac48d
HS
1641 0x3a200, 0x3a200,
1642 0x3a208, 0x3a240,
1643 0x3a248, 0x3a280,
1644 0x3a288, 0x3a2c0,
1645 0x3a2c8, 0x3a2fc,
812034f1
HS
1646 0x3a600, 0x3a630,
1647 0x3aa00, 0x3aabc,
8119c018
HS
1648 0x3ab00, 0x3ab10,
1649 0x3ab20, 0x3ab30,
1650 0x3ab40, 0x3ab50,
1651 0x3ab60, 0x3ab70,
1652 0x3b000, 0x3b028,
1653 0x3b030, 0x3b048,
1654 0x3b060, 0x3b068,
1655 0x3b070, 0x3b09c,
1656 0x3b0f0, 0x3b128,
1657 0x3b130, 0x3b148,
1658 0x3b160, 0x3b168,
1659 0x3b170, 0x3b19c,
1660 0x3b1f0, 0x3b238,
1661 0x3b240, 0x3b240,
1662 0x3b248, 0x3b250,
1663 0x3b25c, 0x3b264,
1664 0x3b270, 0x3b2b8,
1665 0x3b2c0, 0x3b2e4,
1666 0x3b2f8, 0x3b338,
1667 0x3b340, 0x3b340,
1668 0x3b348, 0x3b350,
1669 0x3b35c, 0x3b364,
1670 0x3b370, 0x3b3b8,
1671 0x3b3c0, 0x3b3e4,
1672 0x3b3f8, 0x3b428,
1673 0x3b430, 0x3b448,
1674 0x3b460, 0x3b468,
1675 0x3b470, 0x3b49c,
1676 0x3b4f0, 0x3b528,
1677 0x3b530, 0x3b548,
1678 0x3b560, 0x3b568,
1679 0x3b570, 0x3b59c,
1680 0x3b5f0, 0x3b638,
1681 0x3b640, 0x3b640,
1682 0x3b648, 0x3b650,
1683 0x3b65c, 0x3b664,
1684 0x3b670, 0x3b6b8,
1685 0x3b6c0, 0x3b6e4,
1686 0x3b6f8, 0x3b738,
1687 0x3b740, 0x3b740,
1688 0x3b748, 0x3b750,
1689 0x3b75c, 0x3b764,
1690 0x3b770, 0x3b7b8,
1691 0x3b7c0, 0x3b7e4,
812034f1
HS
1692 0x3b7f8, 0x3b7fc,
1693 0x3b814, 0x3b814,
1694 0x3b82c, 0x3b82c,
1695 0x3b880, 0x3b88c,
1696 0x3b8e8, 0x3b8ec,
8119c018
HS
1697 0x3b900, 0x3b928,
1698 0x3b930, 0x3b948,
1699 0x3b960, 0x3b968,
1700 0x3b970, 0x3b99c,
1701 0x3b9f0, 0x3ba38,
1702 0x3ba40, 0x3ba40,
1703 0x3ba48, 0x3ba50,
1704 0x3ba5c, 0x3ba64,
1705 0x3ba70, 0x3bab8,
1706 0x3bac0, 0x3bae4,
812034f1
HS
1707 0x3baf8, 0x3bb10,
1708 0x3bb28, 0x3bb28,
1709 0x3bb3c, 0x3bb50,
1710 0x3bbf0, 0x3bc10,
1711 0x3bc28, 0x3bc28,
1712 0x3bc3c, 0x3bc50,
1713 0x3bcf0, 0x3bcfc,
1714 0x3c000, 0x3c030,
8119c018
HS
1715 0x3c038, 0x3c038,
1716 0x3c040, 0x3c040,
812034f1 1717 0x3c100, 0x3c144,
8119c018
HS
1718 0x3c190, 0x3c1a0,
1719 0x3c1a8, 0x3c1b8,
1720 0x3c1c4, 0x3c1c8,
1721 0x3c1d0, 0x3c1d0,
812034f1 1722 0x3c200, 0x3c318,
8119c018
HS
1723 0x3c400, 0x3c4b4,
1724 0x3c4c0, 0x3c52c,
812034f1 1725 0x3c540, 0x3c61c,
8119c018
HS
1726 0x3c800, 0x3c828,
1727 0x3c834, 0x3c834,
812034f1
HS
1728 0x3c8c0, 0x3c908,
1729 0x3c910, 0x3c9ac,
8119c018
HS
1730 0x3ca00, 0x3ca14,
1731 0x3ca1c, 0x3ca2c,
812034f1 1732 0x3ca44, 0x3ca50,
8119c018
HS
1733 0x3ca74, 0x3ca74,
1734 0x3ca7c, 0x3cafc,
1735 0x3cb08, 0x3cc24,
9f5ac48d 1736 0x3cd00, 0x3cd00,
812034f1
HS
1737 0x3cd08, 0x3cd14,
1738 0x3cd1c, 0x3cd20,
8119c018
HS
1739 0x3cd3c, 0x3cd3c,
1740 0x3cd48, 0x3cd50,
812034f1
HS
1741 0x3d200, 0x3d20c,
1742 0x3d220, 0x3d220,
1743 0x3d240, 0x3d240,
9f5ac48d 1744 0x3d600, 0x3d60c,
812034f1 1745 0x3da00, 0x3da1c,
9f5ac48d 1746 0x3de00, 0x3de20,
812034f1
HS
1747 0x3de38, 0x3de3c,
1748 0x3de80, 0x3de80,
1749 0x3de88, 0x3dea8,
1750 0x3deb0, 0x3deb4,
1751 0x3dec8, 0x3ded4,
1752 0x3dfb8, 0x3e004,
9f5ac48d
HS
1753 0x3e200, 0x3e200,
1754 0x3e208, 0x3e240,
1755 0x3e248, 0x3e280,
1756 0x3e288, 0x3e2c0,
1757 0x3e2c8, 0x3e2fc,
812034f1
HS
1758 0x3e600, 0x3e630,
1759 0x3ea00, 0x3eabc,
8119c018
HS
1760 0x3eb00, 0x3eb10,
1761 0x3eb20, 0x3eb30,
1762 0x3eb40, 0x3eb50,
1763 0x3eb60, 0x3eb70,
1764 0x3f000, 0x3f028,
1765 0x3f030, 0x3f048,
1766 0x3f060, 0x3f068,
1767 0x3f070, 0x3f09c,
1768 0x3f0f0, 0x3f128,
1769 0x3f130, 0x3f148,
1770 0x3f160, 0x3f168,
1771 0x3f170, 0x3f19c,
1772 0x3f1f0, 0x3f238,
1773 0x3f240, 0x3f240,
1774 0x3f248, 0x3f250,
1775 0x3f25c, 0x3f264,
1776 0x3f270, 0x3f2b8,
1777 0x3f2c0, 0x3f2e4,
1778 0x3f2f8, 0x3f338,
1779 0x3f340, 0x3f340,
1780 0x3f348, 0x3f350,
1781 0x3f35c, 0x3f364,
1782 0x3f370, 0x3f3b8,
1783 0x3f3c0, 0x3f3e4,
1784 0x3f3f8, 0x3f428,
1785 0x3f430, 0x3f448,
1786 0x3f460, 0x3f468,
1787 0x3f470, 0x3f49c,
1788 0x3f4f0, 0x3f528,
1789 0x3f530, 0x3f548,
1790 0x3f560, 0x3f568,
1791 0x3f570, 0x3f59c,
1792 0x3f5f0, 0x3f638,
1793 0x3f640, 0x3f640,
1794 0x3f648, 0x3f650,
1795 0x3f65c, 0x3f664,
1796 0x3f670, 0x3f6b8,
1797 0x3f6c0, 0x3f6e4,
1798 0x3f6f8, 0x3f738,
1799 0x3f740, 0x3f740,
1800 0x3f748, 0x3f750,
1801 0x3f75c, 0x3f764,
1802 0x3f770, 0x3f7b8,
1803 0x3f7c0, 0x3f7e4,
812034f1
HS
1804 0x3f7f8, 0x3f7fc,
1805 0x3f814, 0x3f814,
1806 0x3f82c, 0x3f82c,
1807 0x3f880, 0x3f88c,
1808 0x3f8e8, 0x3f8ec,
8119c018
HS
1809 0x3f900, 0x3f928,
1810 0x3f930, 0x3f948,
1811 0x3f960, 0x3f968,
1812 0x3f970, 0x3f99c,
1813 0x3f9f0, 0x3fa38,
1814 0x3fa40, 0x3fa40,
1815 0x3fa48, 0x3fa50,
1816 0x3fa5c, 0x3fa64,
1817 0x3fa70, 0x3fab8,
1818 0x3fac0, 0x3fae4,
812034f1
HS
1819 0x3faf8, 0x3fb10,
1820 0x3fb28, 0x3fb28,
1821 0x3fb3c, 0x3fb50,
1822 0x3fbf0, 0x3fc10,
1823 0x3fc28, 0x3fc28,
1824 0x3fc3c, 0x3fc50,
1825 0x3fcf0, 0x3fcfc,
1826 0x40000, 0x4000c,
8119c018
HS
1827 0x40040, 0x40050,
1828 0x40060, 0x40068,
1829 0x4007c, 0x4008c,
1830 0x40094, 0x400b0,
1831 0x400c0, 0x40144,
812034f1 1832 0x40180, 0x4018c,
8119c018
HS
1833 0x40200, 0x40254,
1834 0x40260, 0x40264,
1835 0x40270, 0x40288,
1836 0x40290, 0x40298,
1837 0x402ac, 0x402c8,
1838 0x402d0, 0x402e0,
1839 0x402f0, 0x402f0,
1840 0x40300, 0x4033c,
812034f1
HS
1841 0x403f8, 0x403fc,
1842 0x41304, 0x413c4,
8119c018
HS
1843 0x41400, 0x4140c,
1844 0x41414, 0x4141c,
812034f1 1845 0x41480, 0x414d0,
8119c018
HS
1846 0x44000, 0x44054,
1847 0x4405c, 0x44078,
1848 0x440c0, 0x44174,
1849 0x44180, 0x441ac,
1850 0x441b4, 0x441b8,
1851 0x441c0, 0x44254,
1852 0x4425c, 0x44278,
1853 0x442c0, 0x44374,
1854 0x44380, 0x443ac,
1855 0x443b4, 0x443b8,
1856 0x443c0, 0x44454,
1857 0x4445c, 0x44478,
1858 0x444c0, 0x44574,
1859 0x44580, 0x445ac,
1860 0x445b4, 0x445b8,
1861 0x445c0, 0x44654,
1862 0x4465c, 0x44678,
1863 0x446c0, 0x44774,
1864 0x44780, 0x447ac,
1865 0x447b4, 0x447b8,
1866 0x447c0, 0x44854,
1867 0x4485c, 0x44878,
1868 0x448c0, 0x44974,
1869 0x44980, 0x449ac,
1870 0x449b4, 0x449b8,
1871 0x449c0, 0x449fc,
1872 0x45000, 0x45004,
1873 0x45010, 0x45030,
1874 0x45040, 0x45060,
1875 0x45068, 0x45068,
812034f1
HS
1876 0x45080, 0x45084,
1877 0x450a0, 0x450b0,
8119c018
HS
1878 0x45200, 0x45204,
1879 0x45210, 0x45230,
1880 0x45240, 0x45260,
1881 0x45268, 0x45268,
812034f1
HS
1882 0x45280, 0x45284,
1883 0x452a0, 0x452b0,
1884 0x460c0, 0x460e4,
8119c018
HS
1885 0x47000, 0x4703c,
1886 0x47044, 0x4708c,
812034f1 1887 0x47200, 0x47250,
8119c018
HS
1888 0x47400, 0x47408,
1889 0x47414, 0x47420,
812034f1
HS
1890 0x47600, 0x47618,
1891 0x47800, 0x47814,
1892 0x48000, 0x4800c,
8119c018
HS
1893 0x48040, 0x48050,
1894 0x48060, 0x48068,
1895 0x4807c, 0x4808c,
1896 0x48094, 0x480b0,
1897 0x480c0, 0x48144,
812034f1 1898 0x48180, 0x4818c,
8119c018
HS
1899 0x48200, 0x48254,
1900 0x48260, 0x48264,
1901 0x48270, 0x48288,
1902 0x48290, 0x48298,
1903 0x482ac, 0x482c8,
1904 0x482d0, 0x482e0,
1905 0x482f0, 0x482f0,
1906 0x48300, 0x4833c,
812034f1
HS
1907 0x483f8, 0x483fc,
1908 0x49304, 0x493c4,
8119c018
HS
1909 0x49400, 0x4940c,
1910 0x49414, 0x4941c,
812034f1 1911 0x49480, 0x494d0,
8119c018
HS
1912 0x4c000, 0x4c054,
1913 0x4c05c, 0x4c078,
1914 0x4c0c0, 0x4c174,
1915 0x4c180, 0x4c1ac,
1916 0x4c1b4, 0x4c1b8,
1917 0x4c1c0, 0x4c254,
1918 0x4c25c, 0x4c278,
1919 0x4c2c0, 0x4c374,
1920 0x4c380, 0x4c3ac,
1921 0x4c3b4, 0x4c3b8,
1922 0x4c3c0, 0x4c454,
1923 0x4c45c, 0x4c478,
1924 0x4c4c0, 0x4c574,
1925 0x4c580, 0x4c5ac,
1926 0x4c5b4, 0x4c5b8,
1927 0x4c5c0, 0x4c654,
1928 0x4c65c, 0x4c678,
1929 0x4c6c0, 0x4c774,
1930 0x4c780, 0x4c7ac,
1931 0x4c7b4, 0x4c7b8,
1932 0x4c7c0, 0x4c854,
1933 0x4c85c, 0x4c878,
1934 0x4c8c0, 0x4c974,
1935 0x4c980, 0x4c9ac,
1936 0x4c9b4, 0x4c9b8,
1937 0x4c9c0, 0x4c9fc,
1938 0x4d000, 0x4d004,
1939 0x4d010, 0x4d030,
1940 0x4d040, 0x4d060,
1941 0x4d068, 0x4d068,
812034f1
HS
1942 0x4d080, 0x4d084,
1943 0x4d0a0, 0x4d0b0,
8119c018
HS
1944 0x4d200, 0x4d204,
1945 0x4d210, 0x4d230,
1946 0x4d240, 0x4d260,
1947 0x4d268, 0x4d268,
812034f1
HS
1948 0x4d280, 0x4d284,
1949 0x4d2a0, 0x4d2b0,
1950 0x4e0c0, 0x4e0e4,
8119c018
HS
1951 0x4f000, 0x4f03c,
1952 0x4f044, 0x4f08c,
812034f1 1953 0x4f200, 0x4f250,
8119c018
HS
1954 0x4f400, 0x4f408,
1955 0x4f414, 0x4f420,
812034f1
HS
1956 0x4f600, 0x4f618,
1957 0x4f800, 0x4f814,
8119c018
HS
1958 0x50000, 0x50084,
1959 0x50090, 0x500cc,
812034f1 1960 0x50400, 0x50400,
8119c018
HS
1961 0x50800, 0x50884,
1962 0x50890, 0x508cc,
812034f1
HS
1963 0x50c00, 0x50c00,
1964 0x51000, 0x5101c,
1965 0x51300, 0x51308,
1966 };
1967
ab4b583b 1968 static const unsigned int t6_reg_ranges[] = {
8119c018
HS
1969 0x1008, 0x101c,
1970 0x1024, 0x10a8,
1971 0x10b4, 0x10f8,
1972 0x1100, 0x1114,
1973 0x111c, 0x112c,
1974 0x1138, 0x113c,
1975 0x1144, 0x114c,
1976 0x1180, 0x1184,
1977 0x1190, 0x1194,
1978 0x11a0, 0x11a4,
1979 0x11b0, 0x11b4,
676d6a75
HS
1980 0x11fc, 0x1258,
1981 0x1280, 0x12d4,
1982 0x12d9, 0x12d9,
1983 0x12de, 0x12de,
1984 0x12e3, 0x12e3,
1985 0x12e8, 0x133c,
ab4b583b
HS
1986 0x1800, 0x18fc,
1987 0x3000, 0x302c,
8119c018
HS
1988 0x3060, 0x30b0,
1989 0x30b8, 0x30d8,
ab4b583b
HS
1990 0x30e0, 0x30fc,
1991 0x3140, 0x357c,
1992 0x35a8, 0x35cc,
1993 0x35ec, 0x35ec,
1994 0x3600, 0x5624,
8119c018
HS
1995 0x56cc, 0x56ec,
1996 0x56f4, 0x5720,
1997 0x5728, 0x575c,
ab4b583b 1998 0x580c, 0x5814,
8119c018
HS
1999 0x5890, 0x589c,
2000 0x58a4, 0x58ac,
2001 0x58b8, 0x58bc,
ab4b583b
HS
2002 0x5940, 0x595c,
2003 0x5980, 0x598c,
8119c018
HS
2004 0x59b0, 0x59c8,
2005 0x59d0, 0x59dc,
ab4b583b
HS
2006 0x59fc, 0x5a18,
2007 0x5a60, 0x5a6c,
8119c018
HS
2008 0x5a80, 0x5a8c,
2009 0x5a94, 0x5a9c,
ab4b583b 2010 0x5b94, 0x5bfc,
8119c018
HS
2011 0x5c10, 0x5e48,
2012 0x5e50, 0x5e94,
2013 0x5ea0, 0x5eb0,
2014 0x5ec0, 0x5ec0,
676d6a75 2015 0x5ec8, 0x5ed0,
8119c018
HS
2016 0x6000, 0x6020,
2017 0x6028, 0x6040,
2018 0x6058, 0x609c,
2019 0x60a8, 0x619c,
ab4b583b
HS
2020 0x7700, 0x7798,
2021 0x77c0, 0x7880,
2022 0x78cc, 0x78fc,
8119c018
HS
2023 0x7b00, 0x7b58,
2024 0x7b60, 0x7b84,
2025 0x7b8c, 0x7c54,
2026 0x7d00, 0x7d38,
2027 0x7d40, 0x7d84,
2028 0x7d8c, 0x7ddc,
2029 0x7de4, 0x7e04,
2030 0x7e10, 0x7e1c,
2031 0x7e24, 0x7e38,
2032 0x7e40, 0x7e44,
2033 0x7e4c, 0x7e78,
2034 0x7e80, 0x7edc,
2035 0x7ee8, 0x7efc,
f109ff11 2036 0x8dc0, 0x8de4,
8119c018
HS
2037 0x8df8, 0x8e04,
2038 0x8e10, 0x8e84,
ab4b583b 2039 0x8ea0, 0x8f88,
8119c018
HS
2040 0x8fb8, 0x9058,
2041 0x9060, 0x9060,
2042 0x9068, 0x90f8,
2043 0x9100, 0x9124,
ab4b583b 2044 0x9400, 0x9470,
8119c018
HS
2045 0x9600, 0x9600,
2046 0x9608, 0x9638,
2047 0x9640, 0x9704,
2048 0x9710, 0x971c,
ab4b583b
HS
2049 0x9800, 0x9808,
2050 0x9820, 0x983c,
2051 0x9850, 0x9864,
2052 0x9c00, 0x9c6c,
2053 0x9c80, 0x9cec,
2054 0x9d00, 0x9d6c,
2055 0x9d80, 0x9dec,
2056 0x9e00, 0x9e6c,
2057 0x9e80, 0x9eec,
2058 0x9f00, 0x9f6c,
2059 0x9f80, 0xa020,
2060 0xd004, 0xd03c,
5b4e83e1 2061 0xd100, 0xd118,
8119c018
HS
2062 0xd200, 0xd214,
2063 0xd220, 0xd234,
2064 0xd240, 0xd254,
2065 0xd260, 0xd274,
2066 0xd280, 0xd294,
2067 0xd2a0, 0xd2b4,
2068 0xd2c0, 0xd2d4,
2069 0xd2e0, 0xd2f4,
2070 0xd300, 0xd31c,
ab4b583b
HS
2071 0xdfc0, 0xdfe0,
2072 0xe000, 0xf008,
2073 0x11000, 0x11014,
8119c018
HS
2074 0x11048, 0x1106c,
2075 0x11074, 0x11088,
2076 0x11098, 0x11120,
2077 0x1112c, 0x1117c,
2078 0x11190, 0x112e0,
ab4b583b 2079 0x11300, 0x1130c,
5b4e83e1 2080 0x12000, 0x1206c,
ab4b583b
HS
2081 0x19040, 0x1906c,
2082 0x19078, 0x19080,
8119c018
HS
2083 0x1908c, 0x190e8,
2084 0x190f0, 0x190f8,
2085 0x19100, 0x19110,
2086 0x19120, 0x19124,
2087 0x19150, 0x19194,
2088 0x1919c, 0x191b0,
ab4b583b 2089 0x191d0, 0x191e8,
676d6a75
HS
2090 0x19238, 0x19290,
2091 0x192a4, 0x192b0,
8119c018
HS
2092 0x192bc, 0x192bc,
2093 0x19348, 0x1934c,
2094 0x193f8, 0x19418,
2095 0x19420, 0x19428,
2096 0x19430, 0x19444,
2097 0x1944c, 0x1946c,
2098 0x19474, 0x19474,
ab4b583b
HS
2099 0x19490, 0x194cc,
2100 0x194f0, 0x194f8,
8119c018
HS
2101 0x19c00, 0x19c48,
2102 0x19c50, 0x19c80,
2103 0x19c94, 0x19c98,
2104 0x19ca0, 0x19cbc,
2105 0x19ce4, 0x19ce4,
2106 0x19cf0, 0x19cf8,
2107 0x19d00, 0x19d28,
ab4b583b 2108 0x19d50, 0x19d78,
8119c018
HS
2109 0x19d94, 0x19d98,
2110 0x19da0, 0x19dc8,
ab4b583b
HS
2111 0x19df0, 0x19e10,
2112 0x19e50, 0x19e6c,
8119c018
HS
2113 0x19ea0, 0x19ebc,
2114 0x19ec4, 0x19ef4,
2115 0x19f04, 0x19f2c,
2116 0x19f34, 0x19f34,
ab4b583b
HS
2117 0x19f40, 0x19f50,
2118 0x19f90, 0x19fac,
8119c018
HS
2119 0x19fc4, 0x19fc8,
2120 0x19fd0, 0x19fe4,
2121 0x1a000, 0x1a004,
2122 0x1a010, 0x1a06c,
2123 0x1a0b0, 0x1a0e4,
2124 0x1a0ec, 0x1a0f8,
2125 0x1a100, 0x1a108,
2126 0x1a114, 0x1a120,
2127 0x1a128, 0x1a130,
2128 0x1a138, 0x1a138,
ab4b583b
HS
2129 0x1a190, 0x1a1c4,
2130 0x1a1fc, 0x1a1fc,
2131 0x1e008, 0x1e00c,
8119c018
HS
2132 0x1e040, 0x1e044,
2133 0x1e04c, 0x1e04c,
ab4b583b
HS
2134 0x1e284, 0x1e290,
2135 0x1e2c0, 0x1e2c0,
2136 0x1e2e0, 0x1e2e0,
2137 0x1e300, 0x1e384,
2138 0x1e3c0, 0x1e3c8,
2139 0x1e408, 0x1e40c,
8119c018
HS
2140 0x1e440, 0x1e444,
2141 0x1e44c, 0x1e44c,
ab4b583b
HS
2142 0x1e684, 0x1e690,
2143 0x1e6c0, 0x1e6c0,
2144 0x1e6e0, 0x1e6e0,
2145 0x1e700, 0x1e784,
2146 0x1e7c0, 0x1e7c8,
2147 0x1e808, 0x1e80c,
8119c018
HS
2148 0x1e840, 0x1e844,
2149 0x1e84c, 0x1e84c,
ab4b583b
HS
2150 0x1ea84, 0x1ea90,
2151 0x1eac0, 0x1eac0,
2152 0x1eae0, 0x1eae0,
2153 0x1eb00, 0x1eb84,
2154 0x1ebc0, 0x1ebc8,
2155 0x1ec08, 0x1ec0c,
8119c018
HS
2156 0x1ec40, 0x1ec44,
2157 0x1ec4c, 0x1ec4c,
ab4b583b
HS
2158 0x1ee84, 0x1ee90,
2159 0x1eec0, 0x1eec0,
2160 0x1eee0, 0x1eee0,
2161 0x1ef00, 0x1ef84,
2162 0x1efc0, 0x1efc8,
2163 0x1f008, 0x1f00c,
8119c018
HS
2164 0x1f040, 0x1f044,
2165 0x1f04c, 0x1f04c,
ab4b583b
HS
2166 0x1f284, 0x1f290,
2167 0x1f2c0, 0x1f2c0,
2168 0x1f2e0, 0x1f2e0,
2169 0x1f300, 0x1f384,
2170 0x1f3c0, 0x1f3c8,
2171 0x1f408, 0x1f40c,
8119c018
HS
2172 0x1f440, 0x1f444,
2173 0x1f44c, 0x1f44c,
ab4b583b
HS
2174 0x1f684, 0x1f690,
2175 0x1f6c0, 0x1f6c0,
2176 0x1f6e0, 0x1f6e0,
2177 0x1f700, 0x1f784,
2178 0x1f7c0, 0x1f7c8,
2179 0x1f808, 0x1f80c,
8119c018
HS
2180 0x1f840, 0x1f844,
2181 0x1f84c, 0x1f84c,
ab4b583b
HS
2182 0x1fa84, 0x1fa90,
2183 0x1fac0, 0x1fac0,
2184 0x1fae0, 0x1fae0,
2185 0x1fb00, 0x1fb84,
2186 0x1fbc0, 0x1fbc8,
2187 0x1fc08, 0x1fc0c,
8119c018
HS
2188 0x1fc40, 0x1fc44,
2189 0x1fc4c, 0x1fc4c,
ab4b583b
HS
2190 0x1fe84, 0x1fe90,
2191 0x1fec0, 0x1fec0,
2192 0x1fee0, 0x1fee0,
2193 0x1ff00, 0x1ff84,
2194 0x1ffc0, 0x1ffc8,
8119c018
HS
2195 0x30000, 0x30030,
2196 0x30038, 0x30038,
2197 0x30040, 0x30040,
2198 0x30048, 0x30048,
2199 0x30050, 0x30050,
2200 0x3005c, 0x30060,
2201 0x30068, 0x30068,
2202 0x30070, 0x30070,
2203 0x30100, 0x30168,
2204 0x30190, 0x301a0,
2205 0x301a8, 0x301b8,
2206 0x301c4, 0x301c8,
2207 0x301d0, 0x301d0,
f109ff11 2208 0x30200, 0x30320,
8119c018
HS
2209 0x30400, 0x304b4,
2210 0x304c0, 0x3052c,
ab4b583b 2211 0x30540, 0x3061c,
8119c018 2212 0x30800, 0x308a0,
ab4b583b
HS
2213 0x308c0, 0x30908,
2214 0x30910, 0x309b8,
2215 0x30a00, 0x30a04,
8119c018
HS
2216 0x30a0c, 0x30a14,
2217 0x30a1c, 0x30a2c,
ab4b583b 2218 0x30a44, 0x30a50,
8119c018
HS
2219 0x30a74, 0x30a74,
2220 0x30a7c, 0x30afc,
2221 0x30b08, 0x30c24,
2222 0x30d00, 0x30d14,
2223 0x30d1c, 0x30d3c,
2224 0x30d44, 0x30d4c,
2225 0x30d54, 0x30d74,
2226 0x30d7c, 0x30d7c,
ab4b583b
HS
2227 0x30de0, 0x30de0,
2228 0x30e00, 0x30ed4,
2229 0x30f00, 0x30fa4,
2230 0x30fc0, 0x30fc4,
2231 0x31000, 0x31004,
2232 0x31080, 0x310fc,
2233 0x31208, 0x31220,
2234 0x3123c, 0x31254,
2235 0x31300, 0x31300,
2236 0x31308, 0x3131c,
2237 0x31338, 0x3133c,
2238 0x31380, 0x31380,
2239 0x31388, 0x313a8,
2240 0x313b4, 0x313b4,
2241 0x31400, 0x31420,
2242 0x31438, 0x3143c,
2243 0x31480, 0x31480,
2244 0x314a8, 0x314a8,
2245 0x314b0, 0x314b4,
2246 0x314c8, 0x314d4,
2247 0x31a40, 0x31a4c,
2248 0x31af0, 0x31b20,
2249 0x31b38, 0x31b3c,
2250 0x31b80, 0x31b80,
2251 0x31ba8, 0x31ba8,
2252 0x31bb0, 0x31bb4,
2253 0x31bc8, 0x31bd4,
2254 0x32140, 0x3218c,
8119c018
HS
2255 0x321f0, 0x321f4,
2256 0x32200, 0x32200,
ab4b583b
HS
2257 0x32218, 0x32218,
2258 0x32400, 0x32400,
2259 0x32408, 0x3241c,
2260 0x32618, 0x32620,
2261 0x32664, 0x32664,
2262 0x326a8, 0x326a8,
2263 0x326ec, 0x326ec,
2264 0x32a00, 0x32abc,
8119c018
HS
2265 0x32b00, 0x32b38,
2266 0x32b40, 0x32b58,
2267 0x32b60, 0x32b78,
ab4b583b
HS
2268 0x32c00, 0x32c00,
2269 0x32c08, 0x32c3c,
2270 0x32e00, 0x32e2c,
2271 0x32f00, 0x32f2c,
8119c018
HS
2272 0x33000, 0x3302c,
2273 0x33034, 0x33050,
2274 0x33058, 0x33058,
2275 0x33060, 0x3308c,
2276 0x3309c, 0x330ac,
2277 0x330c0, 0x330c0,
2278 0x330c8, 0x330d0,
2279 0x330d8, 0x330e0,
2280 0x330ec, 0x3312c,
2281 0x33134, 0x33150,
2282 0x33158, 0x33158,
2283 0x33160, 0x3318c,
2284 0x3319c, 0x331ac,
2285 0x331c0, 0x331c0,
2286 0x331c8, 0x331d0,
2287 0x331d8, 0x331e0,
2288 0x331ec, 0x33290,
2289 0x33298, 0x332c4,
2290 0x332e4, 0x33390,
2291 0x33398, 0x333c4,
2292 0x333e4, 0x3342c,
2293 0x33434, 0x33450,
2294 0x33458, 0x33458,
2295 0x33460, 0x3348c,
2296 0x3349c, 0x334ac,
2297 0x334c0, 0x334c0,
2298 0x334c8, 0x334d0,
2299 0x334d8, 0x334e0,
2300 0x334ec, 0x3352c,
2301 0x33534, 0x33550,
2302 0x33558, 0x33558,
2303 0x33560, 0x3358c,
2304 0x3359c, 0x335ac,
2305 0x335c0, 0x335c0,
2306 0x335c8, 0x335d0,
2307 0x335d8, 0x335e0,
2308 0x335ec, 0x33690,
2309 0x33698, 0x336c4,
2310 0x336e4, 0x33790,
2311 0x33798, 0x337c4,
ab4b583b
HS
2312 0x337e4, 0x337fc,
2313 0x33814, 0x33814,
2314 0x33854, 0x33868,
2315 0x33880, 0x3388c,
2316 0x338c0, 0x338d0,
2317 0x338e8, 0x338ec,
8119c018
HS
2318 0x33900, 0x3392c,
2319 0x33934, 0x33950,
2320 0x33958, 0x33958,
2321 0x33960, 0x3398c,
2322 0x3399c, 0x339ac,
2323 0x339c0, 0x339c0,
2324 0x339c8, 0x339d0,
2325 0x339d8, 0x339e0,
2326 0x339ec, 0x33a90,
2327 0x33a98, 0x33ac4,
ab4b583b 2328 0x33ae4, 0x33b10,
8119c018
HS
2329 0x33b24, 0x33b28,
2330 0x33b38, 0x33b50,
ab4b583b 2331 0x33bf0, 0x33c10,
8119c018
HS
2332 0x33c24, 0x33c28,
2333 0x33c38, 0x33c50,
ab4b583b 2334 0x33cf0, 0x33cfc,
8119c018
HS
2335 0x34000, 0x34030,
2336 0x34038, 0x34038,
2337 0x34040, 0x34040,
2338 0x34048, 0x34048,
2339 0x34050, 0x34050,
2340 0x3405c, 0x34060,
2341 0x34068, 0x34068,
2342 0x34070, 0x34070,
2343 0x34100, 0x34168,
2344 0x34190, 0x341a0,
2345 0x341a8, 0x341b8,
2346 0x341c4, 0x341c8,
2347 0x341d0, 0x341d0,
f109ff11 2348 0x34200, 0x34320,
8119c018
HS
2349 0x34400, 0x344b4,
2350 0x344c0, 0x3452c,
ab4b583b 2351 0x34540, 0x3461c,
8119c018 2352 0x34800, 0x348a0,
ab4b583b
HS
2353 0x348c0, 0x34908,
2354 0x34910, 0x349b8,
2355 0x34a00, 0x34a04,
8119c018
HS
2356 0x34a0c, 0x34a14,
2357 0x34a1c, 0x34a2c,
ab4b583b 2358 0x34a44, 0x34a50,
8119c018
HS
2359 0x34a74, 0x34a74,
2360 0x34a7c, 0x34afc,
2361 0x34b08, 0x34c24,
2362 0x34d00, 0x34d14,
2363 0x34d1c, 0x34d3c,
2364 0x34d44, 0x34d4c,
2365 0x34d54, 0x34d74,
2366 0x34d7c, 0x34d7c,
ab4b583b
HS
2367 0x34de0, 0x34de0,
2368 0x34e00, 0x34ed4,
2369 0x34f00, 0x34fa4,
2370 0x34fc0, 0x34fc4,
2371 0x35000, 0x35004,
2372 0x35080, 0x350fc,
2373 0x35208, 0x35220,
2374 0x3523c, 0x35254,
2375 0x35300, 0x35300,
2376 0x35308, 0x3531c,
2377 0x35338, 0x3533c,
2378 0x35380, 0x35380,
2379 0x35388, 0x353a8,
2380 0x353b4, 0x353b4,
2381 0x35400, 0x35420,
2382 0x35438, 0x3543c,
2383 0x35480, 0x35480,
2384 0x354a8, 0x354a8,
2385 0x354b0, 0x354b4,
2386 0x354c8, 0x354d4,
2387 0x35a40, 0x35a4c,
2388 0x35af0, 0x35b20,
2389 0x35b38, 0x35b3c,
2390 0x35b80, 0x35b80,
2391 0x35ba8, 0x35ba8,
2392 0x35bb0, 0x35bb4,
2393 0x35bc8, 0x35bd4,
2394 0x36140, 0x3618c,
8119c018
HS
2395 0x361f0, 0x361f4,
2396 0x36200, 0x36200,
ab4b583b
HS
2397 0x36218, 0x36218,
2398 0x36400, 0x36400,
2399 0x36408, 0x3641c,
2400 0x36618, 0x36620,
2401 0x36664, 0x36664,
2402 0x366a8, 0x366a8,
2403 0x366ec, 0x366ec,
2404 0x36a00, 0x36abc,
8119c018
HS
2405 0x36b00, 0x36b38,
2406 0x36b40, 0x36b58,
2407 0x36b60, 0x36b78,
ab4b583b
HS
2408 0x36c00, 0x36c00,
2409 0x36c08, 0x36c3c,
2410 0x36e00, 0x36e2c,
2411 0x36f00, 0x36f2c,
8119c018
HS
2412 0x37000, 0x3702c,
2413 0x37034, 0x37050,
2414 0x37058, 0x37058,
2415 0x37060, 0x3708c,
2416 0x3709c, 0x370ac,
2417 0x370c0, 0x370c0,
2418 0x370c8, 0x370d0,
2419 0x370d8, 0x370e0,
2420 0x370ec, 0x3712c,
2421 0x37134, 0x37150,
2422 0x37158, 0x37158,
2423 0x37160, 0x3718c,
2424 0x3719c, 0x371ac,
2425 0x371c0, 0x371c0,
2426 0x371c8, 0x371d0,
2427 0x371d8, 0x371e0,
2428 0x371ec, 0x37290,
2429 0x37298, 0x372c4,
2430 0x372e4, 0x37390,
2431 0x37398, 0x373c4,
2432 0x373e4, 0x3742c,
2433 0x37434, 0x37450,
2434 0x37458, 0x37458,
2435 0x37460, 0x3748c,
2436 0x3749c, 0x374ac,
2437 0x374c0, 0x374c0,
2438 0x374c8, 0x374d0,
2439 0x374d8, 0x374e0,
2440 0x374ec, 0x3752c,
2441 0x37534, 0x37550,
2442 0x37558, 0x37558,
2443 0x37560, 0x3758c,
2444 0x3759c, 0x375ac,
2445 0x375c0, 0x375c0,
2446 0x375c8, 0x375d0,
2447 0x375d8, 0x375e0,
2448 0x375ec, 0x37690,
2449 0x37698, 0x376c4,
2450 0x376e4, 0x37790,
2451 0x37798, 0x377c4,
ab4b583b
HS
2452 0x377e4, 0x377fc,
2453 0x37814, 0x37814,
2454 0x37854, 0x37868,
2455 0x37880, 0x3788c,
2456 0x378c0, 0x378d0,
2457 0x378e8, 0x378ec,
8119c018
HS
2458 0x37900, 0x3792c,
2459 0x37934, 0x37950,
2460 0x37958, 0x37958,
2461 0x37960, 0x3798c,
2462 0x3799c, 0x379ac,
2463 0x379c0, 0x379c0,
2464 0x379c8, 0x379d0,
2465 0x379d8, 0x379e0,
2466 0x379ec, 0x37a90,
2467 0x37a98, 0x37ac4,
ab4b583b 2468 0x37ae4, 0x37b10,
8119c018
HS
2469 0x37b24, 0x37b28,
2470 0x37b38, 0x37b50,
ab4b583b 2471 0x37bf0, 0x37c10,
8119c018
HS
2472 0x37c24, 0x37c28,
2473 0x37c38, 0x37c50,
ab4b583b
HS
2474 0x37cf0, 0x37cfc,
2475 0x40040, 0x40040,
2476 0x40080, 0x40084,
2477 0x40100, 0x40100,
2478 0x40140, 0x401bc,
2479 0x40200, 0x40214,
2480 0x40228, 0x40228,
2481 0x40240, 0x40258,
2482 0x40280, 0x40280,
2483 0x40304, 0x40304,
2484 0x40330, 0x4033c,
676d6a75
HS
2485 0x41304, 0x413b8,
2486 0x413c0, 0x413c8,
8119c018
HS
2487 0x413d0, 0x413dc,
2488 0x413f0, 0x413f0,
2489 0x41400, 0x4140c,
2490 0x41414, 0x4141c,
ab4b583b
HS
2491 0x41480, 0x414d0,
2492 0x44000, 0x4407c,
8119c018
HS
2493 0x440c0, 0x441ac,
2494 0x441b4, 0x4427c,
2495 0x442c0, 0x443ac,
2496 0x443b4, 0x4447c,
2497 0x444c0, 0x445ac,
2498 0x445b4, 0x4467c,
2499 0x446c0, 0x447ac,
2500 0x447b4, 0x4487c,
2501 0x448c0, 0x449ac,
2502 0x449b4, 0x44a7c,
2503 0x44ac0, 0x44bac,
2504 0x44bb4, 0x44c7c,
2505 0x44cc0, 0x44dac,
2506 0x44db4, 0x44e7c,
2507 0x44ec0, 0x44fac,
2508 0x44fb4, 0x4507c,
2509 0x450c0, 0x451ac,
2510 0x451b4, 0x451fc,
2511 0x45800, 0x45804,
2512 0x45810, 0x45830,
2513 0x45840, 0x45860,
2514 0x45868, 0x45868,
ab4b583b
HS
2515 0x45880, 0x45884,
2516 0x458a0, 0x458b0,
8119c018
HS
2517 0x45a00, 0x45a04,
2518 0x45a10, 0x45a30,
2519 0x45a40, 0x45a60,
2520 0x45a68, 0x45a68,
ab4b583b
HS
2521 0x45a80, 0x45a84,
2522 0x45aa0, 0x45ab0,
2523 0x460c0, 0x460e4,
8119c018
HS
2524 0x47000, 0x4703c,
2525 0x47044, 0x4708c,
ab4b583b 2526 0x47200, 0x47250,
8119c018
HS
2527 0x47400, 0x47408,
2528 0x47414, 0x47420,
ab4b583b 2529 0x47600, 0x47618,
8119c018
HS
2530 0x47800, 0x47814,
2531 0x47820, 0x4782c,
2532 0x50000, 0x50084,
2533 0x50090, 0x500cc,
2534 0x50300, 0x50384,
ab4b583b 2535 0x50400, 0x50400,
8119c018
HS
2536 0x50800, 0x50884,
2537 0x50890, 0x508cc,
2538 0x50b00, 0x50b84,
ab4b583b 2539 0x50c00, 0x50c00,
8119c018
HS
2540 0x51000, 0x51020,
2541 0x51028, 0x510b0,
ab4b583b
HS
2542 0x51300, 0x51324,
2543 };
2544
812034f1
HS
2545 u32 *buf_end = (u32 *)((char *)buf + buf_size);
2546 const unsigned int *reg_ranges;
2547 int reg_ranges_size, range;
2548 unsigned int chip_version = CHELSIO_CHIP_VERSION(adap->params.chip);
2549
2550 /* Select the right set of register ranges to dump depending on the
2551 * adapter chip type.
2552 */
2553 switch (chip_version) {
2554 case CHELSIO_T4:
2555 reg_ranges = t4_reg_ranges;
2556 reg_ranges_size = ARRAY_SIZE(t4_reg_ranges);
2557 break;
2558
2559 case CHELSIO_T5:
2560 reg_ranges = t5_reg_ranges;
2561 reg_ranges_size = ARRAY_SIZE(t5_reg_ranges);
2562 break;
2563
ab4b583b
HS
2564 case CHELSIO_T6:
2565 reg_ranges = t6_reg_ranges;
2566 reg_ranges_size = ARRAY_SIZE(t6_reg_ranges);
2567 break;
2568
812034f1
HS
2569 default:
2570 dev_err(adap->pdev_dev,
2571 "Unsupported chip version %d\n", chip_version);
2572 return;
2573 }
2574
2575 /* Clear the register buffer and insert the appropriate register
2576 * values selected by the above register ranges.
2577 */
2578 memset(buf, 0, buf_size);
2579 for (range = 0; range < reg_ranges_size; range += 2) {
2580 unsigned int reg = reg_ranges[range];
2581 unsigned int last_reg = reg_ranges[range + 1];
2582 u32 *bufp = (u32 *)((char *)buf + reg);
2583
2584 /* Iterate across the register range filling in the register
2585 * buffer but don't write past the end of the register buffer.
2586 */
2587 while (reg <= last_reg && bufp < buf_end) {
2588 *bufp++ = t4_read_reg(adap, reg);
2589 reg += sizeof(u32);
2590 }
2591 }
2592}
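/* Illustrative sketch (not part of the driver): because each register's value
 * is stored in the dump buffer at its own register address offset, a caller
 * that kept the buffer and its size from the dump above can index it
 * directly.  Registers outside the dumped ranges simply read back as zero
 * from the memset() above.
 */
static u32 __maybe_unused example_dumped_reg_val(const void *buf,
						 size_t buf_size,
						 unsigned int reg)
{
	if (reg + sizeof(u32) > buf_size)
		return 0;
	return *(const u32 *)((const char *)buf + reg);
}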
2593
56d36be4 2594#define EEPROM_STAT_ADDR 0x7bfc
67e65879 2595#define VPD_SIZE 0x800
47ce9c48
SR
2596#define VPD_BASE 0x400
2597#define VPD_BASE_OLD 0
0a57a536 2598#define VPD_LEN 1024
63a92fe6 2599#define CHELSIO_VPD_UNIQUE_ID 0x82
56d36be4
DM
2600
2601/**
2602 * t4_seeprom_wp - enable/disable EEPROM write protection
2603 * @adapter: the adapter
2604 * @enable: whether to enable or disable write protection
2605 *
2606 * Enables or disables write protection on the serial EEPROM.
2607 */
2608int t4_seeprom_wp(struct adapter *adapter, bool enable)
2609{
2610 unsigned int v = enable ? 0xc : 0;
2611 int ret = pci_write_vpd(adapter->pdev, EEPROM_STAT_ADDR, 4, &v);
2612 return ret < 0 ? ret : 0;
2613}
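/* Illustrative sketch (not part of the driver): a caller would typically drop
 * write protection around a VPD/EEPROM update and restore it afterwards.  The
 * helper name and the 32-bit, 4-byte-aligned write are assumptions made for
 * the example.
 */
static int __maybe_unused example_seeprom_write(struct adapter *adapter,
						u32 addr, u32 data)
{
	int ret = t4_seeprom_wp(adapter, false);	/* disable protection */

	if (ret)
		return ret;
	ret = pci_write_vpd(adapter->pdev, addr, sizeof(data), &data);
	if (ret < 0)
		return ret;
	return t4_seeprom_wp(adapter, true);		/* restore protection */
}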
2614
2615/**
098ef6c2 2616 * t4_get_raw_vpd_params - read VPD parameters from VPD EEPROM
56d36be4
DM
2617 * @adapter: adapter to read
2618 * @p: where to store the parameters
2619 *
2620 * Reads card parameters stored in VPD EEPROM.
2621 */
098ef6c2 2622int t4_get_raw_vpd_params(struct adapter *adapter, struct vpd_params *p)
56d36be4 2623{
098ef6c2
HS
2624 int i, ret = 0, addr;
2625 int ec, sn, pn, na;
8c357ebd 2626 u8 *vpd, csum;
23d88e1d 2627 unsigned int vpdr_len, kw_offset, id_len;
56d36be4 2628
8c357ebd
VP
2629 vpd = vmalloc(VPD_LEN);
2630 if (!vpd)
2631 return -ENOMEM;
2632
67e65879
HS
2633 /* We have two VPD data structures stored in the adapter VPD area.
2634 * By default, Linux calculates the size of the VPD area by traversing
2635 * the first VPD area at offset 0x0, so we need to tell the OS what
2636 * our real VPD size is.
2637 */
2638 ret = pci_set_vpd_size(adapter->pdev, VPD_SIZE);
2639 if (ret < 0)
2640 goto out;
2641
098ef6c2
HS
2642 /* Card information normally starts at VPD_BASE but early cards had
2643 * it at 0.
2644 */
47ce9c48
SR
2645 ret = pci_read_vpd(adapter->pdev, VPD_BASE, sizeof(u32), vpd);
2646 if (ret < 0)
2647 goto out;
63a92fe6
HS
2648
2649 /* The VPD shall have a unique identifier specified by the PCI SIG.
2650 * For Chelsio adapters that identifier is CHELSIO_VPD_UNIQUE_ID (0x82),
2651 * and the VPD programming software is expected to place it in the
2652 * first byte of the VPD, i.e. at the very
2653 * beginning of the VPD area.
2654 */
2655 addr = *vpd == CHELSIO_VPD_UNIQUE_ID ? VPD_BASE : VPD_BASE_OLD;
47ce9c48
SR
2656
2657 ret = pci_read_vpd(adapter->pdev, addr, VPD_LEN, vpd);
56d36be4 2658 if (ret < 0)
8c357ebd 2659 goto out;
56d36be4 2660
23d88e1d
DM
2661 if (vpd[0] != PCI_VPD_LRDT_ID_STRING) {
2662 dev_err(adapter->pdev_dev, "missing VPD ID string\n");
8c357ebd
VP
2663 ret = -EINVAL;
2664 goto out;
23d88e1d
DM
2665 }
2666
2667 id_len = pci_vpd_lrdt_size(vpd);
2668 if (id_len > ID_LEN)
2669 id_len = ID_LEN;
2670
2671 i = pci_vpd_find_tag(vpd, 0, VPD_LEN, PCI_VPD_LRDT_RO_DATA);
2672 if (i < 0) {
2673 dev_err(adapter->pdev_dev, "missing VPD-R section\n");
8c357ebd
VP
2674 ret = -EINVAL;
2675 goto out;
23d88e1d
DM
2676 }
2677
2678 vpdr_len = pci_vpd_lrdt_size(&vpd[i]);
2679 kw_offset = i + PCI_VPD_LRDT_TAG_SIZE;
2680 if (vpdr_len + kw_offset > VPD_LEN) {
226ec5fd 2681 dev_err(adapter->pdev_dev, "bad VPD-R length %u\n", vpdr_len);
8c357ebd
VP
2682 ret = -EINVAL;
2683 goto out;
226ec5fd
DM
2684 }
2685
2686#define FIND_VPD_KW(var, name) do { \
23d88e1d 2687 var = pci_vpd_find_info_keyword(vpd, kw_offset, vpdr_len, name); \
226ec5fd
DM
2688 if (var < 0) { \
2689 dev_err(adapter->pdev_dev, "missing VPD keyword " name "\n"); \
8c357ebd
VP
2690 ret = -EINVAL; \
2691 goto out; \
226ec5fd
DM
2692 } \
2693 var += PCI_VPD_INFO_FLD_HDR_SIZE; \
2694} while (0)
2695
2696 FIND_VPD_KW(i, "RV");
2697 for (csum = 0; i >= 0; i--)
2698 csum += vpd[i];
56d36be4
DM
2699
2700 if (csum) {
2701 dev_err(adapter->pdev_dev,
2702 "corrupted VPD EEPROM, actual csum %u\n", csum);
8c357ebd
VP
2703 ret = -EINVAL;
2704 goto out;
56d36be4
DM
2705 }
2706
226ec5fd
DM
2707 FIND_VPD_KW(ec, "EC");
2708 FIND_VPD_KW(sn, "SN");
a94cd705 2709 FIND_VPD_KW(pn, "PN");
098ef6c2 2710 FIND_VPD_KW(na, "NA");
226ec5fd
DM
2711#undef FIND_VPD_KW
2712
23d88e1d 2713 memcpy(p->id, vpd + PCI_VPD_LRDT_TAG_SIZE, id_len);
56d36be4 2714 strim(p->id);
226ec5fd 2715 memcpy(p->ec, vpd + ec, EC_LEN);
56d36be4 2716 strim(p->ec);
226ec5fd
DM
2717 i = pci_vpd_info_field_size(vpd + sn - PCI_VPD_INFO_FLD_HDR_SIZE);
2718 memcpy(p->sn, vpd + sn, min(i, SERNUM_LEN));
56d36be4 2719 strim(p->sn);
63a92fe6 2720 i = pci_vpd_info_field_size(vpd + pn - PCI_VPD_INFO_FLD_HDR_SIZE);
a94cd705
KS
2721 memcpy(p->pn, vpd + pn, min(i, PN_LEN));
2722 strim(p->pn);
098ef6c2
HS
2723 memcpy(p->na, vpd + na, min(i, MACADDR_LEN));
2724 strim((char *)p->na);
636f9d37 2725
098ef6c2
HS
2726out:
2727 vfree(vpd);
2728 return ret;
2729}
2730
2731/**
2732 * t4_get_vpd_params - read VPD parameters & retrieve Core Clock
2733 * @adapter: adapter to read
2734 * @p: where to store the parameters
2735 *
2736 * Reads card parameters stored in VPD EEPROM and retrieves the Core
2737 * Clock. This can only be called after a connection to the firmware
2738 * is established.
2739 */
2740int t4_get_vpd_params(struct adapter *adapter, struct vpd_params *p)
2741{
2742 u32 cclk_param, cclk_val;
2743 int ret;
2744
2745 /* Grab the raw VPD parameters.
2746 */
2747 ret = t4_get_raw_vpd_params(adapter, p);
2748 if (ret)
2749 return ret;
2750
2751 /* Ask firmware for the Core Clock since it knows how to translate the
636f9d37
VP
2752 * Reference Clock ('V2') VPD field into a Core Clock value ...
2753 */
5167865a
HS
2754 cclk_param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
2755 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_CCLK));
098ef6c2 2756 ret = t4_query_params(adapter, adapter->mbox, adapter->pf, 0,
636f9d37 2757 1, &cclk_param, &cclk_val);
8c357ebd 2758
636f9d37
VP
2759 if (ret)
2760 return ret;
2761 p->cclk = cclk_val;
2762
56d36be4
DM
2763 return 0;
2764}
2765
2766/* serial flash and firmware constants */
2767enum {
2768 SF_ATTEMPTS = 10, /* max retries for SF operations */
2769
2770 /* flash command opcodes */
2771 SF_PROG_PAGE = 2, /* program page */
2772 SF_WR_DISABLE = 4, /* disable writes */
2773 SF_RD_STATUS = 5, /* read status register */
2774 SF_WR_ENABLE = 6, /* enable writes */
2775 SF_RD_DATA_FAST = 0xb, /* read flash */
900a6596 2776 SF_RD_ID = 0x9f, /* read ID */
56d36be4
DM
2777 SF_ERASE_SECTOR = 0xd8, /* erase sector */
2778
6f1d7210 2779 FW_MAX_SIZE = 16 * SF_SEC_SIZE,
56d36be4
DM
2780};
2781
2782/**
2783 * sf1_read - read data from the serial flash
2784 * @adapter: the adapter
2785 * @byte_cnt: number of bytes to read
2786 * @cont: whether another operation will be chained
2787 * @lock: whether to lock SF for PL access only
2788 * @valp: where to store the read data
2789 *
2790 * Reads up to 4 bytes of data from the serial flash. The location of
2791 * the read needs to be specified prior to calling this by issuing the
2792 * appropriate commands to the serial flash.
2793 */
2794static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
2795 int lock, u32 *valp)
2796{
2797 int ret;
2798
2799 if (!byte_cnt || byte_cnt > 4)
2800 return -EINVAL;
0d804338 2801 if (t4_read_reg(adapter, SF_OP_A) & SF_BUSY_F)
56d36be4 2802 return -EBUSY;
0d804338
HS
2803 t4_write_reg(adapter, SF_OP_A, SF_LOCK_V(lock) |
2804 SF_CONT_V(cont) | BYTECNT_V(byte_cnt - 1));
2805 ret = t4_wait_op_done(adapter, SF_OP_A, SF_BUSY_F, 0, SF_ATTEMPTS, 5);
56d36be4 2806 if (!ret)
0d804338 2807 *valp = t4_read_reg(adapter, SF_DATA_A);
56d36be4
DM
2808 return ret;
2809}
2810
2811/**
2812 * sf1_write - write data to the serial flash
2813 * @adapter: the adapter
2814 * @byte_cnt: number of bytes to write
2815 * @cont: whether another operation will be chained
2816 * @lock: whether to lock SF for PL access only
2817 * @val: value to write
2818 *
2819 * Writes up to 4 bytes of data to the serial flash. The location of
2820 * the write needs to be specified prior to calling this by issuing the
2821 * appropriate commands to the serial flash.
2822 */
2823static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont,
2824 int lock, u32 val)
2825{
2826 if (!byte_cnt || byte_cnt > 4)
2827 return -EINVAL;
0d804338 2828 if (t4_read_reg(adapter, SF_OP_A) & SF_BUSY_F)
56d36be4 2829 return -EBUSY;
0d804338
HS
2830 t4_write_reg(adapter, SF_DATA_A, val);
2831 t4_write_reg(adapter, SF_OP_A, SF_LOCK_V(lock) |
2832 SF_CONT_V(cont) | BYTECNT_V(byte_cnt - 1) | OP_V(1));
2833 return t4_wait_op_done(adapter, SF_OP_A, SF_BUSY_F, 0, SF_ATTEMPTS, 5);
56d36be4
DM
2834}
2835
2836/**
2837 * flash_wait_op - wait for a flash operation to complete
2838 * @adapter: the adapter
2839 * @attempts: max number of polls of the status register
2840 * @delay: delay between polls in ms
2841 *
2842 * Wait for a flash operation to complete by polling the status register.
2843 */
2844static int flash_wait_op(struct adapter *adapter, int attempts, int delay)
2845{
2846 int ret;
2847 u32 status;
2848
2849 while (1) {
2850 if ((ret = sf1_write(adapter, 1, 1, 1, SF_RD_STATUS)) != 0 ||
2851 (ret = sf1_read(adapter, 1, 0, 1, &status)) != 0)
2852 return ret;
2853 if (!(status & 1))
2854 return 0;
2855 if (--attempts == 0)
2856 return -EAGAIN;
2857 if (delay)
2858 msleep(delay);
2859 }
2860}
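/* Illustrative sketch (not the driver's actual flash-identify code): the
 * SF_RD_ID opcode defined above can be exercised with the same
 * sf1_write()/sf1_read() chaining used by the helpers in this file -- issue
 * the one-byte opcode with "cont" set, read back three ID bytes, then release
 * the SF lock.
 */
static int __maybe_unused example_read_flash_id(struct adapter *adapter,
						u32 *id)
{
	int ret = sf1_write(adapter, 1, 1, 0, SF_RD_ID);

	if (!ret)
		ret = sf1_read(adapter, 3, 0, 1, id);
	t4_write_reg(adapter, SF_OP_A, 0);		/* unlock SF */
	return ret;
}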
2861
2862/**
2863 * t4_read_flash - read words from serial flash
2864 * @adapter: the adapter
2865 * @addr: the start address for the read
2866 * @nwords: how many 32-bit words to read
2867 * @data: where to store the read data
2868 * @byte_oriented: whether to store data as bytes or as words
2869 *
2870 * Read the specified number of 32-bit words from the serial flash.
2871 * If @byte_oriented is set the read data is stored as a byte array
2872 * (i.e., big-endian), otherwise as 32-bit words in the platform's
dbedd44e 2873 * natural endianness.
56d36be4 2874 */
49216c1c
HS
2875int t4_read_flash(struct adapter *adapter, unsigned int addr,
2876 unsigned int nwords, u32 *data, int byte_oriented)
56d36be4
DM
2877{
2878 int ret;
2879
900a6596 2880 if (addr + nwords * sizeof(u32) > adapter->params.sf_size || (addr & 3))
56d36be4
DM
2881 return -EINVAL;
2882
2883 addr = swab32(addr) | SF_RD_DATA_FAST;
2884
2885 if ((ret = sf1_write(adapter, 4, 1, 0, addr)) != 0 ||
2886 (ret = sf1_read(adapter, 1, 1, 0, data)) != 0)
2887 return ret;
2888
2889 for ( ; nwords; nwords--, data++) {
2890 ret = sf1_read(adapter, 4, nwords > 1, nwords == 1, data);
2891 if (nwords == 1)
0d804338 2892 t4_write_reg(adapter, SF_OP_A, 0); /* unlock SF */
56d36be4
DM
2893 if (ret)
2894 return ret;
2895 if (byte_oriented)
f404f80c 2896 *data = (__force __u32)(cpu_to_be32(*data));
56d36be4
DM
2897 }
2898 return 0;
2899}
2900
2901/**
2902 * t4_write_flash - write up to a page of data to the serial flash
2903 * @adapter: the adapter
2904 * @addr: the start address to write
2905 * @n: length of data to write in bytes
2906 * @data: the data to write
2907 *
2908 * Writes up to a page of data (256 bytes) to the serial flash starting
2909 * at the given address. All the data must be written to the same page.
2910 */
2911static int t4_write_flash(struct adapter *adapter, unsigned int addr,
2912 unsigned int n, const u8 *data)
2913{
2914 int ret;
2915 u32 buf[64];
2916 unsigned int i, c, left, val, offset = addr & 0xff;
2917
900a6596 2918 if (addr >= adapter->params.sf_size || offset + n > SF_PAGE_SIZE)
56d36be4
DM
2919 return -EINVAL;
2920
2921 val = swab32(addr) | SF_PROG_PAGE;
2922
2923 if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
2924 (ret = sf1_write(adapter, 4, 1, 1, val)) != 0)
2925 goto unlock;
2926
2927 for (left = n; left; left -= c) {
2928 c = min(left, 4U);
2929 for (val = 0, i = 0; i < c; ++i)
2930 val = (val << 8) + *data++;
2931
2932 ret = sf1_write(adapter, c, c != left, 1, val);
2933 if (ret)
2934 goto unlock;
2935 }
900a6596 2936 ret = flash_wait_op(adapter, 8, 1);
56d36be4
DM
2937 if (ret)
2938 goto unlock;
2939
0d804338 2940 t4_write_reg(adapter, SF_OP_A, 0); /* unlock SF */
56d36be4
DM
2941
2942 /* Read the page to verify the write succeeded */
2943 ret = t4_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf, 1);
2944 if (ret)
2945 return ret;
2946
2947 if (memcmp(data - n, (u8 *)buf + offset, n)) {
2948 dev_err(adapter->pdev_dev,
2949 "failed to correctly write the flash page at %#x\n",
2950 addr);
2951 return -EIO;
2952 }
2953 return 0;
2954
2955unlock:
0d804338 2956 t4_write_reg(adapter, SF_OP_A, 0); /* unlock SF */
56d36be4
DM
2957 return ret;
2958}
2959
2960/**
16e47624 2961 * t4_get_fw_version - read the firmware version
56d36be4
DM
2962 * @adapter: the adapter
2963 * @vers: where to place the version
2964 *
2965 * Reads the FW version from flash.
2966 */
16e47624 2967int t4_get_fw_version(struct adapter *adapter, u32 *vers)
56d36be4 2968{
16e47624
HS
2969 return t4_read_flash(adapter, FLASH_FW_START +
2970 offsetof(struct fw_hdr, fw_ver), 1,
2971 vers, 0);
56d36be4
DM
2972}
2973
0de72738
HS
2974/**
2975 * t4_get_bs_version - read the firmware bootstrap version
2976 * @adapter: the adapter
2977 * @vers: where to place the version
2978 *
2979 * Reads the FW Bootstrap version from flash.
2980 */
2981int t4_get_bs_version(struct adapter *adapter, u32 *vers)
2982{
2983 return t4_read_flash(adapter, FLASH_FWBOOTSTRAP_START +
2984 offsetof(struct fw_hdr, fw_ver), 1,
2985 vers, 0);
2986}
2987
56d36be4 2988/**
16e47624 2989 * t4_get_tp_version - read the TP microcode version
56d36be4
DM
2990 * @adapter: the adapter
2991 * @vers: where to place the version
2992 *
2993 * Reads the TP microcode version from flash.
2994 */
16e47624 2995int t4_get_tp_version(struct adapter *adapter, u32 *vers)
56d36be4 2996{
16e47624 2997 return t4_read_flash(adapter, FLASH_FW_START +
900a6596 2998 offsetof(struct fw_hdr, tp_microcode_ver),
56d36be4
DM
2999 1, vers, 0);
3000}
3001
ba3f8cd5
HS
3002/**
3003 * t4_get_exprom_version - return the Expansion ROM version (if any)
3004 * @adapter: the adapter
3005 * @vers: where to place the version
3006 *
3007 * Reads the Expansion ROM header from FLASH and returns the version
3008 * number (if present) through the @vers return value pointer. We return
3009 * this in the Firmware Version Format since it's convenient. Return
3010 * 0 on success, -ENOENT if no Expansion ROM is present.
3011 */
3012int t4_get_exprom_version(struct adapter *adap, u32 *vers)
3013{
3014 struct exprom_header {
3015 unsigned char hdr_arr[16]; /* must start with 0x55aa */
3016 unsigned char hdr_ver[4]; /* Expansion ROM version */
3017 } *hdr;
3018 u32 exprom_header_buf[DIV_ROUND_UP(sizeof(struct exprom_header),
3019 sizeof(u32))];
3020 int ret;
3021
3022 ret = t4_read_flash(adap, FLASH_EXP_ROM_START,
3023 ARRAY_SIZE(exprom_header_buf), exprom_header_buf,
3024 0);
3025 if (ret)
3026 return ret;
3027
3028 hdr = (struct exprom_header *)exprom_header_buf;
3029 if (hdr->hdr_arr[0] != 0x55 || hdr->hdr_arr[1] != 0xaa)
3030 return -ENOENT;
3031
3032 *vers = (FW_HDR_FW_VER_MAJOR_V(hdr->hdr_ver[0]) |
3033 FW_HDR_FW_VER_MINOR_V(hdr->hdr_ver[1]) |
3034 FW_HDR_FW_VER_MICRO_V(hdr->hdr_ver[2]) |
3035 FW_HDR_FW_VER_BUILD_V(hdr->hdr_ver[3]));
3036 return 0;
3037}
3038
a69265e9
HS
3039/**
3040 * t4_check_fw_version - check if the FW is supported with this driver
3041 * @adap: the adapter
3042 *
3043 * Checks if an adapter's FW is compatible with the driver. Returns 0
3044 * if there's an exact match, a negative error if the version could not be
3045 * read or there's a major version mismatch.
3046 */
3047int t4_check_fw_version(struct adapter *adap)
3048{
21d11bd6 3049 int i, ret, major, minor, micro;
a69265e9
HS
3050 int exp_major, exp_minor, exp_micro;
3051 unsigned int chip_version = CHELSIO_CHIP_VERSION(adap->params.chip);
3052
3053 ret = t4_get_fw_version(adap, &adap->params.fw_vers);
21d11bd6
HS
3054 /* Try multiple times before returning error */
3055 for (i = 0; (ret == -EBUSY || ret == -EAGAIN) && i < 3; i++)
3056 ret = t4_get_fw_version(adap, &adap->params.fw_vers);
3057
a69265e9
HS
3058 if (ret)
3059 return ret;
3060
3061 major = FW_HDR_FW_VER_MAJOR_G(adap->params.fw_vers);
3062 minor = FW_HDR_FW_VER_MINOR_G(adap->params.fw_vers);
3063 micro = FW_HDR_FW_VER_MICRO_G(adap->params.fw_vers);
3064
3065 switch (chip_version) {
3066 case CHELSIO_T4:
3067 exp_major = T4FW_MIN_VERSION_MAJOR;
3068 exp_minor = T4FW_MIN_VERSION_MINOR;
3069 exp_micro = T4FW_MIN_VERSION_MICRO;
3070 break;
3071 case CHELSIO_T5:
3072 exp_major = T5FW_MIN_VERSION_MAJOR;
3073 exp_minor = T5FW_MIN_VERSION_MINOR;
3074 exp_micro = T5FW_MIN_VERSION_MICRO;
3075 break;
3076 case CHELSIO_T6:
3077 exp_major = T6FW_MIN_VERSION_MAJOR;
3078 exp_minor = T6FW_MIN_VERSION_MINOR;
3079 exp_micro = T6FW_MIN_VERSION_MICRO;
3080 break;
3081 default:
3082 dev_err(adap->pdev_dev, "Unsupported chip type, %x\n",
3083 adap->chip);
3084 return -EINVAL;
3085 }
3086
3087 if (major < exp_major || (major == exp_major && minor < exp_minor) ||
3088 (major == exp_major && minor == exp_minor && micro < exp_micro)) {
3089 dev_err(adap->pdev_dev,
3090 "Card has firmware version %u.%u.%u, minimum "
3091 "supported firmware is %u.%u.%u.\n", major, minor,
3092 micro, exp_major, exp_minor, exp_micro);
3093 return -EFAULT;
3094 }
3095 return 0;
3096}
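/* Illustrative sketch (not part of the driver): the cached 32-bit firmware
 * version word unpacks with the same FW_HDR_FW_VER_*_G accessors used in the
 * messages above, e.g. for a human-readable log line.
 */
static void __maybe_unused example_show_fw_version(struct adapter *adap)
{
	u32 vers = adap->params.fw_vers;

	dev_info(adap->pdev_dev, "running firmware %u.%u.%u.%u\n",
		 FW_HDR_FW_VER_MAJOR_G(vers), FW_HDR_FW_VER_MINOR_G(vers),
		 FW_HDR_FW_VER_MICRO_G(vers), FW_HDR_FW_VER_BUILD_G(vers));
}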
3097
16e47624
HS
3098/* Is the given firmware API compatible with the one the driver was compiled
3099 * with?
56d36be4 3100 */
16e47624 3101static int fw_compatible(const struct fw_hdr *hdr1, const struct fw_hdr *hdr2)
56d36be4 3102{
56d36be4 3103
16e47624
HS
3104 /* short circuit if it's the exact same firmware version */
3105 if (hdr1->chip == hdr2->chip && hdr1->fw_ver == hdr2->fw_ver)
3106 return 1;
56d36be4 3107
16e47624
HS
3108#define SAME_INTF(x) (hdr1->intfver_##x == hdr2->intfver_##x)
3109 if (hdr1->chip == hdr2->chip && SAME_INTF(nic) && SAME_INTF(vnic) &&
3110 SAME_INTF(ri) && SAME_INTF(iscsi) && SAME_INTF(fcoe))
3111 return 1;
3112#undef SAME_INTF
0a57a536 3113
16e47624
HS
3114 return 0;
3115}
56d36be4 3116
16e47624
HS
3117/* The firmware in the filesystem is usable, but should it be installed?
3118 * This routine explains itself in detail if it indicates the filesystem
3119 * firmware should be installed.
3120 */
3121static int should_install_fs_fw(struct adapter *adap, int card_fw_usable,
3122 int k, int c)
3123{
3124 const char *reason;
3125
3126 if (!card_fw_usable) {
3127 reason = "incompatible or unusable";
3128 goto install;
e69972f5
JH
3129 }
3130
16e47624
HS
3131 if (k > c) {
3132 reason = "older than the version supported with this driver";
3133 goto install;
56d36be4
DM
3134 }
3135
16e47624
HS
3136 return 0;
3137
3138install:
3139 dev_err(adap->pdev_dev, "firmware on card (%u.%u.%u.%u) is %s, "
3140 "installing firmware %u.%u.%u.%u on card.\n",
b2e1a3f0
HS
3141 FW_HDR_FW_VER_MAJOR_G(c), FW_HDR_FW_VER_MINOR_G(c),
3142 FW_HDR_FW_VER_MICRO_G(c), FW_HDR_FW_VER_BUILD_G(c), reason,
3143 FW_HDR_FW_VER_MAJOR_G(k), FW_HDR_FW_VER_MINOR_G(k),
3144 FW_HDR_FW_VER_MICRO_G(k), FW_HDR_FW_VER_BUILD_G(k));
56d36be4 3145
56d36be4
DM
3146 return 1;
3147}
3148
16e47624
HS
3149int t4_prep_fw(struct adapter *adap, struct fw_info *fw_info,
3150 const u8 *fw_data, unsigned int fw_size,
3151 struct fw_hdr *card_fw, enum dev_state state,
3152 int *reset)
3153{
3154 int ret, card_fw_usable, fs_fw_usable;
3155 const struct fw_hdr *fs_fw;
3156 const struct fw_hdr *drv_fw;
3157
3158 drv_fw = &fw_info->fw_hdr;
3159
3160 /* Read the header of the firmware on the card */
3161 ret = -t4_read_flash(adap, FLASH_FW_START,
3162 sizeof(*card_fw) / sizeof(uint32_t),
3163 (uint32_t *)card_fw, 1);
3164 if (ret == 0) {
3165 card_fw_usable = fw_compatible(drv_fw, (const void *)card_fw);
3166 } else {
3167 dev_err(adap->pdev_dev,
3168 "Unable to read card's firmware header: %d\n", ret);
3169 card_fw_usable = 0;
3170 }
3171
3172 if (fw_data != NULL) {
3173 fs_fw = (const void *)fw_data;
3174 fs_fw_usable = fw_compatible(drv_fw, fs_fw);
3175 } else {
3176 fs_fw = NULL;
3177 fs_fw_usable = 0;
3178 }
3179
3180 if (card_fw_usable && card_fw->fw_ver == drv_fw->fw_ver &&
3181 (!fs_fw_usable || fs_fw->fw_ver == drv_fw->fw_ver)) {
3182 /* Common case: the firmware on the card is an exact match and
3183 * the filesystem one is an exact match too, or the filesystem
3184 * one is absent/incompatible.
3185 */
3186 } else if (fs_fw_usable && state == DEV_STATE_UNINIT &&
3187 should_install_fs_fw(adap, card_fw_usable,
3188 be32_to_cpu(fs_fw->fw_ver),
3189 be32_to_cpu(card_fw->fw_ver))) {
3190 ret = -t4_fw_upgrade(adap, adap->mbox, fw_data,
3191 fw_size, 0);
3192 if (ret != 0) {
3193 dev_err(adap->pdev_dev,
3194 "failed to install firmware: %d\n", ret);
3195 goto bye;
3196 }
3197
3198 /* Installed successfully, update the cached header too. */
e3d50738 3199 *card_fw = *fs_fw;
16e47624
HS
3200 card_fw_usable = 1;
3201 *reset = 0; /* already reset as part of load_fw */
3202 }
3203
3204 if (!card_fw_usable) {
3205 uint32_t d, c, k;
3206
3207 d = be32_to_cpu(drv_fw->fw_ver);
3208 c = be32_to_cpu(card_fw->fw_ver);
3209 k = fs_fw ? be32_to_cpu(fs_fw->fw_ver) : 0;
3210
3211 dev_err(adap->pdev_dev, "Cannot find a usable firmware: "
3212 "chip state %d, "
3213 "driver compiled with %d.%d.%d.%d, "
3214 "card has %d.%d.%d.%d, filesystem has %d.%d.%d.%d\n",
3215 state,
b2e1a3f0
HS
3216 FW_HDR_FW_VER_MAJOR_G(d), FW_HDR_FW_VER_MINOR_G(d),
3217 FW_HDR_FW_VER_MICRO_G(d), FW_HDR_FW_VER_BUILD_G(d),
3218 FW_HDR_FW_VER_MAJOR_G(c), FW_HDR_FW_VER_MINOR_G(c),
3219 FW_HDR_FW_VER_MICRO_G(c), FW_HDR_FW_VER_BUILD_G(c),
3220 FW_HDR_FW_VER_MAJOR_G(k), FW_HDR_FW_VER_MINOR_G(k),
3221 FW_HDR_FW_VER_MICRO_G(k), FW_HDR_FW_VER_BUILD_G(k));
16e47624
HS
3222 ret = EINVAL;
3223 goto bye;
3224 }
3225
3226 /* We're using whatever's on the card and it's known to be good. */
3227 adap->params.fw_vers = be32_to_cpu(card_fw->fw_ver);
3228 adap->params.tp_vers = be32_to_cpu(card_fw->tp_microcode_ver);
3229
3230bye:
3231 return ret;
3232}
3233
56d36be4
DM
3234/**
3235 * t4_flash_erase_sectors - erase a range of flash sectors
3236 * @adapter: the adapter
3237 * @start: the first sector to erase
3238 * @end: the last sector to erase
3239 *
3240 * Erases the sectors in the given inclusive range.
3241 */
3242static int t4_flash_erase_sectors(struct adapter *adapter, int start, int end)
3243{
3244 int ret = 0;
3245
c0d5b8cf
HS
3246 if (end >= adapter->params.sf_nsec)
3247 return -EINVAL;
3248
56d36be4
DM
3249 while (start <= end) {
3250 if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
3251 (ret = sf1_write(adapter, 4, 0, 1,
3252 SF_ERASE_SECTOR | (start << 8))) != 0 ||
900a6596 3253 (ret = flash_wait_op(adapter, 14, 500)) != 0) {
56d36be4
DM
3254 dev_err(adapter->pdev_dev,
3255 "erase of flash sector %d failed, error %d\n",
3256 start, ret);
3257 break;
3258 }
3259 start++;
3260 }
0d804338 3261 t4_write_reg(adapter, SF_OP_A, 0); /* unlock SF */
56d36be4
DM
3262 return ret;
3263}
3264
636f9d37
VP
3265/**
3266 * t4_flash_cfg_addr - return the address of the flash configuration file
3267 * @adapter: the adapter
3268 *
3269 * Return the address within the flash where the Firmware Configuration
3270 * File is stored.
3271 */
3272unsigned int t4_flash_cfg_addr(struct adapter *adapter)
3273{
3274 if (adapter->params.sf_size == 0x100000)
3275 return FLASH_FPGA_CFG_START;
3276 else
3277 return FLASH_CFG_START;
3278}
3279
79af221d
HS
3280/* Return TRUE if the specified firmware matches the adapter. I.e. T4
3281 * firmware for T4 adapters, T5 firmware for T5 adapters, etc. We go ahead
3282 * and emit an error message for mismatched firmware to save our caller the
3283 * effort ...
3284 */
3285static bool t4_fw_matches_chip(const struct adapter *adap,
3286 const struct fw_hdr *hdr)
3287{
3288 /* The expression below will return FALSE for any unsupported adapter
3289 * which will keep us "honest" in the future ...
3290 */
3291 if ((is_t4(adap->params.chip) && hdr->chip == FW_HDR_CHIP_T4) ||
3ccc6cf7
HS
3292 (is_t5(adap->params.chip) && hdr->chip == FW_HDR_CHIP_T5) ||
3293 (is_t6(adap->params.chip) && hdr->chip == FW_HDR_CHIP_T6))
79af221d
HS
3294 return true;
3295
3296 dev_err(adap->pdev_dev,
3297 "FW image (%d) is not suitable for this adapter (%d)\n",
3298 hdr->chip, CHELSIO_CHIP_VERSION(adap->params.chip));
3299 return false;
3300}
3301
56d36be4
DM
3302/**
3303 * t4_load_fw - download firmware
3304 * @adap: the adapter
3305 * @fw_data: the firmware image to write
3306 * @size: image size
3307 *
3308 * Write the supplied firmware image to the card's serial flash.
3309 */
3310int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
3311{
3312 u32 csum;
3313 int ret, addr;
3314 unsigned int i;
3315 u8 first_page[SF_PAGE_SIZE];
404d9e3f 3316 const __be32 *p = (const __be32 *)fw_data;
56d36be4 3317 const struct fw_hdr *hdr = (const struct fw_hdr *)fw_data;
900a6596
DM
3318 unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
3319 unsigned int fw_img_start = adap->params.sf_fw_start;
3320 unsigned int fw_start_sec = fw_img_start / sf_sec_size;
56d36be4
DM
3321
3322 if (!size) {
3323 dev_err(adap->pdev_dev, "FW image has no data\n");
3324 return -EINVAL;
3325 }
3326 if (size & 511) {
3327 dev_err(adap->pdev_dev,
3328 "FW image size not multiple of 512 bytes\n");
3329 return -EINVAL;
3330 }
f404f80c 3331 if ((unsigned int)be16_to_cpu(hdr->len512) * 512 != size) {
56d36be4
DM
3332 dev_err(adap->pdev_dev,
3333 "FW image size differs from size in FW header\n");
3334 return -EINVAL;
3335 }
3336 if (size > FW_MAX_SIZE) {
3337 dev_err(adap->pdev_dev, "FW image too large, max is %u bytes\n",
3338 FW_MAX_SIZE);
3339 return -EFBIG;
3340 }
79af221d
HS
3341 if (!t4_fw_matches_chip(adap, hdr))
3342 return -EINVAL;
56d36be4
DM
3343
3344 for (csum = 0, i = 0; i < size / sizeof(csum); i++)
f404f80c 3345 csum += be32_to_cpu(p[i]);
56d36be4
DM
3346
3347 if (csum != 0xffffffff) {
3348 dev_err(adap->pdev_dev,
3349 "corrupted firmware image, checksum %#x\n", csum);
3350 return -EINVAL;
3351 }
3352
900a6596
DM
3353 i = DIV_ROUND_UP(size, sf_sec_size); /* # of sectors spanned */
3354 ret = t4_flash_erase_sectors(adap, fw_start_sec, fw_start_sec + i - 1);
56d36be4
DM
3355 if (ret)
3356 goto out;
3357
3358 /*
3359 * We write the correct version at the end so the driver can see a bad
3360 * version if the FW write fails. Start by writing a copy of the
3361 * first page with a bad version.
3362 */
3363 memcpy(first_page, fw_data, SF_PAGE_SIZE);
f404f80c 3364 ((struct fw_hdr *)first_page)->fw_ver = cpu_to_be32(0xffffffff);
900a6596 3365 ret = t4_write_flash(adap, fw_img_start, SF_PAGE_SIZE, first_page);
56d36be4
DM
3366 if (ret)
3367 goto out;
3368
900a6596 3369 addr = fw_img_start;
56d36be4
DM
3370 for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
3371 addr += SF_PAGE_SIZE;
3372 fw_data += SF_PAGE_SIZE;
3373 ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, fw_data);
3374 if (ret)
3375 goto out;
3376 }
3377
3378 ret = t4_write_flash(adap,
900a6596 3379 fw_img_start + offsetof(struct fw_hdr, fw_ver),
56d36be4
DM
3380 sizeof(hdr->fw_ver), (const u8 *)&hdr->fw_ver);
3381out:
3382 if (ret)
3383 dev_err(adap->pdev_dev, "firmware download failed, error %d\n",
3384 ret);
dff04bce
HS
3385 else
3386 ret = t4_get_fw_version(adap, &adap->params.fw_vers);
56d36be4
DM
3387 return ret;
3388}
3389
01b69614
HS
3390/**
3391 * t4_phy_fw_ver - return current PHY firmware version
3392 * @adap: the adapter
3393 * @phy_fw_ver: return value buffer for PHY firmware version
3394 *
3395 * Returns the current version of external PHY firmware on the
3396 * adapter.
3397 */
3398int t4_phy_fw_ver(struct adapter *adap, int *phy_fw_ver)
3399{
3400 u32 param, val;
3401 int ret;
3402
3403 param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
3404 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PHYFW) |
3405 FW_PARAMS_PARAM_Y_V(adap->params.portvec) |
3406 FW_PARAMS_PARAM_Z_V(FW_PARAMS_PARAM_DEV_PHYFW_VERSION));
b2612722 3407 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1,
01b69614
HS
3408 &param, &val);
3409 if (ret < 0)
3410 return ret;
3411 *phy_fw_ver = val;
3412 return 0;
3413}
3414
3415/**
3416 * t4_load_phy_fw - download port PHY firmware
3417 * @adap: the adapter
3418 * @win: the PCI-E Memory Window index to use for t4_memory_rw()
3419 * @win_lock: the lock to use to guard the memory copy
3420 * @phy_fw_version: function to check PHY firmware versions
3421 * @phy_fw_data: the PHY firmware image to write
3422 * @phy_fw_size: image size
3423 *
3424 * Transfer the specified PHY firmware to the adapter. If a non-NULL
3425 * @phy_fw_version is supplied, then it will be used to determine if
3426 * it's necessary to perform the transfer by comparing the version
3427 * of any existing adapter PHY firmware with that of the passed in
3428 * PHY firmware image. If @win_lock is non-NULL then it will be used
3429 * around the call to t4_memory_rw() which transfers the PHY firmware
3430 * to the adapter.
3431 *
3432 * A negative error number will be returned if an error occurs. If
3433 * version number support is available and there's no need to upgrade
3434 * the firmware, 0 will be returned. If firmware is successfully
3435 * transferred to the adapter, 1 will be returned.
3436 *
3437 * NOTE: some adapters only have local RAM to store the PHY firmware. As
3438 * a result, a RESET of the adapter would cause that RAM to lose its
3439 * contents. Thus, loading PHY firmware on such adapters must happen
3440 * after any FW_RESET_CMDs ...
3441 */
3442int t4_load_phy_fw(struct adapter *adap,
3443 int win, spinlock_t *win_lock,
3444 int (*phy_fw_version)(const u8 *, size_t),
3445 const u8 *phy_fw_data, size_t phy_fw_size)
3446{
3447 unsigned long mtype = 0, maddr = 0;
3448 u32 param, val;
3449 int cur_phy_fw_ver = 0, new_phy_fw_vers = 0;
3450 int ret;
3451
3452 /* If we have version number support, then check to see if the adapter
3453 * already has up-to-date PHY firmware loaded.
3454 */
3455 if (phy_fw_version) {
3456 new_phy_fw_vers = phy_fw_version(phy_fw_data, phy_fw_size);
3457 ret = t4_phy_fw_ver(adap, &cur_phy_fw_ver);
3458 if (ret < 0)
3459 return ret;
3460
3461 if (cur_phy_fw_ver >= new_phy_fw_vers) {
3462 CH_WARN(adap, "PHY Firmware already up-to-date, "
3463 "version %#x\n", cur_phy_fw_ver);
3464 return 0;
3465 }
3466 }
3467
3468 /* Ask the firmware where it wants us to copy the PHY firmware image.
3469 * The size of the file requires a special version of the READ command
3470 * which passes the file size via the values field of PARAMS_CMD and
3471 * retrieves the firmware's reply (the memory type and address to copy
3472 * to) in that same values field.
3473 */
3474 param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
3475 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PHYFW) |
3476 FW_PARAMS_PARAM_Y_V(adap->params.portvec) |
3477 FW_PARAMS_PARAM_Z_V(FW_PARAMS_PARAM_DEV_PHYFW_DOWNLOAD));
3478 val = phy_fw_size;
b2612722 3479 ret = t4_query_params_rw(adap, adap->mbox, adap->pf, 0, 1,
01b69614
HS
3480 &param, &val, 1);
3481 if (ret < 0)
3482 return ret;
3483 mtype = val >> 8;
3484 maddr = (val & 0xff) << 16;
3485
3486 /* Copy the supplied PHY Firmware image to the adapter memory location
3487 * allocated by the adapter firmware.
3488 */
3489 if (win_lock)
3490 spin_lock_bh(win_lock);
3491 ret = t4_memory_rw(adap, win, mtype, maddr,
3492 phy_fw_size, (__be32 *)phy_fw_data,
3493 T4_MEMORY_WRITE);
3494 if (win_lock)
3495 spin_unlock_bh(win_lock);
3496 if (ret)
3497 return ret;
3498
3499 /* Tell the firmware that the PHY firmware image has been written to
3500 * RAM and it can now start copying it over to the PHYs. The chip
3501 * firmware will RESET the affected PHYs as part of this operation
3502 * leaving them running the new PHY firmware image.
3503 */
3504 param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
3505 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PHYFW) |
3506 FW_PARAMS_PARAM_Y_V(adap->params.portvec) |
3507 FW_PARAMS_PARAM_Z_V(FW_PARAMS_PARAM_DEV_PHYFW_DOWNLOAD));
b2612722 3508 ret = t4_set_params_timeout(adap, adap->mbox, adap->pf, 0, 1,
01b69614
HS
3509 &param, &val, 30000);
3510
3511 /* If we have version number support, then check to see that the new
3512 * firmware got loaded properly.
3513 */
3514 if (phy_fw_version) {
3515 ret = t4_phy_fw_ver(adap, &cur_phy_fw_ver);
3516 if (ret < 0)
3517 return ret;
3518
3519 if (cur_phy_fw_ver != new_phy_fw_vers) {
3520 CH_WARN(adap, "PHY Firmware did not update: "
3521 "version on adapter %#x, "
3522 "version flashed %#x\n",
3523 cur_phy_fw_ver, new_phy_fw_vers);
3524 return -ENXIO;
3525 }
3526 }
3527
3528 return 1;
3529}
3530
49216c1c
HS
3531/**
3532 * t4_fwcache - firmware cache operation
3533 * @adap: the adapter
3534 * @op : the operation (flush or flush and invalidate)
3535 */
3536int t4_fwcache(struct adapter *adap, enum fw_params_param_dev_fwcache op)
3537{
3538 struct fw_params_cmd c;
3539
3540 memset(&c, 0, sizeof(c));
3541 c.op_to_vfn =
3542 cpu_to_be32(FW_CMD_OP_V(FW_PARAMS_CMD) |
3543 FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
b2612722 3544 FW_PARAMS_CMD_PFN_V(adap->pf) |
49216c1c
HS
3545 FW_PARAMS_CMD_VFN_V(0));
3546 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
3547 c.param[0].mnem =
3548 cpu_to_be32(FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
3549 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_FWCACHE));
3550 c.param[0].val = (__force __be32)op;
3551
3552 return t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), NULL);
3553}
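/* Illustrative sketch (not part of the driver): flushing the firmware cache
 * ahead of a device reset is a one-line call.  FW_PARAM_DEV_FWCACHE_FLUSH is
 * assumed here to be the flush enumerator of enum fw_params_param_dev_fwcache
 * from t4fw_api.h.
 */
static int __maybe_unused example_flush_fw_cache(struct adapter *adap)
{
	return t4_fwcache(adap, FW_PARAM_DEV_FWCACHE_FLUSH);
}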
3554
19689609
HS
3555void t4_cim_read_pif_la(struct adapter *adap, u32 *pif_req, u32 *pif_rsp,
3556 unsigned int *pif_req_wrptr,
3557 unsigned int *pif_rsp_wrptr)
3558{
3559 int i, j;
3560 u32 cfg, val, req, rsp;
3561
3562 cfg = t4_read_reg(adap, CIM_DEBUGCFG_A);
3563 if (cfg & LADBGEN_F)
3564 t4_write_reg(adap, CIM_DEBUGCFG_A, cfg ^ LADBGEN_F);
3565
3566 val = t4_read_reg(adap, CIM_DEBUGSTS_A);
3567 req = POLADBGWRPTR_G(val);
3568 rsp = PILADBGWRPTR_G(val);
3569 if (pif_req_wrptr)
3570 *pif_req_wrptr = req;
3571 if (pif_rsp_wrptr)
3572 *pif_rsp_wrptr = rsp;
3573
3574 for (i = 0; i < CIM_PIFLA_SIZE; i++) {
3575 for (j = 0; j < 6; j++) {
3576 t4_write_reg(adap, CIM_DEBUGCFG_A, POLADBGRDPTR_V(req) |
3577 PILADBGRDPTR_V(rsp));
3578 *pif_req++ = t4_read_reg(adap, CIM_PO_LA_DEBUGDATA_A);
3579 *pif_rsp++ = t4_read_reg(adap, CIM_PI_LA_DEBUGDATA_A);
3580 req++;
3581 rsp++;
3582 }
3583 req = (req + 2) & POLADBGRDPTR_M;
3584 rsp = (rsp + 2) & PILADBGRDPTR_M;
3585 }
3586 t4_write_reg(adap, CIM_DEBUGCFG_A, cfg);
3587}
3588
26fae93f
HS
3589void t4_cim_read_ma_la(struct adapter *adap, u32 *ma_req, u32 *ma_rsp)
3590{
3591 u32 cfg;
3592 int i, j, idx;
3593
3594 cfg = t4_read_reg(adap, CIM_DEBUGCFG_A);
3595 if (cfg & LADBGEN_F)
3596 t4_write_reg(adap, CIM_DEBUGCFG_A, cfg ^ LADBGEN_F);
3597
3598 for (i = 0; i < CIM_MALA_SIZE; i++) {
3599 for (j = 0; j < 5; j++) {
3600 idx = 8 * i + j;
3601 t4_write_reg(adap, CIM_DEBUGCFG_A, POLADBGRDPTR_V(idx) |
3602 PILADBGRDPTR_V(idx));
3603 *ma_req++ = t4_read_reg(adap, CIM_PO_LA_MADEBUGDATA_A);
3604 *ma_rsp++ = t4_read_reg(adap, CIM_PI_LA_MADEBUGDATA_A);
3605 }
3606 }
3607 t4_write_reg(adap, CIM_DEBUGCFG_A, cfg);
3608}
3609
797ff0f5
HS
3610void t4_ulprx_read_la(struct adapter *adap, u32 *la_buf)
3611{
3612 unsigned int i, j;
3613
3614 for (i = 0; i < 8; i++) {
3615 u32 *p = la_buf + i;
3616
3617 t4_write_reg(adap, ULP_RX_LA_CTL_A, i);
3618 j = t4_read_reg(adap, ULP_RX_LA_WRPTR_A);
3619 t4_write_reg(adap, ULP_RX_LA_RDPTR_A, j);
3620 for (j = 0; j < ULPRX_LA_SIZE; j++, p += 8)
3621 *p = t4_read_reg(adap, ULP_RX_LA_RDDATA_A);
3622 }
3623}
3624
56d36be4 3625#define ADVERT_MASK (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G |\
72aca4bf
KS
3626 FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_SPEED_40G | \
3627 FW_PORT_CAP_ANEG)
56d36be4
DM
3628
3629/**
4036da90 3630 * t4_link_l1cfg - apply link configuration to MAC/PHY
56d36be4
DM
3631 * @adap: the adapter
3632 * @mbox: mbox to use for the FW command
3633 * @port: the port id
3634 * @lc: the requested link configuration
 *
3635 * Set up a port's MAC and PHY according to a desired link configuration.
3636 * - If the PHY can auto-negotiate first decide what to advertise, then
3637 * enable/disable auto-negotiation as desired, and reset.
3638 * - If the PHY does not auto-negotiate just reset it.
3639 * - If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
3640 * otherwise do it later based on the outcome of auto-negotiation.
3641 */
4036da90 3642int t4_link_l1cfg(struct adapter *adap, unsigned int mbox, unsigned int port,
56d36be4
DM
3643 struct link_config *lc)
3644{
3645 struct fw_port_cmd c;
2b5fb1f2 3646 unsigned int fc = 0, mdi = FW_PORT_CAP_MDI_V(FW_PORT_CAP_MDI_AUTO);
56d36be4
DM
3647
3648 lc->link_ok = 0;
3649 if (lc->requested_fc & PAUSE_RX)
3650 fc |= FW_PORT_CAP_FC_RX;
3651 if (lc->requested_fc & PAUSE_TX)
3652 fc |= FW_PORT_CAP_FC_TX;
3653
3654 memset(&c, 0, sizeof(c));
f404f80c
HS
3655 c.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PORT_CMD) |
3656 FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
3657 FW_PORT_CMD_PORTID_V(port));
3658 c.action_to_len16 =
3659 cpu_to_be32(FW_PORT_CMD_ACTION_V(FW_PORT_ACTION_L1_CFG) |
3660 FW_LEN16(c));
56d36be4
DM
3661
3662 if (!(lc->supported & FW_PORT_CAP_ANEG)) {
f404f80c
HS
3663 c.u.l1cfg.rcap = cpu_to_be32((lc->supported & ADVERT_MASK) |
3664 fc);
56d36be4
DM
3665 lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
3666 } else if (lc->autoneg == AUTONEG_DISABLE) {
f404f80c 3667 c.u.l1cfg.rcap = cpu_to_be32(lc->requested_speed | fc | mdi);
56d36be4
DM
3668 lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
3669 } else
f404f80c 3670 c.u.l1cfg.rcap = cpu_to_be32(lc->advertising | fc | mdi);
56d36be4
DM
3671
3672 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
3673}
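/* Illustrative sketch (not part of the driver): a caller normally fills in
 * the "requested" fields of struct link_config and lets t4_link_l1cfg()
 * translate them into the FW_PORT_CMD above.  The pause/autoneg choices below
 * are example assumptions, not driver defaults.
 */
static int __maybe_unused example_enable_pause_aneg(struct adapter *adap,
						    unsigned int port,
						    struct link_config *lc)
{
	lc->requested_fc = PAUSE_RX | PAUSE_TX;	/* advertise both directions */
	lc->autoneg = AUTONEG_ENABLE;		/* let the link negotiate */
	return t4_link_l1cfg(adap, adap->mbox, port, lc);
}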
3674
3675/**
3676 * t4_restart_aneg - restart autonegotiation
3677 * @adap: the adapter
3678 * @mbox: mbox to use for the FW command
3679 * @port: the port id
3680 *
3681 * Restarts autonegotiation for the selected port.
3682 */
3683int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port)
3684{
3685 struct fw_port_cmd c;
3686
3687 memset(&c, 0, sizeof(c));
f404f80c
HS
3688 c.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PORT_CMD) |
3689 FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
3690 FW_PORT_CMD_PORTID_V(port));
3691 c.action_to_len16 =
3692 cpu_to_be32(FW_PORT_CMD_ACTION_V(FW_PORT_ACTION_L1_CFG) |
3693 FW_LEN16(c));
3694 c.u.l1cfg.rcap = cpu_to_be32(FW_PORT_CAP_ANEG);
56d36be4
DM
3695 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
3696}
3697
8caa1e84
VP
3698typedef void (*int_handler_t)(struct adapter *adap);
3699
56d36be4
DM
3700struct intr_info {
3701 unsigned int mask; /* bits to check in interrupt status */
3702 const char *msg; /* message to print or NULL */
3703 short stat_idx; /* stat counter to increment or -1 */
3704 unsigned short fatal; /* whether the condition reported is fatal */
8caa1e84 3705 int_handler_t int_handler; /* platform-specific int handler */
56d36be4
DM
3706};
3707
3708/**
3709 * t4_handle_intr_status - table driven interrupt handler
3710 * @adapter: the adapter that generated the interrupt
3711 * @reg: the interrupt status register to process
3712 * @acts: table of interrupt actions
3713 *
3714 * A table driven interrupt handler that applies a set of masks to an
3715 * interrupt status word and performs the corresponding actions if the
25985edc 3716 * interrupts described by the mask have occurred. The actions include
56d36be4
DM
3717 * optionally emitting a warning or alert message. The table is terminated
3718 * by an entry specifying mask 0. Returns the number of fatal interrupt
3719 * conditions.
3720 */
3721static int t4_handle_intr_status(struct adapter *adapter, unsigned int reg,
3722 const struct intr_info *acts)
3723{
3724 int fatal = 0;
3725 unsigned int mask = 0;
3726 unsigned int status = t4_read_reg(adapter, reg);
3727
3728 for ( ; acts->mask; ++acts) {
3729 if (!(status & acts->mask))
3730 continue;
3731 if (acts->fatal) {
3732 fatal++;
3733 dev_alert(adapter->pdev_dev, "%s (0x%x)\n", acts->msg,
3734 status & acts->mask);
3735 } else if (acts->msg && printk_ratelimit())
3736 dev_warn(adapter->pdev_dev, "%s (0x%x)\n", acts->msg,
3737 status & acts->mask);
8caa1e84
VP
3738 if (acts->int_handler)
3739 acts->int_handler(adapter);
56d36be4
DM
3740 mask |= acts->mask;
3741 }
3742 status &= mask;
3743 if (status) /* clear processed interrupts */
3744 t4_write_reg(adapter, reg, status);
3745 return fatal;
3746}
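/* Illustrative sketch of how the table-driven handler is used: one intr_info
 * entry per status bit, a zero mask to terminate, and a fatal-error escalation
 * if any fatal condition fired.  EXAMPLE_ERR_F and EXAMPLE_CAUSE_A are
 * placeholders rather than real register definitions, so the sketch is kept
 * compiled out.
 */
#if 0	/* example only */
static void example_module_intr_handler(struct adapter *adapter)
{
	static const struct intr_info example_intr_info[] = {
		{ EXAMPLE_ERR_F, "example module parity error", -1, 1 },
		{ 0 }
	};

	if (t4_handle_intr_status(adapter, EXAMPLE_CAUSE_A, example_intr_info))
		t4_fatal_err(adapter);
}
#endif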
3747
3748/*
3749 * Interrupt handler for the PCIE module.
3750 */
3751static void pcie_intr_handler(struct adapter *adapter)
3752{
005b5717 3753 static const struct intr_info sysbus_intr_info[] = {
f061de42
HS
3754 { RNPP_F, "RXNP array parity error", -1, 1 },
3755 { RPCP_F, "RXPC array parity error", -1, 1 },
3756 { RCIP_F, "RXCIF array parity error", -1, 1 },
3757 { RCCP_F, "Rx completions control array parity error", -1, 1 },
3758 { RFTP_F, "RXFT array parity error", -1, 1 },
56d36be4
DM
3759 { 0 }
3760 };
005b5717 3761 static const struct intr_info pcie_port_intr_info[] = {
f061de42
HS
3762 { TPCP_F, "TXPC array parity error", -1, 1 },
3763 { TNPP_F, "TXNP array parity error", -1, 1 },
3764 { TFTP_F, "TXFT array parity error", -1, 1 },
3765 { TCAP_F, "TXCA array parity error", -1, 1 },
3766 { TCIP_F, "TXCIF array parity error", -1, 1 },
3767 { RCAP_F, "RXCA array parity error", -1, 1 },
3768 { OTDD_F, "outbound request TLP discarded", -1, 1 },
3769 { RDPE_F, "Rx data parity error", -1, 1 },
3770 { TDUE_F, "Tx uncorrectable data error", -1, 1 },
3771 { 0 }
3772 };
005b5717 3773 static const struct intr_info pcie_intr_info[] = {
3774 { MSIADDRLPERR_F, "MSI AddrL parity error", -1, 1 },
3775 { MSIADDRHPERR_F, "MSI AddrH parity error", -1, 1 },
3776 { MSIDATAPERR_F, "MSI data parity error", -1, 1 },
3777 { MSIXADDRLPERR_F, "MSI-X AddrL parity error", -1, 1 },
3778 { MSIXADDRHPERR_F, "MSI-X AddrH parity error", -1, 1 },
3779 { MSIXDATAPERR_F, "MSI-X data parity error", -1, 1 },
3780 { MSIXDIPERR_F, "MSI-X DI parity error", -1, 1 },
3781 { PIOCPLPERR_F, "PCI PIO completion FIFO parity error", -1, 1 },
3782 { PIOREQPERR_F, "PCI PIO request FIFO parity error", -1, 1 },
3783 { TARTAGPERR_F, "PCI PCI target tag FIFO parity error", -1, 1 },
3784 { CCNTPERR_F, "PCI CMD channel count parity error", -1, 1 },
3785 { CREQPERR_F, "PCI CMD channel request parity error", -1, 1 },
3786 { CRSPPERR_F, "PCI CMD channel response parity error", -1, 1 },
3787 { DCNTPERR_F, "PCI DMA channel count parity error", -1, 1 },
3788 { DREQPERR_F, "PCI DMA channel request parity error", -1, 1 },
3789 { DRSPPERR_F, "PCI DMA channel response parity error", -1, 1 },
3790 { HCNTPERR_F, "PCI HMA channel count parity error", -1, 1 },
3791 { HREQPERR_F, "PCI HMA channel request parity error", -1, 1 },
3792 { HRSPPERR_F, "PCI HMA channel response parity error", -1, 1 },
3793 { CFGSNPPERR_F, "PCI config snoop FIFO parity error", -1, 1 },
3794 { FIDPERR_F, "PCI FID parity error", -1, 1 },
3795 { INTXCLRPERR_F, "PCI INTx clear parity error", -1, 1 },
3796 { MATAGPERR_F, "PCI MA tag parity error", -1, 1 },
3797 { PIOTAGPERR_F, "PCI PIO tag parity error", -1, 1 },
3798 { RXCPLPERR_F, "PCI Rx completion parity error", -1, 1 },
3799 { RXWRPERR_F, "PCI Rx write parity error", -1, 1 },
3800 { RPLPERR_F, "PCI replay buffer parity error", -1, 1 },
3801 { PCIESINT_F, "PCI core secondary fault", -1, 1 },
3802 { PCIEPINT_F, "PCI core primary fault", -1, 1 },
3803 { UNXSPLCPLERR_F, "PCI unexpected split completion error",
3804 -1, 0 },
3805 { 0 }
3806 };
3807
0a57a536 3808 static struct intr_info t5_pcie_intr_info[] = {
f061de42 3809 { MSTGRPPERR_F, "Master Response Read Queue parity error",
0a57a536 3810 -1, 1 },
3811 { MSTTIMEOUTPERR_F, "Master Timeout FIFO parity error", -1, 1 },
3812 { MSIXSTIPERR_F, "MSI-X STI SRAM parity error", -1, 1 },
3813 { MSIXADDRLPERR_F, "MSI-X AddrL parity error", -1, 1 },
3814 { MSIXADDRHPERR_F, "MSI-X AddrH parity error", -1, 1 },
3815 { MSIXDATAPERR_F, "MSI-X data parity error", -1, 1 },
3816 { MSIXDIPERR_F, "MSI-X DI parity error", -1, 1 },
3817 { PIOCPLGRPPERR_F, "PCI PIO completion Group FIFO parity error",
0a57a536 3818 -1, 1 },
f061de42 3819 { PIOREQGRPPERR_F, "PCI PIO request Group FIFO parity error",
0a57a536 3820 -1, 1 },
3821 { TARTAGPERR_F, "PCI PCI target tag FIFO parity error", -1, 1 },
3822 { MSTTAGQPERR_F, "PCI master tag queue parity error", -1, 1 },
3823 { CREQPERR_F, "PCI CMD channel request parity error", -1, 1 },
3824 { CRSPPERR_F, "PCI CMD channel response parity error", -1, 1 },
3825 { DREQWRPERR_F, "PCI DMA channel write request parity error",
0a57a536 3826 -1, 1 },
3827 { DREQPERR_F, "PCI DMA channel request parity error", -1, 1 },
3828 { DRSPPERR_F, "PCI DMA channel response parity error", -1, 1 },
3829 { HREQWRPERR_F, "PCI HMA channel count parity error", -1, 1 },
3830 { HREQPERR_F, "PCI HMA channel request parity error", -1, 1 },
3831 { HRSPPERR_F, "PCI HMA channel response parity error", -1, 1 },
3832 { CFGSNPPERR_F, "PCI config snoop FIFO parity error", -1, 1 },
3833 { FIDPERR_F, "PCI FID parity error", -1, 1 },
3834 { VFIDPERR_F, "PCI INTx clear parity error", -1, 1 },
3835 { MAGRPPERR_F, "PCI MA group FIFO parity error", -1, 1 },
3836 { PIOTAGPERR_F, "PCI PIO tag parity error", -1, 1 },
3837 { IPRXHDRGRPPERR_F, "PCI IP Rx header group parity error",
0a57a536 3838 -1, 1 },
3839 { IPRXDATAGRPPERR_F, "PCI IP Rx data group parity error",
3840 -1, 1 },
3841 { RPLPERR_F, "PCI IP replay buffer parity error", -1, 1 },
3842 { IPSOTPERR_F, "PCI IP SOT buffer parity error", -1, 1 },
3843 { TRGT1GRPPERR_F, "PCI TRGT1 group FIFOs parity error", -1, 1 },
3844 { READRSPERR_F, "Outbound read error", -1, 0 },
3845 { 0 }
3846 };
3847
3848 int fat;
3849
3850 if (is_t4(adapter->params.chip))
3851 fat = t4_handle_intr_status(adapter,
3852 PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS_A,
3853 sysbus_intr_info) +
9bb59b96 3854 t4_handle_intr_status(adapter,
3855 PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS_A,
3856 pcie_port_intr_info) +
3857 t4_handle_intr_status(adapter, PCIE_INT_CAUSE_A,
3858 pcie_intr_info);
3859 else
f061de42 3860 fat = t4_handle_intr_status(adapter, PCIE_INT_CAUSE_A,
9bb59b96 3861 t5_pcie_intr_info);
0a57a536 3862
3863 if (fat)
3864 t4_fatal_err(adapter);
3865}
3866
3867/*
3868 * TP interrupt handler.
3869 */
3870static void tp_intr_handler(struct adapter *adapter)
3871{
005b5717 3872 static const struct intr_info tp_intr_info[] = {
56d36be4 3873 { 0x3fffffff, "TP parity error", -1, 1 },
837e4a42 3874 { FLMTXFLSTEMPTY_F, "TP out of Tx pages", -1, 1 },
3875 { 0 }
3876 };
3877
837e4a42 3878 if (t4_handle_intr_status(adapter, TP_INT_CAUSE_A, tp_intr_info))
3879 t4_fatal_err(adapter);
3880}
3881
3882/*
3883 * SGE interrupt handler.
3884 */
3885static void sge_intr_handler(struct adapter *adapter)
3886{
3887 u64 v;
3ccc6cf7 3888 u32 err;
56d36be4 3889
005b5717 3890 static const struct intr_info sge_intr_info[] = {
f612b815 3891 { ERR_CPL_EXCEED_IQE_SIZE_F,
56d36be4 3892 "SGE received CPL exceeding IQE size", -1, 1 },
f612b815 3893 { ERR_INVALID_CIDX_INC_F,
56d36be4 3894 "SGE GTS CIDX increment too large", -1, 0 },
3895 { ERR_CPL_OPCODE_0_F, "SGE received 0-length CPL", -1, 0 },
3896 { DBFIFO_LP_INT_F, NULL, -1, 0, t4_db_full },
f612b815 3897 { ERR_DATA_CPL_ON_HIGH_QID1_F | ERR_DATA_CPL_ON_HIGH_QID0_F,
56d36be4 3898 "SGE IQID > 1023 received CPL for FL", -1, 0 },
f612b815 3899 { ERR_BAD_DB_PIDX3_F, "SGE DBP 3 pidx increment too large", -1,
56d36be4 3900 0 },
f612b815 3901 { ERR_BAD_DB_PIDX2_F, "SGE DBP 2 pidx increment too large", -1,
56d36be4 3902 0 },
f612b815 3903 { ERR_BAD_DB_PIDX1_F, "SGE DBP 1 pidx increment too large", -1,
56d36be4 3904 0 },
f612b815 3905 { ERR_BAD_DB_PIDX0_F, "SGE DBP 0 pidx increment too large", -1,
56d36be4 3906 0 },
f612b815 3907 { ERR_ING_CTXT_PRIO_F,
56d36be4 3908 "SGE too many priority ingress contexts", -1, 0 },
3909 { INGRESS_SIZE_ERR_F, "SGE illegal ingress QID", -1, 0 },
3910 { EGRESS_SIZE_ERR_F, "SGE illegal egress QID", -1, 0 },
3911 { 0 }
3912 };
3913
3914 static struct intr_info t4t5_sge_intr_info[] = {
3915 { ERR_DROPPED_DB_F, NULL, -1, 0, t4_db_dropped },
3916 { DBFIFO_HP_INT_F, NULL, -1, 0, t4_db_full },
3917 { ERR_EGR_CTXT_PRIO_F,
3918 "SGE too many priority egress contexts", -1, 0 },
3919 { 0 }
3920 };
3921
3922 v = (u64)t4_read_reg(adapter, SGE_INT_CAUSE1_A) |
3923 ((u64)t4_read_reg(adapter, SGE_INT_CAUSE2_A) << 32);
3924 if (v) {
3925 dev_alert(adapter->pdev_dev, "SGE parity error (%#llx)\n",
8caa1e84 3926 (unsigned long long)v);
3927 t4_write_reg(adapter, SGE_INT_CAUSE1_A, v);
3928 t4_write_reg(adapter, SGE_INT_CAUSE2_A, v >> 32);
3929 }
3930
3931 v |= t4_handle_intr_status(adapter, SGE_INT_CAUSE3_A, sge_intr_info);
3932 if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5)
3933 v |= t4_handle_intr_status(adapter, SGE_INT_CAUSE3_A,
3934 t4t5_sge_intr_info);
3935
3936 err = t4_read_reg(adapter, SGE_ERROR_STATS_A);
3937 if (err & ERROR_QID_VALID_F) {
3938 dev_err(adapter->pdev_dev, "SGE error for queue %u\n",
3939 ERROR_QID_G(err));
3940 if (err & UNCAPTURED_ERROR_F)
3941 dev_err(adapter->pdev_dev,
3942 "SGE UNCAPTURED_ERROR set (clearing)\n");
3943 t4_write_reg(adapter, SGE_ERROR_STATS_A, ERROR_QID_VALID_F |
3944 UNCAPTURED_ERROR_F);
3945 }
3946
3947 if (v != 0)
3948 t4_fatal_err(adapter);
3949}
3950
3951#define CIM_OBQ_INTR (OBQULP0PARERR_F | OBQULP1PARERR_F | OBQULP2PARERR_F |\
3952 OBQULP3PARERR_F | OBQSGEPARERR_F | OBQNCSIPARERR_F)
3953#define CIM_IBQ_INTR (IBQTP0PARERR_F | IBQTP1PARERR_F | IBQULPPARERR_F |\
3954 IBQSGEHIPARERR_F | IBQSGELOPARERR_F | IBQNCSIPARERR_F)
3955
3956/*
3957 * CIM interrupt handler.
3958 */
3959static void cim_intr_handler(struct adapter *adapter)
3960{
005b5717 3961 static const struct intr_info cim_intr_info[] = {
3962 { PREFDROPINT_F, "CIM control register prefetch drop", -1, 1 },
3963 { CIM_OBQ_INTR, "CIM OBQ parity error", -1, 1 },
3964 { CIM_IBQ_INTR, "CIM IBQ parity error", -1, 1 },
3965 { MBUPPARERR_F, "CIM mailbox uP parity error", -1, 1 },
3966 { MBHOSTPARERR_F, "CIM mailbox host parity error", -1, 1 },
3967 { TIEQINPARERRINT_F, "CIM TIEQ outgoing parity error", -1, 1 },
3968 { TIEQOUTPARERRINT_F, "CIM TIEQ incoming parity error", -1, 1 },
3969 { 0 }
3970 };
005b5717 3971 static const struct intr_info cim_upintr_info[] = {
3972 { RSVDSPACEINT_F, "CIM reserved space access", -1, 1 },
3973 { ILLTRANSINT_F, "CIM illegal transaction", -1, 1 },
3974 { ILLWRINT_F, "CIM illegal write", -1, 1 },
3975 { ILLRDINT_F, "CIM illegal read", -1, 1 },
3976 { ILLRDBEINT_F, "CIM illegal read BE", -1, 1 },
3977 { ILLWRBEINT_F, "CIM illegal write BE", -1, 1 },
3978 { SGLRDBOOTINT_F, "CIM single read from boot space", -1, 1 },
3979 { SGLWRBOOTINT_F, "CIM single write to boot space", -1, 1 },
3980 { BLKWRBOOTINT_F, "CIM block write to boot space", -1, 1 },
3981 { SGLRDFLASHINT_F, "CIM single read from flash space", -1, 1 },
3982 { SGLWRFLASHINT_F, "CIM single write to flash space", -1, 1 },
3983 { BLKWRFLASHINT_F, "CIM block write to flash space", -1, 1 },
3984 { SGLRDEEPROMINT_F, "CIM single EEPROM read", -1, 1 },
3985 { SGLWREEPROMINT_F, "CIM single EEPROM write", -1, 1 },
3986 { BLKRDEEPROMINT_F, "CIM block EEPROM read", -1, 1 },
3987 { BLKWREEPROMINT_F, "CIM block EEPROM write", -1, 1 },
3988 { SGLRDCTLINT_F, "CIM single read from CTL space", -1, 1 },
3989 { SGLWRCTLINT_F, "CIM single write to CTL space", -1, 1 },
3990 { BLKRDCTLINT_F, "CIM block read from CTL space", -1, 1 },
3991 { BLKWRCTLINT_F, "CIM block write to CTL space", -1, 1 },
3992 { SGLRDPLINT_F, "CIM single read from PL space", -1, 1 },
3993 { SGLWRPLINT_F, "CIM single write to PL space", -1, 1 },
3994 { BLKRDPLINT_F, "CIM block read from PL space", -1, 1 },
3995 { BLKWRPLINT_F, "CIM block write to PL space", -1, 1 },
3996 { REQOVRLOOKUPINT_F, "CIM request FIFO overwrite", -1, 1 },
3997 { RSPOVRLOOKUPINT_F, "CIM response FIFO overwrite", -1, 1 },
3998 { TIMEOUTINT_F, "CIM PIF timeout", -1, 1 },
3999 { TIMEOUTMAINT_F, "CIM PIF MA timeout", -1, 1 },
4000 { 0 }
4001 };
4002
4003 int fat;
4004
f061de42 4005 if (t4_read_reg(adapter, PCIE_FW_A) & PCIE_FW_ERR_F)
4006 t4_report_fw_error(adapter);
4007
89c3a86c 4008 fat = t4_handle_intr_status(adapter, CIM_HOST_INT_CAUSE_A,
56d36be4 4009 cim_intr_info) +
89c3a86c 4010 t4_handle_intr_status(adapter, CIM_HOST_UPACC_INT_CAUSE_A,
4011 cim_upintr_info);
4012 if (fat)
4013 t4_fatal_err(adapter);
4014}
4015
4016/*
4017 * ULP RX interrupt handler.
4018 */
4019static void ulprx_intr_handler(struct adapter *adapter)
4020{
005b5717 4021 static const struct intr_info ulprx_intr_info[] = {
91e9a1ec 4022 { 0x1800000, "ULPRX context error", -1, 1 },
4023 { 0x7fffff, "ULPRX parity error", -1, 1 },
4024 { 0 }
4025 };
4026
0d804338 4027 if (t4_handle_intr_status(adapter, ULP_RX_INT_CAUSE_A, ulprx_intr_info))
4028 t4_fatal_err(adapter);
4029}
4030
4031/*
4032 * ULP TX interrupt handler.
4033 */
4034static void ulptx_intr_handler(struct adapter *adapter)
4035{
005b5717 4036 static const struct intr_info ulptx_intr_info[] = {
837e4a42 4037 { PBL_BOUND_ERR_CH3_F, "ULPTX channel 3 PBL out of bounds", -1,
56d36be4 4038 0 },
837e4a42 4039 { PBL_BOUND_ERR_CH2_F, "ULPTX channel 2 PBL out of bounds", -1,
56d36be4 4040 0 },
837e4a42 4041 { PBL_BOUND_ERR_CH1_F, "ULPTX channel 1 PBL out of bounds", -1,
56d36be4 4042 0 },
837e4a42 4043 { PBL_BOUND_ERR_CH0_F, "ULPTX channel 0 PBL out of bounds", -1,
4044 0 },
4045 { 0xfffffff, "ULPTX parity error", -1, 1 },
4046 { 0 }
4047 };
4048
837e4a42 4049 if (t4_handle_intr_status(adapter, ULP_TX_INT_CAUSE_A, ulptx_intr_info))
4050 t4_fatal_err(adapter);
4051}
4052
4053/*
4054 * PM TX interrupt handler.
4055 */
4056static void pmtx_intr_handler(struct adapter *adapter)
4057{
005b5717 4058 static const struct intr_info pmtx_intr_info[] = {
4059 { PCMD_LEN_OVFL0_F, "PMTX channel 0 pcmd too large", -1, 1 },
4060 { PCMD_LEN_OVFL1_F, "PMTX channel 1 pcmd too large", -1, 1 },
4061 { PCMD_LEN_OVFL2_F, "PMTX channel 2 pcmd too large", -1, 1 },
4062 { ZERO_C_CMD_ERROR_F, "PMTX 0-length pcmd", -1, 1 },
4063 { PMTX_FRAMING_ERROR_F, "PMTX framing error", -1, 1 },
4064 { OESPI_PAR_ERROR_F, "PMTX oespi parity error", -1, 1 },
4065 { DB_OPTIONS_PAR_ERROR_F, "PMTX db_options parity error",
4066 -1, 1 },
4067 { ICSPI_PAR_ERROR_F, "PMTX icspi parity error", -1, 1 },
4068 { PMTX_C_PCMD_PAR_ERROR_F, "PMTX c_pcmd parity error", -1, 1},
4069 { 0 }
4070 };
4071
837e4a42 4072 if (t4_handle_intr_status(adapter, PM_TX_INT_CAUSE_A, pmtx_intr_info))
4073 t4_fatal_err(adapter);
4074}
4075
4076/*
4077 * PM RX interrupt handler.
4078 */
4079static void pmrx_intr_handler(struct adapter *adapter)
4080{
005b5717 4081 static const struct intr_info pmrx_intr_info[] = {
4082 { ZERO_E_CMD_ERROR_F, "PMRX 0-length pcmd", -1, 1 },
4083 { PMRX_FRAMING_ERROR_F, "PMRX framing error", -1, 1 },
4084 { OCSPI_PAR_ERROR_F, "PMRX ocspi parity error", -1, 1 },
4085 { DB_OPTIONS_PAR_ERROR_F, "PMRX db_options parity error",
4086 -1, 1 },
4087 { IESPI_PAR_ERROR_F, "PMRX iespi parity error", -1, 1 },
4088 { PMRX_E_PCMD_PAR_ERROR_F, "PMRX e_pcmd parity error", -1, 1},
4089 { 0 }
4090 };
4091
837e4a42 4092 if (t4_handle_intr_status(adapter, PM_RX_INT_CAUSE_A, pmrx_intr_info))
4093 t4_fatal_err(adapter);
4094}
4095
4096/*
4097 * CPL switch interrupt handler.
4098 */
4099static void cplsw_intr_handler(struct adapter *adapter)
4100{
005b5717 4101 static const struct intr_info cplsw_intr_info[] = {
4102 { CIM_OP_MAP_PERR_F, "CPLSW CIM op_map parity error", -1, 1 },
4103 { CIM_OVFL_ERROR_F, "CPLSW CIM overflow", -1, 1 },
4104 { TP_FRAMING_ERROR_F, "CPLSW TP framing error", -1, 1 },
4105 { SGE_FRAMING_ERROR_F, "CPLSW SGE framing error", -1, 1 },
4106 { CIM_FRAMING_ERROR_F, "CPLSW CIM framing error", -1, 1 },
4107 { ZERO_SWITCH_ERROR_F, "CPLSW no-switch error", -1, 1 },
4108 { 0 }
4109 };
4110
0d804338 4111 if (t4_handle_intr_status(adapter, CPL_INTR_CAUSE_A, cplsw_intr_info))
4112 t4_fatal_err(adapter);
4113}
4114
4115/*
4116 * LE interrupt handler.
4117 */
4118static void le_intr_handler(struct adapter *adap)
4119{
3ccc6cf7 4120 enum chip_type chip = CHELSIO_CHIP_VERSION(adap->params.chip);
005b5717 4121 static const struct intr_info le_intr_info[] = {
4122 { LIPMISS_F, "LE LIP miss", -1, 0 },
4123 { LIP0_F, "LE 0 LIP error", -1, 0 },
4124 { PARITYERR_F, "LE parity error", -1, 1 },
4125 { UNKNOWNCMD_F, "LE unknown command", -1, 1 },
4126 { REQQPARERR_F, "LE request queue parity error", -1, 1 },
4127 { 0 }
4128 };
4129
4130 static struct intr_info t6_le_intr_info[] = {
4131 { T6_LIPMISS_F, "LE LIP miss", -1, 0 },
4132 { T6_LIP0_F, "LE 0 LIP error", -1, 0 },
4133 { TCAMINTPERR_F, "LE parity error", -1, 1 },
4134 { T6_UNKNOWNCMD_F, "LE unknown command", -1, 1 },
4135 { SSRAMINTPERR_F, "LE request queue parity error", -1, 1 },
4136 { 0 }
4137 };
4138
4139 if (t4_handle_intr_status(adap, LE_DB_INT_CAUSE_A,
4140 (chip <= CHELSIO_T5) ?
4141 le_intr_info : t6_le_intr_info))
4142 t4_fatal_err(adap);
4143}
4144
4145/*
4146 * MPS interrupt handler.
4147 */
4148static void mps_intr_handler(struct adapter *adapter)
4149{
005b5717 4150 static const struct intr_info mps_rx_intr_info[] = {
4151 { 0xffffff, "MPS Rx parity error", -1, 1 },
4152 { 0 }
4153 };
005b5717 4154 static const struct intr_info mps_tx_intr_info[] = {
4155 { TPFIFO_V(TPFIFO_M), "MPS Tx TP FIFO parity error", -1, 1 },
4156 { NCSIFIFO_F, "MPS Tx NC-SI FIFO parity error", -1, 1 },
4157 { TXDATAFIFO_V(TXDATAFIFO_M), "MPS Tx data FIFO parity error",
4158 -1, 1 },
4159 { TXDESCFIFO_V(TXDESCFIFO_M), "MPS Tx desc FIFO parity error",
4160 -1, 1 },
4161 { BUBBLE_F, "MPS Tx underflow", -1, 1 },
4162 { SECNTERR_F, "MPS Tx SOP/EOP error", -1, 1 },
4163 { FRMERR_F, "MPS Tx framing error", -1, 1 },
4164 { 0 }
4165 };
005b5717 4166 static const struct intr_info mps_trc_intr_info[] = {
4167 { FILTMEM_V(FILTMEM_M), "MPS TRC filter parity error", -1, 1 },
4168 { PKTFIFO_V(PKTFIFO_M), "MPS TRC packet FIFO parity error",
4169 -1, 1 },
4170 { MISCPERR_F, "MPS TRC misc parity error", -1, 1 },
4171 { 0 }
4172 };
005b5717 4173 static const struct intr_info mps_stat_sram_intr_info[] = {
4174 { 0x1fffff, "MPS statistics SRAM parity error", -1, 1 },
4175 { 0 }
4176 };
005b5717 4177 static const struct intr_info mps_stat_tx_intr_info[] = {
4178 { 0xfffff, "MPS statistics Tx FIFO parity error", -1, 1 },
4179 { 0 }
4180 };
005b5717 4181 static const struct intr_info mps_stat_rx_intr_info[] = {
4182 { 0xffffff, "MPS statistics Rx FIFO parity error", -1, 1 },
4183 { 0 }
4184 };
005b5717 4185 static const struct intr_info mps_cls_intr_info[] = {
4186 { MATCHSRAM_F, "MPS match SRAM parity error", -1, 1 },
4187 { MATCHTCAM_F, "MPS match TCAM parity error", -1, 1 },
4188 { HASHSRAM_F, "MPS hash SRAM parity error", -1, 1 },
4189 { 0 }
4190 };
4191
4192 int fat;
4193
837e4a42 4194 fat = t4_handle_intr_status(adapter, MPS_RX_PERR_INT_CAUSE_A,
56d36be4 4195 mps_rx_intr_info) +
837e4a42 4196 t4_handle_intr_status(adapter, MPS_TX_INT_CAUSE_A,
56d36be4 4197 mps_tx_intr_info) +
837e4a42 4198 t4_handle_intr_status(adapter, MPS_TRC_INT_CAUSE_A,
56d36be4 4199 mps_trc_intr_info) +
837e4a42 4200 t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_SRAM_A,
56d36be4 4201 mps_stat_sram_intr_info) +
837e4a42 4202 t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_TX_FIFO_A,
56d36be4 4203 mps_stat_tx_intr_info) +
837e4a42 4204 t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_RX_FIFO_A,
56d36be4 4205 mps_stat_rx_intr_info) +
837e4a42 4206 t4_handle_intr_status(adapter, MPS_CLS_INT_CAUSE_A,
4207 mps_cls_intr_info);
4208
4209 t4_write_reg(adapter, MPS_INT_CAUSE_A, 0);
4210 t4_read_reg(adapter, MPS_INT_CAUSE_A); /* flush */
4211 if (fat)
4212 t4_fatal_err(adapter);
4213}
4214
4215#define MEM_INT_MASK (PERR_INT_CAUSE_F | ECC_CE_INT_CAUSE_F | \
4216 ECC_UE_INT_CAUSE_F)
4217
4218/*
4219 * EDC/MC interrupt handler.
4220 */
4221static void mem_intr_handler(struct adapter *adapter, int idx)
4222{
822dd8a8 4223 static const char name[4][7] = { "EDC0", "EDC1", "MC/MC0", "MC1" };
4224
4225 unsigned int addr, cnt_addr, v;
4226
4227 if (idx <= MEM_EDC1) {
4228 addr = EDC_REG(EDC_INT_CAUSE_A, idx);
4229 cnt_addr = EDC_REG(EDC_ECC_STATUS_A, idx);
4230 } else if (idx == MEM_MC) {
4231 if (is_t4(adapter->params.chip)) {
4232 addr = MC_INT_CAUSE_A;
4233 cnt_addr = MC_ECC_STATUS_A;
822dd8a8 4234 } else {
4235 addr = MC_P_INT_CAUSE_A;
4236 cnt_addr = MC_P_ECC_STATUS_A;
822dd8a8 4237 }
56d36be4 4238 } else {
4239 addr = MC_REG(MC_P_INT_CAUSE_A, 1);
4240 cnt_addr = MC_REG(MC_P_ECC_STATUS_A, 1);
4241 }
4242
4243 v = t4_read_reg(adapter, addr) & MEM_INT_MASK;
89c3a86c 4244 if (v & PERR_INT_CAUSE_F)
4245 dev_alert(adapter->pdev_dev, "%s FIFO parity error\n",
4246 name[idx]);
4247 if (v & ECC_CE_INT_CAUSE_F) {
4248 u32 cnt = ECC_CECNT_G(t4_read_reg(adapter, cnt_addr));
56d36be4 4249
4250 t4_edc_err_read(adapter, idx);
4251
89c3a86c 4252 t4_write_reg(adapter, cnt_addr, ECC_CECNT_V(ECC_CECNT_M));
4253 if (printk_ratelimit())
4254 dev_warn(adapter->pdev_dev,
4255 "%u %s correctable ECC data error%s\n",
4256 cnt, name[idx], cnt > 1 ? "s" : "");
4257 }
89c3a86c 4258 if (v & ECC_UE_INT_CAUSE_F)
4259 dev_alert(adapter->pdev_dev,
4260 "%s uncorrectable ECC data error\n", name[idx]);
4261
4262 t4_write_reg(adapter, addr, v);
89c3a86c 4263 if (v & (PERR_INT_CAUSE_F | ECC_UE_INT_CAUSE_F))
4264 t4_fatal_err(adapter);
4265}
4266
4267/*
4268 * MA interrupt handler.
4269 */
4270static void ma_intr_handler(struct adapter *adap)
4271{
89c3a86c 4272 u32 v, status = t4_read_reg(adap, MA_INT_CAUSE_A);
56d36be4 4273
89c3a86c 4274 if (status & MEM_PERR_INT_CAUSE_F) {
4275 dev_alert(adap->pdev_dev,
4276 "MA parity error, parity status %#x\n",
89c3a86c 4277 t4_read_reg(adap, MA_PARITY_ERROR_STATUS1_A));
4278 if (is_t5(adap->params.chip))
4279 dev_alert(adap->pdev_dev,
4280 "MA parity error, parity status %#x\n",
4281 t4_read_reg(adap,
89c3a86c 4282 MA_PARITY_ERROR_STATUS2_A));
9bb59b96 4283 }
4284 if (status & MEM_WRAP_INT_CAUSE_F) {
4285 v = t4_read_reg(adap, MA_INT_WRAP_STATUS_A);
4286 dev_alert(adap->pdev_dev, "MA address wrap-around error by "
4287 "client %u to address %#x\n",
4288 MEM_WRAP_CLIENT_NUM_G(v),
4289 MEM_WRAP_ADDRESS_G(v) << 4);
56d36be4 4290 }
89c3a86c 4291 t4_write_reg(adap, MA_INT_CAUSE_A, status);
4292 t4_fatal_err(adap);
4293}
4294
4295/*
4296 * SMB interrupt handler.
4297 */
4298static void smb_intr_handler(struct adapter *adap)
4299{
005b5717 4300 static const struct intr_info smb_intr_info[] = {
4301 { MSTTXFIFOPARINT_F, "SMB master Tx FIFO parity error", -1, 1 },
4302 { MSTRXFIFOPARINT_F, "SMB master Rx FIFO parity error", -1, 1 },
4303 { SLVFIFOPARINT_F, "SMB slave FIFO parity error", -1, 1 },
4304 { 0 }
4305 };
4306
0d804338 4307 if (t4_handle_intr_status(adap, SMB_INT_CAUSE_A, smb_intr_info))
4308 t4_fatal_err(adap);
4309}
4310
4311/*
4312 * NC-SI interrupt handler.
4313 */
4314static void ncsi_intr_handler(struct adapter *adap)
4315{
005b5717 4316 static const struct intr_info ncsi_intr_info[] = {
4317 { CIM_DM_PRTY_ERR_F, "NC-SI CIM parity error", -1, 1 },
4318 { MPS_DM_PRTY_ERR_F, "NC-SI MPS parity error", -1, 1 },
4319 { TXFIFO_PRTY_ERR_F, "NC-SI Tx FIFO parity error", -1, 1 },
4320 { RXFIFO_PRTY_ERR_F, "NC-SI Rx FIFO parity error", -1, 1 },
4321 { 0 }
4322 };
4323
0d804338 4324 if (t4_handle_intr_status(adap, NCSI_INT_CAUSE_A, ncsi_intr_info))
4325 t4_fatal_err(adap);
4326}
4327
4328/*
4329 * XGMAC interrupt handler.
4330 */
4331static void xgmac_intr_handler(struct adapter *adap, int port)
4332{
4333 u32 v, int_cause_reg;
4334
d14807dd 4335 if (is_t4(adap->params.chip))
0d804338 4336 int_cause_reg = PORT_REG(port, XGMAC_PORT_INT_CAUSE_A);
0a57a536 4337 else
0d804338 4338 int_cause_reg = T5_PORT_REG(port, MAC_PORT_INT_CAUSE_A);
4339
4340 v = t4_read_reg(adap, int_cause_reg);
56d36be4 4341
0d804338 4342 v &= TXFIFO_PRTY_ERR_F | RXFIFO_PRTY_ERR_F;
4343 if (!v)
4344 return;
4345
0d804338 4346 if (v & TXFIFO_PRTY_ERR_F)
4347 dev_alert(adap->pdev_dev, "XGMAC %d Tx FIFO parity error\n",
4348 port);
0d804338 4349 if (v & RXFIFO_PRTY_ERR_F)
4350 dev_alert(adap->pdev_dev, "XGMAC %d Rx FIFO parity error\n",
4351 port);
0d804338 4352 t4_write_reg(adap, PORT_REG(port, XGMAC_PORT_INT_CAUSE_A), v);
4353 t4_fatal_err(adap);
4354}
4355
4356/*
4357 * PL interrupt handler.
4358 */
4359static void pl_intr_handler(struct adapter *adap)
4360{
005b5717 4361 static const struct intr_info pl_intr_info[] = {
4362 { FATALPERR_F, "T4 fatal parity error", -1, 1 },
4363 { PERRVFID_F, "PL VFID_MAP parity error", -1, 1 },
4364 { 0 }
4365 };
4366
0d804338 4367 if (t4_handle_intr_status(adap, PL_PL_INT_CAUSE_A, pl_intr_info))
4368 t4_fatal_err(adap);
4369}
4370
4371#define PF_INTR_MASK (PFSW_F)
4372#define GLBL_INTR_MASK (CIM_F | MPS_F | PL_F | PCIE_F | MC_F | EDC0_F | \
4373 EDC1_F | LE_F | TP_F | MA_F | PM_TX_F | PM_RX_F | ULP_RX_F | \
4374 CPL_SWITCH_F | SGE_F | ULP_TX_F)
4375
4376/**
4377 * t4_slow_intr_handler - control path interrupt handler
4378 * @adapter: the adapter
4379 *
4380 * T4 interrupt handler for non-data global interrupt events, e.g., errors.
4381 * The designation 'slow' is used because it involves register reads, while
4382 * data interrupts typically don't involve any MMIOs.
4383 */
4384int t4_slow_intr_handler(struct adapter *adapter)
4385{
0d804338 4386 u32 cause = t4_read_reg(adapter, PL_INT_CAUSE_A);
4387
4388 if (!(cause & GLBL_INTR_MASK))
4389 return 0;
0d804338 4390 if (cause & CIM_F)
56d36be4 4391 cim_intr_handler(adapter);
0d804338 4392 if (cause & MPS_F)
56d36be4 4393 mps_intr_handler(adapter);
0d804338 4394 if (cause & NCSI_F)
56d36be4 4395 ncsi_intr_handler(adapter);
0d804338 4396 if (cause & PL_F)
56d36be4 4397 pl_intr_handler(adapter);
0d804338 4398 if (cause & SMB_F)
56d36be4 4399 smb_intr_handler(adapter);
0d804338 4400 if (cause & XGMAC0_F)
56d36be4 4401 xgmac_intr_handler(adapter, 0);
0d804338 4402 if (cause & XGMAC1_F)
56d36be4 4403 xgmac_intr_handler(adapter, 1);
0d804338 4404 if (cause & XGMAC_KR0_F)
56d36be4 4405 xgmac_intr_handler(adapter, 2);
0d804338 4406 if (cause & XGMAC_KR1_F)
56d36be4 4407 xgmac_intr_handler(adapter, 3);
0d804338 4408 if (cause & PCIE_F)
56d36be4 4409 pcie_intr_handler(adapter);
0d804338 4410 if (cause & MC_F)
56d36be4 4411 mem_intr_handler(adapter, MEM_MC);
3ccc6cf7 4412 if (is_t5(adapter->params.chip) && (cause & MC1_F))
822dd8a8 4413 mem_intr_handler(adapter, MEM_MC1);
0d804338 4414 if (cause & EDC0_F)
56d36be4 4415 mem_intr_handler(adapter, MEM_EDC0);
0d804338 4416 if (cause & EDC1_F)
56d36be4 4417 mem_intr_handler(adapter, MEM_EDC1);
0d804338 4418 if (cause & LE_F)
56d36be4 4419 le_intr_handler(adapter);
0d804338 4420 if (cause & TP_F)
56d36be4 4421 tp_intr_handler(adapter);
0d804338 4422 if (cause & MA_F)
56d36be4 4423 ma_intr_handler(adapter);
0d804338 4424 if (cause & PM_TX_F)
56d36be4 4425 pmtx_intr_handler(adapter);
0d804338 4426 if (cause & PM_RX_F)
56d36be4 4427 pmrx_intr_handler(adapter);
0d804338 4428 if (cause & ULP_RX_F)
56d36be4 4429 ulprx_intr_handler(adapter);
0d804338 4430 if (cause & CPL_SWITCH_F)
56d36be4 4431 cplsw_intr_handler(adapter);
0d804338 4432 if (cause & SGE_F)
56d36be4 4433 sge_intr_handler(adapter);
0d804338 4434 if (cause & ULP_TX_F)
4435 ulptx_intr_handler(adapter);
4436
4437 /* Clear the interrupts just processed for which we are the master. */
4438 t4_write_reg(adapter, PL_INT_CAUSE_A, cause & GLBL_INTR_MASK);
4439 (void)t4_read_reg(adapter, PL_INT_CAUSE_A); /* flush */
4440 return 1;
4441}
4442
4443/**
4444 * t4_intr_enable - enable interrupts
4445 * @adapter: the adapter whose interrupts should be enabled
4446 *
4447 * Enable PF-specific interrupts for the calling function and the top-level
4448 * interrupt concentrator for global interrupts. Interrupts are already
4449 * enabled at each module, here we just enable the roots of the interrupt
4450 * hierarchies.
4451 *
4452 * Note: this function should be called only when the driver manages
4453 * non PF-specific interrupts from the various HW modules. Only one PCI
4454 * function at a time should be doing this.
4455 */
4456void t4_intr_enable(struct adapter *adapter)
4457{
3ccc6cf7 4458 u32 val = 0;
4459 u32 whoami = t4_read_reg(adapter, PL_WHOAMI_A);
4460 u32 pf = CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5 ?
4461 SOURCEPF_G(whoami) : T6_SOURCEPF_G(whoami);
56d36be4 4462
4463 if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5)
4464 val = ERR_DROPPED_DB_F | ERR_EGR_CTXT_PRIO_F | DBFIFO_HP_INT_F;
4465 t4_write_reg(adapter, SGE_INT_ENABLE3_A, ERR_CPL_EXCEED_IQE_SIZE_F |
4466 ERR_INVALID_CIDX_INC_F | ERR_CPL_OPCODE_0_F |
3ccc6cf7 4467 ERR_DATA_CPL_ON_HIGH_QID1_F | INGRESS_SIZE_ERR_F |
4468 ERR_DATA_CPL_ON_HIGH_QID0_F | ERR_BAD_DB_PIDX3_F |
4469 ERR_BAD_DB_PIDX2_F | ERR_BAD_DB_PIDX1_F |
4470 ERR_BAD_DB_PIDX0_F | ERR_ING_CTXT_PRIO_F |
3ccc6cf7 4471 DBFIFO_LP_INT_F | EGRESS_SIZE_ERR_F | val);
4472 t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE_A), PF_INTR_MASK);
4473 t4_set_reg_field(adapter, PL_INT_MAP0_A, 0, 1 << pf);
4474}
4475
4476/**
4477 * t4_intr_disable - disable interrupts
4478 * @adapter: the adapter whose interrupts should be disabled
4479 *
4480 * Disable interrupts. We only disable the top-level interrupt
4481 * concentrators. The caller must be a PCI function managing global
4482 * interrupts.
4483 */
4484void t4_intr_disable(struct adapter *adapter)
4485{
4486 u32 whoami = t4_read_reg(adapter, PL_WHOAMI_A);
4487 u32 pf = CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5 ?
4488 SOURCEPF_G(whoami) : T6_SOURCEPF_G(whoami);
56d36be4 4489
4490 t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE_A), 0);
4491 t4_set_reg_field(adapter, PL_INT_MAP0_A, 1 << pf, 0);
4492}
4493
4494/**
4495 * t4_config_rss_range - configure a portion of the RSS mapping table
4496 * @adapter: the adapter
4497 * @mbox: mbox to use for the FW command
4498 * @viid: virtual interface whose RSS subtable is to be written
4499 * @start: start entry in the table to write
4500 * @n: how many table entries to write
4501 * @rspq: values for the response queue lookup table
4502 * @nrspq: number of values in @rspq
4503 *
4504 * Programs the selected part of the VI's RSS mapping table with the
4505 * provided values. If @nrspq < @n the supplied values are used repeatedly
4506 * until the full table range is populated.
4507 *
4508 * The caller must ensure the values in @rspq are in the range allowed for
4509 * @viid.
4510 */
4511int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
4512 int start, int n, const u16 *rspq, unsigned int nrspq)
4513{
4514 int ret;
4515 const u16 *rsp = rspq;
4516 const u16 *rsp_end = rspq + nrspq;
4517 struct fw_rss_ind_tbl_cmd cmd;
4518
4519 memset(&cmd, 0, sizeof(cmd));
f404f80c 4520 cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_RSS_IND_TBL_CMD) |
e2ac9628 4521 FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
b2e1a3f0 4522 FW_RSS_IND_TBL_CMD_VIID_V(viid));
f404f80c 4523 cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
4524
4525 /* each fw_rss_ind_tbl_cmd takes up to 32 entries */
4526 while (n > 0) {
4527 int nq = min(n, 32);
4528 __be32 *qp = &cmd.iq0_to_iq2;
4529
4530 cmd.niqid = cpu_to_be16(nq);
4531 cmd.startidx = cpu_to_be16(start);
4532
4533 start += nq;
4534 n -= nq;
4535
4536 while (nq > 0) {
4537 unsigned int v;
4538
b2e1a3f0 4539 v = FW_RSS_IND_TBL_CMD_IQ0_V(*rsp);
4540 if (++rsp >= rsp_end)
4541 rsp = rspq;
b2e1a3f0 4542 v |= FW_RSS_IND_TBL_CMD_IQ1_V(*rsp);
4543 if (++rsp >= rsp_end)
4544 rsp = rspq;
b2e1a3f0 4545 v |= FW_RSS_IND_TBL_CMD_IQ2_V(*rsp);
4546 if (++rsp >= rsp_end)
4547 rsp = rspq;
4548
f404f80c 4549 *qp++ = cpu_to_be32(v);
4550 nq -= 3;
4551 }
4552
4553 ret = t4_wr_mbox(adapter, mbox, &cmd, sizeof(cmd), NULL);
4554 if (ret)
4555 return ret;
4556 }
4557 return 0;
4558}
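/* Editor's note: illustrative sketch, not part of the driver. It demonstrates
 * the value-recycling rule documented for t4_config_rss_range() above: when
 * fewer response-queue IDs are supplied than table slots to write, the IDs
 * are reused round-robin until the requested range is full. The demo_* names
 * are made up and no firmware mailbox is involved.
 */
#if 0
#include <stdio.h>

static void demo_fill_rss_range(unsigned short *tbl, int start, int n,
				const unsigned short *rspq, unsigned int nrspq)
{
	const unsigned short *rsp = rspq, *rsp_end = rspq + nrspq;
	int i;

	for (i = 0; i < n; i++) {
		tbl[start + i] = *rsp;
		if (++rsp >= rsp_end)	/* wrap, as the mailbox version does */
			rsp = rspq;
	}
}

int main(void)
{
	unsigned short tbl[8] = { 0 };
	static const unsigned short qids[] = { 100, 101, 102 };
	int i;

	demo_fill_rss_range(tbl, 0, 8, qids, 3);
	for (i = 0; i < 8; i++)
		printf("%u ", tbl[i]);	/* 100 101 102 100 101 102 100 101 */
	printf("\n");
	return 0;
}
#endif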
4559
4560/**
4561 * t4_config_glbl_rss - configure the global RSS mode
4562 * @adapter: the adapter
4563 * @mbox: mbox to use for the FW command
4564 * @mode: global RSS mode
4565 * @flags: mode-specific flags
4566 *
4567 * Sets the global RSS mode.
4568 */
4569int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode,
4570 unsigned int flags)
4571{
4572 struct fw_rss_glb_config_cmd c;
4573
4574 memset(&c, 0, sizeof(c));
4575 c.op_to_write = cpu_to_be32(FW_CMD_OP_V(FW_RSS_GLB_CONFIG_CMD) |
4576 FW_CMD_REQUEST_F | FW_CMD_WRITE_F);
4577 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
56d36be4 4578 if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_MANUAL) {
4579 c.u.manual.mode_pkd =
4580 cpu_to_be32(FW_RSS_GLB_CONFIG_CMD_MODE_V(mode));
4581 } else if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL) {
4582 c.u.basicvirtual.mode_pkd =
4583 cpu_to_be32(FW_RSS_GLB_CONFIG_CMD_MODE_V(mode));
4584 c.u.basicvirtual.synmapen_to_hashtoeplitz = cpu_to_be32(flags);
4585 } else
4586 return -EINVAL;
4587 return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
4588}
4589
4590/**
4591 * t4_config_vi_rss - configure per VI RSS settings
4592 * @adapter: the adapter
4593 * @mbox: mbox to use for the FW command
4594 * @viid: the VI id
4595 * @flags: RSS flags
4596 * @defq: id of the default RSS queue for the VI.
4597 *
4598 * Configures VI-specific RSS properties.
4599 */
4600int t4_config_vi_rss(struct adapter *adapter, int mbox, unsigned int viid,
4601 unsigned int flags, unsigned int defq)
4602{
4603 struct fw_rss_vi_config_cmd c;
4604
4605 memset(&c, 0, sizeof(c));
4606 c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_RSS_VI_CONFIG_CMD) |
4607 FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
4608 FW_RSS_VI_CONFIG_CMD_VIID_V(viid));
4609 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
4610 c.u.basicvirtual.defaultq_to_udpen = cpu_to_be32(flags |
4611 FW_RSS_VI_CONFIG_CMD_DEFAULTQ_V(defq));
4612 return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
4613}
4614
4615/* Read an RSS table row */
4616static int rd_rss_row(struct adapter *adap, int row, u32 *val)
4617{
4618 t4_write_reg(adap, TP_RSS_LKP_TABLE_A, 0xfff00000 | row);
4619 return t4_wait_op_done_val(adap, TP_RSS_LKP_TABLE_A, LKPTBLROWVLD_F, 1,
4620 5, 0, val);
4621}
4622
4623/**
4624 * t4_read_rss - read the contents of the RSS mapping table
4625 * @adapter: the adapter
4626 * @map: holds the contents of the RSS mapping table
4627 *
4628 * Reads the contents of the RSS hash->queue mapping table.
4629 */
4630int t4_read_rss(struct adapter *adapter, u16 *map)
4631{
4632 u32 val;
4633 int i, ret;
4634
4635 for (i = 0; i < RSS_NENTRIES / 2; ++i) {
4636 ret = rd_rss_row(adapter, i, &val);
4637 if (ret)
4638 return ret;
4639 *map++ = LKPTBLQUEUE0_G(val);
4640 *map++ = LKPTBLQUEUE1_G(val);
4641 }
4642 return 0;
4643}
4644
4645static unsigned int t4_use_ldst(struct adapter *adap)
4646{
4647 return (adap->flags & FW_OK) || !adap->use_bd;
4648}
4649
4650/**
4651 * t4_fw_tp_pio_rw - Access TP PIO through LDST
4652 * @adap: the adapter
4653 * @vals: where the indirect register values are stored/written
4654 * @nregs: how many indirect registers to read/write
4655 * @start_idx: index of first indirect register to read/write
4656 * @rw: Read (1) or Write (0)
4657 *
4658 * Access TP PIO registers through LDST
4659 */
4660static void t4_fw_tp_pio_rw(struct adapter *adap, u32 *vals, unsigned int nregs,
4661 unsigned int start_index, unsigned int rw)
4662{
4663 int ret, i;
4664 int cmd = FW_LDST_ADDRSPC_TP_PIO;
4665 struct fw_ldst_cmd c;
4666
4667 for (i = 0 ; i < nregs; i++) {
4668 memset(&c, 0, sizeof(c));
4669 c.op_to_addrspace = cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) |
4670 FW_CMD_REQUEST_F |
4671 (rw ? FW_CMD_READ_F :
4672 FW_CMD_WRITE_F) |
4673 FW_LDST_CMD_ADDRSPACE_V(cmd));
4674 c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
4675
4676 c.u.addrval.addr = cpu_to_be32(start_index + i);
4677 c.u.addrval.val = rw ? 0 : cpu_to_be32(vals[i]);
4678 ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c);
4679 if (!ret && rw)
4680 vals[i] = be32_to_cpu(c.u.addrval.val);
4681 }
4682}
4683
4684/**
4685 * t4_read_rss_key - read the global RSS key
4686 * @adap: the adapter
4687 * @key: 10-entry array holding the 320-bit RSS key
4688 *
4689 * Reads the global 320-bit RSS key.
4690 */
4691void t4_read_rss_key(struct adapter *adap, u32 *key)
4692{
0b2c2a93 4693 if (t4_use_ldst(adap))
4694 t4_fw_tp_pio_rw(adap, key, 10, TP_RSS_SECRET_KEY0_A, 1);
4695 else
4696 t4_read_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A, key, 10,
4697 TP_RSS_SECRET_KEY0_A);
4698}
4699
4700/**
4701 * t4_write_rss_key - program one of the RSS keys
4702 * @adap: the adapter
4703 * @key: 10-entry array holding the 320-bit RSS key
4704 * @idx: which RSS key to write
4705 *
4706 * Writes one of the RSS keys with the given 320-bit value. If @idx is
4707 * 0..15 the corresponding entry in the RSS key table is written,
4708 * otherwise the global RSS key is written.
4709 */
4710void t4_write_rss_key(struct adapter *adap, const u32 *key, int idx)
4711{
4712 u8 rss_key_addr_cnt = 16;
4713 u32 vrt = t4_read_reg(adap, TP_RSS_CONFIG_VRT_A);
4714
4715 /* T6 and later: for KeyMode 3 (per-VF and per-VF scramble),
4716 * access to key addresses 16-63 is allowed by using KeyWrAddrX
4717 * as index[5:4] (the upper 2 bits) into the key table
4718 */
4719 if ((CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5) &&
4720 (vrt & KEYEXTEND_F) && (KEYMODE_G(vrt) == 3))
4721 rss_key_addr_cnt = 32;
4722
0b2c2a93 4723 if (t4_use_ldst(adap))
4724 t4_fw_tp_pio_rw(adap, (void *)key, 10, TP_RSS_SECRET_KEY0_A, 0);
4725 else
4726 t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A, key, 10,
4727 TP_RSS_SECRET_KEY0_A);
4728
4729 if (idx >= 0 && idx < rss_key_addr_cnt) {
4730 if (rss_key_addr_cnt > 16)
4731 t4_write_reg(adap, TP_RSS_CONFIG_VRT_A,
4732 KEYWRADDRX_V(idx >> 4) |
4733 T6_VFWRADDR_V(idx) | KEYWREN_F);
4734 else
4735 t4_write_reg(adap, TP_RSS_CONFIG_VRT_A,
4736 KEYWRADDR_V(idx) | KEYWREN_F);
4737 }
4738}
4739
4740/**
4741 * t4_read_rss_pf_config - read PF RSS Configuration Table
4742 * @adapter: the adapter
4743 * @index: the entry in the PF RSS table to read
4744 * @valp: where to store the returned value
4745 *
4746 * Reads the PF RSS Configuration Table at the specified index and returns
4747 * the value found there.
4748 */
4749void t4_read_rss_pf_config(struct adapter *adapter, unsigned int index,
4750 u32 *valp)
4751{
0b2c2a93 4752 if (t4_use_ldst(adapter))
4753 t4_fw_tp_pio_rw(adapter, valp, 1,
4754 TP_RSS_PF0_CONFIG_A + index, 1);
4755 else
4756 t4_read_indirect(adapter, TP_PIO_ADDR_A, TP_PIO_DATA_A,
4757 valp, 1, TP_RSS_PF0_CONFIG_A + index);
4758}
4759
4760/**
4761 * t4_read_rss_vf_config - read VF RSS Configuration Table
4762 * @adapter: the adapter
4763 * @index: the entry in the VF RSS table to read
4764 * @vfl: where to store the returned VFL
4765 * @vfh: where to store the returned VFH
4766 *
4767 * Reads the VF RSS Configuration Table at the specified index and returns
4768 * the (VFL, VFH) values found there.
4769 */
4770void t4_read_rss_vf_config(struct adapter *adapter, unsigned int index,
4771 u32 *vfl, u32 *vfh)
4772{
4773 u32 vrt, mask, data;
4774
4775 if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5) {
4776 mask = VFWRADDR_V(VFWRADDR_M);
4777 data = VFWRADDR_V(index);
4778 } else {
4779 mask = T6_VFWRADDR_V(T6_VFWRADDR_M);
4780 data = T6_VFWRADDR_V(index);
4781 }
4782
4783 /* Request that the index'th VF Table values be read into VFL/VFH.
4784 */
4785 vrt = t4_read_reg(adapter, TP_RSS_CONFIG_VRT_A);
4786 vrt &= ~(VFRDRG_F | VFWREN_F | KEYWREN_F | mask);
4787 vrt |= data | VFRDEN_F;
4788 t4_write_reg(adapter, TP_RSS_CONFIG_VRT_A, vrt);
4789
4790 /* Grab the VFL/VFH values ...
4791 */
0b2c2a93 4792 if (t4_use_ldst(adapter)) {
4793 t4_fw_tp_pio_rw(adapter, vfl, 1, TP_RSS_VFL_CONFIG_A, 1);
4794 t4_fw_tp_pio_rw(adapter, vfh, 1, TP_RSS_VFH_CONFIG_A, 1);
4795 } else {
4796 t4_read_indirect(adapter, TP_PIO_ADDR_A, TP_PIO_DATA_A,
4797 vfl, 1, TP_RSS_VFL_CONFIG_A);
4798 t4_read_indirect(adapter, TP_PIO_ADDR_A, TP_PIO_DATA_A,
4799 vfh, 1, TP_RSS_VFH_CONFIG_A);
4800 }
4801}
4802
4803/**
4804 * t4_read_rss_pf_map - read PF RSS Map
4805 * @adapter: the adapter
4806 *
4807 * Reads the PF RSS Map register and returns its value.
4808 */
4809u32 t4_read_rss_pf_map(struct adapter *adapter)
4810{
4811 u32 pfmap;
4812
0b2c2a93 4813 if (t4_use_ldst(adapter))
4814 t4_fw_tp_pio_rw(adapter, &pfmap, 1, TP_RSS_PF_MAP_A, 1);
4815 else
4816 t4_read_indirect(adapter, TP_PIO_ADDR_A, TP_PIO_DATA_A,
4817 &pfmap, 1, TP_RSS_PF_MAP_A);
4818 return pfmap;
4819}
4820
4821/**
4822 * t4_read_rss_pf_mask - read PF RSS Mask
4823 * @adapter: the adapter
4824 *
4825 * Reads the PF RSS Mask register and returns its value.
4826 */
4827u32 t4_read_rss_pf_mask(struct adapter *adapter)
4828{
4829 u32 pfmask;
4830
0b2c2a93 4831 if (t4_use_ldst(adapter))
4832 t4_fw_tp_pio_rw(adapter, &pfmask, 1, TP_RSS_PF_MSK_A, 1);
4833 else
4834 t4_read_indirect(adapter, TP_PIO_ADDR_A, TP_PIO_DATA_A,
4835 &pfmask, 1, TP_RSS_PF_MSK_A);
4836 return pfmask;
4837}
4838
4839/**
4840 * t4_tp_get_tcp_stats - read TP's TCP MIB counters
4841 * @adap: the adapter
4842 * @v4: holds the TCP/IP counter values
4843 * @v6: holds the TCP/IPv6 counter values
4844 *
4845 * Returns the values of TP's TCP/IP and TCP/IPv6 MIB counters.
4846 * Either @v4 or @v6 may be %NULL to skip the corresponding stats.
4847 */
4848void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4,
4849 struct tp_tcp_stats *v6)
4850{
837e4a42 4851 u32 val[TP_MIB_TCP_RXT_SEG_LO_A - TP_MIB_TCP_OUT_RST_A + 1];
56d36be4 4852
837e4a42 4853#define STAT_IDX(x) ((TP_MIB_TCP_##x##_A) - TP_MIB_TCP_OUT_RST_A)
4854#define STAT(x) val[STAT_IDX(x)]
4855#define STAT64(x) (((u64)STAT(x##_HI) << 32) | STAT(x##_LO))
4856
4857 if (v4) {
4858 t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, val,
4859 ARRAY_SIZE(val), TP_MIB_TCP_OUT_RST_A);
4860 v4->tcp_out_rsts = STAT(OUT_RST);
4861 v4->tcp_in_segs = STAT64(IN_SEG);
4862 v4->tcp_out_segs = STAT64(OUT_SEG);
4863 v4->tcp_retrans_segs = STAT64(RXT_SEG);
4864 }
4865 if (v6) {
4866 t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, val,
4867 ARRAY_SIZE(val), TP_MIB_TCP_V6OUT_RST_A);
4868 v6->tcp_out_rsts = STAT(OUT_RST);
4869 v6->tcp_in_segs = STAT64(IN_SEG);
4870 v6->tcp_out_segs = STAT64(OUT_SEG);
4871 v6->tcp_retrans_segs = STAT64(RXT_SEG);
4872 }
4873#undef STAT64
4874#undef STAT
4875#undef STAT_IDX
4876}
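/* Editor's note: illustrative sketch, not part of the driver. The STAT64()
 * macro above simply glues the _HI and _LO 32-bit MIB words into one 64-bit
 * counter; a stand-alone equivalent is:
 */
#if 0
#include <stdint.h>

static uint64_t demo_stat64(uint32_t hi, uint32_t lo)
{
	/* e.g. hi = 0x1, lo = 0x2 -> 0x100000002 */
	return ((uint64_t)hi << 32) | lo;
}
#endif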
4877
4878/**
4879 * t4_tp_get_err_stats - read TP's error MIB counters
4880 * @adap: the adapter
4881 * @st: holds the counter values
4882 *
4883 * Returns the values of TP's error counters.
4884 */
4885void t4_tp_get_err_stats(struct adapter *adap, struct tp_err_stats *st)
4886{
4887 int nchan = adap->params.arch.nchan;
4888
4889 t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
4890 st->mac_in_errs, nchan, TP_MIB_MAC_IN_ERR_0_A);
4891 t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
4892 st->hdr_in_errs, nchan, TP_MIB_HDR_IN_ERR_0_A);
4893 t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
4894 st->tcp_in_errs, nchan, TP_MIB_TCP_IN_ERR_0_A);
4895 t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
4896 st->tnl_cong_drops, nchan, TP_MIB_TNL_CNG_DROP_0_A);
4897 t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
4898 st->ofld_chan_drops, nchan, TP_MIB_OFD_CHN_DROP_0_A);
4899 t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
4900 st->tnl_tx_drops, nchan, TP_MIB_TNL_DROP_0_A);
4901 t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
4902 st->ofld_vlan_drops, nchan, TP_MIB_OFD_VLN_DROP_0_A);
4903 t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
4904 st->tcp6_in_errs, nchan, TP_MIB_TCP_V6IN_ERR_0_A);
4905
4906 t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
4907 &st->ofld_no_neigh, 2, TP_MIB_OFD_ARP_DROP_A);
4908}
4909
4910/**
4911 * t4_tp_get_cpl_stats - read TP's CPL MIB counters
4912 * @adap: the adapter
4913 * @st: holds the counter values
4914 *
4915 * Returns the values of TP's CPL counters.
4916 */
4917void t4_tp_get_cpl_stats(struct adapter *adap, struct tp_cpl_stats *st)
4918{
4919 int nchan = adap->params.arch.nchan;
4920
4921 t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, st->req,
4922 nchan, TP_MIB_CPL_IN_REQ_0_A);
4923 t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, st->rsp,
4924 nchan, TP_MIB_CPL_OUT_RSP_0_A);
4925
4926}
4927
4928/**
4929 * t4_tp_get_rdma_stats - read TP's RDMA MIB counters
4930 * @adap: the adapter
4931 * @st: holds the counter values
4932 *
4933 * Returns the values of TP's RDMA counters.
4934 */
4935void t4_tp_get_rdma_stats(struct adapter *adap, struct tp_rdma_stats *st)
4936{
4937 t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, &st->rqe_dfr_pkt,
4938 2, TP_MIB_RQE_DFR_PKT_A);
4939}
4940
4941/**
4942 * t4_get_fcoe_stats - read TP's FCoE MIB counters for a port
4943 * @adap: the adapter
4944 * @idx: the port index
4945 * @st: holds the counter values
4946 *
4947 * Returns the values of TP's FCoE counters for the selected port.
4948 */
4949void t4_get_fcoe_stats(struct adapter *adap, unsigned int idx,
4950 struct tp_fcoe_stats *st)
4951{
4952 u32 val[2];
4953
4954 t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, &st->frames_ddp,
4955 1, TP_MIB_FCOE_DDP_0_A + idx);
4956 t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, &st->frames_drop,
4957 1, TP_MIB_FCOE_DROP_0_A + idx);
4958 t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, val,
4959 2, TP_MIB_FCOE_BYTE_0_HI_A + 2 * idx);
4960 st->octets_ddp = ((u64)val[0] << 32) | val[1];
4961}
4962
4963/**
4964 * t4_get_usm_stats - read TP's non-TCP DDP MIB counters
4965 * @adap: the adapter
4966 * @st: holds the counter values
4967 *
4968 * Returns the values of TP's counters for non-TCP directly-placed packets.
4969 */
4970void t4_get_usm_stats(struct adapter *adap, struct tp_usm_stats *st)
4971{
4972 u32 val[4];
4973
4974 t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, val, 4,
4975 TP_MIB_USM_PKTS_A);
4976 st->frames = val[0];
4977 st->drops = val[1];
4978 st->octets = ((u64)val[2] << 32) | val[3];
4979}
4980
4981/**
4982 * t4_read_mtu_tbl - returns the values in the HW path MTU table
4983 * @adap: the adapter
4984 * @mtus: where to store the MTU values
4985 * @mtu_log: where to store the MTU base-2 log (may be %NULL)
4986 *
4987 * Reads the HW path MTU table.
4988 */
4989void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log)
4990{
4991 u32 v;
4992 int i;
4993
4994 for (i = 0; i < NMTUS; ++i) {
4995 t4_write_reg(adap, TP_MTU_TABLE_A,
4996 MTUINDEX_V(0xff) | MTUVALUE_V(i));
4997 v = t4_read_reg(adap, TP_MTU_TABLE_A);
4998 mtus[i] = MTUVALUE_G(v);
56d36be4 4999 if (mtu_log)
837e4a42 5000 mtu_log[i] = MTUWIDTH_G(v);
5001 }
5002}
5003
5004/**
5005 * t4_read_cong_tbl - reads the congestion control table
5006 * @adap: the adapter
5007 * @incr: where to store the alpha values
5008 *
5009 * Reads the additive increments programmed into the HW congestion
5010 * control table.
5011 */
5012void t4_read_cong_tbl(struct adapter *adap, u16 incr[NMTUS][NCCTRL_WIN])
5013{
5014 unsigned int mtu, w;
5015
5016 for (mtu = 0; mtu < NMTUS; ++mtu)
5017 for (w = 0; w < NCCTRL_WIN; ++w) {
5018 t4_write_reg(adap, TP_CCTRL_TABLE_A,
5019 ROWINDEX_V(0xffff) | (mtu << 5) | w);
5020 incr[mtu][w] = (u16)t4_read_reg(adap,
5021 TP_CCTRL_TABLE_A) & 0x1fff;
5022 }
5023}
5024
5025/**
5026 * t4_tp_wr_bits_indirect - set/clear bits in an indirect TP register
5027 * @adap: the adapter
5028 * @addr: the indirect TP register address
5029 * @mask: specifies the field within the register to modify
5030 * @val: new value for the field
5031 *
5032 * Sets a field of an indirect TP register to the given value.
5033 */
5034void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr,
5035 unsigned int mask, unsigned int val)
5036{
5037 t4_write_reg(adap, TP_PIO_ADDR_A, addr);
5038 val |= t4_read_reg(adap, TP_PIO_DATA_A) & ~mask;
5039 t4_write_reg(adap, TP_PIO_DATA_A, val);
5040}
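/* Editor's note: illustrative sketch, not part of the driver. It models the
 * address/data indirect-register idiom used by t4_tp_wr_bits_indirect():
 * write the target address to one register, then read-modify-write the value
 * through the companion data register. The registers are faked with a small
 * array and all demo_* names are made up.
 */
#if 0
#include <stdio.h>

static unsigned int demo_regs[16];	/* fake indirect register file */
static unsigned int demo_addr;		/* fake TP_PIO_ADDR register */

static void demo_write_addr(unsigned int addr) { demo_addr = addr; }
static unsigned int demo_read_data(void) { return demo_regs[demo_addr]; }
static void demo_write_data(unsigned int v) { demo_regs[demo_addr] = v; }

static void demo_wr_bits_indirect(unsigned int addr, unsigned int mask,
				  unsigned int val)
{
	demo_write_addr(addr);
	val |= demo_read_data() & ~mask;	/* keep bits outside the field */
	demo_write_data(val);
}

int main(void)
{
	demo_regs[3] = 0xffff0000;
	demo_wr_bits_indirect(3, 0x00ff, 0x0042);
	printf("0x%x\n", demo_regs[3]);		/* prints 0xffff0042 */
	return 0;
}
#endif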
5041
5042/**
5043 * init_cong_ctrl - initialize congestion control parameters
5044 * @a: the alpha values for congestion control
5045 * @b: the beta values for congestion control
5046 *
5047 * Initialize the congestion control parameters.
5048 */
91744948 5049static void init_cong_ctrl(unsigned short *a, unsigned short *b)
5050{
5051 a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1;
5052 a[9] = 2;
5053 a[10] = 3;
5054 a[11] = 4;
5055 a[12] = 5;
5056 a[13] = 6;
5057 a[14] = 7;
5058 a[15] = 8;
5059 a[16] = 9;
5060 a[17] = 10;
5061 a[18] = 14;
5062 a[19] = 17;
5063 a[20] = 21;
5064 a[21] = 25;
5065 a[22] = 30;
5066 a[23] = 35;
5067 a[24] = 45;
5068 a[25] = 60;
5069 a[26] = 80;
5070 a[27] = 100;
5071 a[28] = 200;
5072 a[29] = 300;
5073 a[30] = 400;
5074 a[31] = 500;
5075
5076 b[0] = b[1] = b[2] = b[3] = b[4] = b[5] = b[6] = b[7] = b[8] = 0;
5077 b[9] = b[10] = 1;
5078 b[11] = b[12] = 2;
5079 b[13] = b[14] = b[15] = b[16] = 3;
5080 b[17] = b[18] = b[19] = b[20] = b[21] = 4;
5081 b[22] = b[23] = b[24] = b[25] = b[26] = b[27] = 5;
5082 b[28] = b[29] = 6;
5083 b[30] = b[31] = 7;
5084}
5085
5086/* The minimum additive increment value for the congestion control table */
5087#define CC_MIN_INCR 2U
5088
5089/**
5090 * t4_load_mtus - write the MTU and congestion control HW tables
5091 * @adap: the adapter
5092 * @mtus: the values for the MTU table
5093 * @alpha: the values for the congestion control alpha parameter
5094 * @beta: the values for the congestion control beta parameter
5095 *
5096 * Write the HW MTU table with the supplied MTUs and the high-speed
5097 * congestion control table with the supplied alpha, beta, and MTUs.
5098 * We write the two tables together because the additive increments
5099 * depend on the MTUs.
5100 */
5101void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
5102 const unsigned short *alpha, const unsigned short *beta)
5103{
5104 static const unsigned int avg_pkts[NCCTRL_WIN] = {
5105 2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
5106 896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
5107 28672, 40960, 57344, 81920, 114688, 163840, 229376
5108 };
5109
5110 unsigned int i, w;
5111
5112 for (i = 0; i < NMTUS; ++i) {
5113 unsigned int mtu = mtus[i];
5114 unsigned int log2 = fls(mtu);
5115
5116 if (!(mtu & ((1 << log2) >> 2))) /* round */
5117 log2--;
5118 t4_write_reg(adap, TP_MTU_TABLE_A, MTUINDEX_V(i) |
5119 MTUWIDTH_V(log2) | MTUVALUE_V(mtu));
5120
5121 for (w = 0; w < NCCTRL_WIN; ++w) {
5122 unsigned int inc;
5123
5124 inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
5125 CC_MIN_INCR);
5126
837e4a42 5127 t4_write_reg(adap, TP_CCTRL_TABLE_A, (i << 21) |
5128 (w << 16) | (beta[w] << 13) | inc);
5129 }
5130 }
5131}
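/* Editor's note: worked example for the loop above (illustrative values).
 * For mtu = 1500, fls(1500) = 11 and the bit tested by (1 << 11) >> 2 is
 * bit 9 (512), which is clear because 1500 < 1536, so the width is rounded
 * down and MTUWIDTH_V(10) is written; an MTU of 1600 would keep width 11.
 * For the congestion entries, window 20 with alpha[20] = 21 and
 * avg_pkts[20] = 5120 gives
 * inc = max((1500 - 40) * 21 / 5120, CC_MIN_INCR) = max(5, 2) = 5.
 */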
5132
5133/* Calculates a rate in bytes/s given the number of 256-byte units per 4K core
5134 * clocks. The formula is
5135 *
5136 * bytes/s = bytes256 * 256 * ClkFreq / 4096
5137 *
5138 * which is equivalent to
5139 *
5140 * bytes/s = 62.5 * bytes256 * ClkFreq_ms
5141 */
5142static u64 chan_rate(struct adapter *adap, unsigned int bytes256)
5143{
5144 u64 v = bytes256 * adap->params.vpd.cclk;
5145
5146 return v * 62 + v / 2;
5147}
5148
5149/**
5150 * t4_get_chan_txrate - get the current per channel Tx rates
5151 * @adap: the adapter
5152 * @nic_rate: rates for NIC traffic
5153 * @ofld_rate: rates for offloaded traffic
5154 *
5155 * Return the current Tx rates in bytes/s for NIC and offloaded traffic
5156 * for each channel.
5157 */
5158void t4_get_chan_txrate(struct adapter *adap, u64 *nic_rate, u64 *ofld_rate)
5159{
5160 u32 v;
5161
5162 v = t4_read_reg(adap, TP_TX_TRATE_A);
5163 nic_rate[0] = chan_rate(adap, TNLRATE0_G(v));
5164 nic_rate[1] = chan_rate(adap, TNLRATE1_G(v));
5165 if (adap->params.arch.nchan == NCHAN) {
5166 nic_rate[2] = chan_rate(adap, TNLRATE2_G(v));
5167 nic_rate[3] = chan_rate(adap, TNLRATE3_G(v));
5168 }
5169
5170 v = t4_read_reg(adap, TP_TX_ORATE_A);
5171 ofld_rate[0] = chan_rate(adap, OFDRATE0_G(v));
5172 ofld_rate[1] = chan_rate(adap, OFDRATE1_G(v));
5173 if (adap->params.arch.nchan == NCHAN) {
5174 ofld_rate[2] = chan_rate(adap, OFDRATE2_G(v));
5175 ofld_rate[3] = chan_rate(adap, OFDRATE3_G(v));
5176 }
5177}
5178
5179/**
5180 * t4_set_trace_filter - configure one of the tracing filters
5181 * @adap: the adapter
5182 * @tp: the desired trace filter parameters
5183 * @idx: which filter to configure
5184 * @enable: whether to enable or disable the filter
5185 *
5186 * Configures one of the tracing filters available in HW. If @enable is
5187 * %0 @tp is not examined and may be %NULL. The user is responsible for
5188 * setting the single/multiple trace mode by writing to the MPS_TRC_CFG_A register.
5189 */
5190int t4_set_trace_filter(struct adapter *adap, const struct trace_params *tp,
5191 int idx, int enable)
5192{
5193 int i, ofst = idx * 4;
5194 u32 data_reg, mask_reg, cfg;
5195 u32 multitrc = TRCMULTIFILTER_F;
5196
5197 if (!enable) {
5198 t4_write_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A_A + ofst, 0);
5199 return 0;
5200 }
5201
5202 cfg = t4_read_reg(adap, MPS_TRC_CFG_A);
5203 if (cfg & TRCMULTIFILTER_F) {
5204 /* If multiple tracers are enabled, then the maximum
5205 * capture size is 2.5KB (the FIFO size of a single channel)
5206 * minus 2 flits for the CPL_TRACE_PKT header.
5207 */
5208 if (tp->snap_len > ((10 * 1024 / 4) - (2 * 8)))
5209 return -EINVAL;
5210 } else {
5211 /* If multiple tracers are disabled, a maximum packet capture
5212 * size of 9600 bytes is recommended to avoid deadlocks.
5213 * Also, in this mode only trace0 can be enabled and running.
5214 */
5215 multitrc = 0;
5216 if (tp->snap_len > 9600 || idx)
5217 return -EINVAL;
5218 }
5219
5220 if (tp->port > (is_t4(adap->params.chip) ? 11 : 19) || tp->invert > 1 ||
5221 tp->skip_len > TFLENGTH_M || tp->skip_ofst > TFOFFSET_M ||
5222 tp->min_len > TFMINPKTSIZE_M)
5223 return -EINVAL;
5224
5225 /* stop the tracer we'll be changing */
5226 t4_write_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A_A + ofst, 0);
5227
5228 idx *= (MPS_TRC_FILTER1_MATCH_A - MPS_TRC_FILTER0_MATCH_A);
5229 data_reg = MPS_TRC_FILTER0_MATCH_A + idx;
5230 mask_reg = MPS_TRC_FILTER0_DONT_CARE_A + idx;
5231
5232 for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) {
5233 t4_write_reg(adap, data_reg, tp->data[i]);
5234 t4_write_reg(adap, mask_reg, ~tp->mask[i]);
5235 }
5236 t4_write_reg(adap, MPS_TRC_FILTER_MATCH_CTL_B_A + ofst,
5237 TFCAPTUREMAX_V(tp->snap_len) |
5238 TFMINPKTSIZE_V(tp->min_len));
5239 t4_write_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A_A + ofst,
5240 TFOFFSET_V(tp->skip_ofst) | TFLENGTH_V(tp->skip_len) |
5241 (is_t4(adap->params.chip) ?
5242 TFPORT_V(tp->port) | TFEN_F | TFINVERTMATCH_V(tp->invert) :
5243 T5_TFPORT_V(tp->port) | T5_TFEN_F |
5244 T5_TFINVERTMATCH_V(tp->invert)));
5245
5246 return 0;
5247}
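/* Editor's note: worked numbers for the snap_len checks above (illustrative).
 * With multiple tracers enabled, the largest accepted snap_len is
 * 10 * 1024 / 4 - 2 * 8 = 2560 - 16 = 2544 bytes; with a single tracer the
 * limit is 9600 bytes and only filter index 0 may be enabled.
 */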
5248
5249/**
5250 * t4_get_trace_filter - query one of the tracing filters
5251 * @adap: the adapter
5252 * @tp: the current trace filter parameters
5253 * @idx: which trace filter to query
5254 * @enabled: non-zero if the filter is enabled
5255 *
5256 * Returns the current settings of one of the HW tracing filters.
5257 */
5258void t4_get_trace_filter(struct adapter *adap, struct trace_params *tp, int idx,
5259 int *enabled)
5260{
5261 u32 ctla, ctlb;
5262 int i, ofst = idx * 4;
5263 u32 data_reg, mask_reg;
5264
5265 ctla = t4_read_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A_A + ofst);
5266 ctlb = t4_read_reg(adap, MPS_TRC_FILTER_MATCH_CTL_B_A + ofst);
5267
5268 if (is_t4(adap->params.chip)) {
5269 *enabled = !!(ctla & TFEN_F);
5270 tp->port = TFPORT_G(ctla);
5271 tp->invert = !!(ctla & TFINVERTMATCH_F);
5272 } else {
5273 *enabled = !!(ctla & T5_TFEN_F);
5274 tp->port = T5_TFPORT_G(ctla);
5275 tp->invert = !!(ctla & T5_TFINVERTMATCH_F);
5276 }
5277 tp->snap_len = TFCAPTUREMAX_G(ctlb);
5278 tp->min_len = TFMINPKTSIZE_G(ctlb);
5279 tp->skip_ofst = TFOFFSET_G(ctla);
5280 tp->skip_len = TFLENGTH_G(ctla);
5281
5282 ofst = (MPS_TRC_FILTER1_MATCH_A - MPS_TRC_FILTER0_MATCH_A) * idx;
5283 data_reg = MPS_TRC_FILTER0_MATCH_A + ofst;
5284 mask_reg = MPS_TRC_FILTER0_DONT_CARE_A + ofst;
5285
5286 for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) {
5287 tp->mask[i] = ~t4_read_reg(adap, mask_reg);
5288 tp->data[i] = t4_read_reg(adap, data_reg) & tp->mask[i];
5289 }
5290}
5291
5292/**
5293 * t4_pmtx_get_stats - returns the HW stats from PMTX
5294 * @adap: the adapter
5295 * @cnt: where to store the count statistics
5296 * @cycles: where to store the cycle statistics
5297 *
5298 * Returns performance statistics from PMTX.
5299 */
5300void t4_pmtx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[])
5301{
5302 int i;
5303 u32 data[2];
5304
44588560 5305 for (i = 0; i < adap->params.arch.pm_stats_cnt; i++) {
b3bbe36a
HS
5306 t4_write_reg(adap, PM_TX_STAT_CONFIG_A, i + 1);
5307 cnt[i] = t4_read_reg(adap, PM_TX_STAT_COUNT_A);
5308 if (is_t4(adap->params.chip)) {
5309 cycles[i] = t4_read_reg64(adap, PM_TX_STAT_LSB_A);
5310 } else {
5311 t4_read_indirect(adap, PM_TX_DBG_CTRL_A,
5312 PM_TX_DBG_DATA_A, data, 2,
5313 PM_TX_DBG_STAT_MSB_A);
5314 cycles[i] = (((u64)data[0] << 32) | data[1]);
5315 }
5316 }
5317}
5318
5319/**
5320 * t4_pmrx_get_stats - returns the HW stats from PMRX
5321 * @adap: the adapter
5322 * @cnt: where to store the count statistics
5323 * @cycles: where to store the cycle statistics
5324 *
5325 * Returns performance statistics from PMRX.
5326 */
5327void t4_pmrx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[])
5328{
5329 int i;
5330 u32 data[2];
5331
44588560 5332 for (i = 0; i < adap->params.arch.pm_stats_cnt; i++) {
b3bbe36a
HS
5333 t4_write_reg(adap, PM_RX_STAT_CONFIG_A, i + 1);
5334 cnt[i] = t4_read_reg(adap, PM_RX_STAT_COUNT_A);
5335 if (is_t4(adap->params.chip)) {
5336 cycles[i] = t4_read_reg64(adap, PM_RX_STAT_LSB_A);
5337 } else {
5338 t4_read_indirect(adap, PM_RX_DBG_CTRL_A,
5339 PM_RX_DBG_DATA_A, data, 2,
5340 PM_RX_DBG_STAT_MSB_A);
5341 cycles[i] = (((u64)data[0] << 32) | data[1]);
5342 }
5343 }
5344}
5345
56d36be4 5346/**
145ef8a5 5347 * t4_get_mps_bg_map - return the buffer groups associated with a port
56d36be4
DM
5348 * @adap: the adapter
5349 * @idx: the port index
5350 *
5351 * Returns a bitmap indicating which MPS buffer groups are associated
5352 * with the given port. Bit i is set if buffer group i is used by the
5353 * port.
5354 */
145ef8a5 5355unsigned int t4_get_mps_bg_map(struct adapter *adap, int idx)
56d36be4 5356{
837e4a42 5357 u32 n = NUMPORTS_G(t4_read_reg(adap, MPS_CMN_CTL_A));
56d36be4
DM
5358
5359 if (n == 0)
5360 return idx == 0 ? 0xf : 0;
e9faeab8
HS
5361 /* In T6 (which is a 2 port card),
5362 * port 0 is mapped to channel 0 and port 1 is mapped to channel 1.
5363 * For 2 port T4/T5 adapter,
5364 * port 0 is mapped to channel 0 and 1,
5365 * port 1 is mapped to channel 2 and 3.
5366 */
5367 if ((n == 1) &&
5368 (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5))
56d36be4
DM
5369 return idx < 2 ? (3 << (2 * idx)) : 0;
5370 return 1 << idx;
5371}
5372
72aca4bf
KS
5373/**
5374 * t4_get_port_type_description - return Port Type string description
5375 * @port_type: firmware Port Type enumeration
5376 */
5377const char *t4_get_port_type_description(enum fw_port_type port_type)
5378{
5379 static const char *const port_type_description[] = {
5380 "R XFI",
5381 "R XAUI",
5382 "T SGMII",
5383 "T XFI",
5384 "T XAUI",
5385 "KX4",
5386 "CX4",
5387 "KX",
5388 "KR",
5389 "R SFP+",
5390 "KR/KX",
5391 "KR/KX/KX4",
5392 "R QSFP_10G",
5aa80e51 5393 "R QSA",
72aca4bf
KS
5394 "R QSFP",
5395 "R BP40_BA",
5396 };
5397
5398 if (port_type < ARRAY_SIZE(port_type_description))
5399 return port_type_description[port_type];
5400 return "UNKNOWN";
5401}
5402
a4cfd929
HS
5403/**
5404 * t4_get_port_stats_offset - collect port stats relative to a previous
5405 * snapshot
5406 * @adap: The adapter
5407 * @idx: The port
5408 * @stats: Current stats to fill
5409 * @offset: Previous stats snapshot
5410 */
5411void t4_get_port_stats_offset(struct adapter *adap, int idx,
5412 struct port_stats *stats,
5413 struct port_stats *offset)
5414{
5415 u64 *s, *o;
5416 int i;
5417
5418 t4_get_port_stats(adap, idx, stats);
5419 for (i = 0, s = (u64 *)stats, o = (u64 *)offset;
5420 i < (sizeof(struct port_stats) / sizeof(u64));
5421 i++, s++, o++)
5422 *s -= *o;
5423}
5424
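/* Illustrative sketch (not part of the original driver): computing the
 * traffic seen since an earlier snapshot, e.g. from a periodic worker.
 * "pi" stands for the caller's port_info and "prev" for a snapshot that
 * was taken earlier with t4_get_port_stats().
 *
 *	struct port_stats delta;
 *
 *	t4_get_port_stats_offset(adap, pi->port_id, &delta, &prev);
 *	(delta.tx_frames, delta.rx_octets, ... now hold the counts
 *	 accumulated since "prev" was captured)
 */
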
56d36be4
DM
5425/**
5426 * t4_get_port_stats - collect port statistics
5427 * @adap: the adapter
5428 * @idx: the port index
5429 * @p: the stats structure to fill
5430 *
5431 * Collect statistics related to the given port from HW.
5432 */
5433void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p)
5434{
145ef8a5 5435 u32 bgmap = t4_get_mps_bg_map(adap, idx);
56d36be4
DM
5436
5437#define GET_STAT(name) \
0a57a536 5438 t4_read_reg64(adap, \
d14807dd 5439 (is_t4(adap->params.chip) ? PORT_REG(idx, MPS_PORT_STAT_##name##_L) : \
0a57a536 5440 T5_PORT_REG(idx, MPS_PORT_STAT_##name##_L)))
56d36be4
DM
5441#define GET_STAT_COM(name) t4_read_reg64(adap, MPS_STAT_##name##_L)
5442
5443 p->tx_octets = GET_STAT(TX_PORT_BYTES);
5444 p->tx_frames = GET_STAT(TX_PORT_FRAMES);
5445 p->tx_bcast_frames = GET_STAT(TX_PORT_BCAST);
5446 p->tx_mcast_frames = GET_STAT(TX_PORT_MCAST);
5447 p->tx_ucast_frames = GET_STAT(TX_PORT_UCAST);
5448 p->tx_error_frames = GET_STAT(TX_PORT_ERROR);
5449 p->tx_frames_64 = GET_STAT(TX_PORT_64B);
5450 p->tx_frames_65_127 = GET_STAT(TX_PORT_65B_127B);
5451 p->tx_frames_128_255 = GET_STAT(TX_PORT_128B_255B);
5452 p->tx_frames_256_511 = GET_STAT(TX_PORT_256B_511B);
5453 p->tx_frames_512_1023 = GET_STAT(TX_PORT_512B_1023B);
5454 p->tx_frames_1024_1518 = GET_STAT(TX_PORT_1024B_1518B);
5455 p->tx_frames_1519_max = GET_STAT(TX_PORT_1519B_MAX);
5456 p->tx_drop = GET_STAT(TX_PORT_DROP);
5457 p->tx_pause = GET_STAT(TX_PORT_PAUSE);
5458 p->tx_ppp0 = GET_STAT(TX_PORT_PPP0);
5459 p->tx_ppp1 = GET_STAT(TX_PORT_PPP1);
5460 p->tx_ppp2 = GET_STAT(TX_PORT_PPP2);
5461 p->tx_ppp3 = GET_STAT(TX_PORT_PPP3);
5462 p->tx_ppp4 = GET_STAT(TX_PORT_PPP4);
5463 p->tx_ppp5 = GET_STAT(TX_PORT_PPP5);
5464 p->tx_ppp6 = GET_STAT(TX_PORT_PPP6);
5465 p->tx_ppp7 = GET_STAT(TX_PORT_PPP7);
5466
5467 p->rx_octets = GET_STAT(RX_PORT_BYTES);
5468 p->rx_frames = GET_STAT(RX_PORT_FRAMES);
5469 p->rx_bcast_frames = GET_STAT(RX_PORT_BCAST);
5470 p->rx_mcast_frames = GET_STAT(RX_PORT_MCAST);
5471 p->rx_ucast_frames = GET_STAT(RX_PORT_UCAST);
5472 p->rx_too_long = GET_STAT(RX_PORT_MTU_ERROR);
5473 p->rx_jabber = GET_STAT(RX_PORT_MTU_CRC_ERROR);
5474 p->rx_fcs_err = GET_STAT(RX_PORT_CRC_ERROR);
5475 p->rx_len_err = GET_STAT(RX_PORT_LEN_ERROR);
5476 p->rx_symbol_err = GET_STAT(RX_PORT_SYM_ERROR);
5477 p->rx_runt = GET_STAT(RX_PORT_LESS_64B);
5478 p->rx_frames_64 = GET_STAT(RX_PORT_64B);
5479 p->rx_frames_65_127 = GET_STAT(RX_PORT_65B_127B);
5480 p->rx_frames_128_255 = GET_STAT(RX_PORT_128B_255B);
5481 p->rx_frames_256_511 = GET_STAT(RX_PORT_256B_511B);
5482 p->rx_frames_512_1023 = GET_STAT(RX_PORT_512B_1023B);
5483 p->rx_frames_1024_1518 = GET_STAT(RX_PORT_1024B_1518B);
5484 p->rx_frames_1519_max = GET_STAT(RX_PORT_1519B_MAX);
5485 p->rx_pause = GET_STAT(RX_PORT_PAUSE);
5486 p->rx_ppp0 = GET_STAT(RX_PORT_PPP0);
5487 p->rx_ppp1 = GET_STAT(RX_PORT_PPP1);
5488 p->rx_ppp2 = GET_STAT(RX_PORT_PPP2);
5489 p->rx_ppp3 = GET_STAT(RX_PORT_PPP3);
5490 p->rx_ppp4 = GET_STAT(RX_PORT_PPP4);
5491 p->rx_ppp5 = GET_STAT(RX_PORT_PPP5);
5492 p->rx_ppp6 = GET_STAT(RX_PORT_PPP6);
5493 p->rx_ppp7 = GET_STAT(RX_PORT_PPP7);
5494
5495 p->rx_ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_DROP_FRAME) : 0;
5496 p->rx_ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_DROP_FRAME) : 0;
5497 p->rx_ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_DROP_FRAME) : 0;
5498 p->rx_ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_DROP_FRAME) : 0;
5499 p->rx_trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_TRUNC_FRAME) : 0;
5500 p->rx_trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_TRUNC_FRAME) : 0;
5501 p->rx_trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_TRUNC_FRAME) : 0;
5502 p->rx_trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_TRUNC_FRAME) : 0;
5503
5504#undef GET_STAT
5505#undef GET_STAT_COM
5506}
5507
56d36be4 5508/**
65046e84 5509 * t4_get_lb_stats - collect loopback port statistics
56d36be4 5510 * @adap: the adapter
65046e84
HS
5511 * @idx: the loopback port index
5512 * @p: the stats structure to fill
56d36be4 5513 *
65046e84 5514 * Return HW statistics for the given loopback port.
56d36be4 5515 */
65046e84 5516void t4_get_lb_stats(struct adapter *adap, int idx, struct lb_port_stats *p)
56d36be4 5517{
65046e84 5518 u32 bgmap = t4_get_mps_bg_map(adap, idx);
56d36be4 5519
65046e84
HS
5520#define GET_STAT(name) \
5521 t4_read_reg64(adap, \
0d804338 5522 (is_t4(adap->params.chip) ? \
65046e84
HS
5523 PORT_REG(idx, MPS_PORT_STAT_LB_PORT_##name##_L) : \
5524 T5_PORT_REG(idx, MPS_PORT_STAT_LB_PORT_##name##_L)))
5525#define GET_STAT_COM(name) t4_read_reg64(adap, MPS_STAT_##name##_L)
56d36be4 5526
65046e84
HS
5527 p->octets = GET_STAT(BYTES);
5528 p->frames = GET_STAT(FRAMES);
5529 p->bcast_frames = GET_STAT(BCAST);
5530 p->mcast_frames = GET_STAT(MCAST);
5531 p->ucast_frames = GET_STAT(UCAST);
5532 p->error_frames = GET_STAT(ERROR);
5533
5534 p->frames_64 = GET_STAT(64B);
5535 p->frames_65_127 = GET_STAT(65B_127B);
5536 p->frames_128_255 = GET_STAT(128B_255B);
5537 p->frames_256_511 = GET_STAT(256B_511B);
5538 p->frames_512_1023 = GET_STAT(512B_1023B);
5539 p->frames_1024_1518 = GET_STAT(1024B_1518B);
5540 p->frames_1519_max = GET_STAT(1519B_MAX);
5541 p->drop = GET_STAT(DROP_FRAMES);
5542
5543 p->ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_DROP_FRAME) : 0;
5544 p->ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_DROP_FRAME) : 0;
5545 p->ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_DROP_FRAME) : 0;
5546 p->ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_LB_DROP_FRAME) : 0;
5547 p->trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_TRUNC_FRAME) : 0;
5548 p->trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_TRUNC_FRAME) : 0;
5549 p->trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_TRUNC_FRAME) : 0;
5550 p->trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_LB_TRUNC_FRAME) : 0;
56d36be4 5551
65046e84
HS
5552#undef GET_STAT
5553#undef GET_STAT_COM
56d36be4
DM
5554}
5555
f2b7e78d
VP
 5556 /**
 * t4_mk_filtdelwr - create a delete filter WR
5557 * @ftid: the filter ID
5558 * @wr: the filter work request to populate
5559 * @qid: ingress queue to receive the delete notification
5560 *
5561 * Creates a filter work request to delete the supplied filter. If @qid is
5562 * negative the delete notification is suppressed.
5563 */
5564void t4_mk_filtdelwr(unsigned int ftid, struct fw_filter_wr *wr, int qid)
5565{
5566 memset(wr, 0, sizeof(*wr));
f404f80c
HS
5567 wr->op_pkd = cpu_to_be32(FW_WR_OP_V(FW_FILTER_WR));
5568 wr->len16_pkd = cpu_to_be32(FW_WR_LEN16_V(sizeof(*wr) / 16));
5569 wr->tid_to_iq = cpu_to_be32(FW_FILTER_WR_TID_V(ftid) |
5570 FW_FILTER_WR_NOREPLY_V(qid < 0));
5571 wr->del_filter_to_l2tix = cpu_to_be32(FW_FILTER_WR_DEL_FILTER_F);
f2b7e78d 5572 if (qid >= 0)
f404f80c
HS
5573 wr->rx_chan_rx_rpl_iq =
5574 cpu_to_be16(FW_FILTER_WR_RX_RPL_IQ_V(qid));
f2b7e78d
VP
5575}
5576
56d36be4 5577#define INIT_CMD(var, cmd, rd_wr) do { \
f404f80c
HS
5578 (var).op_to_write = cpu_to_be32(FW_CMD_OP_V(FW_##cmd##_CMD) | \
5579 FW_CMD_REQUEST_F | \
5580 FW_CMD_##rd_wr##_F); \
5581 (var).retval_len16 = cpu_to_be32(FW_LEN16(var)); \
56d36be4
DM
5582} while (0)
5583
8caa1e84
VP
5584int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox,
5585 u32 addr, u32 val)
5586{
f404f80c 5587 u32 ldst_addrspace;
8caa1e84
VP
5588 struct fw_ldst_cmd c;
5589
5590 memset(&c, 0, sizeof(c));
f404f80c
HS
5591 ldst_addrspace = FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_FIRMWARE);
5592 c.op_to_addrspace = cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) |
5593 FW_CMD_REQUEST_F |
5594 FW_CMD_WRITE_F |
5595 ldst_addrspace);
5596 c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
5597 c.u.addrval.addr = cpu_to_be32(addr);
5598 c.u.addrval.val = cpu_to_be32(val);
8caa1e84
VP
5599
5600 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
5601}
5602
56d36be4
DM
5603/**
5604 * t4_mdio_rd - read a PHY register through MDIO
5605 * @adap: the adapter
5606 * @mbox: mailbox to use for the FW command
5607 * @phy_addr: the PHY address
5608 * @mmd: the PHY MMD to access (0 for clause 22 PHYs)
5609 * @reg: the register to read
5610 * @valp: where to store the value
5611 *
5612 * Issues a FW command through the given mailbox to read a PHY register.
5613 */
5614int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
5615 unsigned int mmd, unsigned int reg, u16 *valp)
5616{
5617 int ret;
f404f80c 5618 u32 ldst_addrspace;
56d36be4
DM
5619 struct fw_ldst_cmd c;
5620
5621 memset(&c, 0, sizeof(c));
f404f80c
HS
5622 ldst_addrspace = FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_MDIO);
5623 c.op_to_addrspace = cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) |
5624 FW_CMD_REQUEST_F | FW_CMD_READ_F |
5625 ldst_addrspace);
5626 c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
5627 c.u.mdio.paddr_mmd = cpu_to_be16(FW_LDST_CMD_PADDR_V(phy_addr) |
5628 FW_LDST_CMD_MMD_V(mmd));
5629 c.u.mdio.raddr = cpu_to_be16(reg);
56d36be4
DM
5630
5631 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
5632 if (ret == 0)
f404f80c 5633 *valp = be16_to_cpu(c.u.mdio.rval);
56d36be4
DM
5634 return ret;
5635}
5636
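/* Illustrative sketch (not part of the original driver): reading the
 * PMA/PMD device identifier of a clause-45 PHY at address 0 through the
 * PF's mailbox.  MDIO_MMD_PMAPMD and MDIO_DEVID1 come from <linux/mdio.h>;
 * adap->mbox is assumed to hold the usual PF mailbox number.
 *
 *	u16 id;
 *	int ret = t4_mdio_rd(adap, adap->mbox, 0, MDIO_MMD_PMAPMD,
 *			     MDIO_DEVID1, &id);
 *	if (!ret)
 *		dev_info(adap->pdev_dev, "PHY ID1: %#x\n", id);
 */
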
5637/**
5638 * t4_mdio_wr - write a PHY register through MDIO
5639 * @adap: the adapter
5640 * @mbox: mailbox to use for the FW command
5641 * @phy_addr: the PHY address
5642 * @mmd: the PHY MMD to access (0 for clause 22 PHYs)
5643 * @reg: the register to write
5644 * @valp: value to write
5645 *
5646 * Issues a FW command through the given mailbox to write a PHY register.
5647 */
5648int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
5649 unsigned int mmd, unsigned int reg, u16 val)
5650{
f404f80c 5651 u32 ldst_addrspace;
56d36be4
DM
5652 struct fw_ldst_cmd c;
5653
5654 memset(&c, 0, sizeof(c));
f404f80c
HS
5655 ldst_addrspace = FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_MDIO);
5656 c.op_to_addrspace = cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) |
5657 FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
5658 ldst_addrspace);
5659 c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
5660 c.u.mdio.paddr_mmd = cpu_to_be16(FW_LDST_CMD_PADDR_V(phy_addr) |
5661 FW_LDST_CMD_MMD_V(mmd));
5662 c.u.mdio.raddr = cpu_to_be16(reg);
5663 c.u.mdio.rval = cpu_to_be16(val);
56d36be4
DM
5664
5665 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
5666}
5667
68bce192
KS
5668/**
5669 * t4_sge_decode_idma_state - decode the idma state
 5670 * @adapter: the adapter
5671 * @state: the state idma is stuck in
5672 */
5673void t4_sge_decode_idma_state(struct adapter *adapter, int state)
5674{
5675 static const char * const t4_decode[] = {
5676 "IDMA_IDLE",
5677 "IDMA_PUSH_MORE_CPL_FIFO",
5678 "IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO",
5679 "Not used",
5680 "IDMA_PHYSADDR_SEND_PCIEHDR",
5681 "IDMA_PHYSADDR_SEND_PAYLOAD_FIRST",
5682 "IDMA_PHYSADDR_SEND_PAYLOAD",
5683 "IDMA_SEND_FIFO_TO_IMSG",
5684 "IDMA_FL_REQ_DATA_FL_PREP",
5685 "IDMA_FL_REQ_DATA_FL",
5686 "IDMA_FL_DROP",
5687 "IDMA_FL_H_REQ_HEADER_FL",
5688 "IDMA_FL_H_SEND_PCIEHDR",
5689 "IDMA_FL_H_PUSH_CPL_FIFO",
5690 "IDMA_FL_H_SEND_CPL",
5691 "IDMA_FL_H_SEND_IP_HDR_FIRST",
5692 "IDMA_FL_H_SEND_IP_HDR",
5693 "IDMA_FL_H_REQ_NEXT_HEADER_FL",
5694 "IDMA_FL_H_SEND_NEXT_PCIEHDR",
5695 "IDMA_FL_H_SEND_IP_HDR_PADDING",
5696 "IDMA_FL_D_SEND_PCIEHDR",
5697 "IDMA_FL_D_SEND_CPL_AND_IP_HDR",
5698 "IDMA_FL_D_REQ_NEXT_DATA_FL",
5699 "IDMA_FL_SEND_PCIEHDR",
5700 "IDMA_FL_PUSH_CPL_FIFO",
5701 "IDMA_FL_SEND_CPL",
5702 "IDMA_FL_SEND_PAYLOAD_FIRST",
5703 "IDMA_FL_SEND_PAYLOAD",
5704 "IDMA_FL_REQ_NEXT_DATA_FL",
5705 "IDMA_FL_SEND_NEXT_PCIEHDR",
5706 "IDMA_FL_SEND_PADDING",
5707 "IDMA_FL_SEND_COMPLETION_TO_IMSG",
5708 "IDMA_FL_SEND_FIFO_TO_IMSG",
5709 "IDMA_FL_REQ_DATAFL_DONE",
5710 "IDMA_FL_REQ_HEADERFL_DONE",
5711 };
5712 static const char * const t5_decode[] = {
5713 "IDMA_IDLE",
5714 "IDMA_ALMOST_IDLE",
5715 "IDMA_PUSH_MORE_CPL_FIFO",
5716 "IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO",
5717 "IDMA_SGEFLRFLUSH_SEND_PCIEHDR",
5718 "IDMA_PHYSADDR_SEND_PCIEHDR",
5719 "IDMA_PHYSADDR_SEND_PAYLOAD_FIRST",
5720 "IDMA_PHYSADDR_SEND_PAYLOAD",
5721 "IDMA_SEND_FIFO_TO_IMSG",
5722 "IDMA_FL_REQ_DATA_FL",
5723 "IDMA_FL_DROP",
5724 "IDMA_FL_DROP_SEND_INC",
5725 "IDMA_FL_H_REQ_HEADER_FL",
5726 "IDMA_FL_H_SEND_PCIEHDR",
5727 "IDMA_FL_H_PUSH_CPL_FIFO",
5728 "IDMA_FL_H_SEND_CPL",
5729 "IDMA_FL_H_SEND_IP_HDR_FIRST",
5730 "IDMA_FL_H_SEND_IP_HDR",
5731 "IDMA_FL_H_REQ_NEXT_HEADER_FL",
5732 "IDMA_FL_H_SEND_NEXT_PCIEHDR",
5733 "IDMA_FL_H_SEND_IP_HDR_PADDING",
5734 "IDMA_FL_D_SEND_PCIEHDR",
5735 "IDMA_FL_D_SEND_CPL_AND_IP_HDR",
5736 "IDMA_FL_D_REQ_NEXT_DATA_FL",
5737 "IDMA_FL_SEND_PCIEHDR",
5738 "IDMA_FL_PUSH_CPL_FIFO",
5739 "IDMA_FL_SEND_CPL",
5740 "IDMA_FL_SEND_PAYLOAD_FIRST",
5741 "IDMA_FL_SEND_PAYLOAD",
5742 "IDMA_FL_REQ_NEXT_DATA_FL",
5743 "IDMA_FL_SEND_NEXT_PCIEHDR",
5744 "IDMA_FL_SEND_PADDING",
5745 "IDMA_FL_SEND_COMPLETION_TO_IMSG",
5746 };
6df39753
HS
5747 static const char * const t6_decode[] = {
5748 "IDMA_IDLE",
5749 "IDMA_PUSH_MORE_CPL_FIFO",
5750 "IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO",
5751 "IDMA_SGEFLRFLUSH_SEND_PCIEHDR",
5752 "IDMA_PHYSADDR_SEND_PCIEHDR",
5753 "IDMA_PHYSADDR_SEND_PAYLOAD_FIRST",
5754 "IDMA_PHYSADDR_SEND_PAYLOAD",
5755 "IDMA_FL_REQ_DATA_FL",
5756 "IDMA_FL_DROP",
5757 "IDMA_FL_DROP_SEND_INC",
5758 "IDMA_FL_H_REQ_HEADER_FL",
5759 "IDMA_FL_H_SEND_PCIEHDR",
5760 "IDMA_FL_H_PUSH_CPL_FIFO",
5761 "IDMA_FL_H_SEND_CPL",
5762 "IDMA_FL_H_SEND_IP_HDR_FIRST",
5763 "IDMA_FL_H_SEND_IP_HDR",
5764 "IDMA_FL_H_REQ_NEXT_HEADER_FL",
5765 "IDMA_FL_H_SEND_NEXT_PCIEHDR",
5766 "IDMA_FL_H_SEND_IP_HDR_PADDING",
5767 "IDMA_FL_D_SEND_PCIEHDR",
5768 "IDMA_FL_D_SEND_CPL_AND_IP_HDR",
5769 "IDMA_FL_D_REQ_NEXT_DATA_FL",
5770 "IDMA_FL_SEND_PCIEHDR",
5771 "IDMA_FL_PUSH_CPL_FIFO",
5772 "IDMA_FL_SEND_CPL",
5773 "IDMA_FL_SEND_PAYLOAD_FIRST",
5774 "IDMA_FL_SEND_PAYLOAD",
5775 "IDMA_FL_REQ_NEXT_DATA_FL",
5776 "IDMA_FL_SEND_NEXT_PCIEHDR",
5777 "IDMA_FL_SEND_PADDING",
5778 "IDMA_FL_SEND_COMPLETION_TO_IMSG",
5779 };
68bce192 5780 static const u32 sge_regs[] = {
f061de42
HS
5781 SGE_DEBUG_DATA_LOW_INDEX_2_A,
5782 SGE_DEBUG_DATA_LOW_INDEX_3_A,
5783 SGE_DEBUG_DATA_HIGH_INDEX_10_A,
68bce192
KS
5784 };
5785 const char **sge_idma_decode;
5786 int sge_idma_decode_nstates;
5787 int i;
6df39753
HS
5788 unsigned int chip_version = CHELSIO_CHIP_VERSION(adapter->params.chip);
5789
5790 /* Select the right set of decode strings to dump depending on the
5791 * adapter chip type.
5792 */
5793 switch (chip_version) {
5794 case CHELSIO_T4:
5795 sge_idma_decode = (const char **)t4_decode;
5796 sge_idma_decode_nstates = ARRAY_SIZE(t4_decode);
5797 break;
5798
5799 case CHELSIO_T5:
5800 sge_idma_decode = (const char **)t5_decode;
5801 sge_idma_decode_nstates = ARRAY_SIZE(t5_decode);
5802 break;
5803
5804 case CHELSIO_T6:
5805 sge_idma_decode = (const char **)t6_decode;
5806 sge_idma_decode_nstates = ARRAY_SIZE(t6_decode);
5807 break;
5808
5809 default:
5810 dev_err(adapter->pdev_dev,
5811 "Unsupported chip version %d\n", chip_version);
5812 return;
5813 }
5822
5823 if (state < sge_idma_decode_nstates)
5824 CH_WARN(adapter, "idma state %s\n", sge_idma_decode[state]);
5825 else
5826 CH_WARN(adapter, "idma state %d unknown\n", state);
5827
5828 for (i = 0; i < ARRAY_SIZE(sge_regs); i++)
5829 CH_WARN(adapter, "SGE register %#x value %#x\n",
5830 sge_regs[i], t4_read_reg(adapter, sge_regs[i]));
5831}
5832
5d700ecb
HS
5833/**
5834 * t4_sge_ctxt_flush - flush the SGE context cache
5835 * @adap: the adapter
5836 * @mbox: mailbox to use for the FW command
5837 *
5838 * Issues a FW command through the given mailbox to flush the
5839 * SGE context cache.
5840 */
5841int t4_sge_ctxt_flush(struct adapter *adap, unsigned int mbox)
5842{
5843 int ret;
5844 u32 ldst_addrspace;
5845 struct fw_ldst_cmd c;
5846
5847 memset(&c, 0, sizeof(c));
5848 ldst_addrspace = FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_SGE_EGRC);
5849 c.op_to_addrspace = cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) |
5850 FW_CMD_REQUEST_F | FW_CMD_READ_F |
5851 ldst_addrspace);
5852 c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
5853 c.u.idctxt.msg_ctxtflush = cpu_to_be32(FW_LDST_CMD_CTXTFLUSH_F);
5854
5855 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
5856 return ret;
5857}
5858
56d36be4 5859/**
636f9d37
VP
5860 * t4_fw_hello - establish communication with FW
5861 * @adap: the adapter
5862 * @mbox: mailbox to use for the FW command
5863 * @evt_mbox: mailbox to receive async FW events
5864 * @master: specifies the caller's willingness to be the device master
5865 * @state: returns the current device state (if non-NULL)
56d36be4 5866 *
636f9d37
VP
5867 * Issues a command to establish communication with FW. Returns either
5868 * an error (negative integer) or the mailbox of the Master PF.
56d36be4
DM
5869 */
5870int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox,
5871 enum dev_master master, enum dev_state *state)
5872{
5873 int ret;
5874 struct fw_hello_cmd c;
636f9d37
VP
5875 u32 v;
5876 unsigned int master_mbox;
5877 int retries = FW_CMD_HELLO_RETRIES;
56d36be4 5878
636f9d37
VP
5879retry:
5880 memset(&c, 0, sizeof(c));
56d36be4 5881 INIT_CMD(c, HELLO, WRITE);
f404f80c 5882 c.err_to_clearinit = cpu_to_be32(
5167865a
HS
5883 FW_HELLO_CMD_MASTERDIS_V(master == MASTER_CANT) |
5884 FW_HELLO_CMD_MASTERFORCE_V(master == MASTER_MUST) |
f404f80c
HS
5885 FW_HELLO_CMD_MBMASTER_V(master == MASTER_MUST ?
5886 mbox : FW_HELLO_CMD_MBMASTER_M) |
5167865a
HS
5887 FW_HELLO_CMD_MBASYNCNOT_V(evt_mbox) |
5888 FW_HELLO_CMD_STAGE_V(fw_hello_cmd_stage_os) |
5889 FW_HELLO_CMD_CLEARINIT_F);
56d36be4 5890
636f9d37
VP
5891 /*
5892 * Issue the HELLO command to the firmware. If it's not successful
5893 * but indicates that we got a "busy" or "timeout" condition, retry
31d55c2d
HS
5894 * the HELLO until we exhaust our retry limit. If we do exceed our
5895 * retry limit, check to see if the firmware left us any error
5896 * information and report that if so.
636f9d37 5897 */
56d36be4 5898 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
636f9d37
VP
5899 if (ret < 0) {
5900 if ((ret == -EBUSY || ret == -ETIMEDOUT) && retries-- > 0)
5901 goto retry;
f061de42 5902 if (t4_read_reg(adap, PCIE_FW_A) & PCIE_FW_ERR_F)
31d55c2d 5903 t4_report_fw_error(adap);
636f9d37
VP
5904 return ret;
5905 }
5906
f404f80c 5907 v = be32_to_cpu(c.err_to_clearinit);
5167865a 5908 master_mbox = FW_HELLO_CMD_MBMASTER_G(v);
636f9d37 5909 if (state) {
5167865a 5910 if (v & FW_HELLO_CMD_ERR_F)
56d36be4 5911 *state = DEV_STATE_ERR;
5167865a 5912 else if (v & FW_HELLO_CMD_INIT_F)
636f9d37 5913 *state = DEV_STATE_INIT;
56d36be4
DM
5914 else
5915 *state = DEV_STATE_UNINIT;
5916 }
636f9d37
VP
5917
5918 /*
5919 * If we're not the Master PF then we need to wait around for the
5920 * Master PF Driver to finish setting up the adapter.
5921 *
5922 * Note that we also do this wait if we're a non-Master-capable PF and
5923 * there is no current Master PF; a Master PF may show up momentarily
5924 * and we wouldn't want to fail pointlessly. (This can happen when an
5925 * OS loads lots of different drivers rapidly at the same time). In
5926 * this case, the Master PF returned by the firmware will be
b2e1a3f0 5927 * PCIE_FW_MASTER_M so the test below will work ...
636f9d37 5928 */
5167865a 5929 if ((v & (FW_HELLO_CMD_ERR_F|FW_HELLO_CMD_INIT_F)) == 0 &&
636f9d37
VP
5930 master_mbox != mbox) {
5931 int waiting = FW_CMD_HELLO_TIMEOUT;
5932
5933 /*
5934 * Wait for the firmware to either indicate an error or
5935 * initialized state. If we see either of these we bail out
5936 * and report the issue to the caller. If we exhaust the
5937 * "hello timeout" and we haven't exhausted our retries, try
5938 * again. Otherwise bail with a timeout error.
5939 */
5940 for (;;) {
5941 u32 pcie_fw;
5942
5943 msleep(50);
5944 waiting -= 50;
5945
5946 /*
 5947 * If neither Error nor Initialized is indicated
 5948 * by the firmware, keep waiting until we exhaust our
5949 * timeout ... and then retry if we haven't exhausted
5950 * our retries ...
5951 */
f061de42
HS
5952 pcie_fw = t4_read_reg(adap, PCIE_FW_A);
5953 if (!(pcie_fw & (PCIE_FW_ERR_F|PCIE_FW_INIT_F))) {
636f9d37
VP
5954 if (waiting <= 0) {
5955 if (retries-- > 0)
5956 goto retry;
5957
5958 return -ETIMEDOUT;
5959 }
5960 continue;
5961 }
5962
5963 /*
 5964 * We either have an Error or Initialized condition;
 5965 * report errors preferentially.
5966 */
5967 if (state) {
f061de42 5968 if (pcie_fw & PCIE_FW_ERR_F)
636f9d37 5969 *state = DEV_STATE_ERR;
f061de42 5970 else if (pcie_fw & PCIE_FW_INIT_F)
636f9d37
VP
5971 *state = DEV_STATE_INIT;
5972 }
5973
5974 /*
5975 * If we arrived before a Master PF was selected and
5976 * there's not a valid Master PF, grab its identity
5977 * for our caller.
5978 */
b2e1a3f0 5979 if (master_mbox == PCIE_FW_MASTER_M &&
f061de42 5980 (pcie_fw & PCIE_FW_MASTER_VLD_F))
b2e1a3f0 5981 master_mbox = PCIE_FW_MASTER_G(pcie_fw);
636f9d37
VP
5982 break;
5983 }
5984 }
5985
5986 return master_mbox;
56d36be4
DM
5987}
5988
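/* Illustrative sketch (not part of the original driver): contending for
 * Master PF at probe time.  On success the return value is the Master
 * PF's mailbox, so comparing it with our own mailbox tells us whether we
 * won mastership (MASTER_PF is the adapter flag the driver uses for this
 * elsewhere).
 *
 *	enum dev_state state;
 *	int mbox = t4_fw_hello(adap, adap->mbox, adap->mbox,
 *			       MASTER_MAY, &state);
 *	if (mbox < 0)
 *		return mbox;
 *	if (mbox == adap->mbox)
 *		adap->flags |= MASTER_PF;
 */
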
5989/**
5990 * t4_fw_bye - end communication with FW
5991 * @adap: the adapter
5992 * @mbox: mailbox to use for the FW command
5993 *
5994 * Issues a command to terminate communication with FW.
5995 */
5996int t4_fw_bye(struct adapter *adap, unsigned int mbox)
5997{
5998 struct fw_bye_cmd c;
5999
0062b15c 6000 memset(&c, 0, sizeof(c));
56d36be4
DM
6001 INIT_CMD(c, BYE, WRITE);
6002 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
6003}
6004
6005/**
 6006 * t4_early_init - ask FW to initialize the device
6007 * @adap: the adapter
6008 * @mbox: mailbox to use for the FW command
6009 *
6010 * Issues a command to FW to partially initialize the device. This
6011 * performs initialization that generally doesn't depend on user input.
6012 */
6013int t4_early_init(struct adapter *adap, unsigned int mbox)
6014{
6015 struct fw_initialize_cmd c;
6016
0062b15c 6017 memset(&c, 0, sizeof(c));
56d36be4
DM
6018 INIT_CMD(c, INITIALIZE, WRITE);
6019 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
6020}
6021
6022/**
6023 * t4_fw_reset - issue a reset to FW
6024 * @adap: the adapter
6025 * @mbox: mailbox to use for the FW command
6026 * @reset: specifies the type of reset to perform
6027 *
6028 * Issues a reset command of the specified type to FW.
6029 */
6030int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset)
6031{
6032 struct fw_reset_cmd c;
6033
0062b15c 6034 memset(&c, 0, sizeof(c));
56d36be4 6035 INIT_CMD(c, RESET, WRITE);
f404f80c 6036 c.val = cpu_to_be32(reset);
56d36be4
DM
6037 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
6038}
6039
26f7cbc0
VP
6040/**
6041 * t4_fw_halt - issue a reset/halt to FW and put uP into RESET
6042 * @adap: the adapter
6043 * @mbox: mailbox to use for the FW RESET command (if desired)
6044 * @force: force uP into RESET even if FW RESET command fails
6045 *
6046 * Issues a RESET command to firmware (if desired) with a HALT indication
6047 * and then puts the microprocessor into RESET state. The RESET command
6048 * will only be issued if a legitimate mailbox is provided (mbox <=
b2e1a3f0 6049 * PCIE_FW_MASTER_M).
26f7cbc0
VP
6050 *
6051 * This is generally used in order for the host to safely manipulate the
6052 * adapter without fear of conflicting with whatever the firmware might
6053 * be doing. The only way out of this state is to RESTART the firmware
6054 * ...
6055 */
de5b8677 6056static int t4_fw_halt(struct adapter *adap, unsigned int mbox, int force)
26f7cbc0
VP
6057{
6058 int ret = 0;
6059
6060 /*
6061 * If a legitimate mailbox is provided, issue a RESET command
6062 * with a HALT indication.
6063 */
b2e1a3f0 6064 if (mbox <= PCIE_FW_MASTER_M) {
26f7cbc0
VP
6065 struct fw_reset_cmd c;
6066
6067 memset(&c, 0, sizeof(c));
6068 INIT_CMD(c, RESET, WRITE);
f404f80c
HS
6069 c.val = cpu_to_be32(PIORST_F | PIORSTMODE_F);
6070 c.halt_pkd = cpu_to_be32(FW_RESET_CMD_HALT_F);
26f7cbc0
VP
6071 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
6072 }
6073
6074 /*
6075 * Normally we won't complete the operation if the firmware RESET
6076 * command fails but if our caller insists we'll go ahead and put the
6077 * uP into RESET. This can be useful if the firmware is hung or even
6078 * missing ... We'll have to take the risk of putting the uP into
6079 * RESET without the cooperation of firmware in that case.
6080 *
6081 * We also force the firmware's HALT flag to be on in case we bypassed
6082 * the firmware RESET command above or we're dealing with old firmware
6083 * which doesn't have the HALT capability. This will serve as a flag
6084 * for the incoming firmware to know that it's coming out of a HALT
6085 * rather than a RESET ... if it's new enough to understand that ...
6086 */
6087 if (ret == 0 || force) {
89c3a86c 6088 t4_set_reg_field(adap, CIM_BOOT_CFG_A, UPCRST_F, UPCRST_F);
f061de42 6089 t4_set_reg_field(adap, PCIE_FW_A, PCIE_FW_HALT_F,
b2e1a3f0 6090 PCIE_FW_HALT_F);
26f7cbc0
VP
6091 }
6092
6093 /*
6094 * And we always return the result of the firmware RESET command
6095 * even when we force the uP into RESET ...
6096 */
6097 return ret;
6098}
6099
6100/**
6101 * t4_fw_restart - restart the firmware by taking the uP out of RESET
 6102 * @adap: the adapter
 * @mbox: mailbox to use for the FW RESET command (if desired)
 6103 * @reset: if we want to do a RESET to restart things
6104 *
6105 * Restart firmware previously halted by t4_fw_halt(). On successful
6106 * return the previous PF Master remains as the new PF Master and there
6107 * is no need to issue a new HELLO command, etc.
6108 *
6109 * We do this in two ways:
6110 *
6111 * 1. If we're dealing with newer firmware we'll simply want to take
6112 * the chip's microprocessor out of RESET. This will cause the
6113 * firmware to start up from its start vector. And then we'll loop
6114 * until the firmware indicates it's started again (PCIE_FW.HALT
6115 * reset to 0) or we timeout.
6116 *
6117 * 2. If we're dealing with older firmware then we'll need to RESET
6118 * the chip since older firmware won't recognize the PCIE_FW.HALT
6119 * flag and automatically RESET itself on startup.
6120 */
de5b8677 6121static int t4_fw_restart(struct adapter *adap, unsigned int mbox, int reset)
26f7cbc0
VP
6122{
6123 if (reset) {
6124 /*
6125 * Since we're directing the RESET instead of the firmware
6126 * doing it automatically, we need to clear the PCIE_FW.HALT
6127 * bit.
6128 */
f061de42 6129 t4_set_reg_field(adap, PCIE_FW_A, PCIE_FW_HALT_F, 0);
26f7cbc0
VP
6130
6131 /*
6132 * If we've been given a valid mailbox, first try to get the
6133 * firmware to do the RESET. If that works, great and we can
6134 * return success. Otherwise, if we haven't been given a
6135 * valid mailbox or the RESET command failed, fall back to
6136 * hitting the chip with a hammer.
6137 */
b2e1a3f0 6138 if (mbox <= PCIE_FW_MASTER_M) {
89c3a86c 6139 t4_set_reg_field(adap, CIM_BOOT_CFG_A, UPCRST_F, 0);
26f7cbc0
VP
6140 msleep(100);
6141 if (t4_fw_reset(adap, mbox,
0d804338 6142 PIORST_F | PIORSTMODE_F) == 0)
26f7cbc0
VP
6143 return 0;
6144 }
6145
0d804338 6146 t4_write_reg(adap, PL_RST_A, PIORST_F | PIORSTMODE_F);
26f7cbc0
VP
6147 msleep(2000);
6148 } else {
6149 int ms;
6150
89c3a86c 6151 t4_set_reg_field(adap, CIM_BOOT_CFG_A, UPCRST_F, 0);
26f7cbc0 6152 for (ms = 0; ms < FW_CMD_MAX_TIMEOUT; ) {
f061de42 6153 if (!(t4_read_reg(adap, PCIE_FW_A) & PCIE_FW_HALT_F))
26f7cbc0
VP
6154 return 0;
6155 msleep(100);
6156 ms += 100;
6157 }
6158 return -ETIMEDOUT;
6159 }
6160 return 0;
6161}
6162
6163/**
6164 * t4_fw_upgrade - perform all of the steps necessary to upgrade FW
6165 * @adap: the adapter
6166 * @mbox: mailbox to use for the FW RESET command (if desired)
6167 * @fw_data: the firmware image to write
6168 * @size: image size
6169 * @force: force upgrade even if firmware doesn't cooperate
6170 *
6171 * Perform all of the steps necessary for upgrading an adapter's
6172 * firmware image. Normally this requires the cooperation of the
6173 * existing firmware in order to halt all existing activities
6174 * but if an invalid mailbox token is passed in we skip that step
6175 * (though we'll still put the adapter microprocessor into RESET in
6176 * that case).
6177 *
6178 * On successful return the new firmware will have been loaded and
6179 * the adapter will have been fully RESET losing all previous setup
6180 * state. On unsuccessful return the adapter may be completely hosed ...
6181 * positive errno indicates that the adapter is ~probably~ intact, a
6182 * negative errno indicates that things are looking bad ...
6183 */
22c0b963
HS
6184int t4_fw_upgrade(struct adapter *adap, unsigned int mbox,
6185 const u8 *fw_data, unsigned int size, int force)
26f7cbc0
VP
6186{
6187 const struct fw_hdr *fw_hdr = (const struct fw_hdr *)fw_data;
6188 int reset, ret;
6189
79af221d
HS
6190 if (!t4_fw_matches_chip(adap, fw_hdr))
6191 return -EINVAL;
6192
26f7cbc0
VP
6193 ret = t4_fw_halt(adap, mbox, force);
6194 if (ret < 0 && !force)
6195 return ret;
6196
6197 ret = t4_load_fw(adap, fw_data, size);
6198 if (ret < 0)
6199 return ret;
6200
6201 /*
6202 * Older versions of the firmware don't understand the new
6203 * PCIE_FW.HALT flag and so won't know to perform a RESET when they
6204 * restart. So for newly loaded older firmware we'll have to do the
6205 * RESET for it so it starts up on a clean slate. We can tell if
6206 * the newly loaded firmware will handle this right by checking
6207 * its header flags to see if it advertises the capability.
6208 */
f404f80c 6209 reset = ((be32_to_cpu(fw_hdr->flags) & FW_HDR_FLAGS_RESET_HALT) == 0);
26f7cbc0
VP
6210 return t4_fw_restart(adap, mbox, reset);
6211}
6212
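/* Illustrative sketch (not part of the original driver): flashing an image
 * obtained with request_firmware().  The file name is only an example of
 * the driver's usual cxgb4/t*fw.bin naming.  A positive return from
 * t4_fw_upgrade() suggests the adapter is probably still intact, a
 * negative one that it may not be.
 *
 *	const struct firmware *fw;
 *	int ret = request_firmware(&fw, "cxgb4/t5fw.bin", adap->pdev_dev);
 *	if (!ret) {
 *		ret = t4_fw_upgrade(adap, adap->mbox, fw->data, fw->size, 0);
 *		release_firmware(fw);
 *	}
 */
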
acac5962
HS
6213/**
6214 * t4_fl_pkt_align - return the fl packet alignment
6215 * @adap: the adapter
6216 *
6217 * T4 has a single field to specify the packing and padding boundary.
 6218 * T5 onwards has separate fields for this, and hence the alignment for
 6219 * the next packet offset is the maximum of these two.
6220 *
6221 */
6222int t4_fl_pkt_align(struct adapter *adap)
6223{
6224 u32 sge_control, sge_control2;
6225 unsigned int ingpadboundary, ingpackboundary, fl_align, ingpad_shift;
6226
6227 sge_control = t4_read_reg(adap, SGE_CONTROL_A);
6228
6229 /* T4 uses a single control field to specify both the PCIe Padding and
6230 * Packing Boundary. T5 introduced the ability to specify these
6231 * separately. The actual Ingress Packet Data alignment boundary
6232 * within Packed Buffer Mode is the maximum of these two
6233 * specifications. (Note that it makes no real practical sense to
6234 * have the Pading Boudary be larger than the Packing Boundary but you
6235 * could set the chip up that way and, in fact, legacy T4 code would
6236 * end doing this because it would initialize the Padding Boundary and
6237 * leave the Packing Boundary initialized to 0 (16 bytes).)
6238 * Padding Boundary values in T6 starts from 8B,
6239 * where as it is 32B for T4 and T5.
6240 */
6241 if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
6242 ingpad_shift = INGPADBOUNDARY_SHIFT_X;
6243 else
6244 ingpad_shift = T6_INGPADBOUNDARY_SHIFT_X;
6245
6246 ingpadboundary = 1 << (INGPADBOUNDARY_G(sge_control) + ingpad_shift);
6247
6248 fl_align = ingpadboundary;
6249 if (!is_t4(adap->params.chip)) {
6250 /* T5 has a weird interpretation of one of the PCIe Packing
6251 * Boundary values. No idea why ...
6252 */
6253 sge_control2 = t4_read_reg(adap, SGE_CONTROL2_A);
6254 ingpackboundary = INGPACKBOUNDARY_G(sge_control2);
6255 if (ingpackboundary == INGPACKBOUNDARY_16B_X)
6256 ingpackboundary = 16;
6257 else
6258 ingpackboundary = 1 << (ingpackboundary +
6259 INGPACKBOUNDARY_SHIFT_X);
6260
6261 fl_align = max(ingpadboundary, ingpackboundary);
6262 }
6263 return fl_align;
6264}
6265
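/* Illustrative sketch (not part of the original driver): a free-list
 * consumer would round its RX buffer size up to this boundary, e.g. with
 * the kernel's ALIGN() helper, so that packed payloads start on the
 * required boundary.  RX_BUF_LEN is a stand-in for whatever size the
 * caller actually uses.
 *
 *	unsigned int fl_align = t4_fl_pkt_align(adap);
 *	unsigned int buf_len  = ALIGN(RX_BUF_LEN, fl_align);
 */
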
636f9d37
VP
6266/**
6267 * t4_fixup_host_params - fix up host-dependent parameters
6268 * @adap: the adapter
6269 * @page_size: the host's Base Page Size
6270 * @cache_line_size: the host's Cache Line Size
6271 *
6272 * Various registers in T4 contain values which are dependent on the
6273 * host's Base Page and Cache Line Sizes. This function will fix all of
6274 * those registers with the appropriate values as passed in ...
6275 */
6276int t4_fixup_host_params(struct adapter *adap, unsigned int page_size,
6277 unsigned int cache_line_size)
6278{
6279 unsigned int page_shift = fls(page_size) - 1;
6280 unsigned int sge_hps = page_shift - 10;
6281 unsigned int stat_len = cache_line_size > 64 ? 128 : 64;
6282 unsigned int fl_align = cache_line_size < 32 ? 32 : cache_line_size;
6283 unsigned int fl_align_log = fls(fl_align) - 1;
acac5962 6284 unsigned int ingpad;
636f9d37 6285
f612b815
HS
6286 t4_write_reg(adap, SGE_HOST_PAGE_SIZE_A,
6287 HOSTPAGESIZEPF0_V(sge_hps) |
6288 HOSTPAGESIZEPF1_V(sge_hps) |
6289 HOSTPAGESIZEPF2_V(sge_hps) |
6290 HOSTPAGESIZEPF3_V(sge_hps) |
6291 HOSTPAGESIZEPF4_V(sge_hps) |
6292 HOSTPAGESIZEPF5_V(sge_hps) |
6293 HOSTPAGESIZEPF6_V(sge_hps) |
6294 HOSTPAGESIZEPF7_V(sge_hps));
636f9d37 6295
ce8f407a 6296 if (is_t4(adap->params.chip)) {
f612b815
HS
6297 t4_set_reg_field(adap, SGE_CONTROL_A,
6298 INGPADBOUNDARY_V(INGPADBOUNDARY_M) |
6299 EGRSTATUSPAGESIZE_F,
6300 INGPADBOUNDARY_V(fl_align_log -
6301 INGPADBOUNDARY_SHIFT_X) |
6302 EGRSTATUSPAGESIZE_V(stat_len != 64));
ce8f407a
HS
6303 } else {
6304 /* T5 introduced the separation of the Free List Padding and
6305 * Packing Boundaries. Thus, we can select a smaller Padding
6306 * Boundary to avoid uselessly chewing up PCIe Link and Memory
6307 * Bandwidth, and use a Packing Boundary which is large enough
6308 * to avoid false sharing between CPUs, etc.
6309 *
6310 * For the PCI Link, the smaller the Padding Boundary the
6311 * better. For the Memory Controller, a smaller Padding
6312 * Boundary is better until we cross under the Memory Line
6313 * Size (the minimum unit of transfer to/from Memory). If we
6314 * have a Padding Boundary which is smaller than the Memory
6315 * Line Size, that'll involve a Read-Modify-Write cycle on the
6316 * Memory Controller which is never good. For T5 the smallest
6317 * Padding Boundary which we can select is 32 bytes which is
6318 * larger than any known Memory Controller Line Size so we'll
6319 * use that.
6320 *
6321 * T5 has a different interpretation of the "0" value for the
6322 * Packing Boundary. This corresponds to 16 bytes instead of
6323 * the expected 32 bytes. We never have a Packing Boundary
6324 * less than 32 bytes so we can't use that special value but
6325 * on the other hand, if we wanted 32 bytes, the best we can
6326 * really do is 64 bytes.
6327 */
6328 if (fl_align <= 32) {
6329 fl_align = 64;
6330 fl_align_log = 6;
6331 }
acac5962
HS
6332
6333 if (is_t5(adap->params.chip))
6334 ingpad = INGPCIEBOUNDARY_32B_X;
6335 else
6336 ingpad = T6_INGPADBOUNDARY_32B_X;
6337
f612b815
HS
6338 t4_set_reg_field(adap, SGE_CONTROL_A,
6339 INGPADBOUNDARY_V(INGPADBOUNDARY_M) |
6340 EGRSTATUSPAGESIZE_F,
acac5962 6341 INGPADBOUNDARY_V(ingpad) |
f612b815 6342 EGRSTATUSPAGESIZE_V(stat_len != 64));
ce8f407a
HS
6343 t4_set_reg_field(adap, SGE_CONTROL2_A,
6344 INGPACKBOUNDARY_V(INGPACKBOUNDARY_M),
6345 INGPACKBOUNDARY_V(fl_align_log -
f612b815 6346 INGPACKBOUNDARY_SHIFT_X));
ce8f407a 6347 }
636f9d37
VP
6348 /*
6349 * Adjust various SGE Free List Host Buffer Sizes.
6350 *
6351 * This is something of a crock since we're using fixed indices into
6352 * the array which are also known by the sge.c code and the T4
6353 * Firmware Configuration File. We need to come up with a much better
6354 * approach to managing this array. For now, the first four entries
6355 * are:
6356 *
6357 * 0: Host Page Size
6358 * 1: 64KB
6359 * 2: Buffer size corresponding to 1500 byte MTU (unpacked mode)
6360 * 3: Buffer size corresponding to 9000 byte MTU (unpacked mode)
6361 *
6362 * For the single-MTU buffers in unpacked mode we need to include
6363 * space for the SGE Control Packet Shift, 14 byte Ethernet header,
6364 * possible 4 byte VLAN tag, all rounded up to the next Ingress Packet
dbedd44e 6365 * Padding boundary. All of these are accommodated in the Factory
636f9d37
VP
6366 * Default Firmware Configuration File but we need to adjust it for
6367 * this host's cache line size.
6368 */
f612b815
HS
6369 t4_write_reg(adap, SGE_FL_BUFFER_SIZE0_A, page_size);
6370 t4_write_reg(adap, SGE_FL_BUFFER_SIZE2_A,
6371 (t4_read_reg(adap, SGE_FL_BUFFER_SIZE2_A) + fl_align-1)
636f9d37 6372 & ~(fl_align-1));
f612b815
HS
6373 t4_write_reg(adap, SGE_FL_BUFFER_SIZE3_A,
6374 (t4_read_reg(adap, SGE_FL_BUFFER_SIZE3_A) + fl_align-1)
636f9d37
VP
6375 & ~(fl_align-1));
6376
0d804338 6377 t4_write_reg(adap, ULP_RX_TDDP_PSZ_A, HPZ0_V(page_shift - 12));
636f9d37
VP
6378
6379 return 0;
6380}
6381
6382/**
6383 * t4_fw_initialize - ask FW to initialize the device
6384 * @adap: the adapter
6385 * @mbox: mailbox to use for the FW command
6386 *
6387 * Issues a command to FW to partially initialize the device. This
6388 * performs initialization that generally doesn't depend on user input.
6389 */
6390int t4_fw_initialize(struct adapter *adap, unsigned int mbox)
6391{
6392 struct fw_initialize_cmd c;
6393
6394 memset(&c, 0, sizeof(c));
6395 INIT_CMD(c, INITIALIZE, WRITE);
6396 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
6397}
6398
56d36be4 6399/**
01b69614 6400 * t4_query_params_rw - query FW or device parameters
56d36be4
DM
6401 * @adap: the adapter
6402 * @mbox: mailbox to use for the FW command
6403 * @pf: the PF
6404 * @vf: the VF
6405 * @nparams: the number of parameters
6406 * @params: the parameter names
6407 * @val: the parameter values
01b69614 6408 * @rw: Write and read flag
56d36be4
DM
6409 *
6410 * Reads the value of FW or device parameters. Up to 7 parameters can be
6411 * queried at once.
6412 */
01b69614
HS
6413int t4_query_params_rw(struct adapter *adap, unsigned int mbox, unsigned int pf,
6414 unsigned int vf, unsigned int nparams, const u32 *params,
6415 u32 *val, int rw)
56d36be4
DM
6416{
6417 int i, ret;
6418 struct fw_params_cmd c;
6419 __be32 *p = &c.param[0].mnem;
6420
6421 if (nparams > 7)
6422 return -EINVAL;
6423
6424 memset(&c, 0, sizeof(c));
f404f80c
HS
6425 c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_PARAMS_CMD) |
6426 FW_CMD_REQUEST_F | FW_CMD_READ_F |
6427 FW_PARAMS_CMD_PFN_V(pf) |
6428 FW_PARAMS_CMD_VFN_V(vf));
6429 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
6430
01b69614
HS
6431 for (i = 0; i < nparams; i++) {
6432 *p++ = cpu_to_be32(*params++);
6433 if (rw)
6434 *p = cpu_to_be32(*(val + i));
6435 p++;
6436 }
56d36be4
DM
6437
6438 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
6439 if (ret == 0)
6440 for (i = 0, p = &c.param[0].val; i < nparams; i++, p += 2)
f404f80c 6441 *val++ = be32_to_cpu(*p);
56d36be4
DM
6442 return ret;
6443}
6444
01b69614
HS
6445int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
6446 unsigned int vf, unsigned int nparams, const u32 *params,
6447 u32 *val)
6448{
6449 return t4_query_params_rw(adap, mbox, pf, vf, nparams, params, val, 0);
6450}
6451
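/* Illustrative sketch (not part of the original driver): querying the
 * device's port vector.  The FW_PARAMS_* values come from t4fw_api.h;
 * adap->mbox and adap->pf are assumed to be the usual PF bookkeeping
 * fields.
 *
 *	u32 param = FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
 *		    FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PORTVEC);
 *	u32 port_vec;
 *	int ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1,
 *				  &param, &port_vec);
 *	if (!ret)
 *		dev_info(adap->pdev_dev, "port vector: %#x\n", port_vec);
 */
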
688848b1 6452/**
01b69614 6453 * t4_set_params_timeout - sets FW or device parameters
688848b1
AB
6454 * @adap: the adapter
6455 * @mbox: mailbox to use for the FW command
6456 * @pf: the PF
6457 * @vf: the VF
6458 * @nparams: the number of parameters
6459 * @params: the parameter names
6460 * @val: the parameter values
01b69614 6461 * @timeout: the timeout time
688848b1 6462 *
688848b1
AB
6463 * Sets the value of FW or device parameters. Up to 7 parameters can be
6464 * specified at once.
6465 */
01b69614 6466int t4_set_params_timeout(struct adapter *adap, unsigned int mbox,
688848b1
AB
6467 unsigned int pf, unsigned int vf,
6468 unsigned int nparams, const u32 *params,
01b69614 6469 const u32 *val, int timeout)
688848b1
AB
6470{
6471 struct fw_params_cmd c;
6472 __be32 *p = &c.param[0].mnem;
6473
6474 if (nparams > 7)
6475 return -EINVAL;
6476
6477 memset(&c, 0, sizeof(c));
e2ac9628 6478 c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_PARAMS_CMD) |
01b69614
HS
6479 FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
6480 FW_PARAMS_CMD_PFN_V(pf) |
6481 FW_PARAMS_CMD_VFN_V(vf));
688848b1
AB
6482 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
6483
6484 while (nparams--) {
6485 *p++ = cpu_to_be32(*params++);
6486 *p++ = cpu_to_be32(*val++);
6487 }
6488
01b69614 6489 return t4_wr_mbox_timeout(adap, mbox, &c, sizeof(c), NULL, timeout);
688848b1
AB
6490}
6491
56d36be4
DM
6492/**
6493 * t4_set_params - sets FW or device parameters
6494 * @adap: the adapter
6495 * @mbox: mailbox to use for the FW command
6496 * @pf: the PF
6497 * @vf: the VF
6498 * @nparams: the number of parameters
6499 * @params: the parameter names
6500 * @val: the parameter values
6501 *
6502 * Sets the value of FW or device parameters. Up to 7 parameters can be
6503 * specified at once.
6504 */
6505int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
6506 unsigned int vf, unsigned int nparams, const u32 *params,
6507 const u32 *val)
6508{
01b69614
HS
6509 return t4_set_params_timeout(adap, mbox, pf, vf, nparams, params, val,
6510 FW_CMD_MAX_TIMEOUT);
56d36be4
DM
6511}
6512
6513/**
6514 * t4_cfg_pfvf - configure PF/VF resource limits
6515 * @adap: the adapter
6516 * @mbox: mailbox to use for the FW command
6517 * @pf: the PF being configured
6518 * @vf: the VF being configured
6519 * @txq: the max number of egress queues
6520 * @txq_eth_ctrl: the max number of egress Ethernet or control queues
6521 * @rxqi: the max number of interrupt-capable ingress queues
6522 * @rxq: the max number of interruptless ingress queues
6523 * @tc: the PCI traffic class
6524 * @vi: the max number of virtual interfaces
6525 * @cmask: the channel access rights mask for the PF/VF
6526 * @pmask: the port access rights mask for the PF/VF
6527 * @nexact: the maximum number of exact MPS filters
6528 * @rcaps: read capabilities
6529 * @wxcaps: write/execute capabilities
6530 *
6531 * Configures resource limits and capabilities for a physical or virtual
6532 * function.
6533 */
6534int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf,
6535 unsigned int vf, unsigned int txq, unsigned int txq_eth_ctrl,
6536 unsigned int rxqi, unsigned int rxq, unsigned int tc,
6537 unsigned int vi, unsigned int cmask, unsigned int pmask,
6538 unsigned int nexact, unsigned int rcaps, unsigned int wxcaps)
6539{
6540 struct fw_pfvf_cmd c;
6541
6542 memset(&c, 0, sizeof(c));
f404f80c
HS
6543 c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_PFVF_CMD) | FW_CMD_REQUEST_F |
6544 FW_CMD_WRITE_F | FW_PFVF_CMD_PFN_V(pf) |
6545 FW_PFVF_CMD_VFN_V(vf));
6546 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
6547 c.niqflint_niq = cpu_to_be32(FW_PFVF_CMD_NIQFLINT_V(rxqi) |
6548 FW_PFVF_CMD_NIQ_V(rxq));
6549 c.type_to_neq = cpu_to_be32(FW_PFVF_CMD_CMASK_V(cmask) |
6550 FW_PFVF_CMD_PMASK_V(pmask) |
6551 FW_PFVF_CMD_NEQ_V(txq));
6552 c.tc_to_nexactf = cpu_to_be32(FW_PFVF_CMD_TC_V(tc) |
6553 FW_PFVF_CMD_NVI_V(vi) |
6554 FW_PFVF_CMD_NEXACTF_V(nexact));
6555 c.r_caps_to_nethctrl = cpu_to_be32(FW_PFVF_CMD_R_CAPS_V(rcaps) |
6556 FW_PFVF_CMD_WX_CAPS_V(wxcaps) |
6557 FW_PFVF_CMD_NETHCTRL_V(txq_eth_ctrl));
56d36be4
DM
6558 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
6559}
6560
6561/**
6562 * t4_alloc_vi - allocate a virtual interface
6563 * @adap: the adapter
6564 * @mbox: mailbox to use for the FW command
6565 * @port: physical port associated with the VI
6566 * @pf: the PF owning the VI
6567 * @vf: the VF owning the VI
6568 * @nmac: number of MAC addresses needed (1 to 5)
6569 * @mac: the MAC addresses of the VI
6570 * @rss_size: size of RSS table slice associated with this VI
6571 *
6572 * Allocates a virtual interface for the given physical port. If @mac is
6573 * not %NULL it contains the MAC addresses of the VI as assigned by FW.
 6574 * @mac should be large enough to hold @nmac Ethernet addresses; they are
6575 * stored consecutively so the space needed is @nmac * 6 bytes.
6576 * Returns a negative error number or the non-negative VI id.
6577 */
6578int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
6579 unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac,
6580 unsigned int *rss_size)
6581{
6582 int ret;
6583 struct fw_vi_cmd c;
6584
6585 memset(&c, 0, sizeof(c));
f404f80c
HS
6586 c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_VI_CMD) | FW_CMD_REQUEST_F |
6587 FW_CMD_WRITE_F | FW_CMD_EXEC_F |
6588 FW_VI_CMD_PFN_V(pf) | FW_VI_CMD_VFN_V(vf));
6589 c.alloc_to_len16 = cpu_to_be32(FW_VI_CMD_ALLOC_F | FW_LEN16(c));
2b5fb1f2 6590 c.portid_pkd = FW_VI_CMD_PORTID_V(port);
56d36be4
DM
6591 c.nmac = nmac - 1;
6592
6593 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
6594 if (ret)
6595 return ret;
6596
6597 if (mac) {
6598 memcpy(mac, c.mac, sizeof(c.mac));
6599 switch (nmac) {
6600 case 5:
6601 memcpy(mac + 24, c.nmac3, sizeof(c.nmac3));
6602 case 4:
6603 memcpy(mac + 18, c.nmac2, sizeof(c.nmac2));
6604 case 3:
6605 memcpy(mac + 12, c.nmac1, sizeof(c.nmac1));
6606 case 2:
6607 memcpy(mac + 6, c.nmac0, sizeof(c.nmac0));
6608 }
6609 }
6610 if (rss_size)
f404f80c
HS
6611 *rss_size = FW_VI_CMD_RSSSIZE_G(be16_to_cpu(c.rsssize_pkd));
6612 return FW_VI_CMD_VIID_G(be16_to_cpu(c.type_viid));
56d36be4
DM
6613}
6614
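/* Illustrative sketch (not part of the original driver): allocating a VI
 * with a single firmware-assigned MAC address on physical port 0 and
 * keeping the returned VI id.
 *
 *	u8 mac[ETH_ALEN];
 *	unsigned int rss_size;
 *	int viid = t4_alloc_vi(adap, adap->mbox, 0, adap->pf, 0, 1,
 *			       mac, &rss_size);
 *	if (viid < 0)
 *		return viid;
 */
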
4f3a0fcf
HS
6615/**
6616 * t4_free_vi - free a virtual interface
6617 * @adap: the adapter
6618 * @mbox: mailbox to use for the FW command
6619 * @pf: the PF owning the VI
6620 * @vf: the VF owning the VI
 6621 * @viid: virtual interface identifier
6622 *
6623 * Free a previously allocated virtual interface.
6624 */
6625int t4_free_vi(struct adapter *adap, unsigned int mbox, unsigned int pf,
6626 unsigned int vf, unsigned int viid)
6627{
6628 struct fw_vi_cmd c;
6629
6630 memset(&c, 0, sizeof(c));
6631 c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_VI_CMD) |
6632 FW_CMD_REQUEST_F |
6633 FW_CMD_EXEC_F |
6634 FW_VI_CMD_PFN_V(pf) |
6635 FW_VI_CMD_VFN_V(vf));
6636 c.alloc_to_len16 = cpu_to_be32(FW_VI_CMD_FREE_F | FW_LEN16(c));
6637 c.type_viid = cpu_to_be16(FW_VI_CMD_VIID_V(viid));
6638
6639 return t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
56d36be4
DM
6640}
6641
56d36be4
DM
6642/**
6643 * t4_set_rxmode - set Rx properties of a virtual interface
6644 * @adap: the adapter
6645 * @mbox: mailbox to use for the FW command
6646 * @viid: the VI id
6647 * @mtu: the new MTU or -1
6648 * @promisc: 1 to enable promiscuous mode, 0 to disable it, -1 no change
6649 * @all_multi: 1 to enable all-multi mode, 0 to disable it, -1 no change
6650 * @bcast: 1 to enable broadcast Rx, 0 to disable it, -1 no change
f8f5aafa 6651 * @vlanex: 1 to enable HW VLAN extraction, 0 to disable it, -1 no change
56d36be4
DM
6652 * @sleep_ok: if true we may sleep while awaiting command completion
6653 *
6654 * Sets Rx properties of a virtual interface.
6655 */
6656int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
f8f5aafa
DM
6657 int mtu, int promisc, int all_multi, int bcast, int vlanex,
6658 bool sleep_ok)
56d36be4
DM
6659{
6660 struct fw_vi_rxmode_cmd c;
6661
6662 /* convert to FW values */
6663 if (mtu < 0)
6664 mtu = FW_RXMODE_MTU_NO_CHG;
6665 if (promisc < 0)
2b5fb1f2 6666 promisc = FW_VI_RXMODE_CMD_PROMISCEN_M;
56d36be4 6667 if (all_multi < 0)
2b5fb1f2 6668 all_multi = FW_VI_RXMODE_CMD_ALLMULTIEN_M;
56d36be4 6669 if (bcast < 0)
2b5fb1f2 6670 bcast = FW_VI_RXMODE_CMD_BROADCASTEN_M;
f8f5aafa 6671 if (vlanex < 0)
2b5fb1f2 6672 vlanex = FW_VI_RXMODE_CMD_VLANEXEN_M;
56d36be4
DM
6673
6674 memset(&c, 0, sizeof(c));
f404f80c
HS
6675 c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_RXMODE_CMD) |
6676 FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
6677 FW_VI_RXMODE_CMD_VIID_V(viid));
6678 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
6679 c.mtu_to_vlanexen =
6680 cpu_to_be32(FW_VI_RXMODE_CMD_MTU_V(mtu) |
6681 FW_VI_RXMODE_CMD_PROMISCEN_V(promisc) |
6682 FW_VI_RXMODE_CMD_ALLMULTIEN_V(all_multi) |
6683 FW_VI_RXMODE_CMD_BROADCASTEN_V(bcast) |
6684 FW_VI_RXMODE_CMD_VLANEXEN_V(vlanex));
56d36be4
DM
6685 return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
6686}
6687
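/* Illustrative sketch (not part of the original driver): enabling
 * promiscuous and all-multicast reception on a VI while leaving the MTU
 * and the remaining settings untouched (-1 means "no change").  "pi" is
 * the caller's port_info holding the VI id.
 *
 *	t4_set_rxmode(adap, adap->mbox, pi->viid, -1, 1, 1, -1, -1, true);
 */
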
6688/**
6689 * t4_alloc_mac_filt - allocates exact-match filters for MAC addresses
6690 * @adap: the adapter
6691 * @mbox: mailbox to use for the FW command
6692 * @viid: the VI id
6693 * @free: if true any existing filters for this VI id are first removed
6694 * @naddr: the number of MAC addresses to allocate filters for (up to 7)
6695 * @addr: the MAC address(es)
6696 * @idx: where to store the index of each allocated filter
6697 * @hash: pointer to hash address filter bitmap
6698 * @sleep_ok: call is allowed to sleep
6699 *
6700 * Allocates an exact-match filter for each of the supplied addresses and
6701 * sets it to the corresponding address. If @idx is not %NULL it should
6702 * have at least @naddr entries, each of which will be set to the index of
6703 * the filter allocated for the corresponding MAC address. If a filter
6704 * could not be allocated for an address its index is set to 0xffff.
 6705 * If @hash is not %NULL, addresses that fail to allocate an exact filter
6706 * are hashed and update the hash filter bitmap pointed at by @hash.
6707 *
6708 * Returns a negative error number or the number of filters allocated.
6709 */
6710int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox,
6711 unsigned int viid, bool free, unsigned int naddr,
6712 const u8 **addr, u16 *idx, u64 *hash, bool sleep_ok)
6713{
3ccc6cf7 6714 int offset, ret = 0;
56d36be4 6715 struct fw_vi_mac_cmd c;
3ccc6cf7
HS
6716 unsigned int nfilters = 0;
6717 unsigned int max_naddr = adap->params.arch.mps_tcam_size;
6718 unsigned int rem = naddr;
56d36be4 6719
3ccc6cf7 6720 if (naddr > max_naddr)
56d36be4
DM
6721 return -EINVAL;
6722
3ccc6cf7
HS
6723 for (offset = 0; offset < naddr ; /**/) {
6724 unsigned int fw_naddr = (rem < ARRAY_SIZE(c.u.exact) ?
6725 rem : ARRAY_SIZE(c.u.exact));
6726 size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
6727 u.exact[fw_naddr]), 16);
6728 struct fw_vi_mac_exact *p;
6729 int i;
56d36be4 6730
3ccc6cf7
HS
6731 memset(&c, 0, sizeof(c));
6732 c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
6733 FW_CMD_REQUEST_F |
6734 FW_CMD_WRITE_F |
6735 FW_CMD_EXEC_V(free) |
6736 FW_VI_MAC_CMD_VIID_V(viid));
6737 c.freemacs_to_len16 =
6738 cpu_to_be32(FW_VI_MAC_CMD_FREEMACS_V(free) |
6739 FW_CMD_LEN16_V(len16));
6740
6741 for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) {
6742 p->valid_to_idx =
6743 cpu_to_be16(FW_VI_MAC_CMD_VALID_F |
6744 FW_VI_MAC_CMD_IDX_V(
6745 FW_VI_MAC_ADD_MAC));
6746 memcpy(p->macaddr, addr[offset + i],
6747 sizeof(p->macaddr));
6748 }
56d36be4 6749
3ccc6cf7
HS
6750 /* It's okay if we run out of space in our MAC address arena.
6751 * Some of the addresses we submit may get stored so we need
6752 * to run through the reply to see what the results were ...
6753 */
6754 ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok);
6755 if (ret && ret != -FW_ENOMEM)
6756 break;
56d36be4 6757
3ccc6cf7
HS
6758 for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) {
6759 u16 index = FW_VI_MAC_CMD_IDX_G(
6760 be16_to_cpu(p->valid_to_idx));
6761
6762 if (idx)
6763 idx[offset + i] = (index >= max_naddr ?
6764 0xffff : index);
6765 if (index < max_naddr)
6766 nfilters++;
6767 else if (hash)
6768 *hash |= (1ULL <<
6769 hash_mac_addr(addr[offset + i]));
6770 }
56d36be4 6771
3ccc6cf7
HS
6772 free = false;
6773 offset += fw_naddr;
6774 rem -= fw_naddr;
56d36be4 6775 }
3ccc6cf7
HS
6776
6777 if (ret == 0 || ret == -FW_ENOMEM)
6778 ret = nfilters;
56d36be4
DM
6779 return ret;
6780}
6781
fc08a01a
HS
6782/**
6783 * t4_free_mac_filt - frees exact-match filters of given MAC addresses
6784 * @adap: the adapter
6785 * @mbox: mailbox to use for the FW command
6786 * @viid: the VI id
 6787 * @naddr: the number of MAC addresses whose filters are to be freed (up to 7)
6788 * @addr: the MAC address(es)
6789 * @sleep_ok: call is allowed to sleep
6790 *
6791 * Frees the exact-match filter for each of the supplied addresses
6792 *
6793 * Returns a negative error number or the number of filters freed.
6794 */
6795int t4_free_mac_filt(struct adapter *adap, unsigned int mbox,
6796 unsigned int viid, unsigned int naddr,
6797 const u8 **addr, bool sleep_ok)
6798{
6799 int offset, ret = 0;
6800 struct fw_vi_mac_cmd c;
6801 unsigned int nfilters = 0;
6802 unsigned int max_naddr = is_t4(adap->params.chip) ?
6803 NUM_MPS_CLS_SRAM_L_INSTANCES :
6804 NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
6805 unsigned int rem = naddr;
6806
6807 if (naddr > max_naddr)
6808 return -EINVAL;
6809
6810 for (offset = 0; offset < (int)naddr ; /**/) {
6811 unsigned int fw_naddr = (rem < ARRAY_SIZE(c.u.exact)
6812 ? rem
6813 : ARRAY_SIZE(c.u.exact));
6814 size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
6815 u.exact[fw_naddr]), 16);
6816 struct fw_vi_mac_exact *p;
6817 int i;
6818
6819 memset(&c, 0, sizeof(c));
6820 c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
6821 FW_CMD_REQUEST_F |
6822 FW_CMD_WRITE_F |
6823 FW_CMD_EXEC_V(0) |
6824 FW_VI_MAC_CMD_VIID_V(viid));
6825 c.freemacs_to_len16 =
6826 cpu_to_be32(FW_VI_MAC_CMD_FREEMACS_V(0) |
6827 FW_CMD_LEN16_V(len16));
6828
6829 for (i = 0, p = c.u.exact; i < (int)fw_naddr; i++, p++) {
6830 p->valid_to_idx = cpu_to_be16(
6831 FW_VI_MAC_CMD_VALID_F |
6832 FW_VI_MAC_CMD_IDX_V(FW_VI_MAC_MAC_BASED_FREE));
6833 memcpy(p->macaddr, addr[offset+i], sizeof(p->macaddr));
6834 }
6835
6836 ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok);
6837 if (ret)
6838 break;
6839
6840 for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) {
6841 u16 index = FW_VI_MAC_CMD_IDX_G(
6842 be16_to_cpu(p->valid_to_idx));
6843
6844 if (index < max_naddr)
6845 nfilters++;
6846 }
6847
6848 offset += fw_naddr;
6849 rem -= fw_naddr;
6850 }
6851
6852 if (ret == 0)
6853 ret = nfilters;
6854 return ret;
6855}
6856
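/* Illustrative sketch, not part of the driver: dropping a small, fixed set of
 * exact-match filters with t4_free_mac_filt() above. The helper name and the
 * two-address shape are assumptions made for the example.
 */
static int example_free_two_mac_filters(struct adapter *adap,
					unsigned int viid,
					const u8 *mac0, const u8 *mac1)
{
	const u8 *macs[] = { mac0, mac1 };
	int nfreed;

	nfreed = t4_free_mac_filt(adap, adap->mbox, viid, ARRAY_SIZE(macs),
				  macs, true);
	if (nfreed < 0)
		return nfreed;		/* mailbox or firmware error */

	/* nfreed is how many of the supplied addresses had a filter */
	return nfreed == ARRAY_SIZE(macs) ? 0 : -ENOENT;
}
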
56d36be4
DM
6857/**
6858 * t4_change_mac - modifies the exact-match filter for a MAC address
6859 * @adap: the adapter
6860 * @mbox: mailbox to use for the FW command
6861 * @viid: the VI id
6862 * @idx: index of existing filter for old value of MAC address, or -1
6863 * @addr: the new MAC address value
6864 * @persist: whether a new MAC allocation should be persistent
6865 * @add_smt: if true also add the address to the HW SMT
6866 *
6867 * Modifies an exact-match filter and sets it to the new MAC address.
6868 * Note that in general it is not possible to modify the value of a given
6869 * filter so the generic way to modify an address filter is to free the one
6870 * being used by the old address value and allocate a new filter for the
6871 * new address value. @idx can be -1 if the address is a new addition.
6872 *
6873 * Returns a negative error number or the index of the filter with the new
6874 * MAC value.
6875 */
6876int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
6877 int idx, const u8 *addr, bool persist, bool add_smt)
6878{
6879 int ret, mode;
6880 struct fw_vi_mac_cmd c;
6881 struct fw_vi_mac_exact *p = c.u.exact;
3ccc6cf7 6882 unsigned int max_mac_addr = adap->params.arch.mps_tcam_size;
56d36be4
DM
6883
6884 if (idx < 0) /* new allocation */
6885 idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC;
6886 mode = add_smt ? FW_VI_MAC_SMT_AND_MPSTCAM : FW_VI_MAC_MPS_TCAM_ENTRY;
6887
6888 memset(&c, 0, sizeof(c));
f404f80c
HS
6889 c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
6890 FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
6891 FW_VI_MAC_CMD_VIID_V(viid));
6892 c.freemacs_to_len16 = cpu_to_be32(FW_CMD_LEN16_V(1));
6893 p->valid_to_idx = cpu_to_be16(FW_VI_MAC_CMD_VALID_F |
6894 FW_VI_MAC_CMD_SMAC_RESULT_V(mode) |
6895 FW_VI_MAC_CMD_IDX_V(idx));
56d36be4
DM
6896 memcpy(p->macaddr, addr, sizeof(p->macaddr));
6897
6898 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
6899 if (ret == 0) {
f404f80c 6900 ret = FW_VI_MAC_CMD_IDX_G(be16_to_cpu(p->valid_to_idx));
0a57a536 6901 if (ret >= max_mac_addr)
56d36be4
DM
6902 ret = -ENOMEM;
6903 }
6904 return ret;
6905}
6906
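/* Illustrative sketch, not part of the driver: updating a VI's unicast
 * address with t4_change_mac(). "old_idx" stands for whatever filter index
 * the caller cached from its previous call; -1 means "no existing filter".
 */
static int example_update_unicast_mac(struct port_info *pi, int old_idx,
				      const u8 *new_mac)
{
	struct adapter *adap = pi->adapter;
	int new_idx;

	/* persist=true asks the firmware for a persistent allocation,
	 * add_smt=true also installs the address in the source MAC table.
	 */
	new_idx = t4_change_mac(adap, adap->mbox, pi->viid, old_idx,
				new_mac, true, true);
	if (new_idx < 0)
		return new_idx;

	/* Cache new_idx for the next address change on this VI. */
	return new_idx;
}
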
6907/**
6908 * t4_set_addr_hash - program the MAC inexact-match hash filter
6909 * @adap: the adapter
6910 * @mbox: mailbox to use for the FW command
6911 * @viid: the VI id
6912 * @ucast: whether the hash filter should also match unicast addresses
6913 * @vec: the value to be written to the hash filter
6914 * @sleep_ok: call is allowed to sleep
6915 *
6916 * Sets the 64-bit inexact-match hash filter for a virtual interface.
6917 */
6918int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid,
6919 bool ucast, u64 vec, bool sleep_ok)
6920{
6921 struct fw_vi_mac_cmd c;
6922
6923 memset(&c, 0, sizeof(c));
f404f80c
HS
6924 c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
6925 FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
6926 FW_VI_ENABLE_CMD_VIID_V(viid));
6927 c.freemacs_to_len16 = cpu_to_be32(FW_VI_MAC_CMD_HASHVECEN_F |
6928 FW_VI_MAC_CMD_HASHUNIEN_V(ucast) |
6929 FW_CMD_LEN16_V(1));
56d36be4
DM
6930 c.u.hash.hashvec = cpu_to_be64(vec);
6931 return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
6932}
6933
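/* Illustrative sketch, not part of the driver: programming the inexact-match
 * hash once the exact-match slots are exhausted. It reuses hash_mac_addr()
 * from earlier in this file to pick the bit for each address; the helper
 * itself and its parameters are assumptions made for the example.
 */
static int example_program_mcast_hash(struct adapter *adap, unsigned int viid,
				      const u8 **mc_addrs, unsigned int n)
{
	u64 vec = 0;
	unsigned int i;

	for (i = 0; i < n; i++)
		vec |= 1ULL << hash_mac_addr(mc_addrs[i]);

	/* ucast=false: unicast frames still rely on the exact-match TCAM */
	return t4_set_addr_hash(adap, adap->mbox, viid, false, vec, true);
}
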
688848b1
AB
6934/**
6935 * t4_enable_vi_params - enable/disable a virtual interface
6936 * @adap: the adapter
6937 * @mbox: mailbox to use for the FW command
6938 * @viid: the VI id
6939 * @rx_en: 1=enable Rx, 0=disable Rx
6940 * @tx_en: 1=enable Tx, 0=disable Tx
6941 * @dcb_en: 1=enable delivery of Data Center Bridging messages.
6942 *
6943 * Enables/disables a virtual interface. Note that setting DCB Enable
6944 * only makes sense when enabling a Virtual Interface ...
6945 */
6946int t4_enable_vi_params(struct adapter *adap, unsigned int mbox,
6947 unsigned int viid, bool rx_en, bool tx_en, bool dcb_en)
6948{
6949 struct fw_vi_enable_cmd c;
6950
6951 memset(&c, 0, sizeof(c));
f404f80c
HS
6952 c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_ENABLE_CMD) |
6953 FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
6954 FW_VI_ENABLE_CMD_VIID_V(viid));
6955 c.ien_to_len16 = cpu_to_be32(FW_VI_ENABLE_CMD_IEN_V(rx_en) |
6956 FW_VI_ENABLE_CMD_EEN_V(tx_en) |
6957 FW_VI_ENABLE_CMD_DCB_INFO_V(dcb_en) |
6958 FW_LEN16(c));
30f00847 6959 return t4_wr_mbox_ns(adap, mbox, &c, sizeof(c), NULL);
688848b1
AB
6960}
6961
56d36be4
DM
6962/**
6963 * t4_enable_vi - enable/disable a virtual interface
6964 * @adap: the adapter
6965 * @mbox: mailbox to use for the FW command
6966 * @viid: the VI id
6967 * @rx_en: 1=enable Rx, 0=disable Rx
6968 * @tx_en: 1=enable Tx, 0=disable Tx
6969 *
6970 * Enables/disables a virtual interface.
6971 */
6972int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid,
6973 bool rx_en, bool tx_en)
6974{
688848b1 6975 return t4_enable_vi_params(adap, mbox, viid, rx_en, tx_en, 0);
56d36be4
DM
6976}
6977
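/* Illustrative sketch, not part of the driver: a typical "link up/down"
 * toggle just enables or disables Rx and Tx together; DCB message delivery
 * stays off here (that knob is only on t4_enable_vi_params()).
 */
static int example_set_vi_link_state(struct port_info *pi, bool up)
{
	return t4_enable_vi(pi->adapter, pi->adapter->mbox, pi->viid, up, up);
}
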
6978/**
6979 * t4_identify_port - identify a VI's port by blinking its LED
6980 * @adap: the adapter
6981 * @mbox: mailbox to use for the FW command
6982 * @viid: the VI id
6983 * @nblinks: how many times to blink LED at 2.5 Hz
6984 *
6985 * Identifies a VI's port by blinking its LED.
6986 */
6987int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid,
6988 unsigned int nblinks)
6989{
6990 struct fw_vi_enable_cmd c;
6991
0062b15c 6992 memset(&c, 0, sizeof(c));
f404f80c
HS
6993 c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_ENABLE_CMD) |
6994 FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
6995 FW_VI_ENABLE_CMD_VIID_V(viid));
6996 c.ien_to_len16 = cpu_to_be32(FW_VI_ENABLE_CMD_LED_F | FW_LEN16(c));
6997 c.blinkdur = cpu_to_be16(nblinks);
56d36be4 6998 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
56d36be4
DM
6999}
7000
ebf4dc2b
HS
7001/**
7002 * t4_iq_stop - stop an ingress queue and its FLs
7003 * @adap: the adapter
7004 * @mbox: mailbox to use for the FW command
7005 * @pf: the PF owning the queues
7006 * @vf: the VF owning the queues
7007 * @iqtype: the ingress queue type (FW_IQ_TYPE_FL_INT_CAP, etc.)
7008 * @iqid: ingress queue id
7009 * @fl0id: FL0 queue id or 0xffff if no attached FL0
7010 * @fl1id: FL1 queue id or 0xffff if no attached FL1
7011 *
7012 * Stops an ingress queue and its associated FLs, if any. This causes
7013 * any current or future data/messages destined for these queues to be
7014 * tossed.
7015 */
7016int t4_iq_stop(struct adapter *adap, unsigned int mbox, unsigned int pf,
7017 unsigned int vf, unsigned int iqtype, unsigned int iqid,
7018 unsigned int fl0id, unsigned int fl1id)
7019{
7020 struct fw_iq_cmd c;
7021
7022 memset(&c, 0, sizeof(c));
7023 c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_IQ_CMD) | FW_CMD_REQUEST_F |
7024 FW_CMD_EXEC_F | FW_IQ_CMD_PFN_V(pf) |
7025 FW_IQ_CMD_VFN_V(vf));
7026 c.alloc_to_len16 = cpu_to_be32(FW_IQ_CMD_IQSTOP_F | FW_LEN16(c));
7027 c.type_to_iqandstindex = cpu_to_be32(FW_IQ_CMD_TYPE_V(iqtype));
7028 c.iqid = cpu_to_be16(iqid);
7029 c.fl0id = cpu_to_be16(fl0id);
7030 c.fl1id = cpu_to_be16(fl1id);
7031 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
7032}
7033
56d36be4
DM
7034/**
7035 * t4_iq_free - free an ingress queue and its FLs
7036 * @adap: the adapter
7037 * @mbox: mailbox to use for the FW command
7038 * @pf: the PF owning the queues
7039 * @vf: the VF owning the queues
7040 * @iqtype: the ingress queue type
7041 * @iqid: ingress queue id
7042 * @fl0id: FL0 queue id or 0xffff if no attached FL0
7043 * @fl1id: FL1 queue id or 0xffff if no attached FL1
7044 *
7045 * Frees an ingress queue and its associated FLs, if any.
7046 */
7047int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
7048 unsigned int vf, unsigned int iqtype, unsigned int iqid,
7049 unsigned int fl0id, unsigned int fl1id)
7050{
7051 struct fw_iq_cmd c;
7052
7053 memset(&c, 0, sizeof(c));
f404f80c
HS
7054 c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_IQ_CMD) | FW_CMD_REQUEST_F |
7055 FW_CMD_EXEC_F | FW_IQ_CMD_PFN_V(pf) |
7056 FW_IQ_CMD_VFN_V(vf));
7057 c.alloc_to_len16 = cpu_to_be32(FW_IQ_CMD_FREE_F | FW_LEN16(c));
7058 c.type_to_iqandstindex = cpu_to_be32(FW_IQ_CMD_TYPE_V(iqtype));
7059 c.iqid = cpu_to_be16(iqid);
7060 c.fl0id = cpu_to_be16(fl0id);
7061 c.fl1id = cpu_to_be16(fl1id);
56d36be4
DM
7062 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
7063}
7064
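/* Illustrative sketch, not part of the driver: tearing down an ingress queue
 * with a single attached free list. Stopping it first ensures no further
 * messages are delivered before the contexts are handed back to the
 * firmware; 0xffff marks "no FL1". VF 0 is assumed for the example.
 */
static int example_teardown_rxq(struct adapter *adap, unsigned int iqid,
				unsigned int fl0id)
{
	int ret;

	ret = t4_iq_stop(adap, adap->mbox, adap->pf, 0, FW_IQ_TYPE_FL_INT_CAP,
			 iqid, fl0id, 0xffff);
	if (ret)
		return ret;

	return t4_iq_free(adap, adap->mbox, adap->pf, 0, FW_IQ_TYPE_FL_INT_CAP,
			  iqid, fl0id, 0xffff);
}
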
7065/**
7066 * t4_eth_eq_free - free an Ethernet egress queue
7067 * @adap: the adapter
7068 * @mbox: mailbox to use for the FW command
7069 * @pf: the PF owning the queue
7070 * @vf: the VF owning the queue
7071 * @eqid: egress queue id
7072 *
7073 * Frees an Ethernet egress queue.
7074 */
7075int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
7076 unsigned int vf, unsigned int eqid)
7077{
7078 struct fw_eq_eth_cmd c;
7079
7080 memset(&c, 0, sizeof(c));
f404f80c
HS
7081 c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_EQ_ETH_CMD) |
7082 FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
7083 FW_EQ_ETH_CMD_PFN_V(pf) |
7084 FW_EQ_ETH_CMD_VFN_V(vf));
7085 c.alloc_to_len16 = cpu_to_be32(FW_EQ_ETH_CMD_FREE_F | FW_LEN16(c));
7086 c.eqid_pkd = cpu_to_be32(FW_EQ_ETH_CMD_EQID_V(eqid));
56d36be4
DM
7087 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
7088}
7089
7090/**
7091 * t4_ctrl_eq_free - free a control egress queue
7092 * @adap: the adapter
7093 * @mbox: mailbox to use for the FW command
7094 * @pf: the PF owning the queue
7095 * @vf: the VF owning the queue
7096 * @eqid: egress queue id
7097 *
7098 * Frees a control egress queue.
7099 */
7100int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
7101 unsigned int vf, unsigned int eqid)
7102{
7103 struct fw_eq_ctrl_cmd c;
7104
7105 memset(&c, 0, sizeof(c));
f404f80c
HS
7106 c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_EQ_CTRL_CMD) |
7107 FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
7108 FW_EQ_CTRL_CMD_PFN_V(pf) |
7109 FW_EQ_CTRL_CMD_VFN_V(vf));
7110 c.alloc_to_len16 = cpu_to_be32(FW_EQ_CTRL_CMD_FREE_F | FW_LEN16(c));
7111 c.cmpliqid_eqid = cpu_to_be32(FW_EQ_CTRL_CMD_EQID_V(eqid));
56d36be4
DM
7112 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
7113}
7114
7115/**
7116 * t4_ofld_eq_free - free an offload egress queue
7117 * @adap: the adapter
7118 * @mbox: mailbox to use for the FW command
7119 * @pf: the PF owning the queue
7120 * @vf: the VF owning the queue
7121 * @eqid: egress queue id
7122 *
7123 * Frees an offload egress queue.
7124 */
7125int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
7126 unsigned int vf, unsigned int eqid)
7127{
7128 struct fw_eq_ofld_cmd c;
7129
7130 memset(&c, 0, sizeof(c));
f404f80c
HS
7131 c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_EQ_OFLD_CMD) |
7132 FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
7133 FW_EQ_OFLD_CMD_PFN_V(pf) |
7134 FW_EQ_OFLD_CMD_VFN_V(vf));
7135 c.alloc_to_len16 = cpu_to_be32(FW_EQ_OFLD_CMD_FREE_F | FW_LEN16(c));
7136 c.eqid_pkd = cpu_to_be32(FW_EQ_OFLD_CMD_EQID_V(eqid));
56d36be4
DM
7137 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
7138}
7139
ddc7740d
HS
7140/**
7141 * t4_link_down_rc_str - return a string for a Link Down Reason Code
7143 * @link_down_rc: Link Down Reason Code
7144 *
7145 * Returns a string representation of the Link Down Reason Code.
7146 */
7147static const char *t4_link_down_rc_str(unsigned char link_down_rc)
7148{
7149 static const char * const reason[] = {
7150 "Link Down",
7151 "Remote Fault",
7152 "Auto-negotiation Failure",
7153 "Reserved",
7154 "Insufficient Airflow",
7155 "Unable To Determine Reason",
7156 "No RX Signal Detected",
7157 "Reserved",
7158 };
7159
7160 if (link_down_rc >= ARRAY_SIZE(reason))
7161 return "Bad Reason Code";
7162
7163 return reason[link_down_rc];
7164}
7165
56d36be4 7166/**
23853a0a
HS
7167 * t4_handle_get_port_info - process a FW reply message
7168 * @pi: the port info
56d36be4
DM
7169 * @rpl: start of the FW message
7170 *
23853a0a
HS
7171 * Processes a GET_PORT_INFO FW reply message.
7172 */
7173void t4_handle_get_port_info(struct port_info *pi, const __be64 *rpl)
7174{
7175 const struct fw_port_cmd *p = (const void *)rpl;
7176 struct adapter *adap = pi->adapter;
7177
7178 /* link/module state change message */
7179 int speed = 0, fc = 0;
7180 struct link_config *lc;
7181 u32 stat = be32_to_cpu(p->u.info.lstatus_to_modtype);
7182 int link_ok = (stat & FW_PORT_CMD_LSTATUS_F) != 0;
7183 u32 mod = FW_PORT_CMD_MODTYPE_G(stat);
7184
7185 if (stat & FW_PORT_CMD_RXPAUSE_F)
7186 fc |= PAUSE_RX;
7187 if (stat & FW_PORT_CMD_TXPAUSE_F)
7188 fc |= PAUSE_TX;
7189 if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_100M))
7190 speed = 100;
7191 else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_1G))
7192 speed = 1000;
7193 else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_10G))
7194 speed = 10000;
7195 else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_40G))
7196 speed = 40000;
7197
7198 lc = &pi->link_cfg;
7199
7200 if (mod != pi->mod_type) {
7201 pi->mod_type = mod;
7202 t4_os_portmod_changed(adap, pi->port_id);
7203 }
7204 if (link_ok != lc->link_ok || speed != lc->speed ||
7205 fc != lc->fc) { /* something changed */
ddc7740d
HS
7206 if (!link_ok && lc->link_ok) {
7207 unsigned char rc = FW_PORT_CMD_LINKDNRC_G(stat);
7208
7209 lc->link_down_rc = rc;
7210 dev_warn(adap->pdev_dev,
7211 "Port %d link down, reason: %s\n",
7212 pi->port_id, t4_link_down_rc_str(rc));
7213 }
23853a0a
HS
7214 lc->link_ok = link_ok;
7215 lc->speed = speed;
7216 lc->fc = fc;
7217 lc->supported = be16_to_cpu(p->u.info.pcap);
7218 t4_os_link_changed(adap, pi->port_id, link_ok);
7219 }
7220}
7221
7222/**
7223 * t4_handle_fw_rpl - process a FW reply message
7224 * @adap: the adapter
7225 * @rpl: start of the FW message
7226 *
7227 * Processes a FW message, such as link state change messages.
56d36be4
DM
7228 */
7229int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl)
7230{
7231 u8 opcode = *(const u8 *)rpl;
7232
23853a0a
HS
7233 /* This might be a port command ... this simplifies the following
7234 * conditionals ... We can get away with pre-dereferencing
7235 * action_to_len16 because it's in the first 16 bytes and all messages
7236 * will be at least that long.
7237 */
7238 const struct fw_port_cmd *p = (const void *)rpl;
7239 unsigned int action =
7240 FW_PORT_CMD_ACTION_G(be32_to_cpu(p->action_to_len16));
7241
7242 if (opcode == FW_PORT_CMD && action == FW_PORT_ACTION_GET_PORT_INFO) {
7243 int i;
f404f80c 7244 int chan = FW_PORT_CMD_PORTID_G(be32_to_cpu(p->op_to_portid));
23853a0a
HS
7245 struct port_info *pi = NULL;
7246
7247 for_each_port(adap, i) {
7248 pi = adap2pinfo(adap, i);
7249 if (pi->tx_chan == chan)
7250 break;
56d36be4 7251 }
23853a0a
HS
7252
7253 t4_handle_get_port_info(pi, rpl);
7254 } else {
7255 dev_warn(adap->pdev_dev, "Unknown firmware reply %d\n", opcode);
7256 return -EINVAL;
56d36be4
DM
7257 }
7258 return 0;
7259}
7260
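/* Illustrative sketch, not part of the driver: a caller that pulls async
 * messages off the firmware event queue could hand the raw 64-bit words to
 * t4_handle_fw_rpl() like this; unknown opcodes are already warned about and
 * rejected by the handler itself.
 */
static void example_fw_event(struct adapter *adap, const __be64 *rpl)
{
	if (t4_handle_fw_rpl(adap, rpl))
		dev_dbg(adap->pdev_dev, "unhandled firmware reply\n");
}
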
1dd06ae8 7261static void get_pci_mode(struct adapter *adapter, struct pci_params *p)
56d36be4
DM
7262{
7263 u16 val;
56d36be4 7264
e5c8ae5f
JL
7265 if (pci_is_pcie(adapter->pdev)) {
7266 pcie_capability_read_word(adapter->pdev, PCI_EXP_LNKSTA, &val);
56d36be4
DM
7267 p->speed = val & PCI_EXP_LNKSTA_CLS;
7268 p->width = (val & PCI_EXP_LNKSTA_NLW) >> 4;
7269 }
7270}
7271
7272/**
7273 * init_link_config - initialize a link's SW state
7274 * @lc: structure holding the link state
7275 * @caps: link capabilities
7276 *
7277 * Initializes the SW state maintained for each link, including the link's
7278 * capabilities and default speed/flow-control/autonegotiation settings.
7279 */
1dd06ae8 7280static void init_link_config(struct link_config *lc, unsigned int caps)
56d36be4
DM
7281{
7282 lc->supported = caps;
7283 lc->requested_speed = 0;
7284 lc->speed = 0;
7285 lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
7286 if (lc->supported & FW_PORT_CAP_ANEG) {
7287 lc->advertising = lc->supported & ADVERT_MASK;
7288 lc->autoneg = AUTONEG_ENABLE;
7289 lc->requested_fc |= PAUSE_AUTONEG;
7290 } else {
7291 lc->advertising = 0;
7292 lc->autoneg = AUTONEG_DISABLE;
7293 }
7294}
7295
8203b509
HS
7296#define CIM_PF_NOACCESS 0xeeeeeeee
7297
7298int t4_wait_dev_ready(void __iomem *regs)
56d36be4 7299{
8203b509
HS
7300 u32 whoami;
7301
0d804338 7302 whoami = readl(regs + PL_WHOAMI_A);
8203b509 7303 if (whoami != 0xffffffff && whoami != CIM_PF_NOACCESS)
56d36be4 7304 return 0;
8203b509 7305
56d36be4 7306 msleep(500);
0d804338 7307 whoami = readl(regs + PL_WHOAMI_A);
8203b509 7308 return (whoami != 0xffffffff && whoami != CIM_PF_NOACCESS ? 0 : -EIO);
56d36be4
DM
7309}
7310
fe2ee139
HS
7311struct flash_desc {
7312 u32 vendor_and_model_id;
7313 u32 size_mb;
7314};
7315
91744948 7316static int get_flash_params(struct adapter *adap)
900a6596 7317{
fe2ee139
HS
7318 /* Table for non-Numonix supported flash parts. Numonix parts are left
7319 * to the preexisting code. All flash parts have 64KB sectors.
7320 */
7321 static struct flash_desc supported_flash[] = {
7322 { 0x150201, 4 << 20 }, /* Spansion 4MB S25FL032P */
7323 };
7324
900a6596
DM
7325 int ret;
7326 u32 info;
7327
7328 ret = sf1_write(adap, 1, 1, 0, SF_RD_ID);
7329 if (!ret)
7330 ret = sf1_read(adap, 3, 0, 1, &info);
0d804338 7331 t4_write_reg(adap, SF_OP_A, 0); /* unlock SF */
900a6596
DM
7332 if (ret)
7333 return ret;
7334
fe2ee139
HS
7335 for (ret = 0; ret < ARRAY_SIZE(supported_flash); ++ret)
7336 if (supported_flash[ret].vendor_and_model_id == info) {
7337 adap->params.sf_size = supported_flash[ret].size_mb;
7338 adap->params.sf_nsec =
7339 adap->params.sf_size / SF_SEC_SIZE;
7340 return 0;
7341 }
7342
900a6596
DM
7343 if ((info & 0xff) != 0x20) /* not a Numonix flash */
7344 return -EINVAL;
7345 info >>= 16; /* log2 of size */
7346 if (info >= 0x14 && info < 0x18)
7347 adap->params.sf_nsec = 1 << (info - 16);
7348 else if (info == 0x18)
7349 adap->params.sf_nsec = 64;
7350 else
7351 return -EINVAL;
7352 adap->params.sf_size = 1 << info;
7353 adap->params.sf_fw_start =
89c3a86c 7354 t4_read_reg(adap, CIM_BOOT_CFG_A) & BOOTADDR_M;
c290607e
HS
7355
7356 if (adap->params.sf_size < FLASH_MIN_SIZE)
7357 dev_warn(adap->pdev_dev, "WARNING!!! FLASH size %#x < %#x!!!\n",
7358 adap->params.sf_size, FLASH_MIN_SIZE);
900a6596
DM
7359 return 0;
7360}
7361
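/* Illustrative worked example for the Numonix branch above, using a
 * hypothetical ID word: if the decoded log2-size field is info = 0x17, then
 *
 *	sf_nsec = 1 << (0x17 - 16) = 128 sectors
 *	sf_size = 1 << 0x17        = 8 MB (= 128 * 64KB, matching the 64KB
 *	                             sector size noted at the top of the table)
 */
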
eca0f6ee
HS
7362static void set_pcie_completion_timeout(struct adapter *adapter, u8 range)
7363{
7364 u16 val;
7365 u32 pcie_cap;
7366
7367 pcie_cap = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP);
7368 if (pcie_cap) {
7369 pci_read_config_word(adapter->pdev,
7370 pcie_cap + PCI_EXP_DEVCTL2, &val);
7371 val &= ~PCI_EXP_DEVCTL2_COMP_TIMEOUT;
7372 val |= range;
7373 pci_write_config_word(adapter->pdev,
7374 pcie_cap + PCI_EXP_DEVCTL2, val);
7375 }
7376}
7377
56d36be4
DM
7378/**
7379 * t4_prep_adapter - prepare SW and HW for operation
7380 * @adapter: the adapter
7381 * @reset: if true perform a HW reset
7382 *
7383 * Initialize adapter SW state for the various HW modules, set initial
7384 * values for some adapter tunables, take PHYs out of reset, and
7385 * initialize the MDIO interface.
7386 */
91744948 7387int t4_prep_adapter(struct adapter *adapter)
56d36be4 7388{
0a57a536
SR
7389 int ret, ver;
7390 uint16_t device_id;
d14807dd 7391 u32 pl_rev;
56d36be4 7392
56d36be4 7393 get_pci_mode(adapter, &adapter->params.pci);
0d804338 7394 pl_rev = REV_G(t4_read_reg(adapter, PL_REV_A));
56d36be4 7395
900a6596
DM
7396 ret = get_flash_params(adapter);
7397 if (ret < 0) {
7398 dev_err(adapter->pdev_dev, "error %d identifying flash\n", ret);
7399 return ret;
7400 }
7401
0a57a536
SR
7402 /* Retrieve adapter's device ID
7403 */
7404 pci_read_config_word(adapter->pdev, PCI_DEVICE_ID, &device_id);
7405 ver = device_id >> 12;
d14807dd 7406 adapter->params.chip = 0;
0a57a536
SR
7407 switch (ver) {
7408 case CHELSIO_T4:
d14807dd 7409 adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T4, pl_rev);
3ccc6cf7
HS
7410 adapter->params.arch.sge_fl_db = DBPRIO_F;
7411 adapter->params.arch.mps_tcam_size =
7412 NUM_MPS_CLS_SRAM_L_INSTANCES;
7413 adapter->params.arch.mps_rplc_size = 128;
7414 adapter->params.arch.nchan = NCHAN;
44588560 7415 adapter->params.arch.pm_stats_cnt = PM_NSTATS;
3ccc6cf7 7416 adapter->params.arch.vfcount = 128;
2216d014
HS
7417 /* Congestion map is for 4 channels so that
7418 * MPS can have 4 priority per port.
7419 */
7420 adapter->params.arch.cng_ch_bits_log = 2;
0a57a536
SR
7421 break;
7422 case CHELSIO_T5:
d14807dd 7423 adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T5, pl_rev);
3ccc6cf7
HS
7424 adapter->params.arch.sge_fl_db = DBPRIO_F | DBTYPE_F;
7425 adapter->params.arch.mps_tcam_size =
7426 NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
7427 adapter->params.arch.mps_rplc_size = 128;
7428 adapter->params.arch.nchan = NCHAN;
44588560 7429 adapter->params.arch.pm_stats_cnt = PM_NSTATS;
3ccc6cf7 7430 adapter->params.arch.vfcount = 128;
2216d014 7431 adapter->params.arch.cng_ch_bits_log = 2;
3ccc6cf7
HS
7432 break;
7433 case CHELSIO_T6:
7434 adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T6, pl_rev);
7435 adapter->params.arch.sge_fl_db = 0;
7436 adapter->params.arch.mps_tcam_size =
7437 NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
7438 adapter->params.arch.mps_rplc_size = 256;
7439 adapter->params.arch.nchan = 2;
44588560 7440 adapter->params.arch.pm_stats_cnt = T6_PM_NSTATS;
3ccc6cf7 7441 adapter->params.arch.vfcount = 256;
2216d014
HS
7442 /* Congestion map will be for 2 channels so that
7443 * MPS can have 8 priority per port.
7444 */
7445 adapter->params.arch.cng_ch_bits_log = 3;
0a57a536
SR
7446 break;
7447 default:
7448 dev_err(adapter->pdev_dev, "Device %d is not supported\n",
7449 device_id);
7450 return -EINVAL;
7451 }
7452
f1ff24aa 7453 adapter->params.cim_la_size = CIMLA_SIZE;
56d36be4
DM
7454 init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);
7455
7456 /*
7457 * Default port for debugging in case we can't reach FW.
7458 */
7459 adapter->params.nports = 1;
7460 adapter->params.portvec = 1;
636f9d37 7461 adapter->params.vpd.cclk = 50000;
eca0f6ee
HS
7462
7463 /* Set pci completion timeout value to 4 seconds. */
7464 set_pcie_completion_timeout(adapter, 0xd);
56d36be4
DM
7465 return 0;
7466}
7467
e85c9a7a 7468/**
b2612722 7469 * t4_bar2_sge_qregs - return BAR2 SGE Queue register information
e85c9a7a
HS
7470 * @adapter: the adapter
7471 * @qid: the Queue ID
7472 * @qtype: the Ingress or Egress type for @qid
66cf188e 7473 * @user: true if this request is for a user mode queue
e85c9a7a
HS
7474 * @pbar2_qoffset: BAR2 Queue Offset
7475 * @pbar2_qid: BAR2 Queue ID or 0 for Queue ID inferred SGE Queues
7476 *
7477 * Returns the BAR2 SGE Queue Registers information associated with the
7478 * indicated Absolute Queue ID. These are passed back in return value
7479 * pointers. @qtype should be T4_BAR2_QTYPE_EGRESS for Egress Queue
7480 * and T4_BAR2_QTYPE_INGRESS for Ingress Queues.
7481 *
7482 * This may return an error which indicates that BAR2 SGE Queue
7483 * registers aren't available. If an error is not returned, then the
7484 * following values are returned:
7485 *
7486 * *@pbar2_qoffset: the BAR2 Offset of the @qid Registers
7487 * *@pbar2_qid: the BAR2 SGE Queue ID or 0 of @qid
7488 *
7489 * If the returned BAR2 Queue ID is 0, then BAR2 SGE registers which
7490 * require the "Inferred Queue ID" ability may be used. E.g. the
7491 * Write Combining Doorbell Buffer. If the BAR2 Queue ID is not 0,
7492 * then these "Inferred Queue ID" registers may not be used.
7493 */
b2612722 7494int t4_bar2_sge_qregs(struct adapter *adapter,
e85c9a7a
HS
7495 unsigned int qid,
7496 enum t4_bar2_qtype qtype,
66cf188e 7497 int user,
e85c9a7a
HS
7498 u64 *pbar2_qoffset,
7499 unsigned int *pbar2_qid)
7500{
7501 unsigned int page_shift, page_size, qpp_shift, qpp_mask;
7502 u64 bar2_page_offset, bar2_qoffset;
7503 unsigned int bar2_qid, bar2_qid_offset, bar2_qinferred;
7504
66cf188e
H
7505 /* T4 doesn't support BAR2 SGE Queue registers for kernel mode queues */
7506 if (!user && is_t4(adapter->params.chip))
e85c9a7a
HS
7507 return -EINVAL;
7508
7509 /* Get our SGE Page Size parameters.
7510 */
7511 page_shift = adapter->params.sge.hps + 10;
7512 page_size = 1 << page_shift;
7513
7514 /* Get the right Queues per Page parameters for our Queue.
7515 */
7516 qpp_shift = (qtype == T4_BAR2_QTYPE_EGRESS
7517 ? adapter->params.sge.eq_qpp
7518 : adapter->params.sge.iq_qpp);
7519 qpp_mask = (1 << qpp_shift) - 1;
7520
7521 /* Calculate the basics of the BAR2 SGE Queue register area:
7522 * o The BAR2 page the Queue registers will be in.
7523 * o The BAR2 Queue ID.
7524 * o The BAR2 Queue ID Offset into the BAR2 page.
7525 */
513d1a1d 7526 bar2_page_offset = ((u64)(qid >> qpp_shift) << page_shift);
e85c9a7a
HS
7527 bar2_qid = qid & qpp_mask;
7528 bar2_qid_offset = bar2_qid * SGE_UDB_SIZE;
7529
7530 /* If the BAR2 Queue ID Offset is less than the Page Size, then the
7531 * hardware will infer the Absolute Queue ID simply from the writes to
7532 * the BAR2 Queue ID Offset within the BAR2 Page (and we need to use a
7533 * BAR2 Queue ID of 0 for those writes). Otherwise, we'll simply
7534 * write to the first BAR2 SGE Queue Area within the BAR2 Page with
7535 * the BAR2 Queue ID and the hardware will infer the Absolute Queue ID
7536 * from the BAR2 Page and BAR2 Queue ID.
7537 *
7538 * One important consequence of this is that some BAR2 SGE registers
7539 * have a "Queue ID" field and we can write the BAR2 SGE Queue ID
7540 * there. But other registers synthesize the SGE Queue ID purely
7541 * from the writes to the registers -- the Write Combined Doorbell
7542 * Buffer is a good example. These BAR2 SGE Registers are only
7543 * available for those BAR2 SGE Register areas where the SGE Absolute
7544 * Queue ID can be inferred from simple writes.
7545 */
7546 bar2_qoffset = bar2_page_offset;
7547 bar2_qinferred = (bar2_qid_offset < page_size);
7548 if (bar2_qinferred) {
7549 bar2_qoffset += bar2_qid_offset;
7550 bar2_qid = 0;
7551 }
7552
7553 *pbar2_qoffset = bar2_qoffset;
7554 *pbar2_qid = bar2_qid;
7555 return 0;
7556}
7557
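/* Illustrative sketch, not part of the driver: turning an absolute egress
 * queue ID into a kernel-mode doorbell address. It assumes BAR2 has been
 * ioremap'ed into adap->bar2 as the rest of the driver does; the output
 * parameter names are made up for the example.
 */
static int example_egress_doorbell(struct adapter *adap, unsigned int qid,
				   void __iomem **db_addr,
				   unsigned int *db_qid)
{
	u64 bar2_qoffset;
	unsigned int bar2_qid;
	int ret;

	ret = t4_bar2_sge_qregs(adap, qid, T4_BAR2_QTYPE_EGRESS, 0,
				&bar2_qoffset, &bar2_qid);
	if (ret)
		return ret;

	*db_addr = adap->bar2 + bar2_qoffset;
	*db_qid = bar2_qid;	/* 0 => QID is inferred from the offset */
	return 0;
}
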
ae469b68
HS
7558/**
7559 * t4_init_devlog_params - initialize adapter->params.devlog
7560 * @adap: the adapter
7561 *
7562 * Initialize various fields of the adapter's Firmware Device Log
7563 * Parameters structure.
7564 */
7565int t4_init_devlog_params(struct adapter *adap)
7566{
7567 struct devlog_params *dparams = &adap->params.devlog;
7568 u32 pf_dparams;
7569 unsigned int devlog_meminfo;
7570 struct fw_devlog_cmd devlog_cmd;
7571 int ret;
7572
7573 /* If we're dealing with newer firmware, the Device Log Parameters
7574 * are stored in a designated register which allows us to access the
7575 * Device Log even if we can't talk to the firmware.
7576 */
7577 pf_dparams =
7578 t4_read_reg(adap, PCIE_FW_REG(PCIE_FW_PF_A, PCIE_FW_PF_DEVLOG));
7579 if (pf_dparams) {
7580 unsigned int nentries, nentries128;
7581
7582 dparams->memtype = PCIE_FW_PF_DEVLOG_MEMTYPE_G(pf_dparams);
7583 dparams->start = PCIE_FW_PF_DEVLOG_ADDR16_G(pf_dparams) << 4;
7584
7585 nentries128 = PCIE_FW_PF_DEVLOG_NENTRIES128_G(pf_dparams);
7586 nentries = (nentries128 + 1) * 128;
7587 dparams->size = nentries * sizeof(struct fw_devlog_e);
7588
7589 return 0;
7590 }
7591
7592 /* Otherwise, ask the firmware for its Device Log Parameters.
7593 */
7594 memset(&devlog_cmd, 0, sizeof(devlog_cmd));
f404f80c
HS
7595 devlog_cmd.op_to_write = cpu_to_be32(FW_CMD_OP_V(FW_DEVLOG_CMD) |
7596 FW_CMD_REQUEST_F | FW_CMD_READ_F);
7597 devlog_cmd.retval_len16 = cpu_to_be32(FW_LEN16(devlog_cmd));
ae469b68
HS
7598 ret = t4_wr_mbox(adap, adap->mbox, &devlog_cmd, sizeof(devlog_cmd),
7599 &devlog_cmd);
7600 if (ret)
7601 return ret;
7602
f404f80c
HS
7603 devlog_meminfo =
7604 be32_to_cpu(devlog_cmd.memtype_devlog_memaddr16_devlog);
ae469b68
HS
7605 dparams->memtype = FW_DEVLOG_CMD_MEMTYPE_DEVLOG_G(devlog_meminfo);
7606 dparams->start = FW_DEVLOG_CMD_MEMADDR16_DEVLOG_G(devlog_meminfo) << 4;
f404f80c 7607 dparams->size = be32_to_cpu(devlog_cmd.memsize_devlog);
ae469b68
HS
7608
7609 return 0;
7610}
7611
e85c9a7a
HS
7612/**
7613 * t4_init_sge_params - initialize adap->params.sge
7614 * @adapter: the adapter
7615 *
7616 * Initialize various fields of the adapter's SGE Parameters structure.
7617 */
7618int t4_init_sge_params(struct adapter *adapter)
7619{
7620 struct sge_params *sge_params = &adapter->params.sge;
7621 u32 hps, qpp;
7622 unsigned int s_hps, s_qpp;
7623
7624 /* Extract the SGE Page Size for our PF.
7625 */
f612b815 7626 hps = t4_read_reg(adapter, SGE_HOST_PAGE_SIZE_A);
e85c9a7a 7627 s_hps = (HOSTPAGESIZEPF0_S +
b2612722 7628 (HOSTPAGESIZEPF1_S - HOSTPAGESIZEPF0_S) * adapter->pf);
e85c9a7a
HS
7629 sge_params->hps = ((hps >> s_hps) & HOSTPAGESIZEPF0_M);
7630
7631 /* Extract the SGE Egress and Ingress Queues Per Page for our PF.
7632 */
7633 s_qpp = (QUEUESPERPAGEPF0_S +
b2612722 7634 (QUEUESPERPAGEPF1_S - QUEUESPERPAGEPF0_S) * adapter->pf);
f612b815
HS
7635 qpp = t4_read_reg(adapter, SGE_EGRESS_QUEUES_PER_PAGE_PF_A);
7636 sge_params->eq_qpp = ((qpp >> s_qpp) & QUEUESPERPAGEPF0_M);
f061de42 7637 qpp = t4_read_reg(adapter, SGE_INGRESS_QUEUES_PER_PAGE_PF_A);
f612b815 7638 sge_params->iq_qpp = ((qpp >> s_qpp) & QUEUESPERPAGEPF0_M);
e85c9a7a
HS
7639
7640 return 0;
7641}
7642
dcf7b6f5
KS
7643/**
7644 * t4_init_tp_params - initialize adap->params.tp
7645 * @adap: the adapter
7646 *
7647 * Initialize various fields of the adapter's TP Parameters structure.
7648 */
7649int t4_init_tp_params(struct adapter *adap)
7650{
7651 int chan;
7652 u32 v;
7653
837e4a42
HS
7654 v = t4_read_reg(adap, TP_TIMER_RESOLUTION_A);
7655 adap->params.tp.tre = TIMERRESOLUTION_G(v);
7656 adap->params.tp.dack_re = DELAYEDACKRESOLUTION_G(v);
dcf7b6f5
KS
7657
7658 /* MODQ_REQ_MAP defaults to setting queues 0-3 to chan 0-3 */
7659 for (chan = 0; chan < NCHAN; chan++)
7660 adap->params.tp.tx_modq[chan] = chan;
7661
7662 /* Cache the adapter's Compressed Filter Mode and global Ingress
7663 * Configuration.
7664 */
0b2c2a93 7665 if (t4_use_ldst(adap)) {
c1e9af0c
HS
7666 t4_fw_tp_pio_rw(adap, &adap->params.tp.vlan_pri_map, 1,
7667 TP_VLAN_PRI_MAP_A, 1);
7668 t4_fw_tp_pio_rw(adap, &adap->params.tp.ingress_config, 1,
7669 TP_INGRESS_CONFIG_A, 1);
7670 } else {
7671 t4_read_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A,
7672 &adap->params.tp.vlan_pri_map, 1,
7673 TP_VLAN_PRI_MAP_A);
7674 t4_read_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A,
7675 &adap->params.tp.ingress_config, 1,
7676 TP_INGRESS_CONFIG_A);
7677 }
dcf7b6f5
KS
7678
7679 /* Now that we have TP_VLAN_PRI_MAP cached, we can calculate the field
7680 * shift positions of several elements of the Compressed Filter Tuple
7681 * for this adapter which we need frequently ...
7682 */
0d804338
HS
7683 adap->params.tp.vlan_shift = t4_filter_field_shift(adap, VLAN_F);
7684 adap->params.tp.vnic_shift = t4_filter_field_shift(adap, VNIC_ID_F);
7685 adap->params.tp.port_shift = t4_filter_field_shift(adap, PORT_F);
dcf7b6f5 7686 adap->params.tp.protocol_shift = t4_filter_field_shift(adap,
0d804338 7687 PROTOCOL_F);
dcf7b6f5
KS
7688
7689 /* If TP_INGRESS_CONFIG.VNID == 0, then TP_VLAN_PRI_MAP.VNIC_ID
dbedd44e 7690 * represents the presence of an Outer VLAN instead of a VNIC ID.
dcf7b6f5 7691 */
0d804338 7692 if ((adap->params.tp.ingress_config & VNIC_F) == 0)
dcf7b6f5
KS
7693 adap->params.tp.vnic_shift = -1;
7694
7695 return 0;
7696}
7697
7698/**
7699 * t4_filter_field_shift - calculate filter field shift
7700 * @adap: the adapter
7701 * @filter_sel: the desired field (from TP_VLAN_PRI_MAP bits)
7702 *
7703 * Return the shift position of a filter field within the Compressed
7704 * Filter Tuple. The filter field is specified via its selection bit
7705 * within TP_VLAN_PRI_MAP (filter mode), e.g. VLAN_F.
7706 */
7707int t4_filter_field_shift(const struct adapter *adap, int filter_sel)
7708{
7709 unsigned int filter_mode = adap->params.tp.vlan_pri_map;
7710 unsigned int sel;
7711 int field_shift;
7712
7713 if ((filter_mode & filter_sel) == 0)
7714 return -1;
7715
7716 for (sel = 1, field_shift = 0; sel < filter_sel; sel <<= 1) {
7717 switch (filter_mode & sel) {
0d804338
HS
7718 case FCOE_F:
7719 field_shift += FT_FCOE_W;
dcf7b6f5 7720 break;
0d804338
HS
7721 case PORT_F:
7722 field_shift += FT_PORT_W;
dcf7b6f5 7723 break;
0d804338
HS
7724 case VNIC_ID_F:
7725 field_shift += FT_VNIC_ID_W;
dcf7b6f5 7726 break;
0d804338
HS
7727 case VLAN_F:
7728 field_shift += FT_VLAN_W;
dcf7b6f5 7729 break;
0d804338
HS
7730 case TOS_F:
7731 field_shift += FT_TOS_W;
dcf7b6f5 7732 break;
0d804338
HS
7733 case PROTOCOL_F:
7734 field_shift += FT_PROTOCOL_W;
dcf7b6f5 7735 break;
0d804338
HS
7736 case ETHERTYPE_F:
7737 field_shift += FT_ETHERTYPE_W;
dcf7b6f5 7738 break;
0d804338
HS
7739 case MACMATCH_F:
7740 field_shift += FT_MACMATCH_W;
dcf7b6f5 7741 break;
0d804338
HS
7742 case MPSHITTYPE_F:
7743 field_shift += FT_MPSHITTYPE_W;
dcf7b6f5 7744 break;
0d804338
HS
7745 case FRAGMENTATION_F:
7746 field_shift += FT_FRAGMENTATION_W;
dcf7b6f5
KS
7747 break;
7748 }
7749 }
7750 return field_shift;
7751}
7752
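/* Illustrative sketch, not part of the driver: how the shifts cached by
 * t4_init_tp_params() are meant to be consumed when composing a Compressed
 * Filter Tuple. The vlan/port/proto values are placeholders; a shift of -1
 * means the field is absent from the current filter mode and is skipped.
 */
static u64 example_build_ftuple(const struct adapter *adap, unsigned int vlan,
				unsigned int port, unsigned int proto)
{
	const struct tp_params *tp = &adap->params.tp;
	u64 ftuple = 0;

	if (tp->vlan_shift >= 0)
		ftuple |= (u64)vlan << tp->vlan_shift;
	if (tp->port_shift >= 0)
		ftuple |= (u64)port << tp->port_shift;
	if (tp->protocol_shift >= 0)
		ftuple |= (u64)proto << tp->protocol_shift;

	return ftuple;
}
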
c035e183
HS
7753int t4_init_rss_mode(struct adapter *adap, int mbox)
7754{
7755 int i, ret;
7756 struct fw_rss_vi_config_cmd rvc;
7757
7758 memset(&rvc, 0, sizeof(rvc));
7759
7760 for_each_port(adap, i) {
7761 struct port_info *p = adap2pinfo(adap, i);
7762
f404f80c
HS
7763 rvc.op_to_viid =
7764 cpu_to_be32(FW_CMD_OP_V(FW_RSS_VI_CONFIG_CMD) |
7765 FW_CMD_REQUEST_F | FW_CMD_READ_F |
7766 FW_RSS_VI_CONFIG_CMD_VIID_V(p->viid));
7767 rvc.retval_len16 = cpu_to_be32(FW_LEN16(rvc));
c035e183
HS
7768 ret = t4_wr_mbox(adap, mbox, &rvc, sizeof(rvc), &rvc);
7769 if (ret)
7770 return ret;
f404f80c 7771 p->rss_mode = be32_to_cpu(rvc.u.basicvirtual.defaultq_to_udpen);
c035e183
HS
7772 }
7773 return 0;
7774}
7775
c3e324e3
HS
7776/**
7777 * t4_init_portinfo - allocate a virtual interface and initialize port_info
7778 * @pi: the port_info
7779 * @mbox: mailbox to use for the FW command
7780 * @port: physical port associated with the VI
7781 * @pf: the PF owning the VI
7782 * @vf: the VF owning the VI
7783 * @mac: the MAC address of the VI
7784 *
7785 * Allocates a virtual interface for the given physical port. If @mac is
7786 * not %NULL it contains the MAC address of the VI as assigned by FW.
7787 * @mac should be large enough to hold an Ethernet address.
7788 * Returns < 0 on error.
7789 */
7790int t4_init_portinfo(struct port_info *pi, int mbox,
7791 int port, int pf, int vf, u8 mac[])
56d36be4 7792{
c3e324e3 7793 int ret;
56d36be4 7794 struct fw_port_cmd c;
c3e324e3 7795 unsigned int rss_size;
56d36be4
DM
7796
7797 memset(&c, 0, sizeof(c));
c3e324e3
HS
7798 c.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PORT_CMD) |
7799 FW_CMD_REQUEST_F | FW_CMD_READ_F |
7800 FW_PORT_CMD_PORTID_V(port));
7801 c.action_to_len16 = cpu_to_be32(
7802 FW_PORT_CMD_ACTION_V(FW_PORT_ACTION_GET_PORT_INFO) |
7803 FW_LEN16(c));
7804 ret = t4_wr_mbox(pi->adapter, mbox, &c, sizeof(c), &c);
7805 if (ret)
7806 return ret;
7807
7808 ret = t4_alloc_vi(pi->adapter, mbox, port, pf, vf, 1, mac, &rss_size);
7809 if (ret < 0)
7810 return ret;
7811
7812 pi->viid = ret;
7813 pi->tx_chan = port;
7814 pi->lport = port;
7815 pi->rss_size = rss_size;
7816
7817 ret = be32_to_cpu(c.u.info.lstatus_to_modtype);
7818 pi->mdio_addr = (ret & FW_PORT_CMD_MDIOCAP_F) ?
7819 FW_PORT_CMD_MDIOADDR_G(ret) : -1;
7820 pi->port_type = FW_PORT_CMD_PTYPE_G(ret);
7821 pi->mod_type = FW_PORT_MOD_TYPE_NA;
7822
7823 init_link_config(&pi->link_cfg, be16_to_cpu(c.u.info.pcap));
7824 return 0;
7825}
7826
7827int t4_port_init(struct adapter *adap, int mbox, int pf, int vf)
7828{
7829 u8 addr[6];
7830 int ret, i, j = 0;
56d36be4
DM
7831
7832 for_each_port(adap, i) {
c3e324e3 7833 struct port_info *pi = adap2pinfo(adap, i);
56d36be4
DM
7834
7835 while ((adap->params.portvec & (1 << j)) == 0)
7836 j++;
7837
c3e324e3 7838 ret = t4_init_portinfo(pi, mbox, j, pf, vf, addr);
56d36be4
DM
7839 if (ret)
7840 return ret;
7841
56d36be4 7842 memcpy(adap->port[i]->dev_addr, addr, ETH_ALEN);
40c9f8ab 7843 adap->port[i]->dev_port = j;
56d36be4
DM
7844 j++;
7845 }
7846 return 0;
7847}
f1ff24aa 7848
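/* Illustrative sketch, not part of the driver: one plausible ordering of the
 * init helpers in this file during probe. The real driver interleaves these
 * with firmware configuration; error handling is collapsed here for brevity
 * and VF 0 is assumed.
 */
static int example_bringup(struct adapter *adap)
{
	int ret;

	ret = t4_prep_adapter(adap);
	if (!ret)
		ret = t4_init_devlog_params(adap);
	if (!ret)
		ret = t4_init_sge_params(adap);
	if (!ret)
		ret = t4_init_tp_params(adap);
	if (!ret)
		ret = t4_port_init(adap, adap->mbox, adap->pf, 0);
	if (!ret)
		ret = t4_init_rss_mode(adap, adap->mbox);
	return ret;
}
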
74b3092c
HS
7849/**
7850 * t4_read_cimq_cfg - read CIM queue configuration
7851 * @adap: the adapter
7852 * @base: holds the queue base addresses in bytes
7853 * @size: holds the queue sizes in bytes
7854 * @thres: holds the queue full thresholds in bytes
7855 *
7856 * Returns the current configuration of the CIM queues, starting with
7857 * the IBQs, then the OBQs.
7858 */
7859void t4_read_cimq_cfg(struct adapter *adap, u16 *base, u16 *size, u16 *thres)
7860{
7861 unsigned int i, v;
7862 int cim_num_obq = is_t4(adap->params.chip) ?
7863 CIM_NUM_OBQ : CIM_NUM_OBQ_T5;
7864
7865 for (i = 0; i < CIM_NUM_IBQ; i++) {
7866 t4_write_reg(adap, CIM_QUEUE_CONFIG_REF_A, IBQSELECT_F |
7867 QUENUMSELECT_V(i));
7868 v = t4_read_reg(adap, CIM_QUEUE_CONFIG_CTRL_A);
7869 /* value is in 256-byte units */
7870 *base++ = CIMQBASE_G(v) * 256;
7871 *size++ = CIMQSIZE_G(v) * 256;
7872 *thres++ = QUEFULLTHRSH_G(v) * 8; /* 8-byte unit */
7873 }
7874 for (i = 0; i < cim_num_obq; i++) {
7875 t4_write_reg(adap, CIM_QUEUE_CONFIG_REF_A, OBQSELECT_F |
7876 QUENUMSELECT_V(i));
7877 v = t4_read_reg(adap, CIM_QUEUE_CONFIG_CTRL_A);
7878 /* value is in 256-byte units */
7879 *base++ = CIMQBASE_G(v) * 256;
7880 *size++ = CIMQSIZE_G(v) * 256;
7881 }
7882}
7883
e5f0e43b
HS
7884/**
7885 * t4_read_cim_ibq - read the contents of a CIM inbound queue
7886 * @adap: the adapter
7887 * @qid: the queue index
7888 * @data: where to store the queue contents
7889 * @n: capacity of @data in 32-bit words
7890 *
7891 * Reads the contents of the selected CIM queue starting at address 0 up
7892 * to the capacity of @data. @n must be a multiple of 4. Returns < 0 on
7893 * error and the number of 32-bit words actually read on success.
7894 */
7895int t4_read_cim_ibq(struct adapter *adap, unsigned int qid, u32 *data, size_t n)
7896{
7897 int i, err, attempts;
7898 unsigned int addr;
7899 const unsigned int nwords = CIM_IBQ_SIZE * 4;
7900
7901 if (qid > 5 || (n & 3))
7902 return -EINVAL;
7903
7904 addr = qid * nwords;
7905 if (n > nwords)
7906 n = nwords;
7907
7908 /* It might take 3-10ms before the IBQ debug read access is allowed.
7909 * Wait for 1 Sec with a delay of 1 usec.
7910 */
7911 attempts = 1000000;
7912
7913 for (i = 0; i < n; i++, addr++) {
7914 t4_write_reg(adap, CIM_IBQ_DBG_CFG_A, IBQDBGADDR_V(addr) |
7915 IBQDBGEN_F);
7916 err = t4_wait_op_done(adap, CIM_IBQ_DBG_CFG_A, IBQDBGBUSY_F, 0,
7917 attempts, 1);
7918 if (err)
7919 return err;
7920 *data++ = t4_read_reg(adap, CIM_IBQ_DBG_DATA_A);
7921 }
7922 t4_write_reg(adap, CIM_IBQ_DBG_CFG_A, 0);
c778af7d
HS
7923 return i;
7924}
7925
7926/**
7927 * t4_read_cim_obq - read the contents of a CIM outbound queue
7928 * @adap: the adapter
7929 * @qid: the queue index
7930 * @data: where to store the queue contents
7931 * @n: capacity of @data in 32-bit words
7932 *
7933 * Reads the contents of the selected CIM queue starting at address 0 up
7934 * to the capacity of @data. @n must be a multiple of 4. Returns < 0 on
7935 * error and the number of 32-bit words actually read on success.
7936 */
7937int t4_read_cim_obq(struct adapter *adap, unsigned int qid, u32 *data, size_t n)
7938{
7939 int i, err;
7940 unsigned int addr, v, nwords;
7941 int cim_num_obq = is_t4(adap->params.chip) ?
7942 CIM_NUM_OBQ : CIM_NUM_OBQ_T5;
7943
7944 if ((qid > (cim_num_obq - 1)) || (n & 3))
7945 return -EINVAL;
7946
7947 t4_write_reg(adap, CIM_QUEUE_CONFIG_REF_A, OBQSELECT_F |
7948 QUENUMSELECT_V(qid));
7949 v = t4_read_reg(adap, CIM_QUEUE_CONFIG_CTRL_A);
7950
7951 addr = CIMQBASE_G(v) * 64; /* multiple of 256 -> multiple of 4 */
7952 nwords = CIMQSIZE_G(v) * 64; /* same */
7953 if (n > nwords)
7954 n = nwords;
7955
7956 for (i = 0; i < n; i++, addr++) {
7957 t4_write_reg(adap, CIM_OBQ_DBG_CFG_A, OBQDBGADDR_V(addr) |
7958 OBQDBGEN_F);
7959 err = t4_wait_op_done(adap, CIM_OBQ_DBG_CFG_A, OBQDBGBUSY_F, 0,
7960 2, 1);
7961 if (err)
7962 return err;
7963 *data++ = t4_read_reg(adap, CIM_OBQ_DBG_DATA_A);
7964 }
7965 t4_write_reg(adap, CIM_OBQ_DBG_CFG_A, 0);
e5f0e43b
HS
7966 return i;
7967}
7968
f1ff24aa
HS
7969/**
7970 * t4_cim_read - read a block from CIM internal address space
7971 * @adap: the adapter
7972 * @addr: the start address within the CIM address space
7973 * @n: number of words to read
7974 * @valp: where to store the result
7975 *
7976 * Reads a block of 4-byte words from the CIM internal address space.
7977 */
7978int t4_cim_read(struct adapter *adap, unsigned int addr, unsigned int n,
7979 unsigned int *valp)
7980{
7981 int ret = 0;
7982
7983 if (t4_read_reg(adap, CIM_HOST_ACC_CTRL_A) & HOSTBUSY_F)
7984 return -EBUSY;
7985
7986 for ( ; !ret && n--; addr += 4) {
7987 t4_write_reg(adap, CIM_HOST_ACC_CTRL_A, addr);
7988 ret = t4_wait_op_done(adap, CIM_HOST_ACC_CTRL_A, HOSTBUSY_F,
7989 0, 5, 2);
7990 if (!ret)
7991 *valp++ = t4_read_reg(adap, CIM_HOST_ACC_DATA_A);
7992 }
7993 return ret;
7994}
7995
7996/**
7997 * t4_cim_write - write a block into CIM internal address space
7998 * @adap: the adapter
7999 * @addr: the start address within the CIM address space
8000 * @n: number of words to write
8001 * @valp: set of values to write
8002 *
8003 * Writes a block of 4-byte words into the CIM internal address space.
8004 */
8005int t4_cim_write(struct adapter *adap, unsigned int addr, unsigned int n,
8006 const unsigned int *valp)
8007{
8008 int ret = 0;
8009
8010 if (t4_read_reg(adap, CIM_HOST_ACC_CTRL_A) & HOSTBUSY_F)
8011 return -EBUSY;
8012
8013 for ( ; !ret && n--; addr += 4) {
8014 t4_write_reg(adap, CIM_HOST_ACC_DATA_A, *valp++);
8015 t4_write_reg(adap, CIM_HOST_ACC_CTRL_A, addr | HOSTWRITE_F);
8016 ret = t4_wait_op_done(adap, CIM_HOST_ACC_CTRL_A, HOSTBUSY_F,
8017 0, 5, 2);
8018 }
8019 return ret;
8020}
8021
8022static int t4_cim_write1(struct adapter *adap, unsigned int addr,
8023 unsigned int val)
8024{
8025 return t4_cim_write(adap, addr, 1, &val);
8026}
8027
8028/**
8029 * t4_cim_read_la - read CIM LA capture buffer
8030 * @adap: the adapter
8031 * @la_buf: where to store the LA data
8032 * @wrptr: the HW write pointer within the capture buffer
8033 *
8034 * Reads the contents of the CIM LA buffer with the most recent entry at
8035 * the end of the returned data and with the entry at @wrptr first.
8036 * We try to leave the LA in the running state we find it in.
8037 */
8038int t4_cim_read_la(struct adapter *adap, u32 *la_buf, unsigned int *wrptr)
8039{
8040 int i, ret;
8041 unsigned int cfg, val, idx;
8042
8043 ret = t4_cim_read(adap, UP_UP_DBG_LA_CFG_A, 1, &cfg);
8044 if (ret)
8045 return ret;
8046
8047 if (cfg & UPDBGLAEN_F) { /* LA is running, freeze it */
8048 ret = t4_cim_write1(adap, UP_UP_DBG_LA_CFG_A, 0);
8049 if (ret)
8050 return ret;
8051 }
8052
8053 ret = t4_cim_read(adap, UP_UP_DBG_LA_CFG_A, 1, &val);
8054 if (ret)
8055 goto restart;
8056
8057 idx = UPDBGLAWRPTR_G(val);
8058 if (wrptr)
8059 *wrptr = idx;
8060
8061 for (i = 0; i < adap->params.cim_la_size; i++) {
8062 ret = t4_cim_write1(adap, UP_UP_DBG_LA_CFG_A,
8063 UPDBGLARDPTR_V(idx) | UPDBGLARDEN_F);
8064 if (ret)
8065 break;
8066 ret = t4_cim_read(adap, UP_UP_DBG_LA_CFG_A, 1, &val);
8067 if (ret)
8068 break;
8069 if (val & UPDBGLARDEN_F) {
8070 ret = -ETIMEDOUT;
8071 break;
8072 }
8073 ret = t4_cim_read(adap, UP_UP_DBG_LA_DATA_A, 1, &la_buf[i]);
8074 if (ret)
8075 break;
8076 idx = (idx + 1) & UPDBGLARDPTR_M;
8077 }
8078restart:
8079 if (cfg & UPDBGLAEN_F) {
8080 int r = t4_cim_write1(adap, UP_UP_DBG_LA_CFG_A,
8081 cfg & ~UPDBGLARDEN_F);
8082 if (!ret)
8083 ret = r;
8084 }
8085 return ret;
8086}
2d277b3b
HS
8087
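/* Illustrative sketch, not part of the driver: snapshotting the CIM logic
 * analyzer. The buffer needs adap->params.cim_la_size 32-bit entries (set up
 * in t4_prep_adapter() earlier); GFP_KERNEL means this must not be called
 * from atomic context.
 */
static int example_snapshot_cim_la(struct adapter *adap)
{
	unsigned int wrptr;
	u32 *buf;
	int ret;

	buf = kcalloc(adap->params.cim_la_size, sizeof(u32), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	ret = t4_cim_read_la(adap, buf, &wrptr);
	if (!ret)
		dev_info(adap->pdev_dev, "CIM LA wrote up to entry %u\n",
			 wrptr);

	kfree(buf);
	return ret;
}
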
8088/**
8089 * t4_tp_read_la - read TP LA capture buffer
8090 * @adap: the adapter
8091 * @la_buf: where to store the LA data
8092 * @wrptr: the HW write pointer within the capture buffer
8093 *
8094 * Reads the contents of the TP LA buffer with the most recent entry at
8095 * the end of the returned data and with the entry at @wrptr first.
8096 * We leave the LA in the running state we find it in.
8097 */
8098void t4_tp_read_la(struct adapter *adap, u64 *la_buf, unsigned int *wrptr)
8099{
8100 bool last_incomplete;
8101 unsigned int i, cfg, val, idx;
8102
8103 cfg = t4_read_reg(adap, TP_DBG_LA_CONFIG_A) & 0xffff;
8104 if (cfg & DBGLAENABLE_F) /* freeze LA */
8105 t4_write_reg(adap, TP_DBG_LA_CONFIG_A,
8106 adap->params.tp.la_mask | (cfg ^ DBGLAENABLE_F));
8107
8108 val = t4_read_reg(adap, TP_DBG_LA_CONFIG_A);
8109 idx = DBGLAWPTR_G(val);
8110 last_incomplete = DBGLAMODE_G(val) >= 2 && (val & DBGLAWHLF_F) == 0;
8111 if (last_incomplete)
8112 idx = (idx + 1) & DBGLARPTR_M;
8113 if (wrptr)
8114 *wrptr = idx;
8115
8116 val &= 0xffff;
8117 val &= ~DBGLARPTR_V(DBGLARPTR_M);
8118 val |= adap->params.tp.la_mask;
8119
8120 for (i = 0; i < TPLA_SIZE; i++) {
8121 t4_write_reg(adap, TP_DBG_LA_CONFIG_A, DBGLARPTR_V(idx) | val);
8122 la_buf[i] = t4_read_reg64(adap, TP_DBG_LA_DATAL_A);
8123 idx = (idx + 1) & DBGLARPTR_M;
8124 }
8125
8126 /* Wipe out last entry if it isn't valid */
8127 if (last_incomplete)
8128 la_buf[TPLA_SIZE - 1] = ~0ULL;
8129
8130 if (cfg & DBGLAENABLE_F) /* restore running state */
8131 t4_write_reg(adap, TP_DBG_LA_CONFIG_A,
8132 cfg | adap->params.tp.la_mask);
8133}
a3bfb617
HS
8134
8135/* SGE Hung Ingress DMA Warning Threshold time and Warning Repeat Rate (in
8136 * seconds). If we find one of the SGE Ingress DMA State Machines in the same
8137 * state for more than the Warning Threshold then we'll issue a warning about
8139 * a potential hang. We'll repeat the warning every Warning Repeat seconds
8140 * for as long as the SGE Ingress DMA Channel appears to be hung.
8140 * If the situation clears, we'll note that as well.
8141 */
8142#define SGE_IDMA_WARN_THRESH 1
8143#define SGE_IDMA_WARN_REPEAT 300
8144
8145/**
8146 * t4_idma_monitor_init - initialize SGE Ingress DMA Monitor
8147 * @adapter: the adapter
8148 * @idma: the adapter IDMA Monitor state
8149 *
8150 * Initialize the state of an SGE Ingress DMA Monitor.
8151 */
8152void t4_idma_monitor_init(struct adapter *adapter,
8153 struct sge_idma_monitor_state *idma)
8154{
8155 /* Initialize the state variables for detecting an SGE Ingress DMA
8156 * hang. The SGE has internal counters which count up on each clock
8157 * tick whenever the SGE finds its Ingress DMA State Engines in the
8158 * same state they were on the previous clock tick. The clock used is
8159 * the Core Clock so we have a limit on the maximum "time" they can
8160 * record; typically a very small number of seconds. For instance,
8161 * with a 600MHz Core Clock, we can only count up to a bit more than
8162 * 7s. So we'll synthesize a larger counter in order to not run the
8163 * risk of having the "timers" overflow and give us the flexibility to
8164 * maintain a Hung SGE State Machine of our own which operates across
8165 * a longer time frame.
8166 */
8167 idma->idma_1s_thresh = core_ticks_per_usec(adapter) * 1000000; /* 1s */
8168 idma->idma_stalled[0] = 0;
8169 idma->idma_stalled[1] = 0;
8170}
8171
8172/**
8173 * t4_idma_monitor - monitor SGE Ingress DMA state
8174 * @adapter: the adapter
8175 * @idma: the adapter IDMA Monitor state
8176 * @hz: number of ticks/second
8177 * @ticks: number of ticks since the last IDMA Monitor call
8178 */
8179void t4_idma_monitor(struct adapter *adapter,
8180 struct sge_idma_monitor_state *idma,
8181 int hz, int ticks)
8182{
8183 int i, idma_same_state_cnt[2];
8184
8185 /* Read the SGE Debug Ingress DMA Same State Count registers. These
8186 * are counters inside the SGE which count up on each clock when the
8187 * SGE finds its Ingress DMA State Engines in the same states they
8188 * were in the previous clock. The counters will peg out at
8189 * 0xffffffff without wrapping around so once they pass the 1s
8190 * threshold they'll stay above that till the IDMA state changes.
8191 */
8192 t4_write_reg(adapter, SGE_DEBUG_INDEX_A, 13);
8193 idma_same_state_cnt[0] = t4_read_reg(adapter, SGE_DEBUG_DATA_HIGH_A);
8194 idma_same_state_cnt[1] = t4_read_reg(adapter, SGE_DEBUG_DATA_LOW_A);
8195
8196 for (i = 0; i < 2; i++) {
8197 u32 debug0, debug11;
8198
8199 /* If the Ingress DMA Same State Counter ("timer") is less
8200 * than 1s, then we can reset our synthesized Stall Timer and
8201 * continue. If we have previously emitted warnings about a
8202 * potentially stalled Ingress Queue, issue a note indicating
8203 * that the Ingress Queue has resumed forward progress.
8204 */
8205 if (idma_same_state_cnt[i] < idma->idma_1s_thresh) {
8206 if (idma->idma_stalled[i] >= SGE_IDMA_WARN_THRESH * hz)
8207 dev_warn(adapter->pdev_dev, "SGE idma%d, queue %u, "
8208 "resumed after %d seconds\n",
8209 i, idma->idma_qid[i],
8210 idma->idma_stalled[i] / hz);
8211 idma->idma_stalled[i] = 0;
8212 continue;
8213 }
8214
8215 /* Synthesize an SGE Ingress DMA Same State Timer in the Hz
8216 * domain. The first time we get here it'll be because we
8217 * passed the 1s Threshold; each additional time it'll be
8218 * because the RX Timer Callback is being fired on its regular
8219 * schedule.
8220 *
8221 * If the stall is below our Potential Hung Ingress Queue
8222 * Warning Threshold, continue.
8223 */
8224 if (idma->idma_stalled[i] == 0) {
8225 idma->idma_stalled[i] = hz;
8226 idma->idma_warn[i] = 0;
8227 } else {
8228 idma->idma_stalled[i] += ticks;
8229 idma->idma_warn[i] -= ticks;
8230 }
8231
8232 if (idma->idma_stalled[i] < SGE_IDMA_WARN_THRESH * hz)
8233 continue;
8234
8235 /* We'll issue a warning every SGE_IDMA_WARN_REPEAT seconds.
8236 */
8237 if (idma->idma_warn[i] > 0)
8238 continue;
8239 idma->idma_warn[i] = SGE_IDMA_WARN_REPEAT * hz;
8240
8241 /* Read and save the SGE IDMA State and Queue ID information.
8242 * We do this every time in case it changes across time ...
8243 * can't be too careful ...
8244 */
8245 t4_write_reg(adapter, SGE_DEBUG_INDEX_A, 0);
8246 debug0 = t4_read_reg(adapter, SGE_DEBUG_DATA_LOW_A);
8247 idma->idma_state[i] = (debug0 >> (i * 9)) & 0x3f;
8248
8249 t4_write_reg(adapter, SGE_DEBUG_INDEX_A, 11);
8250 debug11 = t4_read_reg(adapter, SGE_DEBUG_DATA_LOW_A);
8251 idma->idma_qid[i] = (debug11 >> (i * 16)) & 0xffff;
8252
8253 dev_warn(adapter->pdev_dev, "SGE idma%u, queue %u, potentially stuck in "
8254 "state %u for %d seconds (debug0=%#x, debug11=%#x)\n",
8255 i, idma->idma_qid[i], idma->idma_state[i],
8256 idma->idma_stalled[i] / hz,
8257 debug0, debug11);
8258 t4_sge_decode_idma_state(adapter, idma->idma_state[i]);
8259 }
8260}
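/* Illustrative sketch, not part of the driver: wiring the IDMA monitor into
 * a periodic callback. The state is initialized once; afterwards the monitor
 * is kicked on every run with HZ and the callback period in jiffies. The
 * function name and period_ticks parameter are assumptions for the example.
 */
static void example_idma_monitor_usage(struct adapter *adap,
				       struct sge_idma_monitor_state *idma,
				       int period_ticks)
{
	/* once, during setup */
	t4_idma_monitor_init(adap, idma);

	/* then from each run of the periodic RX/timer callback */
	t4_idma_monitor(adap, idma, HZ, period_ticks);
}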