/*
 * This file is part of the Chelsio T4 Ethernet driver for Linux.
 *
 * Copyright (c) 2003-2016 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/delay.h>
#include "cxgb4.h"
#include "t4_regs.h"
#include "t4_values.h"
#include "t4fw_api.h"
#include "t4fw_version.h"

/**
 *	t4_wait_op_done_val - wait until an operation is completed
 *	@adapter: the adapter performing the operation
 *	@reg: the register to check for completion
 *	@mask: a single-bit field within @reg that indicates completion
 *	@polarity: the value of the field when the operation is completed
 *	@attempts: number of check iterations
 *	@delay: delay in usecs between iterations
 *	@valp: where to store the value of the register at completion time
 *
 *	Wait until an operation is completed by checking a bit in a register
 *	up to @attempts times.  If @valp is not NULL the value of the register
 *	at the time it indicated completion is stored there.  Returns 0 if the
 *	operation completes and -EAGAIN otherwise.
 */
static int t4_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
			       int polarity, int attempts, int delay, u32 *valp)
{
	while (1) {
		u32 val = t4_read_reg(adapter, reg);

		if (!!(val & mask) == polarity) {
			if (valp)
				*valp = val;
			return 0;
		}
		if (--attempts == 0)
			return -EAGAIN;
		if (delay)
			udelay(delay);
	}
}

static inline int t4_wait_op_done(struct adapter *adapter, int reg, u32 mask,
				  int polarity, int attempts, int delay)
{
	return t4_wait_op_done_val(adapter, reg, mask, polarity, attempts,
				   delay, NULL);
}
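
/* Illustrative usage sketch (not from the upstream driver): a typical caller
 * polls a "busy"-style bit with t4_wait_op_done() until it reaches the
 * desired polarity.  The register offset and mask are placeholders supplied
 * by the caller, and the 1000 x 5us budget is an arbitrary example.
 */
static inline int example_wait_not_busy(struct adapter *adapter, int reg,
					u32 busy_mask)
{
	/* Wait for the selected bit to read back as 0. */
	return t4_wait_op_done(adapter, reg, busy_mask, 0, 1000, 5);
}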

/**
 *	t4_set_reg_field - set a register field to a value
 *	@adapter: the adapter to program
 *	@addr: the register address
 *	@mask: specifies the portion of the register to modify
 *	@val: the new value for the register field
 *
 *	Sets a register field specified by the supplied mask to the
 *	given value.
 */
void t4_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask,
		      u32 val)
{
	u32 v = t4_read_reg(adapter, addr) & ~mask;

	t4_write_reg(adapter, addr, v | val);
	(void) t4_read_reg(adapter, addr);      /* flush */
}
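
/* Illustrative usage sketch (not from the upstream driver): updating a
 * multi-bit field is a masked read-modify-write; only the bits covered by
 * the mask change, and the read at the end of t4_set_reg_field() flushes
 * the posted write.  The register and field layout below are hypothetical.
 */
static inline void example_set_4bit_field(struct adapter *adapter, u32 reg)
{
	/* Replace the (hypothetical) field in bits [7:4] with the value 0x3. */
	t4_set_reg_field(adapter, reg, 0xf0U, 0x3U << 4);
}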

/**
 *	t4_read_indirect - read indirectly addressed registers
 *	@adap: the adapter
 *	@addr_reg: register holding the indirect address
 *	@data_reg: register holding the value of the indirect register
 *	@vals: where the read register values are stored
 *	@nregs: how many indirect registers to read
 *	@start_idx: index of first indirect register to read
 *
 *	Reads registers that are accessed indirectly through an address/data
 *	register pair.
 */
void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
		      unsigned int data_reg, u32 *vals,
		      unsigned int nregs, unsigned int start_idx)
{
	while (nregs--) {
		t4_write_reg(adap, addr_reg, start_idx);
		*vals++ = t4_read_reg(adap, data_reg);
		start_idx++;
	}
}

/**
 *	t4_write_indirect - write indirectly addressed registers
 *	@adap: the adapter
 *	@addr_reg: register holding the indirect addresses
 *	@data_reg: register holding the value for the indirect registers
 *	@vals: values to write
 *	@nregs: how many indirect registers to write
 *	@start_idx: address of first indirect register to write
 *
 *	Writes a sequential block of registers that are accessed indirectly
 *	through an address/data register pair.
 */
void t4_write_indirect(struct adapter *adap, unsigned int addr_reg,
		       unsigned int data_reg, const u32 *vals,
		       unsigned int nregs, unsigned int start_idx)
{
	while (nregs--) {
		t4_write_reg(adap, addr_reg, start_idx++);
		t4_write_reg(adap, data_reg, *vals++);
	}
}
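
/* Illustrative usage sketch (not from the upstream driver): the address/data
 * register pair is one of the hardware's indirect-access windows and is
 * passed in by the caller here rather than named explicitly.  This reads
 * four consecutive indirect registers starting at index 0x10 into @vals.
 */
static inline void example_read_indirect_block(struct adapter *adap,
					       unsigned int addr_reg,
					       unsigned int data_reg,
					       u32 *vals)
{
	t4_read_indirect(adap, addr_reg, data_reg, vals, 4, 0x10);
}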

/*
 * Read a 32-bit PCI Configuration Space register via the PCI-E backdoor
 * mechanism.  This guarantees that we get the real value even if we're
 * operating within a Virtual Machine and the Hypervisor is trapping our
 * Configuration Space accesses.
 */
void t4_hw_pci_read_cfg4(struct adapter *adap, int reg, u32 *val)
{
	u32 req = FUNCTION_V(adap->pf) | REGISTER_V(reg);

	if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
		req |= ENABLE_F;
	else
		req |= T6_ENABLE_F;

	if (is_t4(adap->params.chip))
		req |= LOCALCFG_F;

	t4_write_reg(adap, PCIE_CFG_SPACE_REQ_A, req);
	*val = t4_read_reg(adap, PCIE_CFG_SPACE_DATA_A);

	/* Reset ENABLE to 0 so reads of PCIE_CFG_SPACE_DATA won't cause a
	 * Configuration Space read.  (None of the other fields matter when
	 * ENABLE is 0 so a simple register write is easier than a
	 * read-modify-write via t4_set_reg_field().)
	 */
	t4_write_reg(adap, PCIE_CFG_SPACE_REQ_A, 0);
}
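
/* Illustrative usage sketch (not from the upstream driver): reading the
 * Vendor/Device ID dword at config-space offset 0 through the backdoor,
 * assuming PCI_VENDOR_ID from <uapi/linux/pci_regs.h> is visible via the
 * PCI headers pulled in by cxgb4.h.
 */
static inline u32 example_read_vendor_device_id(struct adapter *adap)
{
	u32 id;

	t4_hw_pci_read_cfg4(adap, PCI_VENDOR_ID, &id);
	return id;
}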

/*
 *	t4_report_fw_error - report firmware error
 *	@adap: the adapter
 *
 *	The adapter firmware can indicate error conditions to the host.
 *	If the firmware has indicated an error, print out the reason for
 *	the firmware error.
 */
static void t4_report_fw_error(struct adapter *adap)
{
	static const char *const reason[] = {
		"Crash",                        /* PCIE_FW_EVAL_CRASH */
		"During Device Preparation",    /* PCIE_FW_EVAL_PREP */
		"During Device Configuration",  /* PCIE_FW_EVAL_CONF */
		"During Device Initialization", /* PCIE_FW_EVAL_INIT */
		"Unexpected Event",             /* PCIE_FW_EVAL_UNEXPECTEDEVENT */
		"Insufficient Airflow",         /* PCIE_FW_EVAL_OVERHEAT */
		"Device Shutdown",              /* PCIE_FW_EVAL_DEVICESHUTDOWN */
		"Reserved",                     /* reserved */
	};
	u32 pcie_fw;

	pcie_fw = t4_read_reg(adap, PCIE_FW_A);
	if (pcie_fw & PCIE_FW_ERR_F) {
		dev_err(adap->pdev_dev, "Firmware reports adapter error: %s\n",
			reason[PCIE_FW_EVAL_G(pcie_fw)]);
		adap->flags &= ~FW_OK;
	}
}

/*
 * Get the reply to a mailbox command and store it in @rpl in big-endian order.
 */
static void get_mbox_rpl(struct adapter *adap, __be64 *rpl, int nflit,
			 u32 mbox_addr)
{
	for ( ; nflit; nflit--, mbox_addr += 8)
		*rpl++ = cpu_to_be64(t4_read_reg64(adap, mbox_addr));
}

/*
 * Handle a FW assertion reported in a mailbox.
 */
static void fw_asrt(struct adapter *adap, u32 mbox_addr)
{
	struct fw_debug_cmd asrt;

	get_mbox_rpl(adap, (__be64 *)&asrt, sizeof(asrt) / 8, mbox_addr);
	dev_alert(adap->pdev_dev,
		  "FW assertion at %.16s:%u, val0 %#x, val1 %#x\n",
		  asrt.u.assert.filename_0_7, be32_to_cpu(asrt.u.assert.line),
		  be32_to_cpu(asrt.u.assert.x), be32_to_cpu(asrt.u.assert.y));
}

/**
 *	t4_record_mbox - record a Firmware Mailbox Command/Reply in the log
 *	@adapter: the adapter
 *	@cmd: the Firmware Mailbox Command or Reply
 *	@size: command length in bytes
 *	@access: the time (ms) needed to access the Firmware Mailbox
 *	@execute: the time (ms) the command spent being executed
 */
static void t4_record_mbox(struct adapter *adapter,
			   const __be64 *cmd, unsigned int size,
			   int access, int execute)
{
	struct mbox_cmd_log *log = adapter->mbox_log;
	struct mbox_cmd *entry;
	int i;

	entry = mbox_cmd_log_entry(log, log->cursor++);
	if (log->cursor == log->size)
		log->cursor = 0;

	for (i = 0; i < size / 8; i++)
		entry->cmd[i] = be64_to_cpu(cmd[i]);
	while (i < MBOX_LEN / 8)
		entry->cmd[i++] = 0;
	entry->timestamp = jiffies;
	entry->seqno = log->seqno++;
	entry->access = access;
	entry->execute = execute;
}

/**
 *	t4_wr_mbox_meat_timeout - send a command to FW through the given mailbox
 *	@adap: the adapter
 *	@mbox: index of the mailbox to use
 *	@cmd: the command to write
 *	@size: command length in bytes
 *	@rpl: where to optionally store the reply
 *	@sleep_ok: if true we may sleep while awaiting command completion
 *	@timeout: time to wait for command to finish before timing out
 *
 *	Sends the given command to FW through the selected mailbox and waits
 *	for the FW to execute the command.  If @rpl is not %NULL it is used to
 *	store the FW's reply to the command.  The command and its optional
 *	reply are of the same length.  FW can take up to %FW_CMD_MAX_TIMEOUT ms
 *	to respond.  @sleep_ok determines whether we may sleep while awaiting
 *	the response.  If sleeping is allowed we use progressive backoff
 *	otherwise we spin.
 *
 *	The return value is 0 on success or a negative errno on failure.  A
 *	failure can happen either because we are not able to execute the
 *	command or FW executes it but signals an error.  In the latter case
 *	the return value is the error code indicated by FW (negated).
 */
int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd,
			    int size, void *rpl, bool sleep_ok, int timeout)
{
	static const int delay[] = {
		1, 1, 3, 5, 10, 10, 20, 50, 100, 200
	};

	struct mbox_list entry;
	u16 access = 0;
	u16 execute = 0;
	u32 v;
	u64 res;
	int i, ms, delay_idx, ret;
	const __be64 *p = cmd;
	u32 data_reg = PF_REG(mbox, CIM_PF_MAILBOX_DATA_A);
	u32 ctl_reg = PF_REG(mbox, CIM_PF_MAILBOX_CTRL_A);
	__be64 cmd_rpl[MBOX_LEN / 8];
	u32 pcie_fw;

	if ((size & 15) || size > MBOX_LEN)
		return -EINVAL;

	/*
	 * If the device is off-line, as in EEH, commands will time out.
	 * Fail them early so we don't waste time waiting.
	 */
	if (adap->pdev->error_state != pci_channel_io_normal)
		return -EIO;

	/* If we have a negative timeout, that implies that we can't sleep. */
	if (timeout < 0) {
		sleep_ok = false;
		timeout = -timeout;
	}

	/* Queue ourselves onto the mailbox access list.  When our entry is at
	 * the front of the list, we have rights to access the mailbox.  So we
	 * wait [for a while] till we're at the front [or bail out with an
	 * EBUSY] ...
	 */
	spin_lock_bh(&adap->mbox_lock);
	list_add_tail(&entry.list, &adap->mlist.list);
	spin_unlock_bh(&adap->mbox_lock);

	delay_idx = 0;
	ms = delay[0];

	for (i = 0; ; i += ms) {
		/* If we've waited too long, return a busy indication.  This
		 * really ought to be based on our initial position in the
		 * mailbox access list but this is a start.  We very rarely
		 * contend on access to the mailbox ...
		 */
		pcie_fw = t4_read_reg(adap, PCIE_FW_A);
		if (i > FW_CMD_MAX_TIMEOUT || (pcie_fw & PCIE_FW_ERR_F)) {
			spin_lock_bh(&adap->mbox_lock);
			list_del(&entry.list);
			spin_unlock_bh(&adap->mbox_lock);
			ret = (pcie_fw & PCIE_FW_ERR_F) ? -ENXIO : -EBUSY;
			t4_record_mbox(adap, cmd, size, access, ret);
			return ret;
		}

		/* If we're at the head, break out and start the mailbox
		 * protocol.
		 */
		if (list_first_entry(&adap->mlist.list, struct mbox_list,
				     list) == &entry)
			break;

		/* Delay for a bit before checking again ... */
		if (sleep_ok) {
			ms = delay[delay_idx];  /* last element may repeat */
			if (delay_idx < ARRAY_SIZE(delay) - 1)
				delay_idx++;
			msleep(ms);
		} else {
			mdelay(ms);
		}
	}

	/* Loop trying to get ownership of the mailbox.  Return an error
	 * if we can't gain ownership.
	 */
	v = MBOWNER_G(t4_read_reg(adap, ctl_reg));
	for (i = 0; v == MBOX_OWNER_NONE && i < 3; i++)
		v = MBOWNER_G(t4_read_reg(adap, ctl_reg));
	if (v != MBOX_OWNER_DRV) {
		spin_lock_bh(&adap->mbox_lock);
		list_del(&entry.list);
		spin_unlock_bh(&adap->mbox_lock);
		ret = (v == MBOX_OWNER_FW) ? -EBUSY : -ETIMEDOUT;
		t4_record_mbox(adap, cmd, size, access, ret);
		return ret;
	}

	/* Copy in the new mailbox command and send it on its way ... */
	t4_record_mbox(adap, cmd, size, access, 0);
	for (i = 0; i < size; i += 8)
		t4_write_reg64(adap, data_reg + i, be64_to_cpu(*p++));

	t4_write_reg(adap, ctl_reg, MBMSGVALID_F | MBOWNER_V(MBOX_OWNER_FW));
	t4_read_reg(adap, ctl_reg);          /* flush write */

	delay_idx = 0;
	ms = delay[0];

	for (i = 0;
	     !((pcie_fw = t4_read_reg(adap, PCIE_FW_A)) & PCIE_FW_ERR_F) &&
	     i < timeout;
	     i += ms) {
		if (sleep_ok) {
			ms = delay[delay_idx];  /* last element may repeat */
			if (delay_idx < ARRAY_SIZE(delay) - 1)
				delay_idx++;
			msleep(ms);
		} else
			mdelay(ms);

		v = t4_read_reg(adap, ctl_reg);
		if (MBOWNER_G(v) == MBOX_OWNER_DRV) {
			if (!(v & MBMSGVALID_F)) {
				t4_write_reg(adap, ctl_reg, 0);
				continue;
			}

			get_mbox_rpl(adap, cmd_rpl, MBOX_LEN / 8, data_reg);
			res = be64_to_cpu(cmd_rpl[0]);

			if (FW_CMD_OP_G(res >> 32) == FW_DEBUG_CMD) {
				fw_asrt(adap, data_reg);
				res = FW_CMD_RETVAL_V(EIO);
			} else if (rpl) {
				memcpy(rpl, cmd_rpl, size);
			}

			t4_write_reg(adap, ctl_reg, 0);

			execute = i + ms;
			t4_record_mbox(adap, cmd_rpl,
				       MBOX_LEN, access, execute);
			spin_lock_bh(&adap->mbox_lock);
			list_del(&entry.list);
			spin_unlock_bh(&adap->mbox_lock);
			return -FW_CMD_RETVAL_G((int)res);
		}
	}

	ret = (pcie_fw & PCIE_FW_ERR_F) ? -ENXIO : -ETIMEDOUT;
	t4_record_mbox(adap, cmd, size, access, ret);
	dev_err(adap->pdev_dev, "command %#x in mailbox %d timed out\n",
		*(const u8 *)cmd, mbox);
	t4_report_fw_error(adap);
	spin_lock_bh(&adap->mbox_lock);
	list_del(&entry.list);
	spin_unlock_bh(&adap->mbox_lock);
	t4_fatal_err(adap);
	return ret;
}

int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
		    void *rpl, bool sleep_ok)
{
	return t4_wr_mbox_meat_timeout(adap, mbox, cmd, size, rpl, sleep_ok,
				       FW_CMD_MAX_TIMEOUT);
}
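
/* Illustrative usage sketch (not from the upstream driver): a caller builds
 * a firmware command from t4fw_api.h in big-endian form and hands it to the
 * t4_wr_mbox() wrapper (cxgb4.h), which supplies the default timeout.  The
 * RESET command is assumed here because it has one of the simplest layouts.
 */
static inline int example_fw_reset(struct adapter *adap, unsigned int mbox,
				   int reset)
{
	struct fw_reset_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_write = cpu_to_be32(FW_CMD_OP_V(FW_RESET_CMD) |
				    FW_CMD_REQUEST_F | FW_CMD_WRITE_F);
	c.retval_len16 = cpu_to_be32(FW_LEN16(c));
	c.val = cpu_to_be32(reset);
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}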

static int t4_edc_err_read(struct adapter *adap, int idx)
{
	u32 edc_ecc_err_addr_reg;
	u32 rdata_reg;

	if (is_t4(adap->params.chip)) {
		CH_WARN(adap, "%s: T4 NOT supported.\n", __func__);
		return 0;
	}
	if (idx != 0 && idx != 1) {
		CH_WARN(adap, "%s: idx %d NOT supported.\n", __func__, idx);
		return 0;
	}

	edc_ecc_err_addr_reg = EDC_T5_REG(EDC_H_ECC_ERR_ADDR_A, idx);
	rdata_reg = EDC_T5_REG(EDC_H_BIST_STATUS_RDATA_A, idx);

	CH_WARN(adap,
		"edc%d err addr 0x%x: 0x%x.\n",
		idx, edc_ecc_err_addr_reg,
		t4_read_reg(adap, edc_ecc_err_addr_reg));
	CH_WARN(adap,
		"bist: 0x%x, status %llx %llx %llx %llx %llx %llx %llx %llx %llx.\n",
		rdata_reg,
		(unsigned long long)t4_read_reg64(adap, rdata_reg),
		(unsigned long long)t4_read_reg64(adap, rdata_reg + 8),
		(unsigned long long)t4_read_reg64(adap, rdata_reg + 16),
		(unsigned long long)t4_read_reg64(adap, rdata_reg + 24),
		(unsigned long long)t4_read_reg64(adap, rdata_reg + 32),
		(unsigned long long)t4_read_reg64(adap, rdata_reg + 40),
		(unsigned long long)t4_read_reg64(adap, rdata_reg + 48),
		(unsigned long long)t4_read_reg64(adap, rdata_reg + 56),
		(unsigned long long)t4_read_reg64(adap, rdata_reg + 64));

	return 0;
}

/**
 *	t4_memory_rw - read/write EDC 0, EDC 1 or MC via PCIE memory window
 *	@adap: the adapter
 *	@win: PCI-E Memory Window to use
 *	@mtype: memory type: MEM_EDC0, MEM_EDC1 or MEM_MC
 *	@addr: address within indicated memory type
 *	@len: amount of memory to transfer
 *	@hbuf: host memory buffer
 *	@dir: direction of transfer T4_MEMORY_READ (1) or T4_MEMORY_WRITE (0)
 *
 *	Reads/writes an [almost] arbitrary memory region in the firmware: the
 *	firmware memory address and host buffer must be aligned on 32-bit
 *	boundaries; the length may be arbitrary.  The memory is transferred as
 *	a raw byte sequence from/to the firmware's memory.  If this memory
 *	contains data structures which contain multi-byte integers, it's the
 *	caller's responsibility to perform appropriate byte order conversions.
 */
int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr,
		 u32 len, void *hbuf, int dir)
{
	u32 pos, offset, resid, memoffset;
	u32 edc_size, mc_size, win_pf, mem_reg, mem_aperture, mem_base;
	u32 *buf;

	/* Argument sanity checks ...
	 */
	if (addr & 0x3 || (uintptr_t)hbuf & 0x3)
		return -EINVAL;
	buf = (u32 *)hbuf;

	/* It's convenient to be able to handle lengths which aren't a
	 * multiple of 32-bits because we often end up transferring files to
	 * the firmware.  So we'll handle that by normalizing the length here
	 * and then handling any residual transfer at the end.
	 */
	resid = len & 0x3;
	len -= resid;

	/* Offset into the region of memory which is being accessed
	 * MEM_EDC0 = 0
	 * MEM_EDC1 = 1
	 * MEM_MC   = 2 -- MEM_MC for chips with only 1 memory controller
	 * MEM_MC1  = 3 -- for chips with 2 memory controllers (e.g. T5)
	 * MEM_HMA  = 4
	 */
	edc_size = EDRAM0_SIZE_G(t4_read_reg(adap, MA_EDRAM0_BAR_A));
	if (mtype == MEM_HMA) {
		memoffset = 2 * (edc_size * 1024 * 1024);
	} else if (mtype != MEM_MC1) {
		memoffset = (mtype * (edc_size * 1024 * 1024));
	} else {
		mc_size = EXT_MEM0_SIZE_G(t4_read_reg(adap,
						      MA_EXT_MEMORY0_BAR_A));
		memoffset = (MEM_MC0 * edc_size + mc_size) * 1024 * 1024;
	}

	/* Determine the PCIE_MEM_ACCESS_OFFSET */
	addr = addr + memoffset;

	/* Each PCI-E Memory Window is programmed with a window size -- or
	 * "aperture" -- which controls the granularity of its mapping onto
	 * adapter memory.  We need to grab that aperture in order to know
	 * how to use the specified window.  The window is also programmed
	 * with the base address of the Memory Window in BAR0's address
	 * space.  For T4 this is an absolute PCI-E Bus Address.  For T5
	 * the address is relative to BAR0.
	 */
	mem_reg = t4_read_reg(adap,
			      PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A,
						  win));
	mem_aperture = 1 << (WINDOW_G(mem_reg) + WINDOW_SHIFT_X);
	mem_base = PCIEOFST_G(mem_reg) << PCIEOFST_SHIFT_X;
	if (is_t4(adap->params.chip))
		mem_base -= adap->t4_bar0;
	win_pf = is_t4(adap->params.chip) ? 0 : PFNUM_V(adap->pf);

	/* Calculate our initial PCI-E Memory Window Position and Offset into
	 * that Window.
	 */
	pos = addr & ~(mem_aperture-1);
	offset = addr - pos;

	/* Set up initial PCI-E Memory Window to cover the start of our
	 * transfer.  (Read it back to ensure that changes propagate before we
	 * attempt to use the new value.)
	 */
	t4_write_reg(adap,
		     PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A, win),
		     pos | win_pf);
	t4_read_reg(adap,
		    PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A, win));

	/* Transfer data to/from the adapter as long as there's an integral
	 * number of 32-bit transfers to complete.
	 *
	 * A note on Endianness issues:
	 *
	 * The "register" reads and writes below from/to the PCI-E Memory
	 * Window invoke the standard adapter Big-Endian to PCI-E Link
	 * Little-Endian "swizzle."  As a result, if we have the following
	 * data in adapter memory:
	 *
	 *     Memory:  ... | b0 | b1 | b2 | b3 | ...
	 *     Address:      i+0  i+1  i+2  i+3
	 *
	 * Then a read of the adapter memory via the PCI-E Memory Window
	 * will yield:
	 *
	 *     x = readl(i)
	 *     31                  0
	 *     [ b3 | b2 | b1 | b0 ]
	 *
	 * If this value is stored into local memory on a Little-Endian system
	 * it will show up correctly in local memory as:
	 *
	 *     ( ..., b0, b1, b2, b3, ... )
	 *
	 * But on a Big-Endian system, the store will show up in memory
	 * incorrectly swizzled as:
	 *
	 *     ( ..., b3, b2, b1, b0, ... )
	 *
	 * So we need to account for this in the reads and writes to the
	 * PCI-E Memory Window below by undoing the register read/write
	 * swizzles.
	 */
	while (len > 0) {
		if (dir == T4_MEMORY_READ)
			*buf++ = le32_to_cpu((__force __le32)t4_read_reg(adap,
						mem_base + offset));
		else
			t4_write_reg(adap, mem_base + offset,
				     (__force u32)cpu_to_le32(*buf++));
		offset += sizeof(__be32);
		len -= sizeof(__be32);

		/* If we've reached the end of our current window aperture,
		 * move the PCI-E Memory Window on to the next.  Note that
		 * doing this here after "len" may be 0 allows us to set up
		 * the PCI-E Memory Window for a possible final residual
		 * transfer below ...
		 */
		if (offset == mem_aperture) {
			pos += mem_aperture;
			offset = 0;
			t4_write_reg(adap,
				PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A,
						    win), pos | win_pf);
			t4_read_reg(adap,
				PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A,
						    win));
		}
	}

	/* If the original transfer had a length which wasn't a multiple of
	 * 32-bits, now's where we need to finish off the transfer of the
	 * residual amount.  The PCI-E Memory Window has already been moved
	 * above (if necessary) to cover this final transfer.
	 */
	if (resid) {
		union {
			u32 word;
			char byte[4];
		} last;
		unsigned char *bp;
		int i;

		if (dir == T4_MEMORY_READ) {
			last.word = le32_to_cpu(
					(__force __le32)t4_read_reg(adap,
						mem_base + offset));
			for (bp = (unsigned char *)buf, i = resid; i < 4; i++)
				bp[i] = last.byte[i];
		} else {
			last.word = *buf;
			for (i = resid; i < 4; i++)
				last.byte[i] = 0;
			t4_write_reg(adap, mem_base + offset,
				     (__force u32)cpu_to_le32(last.word));
		}
	}

	return 0;
}
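
/* Illustrative usage sketch (not from the upstream driver): reading a small
 * buffer out of EDC0 through a memory window the caller owns.  MEMWIN_NIC is
 * assumed to be the window index available to the caller; both @addr and
 * @buf must be 32-bit aligned, while @len may be arbitrary.
 */
static inline int example_read_edc0(struct adapter *adap, u32 addr,
				    void *buf, u32 len)
{
	return t4_memory_rw(adap, MEMWIN_NIC, MEM_EDC0, addr, len, buf,
			    T4_MEMORY_READ);
}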

/* Return the specified PCI-E Configuration Space register from our Physical
 * Function.  We try first via a Firmware LDST Command since we prefer to let
 * the firmware own all of these registers, but if that fails we go for it
 * directly ourselves.
 */
u32 t4_read_pcie_cfg4(struct adapter *adap, int reg)
{
	u32 val, ldst_addrspace;

	/* If fw_attach != 0, construct and send the Firmware LDST Command to
	 * retrieve the specified PCI-E Configuration Space register.
	 */
	struct fw_ldst_cmd ldst_cmd;
	int ret;

	memset(&ldst_cmd, 0, sizeof(ldst_cmd));
	ldst_addrspace = FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_FUNC_PCIE);
	ldst_cmd.op_to_addrspace = cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) |
					       FW_CMD_REQUEST_F |
					       FW_CMD_READ_F |
					       ldst_addrspace);
	ldst_cmd.cycles_to_len16 = cpu_to_be32(FW_LEN16(ldst_cmd));
	ldst_cmd.u.pcie.select_naccess = FW_LDST_CMD_NACCESS_V(1);
	ldst_cmd.u.pcie.ctrl_to_fn =
		(FW_LDST_CMD_LC_F | FW_LDST_CMD_FN_V(adap->pf));
	ldst_cmd.u.pcie.r = reg;

	/* If the LDST Command succeeds, return the result, otherwise
	 * fall through to reading it directly ourselves ...
	 */
	ret = t4_wr_mbox(adap, adap->mbox, &ldst_cmd, sizeof(ldst_cmd),
			 &ldst_cmd);
	if (ret == 0)
		val = be32_to_cpu(ldst_cmd.u.pcie.data[0]);
	else
		/* Read the desired Configuration Space register via the PCI-E
		 * Backdoor mechanism.
		 */
		t4_hw_pci_read_cfg4(adap, reg, &val);
	return val;
}

/* Get the window based on base passed to it.
 * Window aperture is currently unhandled, but there is no use case for it
 * right now.
 */
static u32 t4_get_window(struct adapter *adap, u32 pci_base, u64 pci_mask,
			 u32 memwin_base)
{
	u32 ret;

	if (is_t4(adap->params.chip)) {
		u32 bar0;

		/* Truncation intentional: we only read the bottom 32-bits of
		 * the 64-bit BAR0/BAR1 ...  We use the hardware backdoor
		 * mechanism to read BAR0 instead of using
		 * pci_resource_start() because we could be operating from
		 * within a Virtual Machine which is trapping our accesses to
		 * our Configuration Space and we need to set up the PCI-E
		 * Memory Window decoders with the actual addresses which will
		 * be coming across the PCI-E link.
		 */
		bar0 = t4_read_pcie_cfg4(adap, pci_base);
		bar0 &= pci_mask;
		adap->t4_bar0 = bar0;

		ret = bar0 + memwin_base;
	} else {
		/* For T5, only relative offset inside the PCIe BAR is passed */
		ret = memwin_base;
	}
	return ret;
}

/* Get the default utility window (win0) used by everyone */
u32 t4_get_util_window(struct adapter *adap)
{
	return t4_get_window(adap, PCI_BASE_ADDRESS_0,
			     PCI_BASE_ADDRESS_MEM_MASK, MEMWIN0_BASE);
}

/* Set up memory window for accessing adapter memory ranges.  (Read
 * back MA register to ensure that changes propagate before we attempt
 * to use the new values.)
 */
void t4_setup_memwin(struct adapter *adap, u32 memwin_base, u32 window)
{
	t4_write_reg(adap,
		     PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, window),
		     memwin_base | BIR_V(0) |
		     WINDOW_V(ilog2(MEMWIN0_APERTURE) - WINDOW_SHIFT_X));
	t4_read_reg(adap,
		    PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, window));
}

/**
 *	t4_get_regs_len - return the size of the chip's register set
 *	@adapter: the adapter
 *
 *	Returns the size of the chip's BAR0 register space.
 */
unsigned int t4_get_regs_len(struct adapter *adapter)
{
	unsigned int chip_version = CHELSIO_CHIP_VERSION(adapter->params.chip);

	switch (chip_version) {
	case CHELSIO_T4:
		return T4_REGMAP_SIZE;

	case CHELSIO_T5:
	case CHELSIO_T6:
		return T5_REGMAP_SIZE;
	}

	dev_err(adapter->pdev_dev,
		"Unsupported chip version %d\n", chip_version);
	return 0;
}

791/**
792 * t4_get_regs - read chip registers into provided buffer
793 * @adap: the adapter
794 * @buf: register buffer
795 * @buf_size: size (in bytes) of register buffer
796 *
797 * If the provided register buffer isn't large enough for the chip's
798 * full register range, the register dump will be truncated to the
799 * register buffer's size.
800 */
801void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
802{
803 static const unsigned int t4_reg_ranges[] = {
804 0x1008, 0x1108,
8119c018
HS
805 0x1180, 0x1184,
806 0x1190, 0x1194,
807 0x11a0, 0x11a4,
808 0x11b0, 0x11b4,
812034f1
HS
809 0x11fc, 0x123c,
810 0x1300, 0x173c,
811 0x1800, 0x18fc,
8119c018
HS
812 0x3000, 0x30d8,
813 0x30e0, 0x30e4,
814 0x30ec, 0x5910,
815 0x5920, 0x5924,
816 0x5960, 0x5960,
817 0x5968, 0x5968,
818 0x5970, 0x5970,
819 0x5978, 0x5978,
820 0x5980, 0x5980,
821 0x5988, 0x5988,
822 0x5990, 0x5990,
823 0x5998, 0x5998,
824 0x59a0, 0x59d4,
825 0x5a00, 0x5ae0,
826 0x5ae8, 0x5ae8,
827 0x5af0, 0x5af0,
828 0x5af8, 0x5af8,
812034f1
HS
829 0x6000, 0x6098,
830 0x6100, 0x6150,
831 0x6200, 0x6208,
832 0x6240, 0x6248,
8119c018
HS
833 0x6280, 0x62b0,
834 0x62c0, 0x6338,
812034f1
HS
835 0x6370, 0x638c,
836 0x6400, 0x643c,
837 0x6500, 0x6524,
8119c018
HS
838 0x6a00, 0x6a04,
839 0x6a14, 0x6a38,
840 0x6a60, 0x6a70,
841 0x6a78, 0x6a78,
842 0x6b00, 0x6b0c,
843 0x6b1c, 0x6b84,
844 0x6bf0, 0x6bf8,
845 0x6c00, 0x6c0c,
846 0x6c1c, 0x6c84,
847 0x6cf0, 0x6cf8,
848 0x6d00, 0x6d0c,
849 0x6d1c, 0x6d84,
850 0x6df0, 0x6df8,
851 0x6e00, 0x6e0c,
852 0x6e1c, 0x6e84,
853 0x6ef0, 0x6ef8,
854 0x6f00, 0x6f0c,
855 0x6f1c, 0x6f84,
856 0x6ff0, 0x6ff8,
857 0x7000, 0x700c,
858 0x701c, 0x7084,
859 0x70f0, 0x70f8,
860 0x7100, 0x710c,
861 0x711c, 0x7184,
862 0x71f0, 0x71f8,
863 0x7200, 0x720c,
864 0x721c, 0x7284,
865 0x72f0, 0x72f8,
866 0x7300, 0x730c,
867 0x731c, 0x7384,
868 0x73f0, 0x73f8,
869 0x7400, 0x7450,
812034f1 870 0x7500, 0x7530,
8119c018
HS
871 0x7600, 0x760c,
872 0x7614, 0x761c,
812034f1
HS
873 0x7680, 0x76cc,
874 0x7700, 0x7798,
875 0x77c0, 0x77fc,
876 0x7900, 0x79fc,
8119c018
HS
877 0x7b00, 0x7b58,
878 0x7b60, 0x7b84,
879 0x7b8c, 0x7c38,
880 0x7d00, 0x7d38,
881 0x7d40, 0x7d80,
882 0x7d8c, 0x7ddc,
883 0x7de4, 0x7e04,
884 0x7e10, 0x7e1c,
885 0x7e24, 0x7e38,
886 0x7e40, 0x7e44,
887 0x7e4c, 0x7e78,
888 0x7e80, 0x7ea4,
889 0x7eac, 0x7edc,
890 0x7ee8, 0x7efc,
891 0x8dc0, 0x8e04,
892 0x8e10, 0x8e1c,
812034f1 893 0x8e30, 0x8e78,
8119c018
HS
894 0x8ea0, 0x8eb8,
895 0x8ec0, 0x8f6c,
896 0x8fc0, 0x9008,
897 0x9010, 0x9058,
898 0x9060, 0x9060,
899 0x9068, 0x9074,
812034f1 900 0x90fc, 0x90fc,
8119c018
HS
901 0x9400, 0x9408,
902 0x9410, 0x9458,
903 0x9600, 0x9600,
904 0x9608, 0x9638,
905 0x9640, 0x96bc,
812034f1
HS
906 0x9800, 0x9808,
907 0x9820, 0x983c,
908 0x9850, 0x9864,
909 0x9c00, 0x9c6c,
910 0x9c80, 0x9cec,
911 0x9d00, 0x9d6c,
912 0x9d80, 0x9dec,
913 0x9e00, 0x9e6c,
914 0x9e80, 0x9eec,
915 0x9f00, 0x9f6c,
916 0x9f80, 0x9fec,
8119c018
HS
917 0xd004, 0xd004,
918 0xd010, 0xd03c,
812034f1
HS
919 0xdfc0, 0xdfe0,
920 0xe000, 0xea7c,
04d8980b
AV
921 0xf000, 0x11110,
922 0x11118, 0x11190,
812034f1
HS
923 0x19040, 0x1906c,
924 0x19078, 0x19080,
8119c018
HS
925 0x1908c, 0x190e4,
926 0x190f0, 0x190f8,
927 0x19100, 0x19110,
928 0x19120, 0x19124,
929 0x19150, 0x19194,
930 0x1919c, 0x191b0,
812034f1
HS
931 0x191d0, 0x191e8,
932 0x19238, 0x1924c,
8119c018
HS
933 0x193f8, 0x1943c,
934 0x1944c, 0x19474,
935 0x19490, 0x194e0,
936 0x194f0, 0x194f8,
937 0x19800, 0x19c08,
938 0x19c10, 0x19c90,
939 0x19ca0, 0x19ce4,
940 0x19cf0, 0x19d40,
941 0x19d50, 0x19d94,
942 0x19da0, 0x19de8,
943 0x19df0, 0x19e40,
944 0x19e50, 0x19e90,
945 0x19ea0, 0x19f4c,
946 0x1a000, 0x1a004,
947 0x1a010, 0x1a06c,
948 0x1a0b0, 0x1a0e4,
949 0x1a0ec, 0x1a0f4,
950 0x1a100, 0x1a108,
951 0x1a114, 0x1a120,
952 0x1a128, 0x1a130,
953 0x1a138, 0x1a138,
812034f1
HS
954 0x1a190, 0x1a1c4,
955 0x1a1fc, 0x1a1fc,
956 0x1e040, 0x1e04c,
957 0x1e284, 0x1e28c,
958 0x1e2c0, 0x1e2c0,
959 0x1e2e0, 0x1e2e0,
960 0x1e300, 0x1e384,
961 0x1e3c0, 0x1e3c8,
962 0x1e440, 0x1e44c,
963 0x1e684, 0x1e68c,
964 0x1e6c0, 0x1e6c0,
965 0x1e6e0, 0x1e6e0,
966 0x1e700, 0x1e784,
967 0x1e7c0, 0x1e7c8,
968 0x1e840, 0x1e84c,
969 0x1ea84, 0x1ea8c,
970 0x1eac0, 0x1eac0,
971 0x1eae0, 0x1eae0,
972 0x1eb00, 0x1eb84,
973 0x1ebc0, 0x1ebc8,
974 0x1ec40, 0x1ec4c,
975 0x1ee84, 0x1ee8c,
976 0x1eec0, 0x1eec0,
977 0x1eee0, 0x1eee0,
978 0x1ef00, 0x1ef84,
979 0x1efc0, 0x1efc8,
980 0x1f040, 0x1f04c,
981 0x1f284, 0x1f28c,
982 0x1f2c0, 0x1f2c0,
983 0x1f2e0, 0x1f2e0,
984 0x1f300, 0x1f384,
985 0x1f3c0, 0x1f3c8,
986 0x1f440, 0x1f44c,
987 0x1f684, 0x1f68c,
988 0x1f6c0, 0x1f6c0,
989 0x1f6e0, 0x1f6e0,
990 0x1f700, 0x1f784,
991 0x1f7c0, 0x1f7c8,
992 0x1f840, 0x1f84c,
993 0x1fa84, 0x1fa8c,
994 0x1fac0, 0x1fac0,
995 0x1fae0, 0x1fae0,
996 0x1fb00, 0x1fb84,
997 0x1fbc0, 0x1fbc8,
998 0x1fc40, 0x1fc4c,
999 0x1fe84, 0x1fe8c,
1000 0x1fec0, 0x1fec0,
1001 0x1fee0, 0x1fee0,
1002 0x1ff00, 0x1ff84,
1003 0x1ffc0, 0x1ffc8,
1004 0x20000, 0x2002c,
1005 0x20100, 0x2013c,
8119c018
HS
1006 0x20190, 0x201a0,
1007 0x201a8, 0x201b8,
1008 0x201c4, 0x201c8,
812034f1 1009 0x20200, 0x20318,
8119c018
HS
1010 0x20400, 0x204b4,
1011 0x204c0, 0x20528,
812034f1
HS
1012 0x20540, 0x20614,
1013 0x21000, 0x21040,
1014 0x2104c, 0x21060,
1015 0x210c0, 0x210ec,
1016 0x21200, 0x21268,
1017 0x21270, 0x21284,
1018 0x212fc, 0x21388,
1019 0x21400, 0x21404,
8119c018
HS
1020 0x21500, 0x21500,
1021 0x21510, 0x21518,
1022 0x2152c, 0x21530,
1023 0x2153c, 0x2153c,
812034f1
HS
1024 0x21550, 0x21554,
1025 0x21600, 0x21600,
8119c018
HS
1026 0x21608, 0x2161c,
1027 0x21624, 0x21628,
1028 0x21630, 0x21634,
1029 0x2163c, 0x2163c,
812034f1
HS
1030 0x21700, 0x2171c,
1031 0x21780, 0x2178c,
8119c018
HS
1032 0x21800, 0x21818,
1033 0x21820, 0x21828,
1034 0x21830, 0x21848,
1035 0x21850, 0x21854,
1036 0x21860, 0x21868,
1037 0x21870, 0x21870,
1038 0x21878, 0x21898,
1039 0x218a0, 0x218a8,
1040 0x218b0, 0x218c8,
1041 0x218d0, 0x218d4,
1042 0x218e0, 0x218e8,
1043 0x218f0, 0x218f0,
1044 0x218f8, 0x21a18,
1045 0x21a20, 0x21a28,
1046 0x21a30, 0x21a48,
1047 0x21a50, 0x21a54,
1048 0x21a60, 0x21a68,
1049 0x21a70, 0x21a70,
1050 0x21a78, 0x21a98,
1051 0x21aa0, 0x21aa8,
1052 0x21ab0, 0x21ac8,
1053 0x21ad0, 0x21ad4,
1054 0x21ae0, 0x21ae8,
1055 0x21af0, 0x21af0,
1056 0x21af8, 0x21c18,
1057 0x21c20, 0x21c20,
1058 0x21c28, 0x21c30,
1059 0x21c38, 0x21c38,
1060 0x21c80, 0x21c98,
1061 0x21ca0, 0x21ca8,
1062 0x21cb0, 0x21cc8,
1063 0x21cd0, 0x21cd4,
1064 0x21ce0, 0x21ce8,
1065 0x21cf0, 0x21cf0,
1066 0x21cf8, 0x21d7c,
812034f1
HS
1067 0x21e00, 0x21e04,
1068 0x22000, 0x2202c,
1069 0x22100, 0x2213c,
8119c018
HS
1070 0x22190, 0x221a0,
1071 0x221a8, 0x221b8,
1072 0x221c4, 0x221c8,
812034f1 1073 0x22200, 0x22318,
8119c018
HS
1074 0x22400, 0x224b4,
1075 0x224c0, 0x22528,
812034f1
HS
1076 0x22540, 0x22614,
1077 0x23000, 0x23040,
1078 0x2304c, 0x23060,
1079 0x230c0, 0x230ec,
1080 0x23200, 0x23268,
1081 0x23270, 0x23284,
1082 0x232fc, 0x23388,
1083 0x23400, 0x23404,
8119c018
HS
1084 0x23500, 0x23500,
1085 0x23510, 0x23518,
1086 0x2352c, 0x23530,
1087 0x2353c, 0x2353c,
812034f1
HS
1088 0x23550, 0x23554,
1089 0x23600, 0x23600,
8119c018
HS
1090 0x23608, 0x2361c,
1091 0x23624, 0x23628,
1092 0x23630, 0x23634,
1093 0x2363c, 0x2363c,
812034f1
HS
1094 0x23700, 0x2371c,
1095 0x23780, 0x2378c,
8119c018
HS
1096 0x23800, 0x23818,
1097 0x23820, 0x23828,
1098 0x23830, 0x23848,
1099 0x23850, 0x23854,
1100 0x23860, 0x23868,
1101 0x23870, 0x23870,
1102 0x23878, 0x23898,
1103 0x238a0, 0x238a8,
1104 0x238b0, 0x238c8,
1105 0x238d0, 0x238d4,
1106 0x238e0, 0x238e8,
1107 0x238f0, 0x238f0,
1108 0x238f8, 0x23a18,
1109 0x23a20, 0x23a28,
1110 0x23a30, 0x23a48,
1111 0x23a50, 0x23a54,
1112 0x23a60, 0x23a68,
1113 0x23a70, 0x23a70,
1114 0x23a78, 0x23a98,
1115 0x23aa0, 0x23aa8,
1116 0x23ab0, 0x23ac8,
1117 0x23ad0, 0x23ad4,
1118 0x23ae0, 0x23ae8,
1119 0x23af0, 0x23af0,
1120 0x23af8, 0x23c18,
1121 0x23c20, 0x23c20,
1122 0x23c28, 0x23c30,
1123 0x23c38, 0x23c38,
1124 0x23c80, 0x23c98,
1125 0x23ca0, 0x23ca8,
1126 0x23cb0, 0x23cc8,
1127 0x23cd0, 0x23cd4,
1128 0x23ce0, 0x23ce8,
1129 0x23cf0, 0x23cf0,
1130 0x23cf8, 0x23d7c,
812034f1
HS
1131 0x23e00, 0x23e04,
1132 0x24000, 0x2402c,
1133 0x24100, 0x2413c,
8119c018
HS
1134 0x24190, 0x241a0,
1135 0x241a8, 0x241b8,
1136 0x241c4, 0x241c8,
812034f1 1137 0x24200, 0x24318,
8119c018
HS
1138 0x24400, 0x244b4,
1139 0x244c0, 0x24528,
812034f1
HS
1140 0x24540, 0x24614,
1141 0x25000, 0x25040,
1142 0x2504c, 0x25060,
1143 0x250c0, 0x250ec,
1144 0x25200, 0x25268,
1145 0x25270, 0x25284,
1146 0x252fc, 0x25388,
1147 0x25400, 0x25404,
8119c018
HS
1148 0x25500, 0x25500,
1149 0x25510, 0x25518,
1150 0x2552c, 0x25530,
1151 0x2553c, 0x2553c,
812034f1
HS
1152 0x25550, 0x25554,
1153 0x25600, 0x25600,
8119c018
HS
1154 0x25608, 0x2561c,
1155 0x25624, 0x25628,
1156 0x25630, 0x25634,
1157 0x2563c, 0x2563c,
812034f1
HS
1158 0x25700, 0x2571c,
1159 0x25780, 0x2578c,
8119c018
HS
1160 0x25800, 0x25818,
1161 0x25820, 0x25828,
1162 0x25830, 0x25848,
1163 0x25850, 0x25854,
1164 0x25860, 0x25868,
1165 0x25870, 0x25870,
1166 0x25878, 0x25898,
1167 0x258a0, 0x258a8,
1168 0x258b0, 0x258c8,
1169 0x258d0, 0x258d4,
1170 0x258e0, 0x258e8,
1171 0x258f0, 0x258f0,
1172 0x258f8, 0x25a18,
1173 0x25a20, 0x25a28,
1174 0x25a30, 0x25a48,
1175 0x25a50, 0x25a54,
1176 0x25a60, 0x25a68,
1177 0x25a70, 0x25a70,
1178 0x25a78, 0x25a98,
1179 0x25aa0, 0x25aa8,
1180 0x25ab0, 0x25ac8,
1181 0x25ad0, 0x25ad4,
1182 0x25ae0, 0x25ae8,
1183 0x25af0, 0x25af0,
1184 0x25af8, 0x25c18,
1185 0x25c20, 0x25c20,
1186 0x25c28, 0x25c30,
1187 0x25c38, 0x25c38,
1188 0x25c80, 0x25c98,
1189 0x25ca0, 0x25ca8,
1190 0x25cb0, 0x25cc8,
1191 0x25cd0, 0x25cd4,
1192 0x25ce0, 0x25ce8,
1193 0x25cf0, 0x25cf0,
1194 0x25cf8, 0x25d7c,
812034f1
HS
1195 0x25e00, 0x25e04,
1196 0x26000, 0x2602c,
1197 0x26100, 0x2613c,
8119c018
HS
1198 0x26190, 0x261a0,
1199 0x261a8, 0x261b8,
1200 0x261c4, 0x261c8,
812034f1 1201 0x26200, 0x26318,
8119c018
HS
1202 0x26400, 0x264b4,
1203 0x264c0, 0x26528,
812034f1
HS
1204 0x26540, 0x26614,
1205 0x27000, 0x27040,
1206 0x2704c, 0x27060,
1207 0x270c0, 0x270ec,
1208 0x27200, 0x27268,
1209 0x27270, 0x27284,
1210 0x272fc, 0x27388,
1211 0x27400, 0x27404,
8119c018
HS
1212 0x27500, 0x27500,
1213 0x27510, 0x27518,
1214 0x2752c, 0x27530,
1215 0x2753c, 0x2753c,
812034f1
HS
1216 0x27550, 0x27554,
1217 0x27600, 0x27600,
8119c018
HS
1218 0x27608, 0x2761c,
1219 0x27624, 0x27628,
1220 0x27630, 0x27634,
1221 0x2763c, 0x2763c,
812034f1
HS
1222 0x27700, 0x2771c,
1223 0x27780, 0x2778c,
8119c018
HS
1224 0x27800, 0x27818,
1225 0x27820, 0x27828,
1226 0x27830, 0x27848,
1227 0x27850, 0x27854,
1228 0x27860, 0x27868,
1229 0x27870, 0x27870,
1230 0x27878, 0x27898,
1231 0x278a0, 0x278a8,
1232 0x278b0, 0x278c8,
1233 0x278d0, 0x278d4,
1234 0x278e0, 0x278e8,
1235 0x278f0, 0x278f0,
1236 0x278f8, 0x27a18,
1237 0x27a20, 0x27a28,
1238 0x27a30, 0x27a48,
1239 0x27a50, 0x27a54,
1240 0x27a60, 0x27a68,
1241 0x27a70, 0x27a70,
1242 0x27a78, 0x27a98,
1243 0x27aa0, 0x27aa8,
1244 0x27ab0, 0x27ac8,
1245 0x27ad0, 0x27ad4,
1246 0x27ae0, 0x27ae8,
1247 0x27af0, 0x27af0,
1248 0x27af8, 0x27c18,
1249 0x27c20, 0x27c20,
1250 0x27c28, 0x27c30,
1251 0x27c38, 0x27c38,
1252 0x27c80, 0x27c98,
1253 0x27ca0, 0x27ca8,
1254 0x27cb0, 0x27cc8,
1255 0x27cd0, 0x27cd4,
1256 0x27ce0, 0x27ce8,
1257 0x27cf0, 0x27cf0,
1258 0x27cf8, 0x27d7c,
9f5ac48d 1259 0x27e00, 0x27e04,
812034f1
HS
1260 };
1261
1262 static const unsigned int t5_reg_ranges[] = {
8119c018
HS
1263 0x1008, 0x10c0,
1264 0x10cc, 0x10f8,
1265 0x1100, 0x1100,
1266 0x110c, 0x1148,
1267 0x1180, 0x1184,
1268 0x1190, 0x1194,
1269 0x11a0, 0x11a4,
1270 0x11b0, 0x11b4,
812034f1
HS
1271 0x11fc, 0x123c,
1272 0x1280, 0x173c,
1273 0x1800, 0x18fc,
1274 0x3000, 0x3028,
8119c018
HS
1275 0x3060, 0x30b0,
1276 0x30b8, 0x30d8,
812034f1
HS
1277 0x30e0, 0x30fc,
1278 0x3140, 0x357c,
1279 0x35a8, 0x35cc,
1280 0x35ec, 0x35ec,
1281 0x3600, 0x5624,
8119c018
HS
1282 0x56cc, 0x56ec,
1283 0x56f4, 0x5720,
1284 0x5728, 0x575c,
812034f1 1285 0x580c, 0x5814,
8119c018
HS
1286 0x5890, 0x589c,
1287 0x58a4, 0x58ac,
1288 0x58b8, 0x58bc,
1289 0x5940, 0x59c8,
1290 0x59d0, 0x59dc,
812034f1 1291 0x59fc, 0x5a18,
8119c018
HS
1292 0x5a60, 0x5a70,
1293 0x5a80, 0x5a9c,
9f5ac48d 1294 0x5b94, 0x5bfc,
8119c018
HS
1295 0x6000, 0x6020,
1296 0x6028, 0x6040,
1297 0x6058, 0x609c,
1298 0x60a8, 0x614c,
812034f1
HS
1299 0x7700, 0x7798,
1300 0x77c0, 0x78fc,
8119c018
HS
1301 0x7b00, 0x7b58,
1302 0x7b60, 0x7b84,
1303 0x7b8c, 0x7c54,
1304 0x7d00, 0x7d38,
1305 0x7d40, 0x7d80,
1306 0x7d8c, 0x7ddc,
1307 0x7de4, 0x7e04,
1308 0x7e10, 0x7e1c,
1309 0x7e24, 0x7e38,
1310 0x7e40, 0x7e44,
1311 0x7e4c, 0x7e78,
1312 0x7e80, 0x7edc,
1313 0x7ee8, 0x7efc,
812034f1 1314 0x8dc0, 0x8de0,
8119c018
HS
1315 0x8df8, 0x8e04,
1316 0x8e10, 0x8e84,
812034f1 1317 0x8ea0, 0x8f84,
8119c018
HS
1318 0x8fc0, 0x9058,
1319 0x9060, 0x9060,
1320 0x9068, 0x90f8,
1321 0x9400, 0x9408,
1322 0x9410, 0x9470,
1323 0x9600, 0x9600,
1324 0x9608, 0x9638,
1325 0x9640, 0x96f4,
812034f1
HS
1326 0x9800, 0x9808,
1327 0x9820, 0x983c,
1328 0x9850, 0x9864,
1329 0x9c00, 0x9c6c,
1330 0x9c80, 0x9cec,
1331 0x9d00, 0x9d6c,
1332 0x9d80, 0x9dec,
1333 0x9e00, 0x9e6c,
1334 0x9e80, 0x9eec,
1335 0x9f00, 0x9f6c,
1336 0x9f80, 0xa020,
8119c018
HS
1337 0xd004, 0xd004,
1338 0xd010, 0xd03c,
812034f1 1339 0xdfc0, 0xdfe0,
8119c018
HS
1340 0xe000, 0x1106c,
1341 0x11074, 0x11088,
1342 0x1109c, 0x1117c,
812034f1
HS
1343 0x11190, 0x11204,
1344 0x19040, 0x1906c,
1345 0x19078, 0x19080,
8119c018
HS
1346 0x1908c, 0x190e8,
1347 0x190f0, 0x190f8,
1348 0x19100, 0x19110,
1349 0x19120, 0x19124,
1350 0x19150, 0x19194,
1351 0x1919c, 0x191b0,
812034f1
HS
1352 0x191d0, 0x191e8,
1353 0x19238, 0x19290,
8119c018
HS
1354 0x193f8, 0x19428,
1355 0x19430, 0x19444,
1356 0x1944c, 0x1946c,
1357 0x19474, 0x19474,
812034f1
HS
1358 0x19490, 0x194cc,
1359 0x194f0, 0x194f8,
8119c018
HS
1360 0x19c00, 0x19c08,
1361 0x19c10, 0x19c60,
1362 0x19c94, 0x19ce4,
1363 0x19cf0, 0x19d40,
1364 0x19d50, 0x19d94,
1365 0x19da0, 0x19de8,
1366 0x19df0, 0x19e10,
1367 0x19e50, 0x19e90,
1368 0x19ea0, 0x19f24,
1369 0x19f34, 0x19f34,
812034f1 1370 0x19f40, 0x19f50,
8119c018
HS
1371 0x19f90, 0x19fb4,
1372 0x19fc4, 0x19fe4,
1373 0x1a000, 0x1a004,
1374 0x1a010, 0x1a06c,
1375 0x1a0b0, 0x1a0e4,
1376 0x1a0ec, 0x1a0f8,
1377 0x1a100, 0x1a108,
1378 0x1a114, 0x1a120,
1379 0x1a128, 0x1a130,
1380 0x1a138, 0x1a138,
812034f1
HS
1381 0x1a190, 0x1a1c4,
1382 0x1a1fc, 0x1a1fc,
1383 0x1e008, 0x1e00c,
8119c018
HS
1384 0x1e040, 0x1e044,
1385 0x1e04c, 0x1e04c,
812034f1
HS
1386 0x1e284, 0x1e290,
1387 0x1e2c0, 0x1e2c0,
1388 0x1e2e0, 0x1e2e0,
1389 0x1e300, 0x1e384,
1390 0x1e3c0, 0x1e3c8,
1391 0x1e408, 0x1e40c,
8119c018
HS
1392 0x1e440, 0x1e444,
1393 0x1e44c, 0x1e44c,
812034f1
HS
1394 0x1e684, 0x1e690,
1395 0x1e6c0, 0x1e6c0,
1396 0x1e6e0, 0x1e6e0,
1397 0x1e700, 0x1e784,
1398 0x1e7c0, 0x1e7c8,
1399 0x1e808, 0x1e80c,
8119c018
HS
1400 0x1e840, 0x1e844,
1401 0x1e84c, 0x1e84c,
812034f1
HS
1402 0x1ea84, 0x1ea90,
1403 0x1eac0, 0x1eac0,
1404 0x1eae0, 0x1eae0,
1405 0x1eb00, 0x1eb84,
1406 0x1ebc0, 0x1ebc8,
1407 0x1ec08, 0x1ec0c,
8119c018
HS
1408 0x1ec40, 0x1ec44,
1409 0x1ec4c, 0x1ec4c,
812034f1
HS
1410 0x1ee84, 0x1ee90,
1411 0x1eec0, 0x1eec0,
1412 0x1eee0, 0x1eee0,
1413 0x1ef00, 0x1ef84,
1414 0x1efc0, 0x1efc8,
1415 0x1f008, 0x1f00c,
8119c018
HS
1416 0x1f040, 0x1f044,
1417 0x1f04c, 0x1f04c,
812034f1
HS
1418 0x1f284, 0x1f290,
1419 0x1f2c0, 0x1f2c0,
1420 0x1f2e0, 0x1f2e0,
1421 0x1f300, 0x1f384,
1422 0x1f3c0, 0x1f3c8,
1423 0x1f408, 0x1f40c,
8119c018
HS
1424 0x1f440, 0x1f444,
1425 0x1f44c, 0x1f44c,
812034f1
HS
1426 0x1f684, 0x1f690,
1427 0x1f6c0, 0x1f6c0,
1428 0x1f6e0, 0x1f6e0,
1429 0x1f700, 0x1f784,
1430 0x1f7c0, 0x1f7c8,
1431 0x1f808, 0x1f80c,
8119c018
HS
1432 0x1f840, 0x1f844,
1433 0x1f84c, 0x1f84c,
812034f1
HS
1434 0x1fa84, 0x1fa90,
1435 0x1fac0, 0x1fac0,
1436 0x1fae0, 0x1fae0,
1437 0x1fb00, 0x1fb84,
1438 0x1fbc0, 0x1fbc8,
1439 0x1fc08, 0x1fc0c,
8119c018
HS
1440 0x1fc40, 0x1fc44,
1441 0x1fc4c, 0x1fc4c,
812034f1
HS
1442 0x1fe84, 0x1fe90,
1443 0x1fec0, 0x1fec0,
1444 0x1fee0, 0x1fee0,
1445 0x1ff00, 0x1ff84,
1446 0x1ffc0, 0x1ffc8,
1447 0x30000, 0x30030,
1448 0x30100, 0x30144,
8119c018
HS
1449 0x30190, 0x301a0,
1450 0x301a8, 0x301b8,
1451 0x301c4, 0x301c8,
1452 0x301d0, 0x301d0,
812034f1 1453 0x30200, 0x30318,
8119c018
HS
1454 0x30400, 0x304b4,
1455 0x304c0, 0x3052c,
812034f1 1456 0x30540, 0x3061c,
8119c018
HS
1457 0x30800, 0x30828,
1458 0x30834, 0x30834,
812034f1
HS
1459 0x308c0, 0x30908,
1460 0x30910, 0x309ac,
8119c018
HS
1461 0x30a00, 0x30a14,
1462 0x30a1c, 0x30a2c,
812034f1 1463 0x30a44, 0x30a50,
8119c018
HS
1464 0x30a74, 0x30a74,
1465 0x30a7c, 0x30afc,
1466 0x30b08, 0x30c24,
9f5ac48d 1467 0x30d00, 0x30d00,
812034f1
HS
1468 0x30d08, 0x30d14,
1469 0x30d1c, 0x30d20,
8119c018
HS
1470 0x30d3c, 0x30d3c,
1471 0x30d48, 0x30d50,
812034f1
HS
1472 0x31200, 0x3120c,
1473 0x31220, 0x31220,
1474 0x31240, 0x31240,
9f5ac48d 1475 0x31600, 0x3160c,
812034f1 1476 0x31a00, 0x31a1c,
9f5ac48d 1477 0x31e00, 0x31e20,
812034f1
HS
1478 0x31e38, 0x31e3c,
1479 0x31e80, 0x31e80,
1480 0x31e88, 0x31ea8,
1481 0x31eb0, 0x31eb4,
1482 0x31ec8, 0x31ed4,
1483 0x31fb8, 0x32004,
9f5ac48d
HS
1484 0x32200, 0x32200,
1485 0x32208, 0x32240,
1486 0x32248, 0x32280,
1487 0x32288, 0x322c0,
1488 0x322c8, 0x322fc,
812034f1
HS
1489 0x32600, 0x32630,
1490 0x32a00, 0x32abc,
8119c018
HS
1491 0x32b00, 0x32b10,
1492 0x32b20, 0x32b30,
1493 0x32b40, 0x32b50,
1494 0x32b60, 0x32b70,
1495 0x33000, 0x33028,
1496 0x33030, 0x33048,
1497 0x33060, 0x33068,
1498 0x33070, 0x3309c,
1499 0x330f0, 0x33128,
1500 0x33130, 0x33148,
1501 0x33160, 0x33168,
1502 0x33170, 0x3319c,
1503 0x331f0, 0x33238,
1504 0x33240, 0x33240,
1505 0x33248, 0x33250,
1506 0x3325c, 0x33264,
1507 0x33270, 0x332b8,
1508 0x332c0, 0x332e4,
1509 0x332f8, 0x33338,
1510 0x33340, 0x33340,
1511 0x33348, 0x33350,
1512 0x3335c, 0x33364,
1513 0x33370, 0x333b8,
1514 0x333c0, 0x333e4,
1515 0x333f8, 0x33428,
1516 0x33430, 0x33448,
1517 0x33460, 0x33468,
1518 0x33470, 0x3349c,
1519 0x334f0, 0x33528,
1520 0x33530, 0x33548,
1521 0x33560, 0x33568,
1522 0x33570, 0x3359c,
1523 0x335f0, 0x33638,
1524 0x33640, 0x33640,
1525 0x33648, 0x33650,
1526 0x3365c, 0x33664,
1527 0x33670, 0x336b8,
1528 0x336c0, 0x336e4,
1529 0x336f8, 0x33738,
1530 0x33740, 0x33740,
1531 0x33748, 0x33750,
1532 0x3375c, 0x33764,
1533 0x33770, 0x337b8,
1534 0x337c0, 0x337e4,
812034f1
HS
1535 0x337f8, 0x337fc,
1536 0x33814, 0x33814,
1537 0x3382c, 0x3382c,
1538 0x33880, 0x3388c,
1539 0x338e8, 0x338ec,
8119c018
HS
1540 0x33900, 0x33928,
1541 0x33930, 0x33948,
1542 0x33960, 0x33968,
1543 0x33970, 0x3399c,
1544 0x339f0, 0x33a38,
1545 0x33a40, 0x33a40,
1546 0x33a48, 0x33a50,
1547 0x33a5c, 0x33a64,
1548 0x33a70, 0x33ab8,
1549 0x33ac0, 0x33ae4,
812034f1
HS
1550 0x33af8, 0x33b10,
1551 0x33b28, 0x33b28,
1552 0x33b3c, 0x33b50,
1553 0x33bf0, 0x33c10,
1554 0x33c28, 0x33c28,
1555 0x33c3c, 0x33c50,
1556 0x33cf0, 0x33cfc,
1557 0x34000, 0x34030,
1558 0x34100, 0x34144,
8119c018
HS
1559 0x34190, 0x341a0,
1560 0x341a8, 0x341b8,
1561 0x341c4, 0x341c8,
1562 0x341d0, 0x341d0,
812034f1 1563 0x34200, 0x34318,
8119c018
HS
1564 0x34400, 0x344b4,
1565 0x344c0, 0x3452c,
812034f1 1566 0x34540, 0x3461c,
8119c018
HS
1567 0x34800, 0x34828,
1568 0x34834, 0x34834,
812034f1
HS
1569 0x348c0, 0x34908,
1570 0x34910, 0x349ac,
8119c018
HS
1571 0x34a00, 0x34a14,
1572 0x34a1c, 0x34a2c,
812034f1 1573 0x34a44, 0x34a50,
8119c018
HS
1574 0x34a74, 0x34a74,
1575 0x34a7c, 0x34afc,
1576 0x34b08, 0x34c24,
9f5ac48d 1577 0x34d00, 0x34d00,
812034f1
HS
1578 0x34d08, 0x34d14,
1579 0x34d1c, 0x34d20,
8119c018
HS
1580 0x34d3c, 0x34d3c,
1581 0x34d48, 0x34d50,
812034f1
HS
1582 0x35200, 0x3520c,
1583 0x35220, 0x35220,
1584 0x35240, 0x35240,
9f5ac48d 1585 0x35600, 0x3560c,
812034f1 1586 0x35a00, 0x35a1c,
9f5ac48d 1587 0x35e00, 0x35e20,
812034f1
HS
1588 0x35e38, 0x35e3c,
1589 0x35e80, 0x35e80,
1590 0x35e88, 0x35ea8,
1591 0x35eb0, 0x35eb4,
1592 0x35ec8, 0x35ed4,
1593 0x35fb8, 0x36004,
9f5ac48d
HS
1594 0x36200, 0x36200,
1595 0x36208, 0x36240,
1596 0x36248, 0x36280,
1597 0x36288, 0x362c0,
1598 0x362c8, 0x362fc,
812034f1
HS
1599 0x36600, 0x36630,
1600 0x36a00, 0x36abc,
8119c018
HS
1601 0x36b00, 0x36b10,
1602 0x36b20, 0x36b30,
1603 0x36b40, 0x36b50,
1604 0x36b60, 0x36b70,
1605 0x37000, 0x37028,
1606 0x37030, 0x37048,
1607 0x37060, 0x37068,
1608 0x37070, 0x3709c,
1609 0x370f0, 0x37128,
1610 0x37130, 0x37148,
1611 0x37160, 0x37168,
1612 0x37170, 0x3719c,
1613 0x371f0, 0x37238,
1614 0x37240, 0x37240,
1615 0x37248, 0x37250,
1616 0x3725c, 0x37264,
1617 0x37270, 0x372b8,
1618 0x372c0, 0x372e4,
1619 0x372f8, 0x37338,
1620 0x37340, 0x37340,
1621 0x37348, 0x37350,
1622 0x3735c, 0x37364,
1623 0x37370, 0x373b8,
1624 0x373c0, 0x373e4,
1625 0x373f8, 0x37428,
1626 0x37430, 0x37448,
1627 0x37460, 0x37468,
1628 0x37470, 0x3749c,
1629 0x374f0, 0x37528,
1630 0x37530, 0x37548,
1631 0x37560, 0x37568,
1632 0x37570, 0x3759c,
1633 0x375f0, 0x37638,
1634 0x37640, 0x37640,
1635 0x37648, 0x37650,
1636 0x3765c, 0x37664,
1637 0x37670, 0x376b8,
1638 0x376c0, 0x376e4,
1639 0x376f8, 0x37738,
1640 0x37740, 0x37740,
1641 0x37748, 0x37750,
1642 0x3775c, 0x37764,
1643 0x37770, 0x377b8,
1644 0x377c0, 0x377e4,
812034f1
HS
1645 0x377f8, 0x377fc,
1646 0x37814, 0x37814,
1647 0x3782c, 0x3782c,
1648 0x37880, 0x3788c,
1649 0x378e8, 0x378ec,
8119c018
HS
1650 0x37900, 0x37928,
1651 0x37930, 0x37948,
1652 0x37960, 0x37968,
1653 0x37970, 0x3799c,
1654 0x379f0, 0x37a38,
1655 0x37a40, 0x37a40,
1656 0x37a48, 0x37a50,
1657 0x37a5c, 0x37a64,
1658 0x37a70, 0x37ab8,
1659 0x37ac0, 0x37ae4,
812034f1
HS
1660 0x37af8, 0x37b10,
1661 0x37b28, 0x37b28,
1662 0x37b3c, 0x37b50,
1663 0x37bf0, 0x37c10,
1664 0x37c28, 0x37c28,
1665 0x37c3c, 0x37c50,
1666 0x37cf0, 0x37cfc,
1667 0x38000, 0x38030,
1668 0x38100, 0x38144,
8119c018
HS
1669 0x38190, 0x381a0,
1670 0x381a8, 0x381b8,
1671 0x381c4, 0x381c8,
1672 0x381d0, 0x381d0,
812034f1 1673 0x38200, 0x38318,
8119c018
HS
1674 0x38400, 0x384b4,
1675 0x384c0, 0x3852c,
812034f1 1676 0x38540, 0x3861c,
8119c018
HS
1677 0x38800, 0x38828,
1678 0x38834, 0x38834,
812034f1
HS
1679 0x388c0, 0x38908,
1680 0x38910, 0x389ac,
8119c018
HS
1681 0x38a00, 0x38a14,
1682 0x38a1c, 0x38a2c,
812034f1 1683 0x38a44, 0x38a50,
8119c018
HS
1684 0x38a74, 0x38a74,
1685 0x38a7c, 0x38afc,
1686 0x38b08, 0x38c24,
9f5ac48d 1687 0x38d00, 0x38d00,
812034f1
HS
1688 0x38d08, 0x38d14,
1689 0x38d1c, 0x38d20,
8119c018
HS
1690 0x38d3c, 0x38d3c,
1691 0x38d48, 0x38d50,
812034f1
HS
1692 0x39200, 0x3920c,
1693 0x39220, 0x39220,
1694 0x39240, 0x39240,
9f5ac48d 1695 0x39600, 0x3960c,
812034f1 1696 0x39a00, 0x39a1c,
9f5ac48d 1697 0x39e00, 0x39e20,
812034f1
HS
1698 0x39e38, 0x39e3c,
1699 0x39e80, 0x39e80,
1700 0x39e88, 0x39ea8,
1701 0x39eb0, 0x39eb4,
1702 0x39ec8, 0x39ed4,
1703 0x39fb8, 0x3a004,
9f5ac48d
HS
1704 0x3a200, 0x3a200,
1705 0x3a208, 0x3a240,
1706 0x3a248, 0x3a280,
1707 0x3a288, 0x3a2c0,
1708 0x3a2c8, 0x3a2fc,
812034f1
HS
1709 0x3a600, 0x3a630,
1710 0x3aa00, 0x3aabc,
8119c018
HS
1711 0x3ab00, 0x3ab10,
1712 0x3ab20, 0x3ab30,
1713 0x3ab40, 0x3ab50,
1714 0x3ab60, 0x3ab70,
1715 0x3b000, 0x3b028,
1716 0x3b030, 0x3b048,
1717 0x3b060, 0x3b068,
1718 0x3b070, 0x3b09c,
1719 0x3b0f0, 0x3b128,
1720 0x3b130, 0x3b148,
1721 0x3b160, 0x3b168,
1722 0x3b170, 0x3b19c,
1723 0x3b1f0, 0x3b238,
1724 0x3b240, 0x3b240,
1725 0x3b248, 0x3b250,
1726 0x3b25c, 0x3b264,
1727 0x3b270, 0x3b2b8,
1728 0x3b2c0, 0x3b2e4,
1729 0x3b2f8, 0x3b338,
1730 0x3b340, 0x3b340,
1731 0x3b348, 0x3b350,
1732 0x3b35c, 0x3b364,
1733 0x3b370, 0x3b3b8,
1734 0x3b3c0, 0x3b3e4,
1735 0x3b3f8, 0x3b428,
1736 0x3b430, 0x3b448,
1737 0x3b460, 0x3b468,
1738 0x3b470, 0x3b49c,
1739 0x3b4f0, 0x3b528,
1740 0x3b530, 0x3b548,
1741 0x3b560, 0x3b568,
1742 0x3b570, 0x3b59c,
1743 0x3b5f0, 0x3b638,
1744 0x3b640, 0x3b640,
1745 0x3b648, 0x3b650,
1746 0x3b65c, 0x3b664,
1747 0x3b670, 0x3b6b8,
1748 0x3b6c0, 0x3b6e4,
1749 0x3b6f8, 0x3b738,
1750 0x3b740, 0x3b740,
1751 0x3b748, 0x3b750,
1752 0x3b75c, 0x3b764,
1753 0x3b770, 0x3b7b8,
1754 0x3b7c0, 0x3b7e4,
812034f1
HS
1755 0x3b7f8, 0x3b7fc,
1756 0x3b814, 0x3b814,
1757 0x3b82c, 0x3b82c,
1758 0x3b880, 0x3b88c,
1759 0x3b8e8, 0x3b8ec,
8119c018
HS
1760 0x3b900, 0x3b928,
1761 0x3b930, 0x3b948,
1762 0x3b960, 0x3b968,
1763 0x3b970, 0x3b99c,
1764 0x3b9f0, 0x3ba38,
1765 0x3ba40, 0x3ba40,
1766 0x3ba48, 0x3ba50,
1767 0x3ba5c, 0x3ba64,
1768 0x3ba70, 0x3bab8,
1769 0x3bac0, 0x3bae4,
812034f1
HS
1770 0x3baf8, 0x3bb10,
1771 0x3bb28, 0x3bb28,
1772 0x3bb3c, 0x3bb50,
1773 0x3bbf0, 0x3bc10,
1774 0x3bc28, 0x3bc28,
1775 0x3bc3c, 0x3bc50,
1776 0x3bcf0, 0x3bcfc,
1777 0x3c000, 0x3c030,
1778 0x3c100, 0x3c144,
8119c018
HS
1779 0x3c190, 0x3c1a0,
1780 0x3c1a8, 0x3c1b8,
1781 0x3c1c4, 0x3c1c8,
1782 0x3c1d0, 0x3c1d0,
812034f1 1783 0x3c200, 0x3c318,
8119c018
HS
1784 0x3c400, 0x3c4b4,
1785 0x3c4c0, 0x3c52c,
812034f1 1786 0x3c540, 0x3c61c,
8119c018
HS
1787 0x3c800, 0x3c828,
1788 0x3c834, 0x3c834,
812034f1
HS
1789 0x3c8c0, 0x3c908,
1790 0x3c910, 0x3c9ac,
8119c018
HS
1791 0x3ca00, 0x3ca14,
1792 0x3ca1c, 0x3ca2c,
812034f1 1793 0x3ca44, 0x3ca50,
8119c018
HS
1794 0x3ca74, 0x3ca74,
1795 0x3ca7c, 0x3cafc,
1796 0x3cb08, 0x3cc24,
9f5ac48d 1797 0x3cd00, 0x3cd00,
812034f1
HS
1798 0x3cd08, 0x3cd14,
1799 0x3cd1c, 0x3cd20,
8119c018
HS
1800 0x3cd3c, 0x3cd3c,
1801 0x3cd48, 0x3cd50,
812034f1
HS
1802 0x3d200, 0x3d20c,
1803 0x3d220, 0x3d220,
1804 0x3d240, 0x3d240,
9f5ac48d 1805 0x3d600, 0x3d60c,
812034f1 1806 0x3da00, 0x3da1c,
9f5ac48d 1807 0x3de00, 0x3de20,
812034f1
HS
1808 0x3de38, 0x3de3c,
1809 0x3de80, 0x3de80,
1810 0x3de88, 0x3dea8,
1811 0x3deb0, 0x3deb4,
1812 0x3dec8, 0x3ded4,
1813 0x3dfb8, 0x3e004,
9f5ac48d
HS
1814 0x3e200, 0x3e200,
1815 0x3e208, 0x3e240,
1816 0x3e248, 0x3e280,
1817 0x3e288, 0x3e2c0,
1818 0x3e2c8, 0x3e2fc,
812034f1
HS
1819 0x3e600, 0x3e630,
1820 0x3ea00, 0x3eabc,
8119c018
HS
1821 0x3eb00, 0x3eb10,
1822 0x3eb20, 0x3eb30,
1823 0x3eb40, 0x3eb50,
1824 0x3eb60, 0x3eb70,
1825 0x3f000, 0x3f028,
1826 0x3f030, 0x3f048,
1827 0x3f060, 0x3f068,
1828 0x3f070, 0x3f09c,
1829 0x3f0f0, 0x3f128,
1830 0x3f130, 0x3f148,
1831 0x3f160, 0x3f168,
1832 0x3f170, 0x3f19c,
1833 0x3f1f0, 0x3f238,
1834 0x3f240, 0x3f240,
1835 0x3f248, 0x3f250,
1836 0x3f25c, 0x3f264,
1837 0x3f270, 0x3f2b8,
1838 0x3f2c0, 0x3f2e4,
1839 0x3f2f8, 0x3f338,
1840 0x3f340, 0x3f340,
1841 0x3f348, 0x3f350,
1842 0x3f35c, 0x3f364,
1843 0x3f370, 0x3f3b8,
1844 0x3f3c0, 0x3f3e4,
1845 0x3f3f8, 0x3f428,
1846 0x3f430, 0x3f448,
1847 0x3f460, 0x3f468,
1848 0x3f470, 0x3f49c,
1849 0x3f4f0, 0x3f528,
1850 0x3f530, 0x3f548,
1851 0x3f560, 0x3f568,
1852 0x3f570, 0x3f59c,
1853 0x3f5f0, 0x3f638,
1854 0x3f640, 0x3f640,
1855 0x3f648, 0x3f650,
1856 0x3f65c, 0x3f664,
1857 0x3f670, 0x3f6b8,
1858 0x3f6c0, 0x3f6e4,
1859 0x3f6f8, 0x3f738,
1860 0x3f740, 0x3f740,
1861 0x3f748, 0x3f750,
1862 0x3f75c, 0x3f764,
1863 0x3f770, 0x3f7b8,
1864 0x3f7c0, 0x3f7e4,
812034f1
HS
1865 0x3f7f8, 0x3f7fc,
1866 0x3f814, 0x3f814,
1867 0x3f82c, 0x3f82c,
1868 0x3f880, 0x3f88c,
1869 0x3f8e8, 0x3f8ec,
8119c018
HS
1870 0x3f900, 0x3f928,
1871 0x3f930, 0x3f948,
1872 0x3f960, 0x3f968,
1873 0x3f970, 0x3f99c,
1874 0x3f9f0, 0x3fa38,
1875 0x3fa40, 0x3fa40,
1876 0x3fa48, 0x3fa50,
1877 0x3fa5c, 0x3fa64,
1878 0x3fa70, 0x3fab8,
1879 0x3fac0, 0x3fae4,
812034f1
HS
1880 0x3faf8, 0x3fb10,
1881 0x3fb28, 0x3fb28,
1882 0x3fb3c, 0x3fb50,
1883 0x3fbf0, 0x3fc10,
1884 0x3fc28, 0x3fc28,
1885 0x3fc3c, 0x3fc50,
1886 0x3fcf0, 0x3fcfc,
1887 0x40000, 0x4000c,
8119c018
HS
1888 0x40040, 0x40050,
1889 0x40060, 0x40068,
1890 0x4007c, 0x4008c,
1891 0x40094, 0x400b0,
1892 0x400c0, 0x40144,
812034f1 1893 0x40180, 0x4018c,
8119c018
HS
1894 0x40200, 0x40254,
1895 0x40260, 0x40264,
1896 0x40270, 0x40288,
1897 0x40290, 0x40298,
1898 0x402ac, 0x402c8,
1899 0x402d0, 0x402e0,
1900 0x402f0, 0x402f0,
1901 0x40300, 0x4033c,
812034f1
HS
1902 0x403f8, 0x403fc,
1903 0x41304, 0x413c4,
8119c018
HS
1904 0x41400, 0x4140c,
1905 0x41414, 0x4141c,
812034f1 1906 0x41480, 0x414d0,
8119c018
HS
1907 0x44000, 0x44054,
1908 0x4405c, 0x44078,
1909 0x440c0, 0x44174,
1910 0x44180, 0x441ac,
1911 0x441b4, 0x441b8,
1912 0x441c0, 0x44254,
1913 0x4425c, 0x44278,
1914 0x442c0, 0x44374,
1915 0x44380, 0x443ac,
1916 0x443b4, 0x443b8,
1917 0x443c0, 0x44454,
1918 0x4445c, 0x44478,
1919 0x444c0, 0x44574,
1920 0x44580, 0x445ac,
1921 0x445b4, 0x445b8,
1922 0x445c0, 0x44654,
1923 0x4465c, 0x44678,
1924 0x446c0, 0x44774,
1925 0x44780, 0x447ac,
1926 0x447b4, 0x447b8,
1927 0x447c0, 0x44854,
1928 0x4485c, 0x44878,
1929 0x448c0, 0x44974,
1930 0x44980, 0x449ac,
1931 0x449b4, 0x449b8,
1932 0x449c0, 0x449fc,
1933 0x45000, 0x45004,
1934 0x45010, 0x45030,
1935 0x45040, 0x45060,
1936 0x45068, 0x45068,
812034f1
HS
1937 0x45080, 0x45084,
1938 0x450a0, 0x450b0,
8119c018
HS
1939 0x45200, 0x45204,
1940 0x45210, 0x45230,
1941 0x45240, 0x45260,
1942 0x45268, 0x45268,
812034f1
HS
1943 0x45280, 0x45284,
1944 0x452a0, 0x452b0,
1945 0x460c0, 0x460e4,
8119c018
HS
1946 0x47000, 0x4703c,
1947 0x47044, 0x4708c,
812034f1 1948 0x47200, 0x47250,
8119c018
HS
1949 0x47400, 0x47408,
1950 0x47414, 0x47420,
812034f1
HS
1951 0x47600, 0x47618,
1952 0x47800, 0x47814,
1953 0x48000, 0x4800c,
8119c018
HS
1954 0x48040, 0x48050,
1955 0x48060, 0x48068,
1956 0x4807c, 0x4808c,
1957 0x48094, 0x480b0,
1958 0x480c0, 0x48144,
812034f1 1959 0x48180, 0x4818c,
8119c018
HS
1960 0x48200, 0x48254,
1961 0x48260, 0x48264,
1962 0x48270, 0x48288,
1963 0x48290, 0x48298,
1964 0x482ac, 0x482c8,
1965 0x482d0, 0x482e0,
1966 0x482f0, 0x482f0,
1967 0x48300, 0x4833c,
812034f1
HS
1968 0x483f8, 0x483fc,
1969 0x49304, 0x493c4,
8119c018
HS
1970 0x49400, 0x4940c,
1971 0x49414, 0x4941c,
812034f1 1972 0x49480, 0x494d0,
8119c018
HS
1973 0x4c000, 0x4c054,
1974 0x4c05c, 0x4c078,
1975 0x4c0c0, 0x4c174,
1976 0x4c180, 0x4c1ac,
1977 0x4c1b4, 0x4c1b8,
1978 0x4c1c0, 0x4c254,
1979 0x4c25c, 0x4c278,
1980 0x4c2c0, 0x4c374,
1981 0x4c380, 0x4c3ac,
1982 0x4c3b4, 0x4c3b8,
1983 0x4c3c0, 0x4c454,
1984 0x4c45c, 0x4c478,
1985 0x4c4c0, 0x4c574,
1986 0x4c580, 0x4c5ac,
1987 0x4c5b4, 0x4c5b8,
1988 0x4c5c0, 0x4c654,
1989 0x4c65c, 0x4c678,
1990 0x4c6c0, 0x4c774,
1991 0x4c780, 0x4c7ac,
1992 0x4c7b4, 0x4c7b8,
1993 0x4c7c0, 0x4c854,
1994 0x4c85c, 0x4c878,
1995 0x4c8c0, 0x4c974,
1996 0x4c980, 0x4c9ac,
1997 0x4c9b4, 0x4c9b8,
1998 0x4c9c0, 0x4c9fc,
1999 0x4d000, 0x4d004,
2000 0x4d010, 0x4d030,
2001 0x4d040, 0x4d060,
2002 0x4d068, 0x4d068,
812034f1
HS
2003 0x4d080, 0x4d084,
2004 0x4d0a0, 0x4d0b0,
8119c018
HS
2005 0x4d200, 0x4d204,
2006 0x4d210, 0x4d230,
2007 0x4d240, 0x4d260,
2008 0x4d268, 0x4d268,
812034f1
HS
2009 0x4d280, 0x4d284,
2010 0x4d2a0, 0x4d2b0,
2011 0x4e0c0, 0x4e0e4,
8119c018
HS
2012 0x4f000, 0x4f03c,
2013 0x4f044, 0x4f08c,
812034f1 2014 0x4f200, 0x4f250,
8119c018
HS
2015 0x4f400, 0x4f408,
2016 0x4f414, 0x4f420,
812034f1
HS
2017 0x4f600, 0x4f618,
2018 0x4f800, 0x4f814,
8119c018
HS
2019 0x50000, 0x50084,
2020 0x50090, 0x500cc,
812034f1 2021 0x50400, 0x50400,
8119c018
HS
2022 0x50800, 0x50884,
2023 0x50890, 0x508cc,
812034f1
HS
2024 0x50c00, 0x50c00,
2025 0x51000, 0x5101c,
2026 0x51300, 0x51308,
2027 };
2028
ab4b583b 2029 static const unsigned int t6_reg_ranges[] = {
8119c018
HS
2030 0x1008, 0x101c,
2031 0x1024, 0x10a8,
2032 0x10b4, 0x10f8,
2033 0x1100, 0x1114,
2034 0x111c, 0x112c,
2035 0x1138, 0x113c,
2036 0x1144, 0x114c,
2037 0x1180, 0x1184,
2038 0x1190, 0x1194,
2039 0x11a0, 0x11a4,
2040 0x11b0, 0x11b4,
04d8980b
AV
2041 0x11fc, 0x1274,
2042 0x1280, 0x133c,
ab4b583b
HS
2043 0x1800, 0x18fc,
2044 0x3000, 0x302c,
8119c018
HS
2045 0x3060, 0x30b0,
2046 0x30b8, 0x30d8,
ab4b583b
HS
2047 0x30e0, 0x30fc,
2048 0x3140, 0x357c,
2049 0x35a8, 0x35cc,
2050 0x35ec, 0x35ec,
2051 0x3600, 0x5624,
8119c018
HS
2052 0x56cc, 0x56ec,
2053 0x56f4, 0x5720,
2054 0x5728, 0x575c,
ab4b583b 2055 0x580c, 0x5814,
8119c018
HS
2056 0x5890, 0x589c,
2057 0x58a4, 0x58ac,
2058 0x58b8, 0x58bc,
ab4b583b
HS
2059 0x5940, 0x595c,
2060 0x5980, 0x598c,
8119c018
HS
2061 0x59b0, 0x59c8,
2062 0x59d0, 0x59dc,
ab4b583b
HS
2063 0x59fc, 0x5a18,
2064 0x5a60, 0x5a6c,
8119c018
HS
2065 0x5a80, 0x5a8c,
2066 0x5a94, 0x5a9c,
ab4b583b 2067 0x5b94, 0x5bfc,
8119c018
HS
2068 0x5c10, 0x5e48,
2069 0x5e50, 0x5e94,
2070 0x5ea0, 0x5eb0,
2071 0x5ec0, 0x5ec0,
676d6a75 2072 0x5ec8, 0x5ed0,
04d8980b
AV
2073 0x5ee0, 0x5ee0,
2074 0x5ef0, 0x5ef0,
2075 0x5f00, 0x5f00,
8119c018
HS
2076 0x6000, 0x6020,
2077 0x6028, 0x6040,
2078 0x6058, 0x609c,
2079 0x60a8, 0x619c,
ab4b583b
HS
2080 0x7700, 0x7798,
2081 0x77c0, 0x7880,
2082 0x78cc, 0x78fc,
8119c018
HS
2083 0x7b00, 0x7b58,
2084 0x7b60, 0x7b84,
2085 0x7b8c, 0x7c54,
2086 0x7d00, 0x7d38,
2087 0x7d40, 0x7d84,
2088 0x7d8c, 0x7ddc,
2089 0x7de4, 0x7e04,
2090 0x7e10, 0x7e1c,
2091 0x7e24, 0x7e38,
2092 0x7e40, 0x7e44,
2093 0x7e4c, 0x7e78,
2094 0x7e80, 0x7edc,
2095 0x7ee8, 0x7efc,
f109ff11 2096 0x8dc0, 0x8de4,
8119c018
HS
2097 0x8df8, 0x8e04,
2098 0x8e10, 0x8e84,
ab4b583b 2099 0x8ea0, 0x8f88,
8119c018
HS
2100 0x8fb8, 0x9058,
2101 0x9060, 0x9060,
2102 0x9068, 0x90f8,
2103 0x9100, 0x9124,
ab4b583b 2104 0x9400, 0x9470,
8119c018
HS
2105 0x9600, 0x9600,
2106 0x9608, 0x9638,
2107 0x9640, 0x9704,
2108 0x9710, 0x971c,
ab4b583b
HS
2109 0x9800, 0x9808,
2110 0x9820, 0x983c,
2111 0x9850, 0x9864,
2112 0x9c00, 0x9c6c,
2113 0x9c80, 0x9cec,
2114 0x9d00, 0x9d6c,
2115 0x9d80, 0x9dec,
2116 0x9e00, 0x9e6c,
2117 0x9e80, 0x9eec,
2118 0x9f00, 0x9f6c,
2119 0x9f80, 0xa020,
2120 0xd004, 0xd03c,
5b4e83e1 2121 0xd100, 0xd118,
8119c018
HS
2122 0xd200, 0xd214,
2123 0xd220, 0xd234,
2124 0xd240, 0xd254,
2125 0xd260, 0xd274,
2126 0xd280, 0xd294,
2127 0xd2a0, 0xd2b4,
2128 0xd2c0, 0xd2d4,
2129 0xd2e0, 0xd2f4,
2130 0xd300, 0xd31c,
ab4b583b
HS
2131 0xdfc0, 0xdfe0,
2132 0xe000, 0xf008,
04d8980b
AV
2133 0xf010, 0xf018,
2134 0xf020, 0xf028,
ab4b583b 2135 0x11000, 0x11014,
8119c018
HS
2136 0x11048, 0x1106c,
2137 0x11074, 0x11088,
2138 0x11098, 0x11120,
2139 0x1112c, 0x1117c,
2140 0x11190, 0x112e0,
ab4b583b 2141 0x11300, 0x1130c,
5b4e83e1 2142 0x12000, 0x1206c,
ab4b583b
HS
2143 0x19040, 0x1906c,
2144 0x19078, 0x19080,
8119c018
HS
2145 0x1908c, 0x190e8,
2146 0x190f0, 0x190f8,
2147 0x19100, 0x19110,
2148 0x19120, 0x19124,
2149 0x19150, 0x19194,
2150 0x1919c, 0x191b0,
ab4b583b 2151 0x191d0, 0x191e8,
676d6a75
HS
2152 0x19238, 0x19290,
2153 0x192a4, 0x192b0,
8119c018
HS
2154 0x192bc, 0x192bc,
2155 0x19348, 0x1934c,
2156 0x193f8, 0x19418,
2157 0x19420, 0x19428,
2158 0x19430, 0x19444,
2159 0x1944c, 0x1946c,
2160 0x19474, 0x19474,
ab4b583b
HS
2161 0x19490, 0x194cc,
2162 0x194f0, 0x194f8,
8119c018
HS
2163 0x19c00, 0x19c48,
2164 0x19c50, 0x19c80,
2165 0x19c94, 0x19c98,
2166 0x19ca0, 0x19cbc,
2167 0x19ce4, 0x19ce4,
2168 0x19cf0, 0x19cf8,
2169 0x19d00, 0x19d28,
ab4b583b 2170 0x19d50, 0x19d78,
8119c018
HS
2171 0x19d94, 0x19d98,
2172 0x19da0, 0x19dc8,
ab4b583b
HS
2173 0x19df0, 0x19e10,
2174 0x19e50, 0x19e6c,
8119c018
HS
2175 0x19ea0, 0x19ebc,
2176 0x19ec4, 0x19ef4,
2177 0x19f04, 0x19f2c,
2178 0x19f34, 0x19f34,
ab4b583b
HS
2179 0x19f40, 0x19f50,
2180 0x19f90, 0x19fac,
8119c018
HS
2181 0x19fc4, 0x19fc8,
2182 0x19fd0, 0x19fe4,
2183 0x1a000, 0x1a004,
2184 0x1a010, 0x1a06c,
2185 0x1a0b0, 0x1a0e4,
2186 0x1a0ec, 0x1a0f8,
2187 0x1a100, 0x1a108,
2188 0x1a114, 0x1a120,
2189 0x1a128, 0x1a130,
2190 0x1a138, 0x1a138,
ab4b583b
HS
2191 0x1a190, 0x1a1c4,
2192 0x1a1fc, 0x1a1fc,
2193 0x1e008, 0x1e00c,
8119c018
HS
2194 0x1e040, 0x1e044,
2195 0x1e04c, 0x1e04c,
ab4b583b
HS
2196 0x1e284, 0x1e290,
2197 0x1e2c0, 0x1e2c0,
2198 0x1e2e0, 0x1e2e0,
2199 0x1e300, 0x1e384,
2200 0x1e3c0, 0x1e3c8,
2201 0x1e408, 0x1e40c,
8119c018
HS
2202 0x1e440, 0x1e444,
2203 0x1e44c, 0x1e44c,
ab4b583b
HS
2204 0x1e684, 0x1e690,
2205 0x1e6c0, 0x1e6c0,
2206 0x1e6e0, 0x1e6e0,
2207 0x1e700, 0x1e784,
2208 0x1e7c0, 0x1e7c8,
2209 0x1e808, 0x1e80c,
8119c018
HS
2210 0x1e840, 0x1e844,
2211 0x1e84c, 0x1e84c,
ab4b583b
HS
2212 0x1ea84, 0x1ea90,
2213 0x1eac0, 0x1eac0,
2214 0x1eae0, 0x1eae0,
2215 0x1eb00, 0x1eb84,
2216 0x1ebc0, 0x1ebc8,
2217 0x1ec08, 0x1ec0c,
8119c018
HS
2218 0x1ec40, 0x1ec44,
2219 0x1ec4c, 0x1ec4c,
ab4b583b
HS
2220 0x1ee84, 0x1ee90,
2221 0x1eec0, 0x1eec0,
2222 0x1eee0, 0x1eee0,
2223 0x1ef00, 0x1ef84,
2224 0x1efc0, 0x1efc8,
2225 0x1f008, 0x1f00c,
8119c018
HS
2226 0x1f040, 0x1f044,
2227 0x1f04c, 0x1f04c,
ab4b583b
HS
2228 0x1f284, 0x1f290,
2229 0x1f2c0, 0x1f2c0,
2230 0x1f2e0, 0x1f2e0,
2231 0x1f300, 0x1f384,
2232 0x1f3c0, 0x1f3c8,
2233 0x1f408, 0x1f40c,
8119c018
HS
2234 0x1f440, 0x1f444,
2235 0x1f44c, 0x1f44c,
ab4b583b
HS
2236 0x1f684, 0x1f690,
2237 0x1f6c0, 0x1f6c0,
2238 0x1f6e0, 0x1f6e0,
2239 0x1f700, 0x1f784,
2240 0x1f7c0, 0x1f7c8,
2241 0x1f808, 0x1f80c,
8119c018
HS
2242 0x1f840, 0x1f844,
2243 0x1f84c, 0x1f84c,
ab4b583b
HS
2244 0x1fa84, 0x1fa90,
2245 0x1fac0, 0x1fac0,
2246 0x1fae0, 0x1fae0,
2247 0x1fb00, 0x1fb84,
2248 0x1fbc0, 0x1fbc8,
2249 0x1fc08, 0x1fc0c,
8119c018
HS
2250 0x1fc40, 0x1fc44,
2251 0x1fc4c, 0x1fc4c,
ab4b583b
HS
2252 0x1fe84, 0x1fe90,
2253 0x1fec0, 0x1fec0,
2254 0x1fee0, 0x1fee0,
2255 0x1ff00, 0x1ff84,
2256 0x1ffc0, 0x1ffc8,
8119c018 2257 0x30000, 0x30030,
8119c018
HS
2258 0x30100, 0x30168,
2259 0x30190, 0x301a0,
2260 0x301a8, 0x301b8,
2261 0x301c4, 0x301c8,
2262 0x301d0, 0x301d0,
f109ff11 2263 0x30200, 0x30320,
8119c018
HS
2264 0x30400, 0x304b4,
2265 0x304c0, 0x3052c,
ab4b583b 2266 0x30540, 0x3061c,
8119c018 2267 0x30800, 0x308a0,
ab4b583b
HS
2268 0x308c0, 0x30908,
2269 0x30910, 0x309b8,
2270 0x30a00, 0x30a04,
8119c018
HS
2271 0x30a0c, 0x30a14,
2272 0x30a1c, 0x30a2c,
ab4b583b 2273 0x30a44, 0x30a50,
8119c018
HS
2274 0x30a74, 0x30a74,
2275 0x30a7c, 0x30afc,
2276 0x30b08, 0x30c24,
2277 0x30d00, 0x30d14,
2278 0x30d1c, 0x30d3c,
2279 0x30d44, 0x30d4c,
2280 0x30d54, 0x30d74,
2281 0x30d7c, 0x30d7c,
ab4b583b
HS
2282 0x30de0, 0x30de0,
2283 0x30e00, 0x30ed4,
2284 0x30f00, 0x30fa4,
2285 0x30fc0, 0x30fc4,
2286 0x31000, 0x31004,
2287 0x31080, 0x310fc,
2288 0x31208, 0x31220,
2289 0x3123c, 0x31254,
2290 0x31300, 0x31300,
2291 0x31308, 0x3131c,
2292 0x31338, 0x3133c,
2293 0x31380, 0x31380,
2294 0x31388, 0x313a8,
2295 0x313b4, 0x313b4,
2296 0x31400, 0x31420,
2297 0x31438, 0x3143c,
2298 0x31480, 0x31480,
2299 0x314a8, 0x314a8,
2300 0x314b0, 0x314b4,
2301 0x314c8, 0x314d4,
2302 0x31a40, 0x31a4c,
2303 0x31af0, 0x31b20,
2304 0x31b38, 0x31b3c,
2305 0x31b80, 0x31b80,
2306 0x31ba8, 0x31ba8,
2307 0x31bb0, 0x31bb4,
2308 0x31bc8, 0x31bd4,
2309 0x32140, 0x3218c,
8119c018
HS
2310 0x321f0, 0x321f4,
2311 0x32200, 0x32200,
ab4b583b
HS
2312 0x32218, 0x32218,
2313 0x32400, 0x32400,
2314 0x32408, 0x3241c,
2315 0x32618, 0x32620,
2316 0x32664, 0x32664,
2317 0x326a8, 0x326a8,
2318 0x326ec, 0x326ec,
2319 0x32a00, 0x32abc,
04d8980b
AV
2320 0x32b00, 0x32b18,
2321 0x32b20, 0x32b38,
8119c018
HS
2322 0x32b40, 0x32b58,
2323 0x32b60, 0x32b78,
ab4b583b
HS
2324 0x32c00, 0x32c00,
2325 0x32c08, 0x32c3c,
8119c018
HS
2326 0x33000, 0x3302c,
2327 0x33034, 0x33050,
2328 0x33058, 0x33058,
2329 0x33060, 0x3308c,
2330 0x3309c, 0x330ac,
2331 0x330c0, 0x330c0,
2332 0x330c8, 0x330d0,
2333 0x330d8, 0x330e0,
2334 0x330ec, 0x3312c,
2335 0x33134, 0x33150,
2336 0x33158, 0x33158,
2337 0x33160, 0x3318c,
2338 0x3319c, 0x331ac,
2339 0x331c0, 0x331c0,
2340 0x331c8, 0x331d0,
2341 0x331d8, 0x331e0,
2342 0x331ec, 0x33290,
2343 0x33298, 0x332c4,
2344 0x332e4, 0x33390,
2345 0x33398, 0x333c4,
2346 0x333e4, 0x3342c,
2347 0x33434, 0x33450,
2348 0x33458, 0x33458,
2349 0x33460, 0x3348c,
2350 0x3349c, 0x334ac,
2351 0x334c0, 0x334c0,
2352 0x334c8, 0x334d0,
2353 0x334d8, 0x334e0,
2354 0x334ec, 0x3352c,
2355 0x33534, 0x33550,
2356 0x33558, 0x33558,
2357 0x33560, 0x3358c,
2358 0x3359c, 0x335ac,
2359 0x335c0, 0x335c0,
2360 0x335c8, 0x335d0,
2361 0x335d8, 0x335e0,
2362 0x335ec, 0x33690,
2363 0x33698, 0x336c4,
2364 0x336e4, 0x33790,
2365 0x33798, 0x337c4,
ab4b583b
HS
2366 0x337e4, 0x337fc,
2367 0x33814, 0x33814,
2368 0x33854, 0x33868,
2369 0x33880, 0x3388c,
2370 0x338c0, 0x338d0,
2371 0x338e8, 0x338ec,
8119c018
HS
2372 0x33900, 0x3392c,
2373 0x33934, 0x33950,
2374 0x33958, 0x33958,
2375 0x33960, 0x3398c,
2376 0x3399c, 0x339ac,
2377 0x339c0, 0x339c0,
2378 0x339c8, 0x339d0,
2379 0x339d8, 0x339e0,
2380 0x339ec, 0x33a90,
2381 0x33a98, 0x33ac4,
ab4b583b 2382 0x33ae4, 0x33b10,
8119c018
HS
2383 0x33b24, 0x33b28,
2384 0x33b38, 0x33b50,
ab4b583b 2385 0x33bf0, 0x33c10,
8119c018
HS
2386 0x33c24, 0x33c28,
2387 0x33c38, 0x33c50,
ab4b583b 2388 0x33cf0, 0x33cfc,
8119c018 2389 0x34000, 0x34030,
8119c018
HS
2390 0x34100, 0x34168,
2391 0x34190, 0x341a0,
2392 0x341a8, 0x341b8,
2393 0x341c4, 0x341c8,
2394 0x341d0, 0x341d0,
f109ff11 2395 0x34200, 0x34320,
8119c018
HS
2396 0x34400, 0x344b4,
2397 0x344c0, 0x3452c,
ab4b583b 2398 0x34540, 0x3461c,
8119c018 2399 0x34800, 0x348a0,
ab4b583b
HS
2400 0x348c0, 0x34908,
2401 0x34910, 0x349b8,
2402 0x34a00, 0x34a04,
8119c018
HS
2403 0x34a0c, 0x34a14,
2404 0x34a1c, 0x34a2c,
ab4b583b 2405 0x34a44, 0x34a50,
8119c018
HS
2406 0x34a74, 0x34a74,
2407 0x34a7c, 0x34afc,
2408 0x34b08, 0x34c24,
2409 0x34d00, 0x34d14,
2410 0x34d1c, 0x34d3c,
2411 0x34d44, 0x34d4c,
2412 0x34d54, 0x34d74,
2413 0x34d7c, 0x34d7c,
ab4b583b
HS
2414 0x34de0, 0x34de0,
2415 0x34e00, 0x34ed4,
2416 0x34f00, 0x34fa4,
2417 0x34fc0, 0x34fc4,
2418 0x35000, 0x35004,
2419 0x35080, 0x350fc,
2420 0x35208, 0x35220,
2421 0x3523c, 0x35254,
2422 0x35300, 0x35300,
2423 0x35308, 0x3531c,
2424 0x35338, 0x3533c,
2425 0x35380, 0x35380,
2426 0x35388, 0x353a8,
2427 0x353b4, 0x353b4,
2428 0x35400, 0x35420,
2429 0x35438, 0x3543c,
2430 0x35480, 0x35480,
2431 0x354a8, 0x354a8,
2432 0x354b0, 0x354b4,
2433 0x354c8, 0x354d4,
2434 0x35a40, 0x35a4c,
2435 0x35af0, 0x35b20,
2436 0x35b38, 0x35b3c,
2437 0x35b80, 0x35b80,
2438 0x35ba8, 0x35ba8,
2439 0x35bb0, 0x35bb4,
2440 0x35bc8, 0x35bd4,
2441 0x36140, 0x3618c,
8119c018
HS
2442 0x361f0, 0x361f4,
2443 0x36200, 0x36200,
ab4b583b
HS
2444 0x36218, 0x36218,
2445 0x36400, 0x36400,
2446 0x36408, 0x3641c,
2447 0x36618, 0x36620,
2448 0x36664, 0x36664,
2449 0x366a8, 0x366a8,
2450 0x366ec, 0x366ec,
2451 0x36a00, 0x36abc,
04d8980b
AV
2452 0x36b00, 0x36b18,
2453 0x36b20, 0x36b38,
8119c018
HS
2454 0x36b40, 0x36b58,
2455 0x36b60, 0x36b78,
ab4b583b
HS
2456 0x36c00, 0x36c00,
2457 0x36c08, 0x36c3c,
8119c018
HS
2458 0x37000, 0x3702c,
2459 0x37034, 0x37050,
2460 0x37058, 0x37058,
2461 0x37060, 0x3708c,
2462 0x3709c, 0x370ac,
2463 0x370c0, 0x370c0,
2464 0x370c8, 0x370d0,
2465 0x370d8, 0x370e0,
2466 0x370ec, 0x3712c,
2467 0x37134, 0x37150,
2468 0x37158, 0x37158,
2469 0x37160, 0x3718c,
2470 0x3719c, 0x371ac,
2471 0x371c0, 0x371c0,
2472 0x371c8, 0x371d0,
2473 0x371d8, 0x371e0,
2474 0x371ec, 0x37290,
2475 0x37298, 0x372c4,
2476 0x372e4, 0x37390,
2477 0x37398, 0x373c4,
2478 0x373e4, 0x3742c,
2479 0x37434, 0x37450,
2480 0x37458, 0x37458,
2481 0x37460, 0x3748c,
2482 0x3749c, 0x374ac,
2483 0x374c0, 0x374c0,
2484 0x374c8, 0x374d0,
2485 0x374d8, 0x374e0,
2486 0x374ec, 0x3752c,
2487 0x37534, 0x37550,
2488 0x37558, 0x37558,
2489 0x37560, 0x3758c,
2490 0x3759c, 0x375ac,
2491 0x375c0, 0x375c0,
2492 0x375c8, 0x375d0,
2493 0x375d8, 0x375e0,
2494 0x375ec, 0x37690,
2495 0x37698, 0x376c4,
2496 0x376e4, 0x37790,
2497 0x37798, 0x377c4,
ab4b583b
HS
2498 0x377e4, 0x377fc,
2499 0x37814, 0x37814,
2500 0x37854, 0x37868,
2501 0x37880, 0x3788c,
2502 0x378c0, 0x378d0,
2503 0x378e8, 0x378ec,
8119c018
HS
2504 0x37900, 0x3792c,
2505 0x37934, 0x37950,
2506 0x37958, 0x37958,
2507 0x37960, 0x3798c,
2508 0x3799c, 0x379ac,
2509 0x379c0, 0x379c0,
2510 0x379c8, 0x379d0,
2511 0x379d8, 0x379e0,
2512 0x379ec, 0x37a90,
2513 0x37a98, 0x37ac4,
ab4b583b 2514 0x37ae4, 0x37b10,
8119c018
HS
2515 0x37b24, 0x37b28,
2516 0x37b38, 0x37b50,
ab4b583b 2517 0x37bf0, 0x37c10,
8119c018
HS
2518 0x37c24, 0x37c28,
2519 0x37c38, 0x37c50,
ab4b583b
HS
2520 0x37cf0, 0x37cfc,
2521 0x40040, 0x40040,
2522 0x40080, 0x40084,
2523 0x40100, 0x40100,
2524 0x40140, 0x401bc,
2525 0x40200, 0x40214,
2526 0x40228, 0x40228,
2527 0x40240, 0x40258,
2528 0x40280, 0x40280,
2529 0x40304, 0x40304,
2530 0x40330, 0x4033c,
04d8980b 2531 0x41304, 0x413c8,
8119c018
HS
2532 0x413d0, 0x413dc,
2533 0x413f0, 0x413f0,
2534 0x41400, 0x4140c,
2535 0x41414, 0x4141c,
ab4b583b
HS
2536 0x41480, 0x414d0,
2537 0x44000, 0x4407c,
8119c018
HS
2538 0x440c0, 0x441ac,
2539 0x441b4, 0x4427c,
2540 0x442c0, 0x443ac,
2541 0x443b4, 0x4447c,
2542 0x444c0, 0x445ac,
2543 0x445b4, 0x4467c,
2544 0x446c0, 0x447ac,
2545 0x447b4, 0x4487c,
2546 0x448c0, 0x449ac,
2547 0x449b4, 0x44a7c,
2548 0x44ac0, 0x44bac,
2549 0x44bb4, 0x44c7c,
2550 0x44cc0, 0x44dac,
2551 0x44db4, 0x44e7c,
2552 0x44ec0, 0x44fac,
2553 0x44fb4, 0x4507c,
2554 0x450c0, 0x451ac,
2555 0x451b4, 0x451fc,
2556 0x45800, 0x45804,
2557 0x45810, 0x45830,
2558 0x45840, 0x45860,
2559 0x45868, 0x45868,
ab4b583b
HS
2560 0x45880, 0x45884,
2561 0x458a0, 0x458b0,
8119c018
HS
2562 0x45a00, 0x45a04,
2563 0x45a10, 0x45a30,
2564 0x45a40, 0x45a60,
2565 0x45a68, 0x45a68,
ab4b583b
HS
2566 0x45a80, 0x45a84,
2567 0x45aa0, 0x45ab0,
2568 0x460c0, 0x460e4,
8119c018
HS
2569 0x47000, 0x4703c,
2570 0x47044, 0x4708c,
ab4b583b 2571 0x47200, 0x47250,
8119c018
HS
2572 0x47400, 0x47408,
2573 0x47414, 0x47420,
ab4b583b 2574 0x47600, 0x47618,
8119c018
HS
2575 0x47800, 0x47814,
2576 0x47820, 0x4782c,
2577 0x50000, 0x50084,
2578 0x50090, 0x500cc,
2579 0x50300, 0x50384,
ab4b583b 2580 0x50400, 0x50400,
8119c018
HS
2581 0x50800, 0x50884,
2582 0x50890, 0x508cc,
2583 0x50b00, 0x50b84,
ab4b583b 2584 0x50c00, 0x50c00,
8119c018
HS
2585 0x51000, 0x51020,
2586 0x51028, 0x510b0,
ab4b583b
HS
2587 0x51300, 0x51324,
2588 };
2589
812034f1
HS
2590 u32 *buf_end = (u32 *)((char *)buf + buf_size);
2591 const unsigned int *reg_ranges;
2592 int reg_ranges_size, range;
2593 unsigned int chip_version = CHELSIO_CHIP_VERSION(adap->params.chip);
2594
2595 /* Select the right set of register ranges to dump depending on the
2596 * adapter chip type.
2597 */
2598 switch (chip_version) {
2599 case CHELSIO_T4:
2600 reg_ranges = t4_reg_ranges;
2601 reg_ranges_size = ARRAY_SIZE(t4_reg_ranges);
2602 break;
2603
2604 case CHELSIO_T5:
2605 reg_ranges = t5_reg_ranges;
2606 reg_ranges_size = ARRAY_SIZE(t5_reg_ranges);
2607 break;
2608
ab4b583b
HS
2609 case CHELSIO_T6:
2610 reg_ranges = t6_reg_ranges;
2611 reg_ranges_size = ARRAY_SIZE(t6_reg_ranges);
2612 break;
2613
812034f1
HS
2614 default:
2615 dev_err(adap->pdev_dev,
2616 "Unsupported chip version %d\n", chip_version);
2617 return;
2618 }
2619
2620 /* Clear the register buffer and insert the appropriate register
2621 * values selected by the above register ranges.
2622 */
2623 memset(buf, 0, buf_size);
2624 for (range = 0; range < reg_ranges_size; range += 2) {
2625 unsigned int reg = reg_ranges[range];
2626 unsigned int last_reg = reg_ranges[range + 1];
2627 u32 *bufp = (u32 *)((char *)buf + reg);
2628
2629 /* Iterate across the register range filling in the register
2630 * buffer but don't write past the end of the register buffer.
2631 */
2632 while (reg <= last_reg && bufp < buf_end) {
2633 *bufp++ = t4_read_reg(adap, reg);
2634 reg += sizeof(u32);
2635 }
2636 }
2637}
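/* A worked example of the range walk above (a sketch, not part of the
 * driver): each pair in a *_reg_ranges[] array is an inclusive
 * {first, last} register address, so a pair such as {0x1008, 0x101c}
 * covers (0x101c - 0x1008) / 4 + 1 = 6 consecutive 32-bit registers.
 * Because each value is stored at "buf + register address", the buffer
 * is a sparse image of the register file. One iteration of the outer
 * loop, with the end-of-buffer check omitted for brevity, boils down to:
 *
 *	unsigned int reg;
 *	u32 *bufp = (u32 *)((char *)buf + 0x1008);
 *
 *	for (reg = 0x1008; reg <= 0x101c; reg += sizeof(u32))
 *		*bufp++ = t4_read_reg(adap, reg);
 */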
2638
56d36be4 2639#define EEPROM_STAT_ADDR 0x7bfc
47ce9c48
SR
2640#define VPD_BASE 0x400
2641#define VPD_BASE_OLD 0
0a57a536 2642#define VPD_LEN 1024
63a92fe6 2643#define CHELSIO_VPD_UNIQUE_ID 0x82
56d36be4 2644
940c9c45
RL
2645/**
2646 * t4_eeprom_ptov - translate a physical EEPROM address to virtual
2647 * @phys_addr: the physical EEPROM address
2648 * @fn: the PCI function number
2649 * @sz: size of function-specific area
2650 *
2651 * Translate a physical EEPROM address to virtual. The first 1K is
2652 * accessed through virtual addresses starting at 31K, the rest is
2653 * accessed through virtual addresses starting at 0.
2654 *
2655 * The mapping is as follows:
2656 * [0..1K) -> [31K..32K)
2657 * [1K..1K+A) -> [31K-A..31K)
2658 * [1K+A..ES) -> [0..ES-A-1K)
2659 *
2660 * where A = @fn * @sz, and ES = EEPROM size.
2661 */
2662int t4_eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz)
2663{
2664 fn *= sz;
2665 if (phys_addr < 1024)
2666 return phys_addr + (31 << 10);
2667 if (phys_addr < 1024 + fn)
2668 return 31744 - fn + phys_addr - 1024;
2669 if (phys_addr < EEPROMSIZE)
2670 return phys_addr - 1024 - fn;
2671 return -EINVAL;
2672}
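/* A worked example of the mapping above, using illustrative values
 * fn = 2 and sz = 1024 (so A = fn * sz = 2048):
 *
 *	t4_eeprom_ptov(512, 2, 1024)  = 512 + 31744        = 32256
 *	t4_eeprom_ptov(1536, 2, 1024) = 31744 - 2048 + 512 = 30208
 *	t4_eeprom_ptov(4096, 2, 1024) = 4096 - 1024 - 2048 = 1024
 *
 * i.e. the shared first 1K lands in [31K..32K), this function's private
 * area lands just below 31K, and addresses beyond 1K + A collapse down
 * toward virtual address 0. Physical addresses at or beyond EEPROMSIZE
 * return -EINVAL.
 */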
2673
56d36be4
DM
2674/**
2675 * t4_seeprom_wp - enable/disable EEPROM write protection
2676 * @adapter: the adapter
2677 * @enable: whether to enable or disable write protection
2678 *
2679 * Enables or disables write protection on the serial EEPROM.
2680 */
2681int t4_seeprom_wp(struct adapter *adapter, bool enable)
2682{
2683 unsigned int v = enable ? 0xc : 0;
2684 int ret = pci_write_vpd(adapter->pdev, EEPROM_STAT_ADDR, 4, &v);
2685 return ret < 0 ? ret : 0;
2686}
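/* A typical calling pattern (sketch only; addr, len and data below are
 * placeholders): drop write protection around a VPD update and restore
 * it afterwards. Enabling protection writes the value 0xc to
 * EEPROM_STAT_ADDR, disabling it writes 0.
 *
 *	t4_seeprom_wp(adapter, false);			(allow writes)
 *	pci_write_vpd(adapter->pdev, addr, len, data);
 *	t4_seeprom_wp(adapter, true);			(re-protect)
 */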
2687
2688/**
098ef6c2 2689 * t4_get_raw_vpd_params - read VPD parameters from VPD EEPROM
56d36be4
DM
2690 * @adapter: adapter to read
2691 * @p: where to store the parameters
2692 *
2693 * Reads card parameters stored in VPD EEPROM.
2694 */
098ef6c2 2695int t4_get_raw_vpd_params(struct adapter *adapter, struct vpd_params *p)
56d36be4 2696{
098ef6c2
HS
2697 int i, ret = 0, addr;
2698 int ec, sn, pn, na;
8c357ebd 2699 u8 *vpd, csum;
23d88e1d 2700 unsigned int vpdr_len, kw_offset, id_len;
56d36be4 2701
8c357ebd
VP
2702 vpd = vmalloc(VPD_LEN);
2703 if (!vpd)
2704 return -ENOMEM;
2705
098ef6c2
HS
2706 /* Card information normally starts at VPD_BASE but early cards had
2707 * it at 0.
2708 */
47ce9c48
SR
2709 ret = pci_read_vpd(adapter->pdev, VPD_BASE, sizeof(u32), vpd);
2710 if (ret < 0)
2711 goto out;
63a92fe6
HS
2712
2713 /* The VPD shall begin with a unique identifier tag specified by the
2714 * PCI SIG: for Chelsio adapters the first byte of the VPD is
2715 * CHELSIO_VPD_UNIQUE_ID (0x82). The VPD programming software is
2716 * expected to place this entry at the beginning of the VPD
2717 * automatically.
2718 */
2719 addr = *vpd == CHELSIO_VPD_UNIQUE_ID ? VPD_BASE : VPD_BASE_OLD;
47ce9c48
SR
2720
2721 ret = pci_read_vpd(adapter->pdev, addr, VPD_LEN, vpd);
56d36be4 2722 if (ret < 0)
8c357ebd 2723 goto out;
56d36be4 2724
23d88e1d
DM
2725 if (vpd[0] != PCI_VPD_LRDT_ID_STRING) {
2726 dev_err(adapter->pdev_dev, "missing VPD ID string\n");
8c357ebd
VP
2727 ret = -EINVAL;
2728 goto out;
23d88e1d
DM
2729 }
2730
2731 id_len = pci_vpd_lrdt_size(vpd);
2732 if (id_len > ID_LEN)
2733 id_len = ID_LEN;
2734
2735 i = pci_vpd_find_tag(vpd, 0, VPD_LEN, PCI_VPD_LRDT_RO_DATA);
2736 if (i < 0) {
2737 dev_err(adapter->pdev_dev, "missing VPD-R section\n");
8c357ebd
VP
2738 ret = -EINVAL;
2739 goto out;
23d88e1d
DM
2740 }
2741
2742 vpdr_len = pci_vpd_lrdt_size(&vpd[i]);
2743 kw_offset = i + PCI_VPD_LRDT_TAG_SIZE;
2744 if (vpdr_len + kw_offset > VPD_LEN) {
226ec5fd 2745 dev_err(adapter->pdev_dev, "bad VPD-R length %u\n", vpdr_len);
8c357ebd
VP
2746 ret = -EINVAL;
2747 goto out;
226ec5fd
DM
2748 }
2749
2750#define FIND_VPD_KW(var, name) do { \
23d88e1d 2751 var = pci_vpd_find_info_keyword(vpd, kw_offset, vpdr_len, name); \
226ec5fd
DM
2752 if (var < 0) { \
2753 dev_err(adapter->pdev_dev, "missing VPD keyword " name "\n"); \
8c357ebd
VP
2754 ret = -EINVAL; \
2755 goto out; \
226ec5fd
DM
2756 } \
2757 var += PCI_VPD_INFO_FLD_HDR_SIZE; \
2758} while (0)
2759
2760 FIND_VPD_KW(i, "RV");
2761 for (csum = 0; i >= 0; i--)
2762 csum += vpd[i];
56d36be4
DM
2763
2764 if (csum) {
2765 dev_err(adapter->pdev_dev,
2766 "corrupted VPD EEPROM, actual csum %u\n", csum);
8c357ebd
VP
2767 ret = -EINVAL;
2768 goto out;
56d36be4
DM
2769 }
2770
226ec5fd
DM
2771 FIND_VPD_KW(ec, "EC");
2772 FIND_VPD_KW(sn, "SN");
a94cd705 2773 FIND_VPD_KW(pn, "PN");
098ef6c2 2774 FIND_VPD_KW(na, "NA");
226ec5fd
DM
2775#undef FIND_VPD_KW
2776
23d88e1d 2777 memcpy(p->id, vpd + PCI_VPD_LRDT_TAG_SIZE, id_len);
56d36be4 2778 strim(p->id);
226ec5fd 2779 memcpy(p->ec, vpd + ec, EC_LEN);
56d36be4 2780 strim(p->ec);
226ec5fd
DM
2781 i = pci_vpd_info_field_size(vpd + sn - PCI_VPD_INFO_FLD_HDR_SIZE);
2782 memcpy(p->sn, vpd + sn, min(i, SERNUM_LEN));
56d36be4 2783 strim(p->sn);
63a92fe6 2784 i = pci_vpd_info_field_size(vpd + pn - PCI_VPD_INFO_FLD_HDR_SIZE);
a94cd705
KS
2785 memcpy(p->pn, vpd + pn, min(i, PN_LEN));
2786 strim(p->pn);
098ef6c2
HS
2787 memcpy(p->na, vpd + na, min(i, MACADDR_LEN));
2788 strim((char *)p->na);
636f9d37 2789
098ef6c2
HS
2790out:
2791 vfree(vpd);
661dbeb9 2792 return ret < 0 ? ret : 0;
098ef6c2
HS
2793}
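/* Note on the "RV" keyword handled above: it holds a checksum byte chosen
 * so that the byte-wise sum of the VPD from offset 0 up to and including
 * the RV data byte is zero. A host-side check over a raw VPD image looks
 * the same (sketch, where rv is the offset of the RV data byte):
 *
 *	u8 csum = 0;
 *	for (i = rv; i >= 0; i--)
 *		csum += vpd[i];
 *	if (csum)
 *		the VPD image is corrupted
 */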
2794
2795/**
2796 * t4_get_vpd_params - read VPD parameters & retrieve Core Clock
2797 * @adapter: adapter to read
2798 * @p: where to store the parameters
2799 *
2800 * Reads card parameters stored in VPD EEPROM and retrieves the Core
2801 * Clock. This can only be called after a connection to the firmware
2802 * is established.
2803 */
2804int t4_get_vpd_params(struct adapter *adapter, struct vpd_params *p)
2805{
2806 u32 cclk_param, cclk_val;
2807 int ret;
2808
2809 /* Grab the raw VPD parameters.
2810 */
2811 ret = t4_get_raw_vpd_params(adapter, p);
2812 if (ret)
2813 return ret;
2814
2815 /* Ask firmware for the Core Clock since it knows how to translate the
636f9d37
VP
2816 * Reference Clock ('V2') VPD field into a Core Clock value ...
2817 */
5167865a
HS
2818 cclk_param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
2819 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_CCLK));
098ef6c2 2820 ret = t4_query_params(adapter, adapter->mbox, adapter->pf, 0,
636f9d37 2821 1, &cclk_param, &cclk_val);
8c357ebd 2822
636f9d37
VP
2823 if (ret)
2824 return ret;
2825 p->cclk = cclk_val;
2826
56d36be4
DM
2827 return 0;
2828}
2829
2830/* serial flash and firmware constants */
2831enum {
2832 SF_ATTEMPTS = 10, /* max retries for SF operations */
2833
2834 /* flash command opcodes */
2835 SF_PROG_PAGE = 2, /* program page */
2836 SF_WR_DISABLE = 4, /* disable writes */
2837 SF_RD_STATUS = 5, /* read status register */
2838 SF_WR_ENABLE = 6, /* enable writes */
2839 SF_RD_DATA_FAST = 0xb, /* read flash */
900a6596 2840 SF_RD_ID = 0x9f, /* read ID */
56d36be4 2841 SF_ERASE_SECTOR = 0xd8, /* erase sector */
56d36be4
DM
2842};
2843
2844/**
2845 * sf1_read - read data from the serial flash
2846 * @adapter: the adapter
2847 * @byte_cnt: number of bytes to read
2848 * @cont: whether another operation will be chained
2849 * @lock: whether to lock SF for PL access only
2850 * @valp: where to store the read data
2851 *
2852 * Reads up to 4 bytes of data from the serial flash. The location of
2853 * the read needs to be specified prior to calling this by issuing the
2854 * appropriate commands to the serial flash.
2855 */
2856static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
2857 int lock, u32 *valp)
2858{
2859 int ret;
2860
2861 if (!byte_cnt || byte_cnt > 4)
2862 return -EINVAL;
0d804338 2863 if (t4_read_reg(adapter, SF_OP_A) & SF_BUSY_F)
56d36be4 2864 return -EBUSY;
0d804338
HS
2865 t4_write_reg(adapter, SF_OP_A, SF_LOCK_V(lock) |
2866 SF_CONT_V(cont) | BYTECNT_V(byte_cnt - 1));
2867 ret = t4_wait_op_done(adapter, SF_OP_A, SF_BUSY_F, 0, SF_ATTEMPTS, 5);
56d36be4 2868 if (!ret)
0d804338 2869 *valp = t4_read_reg(adapter, SF_DATA_A);
56d36be4
DM
2870 return ret;
2871}
2872
2873/**
2874 * sf1_write - write data to the serial flash
2875 * @adapter: the adapter
2876 * @byte_cnt: number of bytes to write
2877 * @cont: whether another operation will be chained
2878 * @lock: whether to lock SF for PL access only
2879 * @val: value to write
2880 *
2881 * Writes up to 4 bytes of data to the serial flash. The location of
2882 * the write needs to be specified prior to calling this by issuing the
2883 * appropriate commands to the serial flash.
2884 */
2885static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont,
2886 int lock, u32 val)
2887{
2888 if (!byte_cnt || byte_cnt > 4)
2889 return -EINVAL;
0d804338 2890 if (t4_read_reg(adapter, SF_OP_A) & SF_BUSY_F)
56d36be4 2891 return -EBUSY;
0d804338
HS
2892 t4_write_reg(adapter, SF_DATA_A, val);
2893 t4_write_reg(adapter, SF_OP_A, SF_LOCK_V(lock) |
2894 SF_CONT_V(cont) | BYTECNT_V(byte_cnt - 1) | OP_V(1));
2895 return t4_wait_op_done(adapter, SF_OP_A, SF_BUSY_F, 0, SF_ATTEMPTS, 5);
56d36be4
DM
2896}
2897
2898/**
2899 * flash_wait_op - wait for a flash operation to complete
2900 * @adapter: the adapter
2901 * @attempts: max number of polls of the status register
2902 * @delay: delay between polls in ms
2903 *
2904 * Wait for a flash operation to complete by polling the status register.
2905 */
2906static int flash_wait_op(struct adapter *adapter, int attempts, int delay)
2907{
2908 int ret;
2909 u32 status;
2910
2911 while (1) {
2912 if ((ret = sf1_write(adapter, 1, 1, 1, SF_RD_STATUS)) != 0 ||
2913 (ret = sf1_read(adapter, 1, 0, 1, &status)) != 0)
2914 return ret;
2915 if (!(status & 1))
2916 return 0;
2917 if (--attempts == 0)
2918 return -EAGAIN;
2919 if (delay)
2920 msleep(delay);
2921 }
2922}
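/* The (attempts, delay) pair is sized per operation by the callers below:
 * a page program is polled up to 8 times at 1 ms intervals
 * (flash_wait_op(adapter, 8, 1) in t4_write_flash()), while a sector
 * erase is polled up to 14 times at 500 ms intervals
 * (t4_flash_erase_sectors()), reflecting how much longer an erase takes
 * to clear the busy bit (bit 0 of the flash status register).
 */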
2923
2924/**
2925 * t4_read_flash - read words from serial flash
2926 * @adapter: the adapter
2927 * @addr: the start address for the read
2928 * @nwords: how many 32-bit words to read
2929 * @data: where to store the read data
2930 * @byte_oriented: whether to store data as bytes or as words
2931 *
2932 * Read the specified number of 32-bit words from the serial flash.
2933 * If @byte_oriented is set the read data is stored as a byte array
2934 * (i.e., big-endian), otherwise as 32-bit words in the platform's
dbedd44e 2935 * natural endianness.
56d36be4 2936 */
49216c1c
HS
2937int t4_read_flash(struct adapter *adapter, unsigned int addr,
2938 unsigned int nwords, u32 *data, int byte_oriented)
56d36be4
DM
2939{
2940 int ret;
2941
900a6596 2942 if (addr + nwords * sizeof(u32) > adapter->params.sf_size || (addr & 3))
56d36be4
DM
2943 return -EINVAL;
2944
2945 addr = swab32(addr) | SF_RD_DATA_FAST;
2946
2947 if ((ret = sf1_write(adapter, 4, 1, 0, addr)) != 0 ||
2948 (ret = sf1_read(adapter, 1, 1, 0, data)) != 0)
2949 return ret;
2950
2951 for ( ; nwords; nwords--, data++) {
2952 ret = sf1_read(adapter, 4, nwords > 1, nwords == 1, data);
2953 if (nwords == 1)
0d804338 2954 t4_write_reg(adapter, SF_OP_A, 0); /* unlock SF */
56d36be4
DM
2955 if (ret)
2956 return ret;
2957 if (byte_oriented)
f404f80c 2958 *data = (__force __u32)(cpu_to_be32(*data));
56d36be4
DM
2959 }
2960 return 0;
2961}
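/* Example of the @byte_oriented distinction (a sketch): reading the same
 * flash word both ways
 *
 *	u32 w;
 *	t4_read_flash(adapter, addr, 1, &w, 0);	w is a host-endian word
 *	t4_read_flash(adapter, addr, 1, &w, 1);	w holds the flash bytes
 *						in storage (big-endian) order
 *
 * The byte-oriented form is what t4_write_flash() uses for its read-back
 * verification, so the page can be memcmp()'d directly against the
 * caller's byte buffer.
 */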
2962
2963/**
2964 * t4_write_flash - write up to a page of data to the serial flash
2965 * @adapter: the adapter
2966 * @addr: the start address to write
2967 * @n: length of data to write in bytes
2968 * @data: the data to write
2969 *
2970 * Writes up to a page of data (256 bytes) to the serial flash starting
2971 * at the given address. All the data must be written to the same page.
2972 */
2973static int t4_write_flash(struct adapter *adapter, unsigned int addr,
2974 unsigned int n, const u8 *data)
2975{
2976 int ret;
2977 u32 buf[64];
2978 unsigned int i, c, left, val, offset = addr & 0xff;
2979
900a6596 2980 if (addr >= adapter->params.sf_size || offset + n > SF_PAGE_SIZE)
56d36be4
DM
2981 return -EINVAL;
2982
2983 val = swab32(addr) | SF_PROG_PAGE;
2984
2985 if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
2986 (ret = sf1_write(adapter, 4, 1, 1, val)) != 0)
2987 goto unlock;
2988
2989 for (left = n; left; left -= c) {
2990 c = min(left, 4U);
2991 for (val = 0, i = 0; i < c; ++i)
2992 val = (val << 8) + *data++;
2993
2994 ret = sf1_write(adapter, c, c != left, 1, val);
2995 if (ret)
2996 goto unlock;
2997 }
900a6596 2998 ret = flash_wait_op(adapter, 8, 1);
56d36be4
DM
2999 if (ret)
3000 goto unlock;
3001
0d804338 3002 t4_write_reg(adapter, SF_OP_A, 0); /* unlock SF */
56d36be4
DM
3003
3004 /* Read the page to verify the write succeeded */
3005 ret = t4_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf, 1);
3006 if (ret)
3007 return ret;
3008
3009 if (memcmp(data - n, (u8 *)buf + offset, n)) {
3010 dev_err(adapter->pdev_dev,
3011 "failed to correctly write the flash page at %#x\n",
3012 addr);
3013 return -EIO;
3014 }
3015 return 0;
3016
3017unlock:
0d804338 3018 t4_write_reg(adapter, SF_OP_A, 0); /* unlock SF */
56d36be4
DM
3019 return ret;
3020}
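/* Writing more than one page is the caller's job: split the image into
 * SF_PAGE_SIZE chunks on page boundaries, as t4_load_fw() below does for
 * full pages. A minimal sketch for an arbitrary length, assuming @addr
 * starts page aligned:
 *
 *	unsigned int n, left;
 *	int ret = 0;
 *
 *	for (left = size; left && !ret; left -= n, addr += n, data += n) {
 *		n = min(left, (unsigned int)SF_PAGE_SIZE);
 *		ret = t4_write_flash(adap, addr, n, data);
 *	}
 */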
3021
3022/**
16e47624 3023 * t4_get_fw_version - read the firmware version
56d36be4
DM
3024 * @adapter: the adapter
3025 * @vers: where to place the version
3026 *
3027 * Reads the FW version from flash.
3028 */
16e47624 3029int t4_get_fw_version(struct adapter *adapter, u32 *vers)
56d36be4 3030{
16e47624
HS
3031 return t4_read_flash(adapter, FLASH_FW_START +
3032 offsetof(struct fw_hdr, fw_ver), 1,
3033 vers, 0);
56d36be4
DM
3034}
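/* The 32-bit version word read above packs four 8-bit fields which the
 * FW_HDR_FW_VER_*_G() macros extract. Assuming the usual
 * major/minor/micro/build ordering from the most significant byte down,
 * a value such as 0x01100400 decodes as firmware version 1.16.4.0:
 *
 *	major = (vers >> 24) & 0xff;	= 0x01 = 1
 *	minor = (vers >> 16) & 0xff;	= 0x10 = 16
 *	micro = (vers >>  8) & 0xff;	= 0x04 = 4
 *	build =  vers        & 0xff;	= 0x00 = 0
 */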
3035
0de72738
HS
3036/**
3037 * t4_get_bs_version - read the firmware bootstrap version
3038 * @adapter: the adapter
3039 * @vers: where to place the version
3040 *
3041 * Reads the FW Bootstrap version from flash.
3042 */
3043int t4_get_bs_version(struct adapter *adapter, u32 *vers)
3044{
3045 return t4_read_flash(adapter, FLASH_FWBOOTSTRAP_START +
3046 offsetof(struct fw_hdr, fw_ver), 1,
3047 vers, 0);
3048}
3049
56d36be4 3050/**
16e47624 3051 * t4_get_tp_version - read the TP microcode version
56d36be4
DM
3052 * @adapter: the adapter
3053 * @vers: where to place the version
3054 *
3055 * Reads the TP microcode version from flash.
3056 */
16e47624 3057int t4_get_tp_version(struct adapter *adapter, u32 *vers)
56d36be4 3058{
16e47624 3059 return t4_read_flash(adapter, FLASH_FW_START +
900a6596 3060 offsetof(struct fw_hdr, tp_microcode_ver),
56d36be4
DM
3061 1, vers, 0);
3062}
3063
ba3f8cd5
HS
3064/**
3065 * t4_get_exprom_version - return the Expansion ROM version (if any)
3066 * @adapter: the adapter
3067 * @vers: where to place the version
3068 *
3069 * Reads the Expansion ROM header from FLASH and returns the version
3070 * number (if present) through the @vers return value pointer. We return
3071 * this in the Firmware Version Format since it's convenient. Return
3072 * 0 on success, -ENOENT if no Expansion ROM is present.
3073 */
3074int t4_get_exprom_version(struct adapter *adap, u32 *vers)
3075{
3076 struct exprom_header {
3077 unsigned char hdr_arr[16]; /* must start with 0x55aa */
3078 unsigned char hdr_ver[4]; /* Expansion ROM version */
3079 } *hdr;
3080 u32 exprom_header_buf[DIV_ROUND_UP(sizeof(struct exprom_header),
3081 sizeof(u32))];
3082 int ret;
3083
3084 ret = t4_read_flash(adap, FLASH_EXP_ROM_START,
3085 ARRAY_SIZE(exprom_header_buf), exprom_header_buf,
3086 0);
3087 if (ret)
3088 return ret;
3089
3090 hdr = (struct exprom_header *)exprom_header_buf;
3091 if (hdr->hdr_arr[0] != 0x55 || hdr->hdr_arr[1] != 0xaa)
3092 return -ENOENT;
3093
3094 *vers = (FW_HDR_FW_VER_MAJOR_V(hdr->hdr_ver[0]) |
3095 FW_HDR_FW_VER_MINOR_V(hdr->hdr_ver[1]) |
3096 FW_HDR_FW_VER_MICRO_V(hdr->hdr_ver[2]) |
3097 FW_HDR_FW_VER_BUILD_V(hdr->hdr_ver[3]));
3098 return 0;
3099}
3100
760446f9
GG
3101/**
3102 * t4_get_vpd_version - return the VPD version
3103 * @adapter: the adapter
3104 * @vers: where to place the version
3105 *
3106 * Reads the VPD via the Firmware interface (thus this can only be called
3107 * once we're ready to issue Firmware commands). The format of the
3108 * VPD version is adapter specific. Returns 0 on success, an error on
3109 * failure.
3110 *
3111 * Note that early versions of the Firmware didn't include the ability
3112 * to retrieve the VPD version, so we zero-out the return-value parameter
3113 * in that case to avoid leaving it with garbage in it.
3114 *
3115 * Also note that the Firmware will return its cached copy of the VPD
3116 * Revision ID, not the actual Revision ID as written in the Serial
3117 * EEPROM. This is only an issue if a new VPD has been written and the
3118 * Firmware/Chip haven't yet gone through a RESET sequence. So it's best
3119 * to defer calling this routine till after a FW_RESET_CMD has been issued
3120 * if the Host Driver will be performing a full adapter initialization.
3121 */
3122int t4_get_vpd_version(struct adapter *adapter, u32 *vers)
3123{
3124 u32 vpdrev_param;
3125 int ret;
3126
3127 vpdrev_param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
3128 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_VPDREV));
3129 ret = t4_query_params(adapter, adapter->mbox, adapter->pf, 0,
3130 1, &vpdrev_param, vers);
3131 if (ret)
3132 *vers = 0;
3133 return ret;
3134}
3135
3136/**
3137 * t4_get_scfg_version - return the Serial Configuration version
3138 * @adapter: the adapter
3139 * @vers: where to place the version
3140 *
3141 * Reads the Serial Configuration Version via the Firmware interface
3142 * (thus this can only be called once we're ready to issue Firmware
3143 * commands). The format of the Serial Configuration version is
3144 * adapter specific. Returns 0 on success, an error on failure.
3145 *
3146 * Note that early versions of the Firmware didn't include the ability
3147 * to retrieve the Serial Configuration version, so we zero-out the
3148 * return-value parameter in that case to avoid leaving it with
3149 * garbage in it.
3150 *
3151 * Also note that the Firmware will return its cached copy of the Serial
3152 * Initialization Revision ID, not the actual Revision ID as written in
3153 * the Serial EEPROM. This is only an issue if a new VPD has been written
3154 * and the Firmware/Chip haven't yet gone through a RESET sequence. So
3155 * it's best to defer calling this routine till after a FW_RESET_CMD has
3156 * been issued if the Host Driver will be performing a full adapter
3157 * initialization.
3158 */
3159int t4_get_scfg_version(struct adapter *adapter, u32 *vers)
3160{
3161 u32 scfgrev_param;
3162 int ret;
3163
3164 scfgrev_param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
3165 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_SCFGREV));
3166 ret = t4_query_params(adapter, adapter->mbox, adapter->pf, 0,
3167 1, &scfgrev_param, vers);
3168 if (ret)
3169 *vers = 0;
3170 return ret;
3171}
3172
3173/**
3174 * t4_get_version_info - extract various chip/firmware version information
3175 * @adapter: the adapter
3176 *
3177 * Reads various chip/firmware version numbers and stores them into the
3178 * adapter's Adapter Parameters structure. If any of the reads fails,
3179 * the first failure is returned, but all of the version numbers are
3180 * still read.
3181 */
3182int t4_get_version_info(struct adapter *adapter)
3183{
3184 int ret = 0;
3185
3186 #define FIRST_RET(__getvinfo) \
3187 do { \
3188 int __ret = __getvinfo; \
3189 if (__ret && !ret) \
3190 ret = __ret; \
3191 } while (0)
3192
3193 FIRST_RET(t4_get_fw_version(adapter, &adapter->params.fw_vers));
3194 FIRST_RET(t4_get_bs_version(adapter, &adapter->params.bs_vers));
3195 FIRST_RET(t4_get_tp_version(adapter, &adapter->params.tp_vers));
3196 FIRST_RET(t4_get_exprom_version(adapter, &adapter->params.er_vers));
3197 FIRST_RET(t4_get_scfg_version(adapter, &adapter->params.scfg_vers));
3198 FIRST_RET(t4_get_vpd_version(adapter, &adapter->params.vpd_vers));
3199
3200 #undef FIRST_RET
3201 return ret;
3202}
3203
3204/**
3205 * t4_dump_version_info - dump all of the adapter configuration IDs
3206 * @adapter: the adapter
3207 *
3208 * Dumps all of the various bits of adapter configuration version/revision
3209 * IDs information. This is typically called at some point after
3210 * t4_get_version_info() has been called.
3211 */
3212void t4_dump_version_info(struct adapter *adapter)
3213{
3214 /* Device information */
3215 dev_info(adapter->pdev_dev, "Chelsio %s rev %d\n",
3216 adapter->params.vpd.id,
3217 CHELSIO_CHIP_RELEASE(adapter->params.chip));
3218 dev_info(adapter->pdev_dev, "S/N: %s, P/N: %s\n",
3219 adapter->params.vpd.sn, adapter->params.vpd.pn);
3220
3221 /* Firmware Version */
3222 if (!adapter->params.fw_vers)
3223 dev_warn(adapter->pdev_dev, "No firmware loaded\n");
3224 else
3225 dev_info(adapter->pdev_dev, "Firmware version: %u.%u.%u.%u\n",
3226 FW_HDR_FW_VER_MAJOR_G(adapter->params.fw_vers),
3227 FW_HDR_FW_VER_MINOR_G(adapter->params.fw_vers),
3228 FW_HDR_FW_VER_MICRO_G(adapter->params.fw_vers),
3229 FW_HDR_FW_VER_BUILD_G(adapter->params.fw_vers));
3230
3231 /* Bootstrap Firmware Version. (Some adapters don't have Bootstrap
3232 * Firmware, so dev_info() is more appropriate here.)
3233 */
3234 if (!adapter->params.bs_vers)
3235 dev_info(adapter->pdev_dev, "No bootstrap loaded\n");
3236 else
3237 dev_info(adapter->pdev_dev, "Bootstrap version: %u.%u.%u.%u\n",
3238 FW_HDR_FW_VER_MAJOR_G(adapter->params.bs_vers),
3239 FW_HDR_FW_VER_MINOR_G(adapter->params.bs_vers),
3240 FW_HDR_FW_VER_MICRO_G(adapter->params.bs_vers),
3241 FW_HDR_FW_VER_BUILD_G(adapter->params.bs_vers));
3242
3243 /* TP Microcode Version */
3244 if (!adapter->params.tp_vers)
3245 dev_warn(adapter->pdev_dev, "No TP Microcode loaded\n");
3246 else
3247 dev_info(adapter->pdev_dev,
3248 "TP Microcode version: %u.%u.%u.%u\n",
3249 FW_HDR_FW_VER_MAJOR_G(adapter->params.tp_vers),
3250 FW_HDR_FW_VER_MINOR_G(adapter->params.tp_vers),
3251 FW_HDR_FW_VER_MICRO_G(adapter->params.tp_vers),
3252 FW_HDR_FW_VER_BUILD_G(adapter->params.tp_vers));
3253
3254 /* Expansion ROM version */
3255 if (!adapter->params.er_vers)
3256 dev_info(adapter->pdev_dev, "No Expansion ROM loaded\n");
3257 else
3258 dev_info(adapter->pdev_dev,
3259 "Expansion ROM version: %u.%u.%u.%u\n",
3260 FW_HDR_FW_VER_MAJOR_G(adapter->params.er_vers),
3261 FW_HDR_FW_VER_MINOR_G(adapter->params.er_vers),
3262 FW_HDR_FW_VER_MICRO_G(adapter->params.er_vers),
3263 FW_HDR_FW_VER_BUILD_G(adapter->params.er_vers));
3264
3265 /* Serial Configuration version */
3266 dev_info(adapter->pdev_dev, "Serial Configuration version: %#x\n",
3267 adapter->params.scfg_vers);
3268
3269 /* VPD Version */
3270 dev_info(adapter->pdev_dev, "VPD version: %#x\n",
3271 adapter->params.vpd_vers);
3272}
3273
a69265e9
HS
3274/**
3275 * t4_check_fw_version - check if the FW is supported with this driver
3276 * @adap: the adapter
3277 *
3278 * Checks if an adapter's FW is compatible with the driver. Returns 0
3279 * if the firmware satisfies the minimum supported version, or a
3280 * negative error if the version could not be read or is too old.
3281 */
3282int t4_check_fw_version(struct adapter *adap)
3283{
21d11bd6 3284 int i, ret, major, minor, micro;
a69265e9
HS
3285 int exp_major, exp_minor, exp_micro;
3286 unsigned int chip_version = CHELSIO_CHIP_VERSION(adap->params.chip);
3287
3288 ret = t4_get_fw_version(adap, &adap->params.fw_vers);
21d11bd6
HS
3289 /* Try multiple times before returning error */
3290 for (i = 0; (ret == -EBUSY || ret == -EAGAIN) && i < 3; i++)
3291 ret = t4_get_fw_version(adap, &adap->params.fw_vers);
3292
a69265e9
HS
3293 if (ret)
3294 return ret;
3295
3296 major = FW_HDR_FW_VER_MAJOR_G(adap->params.fw_vers);
3297 minor = FW_HDR_FW_VER_MINOR_G(adap->params.fw_vers);
3298 micro = FW_HDR_FW_VER_MICRO_G(adap->params.fw_vers);
3299
3300 switch (chip_version) {
3301 case CHELSIO_T4:
3302 exp_major = T4FW_MIN_VERSION_MAJOR;
3303 exp_minor = T4FW_MIN_VERSION_MINOR;
3304 exp_micro = T4FW_MIN_VERSION_MICRO;
3305 break;
3306 case CHELSIO_T5:
3307 exp_major = T5FW_MIN_VERSION_MAJOR;
3308 exp_minor = T5FW_MIN_VERSION_MINOR;
3309 exp_micro = T5FW_MIN_VERSION_MICRO;
3310 break;
3311 case CHELSIO_T6:
3312 exp_major = T6FW_MIN_VERSION_MAJOR;
3313 exp_minor = T6FW_MIN_VERSION_MINOR;
3314 exp_micro = T6FW_MIN_VERSION_MICRO;
3315 break;
3316 default:
3317 dev_err(adap->pdev_dev, "Unsupported chip type, %x\n",
3318 adap->chip);
3319 return -EINVAL;
3320 }
3321
3322 if (major < exp_major || (major == exp_major && minor < exp_minor) ||
3323 (major == exp_major && minor == exp_minor && micro < exp_micro)) {
3324 dev_err(adap->pdev_dev,
3325 "Card has firmware version %u.%u.%u, minimum "
3326 "supported firmware is %u.%u.%u.\n", major, minor,
3327 micro, exp_major, exp_minor, exp_micro);
3328 return -EFAULT;
3329 }
3330 return 0;
3331}
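/* Because the packed version word keeps major in the most significant
 * byte, the field-by-field comparison above is equivalent to comparing
 * packed values with the build byte masked off (a sketch, assuming the
 * same field layout as the FW_HDR_FW_VER_*_G() macros and a hypothetical
 * packed_min_version constant):
 *
 *	if ((adap->params.fw_vers & ~0xffU) < (packed_min_version & ~0xffU))
 *		the firmware is older than the supported minimum
 *
 * The explicit form is used above because it also yields the individual
 * numbers printed in the error message.
 */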
3332
16e47624
HS
3333/* Is the given firmware API compatible with the one the driver was compiled
3334 * with?
56d36be4 3335 */
16e47624 3336static int fw_compatible(const struct fw_hdr *hdr1, const struct fw_hdr *hdr2)
56d36be4 3337{
56d36be4 3338
16e47624
HS
3339 /* short circuit if it's the exact same firmware version */
3340 if (hdr1->chip == hdr2->chip && hdr1->fw_ver == hdr2->fw_ver)
3341 return 1;
56d36be4 3342
16e47624
HS
3343#define SAME_INTF(x) (hdr1->intfver_##x == hdr2->intfver_##x)
3344 if (hdr1->chip == hdr2->chip && SAME_INTF(nic) && SAME_INTF(vnic) &&
3345 SAME_INTF(ri) && SAME_INTF(iscsi) && SAME_INTF(fcoe))
3346 return 1;
3347#undef SAME_INTF
0a57a536 3348
16e47624
HS
3349 return 0;
3350}
56d36be4 3351
16e47624
HS
3352/* The firmware in the filesystem is usable, but should it be installed?
3353 * Whenever this routine decides the filesystem firmware should be
3354 * installed, it logs the reason in detail.
3355 */
3356static int should_install_fs_fw(struct adapter *adap, int card_fw_usable,
3357 int k, int c)
3358{
3359 const char *reason;
3360
3361 if (!card_fw_usable) {
3362 reason = "incompatible or unusable";
3363 goto install;
e69972f5
JH
3364 }
3365
16e47624
HS
3366 if (k > c) {
3367 reason = "older than the version supported with this driver";
3368 goto install;
56d36be4
DM
3369 }
3370
16e47624
HS
3371 return 0;
3372
3373install:
3374 dev_err(adap->pdev_dev, "firmware on card (%u.%u.%u.%u) is %s, "
3375 "installing firmware %u.%u.%u.%u on card.\n",
b2e1a3f0
HS
3376 FW_HDR_FW_VER_MAJOR_G(c), FW_HDR_FW_VER_MINOR_G(c),
3377 FW_HDR_FW_VER_MICRO_G(c), FW_HDR_FW_VER_BUILD_G(c), reason,
3378 FW_HDR_FW_VER_MAJOR_G(k), FW_HDR_FW_VER_MINOR_G(k),
3379 FW_HDR_FW_VER_MICRO_G(k), FW_HDR_FW_VER_BUILD_G(k));
56d36be4 3380
56d36be4
DM
3381 return 1;
3382}
3383
16e47624
HS
3384int t4_prep_fw(struct adapter *adap, struct fw_info *fw_info,
3385 const u8 *fw_data, unsigned int fw_size,
3386 struct fw_hdr *card_fw, enum dev_state state,
3387 int *reset)
3388{
3389 int ret, card_fw_usable, fs_fw_usable;
3390 const struct fw_hdr *fs_fw;
3391 const struct fw_hdr *drv_fw;
3392
3393 drv_fw = &fw_info->fw_hdr;
3394
3395 /* Read the header of the firmware on the card */
3396 ret = -t4_read_flash(adap, FLASH_FW_START,
3397 sizeof(*card_fw) / sizeof(uint32_t),
3398 (uint32_t *)card_fw, 1);
3399 if (ret == 0) {
3400 card_fw_usable = fw_compatible(drv_fw, (const void *)card_fw);
3401 } else {
3402 dev_err(adap->pdev_dev,
3403 "Unable to read card's firmware header: %d\n", ret);
3404 card_fw_usable = 0;
3405 }
3406
3407 if (fw_data != NULL) {
3408 fs_fw = (const void *)fw_data;
3409 fs_fw_usable = fw_compatible(drv_fw, fs_fw);
3410 } else {
3411 fs_fw = NULL;
3412 fs_fw_usable = 0;
3413 }
3414
3415 if (card_fw_usable && card_fw->fw_ver == drv_fw->fw_ver &&
3416 (!fs_fw_usable || fs_fw->fw_ver == drv_fw->fw_ver)) {
3417 /* Common case: the firmware on the card is an exact match and
3418 * the filesystem one is an exact match too, or the filesystem
3419 * one is absent/incompatible.
3420 */
3421 } else if (fs_fw_usable && state == DEV_STATE_UNINIT &&
3422 should_install_fs_fw(adap, card_fw_usable,
3423 be32_to_cpu(fs_fw->fw_ver),
3424 be32_to_cpu(card_fw->fw_ver))) {
3425 ret = -t4_fw_upgrade(adap, adap->mbox, fw_data,
3426 fw_size, 0);
3427 if (ret != 0) {
3428 dev_err(adap->pdev_dev,
3429 "failed to install firmware: %d\n", ret);
3430 goto bye;
3431 }
3432
3433 /* Installed successfully, update the cached header too. */
e3d50738 3434 *card_fw = *fs_fw;
16e47624
HS
3435 card_fw_usable = 1;
3436 *reset = 0; /* already reset as part of load_fw */
3437 }
3438
3439 if (!card_fw_usable) {
3440 uint32_t d, c, k;
3441
3442 d = be32_to_cpu(drv_fw->fw_ver);
3443 c = be32_to_cpu(card_fw->fw_ver);
3444 k = fs_fw ? be32_to_cpu(fs_fw->fw_ver) : 0;
3445
3446 dev_err(adap->pdev_dev, "Cannot find a usable firmware: "
3447 "chip state %d, "
3448 "driver compiled with %d.%d.%d.%d, "
3449 "card has %d.%d.%d.%d, filesystem has %d.%d.%d.%d\n",
3450 state,
b2e1a3f0
HS
3451 FW_HDR_FW_VER_MAJOR_G(d), FW_HDR_FW_VER_MINOR_G(d),
3452 FW_HDR_FW_VER_MICRO_G(d), FW_HDR_FW_VER_BUILD_G(d),
3453 FW_HDR_FW_VER_MAJOR_G(c), FW_HDR_FW_VER_MINOR_G(c),
3454 FW_HDR_FW_VER_MICRO_G(c), FW_HDR_FW_VER_BUILD_G(c),
3455 FW_HDR_FW_VER_MAJOR_G(k), FW_HDR_FW_VER_MINOR_G(k),
3456 FW_HDR_FW_VER_MICRO_G(k), FW_HDR_FW_VER_BUILD_G(k));
16e47624
HS
3457 ret = EINVAL;
3458 goto bye;
3459 }
3460
3461 /* We're using whatever's on the card and it's known to be good. */
3462 adap->params.fw_vers = be32_to_cpu(card_fw->fw_ver);
3463 adap->params.tp_vers = be32_to_cpu(card_fw->tp_microcode_ver);
3464
3465bye:
3466 return ret;
3467}
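/* Summary of the decision t4_prep_fw() makes above:
 *
 *  - the card firmware is usable and an exact version match for the
 *    driver (and for the filesystem image, if one was supplied):
 *    keep what is already on the card;
 *  - the filesystem image is usable, the adapter is still uninitialized
 *    and should_install_fs_fw() agrees: flash the filesystem image,
 *    use it, and clear *reset since the upgrade already reset the chip;
 *  - otherwise nothing usable remains: log the driver, card and
 *    filesystem versions and fail.
 */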
3468
56d36be4
DM
3469/**
3470 * t4_flash_erase_sectors - erase a range of flash sectors
3471 * @adapter: the adapter
3472 * @start: the first sector to erase
3473 * @end: the last sector to erase
3474 *
3475 * Erases the sectors in the given inclusive range.
3476 */
3477static int t4_flash_erase_sectors(struct adapter *adapter, int start, int end)
3478{
3479 int ret = 0;
3480
c0d5b8cf
HS
3481 if (end >= adapter->params.sf_nsec)
3482 return -EINVAL;
3483
56d36be4
DM
3484 while (start <= end) {
3485 if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
3486 (ret = sf1_write(adapter, 4, 0, 1,
3487 SF_ERASE_SECTOR | (start << 8))) != 0 ||
900a6596 3488 (ret = flash_wait_op(adapter, 14, 500)) != 0) {
56d36be4
DM
3489 dev_err(adapter->pdev_dev,
3490 "erase of flash sector %d failed, error %d\n",
3491 start, ret);
3492 break;
3493 }
3494 start++;
3495 }
0d804338 3496 t4_write_reg(adapter, SF_OP_A, 0); /* unlock SF */
56d36be4
DM
3497 return ret;
3498}
3499
636f9d37
VP
3500/**
3501 * t4_flash_cfg_addr - return the address of the flash configuration file
3502 * @adapter: the adapter
3503 *
3504 * Return the address within the flash where the Firmware Configuration
3505 * File is stored.
3506 */
3507unsigned int t4_flash_cfg_addr(struct adapter *adapter)
3508{
3509 if (adapter->params.sf_size == 0x100000)
3510 return FLASH_FPGA_CFG_START;
3511 else
3512 return FLASH_CFG_START;
3513}
3514
79af221d
HS
3515/* Return TRUE if the specified firmware matches the adapter. I.e. T4
3516 * firmware for T4 adapters, T5 firmware for T5 adapters, etc. We go ahead
3517 * and emit an error message for mismatched firmware to save our caller the
3518 * effort ...
3519 */
3520static bool t4_fw_matches_chip(const struct adapter *adap,
3521 const struct fw_hdr *hdr)
3522{
3523 /* The expression below will return FALSE for any unsupported adapter
3524 * which will keep us "honest" in the future ...
3525 */
3526 if ((is_t4(adap->params.chip) && hdr->chip == FW_HDR_CHIP_T4) ||
3ccc6cf7
HS
3527 (is_t5(adap->params.chip) && hdr->chip == FW_HDR_CHIP_T5) ||
3528 (is_t6(adap->params.chip) && hdr->chip == FW_HDR_CHIP_T6))
79af221d
HS
3529 return true;
3530
3531 dev_err(adap->pdev_dev,
3532 "FW image (%d) is not suitable for this adapter (%d)\n",
3533 hdr->chip, CHELSIO_CHIP_VERSION(adap->params.chip));
3534 return false;
3535}
3536
56d36be4
DM
3537/**
3538 * t4_load_fw - download firmware
3539 * @adap: the adapter
3540 * @fw_data: the firmware image to write
3541 * @size: image size
3542 *
3543 * Write the supplied firmware image to the card's serial flash.
3544 */
3545int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
3546{
3547 u32 csum;
3548 int ret, addr;
3549 unsigned int i;
3550 u8 first_page[SF_PAGE_SIZE];
404d9e3f 3551 const __be32 *p = (const __be32 *)fw_data;
56d36be4 3552 const struct fw_hdr *hdr = (const struct fw_hdr *)fw_data;
900a6596 3553 unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
15962a18
AV
3554 unsigned int fw_start_sec = FLASH_FW_START_SEC;
3555 unsigned int fw_size = FLASH_FW_MAX_SIZE;
3556 unsigned int fw_start = FLASH_FW_START;
56d36be4
DM
3557
3558 if (!size) {
3559 dev_err(adap->pdev_dev, "FW image has no data\n");
3560 return -EINVAL;
3561 }
3562 if (size & 511) {
3563 dev_err(adap->pdev_dev,
3564 "FW image size not multiple of 512 bytes\n");
3565 return -EINVAL;
3566 }
f404f80c 3567 if ((unsigned int)be16_to_cpu(hdr->len512) * 512 != size) {
56d36be4
DM
3568 dev_err(adap->pdev_dev,
3569 "FW image size differs from size in FW header\n");
3570 return -EINVAL;
3571 }
15962a18 3572 if (size > fw_size) {
56d36be4 3573 dev_err(adap->pdev_dev, "FW image too large, max is %u bytes\n",
15962a18 3574 fw_size);
56d36be4
DM
3575 return -EFBIG;
3576 }
79af221d
HS
3577 if (!t4_fw_matches_chip(adap, hdr))
3578 return -EINVAL;
56d36be4
DM
3579
3580 for (csum = 0, i = 0; i < size / sizeof(csum); i++)
f404f80c 3581 csum += be32_to_cpu(p[i]);
56d36be4
DM
3582
3583 if (csum != 0xffffffff) {
3584 dev_err(adap->pdev_dev,
3585 "corrupted firmware image, checksum %#x\n", csum);
3586 return -EINVAL;
3587 }
3588
900a6596
DM
3589 i = DIV_ROUND_UP(size, sf_sec_size); /* # of sectors spanned */
3590 ret = t4_flash_erase_sectors(adap, fw_start_sec, fw_start_sec + i - 1);
56d36be4
DM
3591 if (ret)
3592 goto out;
3593
3594 /*
3595 * We write the correct version at the end so the driver can see a bad
3596 * version if the FW write fails. Start by writing a copy of the
3597 * first page with a bad version.
3598 */
3599 memcpy(first_page, fw_data, SF_PAGE_SIZE);
f404f80c 3600 ((struct fw_hdr *)first_page)->fw_ver = cpu_to_be32(0xffffffff);
15962a18 3601 ret = t4_write_flash(adap, fw_start, SF_PAGE_SIZE, first_page);
56d36be4
DM
3602 if (ret)
3603 goto out;
3604
15962a18 3605 addr = fw_start;
56d36be4
DM
3606 for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
3607 addr += SF_PAGE_SIZE;
3608 fw_data += SF_PAGE_SIZE;
3609 ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, fw_data);
3610 if (ret)
3611 goto out;
3612 }
3613
3614 ret = t4_write_flash(adap,
15962a18 3615 fw_start + offsetof(struct fw_hdr, fw_ver),
56d36be4
DM
3616 sizeof(hdr->fw_ver), (const u8 *)&hdr->fw_ver);
3617out:
3618 if (ret)
3619 dev_err(adap->pdev_dev, "firmware download failed, error %d\n",
3620 ret);
dff04bce
HS
3621 else
3622 ret = t4_get_fw_version(adap, &adap->params.fw_vers);
56d36be4
DM
3623 return ret;
3624}
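/* The checksum rule enforced above: the sum of all big-endian 32-bit
 * words of a valid firmware image is 0xffffffff. A host-side pre-check
 * of an image buffer uses the same arithmetic (sketch):
 *
 *	const __be32 *p = (const __be32 *)fw_data;
 *	u32 csum = 0;
 *	unsigned int i;
 *
 *	for (i = 0; i < size / sizeof(csum); i++)
 *		csum += be32_to_cpu(p[i]);
 *	if (csum != 0xffffffff)
 *		the image is corrupted and should be rejected
 */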
3625
01b69614
HS
3626/**
3627 * t4_phy_fw_ver - return current PHY firmware version
3628 * @adap: the adapter
3629 * @phy_fw_ver: return value buffer for PHY firmware version
3630 *
3631 * Returns the current version of external PHY firmware on the
3632 * adapter.
3633 */
3634int t4_phy_fw_ver(struct adapter *adap, int *phy_fw_ver)
3635{
3636 u32 param, val;
3637 int ret;
3638
3639 param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
3640 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PHYFW) |
3641 FW_PARAMS_PARAM_Y_V(adap->params.portvec) |
3642 FW_PARAMS_PARAM_Z_V(FW_PARAMS_PARAM_DEV_PHYFW_VERSION));
b2612722 3643 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1,
01b69614
HS
3644 &param, &val);
3645 if (ret < 0)
3646 return ret;
3647 *phy_fw_ver = val;
3648 return 0;
3649}
3650
3651/**
3652 * t4_load_phy_fw - download port PHY firmware
3653 * @adap: the adapter
3654 * @win: the PCI-E Memory Window index to use for t4_memory_rw()
3655 * @win_lock: the lock to use to guard the memory copy
3656 * @phy_fw_version: function to check PHY firmware versions
3657 * @phy_fw_data: the PHY firmware image to write
3658 * @phy_fw_size: image size
3659 *
3660 * Transfer the specified PHY firmware to the adapter. If a non-NULL
3661 * @phy_fw_version is supplied, then it will be used to determine if
3662 * it's necessary to perform the transfer by comparing the version
3663 * of any existing adapter PHY firmware with that of the passed in
3664 * PHY firmware image. If @win_lock is non-NULL then it will be used
3665 * around the call to t4_memory_rw() which transfers the PHY firmware
3666 * to the adapter.
3667 *
3668 * A negative error number will be returned if an error occurs. If
3669 * version number support is available and there's no need to upgrade
3670 * the firmware, 0 will be returned. If firmware is successfully
3671 * transferred to the adapter, 1 will be returned.
3672 *
3673 * NOTE: some adapters only have local RAM to store the PHY firmware. As
3674 * a result, a RESET of the adapter would cause that RAM to lose its
3675 * contents. Thus, loading PHY firmware on such adapters must happen
3676 * after any FW_RESET_CMDs ...
3677 */
3678int t4_load_phy_fw(struct adapter *adap,
3679 int win, spinlock_t *win_lock,
3680 int (*phy_fw_version)(const u8 *, size_t),
3681 const u8 *phy_fw_data, size_t phy_fw_size)
3682{
3683 unsigned long mtype = 0, maddr = 0;
3684 u32 param, val;
3685 int cur_phy_fw_ver = 0, new_phy_fw_vers = 0;
3686 int ret;
3687
3688 /* If we have version number support, then check to see if the adapter
3689 * already has up-to-date PHY firmware loaded.
3690 */
3691 if (phy_fw_version) {
3692 new_phy_fw_vers = phy_fw_version(phy_fw_data, phy_fw_size);
3693 ret = t4_phy_fw_ver(adap, &cur_phy_fw_ver);
3694 if (ret < 0)
3695 return ret;
3696
3697 if (cur_phy_fw_ver >= new_phy_fw_vers) {
3698 CH_WARN(adap, "PHY Firmware already up-to-date, "
3699 "version %#x\n", cur_phy_fw_ver);
3700 return 0;
3701 }
3702 }
3703
3704 /* Ask the firmware where it wants us to copy the PHY firmware image.
3705 * Passing the file size requires a special version of the READ command,
3706 * which sends the file size via the values field of PARAMS_CMD and
3707 * retrieves the firmware's reply (the target memory type and address)
3708 * in that same buffer.
3709 */
3710 param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
3711 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PHYFW) |
3712 FW_PARAMS_PARAM_Y_V(adap->params.portvec) |
3713 FW_PARAMS_PARAM_Z_V(FW_PARAMS_PARAM_DEV_PHYFW_DOWNLOAD));
3714 val = phy_fw_size;
b2612722 3715 ret = t4_query_params_rw(adap, adap->mbox, adap->pf, 0, 1,
8f46d467 3716 &param, &val, 1, true);
01b69614
HS
3717 if (ret < 0)
3718 return ret;
3719 mtype = val >> 8;
3720 maddr = (val & 0xff) << 16;
3721
3722 /* Copy the supplied PHY Firmware image to the adapter memory location
3723 * allocated by the adapter firmware.
3724 */
3725 if (win_lock)
3726 spin_lock_bh(win_lock);
3727 ret = t4_memory_rw(adap, win, mtype, maddr,
3728 phy_fw_size, (__be32 *)phy_fw_data,
3729 T4_MEMORY_WRITE);
3730 if (win_lock)
3731 spin_unlock_bh(win_lock);
3732 if (ret)
3733 return ret;
3734
3735 /* Tell the firmware that the PHY firmware image has been written to
3736 * RAM and it can now start copying it over to the PHYs. The chip
3737 * firmware will RESET the affected PHYs as part of this operation
3738 * leaving them running the new PHY firmware image.
3739 */
3740 param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
3741 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PHYFW) |
3742 FW_PARAMS_PARAM_Y_V(adap->params.portvec) |
3743 FW_PARAMS_PARAM_Z_V(FW_PARAMS_PARAM_DEV_PHYFW_DOWNLOAD));
b2612722 3744 ret = t4_set_params_timeout(adap, adap->mbox, adap->pf, 0, 1,
01b69614
HS
3745 &param, &val, 30000);
3746
3747 /* If we have version number support, then check to see that the new
3748 * firmware got loaded properly.
3749 */
3750 if (phy_fw_version) {
3751 ret = t4_phy_fw_ver(adap, &cur_phy_fw_ver);
3752 if (ret < 0)
3753 return ret;
3754
3755 if (cur_phy_fw_ver != new_phy_fw_vers) {
3756 CH_WARN(adap, "PHY Firmware did not update: "
3757 "version on adapter %#x, "
3758 "version flashed %#x\n",
3759 cur_phy_fw_ver, new_phy_fw_vers);
3760 return -ENXIO;
3761 }
3762 }
3763
3764 return 1;
3765}
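
/* Illustrative sketch only (not part of the driver): a hypothetical caller
 * might drive t4_load_phy_fw() roughly as below. The wrapper name, the
 * memory-window index, the lock and the version-extraction callback are
 * assumptions for the example; the real caller lives elsewhere in cxgb4.
 */
#if 0	/* example, not compiled */
static int example_load_phy_firmware(struct adapter *adap, int memwin,
				     spinlock_t *memwin_lock,
				     int (*get_phy_ver)(const u8 *, size_t),
				     const u8 *img, size_t len)
{
	int ret;

	/* Must run after any FW_RESET_CMD; see the NOTE above. */
	ret = t4_load_phy_fw(adap, memwin, memwin_lock, get_phy_ver, img, len);
	if (ret < 0)
		return ret;	/* hard error */
	if (ret == 0)
		return 0;	/* already up to date, nothing written */
	return 0;		/* ret == 1: new image transferred */
}
#endif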
3766
49216c1c
HS
3767/**
3768 * t4_fwcache - firmware cache operation
3769 * @adap: the adapter
3770 * @op : the operation (flush or flush and invalidate)
3771 */
3772int t4_fwcache(struct adapter *adap, enum fw_params_param_dev_fwcache op)
3773{
3774 struct fw_params_cmd c;
3775
3776 memset(&c, 0, sizeof(c));
3777 c.op_to_vfn =
3778 cpu_to_be32(FW_CMD_OP_V(FW_PARAMS_CMD) |
3779 FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
b2612722 3780 FW_PARAMS_CMD_PFN_V(adap->pf) |
49216c1c
HS
3781 FW_PARAMS_CMD_VFN_V(0));
3782 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
3783 c.param[0].mnem =
3784 cpu_to_be32(FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
3785 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_FWCACHE));
3786 c.param[0].val = (__force __be32)op;
3787
3788 return t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), NULL);
3789}
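
/* Illustrative sketch only: flushing the firmware cache (for instance ahead
 * of a memory dump) might look like the fragment below. 'adap' and 'ret'
 * are assumed to be in scope, and FW_PARAM_DEV_FWCACHE_FLUSH is the flush
 * opcode as defined in the firmware API header to the best of my knowledge.
 */
#if 0	/* example, not compiled */
	ret = t4_fwcache(adap, FW_PARAM_DEV_FWCACHE_FLUSH);
	if (ret)
		dev_warn(adap->pdev_dev, "FW cache flush failed: %d\n", ret);
#endif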
3790
19689609
HS
3791void t4_cim_read_pif_la(struct adapter *adap, u32 *pif_req, u32 *pif_rsp,
3792 unsigned int *pif_req_wrptr,
3793 unsigned int *pif_rsp_wrptr)
3794{
3795 int i, j;
3796 u32 cfg, val, req, rsp;
3797
3798 cfg = t4_read_reg(adap, CIM_DEBUGCFG_A);
3799 if (cfg & LADBGEN_F)
3800 t4_write_reg(adap, CIM_DEBUGCFG_A, cfg ^ LADBGEN_F);
3801
3802 val = t4_read_reg(adap, CIM_DEBUGSTS_A);
3803 req = POLADBGWRPTR_G(val);
3804 rsp = PILADBGWRPTR_G(val);
3805 if (pif_req_wrptr)
3806 *pif_req_wrptr = req;
3807 if (pif_rsp_wrptr)
3808 *pif_rsp_wrptr = rsp;
3809
3810 for (i = 0; i < CIM_PIFLA_SIZE; i++) {
3811 for (j = 0; j < 6; j++) {
3812 t4_write_reg(adap, CIM_DEBUGCFG_A, POLADBGRDPTR_V(req) |
3813 PILADBGRDPTR_V(rsp));
3814 *pif_req++ = t4_read_reg(adap, CIM_PO_LA_DEBUGDATA_A);
3815 *pif_rsp++ = t4_read_reg(adap, CIM_PI_LA_DEBUGDATA_A);
3816 req++;
3817 rsp++;
3818 }
3819 req = (req + 2) & POLADBGRDPTR_M;
3820 rsp = (rsp + 2) & PILADBGRDPTR_M;
3821 }
3822 t4_write_reg(adap, CIM_DEBUGCFG_A, cfg);
3823}
3824
26fae93f
HS
3825void t4_cim_read_ma_la(struct adapter *adap, u32 *ma_req, u32 *ma_rsp)
3826{
3827 u32 cfg;
3828 int i, j, idx;
3829
3830 cfg = t4_read_reg(adap, CIM_DEBUGCFG_A);
3831 if (cfg & LADBGEN_F)
3832 t4_write_reg(adap, CIM_DEBUGCFG_A, cfg ^ LADBGEN_F);
3833
3834 for (i = 0; i < CIM_MALA_SIZE; i++) {
3835 for (j = 0; j < 5; j++) {
3836 idx = 8 * i + j;
3837 t4_write_reg(adap, CIM_DEBUGCFG_A, POLADBGRDPTR_V(idx) |
3838 PILADBGRDPTR_V(idx));
3839 *ma_req++ = t4_read_reg(adap, CIM_PO_LA_MADEBUGDATA_A);
3840 *ma_rsp++ = t4_read_reg(adap, CIM_PI_LA_MADEBUGDATA_A);
3841 }
3842 }
3843 t4_write_reg(adap, CIM_DEBUGCFG_A, cfg);
3844}
3845
797ff0f5
HS
3846void t4_ulprx_read_la(struct adapter *adap, u32 *la_buf)
3847{
3848 unsigned int i, j;
3849
3850 for (i = 0; i < 8; i++) {
3851 u32 *p = la_buf + i;
3852
3853 t4_write_reg(adap, ULP_RX_LA_CTL_A, i);
3854 j = t4_read_reg(adap, ULP_RX_LA_WRPTR_A);
3855 t4_write_reg(adap, ULP_RX_LA_RDPTR_A, j);
3856 for (j = 0; j < ULPRX_LA_SIZE; j++, p += 8)
3857 *p = t4_read_reg(adap, ULP_RX_LA_RDDATA_A);
3858 }
3859}
3860
c3168cab
GG
3861#define ADVERT_MASK (FW_PORT_CAP32_SPEED_V(FW_PORT_CAP32_SPEED_M) | \
3862 FW_PORT_CAP32_ANEG)
3863
3864/**
3865 * fwcaps16_to_caps32 - convert 16-bit Port Capabilities to 32-bits
3866 * @caps16: a 16-bit Port Capabilities value
3867 *
3868 * Returns the equivalent 32-bit Port Capabilities value.
3869 */
3870static fw_port_cap32_t fwcaps16_to_caps32(fw_port_cap16_t caps16)
3871{
3872 fw_port_cap32_t caps32 = 0;
3873
3874 #define CAP16_TO_CAP32(__cap) \
3875 do { \
3876 if (caps16 & FW_PORT_CAP_##__cap) \
3877 caps32 |= FW_PORT_CAP32_##__cap; \
3878 } while (0)
3879
3880 CAP16_TO_CAP32(SPEED_100M);
3881 CAP16_TO_CAP32(SPEED_1G);
3882 CAP16_TO_CAP32(SPEED_25G);
3883 CAP16_TO_CAP32(SPEED_10G);
3884 CAP16_TO_CAP32(SPEED_40G);
3885 CAP16_TO_CAP32(SPEED_100G);
3886 CAP16_TO_CAP32(FC_RX);
3887 CAP16_TO_CAP32(FC_TX);
3888 CAP16_TO_CAP32(ANEG);
3889 CAP16_TO_CAP32(MDIX);
3890 CAP16_TO_CAP32(MDIAUTO);
3891 CAP16_TO_CAP32(FEC_RS);
3892 CAP16_TO_CAP32(FEC_BASER_RS);
3893 CAP16_TO_CAP32(802_3_PAUSE);
3894 CAP16_TO_CAP32(802_3_ASM_DIR);
3895
3896 #undef CAP16_TO_CAP32
3897
3898 return caps32;
3899}
3900
3901/**
3902 * fwcaps32_to_caps16 - convert 32-bit Port Capabilities to 16-bits
3903 * @caps32: a 32-bit Port Capabilities value
3904 *
3905 * Returns the equivalent 16-bit Port Capabilities value. Note that
3906 * not all 32-bit Port Capabilities can be represented in the 16-bit
3907 * Port Capabilities and some fields/values may not make it.
3908 */
3909static fw_port_cap16_t fwcaps32_to_caps16(fw_port_cap32_t caps32)
3910{
3911 fw_port_cap16_t caps16 = 0;
3912
3913 #define CAP32_TO_CAP16(__cap) \
3914 do { \
3915 if (caps32 & FW_PORT_CAP32_##__cap) \
3916 caps16 |= FW_PORT_CAP_##__cap; \
3917 } while (0)
3918
3919 CAP32_TO_CAP16(SPEED_100M);
3920 CAP32_TO_CAP16(SPEED_1G);
3921 CAP32_TO_CAP16(SPEED_10G);
3922 CAP32_TO_CAP16(SPEED_25G);
3923 CAP32_TO_CAP16(SPEED_40G);
3924 CAP32_TO_CAP16(SPEED_100G);
3925 CAP32_TO_CAP16(FC_RX);
3926 CAP32_TO_CAP16(FC_TX);
3927 CAP32_TO_CAP16(802_3_PAUSE);
3928 CAP32_TO_CAP16(802_3_ASM_DIR);
3929 CAP32_TO_CAP16(ANEG);
3930 CAP32_TO_CAP16(MDIX);
3931 CAP32_TO_CAP16(MDIAUTO);
3932 CAP32_TO_CAP16(FEC_RS);
3933 CAP32_TO_CAP16(FEC_BASER_RS);
3934
3935 #undef CAP32_TO_CAP16
3936
3937 return caps16;
3938}
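
/* Illustrative sketch only: the two helpers above are (lossy) inverses.
 * Bits that exist in both encodings survive a round trip; any 32-bit-only
 * capabilities are dropped by fwcaps32_to_caps16(). The values chosen here
 * are assumptions for the example.
 */
#if 0	/* example, not compiled */
	fw_port_cap16_t caps16 = FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_ANEG;
	fw_port_cap32_t caps32 = fwcaps16_to_caps32(caps16);

	WARN_ON(fwcaps32_to_caps16(caps32) != caps16);
#endif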
56d36be4 3939
158a5c0a 3940/* Translate Firmware Port Capabilities Pause specification to Common Code */
c3168cab 3941static inline enum cc_pause fwcap_to_cc_pause(fw_port_cap32_t fw_pause)
158a5c0a 3942{
c3168cab 3943 enum cc_pause cc_pause = 0;
158a5c0a 3944
c3168cab 3945 if (fw_pause & FW_PORT_CAP32_FC_RX)
158a5c0a 3946 cc_pause |= PAUSE_RX;
c3168cab 3947 if (fw_pause & FW_PORT_CAP32_FC_TX)
158a5c0a
CL
3948 cc_pause |= PAUSE_TX;
3949
3950 return cc_pause;
3951}
3952
3953/* Translate Common Code Pause specification into Firmware Port Capabilities */
c3168cab 3954static inline fw_port_cap32_t cc_to_fwcap_pause(enum cc_pause cc_pause)
158a5c0a 3955{
c3168cab 3956 fw_port_cap32_t fw_pause = 0;
158a5c0a
CL
3957
3958 if (cc_pause & PAUSE_RX)
c3168cab 3959 fw_pause |= FW_PORT_CAP32_FC_RX;
158a5c0a 3960 if (cc_pause & PAUSE_TX)
c3168cab 3961 fw_pause |= FW_PORT_CAP32_FC_TX;
158a5c0a
CL
3962
3963 return fw_pause;
3964}
3965
3966/* Translate Firmware Forward Error Correction specification to Common Code */
c3168cab 3967static inline enum cc_fec fwcap_to_cc_fec(fw_port_cap32_t fw_fec)
158a5c0a 3968{
c3168cab 3969 enum cc_fec cc_fec = 0;
158a5c0a 3970
c3168cab 3971 if (fw_fec & FW_PORT_CAP32_FEC_RS)
158a5c0a 3972 cc_fec |= FEC_RS;
c3168cab 3973 if (fw_fec & FW_PORT_CAP32_FEC_BASER_RS)
158a5c0a
CL
3974 cc_fec |= FEC_BASER_RS;
3975
3976 return cc_fec;
3977}
3978
3979/* Translate Common Code Forward Error Correction specification to Firmware */
c3168cab 3980static inline fw_port_cap32_t cc_to_fwcap_fec(enum cc_fec cc_fec)
158a5c0a 3981{
c3168cab 3982 fw_port_cap32_t fw_fec = 0;
158a5c0a
CL
3983
3984 if (cc_fec & FEC_RS)
c3168cab 3985 fw_fec |= FW_PORT_CAP32_FEC_RS;
158a5c0a 3986 if (cc_fec & FEC_BASER_RS)
c3168cab 3987 fw_fec |= FW_PORT_CAP32_FEC_BASER_RS;
158a5c0a
CL
3988
3989 return fw_fec;
3990}
3991
56d36be4 3992/**
4036da90 3993 * t4_link_l1cfg - apply link configuration to MAC/PHY
158a5c0a
CL
3994 * @adapter: the adapter
3995 * @mbox: the Firmware Mailbox to use
3996 * @port: the Port ID
3997 * @lc: the Port's Link Configuration
56d36be4
DM
3998 *
3999 * Set up a port's MAC and PHY according to a desired link configuration.
4000 * - If the PHY can auto-negotiate first decide what to advertise, then
4001 * enable/disable auto-negotiation as desired, and reset.
4002 * - If the PHY does not auto-negotiate just reset it.
4003 * - If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
4004 * otherwise do it later based on the outcome of auto-negotiation.
4005 */
c3168cab
GG
4006int t4_link_l1cfg(struct adapter *adapter, unsigned int mbox,
4007 unsigned int port, struct link_config *lc)
56d36be4 4008{
c3168cab
GG
4009 unsigned int fw_caps = adapter->params.fw_caps_support;
4010 struct fw_port_cmd cmd;
4011 unsigned int fw_mdi = FW_PORT_CAP32_MDI_V(FW_PORT_CAP32_MDI_AUTO);
4012 fw_port_cap32_t fw_fc, cc_fec, fw_fec, rcap;
56d36be4
DM
4013
4014 lc->link_ok = 0;
56d36be4 4015
158a5c0a
CL
4016 /* Convert driver coding of Pause Frame Flow Control settings into the
4017 * Firmware's API.
4018 */
4019 fw_fc = cc_to_fwcap_pause(lc->requested_fc);
4020
4021 /* Convert Common Code Forward Error Control settings into the
4022 * Firmware's API. If the current Requested FEC has "Automatic"
4023 * (IEEE 802.3) specified, then we use whatever the Firmware
 4024 * sent us as part of its IEEE 802.3-based interpretation of
4025 * the Transceiver Module EPROM FEC parameters. Otherwise we
4026 * use whatever is in the current Requested FEC settings.
4027 */
4028 if (lc->requested_fec & FEC_AUTO)
c3168cab 4029 cc_fec = fwcap_to_cc_fec(lc->def_acaps);
158a5c0a
CL
4030 else
4031 cc_fec = lc->requested_fec;
4032 fw_fec = cc_to_fwcap_fec(cc_fec);
3bb4858f 4033
158a5c0a
CL
4034 /* Figure out what our Requested Port Capabilities are going to be.
4035 */
c3168cab
GG
4036 if (!(lc->pcaps & FW_PORT_CAP32_ANEG)) {
4037 rcap = (lc->pcaps & ADVERT_MASK) | fw_fc | fw_fec;
4038 lc->fc = lc->requested_fc & ~PAUSE_AUTONEG;
158a5c0a
CL
4039 lc->fec = cc_fec;
4040 } else if (lc->autoneg == AUTONEG_DISABLE) {
c3168cab
GG
4041 rcap = lc->speed_caps | fw_fc | fw_fec | fw_mdi;
4042 lc->fc = lc->requested_fc & ~PAUSE_AUTONEG;
158a5c0a
CL
4043 lc->fec = cc_fec;
4044 } else {
c3168cab 4045 rcap = lc->acaps | fw_fc | fw_fec | fw_mdi;
158a5c0a 4046 }
3bb4858f 4047
158a5c0a
CL
4048 /* And send that on to the Firmware ...
4049 */
c3168cab
GG
4050 memset(&cmd, 0, sizeof(cmd));
4051 cmd.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PORT_CMD) |
4052 FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
4053 FW_PORT_CMD_PORTID_V(port));
4054 cmd.action_to_len16 =
4055 cpu_to_be32(FW_PORT_CMD_ACTION_V(fw_caps == FW_CAPS16
4056 ? FW_PORT_ACTION_L1_CFG
4057 : FW_PORT_ACTION_L1_CFG32) |
4058 FW_LEN16(cmd));
4059 if (fw_caps == FW_CAPS16)
4060 cmd.u.l1cfg.rcap = cpu_to_be32(fwcaps32_to_caps16(rcap));
4061 else
4062 cmd.u.l1cfg32.rcap32 = cpu_to_be32(rcap);
4063 return t4_wr_mbox(adapter, mbox, &cmd, sizeof(cmd), NULL);
56d36be4
DM
4064}
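
/* Illustrative sketch only: a hypothetical caller forcing a fixed speed
 * with auto-negotiation disabled. Field names follow struct link_config as
 * used above; 'pi', 'adap', 'ret' and the chosen speed are assumptions for
 * the example, not driver code.
 */
#if 0	/* example, not compiled */
	struct link_config *lc = &pi->link_cfg;

	lc->autoneg = AUTONEG_DISABLE;
	lc->speed_caps = FW_PORT_CAP32_SPEED_10G;
	lc->requested_fc = PAUSE_RX | PAUSE_TX;
	lc->requested_fec = FEC_AUTO;
	ret = t4_link_l1cfg(adap, adap->mbox, pi->port_id, lc);
#endif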
4065
4066/**
4067 * t4_restart_aneg - restart autonegotiation
4068 * @adap: the adapter
4069 * @mbox: mbox to use for the FW command
4070 * @port: the port id
4071 *
4072 * Restarts autonegotiation for the selected port.
4073 */
4074int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port)
4075{
4076 struct fw_port_cmd c;
4077
4078 memset(&c, 0, sizeof(c));
f404f80c
HS
4079 c.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PORT_CMD) |
4080 FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
4081 FW_PORT_CMD_PORTID_V(port));
4082 c.action_to_len16 =
4083 cpu_to_be32(FW_PORT_CMD_ACTION_V(FW_PORT_ACTION_L1_CFG) |
4084 FW_LEN16(c));
c3168cab 4085 c.u.l1cfg.rcap = cpu_to_be32(FW_PORT_CAP32_ANEG);
56d36be4
DM
4086 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4087}
4088
8caa1e84
VP
4089typedef void (*int_handler_t)(struct adapter *adap);
4090
56d36be4
DM
4091struct intr_info {
4092 unsigned int mask; /* bits to check in interrupt status */
4093 const char *msg; /* message to print or NULL */
4094 short stat_idx; /* stat counter to increment or -1 */
4095 unsigned short fatal; /* whether the condition reported is fatal */
8caa1e84 4096 int_handler_t int_handler; /* platform-specific int handler */
56d36be4
DM
4097};
4098
4099/**
4100 * t4_handle_intr_status - table driven interrupt handler
4101 * @adapter: the adapter that generated the interrupt
4102 * @reg: the interrupt status register to process
4103 * @acts: table of interrupt actions
4104 *
4105 * A table driven interrupt handler that applies a set of masks to an
4106 * interrupt status word and performs the corresponding actions if the
25985edc 4107 * interrupts described by the mask have occurred. The actions include
56d36be4
DM
4108 * optionally emitting a warning or alert message. The table is terminated
4109 * by an entry specifying mask 0. Returns the number of fatal interrupt
4110 * conditions.
4111 */
4112static int t4_handle_intr_status(struct adapter *adapter, unsigned int reg,
4113 const struct intr_info *acts)
4114{
4115 int fatal = 0;
4116 unsigned int mask = 0;
4117 unsigned int status = t4_read_reg(adapter, reg);
4118
4119 for ( ; acts->mask; ++acts) {
4120 if (!(status & acts->mask))
4121 continue;
4122 if (acts->fatal) {
4123 fatal++;
4124 dev_alert(adapter->pdev_dev, "%s (0x%x)\n", acts->msg,
4125 status & acts->mask);
4126 } else if (acts->msg && printk_ratelimit())
4127 dev_warn(adapter->pdev_dev, "%s (0x%x)\n", acts->msg,
4128 status & acts->mask);
8caa1e84
VP
4129 if (acts->int_handler)
4130 acts->int_handler(adapter);
56d36be4
DM
4131 mask |= acts->mask;
4132 }
4133 status &= mask;
4134 if (status) /* clear processed interrupts */
4135 t4_write_reg(adapter, reg, status);
4136 return fatal;
4137}
4138
4139/*
4140 * Interrupt handler for the PCIE module.
4141 */
4142static void pcie_intr_handler(struct adapter *adapter)
4143{
005b5717 4144 static const struct intr_info sysbus_intr_info[] = {
f061de42
HS
4145 { RNPP_F, "RXNP array parity error", -1, 1 },
4146 { RPCP_F, "RXPC array parity error", -1, 1 },
4147 { RCIP_F, "RXCIF array parity error", -1, 1 },
4148 { RCCP_F, "Rx completions control array parity error", -1, 1 },
4149 { RFTP_F, "RXFT array parity error", -1, 1 },
56d36be4
DM
4150 { 0 }
4151 };
005b5717 4152 static const struct intr_info pcie_port_intr_info[] = {
f061de42
HS
4153 { TPCP_F, "TXPC array parity error", -1, 1 },
4154 { TNPP_F, "TXNP array parity error", -1, 1 },
4155 { TFTP_F, "TXFT array parity error", -1, 1 },
4156 { TCAP_F, "TXCA array parity error", -1, 1 },
4157 { TCIP_F, "TXCIF array parity error", -1, 1 },
4158 { RCAP_F, "RXCA array parity error", -1, 1 },
4159 { OTDD_F, "outbound request TLP discarded", -1, 1 },
4160 { RDPE_F, "Rx data parity error", -1, 1 },
4161 { TDUE_F, "Tx uncorrectable data error", -1, 1 },
56d36be4
DM
4162 { 0 }
4163 };
005b5717 4164 static const struct intr_info pcie_intr_info[] = {
f061de42
HS
4165 { MSIADDRLPERR_F, "MSI AddrL parity error", -1, 1 },
4166 { MSIADDRHPERR_F, "MSI AddrH parity error", -1, 1 },
4167 { MSIDATAPERR_F, "MSI data parity error", -1, 1 },
4168 { MSIXADDRLPERR_F, "MSI-X AddrL parity error", -1, 1 },
4169 { MSIXADDRHPERR_F, "MSI-X AddrH parity error", -1, 1 },
4170 { MSIXDATAPERR_F, "MSI-X data parity error", -1, 1 },
4171 { MSIXDIPERR_F, "MSI-X DI parity error", -1, 1 },
4172 { PIOCPLPERR_F, "PCI PIO completion FIFO parity error", -1, 1 },
4173 { PIOREQPERR_F, "PCI PIO request FIFO parity error", -1, 1 },
4174 { TARTAGPERR_F, "PCI PCI target tag FIFO parity error", -1, 1 },
4175 { CCNTPERR_F, "PCI CMD channel count parity error", -1, 1 },
4176 { CREQPERR_F, "PCI CMD channel request parity error", -1, 1 },
4177 { CRSPPERR_F, "PCI CMD channel response parity error", -1, 1 },
4178 { DCNTPERR_F, "PCI DMA channel count parity error", -1, 1 },
4179 { DREQPERR_F, "PCI DMA channel request parity error", -1, 1 },
4180 { DRSPPERR_F, "PCI DMA channel response parity error", -1, 1 },
4181 { HCNTPERR_F, "PCI HMA channel count parity error", -1, 1 },
4182 { HREQPERR_F, "PCI HMA channel request parity error", -1, 1 },
4183 { HRSPPERR_F, "PCI HMA channel response parity error", -1, 1 },
4184 { CFGSNPPERR_F, "PCI config snoop FIFO parity error", -1, 1 },
4185 { FIDPERR_F, "PCI FID parity error", -1, 1 },
4186 { INTXCLRPERR_F, "PCI INTx clear parity error", -1, 1 },
4187 { MATAGPERR_F, "PCI MA tag parity error", -1, 1 },
4188 { PIOTAGPERR_F, "PCI PIO tag parity error", -1, 1 },
4189 { RXCPLPERR_F, "PCI Rx completion parity error", -1, 1 },
4190 { RXWRPERR_F, "PCI Rx write parity error", -1, 1 },
4191 { RPLPERR_F, "PCI replay buffer parity error", -1, 1 },
4192 { PCIESINT_F, "PCI core secondary fault", -1, 1 },
4193 { PCIEPINT_F, "PCI core primary fault", -1, 1 },
4194 { UNXSPLCPLERR_F, "PCI unexpected split completion error",
4195 -1, 0 },
56d36be4
DM
4196 { 0 }
4197 };
4198
0a57a536 4199 static struct intr_info t5_pcie_intr_info[] = {
f061de42 4200 { MSTGRPPERR_F, "Master Response Read Queue parity error",
0a57a536 4201 -1, 1 },
f061de42
HS
4202 { MSTTIMEOUTPERR_F, "Master Timeout FIFO parity error", -1, 1 },
4203 { MSIXSTIPERR_F, "MSI-X STI SRAM parity error", -1, 1 },
4204 { MSIXADDRLPERR_F, "MSI-X AddrL parity error", -1, 1 },
4205 { MSIXADDRHPERR_F, "MSI-X AddrH parity error", -1, 1 },
4206 { MSIXDATAPERR_F, "MSI-X data parity error", -1, 1 },
4207 { MSIXDIPERR_F, "MSI-X DI parity error", -1, 1 },
4208 { PIOCPLGRPPERR_F, "PCI PIO completion Group FIFO parity error",
0a57a536 4209 -1, 1 },
f061de42 4210 { PIOREQGRPPERR_F, "PCI PIO request Group FIFO parity error",
0a57a536 4211 -1, 1 },
f061de42
HS
4212 { TARTAGPERR_F, "PCI PCI target tag FIFO parity error", -1, 1 },
4213 { MSTTAGQPERR_F, "PCI master tag queue parity error", -1, 1 },
4214 { CREQPERR_F, "PCI CMD channel request parity error", -1, 1 },
4215 { CRSPPERR_F, "PCI CMD channel response parity error", -1, 1 },
4216 { DREQWRPERR_F, "PCI DMA channel write request parity error",
0a57a536 4217 -1, 1 },
f061de42
HS
4218 { DREQPERR_F, "PCI DMA channel request parity error", -1, 1 },
4219 { DRSPPERR_F, "PCI DMA channel response parity error", -1, 1 },
4220 { HREQWRPERR_F, "PCI HMA channel count parity error", -1, 1 },
4221 { HREQPERR_F, "PCI HMA channel request parity error", -1, 1 },
4222 { HRSPPERR_F, "PCI HMA channel response parity error", -1, 1 },
4223 { CFGSNPPERR_F, "PCI config snoop FIFO parity error", -1, 1 },
4224 { FIDPERR_F, "PCI FID parity error", -1, 1 },
4225 { VFIDPERR_F, "PCI INTx clear parity error", -1, 1 },
4226 { MAGRPPERR_F, "PCI MA group FIFO parity error", -1, 1 },
4227 { PIOTAGPERR_F, "PCI PIO tag parity error", -1, 1 },
4228 { IPRXHDRGRPPERR_F, "PCI IP Rx header group parity error",
0a57a536 4229 -1, 1 },
f061de42
HS
4230 { IPRXDATAGRPPERR_F, "PCI IP Rx data group parity error",
4231 -1, 1 },
4232 { RPLPERR_F, "PCI IP replay buffer parity error", -1, 1 },
4233 { IPSOTPERR_F, "PCI IP SOT buffer parity error", -1, 1 },
4234 { TRGT1GRPPERR_F, "PCI TRGT1 group FIFOs parity error", -1, 1 },
4235 { READRSPERR_F, "Outbound read error", -1, 0 },
0a57a536
SR
4236 { 0 }
4237 };
4238
56d36be4
DM
4239 int fat;
4240
9bb59b96
HS
4241 if (is_t4(adapter->params.chip))
4242 fat = t4_handle_intr_status(adapter,
f061de42
HS
4243 PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS_A,
4244 sysbus_intr_info) +
9bb59b96 4245 t4_handle_intr_status(adapter,
f061de42
HS
4246 PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS_A,
4247 pcie_port_intr_info) +
4248 t4_handle_intr_status(adapter, PCIE_INT_CAUSE_A,
9bb59b96
HS
4249 pcie_intr_info);
4250 else
f061de42 4251 fat = t4_handle_intr_status(adapter, PCIE_INT_CAUSE_A,
9bb59b96 4252 t5_pcie_intr_info);
0a57a536 4253
56d36be4
DM
4254 if (fat)
4255 t4_fatal_err(adapter);
4256}
4257
4258/*
4259 * TP interrupt handler.
4260 */
4261static void tp_intr_handler(struct adapter *adapter)
4262{
005b5717 4263 static const struct intr_info tp_intr_info[] = {
56d36be4 4264 { 0x3fffffff, "TP parity error", -1, 1 },
837e4a42 4265 { FLMTXFLSTEMPTY_F, "TP out of Tx pages", -1, 1 },
56d36be4
DM
4266 { 0 }
4267 };
4268
837e4a42 4269 if (t4_handle_intr_status(adapter, TP_INT_CAUSE_A, tp_intr_info))
56d36be4
DM
4270 t4_fatal_err(adapter);
4271}
4272
4273/*
4274 * SGE interrupt handler.
4275 */
4276static void sge_intr_handler(struct adapter *adapter)
4277{
4278 u64 v;
3ccc6cf7 4279 u32 err;
56d36be4 4280
005b5717 4281 static const struct intr_info sge_intr_info[] = {
f612b815 4282 { ERR_CPL_EXCEED_IQE_SIZE_F,
56d36be4 4283 "SGE received CPL exceeding IQE size", -1, 1 },
f612b815 4284 { ERR_INVALID_CIDX_INC_F,
56d36be4 4285 "SGE GTS CIDX increment too large", -1, 0 },
f612b815
HS
4286 { ERR_CPL_OPCODE_0_F, "SGE received 0-length CPL", -1, 0 },
4287 { DBFIFO_LP_INT_F, NULL, -1, 0, t4_db_full },
f612b815 4288 { ERR_DATA_CPL_ON_HIGH_QID1_F | ERR_DATA_CPL_ON_HIGH_QID0_F,
56d36be4 4289 "SGE IQID > 1023 received CPL for FL", -1, 0 },
f612b815 4290 { ERR_BAD_DB_PIDX3_F, "SGE DBP 3 pidx increment too large", -1,
56d36be4 4291 0 },
f612b815 4292 { ERR_BAD_DB_PIDX2_F, "SGE DBP 2 pidx increment too large", -1,
56d36be4 4293 0 },
f612b815 4294 { ERR_BAD_DB_PIDX1_F, "SGE DBP 1 pidx increment too large", -1,
56d36be4 4295 0 },
f612b815 4296 { ERR_BAD_DB_PIDX0_F, "SGE DBP 0 pidx increment too large", -1,
56d36be4 4297 0 },
f612b815 4298 { ERR_ING_CTXT_PRIO_F,
56d36be4 4299 "SGE too many priority ingress contexts", -1, 0 },
f612b815
HS
4300 { INGRESS_SIZE_ERR_F, "SGE illegal ingress QID", -1, 0 },
4301 { EGRESS_SIZE_ERR_F, "SGE illegal egress QID", -1, 0 },
56d36be4
DM
4302 { 0 }
4303 };
4304
3ccc6cf7
HS
4305 static struct intr_info t4t5_sge_intr_info[] = {
4306 { ERR_DROPPED_DB_F, NULL, -1, 0, t4_db_dropped },
4307 { DBFIFO_HP_INT_F, NULL, -1, 0, t4_db_full },
4308 { ERR_EGR_CTXT_PRIO_F,
4309 "SGE too many priority egress contexts", -1, 0 },
4310 { 0 }
4311 };
4312
f612b815
HS
4313 v = (u64)t4_read_reg(adapter, SGE_INT_CAUSE1_A) |
4314 ((u64)t4_read_reg(adapter, SGE_INT_CAUSE2_A) << 32);
56d36be4
DM
4315 if (v) {
4316 dev_alert(adapter->pdev_dev, "SGE parity error (%#llx)\n",
8caa1e84 4317 (unsigned long long)v);
f612b815
HS
4318 t4_write_reg(adapter, SGE_INT_CAUSE1_A, v);
4319 t4_write_reg(adapter, SGE_INT_CAUSE2_A, v >> 32);
56d36be4
DM
4320 }
4321
3ccc6cf7
HS
4322 v |= t4_handle_intr_status(adapter, SGE_INT_CAUSE3_A, sge_intr_info);
4323 if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5)
4324 v |= t4_handle_intr_status(adapter, SGE_INT_CAUSE3_A,
4325 t4t5_sge_intr_info);
4326
4327 err = t4_read_reg(adapter, SGE_ERROR_STATS_A);
4328 if (err & ERROR_QID_VALID_F) {
4329 dev_err(adapter->pdev_dev, "SGE error for queue %u\n",
4330 ERROR_QID_G(err));
4331 if (err & UNCAPTURED_ERROR_F)
4332 dev_err(adapter->pdev_dev,
4333 "SGE UNCAPTURED_ERROR set (clearing)\n");
4334 t4_write_reg(adapter, SGE_ERROR_STATS_A, ERROR_QID_VALID_F |
4335 UNCAPTURED_ERROR_F);
4336 }
4337
4338 if (v != 0)
56d36be4
DM
4339 t4_fatal_err(adapter);
4340}
4341
89c3a86c
HS
4342#define CIM_OBQ_INTR (OBQULP0PARERR_F | OBQULP1PARERR_F | OBQULP2PARERR_F |\
4343 OBQULP3PARERR_F | OBQSGEPARERR_F | OBQNCSIPARERR_F)
4344#define CIM_IBQ_INTR (IBQTP0PARERR_F | IBQTP1PARERR_F | IBQULPPARERR_F |\
4345 IBQSGEHIPARERR_F | IBQSGELOPARERR_F | IBQNCSIPARERR_F)
4346
56d36be4
DM
4347/*
4348 * CIM interrupt handler.
4349 */
4350static void cim_intr_handler(struct adapter *adapter)
4351{
005b5717 4352 static const struct intr_info cim_intr_info[] = {
89c3a86c
HS
4353 { PREFDROPINT_F, "CIM control register prefetch drop", -1, 1 },
4354 { CIM_OBQ_INTR, "CIM OBQ parity error", -1, 1 },
4355 { CIM_IBQ_INTR, "CIM IBQ parity error", -1, 1 },
4356 { MBUPPARERR_F, "CIM mailbox uP parity error", -1, 1 },
4357 { MBHOSTPARERR_F, "CIM mailbox host parity error", -1, 1 },
4358 { TIEQINPARERRINT_F, "CIM TIEQ outgoing parity error", -1, 1 },
4359 { TIEQOUTPARERRINT_F, "CIM TIEQ incoming parity error", -1, 1 },
d86cc04e 4360 { TIMER0INT_F, "CIM TIMER0 interrupt", -1, 1 },
56d36be4
DM
4361 { 0 }
4362 };
005b5717 4363 static const struct intr_info cim_upintr_info[] = {
89c3a86c
HS
4364 { RSVDSPACEINT_F, "CIM reserved space access", -1, 1 },
4365 { ILLTRANSINT_F, "CIM illegal transaction", -1, 1 },
4366 { ILLWRINT_F, "CIM illegal write", -1, 1 },
4367 { ILLRDINT_F, "CIM illegal read", -1, 1 },
4368 { ILLRDBEINT_F, "CIM illegal read BE", -1, 1 },
4369 { ILLWRBEINT_F, "CIM illegal write BE", -1, 1 },
4370 { SGLRDBOOTINT_F, "CIM single read from boot space", -1, 1 },
4371 { SGLWRBOOTINT_F, "CIM single write to boot space", -1, 1 },
4372 { BLKWRBOOTINT_F, "CIM block write to boot space", -1, 1 },
4373 { SGLRDFLASHINT_F, "CIM single read from flash space", -1, 1 },
4374 { SGLWRFLASHINT_F, "CIM single write to flash space", -1, 1 },
4375 { BLKWRFLASHINT_F, "CIM block write to flash space", -1, 1 },
4376 { SGLRDEEPROMINT_F, "CIM single EEPROM read", -1, 1 },
4377 { SGLWREEPROMINT_F, "CIM single EEPROM write", -1, 1 },
4378 { BLKRDEEPROMINT_F, "CIM block EEPROM read", -1, 1 },
4379 { BLKWREEPROMINT_F, "CIM block EEPROM write", -1, 1 },
4380 { SGLRDCTLINT_F, "CIM single read from CTL space", -1, 1 },
4381 { SGLWRCTLINT_F, "CIM single write to CTL space", -1, 1 },
4382 { BLKRDCTLINT_F, "CIM block read from CTL space", -1, 1 },
4383 { BLKWRCTLINT_F, "CIM block write to CTL space", -1, 1 },
4384 { SGLRDPLINT_F, "CIM single read from PL space", -1, 1 },
4385 { SGLWRPLINT_F, "CIM single write to PL space", -1, 1 },
4386 { BLKRDPLINT_F, "CIM block read from PL space", -1, 1 },
4387 { BLKWRPLINT_F, "CIM block write to PL space", -1, 1 },
4388 { REQOVRLOOKUPINT_F, "CIM request FIFO overwrite", -1, 1 },
4389 { RSPOVRLOOKUPINT_F, "CIM response FIFO overwrite", -1, 1 },
4390 { TIMEOUTINT_F, "CIM PIF timeout", -1, 1 },
4391 { TIMEOUTMAINT_F, "CIM PIF MA timeout", -1, 1 },
56d36be4
DM
4392 { 0 }
4393 };
4394
d86cc04e 4395 u32 val, fw_err;
56d36be4
DM
4396 int fat;
4397
d86cc04e
RL
4398 fw_err = t4_read_reg(adapter, PCIE_FW_A);
4399 if (fw_err & PCIE_FW_ERR_F)
31d55c2d
HS
4400 t4_report_fw_error(adapter);
4401
d86cc04e
RL
4402 /* When the Firmware detects an internal error which normally
4403 * wouldn't raise a Host Interrupt, it forces a CIM Timer0 interrupt
4404 * in order to make sure the Host sees the Firmware Crash. So
4405 * if we have a Timer0 interrupt and don't see a Firmware Crash,
4406 * ignore the Timer0 interrupt.
4407 */
4408
4409 val = t4_read_reg(adapter, CIM_HOST_INT_CAUSE_A);
4410 if (val & TIMER0INT_F)
4411 if (!(fw_err & PCIE_FW_ERR_F) ||
4412 (PCIE_FW_EVAL_G(fw_err) != PCIE_FW_EVAL_CRASH))
4413 t4_write_reg(adapter, CIM_HOST_INT_CAUSE_A,
4414 TIMER0INT_F);
4415
89c3a86c 4416 fat = t4_handle_intr_status(adapter, CIM_HOST_INT_CAUSE_A,
56d36be4 4417 cim_intr_info) +
89c3a86c 4418 t4_handle_intr_status(adapter, CIM_HOST_UPACC_INT_CAUSE_A,
56d36be4
DM
4419 cim_upintr_info);
4420 if (fat)
4421 t4_fatal_err(adapter);
4422}
4423
4424/*
4425 * ULP RX interrupt handler.
4426 */
4427static void ulprx_intr_handler(struct adapter *adapter)
4428{
005b5717 4429 static const struct intr_info ulprx_intr_info[] = {
91e9a1ec 4430 { 0x1800000, "ULPRX context error", -1, 1 },
56d36be4
DM
4431 { 0x7fffff, "ULPRX parity error", -1, 1 },
4432 { 0 }
4433 };
4434
0d804338 4435 if (t4_handle_intr_status(adapter, ULP_RX_INT_CAUSE_A, ulprx_intr_info))
56d36be4
DM
4436 t4_fatal_err(adapter);
4437}
4438
4439/*
4440 * ULP TX interrupt handler.
4441 */
4442static void ulptx_intr_handler(struct adapter *adapter)
4443{
005b5717 4444 static const struct intr_info ulptx_intr_info[] = {
837e4a42 4445 { PBL_BOUND_ERR_CH3_F, "ULPTX channel 3 PBL out of bounds", -1,
56d36be4 4446 0 },
837e4a42 4447 { PBL_BOUND_ERR_CH2_F, "ULPTX channel 2 PBL out of bounds", -1,
56d36be4 4448 0 },
837e4a42 4449 { PBL_BOUND_ERR_CH1_F, "ULPTX channel 1 PBL out of bounds", -1,
56d36be4 4450 0 },
837e4a42 4451 { PBL_BOUND_ERR_CH0_F, "ULPTX channel 0 PBL out of bounds", -1,
56d36be4
DM
4452 0 },
4453 { 0xfffffff, "ULPTX parity error", -1, 1 },
4454 { 0 }
4455 };
4456
837e4a42 4457 if (t4_handle_intr_status(adapter, ULP_TX_INT_CAUSE_A, ulptx_intr_info))
56d36be4
DM
4458 t4_fatal_err(adapter);
4459}
4460
4461/*
4462 * PM TX interrupt handler.
4463 */
4464static void pmtx_intr_handler(struct adapter *adapter)
4465{
005b5717 4466 static const struct intr_info pmtx_intr_info[] = {
837e4a42
HS
4467 { PCMD_LEN_OVFL0_F, "PMTX channel 0 pcmd too large", -1, 1 },
4468 { PCMD_LEN_OVFL1_F, "PMTX channel 1 pcmd too large", -1, 1 },
4469 { PCMD_LEN_OVFL2_F, "PMTX channel 2 pcmd too large", -1, 1 },
4470 { ZERO_C_CMD_ERROR_F, "PMTX 0-length pcmd", -1, 1 },
4471 { PMTX_FRAMING_ERROR_F, "PMTX framing error", -1, 1 },
4472 { OESPI_PAR_ERROR_F, "PMTX oespi parity error", -1, 1 },
4473 { DB_OPTIONS_PAR_ERROR_F, "PMTX db_options parity error",
4474 -1, 1 },
4475 { ICSPI_PAR_ERROR_F, "PMTX icspi parity error", -1, 1 },
4476 { PMTX_C_PCMD_PAR_ERROR_F, "PMTX c_pcmd parity error", -1, 1},
56d36be4
DM
4477 { 0 }
4478 };
4479
837e4a42 4480 if (t4_handle_intr_status(adapter, PM_TX_INT_CAUSE_A, pmtx_intr_info))
56d36be4
DM
4481 t4_fatal_err(adapter);
4482}
4483
4484/*
4485 * PM RX interrupt handler.
4486 */
4487static void pmrx_intr_handler(struct adapter *adapter)
4488{
005b5717 4489 static const struct intr_info pmrx_intr_info[] = {
837e4a42
HS
4490 { ZERO_E_CMD_ERROR_F, "PMRX 0-length pcmd", -1, 1 },
4491 { PMRX_FRAMING_ERROR_F, "PMRX framing error", -1, 1 },
4492 { OCSPI_PAR_ERROR_F, "PMRX ocspi parity error", -1, 1 },
4493 { DB_OPTIONS_PAR_ERROR_F, "PMRX db_options parity error",
4494 -1, 1 },
4495 { IESPI_PAR_ERROR_F, "PMRX iespi parity error", -1, 1 },
4496 { PMRX_E_PCMD_PAR_ERROR_F, "PMRX e_pcmd parity error", -1, 1},
56d36be4
DM
4497 { 0 }
4498 };
4499
837e4a42 4500 if (t4_handle_intr_status(adapter, PM_RX_INT_CAUSE_A, pmrx_intr_info))
56d36be4
DM
4501 t4_fatal_err(adapter);
4502}
4503
4504/*
4505 * CPL switch interrupt handler.
4506 */
4507static void cplsw_intr_handler(struct adapter *adapter)
4508{
005b5717 4509 static const struct intr_info cplsw_intr_info[] = {
0d804338
HS
4510 { CIM_OP_MAP_PERR_F, "CPLSW CIM op_map parity error", -1, 1 },
4511 { CIM_OVFL_ERROR_F, "CPLSW CIM overflow", -1, 1 },
4512 { TP_FRAMING_ERROR_F, "CPLSW TP framing error", -1, 1 },
4513 { SGE_FRAMING_ERROR_F, "CPLSW SGE framing error", -1, 1 },
4514 { CIM_FRAMING_ERROR_F, "CPLSW CIM framing error", -1, 1 },
4515 { ZERO_SWITCH_ERROR_F, "CPLSW no-switch error", -1, 1 },
56d36be4
DM
4516 { 0 }
4517 };
4518
0d804338 4519 if (t4_handle_intr_status(adapter, CPL_INTR_CAUSE_A, cplsw_intr_info))
56d36be4
DM
4520 t4_fatal_err(adapter);
4521}
4522
4523/*
4524 * LE interrupt handler.
4525 */
4526static void le_intr_handler(struct adapter *adap)
4527{
3ccc6cf7 4528 enum chip_type chip = CHELSIO_CHIP_VERSION(adap->params.chip);
005b5717 4529 static const struct intr_info le_intr_info[] = {
0d804338
HS
4530 { LIPMISS_F, "LE LIP miss", -1, 0 },
4531 { LIP0_F, "LE 0 LIP error", -1, 0 },
4532 { PARITYERR_F, "LE parity error", -1, 1 },
4533 { UNKNOWNCMD_F, "LE unknown command", -1, 1 },
4534 { REQQPARERR_F, "LE request queue parity error", -1, 1 },
56d36be4
DM
4535 { 0 }
4536 };
4537
3ccc6cf7
HS
4538 static struct intr_info t6_le_intr_info[] = {
4539 { T6_LIPMISS_F, "LE LIP miss", -1, 0 },
4540 { T6_LIP0_F, "LE 0 LIP error", -1, 0 },
4541 { TCAMINTPERR_F, "LE parity error", -1, 1 },
4542 { T6_UNKNOWNCMD_F, "LE unknown command", -1, 1 },
4543 { SSRAMINTPERR_F, "LE request queue parity error", -1, 1 },
4544 { 0 }
4545 };
4546
4547 if (t4_handle_intr_status(adap, LE_DB_INT_CAUSE_A,
4548 (chip <= CHELSIO_T5) ?
4549 le_intr_info : t6_le_intr_info))
56d36be4
DM
4550 t4_fatal_err(adap);
4551}
4552
4553/*
4554 * MPS interrupt handler.
4555 */
4556static void mps_intr_handler(struct adapter *adapter)
4557{
005b5717 4558 static const struct intr_info mps_rx_intr_info[] = {
56d36be4
DM
4559 { 0xffffff, "MPS Rx parity error", -1, 1 },
4560 { 0 }
4561 };
005b5717 4562 static const struct intr_info mps_tx_intr_info[] = {
837e4a42
HS
4563 { TPFIFO_V(TPFIFO_M), "MPS Tx TP FIFO parity error", -1, 1 },
4564 { NCSIFIFO_F, "MPS Tx NC-SI FIFO parity error", -1, 1 },
4565 { TXDATAFIFO_V(TXDATAFIFO_M), "MPS Tx data FIFO parity error",
4566 -1, 1 },
4567 { TXDESCFIFO_V(TXDESCFIFO_M), "MPS Tx desc FIFO parity error",
4568 -1, 1 },
4569 { BUBBLE_F, "MPS Tx underflow", -1, 1 },
4570 { SECNTERR_F, "MPS Tx SOP/EOP error", -1, 1 },
4571 { FRMERR_F, "MPS Tx framing error", -1, 1 },
56d36be4
DM
4572 { 0 }
4573 };
ef18e3b9
GG
4574 static const struct intr_info t6_mps_tx_intr_info[] = {
4575 { TPFIFO_V(TPFIFO_M), "MPS Tx TP FIFO parity error", -1, 1 },
4576 { NCSIFIFO_F, "MPS Tx NC-SI FIFO parity error", -1, 1 },
4577 { TXDATAFIFO_V(TXDATAFIFO_M), "MPS Tx data FIFO parity error",
4578 -1, 1 },
4579 { TXDESCFIFO_V(TXDESCFIFO_M), "MPS Tx desc FIFO parity error",
4580 -1, 1 },
4581 /* MPS Tx Bubble is normal for T6 */
4582 { SECNTERR_F, "MPS Tx SOP/EOP error", -1, 1 },
4583 { FRMERR_F, "MPS Tx framing error", -1, 1 },
4584 { 0 }
4585 };
005b5717 4586 static const struct intr_info mps_trc_intr_info[] = {
837e4a42
HS
4587 { FILTMEM_V(FILTMEM_M), "MPS TRC filter parity error", -1, 1 },
4588 { PKTFIFO_V(PKTFIFO_M), "MPS TRC packet FIFO parity error",
4589 -1, 1 },
4590 { MISCPERR_F, "MPS TRC misc parity error", -1, 1 },
56d36be4
DM
4591 { 0 }
4592 };
005b5717 4593 static const struct intr_info mps_stat_sram_intr_info[] = {
56d36be4
DM
4594 { 0x1fffff, "MPS statistics SRAM parity error", -1, 1 },
4595 { 0 }
4596 };
005b5717 4597 static const struct intr_info mps_stat_tx_intr_info[] = {
56d36be4
DM
4598 { 0xfffff, "MPS statistics Tx FIFO parity error", -1, 1 },
4599 { 0 }
4600 };
005b5717 4601 static const struct intr_info mps_stat_rx_intr_info[] = {
56d36be4
DM
4602 { 0xffffff, "MPS statistics Rx FIFO parity error", -1, 1 },
4603 { 0 }
4604 };
005b5717 4605 static const struct intr_info mps_cls_intr_info[] = {
837e4a42
HS
4606 { MATCHSRAM_F, "MPS match SRAM parity error", -1, 1 },
4607 { MATCHTCAM_F, "MPS match TCAM parity error", -1, 1 },
4608 { HASHSRAM_F, "MPS hash SRAM parity error", -1, 1 },
56d36be4
DM
4609 { 0 }
4610 };
4611
4612 int fat;
4613
837e4a42 4614 fat = t4_handle_intr_status(adapter, MPS_RX_PERR_INT_CAUSE_A,
56d36be4 4615 mps_rx_intr_info) +
837e4a42 4616 t4_handle_intr_status(adapter, MPS_TX_INT_CAUSE_A,
ef18e3b9
GG
4617 is_t6(adapter->params.chip)
4618 ? t6_mps_tx_intr_info
4619 : mps_tx_intr_info) +
837e4a42 4620 t4_handle_intr_status(adapter, MPS_TRC_INT_CAUSE_A,
56d36be4 4621 mps_trc_intr_info) +
837e4a42 4622 t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_SRAM_A,
56d36be4 4623 mps_stat_sram_intr_info) +
837e4a42 4624 t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_TX_FIFO_A,
56d36be4 4625 mps_stat_tx_intr_info) +
837e4a42 4626 t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_RX_FIFO_A,
56d36be4 4627 mps_stat_rx_intr_info) +
837e4a42 4628 t4_handle_intr_status(adapter, MPS_CLS_INT_CAUSE_A,
56d36be4
DM
4629 mps_cls_intr_info);
4630
837e4a42
HS
4631 t4_write_reg(adapter, MPS_INT_CAUSE_A, 0);
4632 t4_read_reg(adapter, MPS_INT_CAUSE_A); /* flush */
56d36be4
DM
4633 if (fat)
4634 t4_fatal_err(adapter);
4635}
4636
89c3a86c
HS
4637#define MEM_INT_MASK (PERR_INT_CAUSE_F | ECC_CE_INT_CAUSE_F | \
4638 ECC_UE_INT_CAUSE_F)
56d36be4
DM
4639
4640/*
4641 * EDC/MC interrupt handler.
4642 */
4643static void mem_intr_handler(struct adapter *adapter, int idx)
4644{
822dd8a8 4645 static const char name[4][7] = { "EDC0", "EDC1", "MC/MC0", "MC1" };
56d36be4
DM
4646
4647 unsigned int addr, cnt_addr, v;
4648
4649 if (idx <= MEM_EDC1) {
89c3a86c
HS
4650 addr = EDC_REG(EDC_INT_CAUSE_A, idx);
4651 cnt_addr = EDC_REG(EDC_ECC_STATUS_A, idx);
822dd8a8
HS
4652 } else if (idx == MEM_MC) {
4653 if (is_t4(adapter->params.chip)) {
89c3a86c
HS
4654 addr = MC_INT_CAUSE_A;
4655 cnt_addr = MC_ECC_STATUS_A;
822dd8a8 4656 } else {
89c3a86c
HS
4657 addr = MC_P_INT_CAUSE_A;
4658 cnt_addr = MC_P_ECC_STATUS_A;
822dd8a8 4659 }
56d36be4 4660 } else {
89c3a86c
HS
4661 addr = MC_REG(MC_P_INT_CAUSE_A, 1);
4662 cnt_addr = MC_REG(MC_P_ECC_STATUS_A, 1);
56d36be4
DM
4663 }
4664
4665 v = t4_read_reg(adapter, addr) & MEM_INT_MASK;
89c3a86c 4666 if (v & PERR_INT_CAUSE_F)
56d36be4
DM
4667 dev_alert(adapter->pdev_dev, "%s FIFO parity error\n",
4668 name[idx]);
89c3a86c
HS
4669 if (v & ECC_CE_INT_CAUSE_F) {
4670 u32 cnt = ECC_CECNT_G(t4_read_reg(adapter, cnt_addr));
56d36be4 4671
bf8ebb67
HS
4672 t4_edc_err_read(adapter, idx);
4673
89c3a86c 4674 t4_write_reg(adapter, cnt_addr, ECC_CECNT_V(ECC_CECNT_M));
56d36be4
DM
4675 if (printk_ratelimit())
4676 dev_warn(adapter->pdev_dev,
4677 "%u %s correctable ECC data error%s\n",
4678 cnt, name[idx], cnt > 1 ? "s" : "");
4679 }
89c3a86c 4680 if (v & ECC_UE_INT_CAUSE_F)
56d36be4
DM
4681 dev_alert(adapter->pdev_dev,
4682 "%s uncorrectable ECC data error\n", name[idx]);
4683
4684 t4_write_reg(adapter, addr, v);
89c3a86c 4685 if (v & (PERR_INT_CAUSE_F | ECC_UE_INT_CAUSE_F))
56d36be4
DM
4686 t4_fatal_err(adapter);
4687}
4688
4689/*
4690 * MA interrupt handler.
4691 */
4692static void ma_intr_handler(struct adapter *adap)
4693{
89c3a86c 4694 u32 v, status = t4_read_reg(adap, MA_INT_CAUSE_A);
56d36be4 4695
89c3a86c 4696 if (status & MEM_PERR_INT_CAUSE_F) {
56d36be4
DM
4697 dev_alert(adap->pdev_dev,
4698 "MA parity error, parity status %#x\n",
89c3a86c 4699 t4_read_reg(adap, MA_PARITY_ERROR_STATUS1_A));
9bb59b96
HS
4700 if (is_t5(adap->params.chip))
4701 dev_alert(adap->pdev_dev,
4702 "MA parity error, parity status %#x\n",
4703 t4_read_reg(adap,
89c3a86c 4704 MA_PARITY_ERROR_STATUS2_A));
9bb59b96 4705 }
89c3a86c
HS
4706 if (status & MEM_WRAP_INT_CAUSE_F) {
4707 v = t4_read_reg(adap, MA_INT_WRAP_STATUS_A);
56d36be4
DM
4708 dev_alert(adap->pdev_dev, "MA address wrap-around error by "
4709 "client %u to address %#x\n",
89c3a86c
HS
4710 MEM_WRAP_CLIENT_NUM_G(v),
4711 MEM_WRAP_ADDRESS_G(v) << 4);
56d36be4 4712 }
89c3a86c 4713 t4_write_reg(adap, MA_INT_CAUSE_A, status);
56d36be4
DM
4714 t4_fatal_err(adap);
4715}
4716
4717/*
4718 * SMB interrupt handler.
4719 */
4720static void smb_intr_handler(struct adapter *adap)
4721{
005b5717 4722 static const struct intr_info smb_intr_info[] = {
0d804338
HS
4723 { MSTTXFIFOPARINT_F, "SMB master Tx FIFO parity error", -1, 1 },
4724 { MSTRXFIFOPARINT_F, "SMB master Rx FIFO parity error", -1, 1 },
4725 { SLVFIFOPARINT_F, "SMB slave FIFO parity error", -1, 1 },
56d36be4
DM
4726 { 0 }
4727 };
4728
0d804338 4729 if (t4_handle_intr_status(adap, SMB_INT_CAUSE_A, smb_intr_info))
56d36be4
DM
4730 t4_fatal_err(adap);
4731}
4732
4733/*
4734 * NC-SI interrupt handler.
4735 */
4736static void ncsi_intr_handler(struct adapter *adap)
4737{
005b5717 4738 static const struct intr_info ncsi_intr_info[] = {
0d804338
HS
4739 { CIM_DM_PRTY_ERR_F, "NC-SI CIM parity error", -1, 1 },
4740 { MPS_DM_PRTY_ERR_F, "NC-SI MPS parity error", -1, 1 },
4741 { TXFIFO_PRTY_ERR_F, "NC-SI Tx FIFO parity error", -1, 1 },
4742 { RXFIFO_PRTY_ERR_F, "NC-SI Rx FIFO parity error", -1, 1 },
56d36be4
DM
4743 { 0 }
4744 };
4745
0d804338 4746 if (t4_handle_intr_status(adap, NCSI_INT_CAUSE_A, ncsi_intr_info))
56d36be4
DM
4747 t4_fatal_err(adap);
4748}
4749
4750/*
4751 * XGMAC interrupt handler.
4752 */
4753static void xgmac_intr_handler(struct adapter *adap, int port)
4754{
0a57a536
SR
4755 u32 v, int_cause_reg;
4756
d14807dd 4757 if (is_t4(adap->params.chip))
0d804338 4758 int_cause_reg = PORT_REG(port, XGMAC_PORT_INT_CAUSE_A);
0a57a536 4759 else
0d804338 4760 int_cause_reg = T5_PORT_REG(port, MAC_PORT_INT_CAUSE_A);
0a57a536
SR
4761
4762 v = t4_read_reg(adap, int_cause_reg);
56d36be4 4763
0d804338 4764 v &= TXFIFO_PRTY_ERR_F | RXFIFO_PRTY_ERR_F;
56d36be4
DM
4765 if (!v)
4766 return;
4767
0d804338 4768 if (v & TXFIFO_PRTY_ERR_F)
56d36be4
DM
4769 dev_alert(adap->pdev_dev, "XGMAC %d Tx FIFO parity error\n",
4770 port);
0d804338 4771 if (v & RXFIFO_PRTY_ERR_F)
56d36be4
DM
4772 dev_alert(adap->pdev_dev, "XGMAC %d Rx FIFO parity error\n",
4773 port);
0d804338 4774 t4_write_reg(adap, PORT_REG(port, XGMAC_PORT_INT_CAUSE_A), v);
56d36be4
DM
4775 t4_fatal_err(adap);
4776}
4777
4778/*
4779 * PL interrupt handler.
4780 */
4781static void pl_intr_handler(struct adapter *adap)
4782{
005b5717 4783 static const struct intr_info pl_intr_info[] = {
0d804338
HS
4784 { FATALPERR_F, "T4 fatal parity error", -1, 1 },
4785 { PERRVFID_F, "PL VFID_MAP parity error", -1, 1 },
56d36be4
DM
4786 { 0 }
4787 };
4788
0d804338 4789 if (t4_handle_intr_status(adap, PL_PL_INT_CAUSE_A, pl_intr_info))
56d36be4
DM
4790 t4_fatal_err(adap);
4791}
4792
0d804338
HS
4793#define PF_INTR_MASK (PFSW_F)
4794#define GLBL_INTR_MASK (CIM_F | MPS_F | PL_F | PCIE_F | MC_F | EDC0_F | \
4795 EDC1_F | LE_F | TP_F | MA_F | PM_TX_F | PM_RX_F | ULP_RX_F | \
38b6ec50 4796 CPL_SWITCH_F | SGE_F | ULP_TX_F | SF_F)
56d36be4
DM
4797
4798/**
4799 * t4_slow_intr_handler - control path interrupt handler
4800 * @adapter: the adapter
4801 *
4802 * T4 interrupt handler for non-data global interrupt events, e.g., errors.
4803 * The designation 'slow' is because it involves register reads, while
4804 * data interrupts typically don't involve any MMIOs.
4805 */
4806int t4_slow_intr_handler(struct adapter *adapter)
4807{
0d804338 4808 u32 cause = t4_read_reg(adapter, PL_INT_CAUSE_A);
56d36be4
DM
4809
4810 if (!(cause & GLBL_INTR_MASK))
4811 return 0;
0d804338 4812 if (cause & CIM_F)
56d36be4 4813 cim_intr_handler(adapter);
0d804338 4814 if (cause & MPS_F)
56d36be4 4815 mps_intr_handler(adapter);
0d804338 4816 if (cause & NCSI_F)
56d36be4 4817 ncsi_intr_handler(adapter);
0d804338 4818 if (cause & PL_F)
56d36be4 4819 pl_intr_handler(adapter);
0d804338 4820 if (cause & SMB_F)
56d36be4 4821 smb_intr_handler(adapter);
0d804338 4822 if (cause & XGMAC0_F)
56d36be4 4823 xgmac_intr_handler(adapter, 0);
0d804338 4824 if (cause & XGMAC1_F)
56d36be4 4825 xgmac_intr_handler(adapter, 1);
0d804338 4826 if (cause & XGMAC_KR0_F)
56d36be4 4827 xgmac_intr_handler(adapter, 2);
0d804338 4828 if (cause & XGMAC_KR1_F)
56d36be4 4829 xgmac_intr_handler(adapter, 3);
0d804338 4830 if (cause & PCIE_F)
56d36be4 4831 pcie_intr_handler(adapter);
0d804338 4832 if (cause & MC_F)
56d36be4 4833 mem_intr_handler(adapter, MEM_MC);
3ccc6cf7 4834 if (is_t5(adapter->params.chip) && (cause & MC1_F))
822dd8a8 4835 mem_intr_handler(adapter, MEM_MC1);
0d804338 4836 if (cause & EDC0_F)
56d36be4 4837 mem_intr_handler(adapter, MEM_EDC0);
0d804338 4838 if (cause & EDC1_F)
56d36be4 4839 mem_intr_handler(adapter, MEM_EDC1);
0d804338 4840 if (cause & LE_F)
56d36be4 4841 le_intr_handler(adapter);
0d804338 4842 if (cause & TP_F)
56d36be4 4843 tp_intr_handler(adapter);
0d804338 4844 if (cause & MA_F)
56d36be4 4845 ma_intr_handler(adapter);
0d804338 4846 if (cause & PM_TX_F)
56d36be4 4847 pmtx_intr_handler(adapter);
0d804338 4848 if (cause & PM_RX_F)
56d36be4 4849 pmrx_intr_handler(adapter);
0d804338 4850 if (cause & ULP_RX_F)
56d36be4 4851 ulprx_intr_handler(adapter);
0d804338 4852 if (cause & CPL_SWITCH_F)
56d36be4 4853 cplsw_intr_handler(adapter);
0d804338 4854 if (cause & SGE_F)
56d36be4 4855 sge_intr_handler(adapter);
0d804338 4856 if (cause & ULP_TX_F)
56d36be4
DM
4857 ulptx_intr_handler(adapter);
4858
4859 /* Clear the interrupts just processed for which we are the master. */
0d804338
HS
4860 t4_write_reg(adapter, PL_INT_CAUSE_A, cause & GLBL_INTR_MASK);
4861 (void)t4_read_reg(adapter, PL_INT_CAUSE_A); /* flush */
56d36be4
DM
4862 return 1;
4863}
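
/* Illustrative sketch only: a platform interrupt service routine would
 * typically just delegate non-data interrupts to the handler above. The
 * wrapper below is an assumption for the example and is not driver code.
 */
#if 0	/* example, not compiled */
static irqreturn_t example_slow_intr(int irq, void *cookie)
{
	struct adapter *adap = cookie;

	return t4_slow_intr_handler(adap) ? IRQ_HANDLED : IRQ_NONE;
}
#endif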
4864
4865/**
4866 * t4_intr_enable - enable interrupts
4867 * @adapter: the adapter whose interrupts should be enabled
4868 *
4869 * Enable PF-specific interrupts for the calling function and the top-level
4870 * interrupt concentrator for global interrupts. Interrupts are already
 4872 * enabled at each module; here we just enable the roots of the interrupt
4872 * hierarchies.
4873 *
4874 * Note: this function should be called only when the driver manages
4875 * non PF-specific interrupts from the various HW modules. Only one PCI
4876 * function at a time should be doing this.
4877 */
4878void t4_intr_enable(struct adapter *adapter)
4879{
3ccc6cf7 4880 u32 val = 0;
d86bd29e
HS
4881 u32 whoami = t4_read_reg(adapter, PL_WHOAMI_A);
4882 u32 pf = CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5 ?
4883 SOURCEPF_G(whoami) : T6_SOURCEPF_G(whoami);
56d36be4 4884
3ccc6cf7
HS
4885 if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5)
4886 val = ERR_DROPPED_DB_F | ERR_EGR_CTXT_PRIO_F | DBFIFO_HP_INT_F;
f612b815
HS
4887 t4_write_reg(adapter, SGE_INT_ENABLE3_A, ERR_CPL_EXCEED_IQE_SIZE_F |
4888 ERR_INVALID_CIDX_INC_F | ERR_CPL_OPCODE_0_F |
3ccc6cf7 4889 ERR_DATA_CPL_ON_HIGH_QID1_F | INGRESS_SIZE_ERR_F |
f612b815
HS
4890 ERR_DATA_CPL_ON_HIGH_QID0_F | ERR_BAD_DB_PIDX3_F |
4891 ERR_BAD_DB_PIDX2_F | ERR_BAD_DB_PIDX1_F |
4892 ERR_BAD_DB_PIDX0_F | ERR_ING_CTXT_PRIO_F |
3ccc6cf7 4893 DBFIFO_LP_INT_F | EGRESS_SIZE_ERR_F | val);
0d804338
HS
4894 t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE_A), PF_INTR_MASK);
4895 t4_set_reg_field(adapter, PL_INT_MAP0_A, 0, 1 << pf);
56d36be4
DM
4896}
4897
4898/**
4899 * t4_intr_disable - disable interrupts
4900 * @adapter: the adapter whose interrupts should be disabled
4901 *
4902 * Disable interrupts. We only disable the top-level interrupt
4903 * concentrators. The caller must be a PCI function managing global
4904 * interrupts.
4905 */
4906void t4_intr_disable(struct adapter *adapter)
4907{
025d0973
GP
4908 u32 whoami, pf;
4909
4910 if (pci_channel_offline(adapter->pdev))
4911 return;
4912
4913 whoami = t4_read_reg(adapter, PL_WHOAMI_A);
4914 pf = CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5 ?
d86bd29e 4915 SOURCEPF_G(whoami) : T6_SOURCEPF_G(whoami);
56d36be4 4916
0d804338
HS
4917 t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE_A), 0);
4918 t4_set_reg_field(adapter, PL_INT_MAP0_A, 1 << pf, 0);
56d36be4
DM
4919}
4920
f988008a
GG
4921unsigned int t4_chip_rss_size(struct adapter *adap)
4922{
4923 if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
4924 return RSS_NENTRIES;
4925 else
4926 return T6_RSS_NENTRIES;
4927}
4928
56d36be4
DM
4929/**
4930 * t4_config_rss_range - configure a portion of the RSS mapping table
4931 * @adapter: the adapter
4932 * @mbox: mbox to use for the FW command
4933 * @viid: virtual interface whose RSS subtable is to be written
4934 * @start: start entry in the table to write
4935 * @n: how many table entries to write
4936 * @rspq: values for the response queue lookup table
4937 * @nrspq: number of values in @rspq
4938 *
4939 * Programs the selected part of the VI's RSS mapping table with the
4940 * provided values. If @nrspq < @n the supplied values are used repeatedly
4941 * until the full table range is populated.
4942 *
4943 * The caller must ensure the values in @rspq are in the range allowed for
4944 * @viid.
4945 */
4946int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
4947 int start, int n, const u16 *rspq, unsigned int nrspq)
4948{
4949 int ret;
4950 const u16 *rsp = rspq;
4951 const u16 *rsp_end = rspq + nrspq;
4952 struct fw_rss_ind_tbl_cmd cmd;
4953
4954 memset(&cmd, 0, sizeof(cmd));
f404f80c 4955 cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_RSS_IND_TBL_CMD) |
e2ac9628 4956 FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
b2e1a3f0 4957 FW_RSS_IND_TBL_CMD_VIID_V(viid));
f404f80c 4958 cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
56d36be4
DM
4959
4960 /* each fw_rss_ind_tbl_cmd takes up to 32 entries */
4961 while (n > 0) {
4962 int nq = min(n, 32);
4963 __be32 *qp = &cmd.iq0_to_iq2;
4964
f404f80c
HS
4965 cmd.niqid = cpu_to_be16(nq);
4966 cmd.startidx = cpu_to_be16(start);
56d36be4
DM
4967
4968 start += nq;
4969 n -= nq;
4970
4971 while (nq > 0) {
4972 unsigned int v;
4973
b2e1a3f0 4974 v = FW_RSS_IND_TBL_CMD_IQ0_V(*rsp);
56d36be4
DM
4975 if (++rsp >= rsp_end)
4976 rsp = rspq;
b2e1a3f0 4977 v |= FW_RSS_IND_TBL_CMD_IQ1_V(*rsp);
56d36be4
DM
4978 if (++rsp >= rsp_end)
4979 rsp = rspq;
b2e1a3f0 4980 v |= FW_RSS_IND_TBL_CMD_IQ2_V(*rsp);
56d36be4
DM
4981 if (++rsp >= rsp_end)
4982 rsp = rspq;
4983
f404f80c 4984 *qp++ = cpu_to_be32(v);
56d36be4
DM
4985 nq -= 3;
4986 }
4987
4988 ret = t4_wr_mbox(adapter, mbox, &cmd, sizeof(cmd), NULL);
4989 if (ret)
4990 return ret;
4991 }
4992 return 0;
4993}
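
/* Illustrative sketch only: programming a VI's indirection table from a
 * small set of response-queue IDs, which repeat across the range as
 * described above when nrspq < n. The table length, queue IDs, 'viid',
 * 'adap' and 'ret' are assumptions for the example.
 */
#if 0	/* example, not compiled */
	u16 rspq[4] = { 16, 17, 18, 19 };	/* ingress queue IDs */

	ret = t4_config_rss_range(adap, adap->mbox, viid, 0, 128,
				  rspq, ARRAY_SIZE(rspq));
#endif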
4994
4995/**
4996 * t4_config_glbl_rss - configure the global RSS mode
4997 * @adapter: the adapter
4998 * @mbox: mbox to use for the FW command
4999 * @mode: global RSS mode
5000 * @flags: mode-specific flags
5001 *
5002 * Sets the global RSS mode.
5003 */
5004int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode,
5005 unsigned int flags)
5006{
5007 struct fw_rss_glb_config_cmd c;
5008
5009 memset(&c, 0, sizeof(c));
f404f80c
HS
5010 c.op_to_write = cpu_to_be32(FW_CMD_OP_V(FW_RSS_GLB_CONFIG_CMD) |
5011 FW_CMD_REQUEST_F | FW_CMD_WRITE_F);
5012 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
56d36be4 5013 if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_MANUAL) {
f404f80c
HS
5014 c.u.manual.mode_pkd =
5015 cpu_to_be32(FW_RSS_GLB_CONFIG_CMD_MODE_V(mode));
56d36be4
DM
5016 } else if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL) {
5017 c.u.basicvirtual.mode_pkd =
f404f80c
HS
5018 cpu_to_be32(FW_RSS_GLB_CONFIG_CMD_MODE_V(mode));
5019 c.u.basicvirtual.synmapen_to_hashtoeplitz = cpu_to_be32(flags);
56d36be4
DM
5020 } else
5021 return -EINVAL;
5022 return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
5023}
5024
c035e183
HS
5025/**
5026 * t4_config_vi_rss - configure per VI RSS settings
5027 * @adapter: the adapter
5028 * @mbox: mbox to use for the FW command
5029 * @viid: the VI id
5030 * @flags: RSS flags
5031 * @defq: id of the default RSS queue for the VI.
5032 *
5033 * Configures VI-specific RSS properties.
5034 */
5035int t4_config_vi_rss(struct adapter *adapter, int mbox, unsigned int viid,
5036 unsigned int flags, unsigned int defq)
5037{
5038 struct fw_rss_vi_config_cmd c;
5039
5040 memset(&c, 0, sizeof(c));
5041 c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_RSS_VI_CONFIG_CMD) |
5042 FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
5043 FW_RSS_VI_CONFIG_CMD_VIID_V(viid));
5044 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
5045 c.u.basicvirtual.defaultq_to_udpen = cpu_to_be32(flags |
5046 FW_RSS_VI_CONFIG_CMD_DEFAULTQ_V(defq));
5047 return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
5048}
5049
688ea5fe
HS
5050/* Read an RSS table row */
5051static int rd_rss_row(struct adapter *adap, int row, u32 *val)
5052{
5053 t4_write_reg(adap, TP_RSS_LKP_TABLE_A, 0xfff00000 | row);
5054 return t4_wait_op_done_val(adap, TP_RSS_LKP_TABLE_A, LKPTBLROWVLD_F, 1,
5055 5, 0, val);
5056}
5057
5058/**
5059 * t4_read_rss - read the contents of the RSS mapping table
5060 * @adapter: the adapter
5061 * @map: holds the contents of the RSS mapping table
5062 *
5063 * Reads the contents of the RSS hash->queue mapping table.
5064 */
5065int t4_read_rss(struct adapter *adapter, u16 *map)
5066{
f988008a 5067 int i, ret, nentries;
688ea5fe 5068 u32 val;
688ea5fe 5069
f988008a
GG
5070 nentries = t4_chip_rss_size(adapter);
5071 for (i = 0; i < nentries / 2; ++i) {
688ea5fe
HS
5072 ret = rd_rss_row(adapter, i, &val);
5073 if (ret)
5074 return ret;
5075 *map++ = LKPTBLQUEUE0_G(val);
5076 *map++ = LKPTBLQUEUE1_G(val);
5077 }
5078 return 0;
5079}
5080
0b2c2a93
HS
5081static unsigned int t4_use_ldst(struct adapter *adap)
5082{
ebb5568f 5083 return (adap->flags & FW_OK) && !adap->use_bd;
0b2c2a93
HS
5084}
5085
c1e9af0c 5086/**
5ccf9d04
RL
5087 * t4_tp_fw_ldst_rw - Access TP indirect register through LDST
5088 * @adap: the adapter
5089 * @cmd: TP fw ldst address space type
5090 * @vals: where the indirect register values are stored/written
5091 * @nregs: how many indirect registers to read/write
5092 * @start_idx: index of first indirect register to read/write
5093 * @rw: Read (1) or Write (0)
5094 * @sleep_ok: if true we may sleep while awaiting command completion
c1e9af0c 5095 *
5ccf9d04 5096 * Access TP indirect registers through LDST
c1e9af0c 5097 */
5ccf9d04
RL
5098static int t4_tp_fw_ldst_rw(struct adapter *adap, int cmd, u32 *vals,
5099 unsigned int nregs, unsigned int start_index,
5100 unsigned int rw, bool sleep_ok)
c1e9af0c 5101{
5ccf9d04
RL
5102 int ret = 0;
5103 unsigned int i;
c1e9af0c
HS
5104 struct fw_ldst_cmd c;
5105
5ccf9d04 5106 for (i = 0; i < nregs; i++) {
c1e9af0c
HS
5107 memset(&c, 0, sizeof(c));
5108 c.op_to_addrspace = cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) |
5109 FW_CMD_REQUEST_F |
5110 (rw ? FW_CMD_READ_F :
5111 FW_CMD_WRITE_F) |
5112 FW_LDST_CMD_ADDRSPACE_V(cmd));
5113 c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
5114
5115 c.u.addrval.addr = cpu_to_be32(start_index + i);
5116 c.u.addrval.val = rw ? 0 : cpu_to_be32(vals[i]);
5ccf9d04
RL
5117 ret = t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c,
5118 sleep_ok);
5119 if (ret)
5120 return ret;
5121
5122 if (rw)
c1e9af0c
HS
5123 vals[i] = be32_to_cpu(c.u.addrval.val);
5124 }
5ccf9d04
RL
5125 return 0;
5126}
5127
5128/**
5129 * t4_tp_indirect_rw - Read/Write TP indirect register through LDST or backdoor
5130 * @adap: the adapter
5131 * @reg_addr: Address Register
5132 * @reg_data: Data register
5133 * @buff: where the indirect register values are stored/written
5134 * @nregs: how many indirect registers to read/write
5135 * @start_index: index of first indirect register to read/write
5136 * @rw: READ(1) or WRITE(0)
5137 * @sleep_ok: if true we may sleep while awaiting command completion
5138 *
5139 * Read/Write TP indirect registers through LDST if possible.
5140 * Else, use backdoor access
5141 **/
5142static void t4_tp_indirect_rw(struct adapter *adap, u32 reg_addr, u32 reg_data,
5143 u32 *buff, u32 nregs, u32 start_index, int rw,
5144 bool sleep_ok)
5145{
5146 int rc = -EINVAL;
5147 int cmd;
5148
5149 switch (reg_addr) {
5150 case TP_PIO_ADDR_A:
5151 cmd = FW_LDST_ADDRSPC_TP_PIO;
5152 break;
4359cf33
RL
5153 case TP_TM_PIO_ADDR_A:
5154 cmd = FW_LDST_ADDRSPC_TP_TM_PIO;
5155 break;
5ccf9d04
RL
5156 case TP_MIB_INDEX_A:
5157 cmd = FW_LDST_ADDRSPC_TP_MIB;
5158 break;
5159 default:
5160 goto indirect_access;
5161 }
5162
5163 if (t4_use_ldst(adap))
5164 rc = t4_tp_fw_ldst_rw(adap, cmd, buff, nregs, start_index, rw,
5165 sleep_ok);
5166
5167indirect_access:
5168
5169 if (rc) {
5170 if (rw)
5171 t4_read_indirect(adap, reg_addr, reg_data, buff, nregs,
5172 start_index);
5173 else
5174 t4_write_indirect(adap, reg_addr, reg_data, buff, nregs,
5175 start_index);
5176 }
5177}
5178
5179/**
5180 * t4_tp_pio_read - Read TP PIO registers
5181 * @adap: the adapter
5182 * @buff: where the indirect register values are written
5183 * @nregs: how many indirect registers to read
5184 * @start_index: index of first indirect register to read
5185 * @sleep_ok: if true we may sleep while awaiting command completion
5186 *
5187 * Read TP PIO Registers
5188 **/
5189void t4_tp_pio_read(struct adapter *adap, u32 *buff, u32 nregs,
5190 u32 start_index, bool sleep_ok)
5191{
5192 t4_tp_indirect_rw(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A, buff, nregs,
5193 start_index, 1, sleep_ok);
5194}
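
/* Illustrative sketch only: reading a block of TP PIO registers into a
 * local buffer, allowing the mailbox command to sleep. The register base
 * chosen here (the RSS secret key, also used below) is just one possible
 * starting index; 'adap' is assumed to be in scope.
 */
#if 0	/* example, not compiled */
	u32 tp_vals[10];

	t4_tp_pio_read(adap, tp_vals, ARRAY_SIZE(tp_vals),
		       TP_RSS_SECRET_KEY0_A, true);
#endif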
5195
5196/**
5197 * t4_tp_pio_write - Write TP PIO registers
5198 * @adap: the adapter
5199 * @buff: where the indirect register values are stored
5200 * @nregs: how many indirect registers to write
5201 * @start_index: index of first indirect register to write
5202 * @sleep_ok: if true we may sleep while awaiting command completion
5203 *
5204 * Write TP PIO Registers
5205 **/
5206static void t4_tp_pio_write(struct adapter *adap, u32 *buff, u32 nregs,
5207 u32 start_index, bool sleep_ok)
5208{
5209 t4_tp_indirect_rw(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A, buff, nregs,
5210 start_index, 0, sleep_ok);
5211}
5212
4359cf33
RL
5213/**
5214 * t4_tp_tm_pio_read - Read TP TM PIO registers
5215 * @adap: the adapter
5216 * @buff: where the indirect register values are written
5217 * @nregs: how many indirect registers to read
5218 * @start_index: index of first indirect register to read
5219 * @sleep_ok: if true we may sleep while awaiting command completion
5220 *
5221 * Read TP TM PIO Registers
5222 **/
5223void t4_tp_tm_pio_read(struct adapter *adap, u32 *buff, u32 nregs,
5224 u32 start_index, bool sleep_ok)
5225{
5226 t4_tp_indirect_rw(adap, TP_TM_PIO_ADDR_A, TP_TM_PIO_DATA_A, buff,
5227 nregs, start_index, 1, sleep_ok);
5228}
5229
5ccf9d04
RL
5230/**
5231 * t4_tp_mib_read - Read TP MIB registers
5232 * @adap: the adapter
5233 * @buff: where the indirect register values are written
5234 * @nregs: how many indirect registers to read
5235 * @start_index: index of first indirect register to read
5236 * @sleep_ok: if true we may sleep while awaiting command completion
5237 *
5238 * Read TP MIB Registers
5239 **/
5240void t4_tp_mib_read(struct adapter *adap, u32 *buff, u32 nregs, u32 start_index,
5241 bool sleep_ok)
5242{
5243 t4_tp_indirect_rw(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, buff, nregs,
5244 start_index, 1, sleep_ok);
c1e9af0c
HS
5245}
5246
688ea5fe
HS
5247/**
5248 * t4_read_rss_key - read the global RSS key
5249 * @adap: the adapter
5250 * @key: 10-entry array holding the 320-bit RSS key
5ccf9d04 5251 * @sleep_ok: if true we may sleep while awaiting command completion
688ea5fe
HS
5252 *
5253 * Reads the global 320-bit RSS key.
5254 */
5ccf9d04 5255void t4_read_rss_key(struct adapter *adap, u32 *key, bool sleep_ok)
688ea5fe 5256{
5ccf9d04 5257 t4_tp_pio_read(adap, key, 10, TP_RSS_SECRET_KEY0_A, sleep_ok);
688ea5fe
HS
5258}
5259
5260/**
5261 * t4_write_rss_key - program one of the RSS keys
5262 * @adap: the adapter
5263 * @key: 10-entry array holding the 320-bit RSS key
5264 * @idx: which RSS key to write
5ccf9d04 5265 * @sleep_ok: if true we may sleep while awaiting command completion
688ea5fe
HS
5266 *
5267 * Writes one of the RSS keys with the given 320-bit value. If @idx is
5268 * 0..15 the corresponding entry in the RSS key table is written,
5269 * otherwise the global RSS key is written.
5270 */
5ccf9d04
RL
5271void t4_write_rss_key(struct adapter *adap, const u32 *key, int idx,
5272 bool sleep_ok)
688ea5fe 5273{
3ccc6cf7
HS
5274 u8 rss_key_addr_cnt = 16;
5275 u32 vrt = t4_read_reg(adap, TP_RSS_CONFIG_VRT_A);
5276
5277 /* T6 and later: for KeyMode 3 (per-vf and per-vf scramble),
5278 * allows access to key addresses 16-63 by using KeyWrAddrX
5279 * as index[5:4](upper 2) into key table
5280 */
5281 if ((CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5) &&
5282 (vrt & KEYEXTEND_F) && (KEYMODE_G(vrt) == 3))
5283 rss_key_addr_cnt = 32;
5284
5ccf9d04 5285 t4_tp_pio_write(adap, (void *)key, 10, TP_RSS_SECRET_KEY0_A, sleep_ok);
3ccc6cf7
HS
5286
5287 if (idx >= 0 && idx < rss_key_addr_cnt) {
5288 if (rss_key_addr_cnt > 16)
5289 t4_write_reg(adap, TP_RSS_CONFIG_VRT_A,
5290 KEYWRADDRX_V(idx >> 4) |
5291 T6_VFWRADDR_V(idx) | KEYWREN_F);
5292 else
5293 t4_write_reg(adap, TP_RSS_CONFIG_VRT_A,
5294 KEYWRADDR_V(idx) | KEYWREN_F);
5295 }
688ea5fe
HS
5296}
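/* Usage sketch (editor's illustration, not part of the driver): re-keying
 * RSS typically pairs the two helpers above. From a sleepable context with
 * a valid "adap":
 *
 *	u32 key[10];
 *
 *	t4_read_rss_key(adap, key, true);
 *	(modify key as needed)
 *	t4_write_rss_key(adap, key, -1, true);
 *
 * A negative @idx selects the global RSS key, per the comment above.
 */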
5297
5298/**
5299 * t4_read_rss_pf_config - read PF RSS Configuration Table
5300 * @adapter: the adapter
5301 * @index: the entry in the PF RSS table to read
5302 * @valp: where to store the returned value
5ccf9d04 5303 * @sleep_ok: if true we may sleep while awaiting command completion
688ea5fe
HS
5304 *
5305 * Reads the PF RSS Configuration Table at the specified index and returns
5306 * the value found there.
5307 */
5308void t4_read_rss_pf_config(struct adapter *adapter, unsigned int index,
5ccf9d04 5309 u32 *valp, bool sleep_ok)
688ea5fe 5310{
5ccf9d04 5311 t4_tp_pio_read(adapter, valp, 1, TP_RSS_PF0_CONFIG_A + index, sleep_ok);
688ea5fe
HS
5312}
5313
5314/**
5315 * t4_read_rss_vf_config - read VF RSS Configuration Table
5316 * @adapter: the adapter
5317 * @index: the entry in the VF RSS table to read
5318 * @vfl: where to store the returned VFL
5319 * @vfh: where to store the returned VFH
5ccf9d04 5320 * @sleep_ok: if true we may sleep while awaiting command completion
688ea5fe
HS
5321 *
5322 * Reads the VF RSS Configuration Table at the specified index and returns
5323 * the (VFL, VFH) values found there.
5324 */
5325void t4_read_rss_vf_config(struct adapter *adapter, unsigned int index,
5ccf9d04 5326 u32 *vfl, u32 *vfh, bool sleep_ok)
688ea5fe
HS
5327{
5328 u32 vrt, mask, data;
5329
3ccc6cf7
HS
5330 if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5) {
5331 mask = VFWRADDR_V(VFWRADDR_M);
5332 data = VFWRADDR_V(index);
5333 } else {
5334 mask = T6_VFWRADDR_V(T6_VFWRADDR_M);
5335 data = T6_VFWRADDR_V(index);
5336 }
688ea5fe
HS
5337
5338 /* Request that the index'th VF Table values be read into VFL/VFH.
5339 */
5340 vrt = t4_read_reg(adapter, TP_RSS_CONFIG_VRT_A);
5341 vrt &= ~(VFRDRG_F | VFWREN_F | KEYWREN_F | mask);
5342 vrt |= data | VFRDEN_F;
5343 t4_write_reg(adapter, TP_RSS_CONFIG_VRT_A, vrt);
5344
5345 /* Grab the VFL/VFH values ...
5346 */
5ccf9d04
RL
5347 t4_tp_pio_read(adapter, vfl, 1, TP_RSS_VFL_CONFIG_A, sleep_ok);
5348 t4_tp_pio_read(adapter, vfh, 1, TP_RSS_VFH_CONFIG_A, sleep_ok);
688ea5fe
HS
5349}
5350
5351/**
5352 * t4_read_rss_pf_map - read PF RSS Map
5353 * @adapter: the adapter
5ccf9d04 5354 * @sleep_ok: if true we may sleep while awaiting command completion
688ea5fe
HS
5355 *
5356 * Reads the PF RSS Map register and returns its value.
5357 */
5ccf9d04 5358u32 t4_read_rss_pf_map(struct adapter *adapter, bool sleep_ok)
688ea5fe
HS
5359{
5360 u32 pfmap;
5361
5ccf9d04 5362 t4_tp_pio_read(adapter, &pfmap, 1, TP_RSS_PF_MAP_A, sleep_ok);
688ea5fe
HS
5363 return pfmap;
5364}
5365
5366/**
5367 * t4_read_rss_pf_mask - read PF RSS Mask
5368 * @adapter: the adapter
5ccf9d04 5369 * @sleep_ok: if true we may sleep while awaiting command completion
688ea5fe
HS
5370 *
5371 * Reads the PF RSS Mask register and returns its value.
5372 */
5ccf9d04 5373u32 t4_read_rss_pf_mask(struct adapter *adapter, bool sleep_ok)
688ea5fe
HS
5374{
5375 u32 pfmask;
5376
5ccf9d04 5377 t4_tp_pio_read(adapter, &pfmask, 1, TP_RSS_PF_MSK_A, sleep_ok);
688ea5fe
HS
5378 return pfmask;
5379}
5380
56d36be4
DM
5381/**
5382 * t4_tp_get_tcp_stats - read TP's TCP MIB counters
5383 * @adap: the adapter
5384 * @v4: holds the TCP/IP counter values
5385 * @v6: holds the TCP/IPv6 counter values
5ccf9d04 5386 * @sleep_ok: if true we may sleep while awaiting command completion
56d36be4
DM
5387 *
5388 * Returns the values of TP's TCP/IP and TCP/IPv6 MIB counters.
5389 * Either @v4 or @v6 may be %NULL to skip the corresponding stats.
5390 */
5391void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4,
5ccf9d04 5392 struct tp_tcp_stats *v6, bool sleep_ok)
56d36be4 5393{
837e4a42 5394 u32 val[TP_MIB_TCP_RXT_SEG_LO_A - TP_MIB_TCP_OUT_RST_A + 1];
56d36be4 5395
837e4a42 5396#define STAT_IDX(x) ((TP_MIB_TCP_##x##_A) - TP_MIB_TCP_OUT_RST_A)
56d36be4
DM
5397#define STAT(x) val[STAT_IDX(x)]
5398#define STAT64(x) (((u64)STAT(x##_HI) << 32) | STAT(x##_LO))
5399
5400 if (v4) {
5ccf9d04
RL
5401 t4_tp_mib_read(adap, val, ARRAY_SIZE(val),
5402 TP_MIB_TCP_OUT_RST_A, sleep_ok);
a4cfd929
HS
5403 v4->tcp_out_rsts = STAT(OUT_RST);
5404 v4->tcp_in_segs = STAT64(IN_SEG);
5405 v4->tcp_out_segs = STAT64(OUT_SEG);
5406 v4->tcp_retrans_segs = STAT64(RXT_SEG);
56d36be4
DM
5407 }
5408 if (v6) {
5ccf9d04
RL
5409 t4_tp_mib_read(adap, val, ARRAY_SIZE(val),
5410 TP_MIB_TCP_V6OUT_RST_A, sleep_ok);
a4cfd929
HS
5411 v6->tcp_out_rsts = STAT(OUT_RST);
5412 v6->tcp_in_segs = STAT64(IN_SEG);
5413 v6->tcp_out_segs = STAT64(OUT_SEG);
5414 v6->tcp_retrans_segs = STAT64(RXT_SEG);
56d36be4
DM
5415 }
5416#undef STAT64
5417#undef STAT
5418#undef STAT_IDX
5419}
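/* Usage sketch (editor's illustration, not part of the driver): a caller
 * interested in combined IPv4/IPv6 TCP activity might do, with "adap"
 * valid and sleeping allowed:
 *
 *	struct tp_tcp_stats v4, v6;
 *	u64 out_segs, retrans_segs;
 *
 *	t4_tp_get_tcp_stats(adap, &v4, &v6, true);
 *	out_segs = v4.tcp_out_segs + v6.tcp_out_segs;
 *	retrans_segs = v4.tcp_retrans_segs + v6.tcp_retrans_segs;
 */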
5420
a4cfd929
HS
5421/**
5422 * t4_tp_get_err_stats - read TP's error MIB counters
5423 * @adap: the adapter
5424 * @st: holds the counter values
5ccf9d04 5425 * @sleep_ok: if true we may sleep while awaiting command completion
a4cfd929
HS
5426 *
5427 * Returns the values of TP's error counters.
5428 */
5ccf9d04
RL
5429void t4_tp_get_err_stats(struct adapter *adap, struct tp_err_stats *st,
5430 bool sleep_ok)
a4cfd929 5431{
df459ebc
HS
5432 int nchan = adap->params.arch.nchan;
5433
5ccf9d04
RL
5434 t4_tp_mib_read(adap, st->mac_in_errs, nchan, TP_MIB_MAC_IN_ERR_0_A,
5435 sleep_ok);
5436 t4_tp_mib_read(adap, st->hdr_in_errs, nchan, TP_MIB_HDR_IN_ERR_0_A,
5437 sleep_ok);
5438 t4_tp_mib_read(adap, st->tcp_in_errs, nchan, TP_MIB_TCP_IN_ERR_0_A,
5439 sleep_ok);
5440 t4_tp_mib_read(adap, st->tnl_cong_drops, nchan,
5441 TP_MIB_TNL_CNG_DROP_0_A, sleep_ok);
5442 t4_tp_mib_read(adap, st->ofld_chan_drops, nchan,
5443 TP_MIB_OFD_CHN_DROP_0_A, sleep_ok);
5444 t4_tp_mib_read(adap, st->tnl_tx_drops, nchan, TP_MIB_TNL_DROP_0_A,
5445 sleep_ok);
5446 t4_tp_mib_read(adap, st->ofld_vlan_drops, nchan,
5447 TP_MIB_OFD_VLN_DROP_0_A, sleep_ok);
5448 t4_tp_mib_read(adap, st->tcp6_in_errs, nchan,
5449 TP_MIB_TCP_V6IN_ERR_0_A, sleep_ok);
5450 t4_tp_mib_read(adap, &st->ofld_no_neigh, 2, TP_MIB_OFD_ARP_DROP_A,
5451 sleep_ok);
a4cfd929
HS
5452}
5453
a6222975
HS
5454/**
5455 * t4_tp_get_cpl_stats - read TP's CPL MIB counters
5456 * @adap: the adapter
5457 * @st: holds the counter values
5ccf9d04 5458 * @sleep_ok: if true we may sleep while awaiting command completion
a6222975
HS
5459 *
5460 * Returns the values of TP's CPL counters.
5461 */
5ccf9d04
RL
5462void t4_tp_get_cpl_stats(struct adapter *adap, struct tp_cpl_stats *st,
5463 bool sleep_ok)
a6222975 5464{
df459ebc
HS
5465 int nchan = adap->params.arch.nchan;
5466
5ccf9d04 5467 t4_tp_mib_read(adap, st->req, nchan, TP_MIB_CPL_IN_REQ_0_A, sleep_ok);
df459ebc 5468
5ccf9d04 5469 t4_tp_mib_read(adap, st->rsp, nchan, TP_MIB_CPL_OUT_RSP_0_A, sleep_ok);
a6222975
HS
5470}
5471
a4cfd929
HS
5472/**
5473 * t4_tp_get_rdma_stats - read TP's RDMA MIB counters
5474 * @adap: the adapter
5475 * @st: holds the counter values
5ccf9d04 5476 * @sleep_ok: if true we may sleep while awaiting command completion
a4cfd929
HS
5477 *
5478 * Returns the values of TP's RDMA counters.
5479 */
5ccf9d04
RL
5480void t4_tp_get_rdma_stats(struct adapter *adap, struct tp_rdma_stats *st,
5481 bool sleep_ok)
a4cfd929 5482{
5ccf9d04
RL
5483 t4_tp_mib_read(adap, &st->rqe_dfr_pkt, 2, TP_MIB_RQE_DFR_PKT_A,
5484 sleep_ok);
a4cfd929
HS
5485}
5486
a6222975
HS
5487/**
5488 * t4_get_fcoe_stats - read TP's FCoE MIB counters for a port
5489 * @adap: the adapter
5490 * @idx: the port index
5491 * @st: holds the counter values
5ccf9d04 5492 * @sleep_ok: if true we may sleep while awaiting command completion
a6222975
HS
5493 *
5494 * Returns the values of TP's FCoE counters for the selected port.
5495 */
5496void t4_get_fcoe_stats(struct adapter *adap, unsigned int idx,
5ccf9d04 5497 struct tp_fcoe_stats *st, bool sleep_ok)
a6222975
HS
5498{
5499 u32 val[2];
5500
5ccf9d04
RL
5501 t4_tp_mib_read(adap, &st->frames_ddp, 1, TP_MIB_FCOE_DDP_0_A + idx,
5502 sleep_ok);
5503
5504 t4_tp_mib_read(adap, &st->frames_drop, 1,
5505 TP_MIB_FCOE_DROP_0_A + idx, sleep_ok);
5506
5507 t4_tp_mib_read(adap, val, 2, TP_MIB_FCOE_BYTE_0_HI_A + 2 * idx,
5508 sleep_ok);
5509
a6222975
HS
5510 st->octets_ddp = ((u64)val[0] << 32) | val[1];
5511}
5512
a4cfd929
HS
5513/**
5514 * t4_get_usm_stats - read TP's non-TCP DDP MIB counters
5515 * @adap: the adapter
5516 * @st: holds the counter values
5ccf9d04 5517 * @sleep_ok: if true we may sleep while awaiting command completion
a4cfd929
HS
5518 *
5519 * Returns the values of TP's counters for non-TCP directly-placed packets.
5520 */
5ccf9d04
RL
5521void t4_get_usm_stats(struct adapter *adap, struct tp_usm_stats *st,
5522 bool sleep_ok)
a4cfd929
HS
5523{
5524 u32 val[4];
5525
5ccf9d04 5526 t4_tp_mib_read(adap, val, 4, TP_MIB_USM_PKTS_A, sleep_ok);
a4cfd929
HS
5527 st->frames = val[0];
5528 st->drops = val[1];
5529 st->octets = ((u64)val[2] << 32) | val[3];
5530}
5531
56d36be4
DM
5532/**
5533 * t4_read_mtu_tbl - returns the values in the HW path MTU table
5534 * @adap: the adapter
5535 * @mtus: where to store the MTU values
5536 * @mtu_log: where to store the MTU base-2 log (may be %NULL)
5537 *
5538 * Reads the HW path MTU table.
5539 */
5540void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log)
5541{
5542 u32 v;
5543 int i;
5544
5545 for (i = 0; i < NMTUS; ++i) {
837e4a42
HS
5546 t4_write_reg(adap, TP_MTU_TABLE_A,
5547 MTUINDEX_V(0xff) | MTUVALUE_V(i));
5548 v = t4_read_reg(adap, TP_MTU_TABLE_A);
5549 mtus[i] = MTUVALUE_G(v);
56d36be4 5550 if (mtu_log)
837e4a42 5551 mtu_log[i] = MTUWIDTH_G(v);
56d36be4
DM
5552 }
5553}
5554
bad43792
HS
5555/**
5556 * t4_read_cong_tbl - reads the congestion control table
5557 * @adap: the adapter
5558 * @incr: where to store the alpha values
5559 *
5560 * Reads the additive increments programmed into the HW congestion
5561 * control table.
5562 */
5563void t4_read_cong_tbl(struct adapter *adap, u16 incr[NMTUS][NCCTRL_WIN])
5564{
5565 unsigned int mtu, w;
5566
5567 for (mtu = 0; mtu < NMTUS; ++mtu)
5568 for (w = 0; w < NCCTRL_WIN; ++w) {
5569 t4_write_reg(adap, TP_CCTRL_TABLE_A,
5570 ROWINDEX_V(0xffff) | (mtu << 5) | w);
5571 incr[mtu][w] = (u16)t4_read_reg(adap,
5572 TP_CCTRL_TABLE_A) & 0x1fff;
5573 }
5574}
5575
636f9d37
VP
5576/**
5577 * t4_tp_wr_bits_indirect - set/clear bits in an indirect TP register
5578 * @adap: the adapter
5579 * @addr: the indirect TP register address
5580 * @mask: specifies the field within the register to modify
5581 * @val: new value for the field
5582 *
5583 * Sets a field of an indirect TP register to the given value.
5584 */
5585void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr,
5586 unsigned int mask, unsigned int val)
5587{
837e4a42
HS
5588 t4_write_reg(adap, TP_PIO_ADDR_A, addr);
5589 val |= t4_read_reg(adap, TP_PIO_DATA_A) & ~mask;
5590 t4_write_reg(adap, TP_PIO_DATA_A, val);
636f9d37
VP
5591}
5592
56d36be4
DM
5593/**
5594 * init_cong_ctrl - initialize congestion control parameters
5595 * @a: the alpha values for congestion control
5596 * @b: the beta values for congestion control
5597 *
5598 * Initialize the congestion control parameters.
5599 */
91744948 5600static void init_cong_ctrl(unsigned short *a, unsigned short *b)
56d36be4
DM
5601{
5602 a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1;
5603 a[9] = 2;
5604 a[10] = 3;
5605 a[11] = 4;
5606 a[12] = 5;
5607 a[13] = 6;
5608 a[14] = 7;
5609 a[15] = 8;
5610 a[16] = 9;
5611 a[17] = 10;
5612 a[18] = 14;
5613 a[19] = 17;
5614 a[20] = 21;
5615 a[21] = 25;
5616 a[22] = 30;
5617 a[23] = 35;
5618 a[24] = 45;
5619 a[25] = 60;
5620 a[26] = 80;
5621 a[27] = 100;
5622 a[28] = 200;
5623 a[29] = 300;
5624 a[30] = 400;
5625 a[31] = 500;
5626
5627 b[0] = b[1] = b[2] = b[3] = b[4] = b[5] = b[6] = b[7] = b[8] = 0;
5628 b[9] = b[10] = 1;
5629 b[11] = b[12] = 2;
5630 b[13] = b[14] = b[15] = b[16] = 3;
5631 b[17] = b[18] = b[19] = b[20] = b[21] = 4;
5632 b[22] = b[23] = b[24] = b[25] = b[26] = b[27] = 5;
5633 b[28] = b[29] = 6;
5634 b[30] = b[31] = 7;
5635}
5636
5637/* The minimum additive increment value for the congestion control table */
5638#define CC_MIN_INCR 2U
5639
5640/**
5641 * t4_load_mtus - write the MTU and congestion control HW tables
5642 * @adap: the adapter
5643 * @mtus: the values for the MTU table
5644 * @alpha: the values for the congestion control alpha parameter
5645 * @beta: the values for the congestion control beta parameter
5646 *
5647 * Write the HW MTU table with the supplied MTUs and the high-speed
5648 * congestion control table with the supplied alpha, beta, and MTUs.
5649 * We write the two tables together because the additive increments
5650 * depend on the MTUs.
5651 */
5652void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
5653 const unsigned short *alpha, const unsigned short *beta)
5654{
5655 static const unsigned int avg_pkts[NCCTRL_WIN] = {
5656 2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
5657 896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
5658 28672, 40960, 57344, 81920, 114688, 163840, 229376
5659 };
5660
5661 unsigned int i, w;
5662
5663 for (i = 0; i < NMTUS; ++i) {
5664 unsigned int mtu = mtus[i];
5665 unsigned int log2 = fls(mtu);
5666
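		/* Round the MTU to the nearest power of 2: fls() gives the
		 * position of the most significant bit; if the bit just
		 * below it is clear, the MTU is closer to the lower power
		 * of 2, so back off by one. Editor's worked example: for
		 * mtu = 1500, fls() = 11 and bit 9 (512) is clear, so the
		 * width logged below is 10.
		 */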
5667 if (!(mtu & ((1 << log2) >> 2))) /* round */
5668 log2--;
837e4a42
HS
5669 t4_write_reg(adap, TP_MTU_TABLE_A, MTUINDEX_V(i) |
5670 MTUWIDTH_V(log2) | MTUVALUE_V(mtu));
56d36be4
DM
5671
5672 for (w = 0; w < NCCTRL_WIN; ++w) {
5673 unsigned int inc;
5674
5675 inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
5676 CC_MIN_INCR);
5677
837e4a42 5678 t4_write_reg(adap, TP_CCTRL_TABLE_A, (i << 21) |
56d36be4
DM
5679 (w << 16) | (beta[w] << 13) | inc);
5680 }
5681 }
5682}
5683
7864026b
HS
5684/* Calculates a rate in bytes/s given the number of 256-byte units per 4K core
5685 * clocks. The formula is
5686 *
5687 * bytes/s = bytes256 * 256 * ClkFreq / 4096
5688 *
5689 * which is equivalent to
5690 *
5691 * bytes/s = 62.5 * bytes256 * ClkFreq_ms
5692 */
5693static u64 chan_rate(struct adapter *adap, unsigned int bytes256)
5694{
5695 u64 v = bytes256 * adap->params.vpd.cclk;
5696
5697 return v * 62 + v / 2;
5698}
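/* Editor's worked example (assuming adap->params.vpd.cclk holds the core
 * clock in kHz, as it is used elsewhere in this driver): for cclk = 500000
 * (500 MHz) and bytes256 = 1, v = 500000 and the result is
 * 500000 * 62 + 250000 = 31,250,000 bytes/s, i.e. 62.5 * bytes256 * cclk.
 */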
5699
5700/**
5701 * t4_get_chan_txrate - get the current per channel Tx rates
5702 * @adap: the adapter
5703 * @nic_rate: rates for NIC traffic
5704 * @ofld_rate: rates for offloaded traffic
5705 *
5706 * Return the current Tx rates in bytes/s for NIC and offloaded traffic
5707 * for each channel.
5708 */
5709void t4_get_chan_txrate(struct adapter *adap, u64 *nic_rate, u64 *ofld_rate)
5710{
5711 u32 v;
5712
5713 v = t4_read_reg(adap, TP_TX_TRATE_A);
5714 nic_rate[0] = chan_rate(adap, TNLRATE0_G(v));
5715 nic_rate[1] = chan_rate(adap, TNLRATE1_G(v));
5716 if (adap->params.arch.nchan == NCHAN) {
5717 nic_rate[2] = chan_rate(adap, TNLRATE2_G(v));
5718 nic_rate[3] = chan_rate(adap, TNLRATE3_G(v));
5719 }
5720
5721 v = t4_read_reg(adap, TP_TX_ORATE_A);
5722 ofld_rate[0] = chan_rate(adap, OFDRATE0_G(v));
5723 ofld_rate[1] = chan_rate(adap, OFDRATE1_G(v));
5724 if (adap->params.arch.nchan == NCHAN) {
5725 ofld_rate[2] = chan_rate(adap, OFDRATE2_G(v));
5726 ofld_rate[3] = chan_rate(adap, OFDRATE3_G(v));
5727 }
5728}
5729
8e3d04fd
HS
5730/**
5731 * t4_set_trace_filter - configure one of the tracing filters
5732 * @adap: the adapter
5733 * @tp: the desired trace filter parameters
5734 * @idx: which filter to configure
5735 * @enable: whether to enable or disable the filter
5736 *
5737 * Configures one of the tracing filters available in HW. If @enable is
5738 * %0 @tp is not examined and may be %NULL. The user is responsible for
5739 * setting the single/multiple trace mode by writing to the MPS_TRC_CFG_A register.
5740 */
5741int t4_set_trace_filter(struct adapter *adap, const struct trace_params *tp,
5742 int idx, int enable)
5743{
5744 int i, ofst = idx * 4;
5745 u32 data_reg, mask_reg, cfg;
5746 u32 multitrc = TRCMULTIFILTER_F;
5747
5748 if (!enable) {
5749 t4_write_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A_A + ofst, 0);
5750 return 0;
5751 }
5752
5753 cfg = t4_read_reg(adap, MPS_TRC_CFG_A);
5754 if (cfg & TRCMULTIFILTER_F) {
5755 /* If multiple tracers are enabled, then maximum
5756 * capture size is 2.5KB (FIFO size of a single channel)
5757 * minus 2 flits for CPL_TRACE_PKT header.
5758 */
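		/* Editor's note: assuming 8-byte flits, the limit below
		 * works out to (10 * 1024 / 4) - (2 * 8) = 2560 - 16 =
		 * 2544 bytes.
		 */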
5759 if (tp->snap_len > ((10 * 1024 / 4) - (2 * 8)))
5760 return -EINVAL;
5761 } else {
5762 /* If multiple tracers are disabled, to avoid deadlocks
5763 * maximum packet capture size of 9600 bytes is recommended.
5764 * Also in this mode, only trace0 can be enabled and running.
5765 */
5766 multitrc = 0;
5767 if (tp->snap_len > 9600 || idx)
5768 return -EINVAL;
5769 }
5770
5771 if (tp->port > (is_t4(adap->params.chip) ? 11 : 19) || tp->invert > 1 ||
5772 tp->skip_len > TFLENGTH_M || tp->skip_ofst > TFOFFSET_M ||
5773 tp->min_len > TFMINPKTSIZE_M)
5774 return -EINVAL;
5775
5776 /* stop the tracer we'll be changing */
5777 t4_write_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A_A + ofst, 0);
5778
5779 idx *= (MPS_TRC_FILTER1_MATCH_A - MPS_TRC_FILTER0_MATCH_A);
5780 data_reg = MPS_TRC_FILTER0_MATCH_A + idx;
5781 mask_reg = MPS_TRC_FILTER0_DONT_CARE_A + idx;
5782
5783 for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) {
5784 t4_write_reg(adap, data_reg, tp->data[i]);
5785 t4_write_reg(adap, mask_reg, ~tp->mask[i]);
5786 }
5787 t4_write_reg(adap, MPS_TRC_FILTER_MATCH_CTL_B_A + ofst,
5788 TFCAPTUREMAX_V(tp->snap_len) |
5789 TFMINPKTSIZE_V(tp->min_len));
5790 t4_write_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A_A + ofst,
5791 TFOFFSET_V(tp->skip_ofst) | TFLENGTH_V(tp->skip_len) |
5792 (is_t4(adap->params.chip) ?
5793 TFPORT_V(tp->port) | TFEN_F | TFINVERTMATCH_V(tp->invert) :
5794 T5_TFPORT_V(tp->port) | T5_TFEN_F |
5795 T5_TFINVERTMATCH_V(tp->invert)));
5796
5797 return 0;
5798}
5799
5800/**
5801 * t4_get_trace_filter - query one of the tracing filters
5802 * @adap: the adapter
5803 * @tp: the current trace filter parameters
5804 * @idx: which trace filter to query
5805 * @enabled: non-zero if the filter is enabled
5806 *
5807 * Returns the current settings of one of the HW tracing filters.
5808 */
5809void t4_get_trace_filter(struct adapter *adap, struct trace_params *tp, int idx,
5810 int *enabled)
5811{
5812 u32 ctla, ctlb;
5813 int i, ofst = idx * 4;
5814 u32 data_reg, mask_reg;
5815
5816 ctla = t4_read_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A_A + ofst);
5817 ctlb = t4_read_reg(adap, MPS_TRC_FILTER_MATCH_CTL_B_A + ofst);
5818
5819 if (is_t4(adap->params.chip)) {
5820 *enabled = !!(ctla & TFEN_F);
5821 tp->port = TFPORT_G(ctla);
5822 tp->invert = !!(ctla & TFINVERTMATCH_F);
5823 } else {
5824 *enabled = !!(ctla & T5_TFEN_F);
5825 tp->port = T5_TFPORT_G(ctla);
5826 tp->invert = !!(ctla & T5_TFINVERTMATCH_F);
5827 }
5828 tp->snap_len = TFCAPTUREMAX_G(ctlb);
5829 tp->min_len = TFMINPKTSIZE_G(ctlb);
5830 tp->skip_ofst = TFOFFSET_G(ctla);
5831 tp->skip_len = TFLENGTH_G(ctla);
5832
5833 ofst = (MPS_TRC_FILTER1_MATCH_A - MPS_TRC_FILTER0_MATCH_A) * idx;
5834 data_reg = MPS_TRC_FILTER0_MATCH_A + ofst;
5835 mask_reg = MPS_TRC_FILTER0_DONT_CARE_A + ofst;
5836
5837 for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) {
5838 tp->mask[i] = ~t4_read_reg(adap, mask_reg);
5839 tp->data[i] = t4_read_reg(adap, data_reg) & tp->mask[i];
5840 }
5841}
5842
b3bbe36a
HS
5843/**
5844 * t4_pmtx_get_stats - returns the HW stats from PMTX
5845 * @adap: the adapter
5846 * @cnt: where to store the count statistics
5847 * @cycles: where to store the cycle statistics
5848 *
5849 * Returns performance statistics from PMTX.
5850 */
5851void t4_pmtx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[])
5852{
5853 int i;
5854 u32 data[2];
5855
44588560 5856 for (i = 0; i < adap->params.arch.pm_stats_cnt; i++) {
b3bbe36a
HS
5857 t4_write_reg(adap, PM_TX_STAT_CONFIG_A, i + 1);
5858 cnt[i] = t4_read_reg(adap, PM_TX_STAT_COUNT_A);
5859 if (is_t4(adap->params.chip)) {
5860 cycles[i] = t4_read_reg64(adap, PM_TX_STAT_LSB_A);
5861 } else {
5862 t4_read_indirect(adap, PM_TX_DBG_CTRL_A,
5863 PM_TX_DBG_DATA_A, data, 2,
5864 PM_TX_DBG_STAT_MSB_A);
5865 cycles[i] = (((u64)data[0] << 32) | data[1]);
5866 }
5867 }
5868}
5869
5870/**
5871 * t4_pmrx_get_stats - returns the HW stats from PMRX
5872 * @adap: the adapter
5873 * @cnt: where to store the count statistics
5874 * @cycles: where to store the cycle statistics
5875 *
5876 * Returns performance statistics from PMRX.
5877 */
5878void t4_pmrx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[])
5879{
5880 int i;
5881 u32 data[2];
5882
44588560 5883 for (i = 0; i < adap->params.arch.pm_stats_cnt; i++) {
b3bbe36a
HS
5884 t4_write_reg(adap, PM_RX_STAT_CONFIG_A, i + 1);
5885 cnt[i] = t4_read_reg(adap, PM_RX_STAT_COUNT_A);
5886 if (is_t4(adap->params.chip)) {
5887 cycles[i] = t4_read_reg64(adap, PM_RX_STAT_LSB_A);
5888 } else {
5889 t4_read_indirect(adap, PM_RX_DBG_CTRL_A,
5890 PM_RX_DBG_DATA_A, data, 2,
5891 PM_RX_DBG_STAT_MSB_A);
5892 cycles[i] = (((u64)data[0] << 32) | data[1]);
5893 }
5894 }
5895}
5896
56d36be4 5897/**
8f46d467 5898 * compute_mps_bg_map - compute the MPS Buffer Group Map for a Port
56d36be4 5899 * @adapter: the adapter
193c4c28 5900 * @pidx: the port index
56d36be4 5901 *
8f46d467
AV
5902 * Computes and returns a bitmap indicating which MPS buffer groups are
5903 * associated with the given Port. Bit i is set if buffer group i is
5904 * used by the Port.
56d36be4 5905 */
8f46d467
AV
5906static inline unsigned int compute_mps_bg_map(struct adapter *adapter,
5907 int pidx)
56d36be4 5908{
8f46d467 5909 unsigned int chip_version, nports;
193c4c28 5910
8f46d467
AV
5911 chip_version = CHELSIO_CHIP_VERSION(adapter->params.chip);
5912 nports = 1 << NUMPORTS_G(t4_read_reg(adapter, MPS_CMN_CTL_A));
193c4c28
AV
5913
5914 switch (chip_version) {
5915 case CHELSIO_T4:
5916 case CHELSIO_T5:
5917 switch (nports) {
5918 case 1: return 0xf;
5919 case 2: return 3 << (2 * pidx);
5920 case 4: return 1 << pidx;
5921 }
5922 break;
5923
5924 case CHELSIO_T6:
5925 switch (nports) {
5926 case 2: return 1 << (2 * pidx);
5927 }
5928 break;
5929 }
5930
8f46d467 5931 dev_err(adapter->pdev_dev, "Need MPS Buffer Group Map for Chip %0x, Nports %d\n",
193c4c28 5932 chip_version, nports);
8f46d467 5933
193c4c28
AV
5934 return 0;
5935}
5936
8f46d467
AV
5937/**
5938 * t4_get_mps_bg_map - return the buffer groups associated with a port
5939 * @adapter: the adapter
5940 * @pidx: the port index
5941 *
5942 * Returns a bitmap indicating which MPS buffer groups are associated
5943 * with the given Port. Bit i is set if buffer group i is used by the
5944 * Port.
5945 */
5946unsigned int t4_get_mps_bg_map(struct adapter *adapter, int pidx)
5947{
5948 u8 *mps_bg_map;
5949 unsigned int nports;
5950
5951 nports = 1 << NUMPORTS_G(t4_read_reg(adapter, MPS_CMN_CTL_A));
5952 if (pidx >= nports) {
5953 CH_WARN(adapter, "MPS Port Index %d >= Nports %d\n",
5954 pidx, nports);
5955 return 0;
5956 }
5957
5958 /* If we've already retrieved/computed this, just return the result.
5959 */
5960 mps_bg_map = adapter->params.mps_bg_map;
5961 if (mps_bg_map[pidx])
5962 return mps_bg_map[pidx];
5963
5964 /* Newer Firmware can tell us what the MPS Buffer Group Map is.
5965 * If we're talking to such Firmware, let it tell us. If the new
5966 * API isn't supported, revert back to old hardcoded way. The value
5967 * obtained from Firmware is encoded in below format:
5968 *
5969 * val = (( MPSBGMAP[Port 3] << 24 ) |
5970 * ( MPSBGMAP[Port 2] << 16 ) |
5971 * ( MPSBGMAP[Port 1] << 8 ) |
5972 * ( MPSBGMAP[Port 0] << 0 ))
5973 */
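	/* Editor's worked example with illustrative values only: a returned
	 * val of 0x00000c03 would mean Port 0 uses buffer groups 0 and 1
	 * (0x03), Port 1 uses buffer groups 2 and 3 (0x0c), and Ports 2 and
	 * 3 have empty maps.
	 */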
5974 if (adapter->flags & FW_OK) {
5975 u32 param, val;
5976 int ret;
5977
5978 param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
5979 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_MPSBGMAP));
5980 ret = t4_query_params_ns(adapter, adapter->mbox, adapter->pf,
5981 0, 1, &param, &val);
5982 if (!ret) {
5983 int p;
5984
5985 /* Store the BG Map for all of the Ports in order to
5986 * avoid more calls to the Firmware in the future.
5987 */
5988 for (p = 0; p < MAX_NPORTS; p++, val >>= 8)
5989 mps_bg_map[p] = val & 0xff;
5990
5991 return mps_bg_map[pidx];
5992 }
5993 }
5994
5995 /* Either we're not talking to the Firmware or we're dealing with
5996 * older Firmware which doesn't support the new API to get the MPS
5997 * Buffer Group Map. Fall back to computing it ourselves.
5998 */
5999 mps_bg_map[pidx] = compute_mps_bg_map(adapter, pidx);
6000 return mps_bg_map[pidx];
6001}
6002
193c4c28
AV
6003/**
6004 * t4_get_tp_ch_map - return TP ingress channels associated with a port
6005 * @adap: the adapter
6006 * @pidx: the port index
6007 *
6008 * Returns a bitmap indicating which TP Ingress Channels are associated
6009 * with a given Port. Bit i is set if TP Ingress Channel i is used by
6010 * the Port.
6011 */
6012unsigned int t4_get_tp_ch_map(struct adapter *adap, int pidx)
6013{
6014 unsigned int chip_version = CHELSIO_CHIP_VERSION(adap->params.chip);
6015 unsigned int nports = 1 << NUMPORTS_G(t4_read_reg(adap, MPS_CMN_CTL_A));
6016
6017 if (pidx >= nports) {
6018 dev_warn(adap->pdev_dev, "TP Port Index %d >= Nports %d\n",
6019 pidx, nports);
6020 return 0;
6021 }
6022
6023 switch (chip_version) {
6024 case CHELSIO_T4:
6025 case CHELSIO_T5:
6026 /* Note that these happen to be the same values as the MPS
6027 * Buffer Group Map for these Chips. But we replicate the code
6028 * here because they're really separate concepts.
6029 */
6030 switch (nports) {
6031 case 1: return 0xf;
6032 case 2: return 3 << (2 * pidx);
6033 case 4: return 1 << pidx;
6034 }
6035 break;
6036
6037 case CHELSIO_T6:
6038 switch (nports) {
6039 case 2: return 1 << pidx;
6040 }
6041 break;
6042 }
6043
6044 dev_err(adap->pdev_dev, "Need TP Channel Map for Chip %0x, Nports %d\n",
6045 chip_version, nports);
6046 return 0;
56d36be4
DM
6047}
6048
72aca4bf
KS
6049/**
6050 * t4_get_port_type_description - return Port Type string description
6051 * @port_type: firmware Port Type enumeration
6052 */
6053const char *t4_get_port_type_description(enum fw_port_type port_type)
6054{
6055 static const char *const port_type_description[] = {
89eb9835
GG
6056 "Fiber_XFI",
6057 "Fiber_XAUI",
6058 "BT_SGMII",
6059 "BT_XFI",
6060 "BT_XAUI",
72aca4bf
KS
6061 "KX4",
6062 "CX4",
6063 "KX",
6064 "KR",
89eb9835
GG
6065 "SFP",
6066 "BP_AP",
6067 "BP4_AP",
6068 "QSFP_10G",
6069 "QSA",
6070 "QSFP",
6071 "BP40_BA",
6072 "KR4_100G",
6073 "CR4_QSFP",
6074 "CR_QSFP",
6075 "CR2_QSFP",
6076 "SFP28",
6077 "KR_SFP28",
b39ab140 6078 "KR_XLAUI"
72aca4bf
KS
6079 };
6080
6081 if (port_type < ARRAY_SIZE(port_type_description))
6082 return port_type_description[port_type];
6083 return "UNKNOWN";
6084}
6085
a4cfd929
HS
6086/**
6087 * t4_get_port_stats_offset - collect port stats relative to a previous
6088 * snapshot
6089 * @adap: The adapter
6090 * @idx: The port
6091 * @stats: Current stats to fill
6092 * @offset: Previous stats snapshot
6093 */
6094void t4_get_port_stats_offset(struct adapter *adap, int idx,
6095 struct port_stats *stats,
6096 struct port_stats *offset)
6097{
6098 u64 *s, *o;
6099 int i;
6100
6101 t4_get_port_stats(adap, idx, stats);
6102 for (i = 0, s = (u64 *)stats, o = (u64 *)offset;
6103 i < (sizeof(struct port_stats) / sizeof(u64));
6104 i++, s++, o++)
6105 *s -= *o;
6106}
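/* Usage sketch (editor's illustration, not part of the driver): callers
 * typically capture a baseline snapshot once and later read deltas against
 * it; "adap" and "port" are assumed to be valid here:
 *
 *	struct port_stats base, delta;
 *
 *	t4_get_port_stats(adap, port, &base);
 *	...
 *	t4_get_port_stats_offset(adap, port, &delta, &base);
 */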
6107
56d36be4
DM
6108/**
6109 * t4_get_port_stats - collect port statistics
6110 * @adap: the adapter
6111 * @idx: the port index
6112 * @p: the stats structure to fill
6113 *
6114 * Collect statistics related to the given port from HW.
6115 */
6116void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p)
6117{
145ef8a5 6118 u32 bgmap = t4_get_mps_bg_map(adap, idx);
f750e82e 6119 u32 stat_ctl = t4_read_reg(adap, MPS_STAT_CTL_A);
56d36be4
DM
6120
6121#define GET_STAT(name) \
0a57a536 6122 t4_read_reg64(adap, \
d14807dd 6123 (is_t4(adap->params.chip) ? PORT_REG(idx, MPS_PORT_STAT_##name##_L) : \
0a57a536 6124 T5_PORT_REG(idx, MPS_PORT_STAT_##name##_L)))
56d36be4
DM
6125#define GET_STAT_COM(name) t4_read_reg64(adap, MPS_STAT_##name##_L)
6126
6127 p->tx_octets = GET_STAT(TX_PORT_BYTES);
6128 p->tx_frames = GET_STAT(TX_PORT_FRAMES);
6129 p->tx_bcast_frames = GET_STAT(TX_PORT_BCAST);
6130 p->tx_mcast_frames = GET_STAT(TX_PORT_MCAST);
6131 p->tx_ucast_frames = GET_STAT(TX_PORT_UCAST);
6132 p->tx_error_frames = GET_STAT(TX_PORT_ERROR);
6133 p->tx_frames_64 = GET_STAT(TX_PORT_64B);
6134 p->tx_frames_65_127 = GET_STAT(TX_PORT_65B_127B);
6135 p->tx_frames_128_255 = GET_STAT(TX_PORT_128B_255B);
6136 p->tx_frames_256_511 = GET_STAT(TX_PORT_256B_511B);
6137 p->tx_frames_512_1023 = GET_STAT(TX_PORT_512B_1023B);
6138 p->tx_frames_1024_1518 = GET_STAT(TX_PORT_1024B_1518B);
6139 p->tx_frames_1519_max = GET_STAT(TX_PORT_1519B_MAX);
6140 p->tx_drop = GET_STAT(TX_PORT_DROP);
6141 p->tx_pause = GET_STAT(TX_PORT_PAUSE);
6142 p->tx_ppp0 = GET_STAT(TX_PORT_PPP0);
6143 p->tx_ppp1 = GET_STAT(TX_PORT_PPP1);
6144 p->tx_ppp2 = GET_STAT(TX_PORT_PPP2);
6145 p->tx_ppp3 = GET_STAT(TX_PORT_PPP3);
6146 p->tx_ppp4 = GET_STAT(TX_PORT_PPP4);
6147 p->tx_ppp5 = GET_STAT(TX_PORT_PPP5);
6148 p->tx_ppp6 = GET_STAT(TX_PORT_PPP6);
6149 p->tx_ppp7 = GET_STAT(TX_PORT_PPP7);
6150
f750e82e 6151 if (CHELSIO_CHIP_VERSION(adap->params.chip) >= CHELSIO_T5) {
2de489f4
GG
6152 if (stat_ctl & COUNTPAUSESTATTX_F)
6153 p->tx_frames_64 -= p->tx_pause;
f750e82e
GG
6154 if (stat_ctl & COUNTPAUSEMCTX_F)
6155 p->tx_mcast_frames -= p->tx_pause;
6156 }
56d36be4
DM
6157 p->rx_octets = GET_STAT(RX_PORT_BYTES);
6158 p->rx_frames = GET_STAT(RX_PORT_FRAMES);
6159 p->rx_bcast_frames = GET_STAT(RX_PORT_BCAST);
6160 p->rx_mcast_frames = GET_STAT(RX_PORT_MCAST);
6161 p->rx_ucast_frames = GET_STAT(RX_PORT_UCAST);
6162 p->rx_too_long = GET_STAT(RX_PORT_MTU_ERROR);
6163 p->rx_jabber = GET_STAT(RX_PORT_MTU_CRC_ERROR);
6164 p->rx_fcs_err = GET_STAT(RX_PORT_CRC_ERROR);
6165 p->rx_len_err = GET_STAT(RX_PORT_LEN_ERROR);
6166 p->rx_symbol_err = GET_STAT(RX_PORT_SYM_ERROR);
6167 p->rx_runt = GET_STAT(RX_PORT_LESS_64B);
6168 p->rx_frames_64 = GET_STAT(RX_PORT_64B);
6169 p->rx_frames_65_127 = GET_STAT(RX_PORT_65B_127B);
6170 p->rx_frames_128_255 = GET_STAT(RX_PORT_128B_255B);
6171 p->rx_frames_256_511 = GET_STAT(RX_PORT_256B_511B);
6172 p->rx_frames_512_1023 = GET_STAT(RX_PORT_512B_1023B);
6173 p->rx_frames_1024_1518 = GET_STAT(RX_PORT_1024B_1518B);
6174 p->rx_frames_1519_max = GET_STAT(RX_PORT_1519B_MAX);
6175 p->rx_pause = GET_STAT(RX_PORT_PAUSE);
6176 p->rx_ppp0 = GET_STAT(RX_PORT_PPP0);
6177 p->rx_ppp1 = GET_STAT(RX_PORT_PPP1);
6178 p->rx_ppp2 = GET_STAT(RX_PORT_PPP2);
6179 p->rx_ppp3 = GET_STAT(RX_PORT_PPP3);
6180 p->rx_ppp4 = GET_STAT(RX_PORT_PPP4);
6181 p->rx_ppp5 = GET_STAT(RX_PORT_PPP5);
6182 p->rx_ppp6 = GET_STAT(RX_PORT_PPP6);
6183 p->rx_ppp7 = GET_STAT(RX_PORT_PPP7);
6184
f750e82e 6185 if (CHELSIO_CHIP_VERSION(adap->params.chip) >= CHELSIO_T5) {
2de489f4
GG
6186 if (stat_ctl & COUNTPAUSESTATRX_F)
6187 p->rx_frames_64 -= p->rx_pause;
f750e82e
GG
6188 if (stat_ctl & COUNTPAUSEMCRX_F)
6189 p->rx_mcast_frames -= p->rx_pause;
6190 }
6191
56d36be4
DM
6192 p->rx_ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_DROP_FRAME) : 0;
6193 p->rx_ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_DROP_FRAME) : 0;
6194 p->rx_ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_DROP_FRAME) : 0;
6195 p->rx_ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_DROP_FRAME) : 0;
6196 p->rx_trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_TRUNC_FRAME) : 0;
6197 p->rx_trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_TRUNC_FRAME) : 0;
6198 p->rx_trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_TRUNC_FRAME) : 0;
6199 p->rx_trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_TRUNC_FRAME) : 0;
6200
6201#undef GET_STAT
6202#undef GET_STAT_COM
6203}
6204
56d36be4 6205/**
65046e84 6206 * t4_get_lb_stats - collect loopback port statistics
56d36be4 6207 * @adap: the adapter
65046e84
HS
6208 * @idx: the loopback port index
6209 * @p: the stats structure to fill
56d36be4 6210 *
65046e84 6211 * Return HW statistics for the given loopback port.
56d36be4 6212 */
65046e84 6213void t4_get_lb_stats(struct adapter *adap, int idx, struct lb_port_stats *p)
56d36be4 6214{
65046e84 6215 u32 bgmap = t4_get_mps_bg_map(adap, idx);
56d36be4 6216
65046e84
HS
6217#define GET_STAT(name) \
6218 t4_read_reg64(adap, \
0d804338 6219 (is_t4(adap->params.chip) ? \
65046e84
HS
6220 PORT_REG(idx, MPS_PORT_STAT_LB_PORT_##name##_L) : \
6221 T5_PORT_REG(idx, MPS_PORT_STAT_LB_PORT_##name##_L)))
6222#define GET_STAT_COM(name) t4_read_reg64(adap, MPS_STAT_##name##_L)
56d36be4 6223
65046e84
HS
6224 p->octets = GET_STAT(BYTES);
6225 p->frames = GET_STAT(FRAMES);
6226 p->bcast_frames = GET_STAT(BCAST);
6227 p->mcast_frames = GET_STAT(MCAST);
6228 p->ucast_frames = GET_STAT(UCAST);
6229 p->error_frames = GET_STAT(ERROR);
6230
6231 p->frames_64 = GET_STAT(64B);
6232 p->frames_65_127 = GET_STAT(65B_127B);
6233 p->frames_128_255 = GET_STAT(128B_255B);
6234 p->frames_256_511 = GET_STAT(256B_511B);
6235 p->frames_512_1023 = GET_STAT(512B_1023B);
6236 p->frames_1024_1518 = GET_STAT(1024B_1518B);
6237 p->frames_1519_max = GET_STAT(1519B_MAX);
6238 p->drop = GET_STAT(DROP_FRAMES);
6239
6240 p->ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_DROP_FRAME) : 0;
6241 p->ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_DROP_FRAME) : 0;
6242 p->ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_DROP_FRAME) : 0;
6243 p->ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_LB_DROP_FRAME) : 0;
6244 p->trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_TRUNC_FRAME) : 0;
6245 p->trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_TRUNC_FRAME) : 0;
6246 p->trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_TRUNC_FRAME) : 0;
6247 p->trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_LB_TRUNC_FRAME) : 0;
56d36be4 6248
65046e84
HS
6249#undef GET_STAT
6250#undef GET_STAT_COM
56d36be4
DM
6251}
6252
f2b7e78d
VP
6253/**
 * t4_mk_filtdelwr - create a delete filter WR
6254 * @ftid: the filter ID
6255 * @wr: the filter work request to populate
6256 * @qid: ingress queue to receive the delete notification
6257 *
6258 * Creates a filter work request to delete the supplied filter. If @qid is
6259 * negative the delete notification is suppressed.
6260 */
6261void t4_mk_filtdelwr(unsigned int ftid, struct fw_filter_wr *wr, int qid)
6262{
6263 memset(wr, 0, sizeof(*wr));
f404f80c
HS
6264 wr->op_pkd = cpu_to_be32(FW_WR_OP_V(FW_FILTER_WR));
6265 wr->len16_pkd = cpu_to_be32(FW_WR_LEN16_V(sizeof(*wr) / 16));
6266 wr->tid_to_iq = cpu_to_be32(FW_FILTER_WR_TID_V(ftid) |
6267 FW_FILTER_WR_NOREPLY_V(qid < 0));
6268 wr->del_filter_to_l2tix = cpu_to_be32(FW_FILTER_WR_DEL_FILTER_F);
f2b7e78d 6269 if (qid >= 0)
f404f80c
HS
6270 wr->rx_chan_rx_rpl_iq =
6271 cpu_to_be16(FW_FILTER_WR_RX_RPL_IQ_V(qid));
f2b7e78d
VP
6272}
6273
56d36be4 6274#define INIT_CMD(var, cmd, rd_wr) do { \
f404f80c
HS
6275 (var).op_to_write = cpu_to_be32(FW_CMD_OP_V(FW_##cmd##_CMD) | \
6276 FW_CMD_REQUEST_F | \
6277 FW_CMD_##rd_wr##_F); \
6278 (var).retval_len16 = cpu_to_be32(FW_LEN16(var)); \
56d36be4
DM
6279} while (0)
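/* Editor's note: as an example, INIT_CMD(c, BYE, WRITE) in t4_fw_bye()
 * below expands to roughly
 *
 *	c.op_to_write = cpu_to_be32(FW_CMD_OP_V(FW_BYE_CMD) |
 *				    FW_CMD_REQUEST_F | FW_CMD_WRITE_F);
 *	c.retval_len16 = cpu_to_be32(FW_LEN16(c));
 */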
6280
8caa1e84
VP
6281int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox,
6282 u32 addr, u32 val)
6283{
f404f80c 6284 u32 ldst_addrspace;
8caa1e84
VP
6285 struct fw_ldst_cmd c;
6286
6287 memset(&c, 0, sizeof(c));
f404f80c
HS
6288 ldst_addrspace = FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_FIRMWARE);
6289 c.op_to_addrspace = cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) |
6290 FW_CMD_REQUEST_F |
6291 FW_CMD_WRITE_F |
6292 ldst_addrspace);
6293 c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
6294 c.u.addrval.addr = cpu_to_be32(addr);
6295 c.u.addrval.val = cpu_to_be32(val);
8caa1e84
VP
6296
6297 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
6298}
6299
56d36be4
DM
6300/**
6301 * t4_mdio_rd - read a PHY register through MDIO
6302 * @adap: the adapter
6303 * @mbox: mailbox to use for the FW command
6304 * @phy_addr: the PHY address
6305 * @mmd: the PHY MMD to access (0 for clause 22 PHYs)
6306 * @reg: the register to read
6307 * @valp: where to store the value
6308 *
6309 * Issues a FW command through the given mailbox to read a PHY register.
6310 */
6311int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
6312 unsigned int mmd, unsigned int reg, u16 *valp)
6313{
6314 int ret;
f404f80c 6315 u32 ldst_addrspace;
56d36be4
DM
6316 struct fw_ldst_cmd c;
6317
6318 memset(&c, 0, sizeof(c));
f404f80c
HS
6319 ldst_addrspace = FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_MDIO);
6320 c.op_to_addrspace = cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) |
6321 FW_CMD_REQUEST_F | FW_CMD_READ_F |
6322 ldst_addrspace);
6323 c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
6324 c.u.mdio.paddr_mmd = cpu_to_be16(FW_LDST_CMD_PADDR_V(phy_addr) |
6325 FW_LDST_CMD_MMD_V(mmd));
6326 c.u.mdio.raddr = cpu_to_be16(reg);
56d36be4
DM
6327
6328 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
6329 if (ret == 0)
f404f80c 6330 *valp = be16_to_cpu(c.u.mdio.rval);
56d36be4
DM
6331 return ret;
6332}
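/* Usage sketch (editor's illustration, not part of the driver): reading a
 * clause-45 PHY register, e.g. register 0 of MMD 1 (PMA/PMD), with a valid
 * "adap" and a hypothetical "phy_addr":
 *
 *	u16 val;
 *	int ret;
 *
 *	ret = t4_mdio_rd(adap, adap->mbox, phy_addr, 1, 0, &val);
 */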
6333
6334/**
6335 * t4_mdio_wr - write a PHY register through MDIO
6336 * @adap: the adapter
6337 * @mbox: mailbox to use for the FW command
6338 * @phy_addr: the PHY address
6339 * @mmd: the PHY MMD to access (0 for clause 22 PHYs)
6340 * @reg: the register to write
6341 * @val: value to write
6342 *
6343 * Issues a FW command through the given mailbox to write a PHY register.
6344 */
6345int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
6346 unsigned int mmd, unsigned int reg, u16 val)
6347{
f404f80c 6348 u32 ldst_addrspace;
56d36be4
DM
6349 struct fw_ldst_cmd c;
6350
6351 memset(&c, 0, sizeof(c));
f404f80c
HS
6352 ldst_addrspace = FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_MDIO);
6353 c.op_to_addrspace = cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) |
6354 FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
6355 ldst_addrspace);
6356 c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
6357 c.u.mdio.paddr_mmd = cpu_to_be16(FW_LDST_CMD_PADDR_V(phy_addr) |
6358 FW_LDST_CMD_MMD_V(mmd));
6359 c.u.mdio.raddr = cpu_to_be16(reg);
6360 c.u.mdio.rval = cpu_to_be16(val);
56d36be4
DM
6361
6362 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
6363}
6364
68bce192
KS
6365/**
6366 * t4_sge_decode_idma_state - decode the idma state
6367 * @adapter: the adapter
6368 * @state: the state idma is stuck in
6369 */
6370void t4_sge_decode_idma_state(struct adapter *adapter, int state)
6371{
6372 static const char * const t4_decode[] = {
6373 "IDMA_IDLE",
6374 "IDMA_PUSH_MORE_CPL_FIFO",
6375 "IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO",
6376 "Not used",
6377 "IDMA_PHYSADDR_SEND_PCIEHDR",
6378 "IDMA_PHYSADDR_SEND_PAYLOAD_FIRST",
6379 "IDMA_PHYSADDR_SEND_PAYLOAD",
6380 "IDMA_SEND_FIFO_TO_IMSG",
6381 "IDMA_FL_REQ_DATA_FL_PREP",
6382 "IDMA_FL_REQ_DATA_FL",
6383 "IDMA_FL_DROP",
6384 "IDMA_FL_H_REQ_HEADER_FL",
6385 "IDMA_FL_H_SEND_PCIEHDR",
6386 "IDMA_FL_H_PUSH_CPL_FIFO",
6387 "IDMA_FL_H_SEND_CPL",
6388 "IDMA_FL_H_SEND_IP_HDR_FIRST",
6389 "IDMA_FL_H_SEND_IP_HDR",
6390 "IDMA_FL_H_REQ_NEXT_HEADER_FL",
6391 "IDMA_FL_H_SEND_NEXT_PCIEHDR",
6392 "IDMA_FL_H_SEND_IP_HDR_PADDING",
6393 "IDMA_FL_D_SEND_PCIEHDR",
6394 "IDMA_FL_D_SEND_CPL_AND_IP_HDR",
6395 "IDMA_FL_D_REQ_NEXT_DATA_FL",
6396 "IDMA_FL_SEND_PCIEHDR",
6397 "IDMA_FL_PUSH_CPL_FIFO",
6398 "IDMA_FL_SEND_CPL",
6399 "IDMA_FL_SEND_PAYLOAD_FIRST",
6400 "IDMA_FL_SEND_PAYLOAD",
6401 "IDMA_FL_REQ_NEXT_DATA_FL",
6402 "IDMA_FL_SEND_NEXT_PCIEHDR",
6403 "IDMA_FL_SEND_PADDING",
6404 "IDMA_FL_SEND_COMPLETION_TO_IMSG",
6405 "IDMA_FL_SEND_FIFO_TO_IMSG",
6406 "IDMA_FL_REQ_DATAFL_DONE",
6407 "IDMA_FL_REQ_HEADERFL_DONE",
6408 };
6409 static const char * const t5_decode[] = {
6410 "IDMA_IDLE",
6411 "IDMA_ALMOST_IDLE",
6412 "IDMA_PUSH_MORE_CPL_FIFO",
6413 "IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO",
6414 "IDMA_SGEFLRFLUSH_SEND_PCIEHDR",
6415 "IDMA_PHYSADDR_SEND_PCIEHDR",
6416 "IDMA_PHYSADDR_SEND_PAYLOAD_FIRST",
6417 "IDMA_PHYSADDR_SEND_PAYLOAD",
6418 "IDMA_SEND_FIFO_TO_IMSG",
6419 "IDMA_FL_REQ_DATA_FL",
6420 "IDMA_FL_DROP",
6421 "IDMA_FL_DROP_SEND_INC",
6422 "IDMA_FL_H_REQ_HEADER_FL",
6423 "IDMA_FL_H_SEND_PCIEHDR",
6424 "IDMA_FL_H_PUSH_CPL_FIFO",
6425 "IDMA_FL_H_SEND_CPL",
6426 "IDMA_FL_H_SEND_IP_HDR_FIRST",
6427 "IDMA_FL_H_SEND_IP_HDR",
6428 "IDMA_FL_H_REQ_NEXT_HEADER_FL",
6429 "IDMA_FL_H_SEND_NEXT_PCIEHDR",
6430 "IDMA_FL_H_SEND_IP_HDR_PADDING",
6431 "IDMA_FL_D_SEND_PCIEHDR",
6432 "IDMA_FL_D_SEND_CPL_AND_IP_HDR",
6433 "IDMA_FL_D_REQ_NEXT_DATA_FL",
6434 "IDMA_FL_SEND_PCIEHDR",
6435 "IDMA_FL_PUSH_CPL_FIFO",
6436 "IDMA_FL_SEND_CPL",
6437 "IDMA_FL_SEND_PAYLOAD_FIRST",
6438 "IDMA_FL_SEND_PAYLOAD",
6439 "IDMA_FL_REQ_NEXT_DATA_FL",
6440 "IDMA_FL_SEND_NEXT_PCIEHDR",
6441 "IDMA_FL_SEND_PADDING",
6442 "IDMA_FL_SEND_COMPLETION_TO_IMSG",
6443 };
6df39753
HS
6444 static const char * const t6_decode[] = {
6445 "IDMA_IDLE",
6446 "IDMA_PUSH_MORE_CPL_FIFO",
6447 "IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO",
6448 "IDMA_SGEFLRFLUSH_SEND_PCIEHDR",
6449 "IDMA_PHYSADDR_SEND_PCIEHDR",
6450 "IDMA_PHYSADDR_SEND_PAYLOAD_FIRST",
6451 "IDMA_PHYSADDR_SEND_PAYLOAD",
6452 "IDMA_FL_REQ_DATA_FL",
6453 "IDMA_FL_DROP",
6454 "IDMA_FL_DROP_SEND_INC",
6455 "IDMA_FL_H_REQ_HEADER_FL",
6456 "IDMA_FL_H_SEND_PCIEHDR",
6457 "IDMA_FL_H_PUSH_CPL_FIFO",
6458 "IDMA_FL_H_SEND_CPL",
6459 "IDMA_FL_H_SEND_IP_HDR_FIRST",
6460 "IDMA_FL_H_SEND_IP_HDR",
6461 "IDMA_FL_H_REQ_NEXT_HEADER_FL",
6462 "IDMA_FL_H_SEND_NEXT_PCIEHDR",
6463 "IDMA_FL_H_SEND_IP_HDR_PADDING",
6464 "IDMA_FL_D_SEND_PCIEHDR",
6465 "IDMA_FL_D_SEND_CPL_AND_IP_HDR",
6466 "IDMA_FL_D_REQ_NEXT_DATA_FL",
6467 "IDMA_FL_SEND_PCIEHDR",
6468 "IDMA_FL_PUSH_CPL_FIFO",
6469 "IDMA_FL_SEND_CPL",
6470 "IDMA_FL_SEND_PAYLOAD_FIRST",
6471 "IDMA_FL_SEND_PAYLOAD",
6472 "IDMA_FL_REQ_NEXT_DATA_FL",
6473 "IDMA_FL_SEND_NEXT_PCIEHDR",
6474 "IDMA_FL_SEND_PADDING",
6475 "IDMA_FL_SEND_COMPLETION_TO_IMSG",
6476 };
68bce192 6477 static const u32 sge_regs[] = {
f061de42
HS
6478 SGE_DEBUG_DATA_LOW_INDEX_2_A,
6479 SGE_DEBUG_DATA_LOW_INDEX_3_A,
6480 SGE_DEBUG_DATA_HIGH_INDEX_10_A,
68bce192
KS
6481 };
6482 const char **sge_idma_decode;
6483 int sge_idma_decode_nstates;
6484 int i;
6df39753
HS
6485 unsigned int chip_version = CHELSIO_CHIP_VERSION(adapter->params.chip);
6486
6487 /* Select the right set of decode strings to dump depending on the
6488 * adapter chip type.
6489 */
6490 switch (chip_version) {
6491 case CHELSIO_T4:
6492 sge_idma_decode = (const char **)t4_decode;
6493 sge_idma_decode_nstates = ARRAY_SIZE(t4_decode);
6494 break;
6495
6496 case CHELSIO_T5:
6497 sge_idma_decode = (const char **)t5_decode;
6498 sge_idma_decode_nstates = ARRAY_SIZE(t5_decode);
6499 break;
6500
6501 case CHELSIO_T6:
6502 sge_idma_decode = (const char **)t6_decode;
6503 sge_idma_decode_nstates = ARRAY_SIZE(t6_decode);
6504 break;
6505
6506 default:
6507 dev_err(adapter->pdev_dev,
6508 "Unsupported chip version %d\n", chip_version);
6509 return;
6510 }
68bce192
KS
6511
6520 if (state < sge_idma_decode_nstates)
6521 CH_WARN(adapter, "idma state %s\n", sge_idma_decode[state]);
6522 else
6523 CH_WARN(adapter, "idma state %d unknown\n", state);
6524
6525 for (i = 0; i < ARRAY_SIZE(sge_regs); i++)
6526 CH_WARN(adapter, "SGE register %#x value %#x\n",
6527 sge_regs[i], t4_read_reg(adapter, sge_regs[i]));
6528}
6529
5d700ecb
HS
6530/**
6531 * t4_sge_ctxt_flush - flush the SGE context cache
6532 * @adap: the adapter
6533 * @mbox: mailbox to use for the FW command
736c3b94 6534 * @ctxt_type: Egress or Ingress
5d700ecb
HS
6535 *
6536 * Issues a FW command through the given mailbox to flush the
6537 * SGE context cache.
6538 */
736c3b94 6539int t4_sge_ctxt_flush(struct adapter *adap, unsigned int mbox, int ctxt_type)
5d700ecb
HS
6540{
6541 int ret;
6542 u32 ldst_addrspace;
6543 struct fw_ldst_cmd c;
6544
6545 memset(&c, 0, sizeof(c));
736c3b94
RL
6546 ldst_addrspace = FW_LDST_CMD_ADDRSPACE_V(ctxt_type == CTXT_EGRESS ?
6547 FW_LDST_ADDRSPC_SGE_EGRC :
6548 FW_LDST_ADDRSPC_SGE_INGC);
5d700ecb
HS
6549 c.op_to_addrspace = cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) |
6550 FW_CMD_REQUEST_F | FW_CMD_READ_F |
6551 ldst_addrspace);
6552 c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
6553 c.u.idctxt.msg_ctxtflush = cpu_to_be32(FW_LDST_CMD_CTXTFLUSH_F);
6554
6555 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
6556 return ret;
6557}
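/* Usage sketch (editor's illustration, not part of the driver): flushing
 * the egress context cache through the adapter's own mailbox:
 *
 *	ret = t4_sge_ctxt_flush(adap, adap->mbox, CTXT_EGRESS);
 */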
6558
56d36be4 6559/**
636f9d37
VP
6560 * t4_fw_hello - establish communication with FW
6561 * @adap: the adapter
6562 * @mbox: mailbox to use for the FW command
6563 * @evt_mbox: mailbox to receive async FW events
6564 * @master: specifies the caller's willingness to be the device master
6565 * @state: returns the current device state (if non-NULL)
56d36be4 6566 *
636f9d37
VP
6567 * Issues a command to establish communication with FW. Returns either
6568 * an error (negative integer) or the mailbox of the Master PF.
56d36be4
DM
6569 */
6570int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox,
6571 enum dev_master master, enum dev_state *state)
6572{
6573 int ret;
6574 struct fw_hello_cmd c;
636f9d37
VP
6575 u32 v;
6576 unsigned int master_mbox;
6577 int retries = FW_CMD_HELLO_RETRIES;
56d36be4 6578
636f9d37
VP
6579retry:
6580 memset(&c, 0, sizeof(c));
56d36be4 6581 INIT_CMD(c, HELLO, WRITE);
f404f80c 6582 c.err_to_clearinit = cpu_to_be32(
5167865a
HS
6583 FW_HELLO_CMD_MASTERDIS_V(master == MASTER_CANT) |
6584 FW_HELLO_CMD_MASTERFORCE_V(master == MASTER_MUST) |
f404f80c
HS
6585 FW_HELLO_CMD_MBMASTER_V(master == MASTER_MUST ?
6586 mbox : FW_HELLO_CMD_MBMASTER_M) |
5167865a
HS
6587 FW_HELLO_CMD_MBASYNCNOT_V(evt_mbox) |
6588 FW_HELLO_CMD_STAGE_V(fw_hello_cmd_stage_os) |
6589 FW_HELLO_CMD_CLEARINIT_F);
56d36be4 6590
636f9d37
VP
6591 /*
6592 * Issue the HELLO command to the firmware. If it's not successful
6593 * but indicates that we got a "busy" or "timeout" condition, retry
31d55c2d
HS
6594 * the HELLO until we exhaust our retry limit. If we do exceed our
6595 * retry limit, check to see if the firmware left us any error
6596 * information and report that if so.
636f9d37 6597 */
56d36be4 6598 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
636f9d37
VP
6599 if (ret < 0) {
6600 if ((ret == -EBUSY || ret == -ETIMEDOUT) && retries-- > 0)
6601 goto retry;
f061de42 6602 if (t4_read_reg(adap, PCIE_FW_A) & PCIE_FW_ERR_F)
31d55c2d 6603 t4_report_fw_error(adap);
636f9d37
VP
6604 return ret;
6605 }
6606
f404f80c 6607 v = be32_to_cpu(c.err_to_clearinit);
5167865a 6608 master_mbox = FW_HELLO_CMD_MBMASTER_G(v);
636f9d37 6609 if (state) {
5167865a 6610 if (v & FW_HELLO_CMD_ERR_F)
56d36be4 6611 *state = DEV_STATE_ERR;
5167865a 6612 else if (v & FW_HELLO_CMD_INIT_F)
636f9d37 6613 *state = DEV_STATE_INIT;
56d36be4
DM
6614 else
6615 *state = DEV_STATE_UNINIT;
6616 }
636f9d37
VP
6617
6618 /*
6619 * If we're not the Master PF then we need to wait around for the
6620 * Master PF Driver to finish setting up the adapter.
6621 *
6622 * Note that we also do this wait if we're a non-Master-capable PF and
6623 * there is no current Master PF; a Master PF may show up momentarily
6624 * and we wouldn't want to fail pointlessly. (This can happen when an
6625 * OS loads lots of different drivers rapidly at the same time). In
6626 * this case, the Master PF returned by the firmware will be
b2e1a3f0 6627 * PCIE_FW_MASTER_M so the test below will work ...
636f9d37 6628 */
5167865a 6629 if ((v & (FW_HELLO_CMD_ERR_F|FW_HELLO_CMD_INIT_F)) == 0 &&
636f9d37
VP
6630 master_mbox != mbox) {
6631 int waiting = FW_CMD_HELLO_TIMEOUT;
6632
6633 /*
6634 * Wait for the firmware to either indicate an error or
6635 * initialized state. If we see either of these we bail out
6636 * and report the issue to the caller. If we exhaust the
6637 * "hello timeout" and we haven't exhausted our retries, try
6638 * again. Otherwise bail with a timeout error.
6639 */
6640 for (;;) {
6641 u32 pcie_fw;
6642
6643 msleep(50);
6644 waiting -= 50;
6645
6646 /*
6647 * If neither Error nor Initialized is indicated
6648 * by the firmware, keep waiting until we exhaust our
6649 * timeout ... and then retry if we haven't exhausted
6650 * our retries ...
6651 */
f061de42
HS
6652 pcie_fw = t4_read_reg(adap, PCIE_FW_A);
6653 if (!(pcie_fw & (PCIE_FW_ERR_F|PCIE_FW_INIT_F))) {
636f9d37
VP
6654 if (waiting <= 0) {
6655 if (retries-- > 0)
6656 goto retry;
6657
6658 return -ETIMEDOUT;
6659 }
6660 continue;
6661 }
6662
6663 /*
6664 * We either have an Error or Initialized condition
6665 * report errors preferentially.
6666 */
6667 if (state) {
f061de42 6668 if (pcie_fw & PCIE_FW_ERR_F)
636f9d37 6669 *state = DEV_STATE_ERR;
f061de42 6670 else if (pcie_fw & PCIE_FW_INIT_F)
636f9d37
VP
6671 *state = DEV_STATE_INIT;
6672 }
6673
6674 /*
6675 * If we arrived before a Master PF was selected and
6676 * there's not a valid Master PF, grab its identity
6677 * for our caller.
6678 */
b2e1a3f0 6679 if (master_mbox == PCIE_FW_MASTER_M &&
f061de42 6680 (pcie_fw & PCIE_FW_MASTER_VLD_F))
b2e1a3f0 6681 master_mbox = PCIE_FW_MASTER_G(pcie_fw);
636f9d37
VP
6682 break;
6683 }
6684 }
6685
6686 return master_mbox;
56d36be4
DM
6687}
6688
6689/**
6690 * t4_fw_bye - end communication with FW
6691 * @adap: the adapter
6692 * @mbox: mailbox to use for the FW command
6693 *
6694 * Issues a command to terminate communication with FW.
6695 */
6696int t4_fw_bye(struct adapter *adap, unsigned int mbox)
6697{
6698 struct fw_bye_cmd c;
6699
0062b15c 6700 memset(&c, 0, sizeof(c));
56d36be4
DM
6701 INIT_CMD(c, BYE, WRITE);
6702 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
6703}
6704
6705/**
6706 * t4_init_cmd - ask FW to initialize the device
6707 * @adap: the adapter
6708 * @mbox: mailbox to use for the FW command
6709 *
6710 * Issues a command to FW to partially initialize the device. This
6711 * performs initialization that generally doesn't depend on user input.
6712 */
6713int t4_early_init(struct adapter *adap, unsigned int mbox)
6714{
6715 struct fw_initialize_cmd c;
6716
0062b15c 6717 memset(&c, 0, sizeof(c));
56d36be4
DM
6718 INIT_CMD(c, INITIALIZE, WRITE);
6719 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
6720}
6721
6722/**
6723 * t4_fw_reset - issue a reset to FW
6724 * @adap: the adapter
6725 * @mbox: mailbox to use for the FW command
6726 * @reset: specifies the type of reset to perform
6727 *
6728 * Issues a reset command of the specified type to FW.
6729 */
6730int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset)
6731{
6732 struct fw_reset_cmd c;
6733
0062b15c 6734 memset(&c, 0, sizeof(c));
56d36be4 6735 INIT_CMD(c, RESET, WRITE);
f404f80c 6736 c.val = cpu_to_be32(reset);
56d36be4
DM
6737 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
6738}
6739
26f7cbc0
VP
6740/**
6741 * t4_fw_halt - issue a reset/halt to FW and put uP into RESET
6742 * @adap: the adapter
6743 * @mbox: mailbox to use for the FW RESET command (if desired)
6744 * @force: force uP into RESET even if FW RESET command fails
6745 *
6746 * Issues a RESET command to firmware (if desired) with a HALT indication
6747 * and then puts the microprocessor into RESET state. The RESET command
6748 * will only be issued if a legitimate mailbox is provided (mbox <=
b2e1a3f0 6749 * PCIE_FW_MASTER_M).
26f7cbc0
VP
6750 *
6751 * This is generally used in order for the host to safely manipulate the
6752 * adapter without fear of conflicting with whatever the firmware might
6753 * be doing. The only way out of this state is to RESTART the firmware
6754 * ...
6755 */
de5b8677 6756static int t4_fw_halt(struct adapter *adap, unsigned int mbox, int force)
26f7cbc0
VP
6757{
6758 int ret = 0;
6759
6760 /*
6761 * If a legitimate mailbox is provided, issue a RESET command
6762 * with a HALT indication.
6763 */
b2e1a3f0 6764 if (mbox <= PCIE_FW_MASTER_M) {
26f7cbc0
VP
6765 struct fw_reset_cmd c;
6766
6767 memset(&c, 0, sizeof(c));
6768 INIT_CMD(c, RESET, WRITE);
f404f80c
HS
6769 c.val = cpu_to_be32(PIORST_F | PIORSTMODE_F);
6770 c.halt_pkd = cpu_to_be32(FW_RESET_CMD_HALT_F);
26f7cbc0
VP
6771 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
6772 }
6773
6774 /*
6775 * Normally we won't complete the operation if the firmware RESET
6776 * command fails but if our caller insists we'll go ahead and put the
6777 * uP into RESET. This can be useful if the firmware is hung or even
6778 * missing ... We'll have to take the risk of putting the uP into
6779 * RESET without the cooperation of firmware in that case.
6780 *
6781 * We also force the firmware's HALT flag to be on in case we bypassed
6782 * the firmware RESET command above or we're dealing with old firmware
6783 * which doesn't have the HALT capability. This will serve as a flag
6784 * for the incoming firmware to know that it's coming out of a HALT
6785 * rather than a RESET ... if it's new enough to understand that ...
6786 */
6787 if (ret == 0 || force) {
89c3a86c 6788 t4_set_reg_field(adap, CIM_BOOT_CFG_A, UPCRST_F, UPCRST_F);
f061de42 6789 t4_set_reg_field(adap, PCIE_FW_A, PCIE_FW_HALT_F,
b2e1a3f0 6790 PCIE_FW_HALT_F);
26f7cbc0
VP
6791 }
6792
6793 /*
6794 * And we always return the result of the firmware RESET command
6795 * even when we force the uP into RESET ...
6796 */
6797 return ret;
6798}
6799
6800/**
6801 * t4_fw_restart - restart the firmware by taking the uP out of RESET
6802 * @adap: the adapter
 * @mbox: mailbox to use for the FW RESET command (if desired)
6803 * @reset: if we want to do a RESET to restart things
6804 *
6805 * Restart firmware previously halted by t4_fw_halt(). On successful
6806 * return the previous PF Master remains as the new PF Master and there
6807 * is no need to issue a new HELLO command, etc.
6808 *
6809 * We do this in two ways:
6810 *
6811 * 1. If we're dealing with newer firmware we'll simply want to take
6812 * the chip's microprocessor out of RESET. This will cause the
6813 * firmware to start up from its start vector. And then we'll loop
6814 * until the firmware indicates it's started again (PCIE_FW.HALT
6815 * reset to 0) or we timeout.
6816 *
6817 * 2. If we're dealing with older firmware then we'll need to RESET
6818 * the chip since older firmware won't recognize the PCIE_FW.HALT
6819 * flag and automatically RESET itself on startup.
6820 */
de5b8677 6821static int t4_fw_restart(struct adapter *adap, unsigned int mbox, int reset)
26f7cbc0
VP
6822{
6823 if (reset) {
6824 /*
6825 * Since we're directing the RESET instead of the firmware
6826 * doing it automatically, we need to clear the PCIE_FW.HALT
6827 * bit.
6828 */
f061de42 6829 t4_set_reg_field(adap, PCIE_FW_A, PCIE_FW_HALT_F, 0);
26f7cbc0
VP
6830
6831 /*
6832 * If we've been given a valid mailbox, first try to get the
6833 * firmware to do the RESET. If that works, great and we can
6834 * return success. Otherwise, if we haven't been given a
6835 * valid mailbox or the RESET command failed, fall back to
6836 * hitting the chip with a hammer.
6837 */
b2e1a3f0 6838 if (mbox <= PCIE_FW_MASTER_M) {
89c3a86c 6839 t4_set_reg_field(adap, CIM_BOOT_CFG_A, UPCRST_F, 0);
26f7cbc0
VP
6840 msleep(100);
6841 if (t4_fw_reset(adap, mbox,
0d804338 6842 PIORST_F | PIORSTMODE_F) == 0)
26f7cbc0
VP
6843 return 0;
6844 }
6845
0d804338 6846 t4_write_reg(adap, PL_RST_A, PIORST_F | PIORSTMODE_F);
26f7cbc0
VP
6847 msleep(2000);
6848 } else {
6849 int ms;
6850
89c3a86c 6851 t4_set_reg_field(adap, CIM_BOOT_CFG_A, UPCRST_F, 0);
26f7cbc0 6852 for (ms = 0; ms < FW_CMD_MAX_TIMEOUT; ) {
f061de42 6853 if (!(t4_read_reg(adap, PCIE_FW_A) & PCIE_FW_HALT_F))
26f7cbc0
VP
6854 return 0;
6855 msleep(100);
6856 ms += 100;
6857 }
6858 return -ETIMEDOUT;
6859 }
6860 return 0;
6861}
6862
6863/**
6864 * t4_fw_upgrade - perform all of the steps necessary to upgrade FW
6865 * @adap: the adapter
6866 * @mbox: mailbox to use for the FW RESET command (if desired)
6867 * @fw_data: the firmware image to write
6868 * @size: image size
6869 * @force: force upgrade even if firmware doesn't cooperate
6870 *
6871 * Perform all of the steps necessary for upgrading an adapter's
6872 * firmware image. Normally this requires the cooperation of the
6873 * existing firmware in order to halt all existing activities
6874 * but if an invalid mailbox token is passed in we skip that step
6875 * (though we'll still put the adapter microprocessor into RESET in
6876 * that case).
6877 *
6878 * On successful return the new firmware will have been loaded and
6879 * the adapter will have been fully RESET losing all previous setup
6880 * state. On unsuccessful return the adapter may be completely hosed ...
6881 * positive errno indicates that the adapter is ~probably~ intact, a
6882 * negative errno indicates that things are looking bad ...
6883 */
22c0b963
HS
6884int t4_fw_upgrade(struct adapter *adap, unsigned int mbox,
6885 const u8 *fw_data, unsigned int size, int force)
26f7cbc0
VP
6886{
6887 const struct fw_hdr *fw_hdr = (const struct fw_hdr *)fw_data;
6888 int reset, ret;
6889
79af221d
HS
6890 if (!t4_fw_matches_chip(adap, fw_hdr))
6891 return -EINVAL;
6892
26747211
AV
6893 /* Disable FW_OK flag so that mbox commands with FW_OK flag set
6894	 * won't be sent while we are flashing FW.
6895 */
6896 adap->flags &= ~FW_OK;
6897
26f7cbc0
VP
6898 ret = t4_fw_halt(adap, mbox, force);
6899 if (ret < 0 && !force)
26747211 6900 goto out;
26f7cbc0
VP
6901
6902 ret = t4_load_fw(adap, fw_data, size);
6903 if (ret < 0)
26747211 6904 goto out;
26f7cbc0 6905
4da18741
AV
6906 /*
6907 * If there was a Firmware Configuration File stored in FLASH,
6908	 * Firmware. In order to prevent difficult-to-diagnose adapter
6909	 * initialization issues, we clear out the Firmware Configuration File
6910	 * portion of the FLASH. The user will need to re-FLASH a new
6911 * portion of the FLASH . The user will need to re-FLASH a new
6912 * Firmware Configuration File which is compatible with the new
6913 * Firmware if that's desired.
6914 */
6915 (void)t4_load_cfg(adap, NULL, 0);
6916
26f7cbc0
VP
6917 /*
6918 * Older versions of the firmware don't understand the new
6919 * PCIE_FW.HALT flag and so won't know to perform a RESET when they
6920 * restart. So for newly loaded older firmware we'll have to do the
6921 * RESET for it so it starts up on a clean slate. We can tell if
6922 * the newly loaded firmware will handle this right by checking
6923 * its header flags to see if it advertises the capability.
6924 */
f404f80c 6925 reset = ((be32_to_cpu(fw_hdr->flags) & FW_HDR_FLAGS_RESET_HALT) == 0);
26747211
AV
6926 ret = t4_fw_restart(adap, mbox, reset);
6927
6928 /* Grab potentially new Firmware Device Log parameters so we can see
6929 * how healthy the new Firmware is. It's okay to contact the new
6930 * Firmware for these parameters even though, as far as it's
6931 * concerned, we've never said "HELLO" to it ...
6932 */
6933 (void)t4_init_devlog_params(adap);
6934out:
6935 adap->flags |= FW_OK;
6936 return ret;
26f7cbc0
VP
6937}
6938
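/* Example (an illustrative sketch, not part of the driver): one way a
 * caller could drive a firmware flash, first asking the running firmware
 * to cooperate and only forcing the upgrade if that fails.  The helper
 * name and the fallback policy below are invented for the example.
 */
static inline int example_flash_fw(struct adapter *adap, unsigned int mbox,
				   const u8 *fw_data, unsigned int size)
{
	int ret;

	/* Polite path: HALT the firmware, load the image, then RESTART. */
	ret = t4_fw_upgrade(adap, mbox, fw_data, size, 0);
	if (ret) {
		/* Last resort: force the uP into RESET even without the
		 * cooperation of a hung or missing firmware.
		 */
		ret = t4_fw_upgrade(adap, mbox, fw_data, size, 1);
	}
	return ret;
}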
acac5962
HS
6939/**
6940 * t4_fl_pkt_align - return the fl packet alignment
6941 * @adap: the adapter
6942 *
6943 * T4 has a single field to specify the packing and padding boundary.
6944	 * T5 onwards has separate fields for these and hence the alignment for
6945	 * the next packet offset is the maximum of the two.
6946 *
6947 */
6948int t4_fl_pkt_align(struct adapter *adap)
6949{
6950 u32 sge_control, sge_control2;
6951 unsigned int ingpadboundary, ingpackboundary, fl_align, ingpad_shift;
6952
6953 sge_control = t4_read_reg(adap, SGE_CONTROL_A);
6954
6955 /* T4 uses a single control field to specify both the PCIe Padding and
6956 * Packing Boundary. T5 introduced the ability to specify these
6957 * separately. The actual Ingress Packet Data alignment boundary
6958 * within Packed Buffer Mode is the maximum of these two
6959 * specifications. (Note that it makes no real practical sense to
6960	 * have the Padding Boundary be larger than the Packing Boundary but you
6961	 * could set the chip up that way and, in fact, legacy T4 code would
6962	 * end up doing this because it would initialize the Padding Boundary and
6963	 * leave the Packing Boundary initialized to 0 (16 bytes).)
6964	 * The Padding Boundary starts at 8B on T6,
6965	 * whereas it is 32B for T4 and T5.
6966 */
6967 if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
6968 ingpad_shift = INGPADBOUNDARY_SHIFT_X;
6969 else
6970 ingpad_shift = T6_INGPADBOUNDARY_SHIFT_X;
6971
6972 ingpadboundary = 1 << (INGPADBOUNDARY_G(sge_control) + ingpad_shift);
6973
6974 fl_align = ingpadboundary;
6975 if (!is_t4(adap->params.chip)) {
6976 /* T5 has a weird interpretation of one of the PCIe Packing
6977 * Boundary values. No idea why ...
6978 */
6979 sge_control2 = t4_read_reg(adap, SGE_CONTROL2_A);
6980 ingpackboundary = INGPACKBOUNDARY_G(sge_control2);
6981 if (ingpackboundary == INGPACKBOUNDARY_16B_X)
6982 ingpackboundary = 16;
6983 else
6984 ingpackboundary = 1 << (ingpackboundary +
6985 INGPACKBOUNDARY_SHIFT_X);
6986
6987 fl_align = max(ingpadboundary, ingpackboundary);
6988 }
6989 return fl_align;
6990}
6991
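/* Example usage (a sketch only, not code lifted from sge.c): round a Free
 * List buffer size up to the packet alignment computed above.  The helper
 * name is invented for the example.
 */
static inline unsigned int example_round_to_fl_align(struct adapter *adap,
						     unsigned int size)
{
	unsigned int fl_align = t4_fl_pkt_align(adap);

	/* fl_align is a power of two, so the usual mask trick applies. */
	return (size + fl_align - 1) & ~(fl_align - 1);
}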
636f9d37
VP
6992/**
6993 * t4_fixup_host_params - fix up host-dependent parameters
6994 * @adap: the adapter
6995 * @page_size: the host's Base Page Size
6996 * @cache_line_size: the host's Cache Line Size
6997 *
6998 * Various registers in T4 contain values which are dependent on the
6999 * host's Base Page and Cache Line Sizes. This function will fix all of
7000 * those registers with the appropriate values as passed in ...
7001 */
7002int t4_fixup_host_params(struct adapter *adap, unsigned int page_size,
7003 unsigned int cache_line_size)
7004{
7005 unsigned int page_shift = fls(page_size) - 1;
7006 unsigned int sge_hps = page_shift - 10;
7007 unsigned int stat_len = cache_line_size > 64 ? 128 : 64;
7008 unsigned int fl_align = cache_line_size < 32 ? 32 : cache_line_size;
7009 unsigned int fl_align_log = fls(fl_align) - 1;
7010
f612b815
HS
7011 t4_write_reg(adap, SGE_HOST_PAGE_SIZE_A,
7012 HOSTPAGESIZEPF0_V(sge_hps) |
7013 HOSTPAGESIZEPF1_V(sge_hps) |
7014 HOSTPAGESIZEPF2_V(sge_hps) |
7015 HOSTPAGESIZEPF3_V(sge_hps) |
7016 HOSTPAGESIZEPF4_V(sge_hps) |
7017 HOSTPAGESIZEPF5_V(sge_hps) |
7018 HOSTPAGESIZEPF6_V(sge_hps) |
7019 HOSTPAGESIZEPF7_V(sge_hps));
636f9d37 7020
ce8f407a 7021 if (is_t4(adap->params.chip)) {
f612b815
HS
7022 t4_set_reg_field(adap, SGE_CONTROL_A,
7023 INGPADBOUNDARY_V(INGPADBOUNDARY_M) |
7024 EGRSTATUSPAGESIZE_F,
7025 INGPADBOUNDARY_V(fl_align_log -
7026 INGPADBOUNDARY_SHIFT_X) |
7027 EGRSTATUSPAGESIZE_V(stat_len != 64));
ce8f407a 7028 } else {
bb58d079
AV
7029 unsigned int pack_align;
7030 unsigned int ingpad, ingpack;
7031 unsigned int pcie_cap;
7032
ce8f407a
HS
7033 /* T5 introduced the separation of the Free List Padding and
7034 * Packing Boundaries. Thus, we can select a smaller Padding
7035 * Boundary to avoid uselessly chewing up PCIe Link and Memory
7036 * Bandwidth, and use a Packing Boundary which is large enough
7037 * to avoid false sharing between CPUs, etc.
7038 *
7039 * For the PCI Link, the smaller the Padding Boundary the
7040 * better. For the Memory Controller, a smaller Padding
7041 * Boundary is better until we cross under the Memory Line
7042 * Size (the minimum unit of transfer to/from Memory). If we
7043 * have a Padding Boundary which is smaller than the Memory
7044 * Line Size, that'll involve a Read-Modify-Write cycle on the
bb58d079
AV
7045 * Memory Controller which is never good.
7046 */
7047
7048 /* We want the Packing Boundary to be based on the Cache Line
7049 * Size in order to help avoid False Sharing performance
7050 * issues between CPUs, etc. We also want the Packing
7051 * Boundary to incorporate the PCI-E Maximum Payload Size. We
7052 * get best performance when the Packing Boundary is a
7053 * multiple of the Maximum Payload Size.
7054 */
7055 pack_align = fl_align;
7056 pcie_cap = pci_find_capability(adap->pdev, PCI_CAP_ID_EXP);
7057 if (pcie_cap) {
7058 unsigned int mps, mps_log;
7059 u16 devctl;
7060
7061 /* The PCIe Device Control Maximum Payload Size field
7062 * [bits 7:5] encodes sizes as powers of 2 starting at
7063 * 128 bytes.
7064 */
7065 pci_read_config_word(adap->pdev,
7066 pcie_cap + PCI_EXP_DEVCTL,
7067 &devctl);
7068 mps_log = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5) + 7;
7069 mps = 1 << mps_log;
7070 if (mps > pack_align)
7071 pack_align = mps;
7072 }
7073
7074 /* N.B. T5/T6 have a crazy special interpretation of the "0"
7075 * value for the Packing Boundary. This corresponds to 16
7076 * bytes instead of the expected 32 bytes. So if we want 32
7077 * bytes, the best we can really do is 64 bytes ...
7078 */
7079 if (pack_align <= 16) {
7080 ingpack = INGPACKBOUNDARY_16B_X;
7081 fl_align = 16;
7082 } else if (pack_align == 32) {
7083 ingpack = INGPACKBOUNDARY_64B_X;
ce8f407a 7084 fl_align = 64;
bb58d079
AV
7085 } else {
7086 unsigned int pack_align_log = fls(pack_align) - 1;
7087
7088 ingpack = pack_align_log - INGPACKBOUNDARY_SHIFT_X;
7089 fl_align = pack_align;
ce8f407a 7090 }
acac5962 7091
bb58d079
AV
7092 /* Use the smallest Ingress Padding which isn't smaller than
7093 * the Memory Controller Read/Write Size. We'll take that as
7094 * being 8 bytes since we don't know of any system with a
7095 * wider Memory Controller Bus Width.
7096 */
acac5962 7097 if (is_t5(adap->params.chip))
bb58d079 7098 ingpad = INGPADBOUNDARY_32B_X;
acac5962 7099 else
bb58d079 7100 ingpad = T6_INGPADBOUNDARY_8B_X;
acac5962 7101
f612b815
HS
7102 t4_set_reg_field(adap, SGE_CONTROL_A,
7103 INGPADBOUNDARY_V(INGPADBOUNDARY_M) |
7104 EGRSTATUSPAGESIZE_F,
acac5962 7105 INGPADBOUNDARY_V(ingpad) |
f612b815 7106 EGRSTATUSPAGESIZE_V(stat_len != 64));
ce8f407a
HS
7107 t4_set_reg_field(adap, SGE_CONTROL2_A,
7108 INGPACKBOUNDARY_V(INGPACKBOUNDARY_M),
bb58d079 7109 INGPACKBOUNDARY_V(ingpack));
ce8f407a 7110 }
636f9d37
VP
7111 /*
7112 * Adjust various SGE Free List Host Buffer Sizes.
7113 *
7114 * This is something of a crock since we're using fixed indices into
7115 * the array which are also known by the sge.c code and the T4
7116 * Firmware Configuration File. We need to come up with a much better
7117 * approach to managing this array. For now, the first four entries
7118 * are:
7119 *
7120 * 0: Host Page Size
7121 * 1: 64KB
7122 * 2: Buffer size corresponding to 1500 byte MTU (unpacked mode)
7123 * 3: Buffer size corresponding to 9000 byte MTU (unpacked mode)
7124 *
7125 * For the single-MTU buffers in unpacked mode we need to include
7126 * space for the SGE Control Packet Shift, 14 byte Ethernet header,
7127 * possible 4 byte VLAN tag, all rounded up to the next Ingress Packet
dbedd44e 7128 * Padding boundary. All of these are accommodated in the Factory
636f9d37
VP
7129 * Default Firmware Configuration File but we need to adjust it for
7130 * this host's cache line size.
7131 */
f612b815
HS
7132 t4_write_reg(adap, SGE_FL_BUFFER_SIZE0_A, page_size);
7133 t4_write_reg(adap, SGE_FL_BUFFER_SIZE2_A,
7134 (t4_read_reg(adap, SGE_FL_BUFFER_SIZE2_A) + fl_align-1)
636f9d37 7135 & ~(fl_align-1));
f612b815
HS
7136 t4_write_reg(adap, SGE_FL_BUFFER_SIZE3_A,
7137 (t4_read_reg(adap, SGE_FL_BUFFER_SIZE3_A) + fl_align-1)
636f9d37
VP
7138 & ~(fl_align-1));
7139
0d804338 7140 t4_write_reg(adap, ULP_RX_TDDP_PSZ_A, HPZ0_V(page_shift - 12));
636f9d37
VP
7141
7142 return 0;
7143}
7144
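/* Example (illustrative only; the real call site lives elsewhere in the
 * driver): fix up the host-dependent SGE registers using the kernel's page
 * and cache-line sizes before the queues are brought up.
 */
static inline int example_fixup_host_params(struct adapter *adap)
{
	return t4_fixup_host_params(adap, PAGE_SIZE, L1_CACHE_BYTES);
}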
7145/**
7146 * t4_fw_initialize - ask FW to initialize the device
7147 * @adap: the adapter
7148 * @mbox: mailbox to use for the FW command
7149 *
7150 * Issues a command to FW to partially initialize the device. This
7151 * performs initialization that generally doesn't depend on user input.
7152 */
7153int t4_fw_initialize(struct adapter *adap, unsigned int mbox)
7154{
7155 struct fw_initialize_cmd c;
7156
7157 memset(&c, 0, sizeof(c));
7158 INIT_CMD(c, INITIALIZE, WRITE);
7159 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
7160}
7161
56d36be4 7162/**
01b69614 7163 * t4_query_params_rw - query FW or device parameters
56d36be4
DM
7164 * @adap: the adapter
7165 * @mbox: mailbox to use for the FW command
7166 * @pf: the PF
7167 * @vf: the VF
7168 * @nparams: the number of parameters
7169 * @params: the parameter names
7170 * @val: the parameter values
01b69614 7171 * @rw: Write and read flag
8f46d467 7172 * @sleep_ok: if true, we may sleep awaiting mbox cmd completion
56d36be4
DM
7173 *
7174 * Reads the value of FW or device parameters. Up to 7 parameters can be
7175 * queried at once.
7176 */
01b69614
HS
7177int t4_query_params_rw(struct adapter *adap, unsigned int mbox, unsigned int pf,
7178 unsigned int vf, unsigned int nparams, const u32 *params,
8f46d467 7179 u32 *val, int rw, bool sleep_ok)
56d36be4
DM
7180{
7181 int i, ret;
7182 struct fw_params_cmd c;
7183 __be32 *p = &c.param[0].mnem;
7184
7185 if (nparams > 7)
7186 return -EINVAL;
7187
7188 memset(&c, 0, sizeof(c));
f404f80c
HS
7189 c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_PARAMS_CMD) |
7190 FW_CMD_REQUEST_F | FW_CMD_READ_F |
7191 FW_PARAMS_CMD_PFN_V(pf) |
7192 FW_PARAMS_CMD_VFN_V(vf));
7193 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
7194
01b69614
HS
7195 for (i = 0; i < nparams; i++) {
7196 *p++ = cpu_to_be32(*params++);
7197 if (rw)
7198 *p = cpu_to_be32(*(val + i));
7199 p++;
7200 }
56d36be4 7201
8f46d467 7202 ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok);
56d36be4
DM
7203 if (ret == 0)
7204 for (i = 0, p = &c.param[0].val; i < nparams; i++, p += 2)
f404f80c 7205 *val++ = be32_to_cpu(*p);
56d36be4
DM
7206 return ret;
7207}
7208
01b69614
HS
7209int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
7210 unsigned int vf, unsigned int nparams, const u32 *params,
7211 u32 *val)
7212{
8f46d467
AV
7213 return t4_query_params_rw(adap, mbox, pf, vf, nparams, params, val, 0,
7214 true);
7215}
7216
7217int t4_query_params_ns(struct adapter *adap, unsigned int mbox, unsigned int pf,
7218 unsigned int vf, unsigned int nparams, const u32 *params,
7219 u32 *val)
7220{
7221 return t4_query_params_rw(adap, mbox, pf, vf, nparams, params, val, 0,
7222 false);
01b69614
HS
7223}
7224
688848b1 7225/**
01b69614 7226 * t4_set_params_timeout - sets FW or device parameters
688848b1
AB
7227 * @adap: the adapter
7228 * @mbox: mailbox to use for the FW command
7229 * @pf: the PF
7230 * @vf: the VF
7231 * @nparams: the number of parameters
7232 * @params: the parameter names
7233 * @val: the parameter values
01b69614 7234 * @timeout: the timeout time
688848b1 7235 *
688848b1
AB
7236 * Sets the value of FW or device parameters. Up to 7 parameters can be
7237 * specified at once.
7238 */
01b69614 7239int t4_set_params_timeout(struct adapter *adap, unsigned int mbox,
688848b1
AB
7240 unsigned int pf, unsigned int vf,
7241 unsigned int nparams, const u32 *params,
01b69614 7242 const u32 *val, int timeout)
688848b1
AB
7243{
7244 struct fw_params_cmd c;
7245 __be32 *p = &c.param[0].mnem;
7246
7247 if (nparams > 7)
7248 return -EINVAL;
7249
7250 memset(&c, 0, sizeof(c));
e2ac9628 7251 c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_PARAMS_CMD) |
01b69614
HS
7252 FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
7253 FW_PARAMS_CMD_PFN_V(pf) |
7254 FW_PARAMS_CMD_VFN_V(vf));
688848b1
AB
7255 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
7256
7257 while (nparams--) {
7258 *p++ = cpu_to_be32(*params++);
7259 *p++ = cpu_to_be32(*val++);
7260 }
7261
01b69614 7262 return t4_wr_mbox_timeout(adap, mbox, &c, sizeof(c), NULL, timeout);
688848b1
AB
7263}
7264
56d36be4
DM
7265/**
7266 * t4_set_params - sets FW or device parameters
7267 * @adap: the adapter
7268 * @mbox: mailbox to use for the FW command
7269 * @pf: the PF
7270 * @vf: the VF
7271 * @nparams: the number of parameters
7272 * @params: the parameter names
7273 * @val: the parameter values
7274 *
7275 * Sets the value of FW or device parameters. Up to 7 parameters can be
7276 * specified at once.
7277 */
7278int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
7279 unsigned int vf, unsigned int nparams, const u32 *params,
7280 const u32 *val)
7281{
01b69614
HS
7282 return t4_set_params_timeout(adap, mbox, pf, vf, nparams, params, val,
7283 FW_CMD_MAX_TIMEOUT);
56d36be4
DM
7284}
7285
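/* Example (a sketch; the helper and its policy are invented): read a single
 * FW/device parameter and write it back only when it actually needs to
 * change.  @param is assumed to have been built by the caller from the
 * FW_PARAMS_* mnemonics in t4fw_api.h.
 */
static inline int example_update_param(struct adapter *adap, unsigned int mbox,
				       unsigned int pf, unsigned int vf,
				       u32 param, u32 new_val)
{
	u32 cur_val;
	int ret;

	ret = t4_query_params(adap, mbox, pf, vf, 1, &param, &cur_val);
	if (ret < 0)
		return ret;

	if (cur_val == new_val)
		return 0;

	return t4_set_params(adap, mbox, pf, vf, 1, &param, &new_val);
}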
7286/**
7287 * t4_cfg_pfvf - configure PF/VF resource limits
7288 * @adap: the adapter
7289 * @mbox: mailbox to use for the FW command
7290 * @pf: the PF being configured
7291 * @vf: the VF being configured
7292 * @txq: the max number of egress queues
7293 * @txq_eth_ctrl: the max number of egress Ethernet or control queues
7294 * @rxqi: the max number of interrupt-capable ingress queues
7295 * @rxq: the max number of interruptless ingress queues
7296 * @tc: the PCI traffic class
7297 * @vi: the max number of virtual interfaces
7298 * @cmask: the channel access rights mask for the PF/VF
7299 * @pmask: the port access rights mask for the PF/VF
7300 * @nexact: the maximum number of exact MPS filters
7301 * @rcaps: read capabilities
7302 * @wxcaps: write/execute capabilities
7303 *
7304 * Configures resource limits and capabilities for a physical or virtual
7305 * function.
7306 */
7307int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf,
7308 unsigned int vf, unsigned int txq, unsigned int txq_eth_ctrl,
7309 unsigned int rxqi, unsigned int rxq, unsigned int tc,
7310 unsigned int vi, unsigned int cmask, unsigned int pmask,
7311 unsigned int nexact, unsigned int rcaps, unsigned int wxcaps)
7312{
7313 struct fw_pfvf_cmd c;
7314
7315 memset(&c, 0, sizeof(c));
f404f80c
HS
7316 c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_PFVF_CMD) | FW_CMD_REQUEST_F |
7317 FW_CMD_WRITE_F | FW_PFVF_CMD_PFN_V(pf) |
7318 FW_PFVF_CMD_VFN_V(vf));
7319 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
7320 c.niqflint_niq = cpu_to_be32(FW_PFVF_CMD_NIQFLINT_V(rxqi) |
7321 FW_PFVF_CMD_NIQ_V(rxq));
7322 c.type_to_neq = cpu_to_be32(FW_PFVF_CMD_CMASK_V(cmask) |
7323 FW_PFVF_CMD_PMASK_V(pmask) |
7324 FW_PFVF_CMD_NEQ_V(txq));
7325 c.tc_to_nexactf = cpu_to_be32(FW_PFVF_CMD_TC_V(tc) |
7326 FW_PFVF_CMD_NVI_V(vi) |
7327 FW_PFVF_CMD_NEXACTF_V(nexact));
7328 c.r_caps_to_nethctrl = cpu_to_be32(FW_PFVF_CMD_R_CAPS_V(rcaps) |
7329 FW_PFVF_CMD_WX_CAPS_V(wxcaps) |
7330 FW_PFVF_CMD_NETHCTRL_V(txq_eth_ctrl));
56d36be4
DM
7331 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
7332}
7333
7334/**
7335 * t4_alloc_vi - allocate a virtual interface
7336 * @adap: the adapter
7337 * @mbox: mailbox to use for the FW command
7338 * @port: physical port associated with the VI
7339 * @pf: the PF owning the VI
7340 * @vf: the VF owning the VI
7341 * @nmac: number of MAC addresses needed (1 to 5)
7342 * @mac: the MAC addresses of the VI
7343 * @rss_size: size of RSS table slice associated with this VI
7344 *
7345 * Allocates a virtual interface for the given physical port. If @mac is
7346 * not %NULL it contains the MAC addresses of the VI as assigned by FW.
7347 * @mac should be large enough to hold @nmac Ethernet addresses, they are
7348 * stored consecutively so the space needed is @nmac * 6 bytes.
7349 * Returns a negative error number or the non-negative VI id.
7350 */
7351int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
7352 unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac,
7353 unsigned int *rss_size)
7354{
7355 int ret;
7356 struct fw_vi_cmd c;
7357
7358 memset(&c, 0, sizeof(c));
f404f80c
HS
7359 c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_VI_CMD) | FW_CMD_REQUEST_F |
7360 FW_CMD_WRITE_F | FW_CMD_EXEC_F |
7361 FW_VI_CMD_PFN_V(pf) | FW_VI_CMD_VFN_V(vf));
7362 c.alloc_to_len16 = cpu_to_be32(FW_VI_CMD_ALLOC_F | FW_LEN16(c));
2b5fb1f2 7363 c.portid_pkd = FW_VI_CMD_PORTID_V(port);
56d36be4
DM
7364 c.nmac = nmac - 1;
7365
7366 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
7367 if (ret)
7368 return ret;
7369
7370 if (mac) {
7371 memcpy(mac, c.mac, sizeof(c.mac));
7372 switch (nmac) {
7373 case 5:
7374 memcpy(mac + 24, c.nmac3, sizeof(c.nmac3));
7375 case 4:
7376 memcpy(mac + 18, c.nmac2, sizeof(c.nmac2));
7377 case 3:
7378 memcpy(mac + 12, c.nmac1, sizeof(c.nmac1));
7379 case 2:
7380 memcpy(mac + 6, c.nmac0, sizeof(c.nmac0));
7381 }
7382 }
7383 if (rss_size)
f404f80c
HS
7384 *rss_size = FW_VI_CMD_RSSSIZE_G(be16_to_cpu(c.rsssize_pkd));
7385 return FW_VI_CMD_VIID_G(be16_to_cpu(c.type_viid));
56d36be4
DM
7386}
7387
4f3a0fcf
HS
7388/**
7389 * t4_free_vi - free a virtual interface
7390 * @adap: the adapter
7391 * @mbox: mailbox to use for the FW command
7392 * @pf: the PF owning the VI
7393 * @vf: the VF owning the VI
7394	 * @viid: virtual interface identifier
7395 *
7396 * Free a previously allocated virtual interface.
7397 */
7398int t4_free_vi(struct adapter *adap, unsigned int mbox, unsigned int pf,
7399 unsigned int vf, unsigned int viid)
7400{
7401 struct fw_vi_cmd c;
7402
7403 memset(&c, 0, sizeof(c));
7404 c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_VI_CMD) |
7405 FW_CMD_REQUEST_F |
7406 FW_CMD_EXEC_F |
7407 FW_VI_CMD_PFN_V(pf) |
7408 FW_VI_CMD_VFN_V(vf));
7409 c.alloc_to_len16 = cpu_to_be32(FW_VI_CMD_FREE_F | FW_LEN16(c));
7410 c.type_viid = cpu_to_be16(FW_VI_CMD_VIID_V(viid));
7411
7412 return t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
56d36be4
DM
7413}
7414
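/* Example (an illustrative sketch, names invented): allocate a Virtual
 * Interface with a single MAC address on a port and hand the VI id, MAC
 * and RSS slice size back to the caller.  t4_free_vi() above is the
 * matching teardown call.
 */
static inline int example_alloc_one_vi(struct adapter *adap, unsigned int mbox,
				       unsigned int port, unsigned int pf,
				       unsigned int vf, u8 *mac,
				       unsigned int *rss_size,
				       unsigned int *viid)
{
	int ret = t4_alloc_vi(adap, mbox, port, pf, vf, 1, mac, rss_size);

	if (ret < 0)
		return ret;

	*viid = ret;	/* a non-negative return is the new VI id */
	return 0;
}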
56d36be4
DM
7415/**
7416 * t4_set_rxmode - set Rx properties of a virtual interface
7417 * @adap: the adapter
7418 * @mbox: mailbox to use for the FW command
7419 * @viid: the VI id
7420 * @mtu: the new MTU or -1
7421 * @promisc: 1 to enable promiscuous mode, 0 to disable it, -1 no change
7422 * @all_multi: 1 to enable all-multi mode, 0 to disable it, -1 no change
7423 * @bcast: 1 to enable broadcast Rx, 0 to disable it, -1 no change
f8f5aafa 7424 * @vlanex: 1 to enable HW VLAN extraction, 0 to disable it, -1 no change
56d36be4
DM
7425 * @sleep_ok: if true we may sleep while awaiting command completion
7426 *
7427 * Sets Rx properties of a virtual interface.
7428 */
7429int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
f8f5aafa
DM
7430 int mtu, int promisc, int all_multi, int bcast, int vlanex,
7431 bool sleep_ok)
56d36be4
DM
7432{
7433 struct fw_vi_rxmode_cmd c;
7434
7435 /* convert to FW values */
7436 if (mtu < 0)
7437 mtu = FW_RXMODE_MTU_NO_CHG;
7438 if (promisc < 0)
2b5fb1f2 7439 promisc = FW_VI_RXMODE_CMD_PROMISCEN_M;
56d36be4 7440 if (all_multi < 0)
2b5fb1f2 7441 all_multi = FW_VI_RXMODE_CMD_ALLMULTIEN_M;
56d36be4 7442 if (bcast < 0)
2b5fb1f2 7443 bcast = FW_VI_RXMODE_CMD_BROADCASTEN_M;
f8f5aafa 7444 if (vlanex < 0)
2b5fb1f2 7445 vlanex = FW_VI_RXMODE_CMD_VLANEXEN_M;
56d36be4
DM
7446
7447 memset(&c, 0, sizeof(c));
f404f80c
HS
7448 c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_RXMODE_CMD) |
7449 FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
7450 FW_VI_RXMODE_CMD_VIID_V(viid));
7451 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
7452 c.mtu_to_vlanexen =
7453 cpu_to_be32(FW_VI_RXMODE_CMD_MTU_V(mtu) |
7454 FW_VI_RXMODE_CMD_PROMISCEN_V(promisc) |
7455 FW_VI_RXMODE_CMD_ALLMULTIEN_V(all_multi) |
7456 FW_VI_RXMODE_CMD_BROADCASTEN_V(bcast) |
7457 FW_VI_RXMODE_CMD_VLANEXEN_V(vlanex));
56d36be4
DM
7458 return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
7459}
7460
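/* Example (sketch only): put a VI into promiscuous + all-multicast mode
 * while leaving the MTU, broadcast and VLAN-extraction settings untouched;
 * -1 means "no change" for those arguments.
 */
static inline int example_enter_promisc(struct adapter *adap,
					unsigned int mbox, unsigned int viid)
{
	return t4_set_rxmode(adap, mbox, viid, -1, 1, 1, -1, -1, true);
}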
846eac3f
GG
7461/**
7462 * t4_free_raw_mac_filt - Frees a raw mac entry in mps tcam
7463 * @adap: the adapter
7464 * @viid: the VI id
7465 * @addr: the MAC address
7466 * @mask: the mask
7467 * @idx: index of the entry in mps tcam
7468 * @lookup_type: MAC address for inner (1) or outer (0) header
7469 * @port_id: the port index
7470 * @sleep_ok: call is allowed to sleep
7471 *
7472 * Removes the mac entry at the specified index using raw mac interface.
7473 *
7474 * Returns a negative error number on failure.
7475 */
7476int t4_free_raw_mac_filt(struct adapter *adap, unsigned int viid,
7477 const u8 *addr, const u8 *mask, unsigned int idx,
7478 u8 lookup_type, u8 port_id, bool sleep_ok)
7479{
7480 struct fw_vi_mac_cmd c;
7481 struct fw_vi_mac_raw *p = &c.u.raw;
7482 u32 val;
7483
7484 memset(&c, 0, sizeof(c));
7485 c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
7486 FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
7487 FW_CMD_EXEC_V(0) |
7488 FW_VI_MAC_CMD_VIID_V(viid));
7489 val = FW_CMD_LEN16_V(1) |
7490 FW_VI_MAC_CMD_ENTRY_TYPE_V(FW_VI_MAC_TYPE_RAW);
7491 c.freemacs_to_len16 = cpu_to_be32(FW_VI_MAC_CMD_FREEMACS_V(0) |
7492 FW_CMD_LEN16_V(val));
7493
7494 p->raw_idx_pkd = cpu_to_be32(FW_VI_MAC_CMD_RAW_IDX_V(idx) |
7495 FW_VI_MAC_ID_BASED_FREE);
7496
7497 /* Lookup Type. Outer header: 0, Inner header: 1 */
7498 p->data0_pkd = cpu_to_be32(DATALKPTYPE_V(lookup_type) |
7499 DATAPORTNUM_V(port_id));
7500 /* Lookup mask and port mask */
7501 p->data0m_pkd = cpu_to_be64(DATALKPTYPE_V(DATALKPTYPE_M) |
7502 DATAPORTNUM_V(DATAPORTNUM_M));
7503
7504 /* Copy the address and the mask */
7505 memcpy((u8 *)&p->data1[0] + 2, addr, ETH_ALEN);
7506 memcpy((u8 *)&p->data1m[0] + 2, mask, ETH_ALEN);
7507
7508 return t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, sleep_ok);
7509}
7510
7511/**
7512 * t4_alloc_raw_mac_filt - Adds a mac entry in mps tcam
7513 * @adap: the adapter
7514 * @viid: the VI id
7515 * @mac: the MAC address
7516 * @mask: the mask
7517 * @idx: index at which to add this entry
7518 * @port_id: the port index
7519 * @lookup_type: MAC address for inner (1) or outer (0) header
7520 * @sleep_ok: call is allowed to sleep
7521 *
7522 * Adds the mac entry at the specified index using raw mac interface.
7523 *
7524 * Returns a negative error number or the allocated index for this mac.
7525 */
7526int t4_alloc_raw_mac_filt(struct adapter *adap, unsigned int viid,
7527 const u8 *addr, const u8 *mask, unsigned int idx,
7528 u8 lookup_type, u8 port_id, bool sleep_ok)
7529{
7530 int ret = 0;
7531 struct fw_vi_mac_cmd c;
7532 struct fw_vi_mac_raw *p = &c.u.raw;
7533 u32 val;
7534
7535 memset(&c, 0, sizeof(c));
7536 c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
7537 FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
7538 FW_VI_MAC_CMD_VIID_V(viid));
7539 val = FW_CMD_LEN16_V(1) |
7540 FW_VI_MAC_CMD_ENTRY_TYPE_V(FW_VI_MAC_TYPE_RAW);
7541 c.freemacs_to_len16 = cpu_to_be32(val);
7542
7543	 /* Request the specified index for this raw MAC entry */
7544 p->raw_idx_pkd = cpu_to_be32(FW_VI_MAC_CMD_RAW_IDX_V(idx));
7545
7546 /* Lookup Type. Outer header: 0, Inner header: 1 */
7547 p->data0_pkd = cpu_to_be32(DATALKPTYPE_V(lookup_type) |
7548 DATAPORTNUM_V(port_id));
7549 /* Lookup mask and port mask */
7550 p->data0m_pkd = cpu_to_be64(DATALKPTYPE_V(DATALKPTYPE_M) |
7551 DATAPORTNUM_V(DATAPORTNUM_M));
7552
7553 /* Copy the address and the mask */
7554 memcpy((u8 *)&p->data1[0] + 2, addr, ETH_ALEN);
7555 memcpy((u8 *)&p->data1m[0] + 2, mask, ETH_ALEN);
7556
7557 ret = t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, sleep_ok);
7558 if (ret == 0) {
7559 ret = FW_VI_MAC_CMD_RAW_IDX_G(be32_to_cpu(p->raw_idx_pkd));
7560 if (ret != idx)
7561 ret = -ENOMEM;
7562 }
7563
7564 return ret;
7565}
7566
56d36be4
DM
7567/**
7568 * t4_alloc_mac_filt - allocates exact-match filters for MAC addresses
7569 * @adap: the adapter
7570 * @mbox: mailbox to use for the FW command
7571 * @viid: the VI id
7572 * @free: if true any existing filters for this VI id are first removed
7573 * @naddr: the number of MAC addresses to allocate filters for (up to 7)
7574 * @addr: the MAC address(es)
7575 * @idx: where to store the index of each allocated filter
7576 * @hash: pointer to hash address filter bitmap
7577 * @sleep_ok: call is allowed to sleep
7578 *
7579 * Allocates an exact-match filter for each of the supplied addresses and
7580 * sets it to the corresponding address. If @idx is not %NULL it should
7581 * have at least @naddr entries, each of which will be set to the index of
7582 * the filter allocated for the corresponding MAC address. If a filter
7583 * could not be allocated for an address its index is set to 0xffff.
7584 * If @hash is not %NULL addresses that fail to allocate an exact filter
7585	 * are hashed and used to update the hash filter bitmap pointed at by @hash.
7586 *
7587 * Returns a negative error number or the number of filters allocated.
7588 */
7589int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox,
7590 unsigned int viid, bool free, unsigned int naddr,
7591 const u8 **addr, u16 *idx, u64 *hash, bool sleep_ok)
7592{
3ccc6cf7 7593 int offset, ret = 0;
56d36be4 7594 struct fw_vi_mac_cmd c;
3ccc6cf7
HS
7595 unsigned int nfilters = 0;
7596 unsigned int max_naddr = adap->params.arch.mps_tcam_size;
7597 unsigned int rem = naddr;
56d36be4 7598
3ccc6cf7 7599 if (naddr > max_naddr)
56d36be4
DM
7600 return -EINVAL;
7601
3ccc6cf7
HS
7602 for (offset = 0; offset < naddr ; /**/) {
7603 unsigned int fw_naddr = (rem < ARRAY_SIZE(c.u.exact) ?
7604 rem : ARRAY_SIZE(c.u.exact));
7605 size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
7606 u.exact[fw_naddr]), 16);
7607 struct fw_vi_mac_exact *p;
7608 int i;
56d36be4 7609
3ccc6cf7
HS
7610 memset(&c, 0, sizeof(c));
7611 c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
7612 FW_CMD_REQUEST_F |
7613 FW_CMD_WRITE_F |
7614 FW_CMD_EXEC_V(free) |
7615 FW_VI_MAC_CMD_VIID_V(viid));
7616 c.freemacs_to_len16 =
7617 cpu_to_be32(FW_VI_MAC_CMD_FREEMACS_V(free) |
7618 FW_CMD_LEN16_V(len16));
7619
7620 for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) {
7621 p->valid_to_idx =
7622 cpu_to_be16(FW_VI_MAC_CMD_VALID_F |
7623 FW_VI_MAC_CMD_IDX_V(
7624 FW_VI_MAC_ADD_MAC));
7625 memcpy(p->macaddr, addr[offset + i],
7626 sizeof(p->macaddr));
7627 }
56d36be4 7628
3ccc6cf7
HS
7629 /* It's okay if we run out of space in our MAC address arena.
7630 * Some of the addresses we submit may get stored so we need
7631 * to run through the reply to see what the results were ...
7632 */
7633 ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok);
7634 if (ret && ret != -FW_ENOMEM)
7635 break;
56d36be4 7636
3ccc6cf7
HS
7637 for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) {
7638 u16 index = FW_VI_MAC_CMD_IDX_G(
7639 be16_to_cpu(p->valid_to_idx));
7640
7641 if (idx)
7642 idx[offset + i] = (index >= max_naddr ?
7643 0xffff : index);
7644 if (index < max_naddr)
7645 nfilters++;
7646 else if (hash)
7647 *hash |= (1ULL <<
7648 hash_mac_addr(addr[offset + i]));
7649 }
56d36be4 7650
3ccc6cf7
HS
7651 free = false;
7652 offset += fw_naddr;
7653 rem -= fw_naddr;
56d36be4 7654 }
3ccc6cf7
HS
7655
7656 if (ret == 0 || ret == -FW_ENOMEM)
7657 ret = nfilters;
56d36be4
DM
7658 return ret;
7659}
7660
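/* Example (a sketch, helper invented): install a small set of unicast
 * addresses on a VI and fall back to the inexact hash filter for any
 * address which could not get an exact-match slot.
 */
static inline int example_add_uc_addrs(struct adapter *adap, unsigned int mbox,
				       unsigned int viid, const u8 **addrs,
				       unsigned int naddr)
{
	u64 hash = 0;
	int ret;

	ret = t4_alloc_mac_filt(adap, mbox, viid, false, naddr, addrs,
				NULL, &hash, true);
	if (ret < 0)
		return ret;

	/* Any address that spilled into @hash still needs the inexact
	 * hash filter programmed.
	 */
	if (hash)
		return t4_set_addr_hash(adap, mbox, viid, true, hash, true);
	return 0;
}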
fc08a01a
HS
7661/**
7662 * t4_free_mac_filt - frees exact-match filters of given MAC addresses
7663 * @adap: the adapter
7664 * @mbox: mailbox to use for the FW command
7665 * @viid: the VI id
7666	 * @naddr: the number of MAC addresses to free filters for (up to 7)
7667 * @addr: the MAC address(es)
7668 * @sleep_ok: call is allowed to sleep
7669 *
7670 * Frees the exact-match filter for each of the supplied addresses
7671 *
7672 * Returns a negative error number or the number of filters freed.
7673 */
7674int t4_free_mac_filt(struct adapter *adap, unsigned int mbox,
7675 unsigned int viid, unsigned int naddr,
7676 const u8 **addr, bool sleep_ok)
7677{
7678 int offset, ret = 0;
7679 struct fw_vi_mac_cmd c;
7680 unsigned int nfilters = 0;
7681 unsigned int max_naddr = is_t4(adap->params.chip) ?
7682 NUM_MPS_CLS_SRAM_L_INSTANCES :
7683 NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
7684 unsigned int rem = naddr;
7685
7686 if (naddr > max_naddr)
7687 return -EINVAL;
7688
7689 for (offset = 0; offset < (int)naddr ; /**/) {
7690 unsigned int fw_naddr = (rem < ARRAY_SIZE(c.u.exact)
7691 ? rem
7692 : ARRAY_SIZE(c.u.exact));
7693 size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
7694 u.exact[fw_naddr]), 16);
7695 struct fw_vi_mac_exact *p;
7696 int i;
7697
7698 memset(&c, 0, sizeof(c));
7699 c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
7700 FW_CMD_REQUEST_F |
7701 FW_CMD_WRITE_F |
7702 FW_CMD_EXEC_V(0) |
7703 FW_VI_MAC_CMD_VIID_V(viid));
7704 c.freemacs_to_len16 =
7705 cpu_to_be32(FW_VI_MAC_CMD_FREEMACS_V(0) |
7706 FW_CMD_LEN16_V(len16));
7707
7708 for (i = 0, p = c.u.exact; i < (int)fw_naddr; i++, p++) {
7709 p->valid_to_idx = cpu_to_be16(
7710 FW_VI_MAC_CMD_VALID_F |
7711 FW_VI_MAC_CMD_IDX_V(FW_VI_MAC_MAC_BASED_FREE));
7712 memcpy(p->macaddr, addr[offset+i], sizeof(p->macaddr));
7713 }
7714
7715 ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok);
7716 if (ret)
7717 break;
7718
7719 for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) {
7720 u16 index = FW_VI_MAC_CMD_IDX_G(
7721 be16_to_cpu(p->valid_to_idx));
7722
7723 if (index < max_naddr)
7724 nfilters++;
7725 }
7726
7727 offset += fw_naddr;
7728 rem -= fw_naddr;
7729 }
7730
7731 if (ret == 0)
7732 ret = nfilters;
7733 return ret;
7734}
7735
56d36be4
DM
7736/**
7737 * t4_change_mac - modifies the exact-match filter for a MAC address
7738 * @adap: the adapter
7739 * @mbox: mailbox to use for the FW command
7740 * @viid: the VI id
7741 * @idx: index of existing filter for old value of MAC address, or -1
7742 * @addr: the new MAC address value
7743 * @persist: whether a new MAC allocation should be persistent
7744 * @add_smt: if true also add the address to the HW SMT
7745 *
7746 * Modifies an exact-match filter and sets it to the new MAC address.
7747 * Note that in general it is not possible to modify the value of a given
7748 * filter so the generic way to modify an address filter is to free the one
7749 * being used by the old address value and allocate a new filter for the
7750 * new address value. @idx can be -1 if the address is a new addition.
7751 *
7752 * Returns a negative error number or the index of the filter with the new
7753 * MAC value.
7754 */
7755int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
7756 int idx, const u8 *addr, bool persist, bool add_smt)
7757{
7758 int ret, mode;
7759 struct fw_vi_mac_cmd c;
7760 struct fw_vi_mac_exact *p = c.u.exact;
3ccc6cf7 7761 unsigned int max_mac_addr = adap->params.arch.mps_tcam_size;
56d36be4
DM
7762
7763 if (idx < 0) /* new allocation */
7764 idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC;
7765 mode = add_smt ? FW_VI_MAC_SMT_AND_MPSTCAM : FW_VI_MAC_MPS_TCAM_ENTRY;
7766
7767 memset(&c, 0, sizeof(c));
f404f80c
HS
7768 c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
7769 FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
7770 FW_VI_MAC_CMD_VIID_V(viid));
7771 c.freemacs_to_len16 = cpu_to_be32(FW_CMD_LEN16_V(1));
7772 p->valid_to_idx = cpu_to_be16(FW_VI_MAC_CMD_VALID_F |
7773 FW_VI_MAC_CMD_SMAC_RESULT_V(mode) |
7774 FW_VI_MAC_CMD_IDX_V(idx));
56d36be4
DM
7775 memcpy(p->macaddr, addr, sizeof(p->macaddr));
7776
7777 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
7778 if (ret == 0) {
f404f80c 7779 ret = FW_VI_MAC_CMD_IDX_G(be16_to_cpu(p->valid_to_idx));
0a57a536 7780 if (ret >= max_mac_addr)
56d36be4
DM
7781 ret = -ENOMEM;
7782 }
7783 return ret;
7784}
7785
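/* Example (sketch only): program a VI's primary unicast MAC address.  An
 * @idx of -1 lets the firmware pick (and, with @persist, keep) a free
 * exact-match filter; the returned index would normally be stored so the
 * address can be updated in place later.
 */
static inline int example_set_primary_mac(struct adapter *adap,
					  unsigned int mbox, unsigned int viid,
					  const u8 *addr)
{
	return t4_change_mac(adap, mbox, viid, -1, addr, true, true);
}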
7786/**
7787 * t4_set_addr_hash - program the MAC inexact-match hash filter
7788 * @adap: the adapter
7789 * @mbox: mailbox to use for the FW command
7790 * @viid: the VI id
7791 * @ucast: whether the hash filter should also match unicast addresses
7792 * @vec: the value to be written to the hash filter
7793 * @sleep_ok: call is allowed to sleep
7794 *
7795 * Sets the 64-bit inexact-match hash filter for a virtual interface.
7796 */
7797int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid,
7798 bool ucast, u64 vec, bool sleep_ok)
7799{
7800 struct fw_vi_mac_cmd c;
7801
7802 memset(&c, 0, sizeof(c));
f404f80c
HS
7803 c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
7804 FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
7805 FW_VI_ENABLE_CMD_VIID_V(viid));
7806 c.freemacs_to_len16 = cpu_to_be32(FW_VI_MAC_CMD_HASHVECEN_F |
7807 FW_VI_MAC_CMD_HASHUNIEN_V(ucast) |
7808 FW_CMD_LEN16_V(1));
56d36be4
DM
7809 c.u.hash.hashvec = cpu_to_be64(vec);
7810 return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
7811}
7812
688848b1
AB
7813/**
7814 * t4_enable_vi_params - enable/disable a virtual interface
7815 * @adap: the adapter
7816 * @mbox: mailbox to use for the FW command
7817 * @viid: the VI id
7818 * @rx_en: 1=enable Rx, 0=disable Rx
7819 * @tx_en: 1=enable Tx, 0=disable Tx
7820 * @dcb_en: 1=enable delivery of Data Center Bridging messages.
7821 *
7822 * Enables/disables a virtual interface. Note that setting DCB Enable
7823 * only makes sense when enabling a Virtual Interface ...
7824 */
7825int t4_enable_vi_params(struct adapter *adap, unsigned int mbox,
7826 unsigned int viid, bool rx_en, bool tx_en, bool dcb_en)
7827{
7828 struct fw_vi_enable_cmd c;
7829
7830 memset(&c, 0, sizeof(c));
f404f80c
HS
7831 c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_ENABLE_CMD) |
7832 FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
7833 FW_VI_ENABLE_CMD_VIID_V(viid));
7834 c.ien_to_len16 = cpu_to_be32(FW_VI_ENABLE_CMD_IEN_V(rx_en) |
7835 FW_VI_ENABLE_CMD_EEN_V(tx_en) |
7836 FW_VI_ENABLE_CMD_DCB_INFO_V(dcb_en) |
7837 FW_LEN16(c));
30f00847 7838 return t4_wr_mbox_ns(adap, mbox, &c, sizeof(c), NULL);
688848b1
AB
7839}
7840
56d36be4
DM
7841/**
7842 * t4_enable_vi - enable/disable a virtual interface
7843 * @adap: the adapter
7844 * @mbox: mailbox to use for the FW command
7845 * @viid: the VI id
7846 * @rx_en: 1=enable Rx, 0=disable Rx
7847 * @tx_en: 1=enable Tx, 0=disable Tx
7848 *
7849 * Enables/disables a virtual interface.
7850 */
7851int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid,
7852 bool rx_en, bool tx_en)
7853{
688848b1 7854 return t4_enable_vi_params(adap, mbox, viid, rx_en, tx_en, 0);
56d36be4
DM
7855}
7856
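/* Example (sketch only): bring a fully configured VI up for both Rx and Tx.
 * The _params variant above is only needed when DCB message delivery must
 * be enabled as well.
 */
static inline int example_vi_up(struct adapter *adap, unsigned int mbox,
				unsigned int viid)
{
	return t4_enable_vi(adap, mbox, viid, true, true);
}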
7857/**
7858 * t4_identify_port - identify a VI's port by blinking its LED
7859 * @adap: the adapter
7860 * @mbox: mailbox to use for the FW command
7861 * @viid: the VI id
7862 * @nblinks: how many times to blink LED at 2.5 Hz
7863 *
7864 * Identifies a VI's port by blinking its LED.
7865 */
7866int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid,
7867 unsigned int nblinks)
7868{
7869 struct fw_vi_enable_cmd c;
7870
0062b15c 7871 memset(&c, 0, sizeof(c));
f404f80c
HS
7872 c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_ENABLE_CMD) |
7873 FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
7874 FW_VI_ENABLE_CMD_VIID_V(viid));
7875 c.ien_to_len16 = cpu_to_be32(FW_VI_ENABLE_CMD_LED_F | FW_LEN16(c));
7876 c.blinkdur = cpu_to_be16(nblinks);
56d36be4 7877 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
56d36be4
DM
7878}
7879
ebf4dc2b
HS
7880/**
7881 * t4_iq_stop - stop an ingress queue and its FLs
7882 * @adap: the adapter
7883 * @mbox: mailbox to use for the FW command
7884 * @pf: the PF owning the queues
7885 * @vf: the VF owning the queues
7886 * @iqtype: the ingress queue type (FW_IQ_TYPE_FL_INT_CAP, etc.)
7887 * @iqid: ingress queue id
7888 * @fl0id: FL0 queue id or 0xffff if no attached FL0
7889 * @fl1id: FL1 queue id or 0xffff if no attached FL1
7890 *
7891 * Stops an ingress queue and its associated FLs, if any. This causes
7892 * any current or future data/messages destined for these queues to be
7893 * tossed.
7894 */
7895int t4_iq_stop(struct adapter *adap, unsigned int mbox, unsigned int pf,
7896 unsigned int vf, unsigned int iqtype, unsigned int iqid,
7897 unsigned int fl0id, unsigned int fl1id)
7898{
7899 struct fw_iq_cmd c;
7900
7901 memset(&c, 0, sizeof(c));
7902 c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_IQ_CMD) | FW_CMD_REQUEST_F |
7903 FW_CMD_EXEC_F | FW_IQ_CMD_PFN_V(pf) |
7904 FW_IQ_CMD_VFN_V(vf));
7905 c.alloc_to_len16 = cpu_to_be32(FW_IQ_CMD_IQSTOP_F | FW_LEN16(c));
7906 c.type_to_iqandstindex = cpu_to_be32(FW_IQ_CMD_TYPE_V(iqtype));
7907 c.iqid = cpu_to_be16(iqid);
7908 c.fl0id = cpu_to_be16(fl0id);
7909 c.fl1id = cpu_to_be16(fl1id);
7910 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
7911}
7912
56d36be4
DM
7913/**
7914 * t4_iq_free - free an ingress queue and its FLs
7915 * @adap: the adapter
7916 * @mbox: mailbox to use for the FW command
7917 * @pf: the PF owning the queues
7918 * @vf: the VF owning the queues
7919 * @iqtype: the ingress queue type
7920 * @iqid: ingress queue id
7921 * @fl0id: FL0 queue id or 0xffff if no attached FL0
7922 * @fl1id: FL1 queue id or 0xffff if no attached FL1
7923 *
7924 * Frees an ingress queue and its associated FLs, if any.
7925 */
7926int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
7927 unsigned int vf, unsigned int iqtype, unsigned int iqid,
7928 unsigned int fl0id, unsigned int fl1id)
7929{
7930 struct fw_iq_cmd c;
7931
7932 memset(&c, 0, sizeof(c));
f404f80c
HS
7933 c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_IQ_CMD) | FW_CMD_REQUEST_F |
7934 FW_CMD_EXEC_F | FW_IQ_CMD_PFN_V(pf) |
7935 FW_IQ_CMD_VFN_V(vf));
7936 c.alloc_to_len16 = cpu_to_be32(FW_IQ_CMD_FREE_F | FW_LEN16(c));
7937 c.type_to_iqandstindex = cpu_to_be32(FW_IQ_CMD_TYPE_V(iqtype));
7938 c.iqid = cpu_to_be16(iqid);
7939 c.fl0id = cpu_to_be16(fl0id);
7940 c.fl1id = cpu_to_be16(fl1id);
56d36be4
DM
7941 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
7942}
7943
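/* Example (a sketch, helper invented): quiesce and then free an ingress
 * queue which has no Free Lists attached (0xffff marks an absent FL).
 * FW_IQ_TYPE_FL_INT_CAP is the usual type for interrupt-capable queues.
 */
static inline int example_teardown_iq(struct adapter *adap, unsigned int mbox,
				      unsigned int pf, unsigned int vf,
				      unsigned int iqid)
{
	int ret;

	ret = t4_iq_stop(adap, mbox, pf, vf, FW_IQ_TYPE_FL_INT_CAP,
			 iqid, 0xffff, 0xffff);
	if (ret)
		return ret;

	return t4_iq_free(adap, mbox, pf, vf, FW_IQ_TYPE_FL_INT_CAP,
			  iqid, 0xffff, 0xffff);
}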
7944/**
7945 * t4_eth_eq_free - free an Ethernet egress queue
7946 * @adap: the adapter
7947 * @mbox: mailbox to use for the FW command
7948 * @pf: the PF owning the queue
7949 * @vf: the VF owning the queue
7950 * @eqid: egress queue id
7951 *
7952 * Frees an Ethernet egress queue.
7953 */
7954int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
7955 unsigned int vf, unsigned int eqid)
7956{
7957 struct fw_eq_eth_cmd c;
7958
7959 memset(&c, 0, sizeof(c));
f404f80c
HS
7960 c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_EQ_ETH_CMD) |
7961 FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
7962 FW_EQ_ETH_CMD_PFN_V(pf) |
7963 FW_EQ_ETH_CMD_VFN_V(vf));
7964 c.alloc_to_len16 = cpu_to_be32(FW_EQ_ETH_CMD_FREE_F | FW_LEN16(c));
7965 c.eqid_pkd = cpu_to_be32(FW_EQ_ETH_CMD_EQID_V(eqid));
56d36be4
DM
7966 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
7967}
7968
7969/**
7970 * t4_ctrl_eq_free - free a control egress queue
7971 * @adap: the adapter
7972 * @mbox: mailbox to use for the FW command
7973 * @pf: the PF owning the queue
7974 * @vf: the VF owning the queue
7975 * @eqid: egress queue id
7976 *
7977 * Frees a control egress queue.
7978 */
7979int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
7980 unsigned int vf, unsigned int eqid)
7981{
7982 struct fw_eq_ctrl_cmd c;
7983
7984 memset(&c, 0, sizeof(c));
f404f80c
HS
7985 c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_EQ_CTRL_CMD) |
7986 FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
7987 FW_EQ_CTRL_CMD_PFN_V(pf) |
7988 FW_EQ_CTRL_CMD_VFN_V(vf));
7989 c.alloc_to_len16 = cpu_to_be32(FW_EQ_CTRL_CMD_FREE_F | FW_LEN16(c));
7990 c.cmpliqid_eqid = cpu_to_be32(FW_EQ_CTRL_CMD_EQID_V(eqid));
56d36be4
DM
7991 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
7992}
7993
7994/**
7995 * t4_ofld_eq_free - free an offload egress queue
7996 * @adap: the adapter
7997 * @mbox: mailbox to use for the FW command
7998 * @pf: the PF owning the queue
7999 * @vf: the VF owning the queue
8000 * @eqid: egress queue id
8001 *
8002	 * Frees an offload egress queue.
8003 */
8004int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
8005 unsigned int vf, unsigned int eqid)
8006{
8007 struct fw_eq_ofld_cmd c;
8008
8009 memset(&c, 0, sizeof(c));
f404f80c
HS
8010 c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_EQ_OFLD_CMD) |
8011 FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
8012 FW_EQ_OFLD_CMD_PFN_V(pf) |
8013 FW_EQ_OFLD_CMD_VFN_V(vf));
8014 c.alloc_to_len16 = cpu_to_be32(FW_EQ_OFLD_CMD_FREE_F | FW_LEN16(c));
8015 c.eqid_pkd = cpu_to_be32(FW_EQ_OFLD_CMD_EQID_V(eqid));
56d36be4
DM
8016 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
8017}
8018
ddc7740d
HS
8019/**
8020 * t4_link_down_rc_str - return a string for a Link Down Reason Code
8021	 * @link_down_rc: Link Down Reason Code
8023 *
8024 * Returns a string representation of the Link Down Reason Code.
8025 */
8026static const char *t4_link_down_rc_str(unsigned char link_down_rc)
8027{
8028 static const char * const reason[] = {
8029 "Link Down",
8030 "Remote Fault",
8031 "Auto-negotiation Failure",
8032 "Reserved",
8033 "Insufficient Airflow",
8034 "Unable To Determine Reason",
8035 "No RX Signal Detected",
8036 "Reserved",
8037 };
8038
8039 if (link_down_rc >= ARRAY_SIZE(reason))
8040 return "Bad Reason Code";
8041
8042 return reason[link_down_rc];
8043}
8044
c3168cab
GG
8045/**
8046 * fwcap_to_speed - return the highest speed set in the port capabilities, in Mb/s
8047 */
8048static unsigned int fwcap_to_speed(fw_port_cap32_t caps)
8049{
8050 #define TEST_SPEED_RETURN(__caps_speed, __speed) \
8051 do { \
8052 if (caps & FW_PORT_CAP32_SPEED_##__caps_speed) \
8053 return __speed; \
8054 } while (0)
8055
8056 TEST_SPEED_RETURN(400G, 400000);
8057 TEST_SPEED_RETURN(200G, 200000);
8058 TEST_SPEED_RETURN(100G, 100000);
8059 TEST_SPEED_RETURN(50G, 50000);
8060 TEST_SPEED_RETURN(40G, 40000);
8061 TEST_SPEED_RETURN(25G, 25000);
8062 TEST_SPEED_RETURN(10G, 10000);
8063 TEST_SPEED_RETURN(1G, 1000);
8064 TEST_SPEED_RETURN(100M, 100);
8065
8066 #undef TEST_SPEED_RETURN
8067
8068 return 0;
8069}
8070
8071/**
8072 * fwcap_to_fwspeed - return highest speed in Port Capabilities
8073 * @acaps: advertised Port Capabilities
8074 *
8075 * Get the highest speed for the port from the advertised Port
8076 * Capabilities. It will be either the highest speed from the list of
8077 * speeds or whatever speed the user has set using ethtool.
8078 */
8079static fw_port_cap32_t fwcap_to_fwspeed(fw_port_cap32_t acaps)
8080{
8081 #define TEST_SPEED_RETURN(__caps_speed) \
8082 do { \
8083 if (acaps & FW_PORT_CAP32_SPEED_##__caps_speed) \
8084 return FW_PORT_CAP32_SPEED_##__caps_speed; \
8085 } while (0)
8086
8087 TEST_SPEED_RETURN(400G);
8088 TEST_SPEED_RETURN(200G);
8089 TEST_SPEED_RETURN(100G);
8090 TEST_SPEED_RETURN(50G);
8091 TEST_SPEED_RETURN(40G);
8092 TEST_SPEED_RETURN(25G);
8093 TEST_SPEED_RETURN(10G);
8094 TEST_SPEED_RETURN(1G);
8095 TEST_SPEED_RETURN(100M);
8096
8097 #undef TEST_SPEED_RETURN
8098
8099 return 0;
8100}
8101
8102/**
8103 * lstatus_to_fwcap - translate old lstatus to 32-bit Port Capabilities
8104 * @lstatus: old FW_PORT_ACTION_GET_PORT_INFO lstatus value
8105 *
8106 * Translates old FW_PORT_ACTION_GET_PORT_INFO lstatus field into new
8107 * 32-bit Port Capabilities value.
8108 */
8109static fw_port_cap32_t lstatus_to_fwcap(u32 lstatus)
8110{
8111 fw_port_cap32_t linkattr = 0;
8112
8113 /* Unfortunately the format of the Link Status in the old
8114 * 16-bit Port Information message isn't the same as the
8115 * 16-bit Port Capabilities bitfield used everywhere else ...
8116 */
8117 if (lstatus & FW_PORT_CMD_RXPAUSE_F)
8118 linkattr |= FW_PORT_CAP32_FC_RX;
8119 if (lstatus & FW_PORT_CMD_TXPAUSE_F)
8120 linkattr |= FW_PORT_CAP32_FC_TX;
8121 if (lstatus & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_100M))
8122 linkattr |= FW_PORT_CAP32_SPEED_100M;
8123 if (lstatus & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_1G))
8124 linkattr |= FW_PORT_CAP32_SPEED_1G;
8125 if (lstatus & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_10G))
8126 linkattr |= FW_PORT_CAP32_SPEED_10G;
8127 if (lstatus & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_25G))
8128 linkattr |= FW_PORT_CAP32_SPEED_25G;
8129 if (lstatus & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_40G))
8130 linkattr |= FW_PORT_CAP32_SPEED_40G;
8131 if (lstatus & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_100G))
8132 linkattr |= FW_PORT_CAP32_SPEED_100G;
8133
8134 return linkattr;
8135}
8136
56d36be4 8137/**
23853a0a
HS
8138 * t4_handle_get_port_info - process a FW reply message
8139 * @pi: the port info
56d36be4
DM
8140 * @rpl: start of the FW message
8141 *
23853a0a
HS
8142 * Processes a GET_PORT_INFO FW reply message.
8143 */
8144void t4_handle_get_port_info(struct port_info *pi, const __be64 *rpl)
8145{
c3168cab
GG
8146 const struct fw_port_cmd *cmd = (const void *)rpl;
8147 int action = FW_PORT_CMD_ACTION_G(be32_to_cpu(cmd->action_to_len16));
8148 struct adapter *adapter = pi->adapter;
8149 struct link_config *lc = &pi->link_cfg;
8150 int link_ok, linkdnrc;
8151 enum fw_port_type port_type;
8152 enum fw_port_module_type mod_type;
8153 unsigned int speed, fc, fec;
8154 fw_port_cap32_t pcaps, acaps, lpacaps, linkattr;
8155
8156 /* Extract the various fields from the Port Information message.
158a5c0a 8157 */
c3168cab
GG
8158 switch (action) {
8159 case FW_PORT_ACTION_GET_PORT_INFO: {
8160 u32 lstatus = be32_to_cpu(cmd->u.info.lstatus_to_modtype);
8161
8162 link_ok = (lstatus & FW_PORT_CMD_LSTATUS_F) != 0;
8163 linkdnrc = FW_PORT_CMD_LINKDNRC_G(lstatus);
8164 port_type = FW_PORT_CMD_PTYPE_G(lstatus);
8165 mod_type = FW_PORT_CMD_MODTYPE_G(lstatus);
8166 pcaps = fwcaps16_to_caps32(be16_to_cpu(cmd->u.info.pcap));
8167 acaps = fwcaps16_to_caps32(be16_to_cpu(cmd->u.info.acap));
8168 lpacaps = fwcaps16_to_caps32(be16_to_cpu(cmd->u.info.lpacap));
8169 linkattr = lstatus_to_fwcap(lstatus);
8170 break;
8171 }
8172
8173 case FW_PORT_ACTION_GET_PORT_INFO32: {
8174 u32 lstatus32;
8175
8176 lstatus32 = be32_to_cpu(cmd->u.info32.lstatus32_to_cbllen32);
8177 link_ok = (lstatus32 & FW_PORT_CMD_LSTATUS32_F) != 0;
8178 linkdnrc = FW_PORT_CMD_LINKDNRC32_G(lstatus32);
8179 port_type = FW_PORT_CMD_PORTTYPE32_G(lstatus32);
8180 mod_type = FW_PORT_CMD_MODTYPE32_G(lstatus32);
8181 pcaps = be32_to_cpu(cmd->u.info32.pcaps32);
8182 acaps = be32_to_cpu(cmd->u.info32.acaps32);
8183 lpacaps = be32_to_cpu(cmd->u.info32.lpacaps32);
8184 linkattr = be32_to_cpu(cmd->u.info32.linkattr32);
8185 break;
8186 }
8187
8188 default:
8189 dev_err(adapter->pdev_dev, "Handle Port Information: Bad Command/Action %#x\n",
8190 be32_to_cpu(cmd->action_to_len16));
8191 return;
8192 }
158a5c0a
CL
8193
8194 fec = fwcap_to_cc_fec(acaps);
c3168cab
GG
8195 fc = fwcap_to_cc_pause(linkattr);
8196 speed = fwcap_to_speed(linkattr);
8197
8198 if (mod_type != pi->mod_type) {
8199 /* With the newer SFP28 and QSFP28 Transceiver Module Types,
8200 * various fundamental Port Capabilities which used to be
8201 * immutable can now change radically. We can now have
8202 * Speeds, Auto-Negotiation, Forward Error Correction, etc.
8203 * all change based on what Transceiver Module is inserted.
8204 * So we need to record the Physical "Port" Capabilities on
8205 * every Transceiver Module change.
8206 */
8207 lc->pcaps = pcaps;
158a5c0a 8208
158a5c0a 8209 /* When a new Transceiver Module is inserted, the Firmware
c3168cab
GG
8210 * will examine its i2c EPROM to determine its type and
8211 * general operating parameters including things like Forward
8212 * Error Control, etc. Various IEEE 802.3 standards dictate
8213 * how to interpret these i2c values to determine default
8214		 * "automatic" settings. We record these for future use when
8215 * the user explicitly requests these standards-based values.
158a5c0a 8216 */
c3168cab
GG
8217 lc->def_acaps = acaps;
8218
8219 /* Some versions of the early T6 Firmware "cheated" when
8220 * handling different Transceiver Modules by changing the
8221		 * underlying Port Type reported to the Host Drivers. As
8222 * such we need to capture whatever Port Type the Firmware
8223 * sends us and record it in case it's different from what we
8224 * were told earlier. Unfortunately, since Firmware is
8225 * forever, we'll need to keep this code here forever, but in
8226 * later T6 Firmware it should just be an assignment of the
8227 * same value already recorded.
8228 */
8229 pi->port_type = port_type;
158a5c0a 8230
c3168cab
GG
8231 pi->mod_type = mod_type;
8232 t4_os_portmod_changed(adapter, pi->port_id);
23853a0a 8233 }
c3168cab 8234
23853a0a 8235 if (link_ok != lc->link_ok || speed != lc->speed ||
158a5c0a 8236 fc != lc->fc || fec != lc->fec) { /* something changed */
ddc7740d 8237 if (!link_ok && lc->link_ok) {
c3168cab
GG
8238 lc->link_down_rc = linkdnrc;
8239 dev_warn(adapter->pdev_dev, "Port %d link down, reason: %s\n",
8240 pi->tx_chan, t4_link_down_rc_str(linkdnrc));
ddc7740d 8241 }
23853a0a
HS
8242 lc->link_ok = link_ok;
8243 lc->speed = speed;
8244 lc->fc = fc;
158a5c0a
CL
8245 lc->fec = fec;
8246
c3168cab
GG
8247 lc->lpacaps = lpacaps;
8248 lc->acaps = acaps & ADVERT_MASK;
8249
8250 if (lc->acaps & FW_PORT_CAP32_ANEG) {
8251 lc->autoneg = AUTONEG_ENABLE;
8252 } else {
8253			/* When Autoneg is disabled, the user needs to set
8254			 * a single speed.
8255 * Similar to cxgb4_ethtool.c: set_link_ksettings
8256 */
8257 lc->acaps = 0;
8258 lc->speed_caps = fwcap_to_fwspeed(acaps);
8259 lc->autoneg = AUTONEG_DISABLE;
8260 }
2061ec3f 8261
c3168cab 8262 t4_os_link_changed(adapter, pi->port_id, link_ok);
23853a0a
HS
8263 }
8264}
8265
2061ec3f
GG
8266/**
8267 * t4_update_port_info - retrieve and update port information if changed
8268 * @pi: the port_info
8269 *
8270 * We issue a Get Port Information Command to the Firmware and, if
8271 * successful, we check to see if anything is different from what we
8272 * last recorded and update things accordingly.
8273 */
8274int t4_update_port_info(struct port_info *pi)
8275{
c3168cab 8276 unsigned int fw_caps = pi->adapter->params.fw_caps_support;
2061ec3f
GG
8277 struct fw_port_cmd port_cmd;
8278 int ret;
8279
8280 memset(&port_cmd, 0, sizeof(port_cmd));
8281 port_cmd.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PORT_CMD) |
8282 FW_CMD_REQUEST_F | FW_CMD_READ_F |
c3168cab 8283 FW_PORT_CMD_PORTID_V(pi->tx_chan));
2061ec3f 8284 port_cmd.action_to_len16 = cpu_to_be32(
c3168cab
GG
8285 FW_PORT_CMD_ACTION_V(fw_caps == FW_CAPS16
8286 ? FW_PORT_ACTION_GET_PORT_INFO
8287 : FW_PORT_ACTION_GET_PORT_INFO32) |
2061ec3f
GG
8288 FW_LEN16(port_cmd));
8289 ret = t4_wr_mbox(pi->adapter, pi->adapter->mbox,
8290 &port_cmd, sizeof(port_cmd), &port_cmd);
8291 if (ret)
8292 return ret;
8293
8294 t4_handle_get_port_info(pi, (__be64 *)&port_cmd);
8295 return 0;
8296}
8297
c3168cab
GG
8298/**
8299 * t4_get_link_params - retrieve basic link parameters for given port
8300 * @pi: the port
8301 * @link_okp: value return pointer for link up/down
8302 * @speedp: value return pointer for speed (Mb/s)
8303 * @mtup: value return pointer for mtu
8304 *
8305 * Retrieves basic link parameters for a port: link up/down, speed (Mb/s),
8306 * and MTU for a specified port. A negative error is returned on
8307 * failure; 0 on success.
8308 */
8309int t4_get_link_params(struct port_info *pi, unsigned int *link_okp,
8310 unsigned int *speedp, unsigned int *mtup)
8311{
8312 unsigned int fw_caps = pi->adapter->params.fw_caps_support;
8313 struct fw_port_cmd port_cmd;
8314 unsigned int action, link_ok, speed, mtu;
8315 fw_port_cap32_t linkattr;
8316 int ret;
8317
8318 memset(&port_cmd, 0, sizeof(port_cmd));
8319 port_cmd.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PORT_CMD) |
8320 FW_CMD_REQUEST_F | FW_CMD_READ_F |
8321 FW_PORT_CMD_PORTID_V(pi->tx_chan));
8322 action = (fw_caps == FW_CAPS16
8323 ? FW_PORT_ACTION_GET_PORT_INFO
8324 : FW_PORT_ACTION_GET_PORT_INFO32);
8325 port_cmd.action_to_len16 = cpu_to_be32(
8326 FW_PORT_CMD_ACTION_V(action) |
8327 FW_LEN16(port_cmd));
8328 ret = t4_wr_mbox(pi->adapter, pi->adapter->mbox,
8329 &port_cmd, sizeof(port_cmd), &port_cmd);
8330 if (ret)
8331 return ret;
8332
8333 if (action == FW_PORT_ACTION_GET_PORT_INFO) {
8334 u32 lstatus = be32_to_cpu(port_cmd.u.info.lstatus_to_modtype);
8335
8336 link_ok = !!(lstatus & FW_PORT_CMD_LSTATUS_F);
8337 linkattr = lstatus_to_fwcap(lstatus);
8338 mtu = be16_to_cpu(port_cmd.u.info.mtu);
8339 } else {
8340 u32 lstatus32 =
8341 be32_to_cpu(port_cmd.u.info32.lstatus32_to_cbllen32);
8342
8343 link_ok = !!(lstatus32 & FW_PORT_CMD_LSTATUS32_F);
8344 linkattr = be32_to_cpu(port_cmd.u.info32.linkattr32);
8345 mtu = FW_PORT_CMD_MTU32_G(
8346 be32_to_cpu(port_cmd.u.info32.auxlinfo32_mtu32));
8347 }
8348 speed = fwcap_to_speed(linkattr);
8349
8350 *link_okp = link_ok;
8351	 *speedp = speed;
8352 *mtup = mtu;
8353
8354 return 0;
8355}
8356
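/* Example (sketch only): refresh a port's information from the firmware and
 * log the basic link state.  The message format is invented for the example.
 */
static inline void example_report_link(struct port_info *pi)
{
	unsigned int link_ok, speed, mtu;

	if (t4_update_port_info(pi) == 0 &&
	    t4_get_link_params(pi, &link_ok, &speed, &mtu) == 0)
		dev_info(pi->adapter->pdev_dev,
			 "port %d: link %s, %u Mb/s, MTU %u\n",
			 pi->port_id, link_ok ? "up" : "down", speed, mtu);
}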
23853a0a
HS
8357/**
8358 * t4_handle_fw_rpl - process a FW reply message
8359 * @adap: the adapter
8360 * @rpl: start of the FW message
8361 *
8362 * Processes a FW message, such as link state change messages.
56d36be4
DM
8363 */
8364int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl)
8365{
8366 u8 opcode = *(const u8 *)rpl;
8367
23853a0a
HS
8368 /* This might be a port command ... this simplifies the following
8369 * conditionals ... We can get away with pre-dereferencing
8370 * action_to_len16 because it's in the first 16 bytes and all messages
8371 * will be at least that long.
8372 */
8373 const struct fw_port_cmd *p = (const void *)rpl;
8374 unsigned int action =
8375 FW_PORT_CMD_ACTION_G(be32_to_cpu(p->action_to_len16));
8376
c3168cab
GG
8377 if (opcode == FW_PORT_CMD &&
8378 (action == FW_PORT_ACTION_GET_PORT_INFO ||
8379 action == FW_PORT_ACTION_GET_PORT_INFO32)) {
23853a0a 8380 int i;
f404f80c 8381 int chan = FW_PORT_CMD_PORTID_G(be32_to_cpu(p->op_to_portid));
23853a0a
HS
8382 struct port_info *pi = NULL;
8383
8384 for_each_port(adap, i) {
8385 pi = adap2pinfo(adap, i);
8386 if (pi->tx_chan == chan)
8387 break;
56d36be4 8388 }
23853a0a
HS
8389
8390 t4_handle_get_port_info(pi, rpl);
8391 } else {
c3168cab
GG
8392 dev_warn(adap->pdev_dev, "Unknown firmware reply %d\n",
8393 opcode);
23853a0a 8394 return -EINVAL;
56d36be4
DM
8395 }
8396 return 0;
8397}
8398
1dd06ae8 8399static void get_pci_mode(struct adapter *adapter, struct pci_params *p)
56d36be4
DM
8400{
8401 u16 val;
56d36be4 8402
e5c8ae5f
JL
8403 if (pci_is_pcie(adapter->pdev)) {
8404 pcie_capability_read_word(adapter->pdev, PCI_EXP_LNKSTA, &val);
56d36be4
DM
8405 p->speed = val & PCI_EXP_LNKSTA_CLS;
8406 p->width = (val & PCI_EXP_LNKSTA_NLW) >> 4;
8407 }
8408}
8409
8410/**
8411 * init_link_config - initialize a link's SW state
c3168cab 8412 * @lc: pointer to structure holding the link state
158a5c0a
CL
8413 * @pcaps: link Port Capabilities
8414 * @acaps: link current Advertised Port Capabilities
56d36be4
DM
8415 *
8416 * Initializes the SW state maintained for each link, including the link's
8417 * capabilities and default speed/flow-control/autonegotiation settings.
8418 */
c3168cab
GG
8419static void init_link_config(struct link_config *lc, fw_port_cap32_t pcaps,
8420 fw_port_cap32_t acaps)
56d36be4 8421{
c3168cab
GG
8422 lc->pcaps = pcaps;
8423 lc->def_acaps = acaps;
8424 lc->lpacaps = 0;
8425 lc->speed_caps = 0;
56d36be4
DM
8426 lc->speed = 0;
8427 lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
3bb4858f
GG
8428
 8429	/* For Forward Error Correction, we default to whatever the Firmware
8430 * tells us the Link is currently advertising.
8431 */
3bb4858f 8432 lc->requested_fec = FEC_AUTO;
c3168cab 8433 lc->fec = fwcap_to_cc_fec(lc->def_acaps);
3bb4858f 8434
c3168cab
GG
8435 if (lc->pcaps & FW_PORT_CAP32_ANEG) {
8436 lc->acaps = lc->pcaps & ADVERT_MASK;
56d36be4
DM
8437 lc->autoneg = AUTONEG_ENABLE;
8438 lc->requested_fc |= PAUSE_AUTONEG;
8439 } else {
c3168cab 8440 lc->acaps = 0;
56d36be4
DM
8441 lc->autoneg = AUTONEG_DISABLE;
8442 }
8443}
8444
8203b509
HS
8445#define CIM_PF_NOACCESS 0xeeeeeeee
8446
8447int t4_wait_dev_ready(void __iomem *regs)
56d36be4 8448{
8203b509
HS
8449 u32 whoami;
8450
0d804338 8451 whoami = readl(regs + PL_WHOAMI_A);
8203b509 8452 if (whoami != 0xffffffff && whoami != CIM_PF_NOACCESS)
56d36be4 8453 return 0;
8203b509 8454
56d36be4 8455 msleep(500);
0d804338 8456 whoami = readl(regs + PL_WHOAMI_A);
8203b509 8457 return (whoami != 0xffffffff && whoami != CIM_PF_NOACCESS ? 0 : -EIO);
56d36be4
DM
8458}
8459
fe2ee139
HS
8460struct flash_desc {
8461 u32 vendor_and_model_id;
8462 u32 size_mb;
8463};
8464
96ac18f1 8465static int t4_get_flash_params(struct adapter *adap)
900a6596 8466{
fe2ee139
HS
8467 /* Table for non-Numonix supported flash parts. Numonix parts are left
8468 * to the preexisting code. All flash parts have 64KB sectors.
8469 */
8470 static struct flash_desc supported_flash[] = {
8471 { 0x150201, 4 << 20 }, /* Spansion 4MB S25FL032P */
8472 };
8473
96ac18f1
GG
8474 unsigned int part, manufacturer;
8475 unsigned int density, size;
8476 u32 flashid = 0;
900a6596 8477 int ret;
96ac18f1
GG
8478
8479 /* Issue a Read ID Command to the Flash part. We decode supported
8480 * Flash parts and their sizes from this. There's a newer Query
8481 * Command which can retrieve detailed geometry information but many
8482 * Flash parts don't support it.
8483 */
900a6596
DM
8484
8485 ret = sf1_write(adap, 1, 1, 0, SF_RD_ID);
8486 if (!ret)
96ac18f1 8487 ret = sf1_read(adap, 3, 0, 1, &flashid);
0d804338 8488 t4_write_reg(adap, SF_OP_A, 0); /* unlock SF */
900a6596
DM
8489 if (ret)
8490 return ret;
8491
96ac18f1
GG
8492 /* Check to see if it's one of our non-standard supported Flash parts.
8493 */
8494 for (part = 0; part < ARRAY_SIZE(supported_flash); part++)
8495 if (supported_flash[part].vendor_and_model_id == flashid) {
8496 adap->params.sf_size = supported_flash[part].size_mb;
fe2ee139
HS
8497 adap->params.sf_nsec =
8498 adap->params.sf_size / SF_SEC_SIZE;
96ac18f1 8499 goto found;
fe2ee139
HS
8500 }
8501
96ac18f1
GG
 8502	/* Decode Flash part size. The code below looks repetitive with
 8503	 * common encodings, but that's not guaranteed in the JEDEC
 8504	 * specification for the Read JEDEC ID command. The only thing that
 8505	 * we're guaranteed by the JEDEC specification is where the
8506 * Manufacturer ID is in the returned result. After that each
8507 * Manufacturer ~could~ encode things completely differently.
8508 * Note, all Flash parts must have 64KB sectors.
8509 */
8510 manufacturer = flashid & 0xff;
8511 switch (manufacturer) {
8512 case 0x20: { /* Micron/Numonix */
8513 /* This Density -> Size decoding table is taken from Micron
8514 * Data Sheets.
8515 */
8516 density = (flashid >> 16) & 0xff;
8517 switch (density) {
8518 case 0x14: /* 1MB */
8519 size = 1 << 20;
8520 break;
8521 case 0x15: /* 2MB */
8522 size = 1 << 21;
8523 break;
8524 case 0x16: /* 4MB */
8525 size = 1 << 22;
8526 break;
8527 case 0x17: /* 8MB */
8528 size = 1 << 23;
8529 break;
8530 case 0x18: /* 16MB */
8531 size = 1 << 24;
8532 break;
8533 case 0x19: /* 32MB */
8534 size = 1 << 25;
8535 break;
8536 case 0x20: /* 64MB */
8537 size = 1 << 26;
8538 break;
8539 case 0x21: /* 128MB */
8540 size = 1 << 27;
8541 break;
8542 case 0x22: /* 256MB */
8543 size = 1 << 28;
8544 break;
8545
8546 default:
8547 dev_err(adap->pdev_dev, "Micron Flash Part has bad size, ID = %#x, Density code = %#x\n",
8548 flashid, density);
5dc87425 8549 return -EINVAL;
96ac18f1
GG
8550 }
8551 break;
8552 }
8553 case 0xc2: { /* Macronix */
8554 /* This Density -> Size decoding table is taken from Macronix
8555 * Data Sheets.
8556 */
8557 density = (flashid >> 16) & 0xff;
8558 switch (density) {
8559 case 0x17: /* 8MB */
8560 size = 1 << 23;
8561 break;
8562 case 0x18: /* 16MB */
8563 size = 1 << 24;
8564 break;
8565 default:
8566 dev_err(adap->pdev_dev, "Macronix Flash Part has bad size, ID = %#x, Density code = %#x\n",
8567 flashid, density);
5dc87425 8568 return -EINVAL;
96ac18f1 8569 }
5dc87425 8570 break;
96ac18f1
GG
8571 }
8572 case 0xef: { /* Winbond */
8573 /* This Density -> Size decoding table is taken from Winbond
8574 * Data Sheets.
8575 */
8576 density = (flashid >> 16) & 0xff;
8577 switch (density) {
8578 case 0x17: /* 8MB */
8579 size = 1 << 23;
8580 break;
8581 case 0x18: /* 16MB */
8582 size = 1 << 24;
8583 break;
8584 default:
8585 dev_err(adap->pdev_dev, "Winbond Flash Part has bad size, ID = %#x, Density code = %#x\n",
8586 flashid, density);
5dc87425 8587 return -EINVAL;
96ac18f1
GG
8588 }
8589 break;
8590 }
8591 default:
8592 dev_err(adap->pdev_dev, "Unsupported Flash Part, ID = %#x\n",
8593 flashid);
8594 return -EINVAL;
8595 }
8596
8597 /* Store decoded Flash size and fall through into vetting code. */
8598 adap->params.sf_size = size;
8599 adap->params.sf_nsec = size / SF_SEC_SIZE;
c290607e 8600
96ac18f1 8601found:
c290607e 8602 if (adap->params.sf_size < FLASH_MIN_SIZE)
96ac18f1
GG
8603 dev_warn(adap->pdev_dev, "WARNING: Flash Part ID %#x, size %#x < %#x\n",
8604 flashid, adap->params.sf_size, FLASH_MIN_SIZE);
900a6596
DM
8605 return 0;
8606}
8607
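/* Worked example for the decode above (values illustrative): a Read ID
 * reply of flashid = 0x18ba20 gives manufacturer = flashid & 0xff = 0x20
 * (Micron/Numonix) and density = (flashid >> 16) & 0xff = 0x18, i.e. a
 * 16MB part, so sf_size = 1 << 24 and, with 64KB sectors, sf_nsec = 256.
 */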
56d36be4
DM
8608/**
8609 * t4_prep_adapter - prepare SW and HW for operation
8610 * @adapter: the adapter
8612 *
8613 * Initialize adapter SW state for the various HW modules, set initial
8614 * values for some adapter tunables, take PHYs out of reset, and
8615 * initialize the MDIO interface.
8616 */
91744948 8617int t4_prep_adapter(struct adapter *adapter)
56d36be4 8618{
0a57a536
SR
8619 int ret, ver;
8620 uint16_t device_id;
d14807dd 8621 u32 pl_rev;
56d36be4 8622
56d36be4 8623 get_pci_mode(adapter, &adapter->params.pci);
0d804338 8624 pl_rev = REV_G(t4_read_reg(adapter, PL_REV_A));
56d36be4 8625
96ac18f1 8626 ret = t4_get_flash_params(adapter);
900a6596
DM
8627 if (ret < 0) {
8628 dev_err(adapter->pdev_dev, "error %d identifying flash\n", ret);
8629 return ret;
8630 }
8631
0a57a536
SR
8632 /* Retrieve adapter's device ID
8633 */
8634 pci_read_config_word(adapter->pdev, PCI_DEVICE_ID, &device_id);
8635 ver = device_id >> 12;
d14807dd 8636 adapter->params.chip = 0;
0a57a536
SR
8637 switch (ver) {
8638 case CHELSIO_T4:
d14807dd 8639 adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T4, pl_rev);
3ccc6cf7
HS
8640 adapter->params.arch.sge_fl_db = DBPRIO_F;
8641 adapter->params.arch.mps_tcam_size =
8642 NUM_MPS_CLS_SRAM_L_INSTANCES;
8643 adapter->params.arch.mps_rplc_size = 128;
8644 adapter->params.arch.nchan = NCHAN;
44588560 8645 adapter->params.arch.pm_stats_cnt = PM_NSTATS;
3ccc6cf7 8646 adapter->params.arch.vfcount = 128;
2216d014
HS
8647 /* Congestion map is for 4 channels so that
 8648		 * MPS can have 4 priorities per port.
8649 */
8650 adapter->params.arch.cng_ch_bits_log = 2;
0a57a536
SR
8651 break;
8652 case CHELSIO_T5:
d14807dd 8653 adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T5, pl_rev);
3ccc6cf7
HS
8654 adapter->params.arch.sge_fl_db = DBPRIO_F | DBTYPE_F;
8655 adapter->params.arch.mps_tcam_size =
8656 NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
8657 adapter->params.arch.mps_rplc_size = 128;
8658 adapter->params.arch.nchan = NCHAN;
44588560 8659 adapter->params.arch.pm_stats_cnt = PM_NSTATS;
3ccc6cf7 8660 adapter->params.arch.vfcount = 128;
2216d014 8661 adapter->params.arch.cng_ch_bits_log = 2;
3ccc6cf7
HS
8662 break;
8663 case CHELSIO_T6:
8664 adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T6, pl_rev);
8665 adapter->params.arch.sge_fl_db = 0;
8666 adapter->params.arch.mps_tcam_size =
8667 NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
8668 adapter->params.arch.mps_rplc_size = 256;
8669 adapter->params.arch.nchan = 2;
44588560 8670 adapter->params.arch.pm_stats_cnt = T6_PM_NSTATS;
3ccc6cf7 8671 adapter->params.arch.vfcount = 256;
2216d014
HS
8672 /* Congestion map will be for 2 channels so that
 8673		 * MPS can have 8 priorities per port.
8674 */
8675 adapter->params.arch.cng_ch_bits_log = 3;
0a57a536
SR
8676 break;
8677 default:
8678 dev_err(adapter->pdev_dev, "Device %d is not supported\n",
8679 device_id);
8680 return -EINVAL;
8681 }
8682
f1ff24aa 8683 adapter->params.cim_la_size = CIMLA_SIZE;
56d36be4
DM
8684 init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);
8685
8686 /*
8687 * Default port for debugging in case we can't reach FW.
8688 */
8689 adapter->params.nports = 1;
8690 adapter->params.portvec = 1;
636f9d37 8691 adapter->params.vpd.cclk = 50000;
eca0f6ee 8692
962b5827
BH
8693 /* Set PCIe completion timeout to 4 seconds. */
8694 pcie_capability_clear_and_set_word(adapter->pdev, PCI_EXP_DEVCTL2,
8695 PCI_EXP_DEVCTL2_COMP_TIMEOUT, 0xd);
56d36be4
DM
8696 return 0;
8697}
8698
3be0679b
HS
8699/**
8700 * t4_shutdown_adapter - shut down adapter, host & wire
8701 * @adapter: the adapter
8702 *
8703 * Perform an emergency shutdown of the adapter and stop it from
8704 * continuing any further communication on the ports or DMA to the
8705 * host. This is typically used when the adapter and/or firmware
8706 * have crashed and we want to prevent any further accidental
8707 * communication with the rest of the world. This will also force
8708 * the port Link Status to go down -- if register writes work --
8709 * which should help our peers figure out that we're down.
8710 */
8711int t4_shutdown_adapter(struct adapter *adapter)
8712{
8713 int port;
8714
8715 t4_intr_disable(adapter);
8716 t4_write_reg(adapter, DBG_GPIO_EN_A, 0);
8717 for_each_port(adapter, port) {
b3fd8220
RL
8718 u32 a_port_cfg = is_t4(adapter->params.chip) ?
8719 PORT_REG(port, XGMAC_PORT_CFG_A) :
8720 T5_PORT_REG(port, MAC_PORT_CFG_A);
3be0679b
HS
8721
8722 t4_write_reg(adapter, a_port_cfg,
8723 t4_read_reg(adapter, a_port_cfg)
8724 & ~SIGNAL_DET_V(1));
8725 }
8726 t4_set_reg_field(adapter, SGE_CONTROL_A, GLOBALENABLE_F, 0);
8727
8728 return 0;
8729}
8730
e85c9a7a 8731/**
b2612722 8732 * t4_bar2_sge_qregs - return BAR2 SGE Queue register information
e85c9a7a
HS
8733 * @adapter: the adapter
8734 * @qid: the Queue ID
8735 * @qtype: the Ingress or Egress type for @qid
66cf188e 8736 * @user: true if this request is for a user mode queue
e85c9a7a
HS
8737 * @pbar2_qoffset: BAR2 Queue Offset
8738 * @pbar2_qid: BAR2 Queue ID or 0 for Queue ID inferred SGE Queues
8739 *
8740 * Returns the BAR2 SGE Queue Registers information associated with the
8741 * indicated Absolute Queue ID. These are passed back in return value
8742 * pointers. @qtype should be T4_BAR2_QTYPE_EGRESS for Egress Queue
8743 * and T4_BAR2_QTYPE_INGRESS for Ingress Queues.
8744 *
8745 * This may return an error which indicates that BAR2 SGE Queue
8746 * registers aren't available. If an error is not returned, then the
8747 * following values are returned:
8748 *
8749 * *@pbar2_qoffset: the BAR2 Offset of the @qid Registers
8750 * *@pbar2_qid: the BAR2 SGE Queue ID or 0 of @qid
8751 *
8752 * If the returned BAR2 Queue ID is 0, then BAR2 SGE registers which
8753 * require the "Inferred Queue ID" ability may be used. E.g. the
8754 * Write Combining Doorbell Buffer. If the BAR2 Queue ID is not 0,
 8755 *	then these "Inferred Queue ID" registers may not be used.
8756 */
b2612722 8757int t4_bar2_sge_qregs(struct adapter *adapter,
e85c9a7a
HS
8758 unsigned int qid,
8759 enum t4_bar2_qtype qtype,
66cf188e 8760 int user,
e85c9a7a
HS
8761 u64 *pbar2_qoffset,
8762 unsigned int *pbar2_qid)
8763{
8764 unsigned int page_shift, page_size, qpp_shift, qpp_mask;
8765 u64 bar2_page_offset, bar2_qoffset;
8766 unsigned int bar2_qid, bar2_qid_offset, bar2_qinferred;
8767
66cf188e
H
8768 /* T4 doesn't support BAR2 SGE Queue registers for kernel mode queues */
8769 if (!user && is_t4(adapter->params.chip))
e85c9a7a
HS
8770 return -EINVAL;
8771
8772 /* Get our SGE Page Size parameters.
8773 */
8774 page_shift = adapter->params.sge.hps + 10;
8775 page_size = 1 << page_shift;
8776
8777 /* Get the right Queues per Page parameters for our Queue.
8778 */
8779 qpp_shift = (qtype == T4_BAR2_QTYPE_EGRESS
8780 ? adapter->params.sge.eq_qpp
8781 : adapter->params.sge.iq_qpp);
8782 qpp_mask = (1 << qpp_shift) - 1;
8783
8784 /* Calculate the basics of the BAR2 SGE Queue register area:
8785 * o The BAR2 page the Queue registers will be in.
8786 * o The BAR2 Queue ID.
8787 * o The BAR2 Queue ID Offset into the BAR2 page.
8788 */
513d1a1d 8789 bar2_page_offset = ((u64)(qid >> qpp_shift) << page_shift);
e85c9a7a
HS
8790 bar2_qid = qid & qpp_mask;
8791 bar2_qid_offset = bar2_qid * SGE_UDB_SIZE;
8792
8793 /* If the BAR2 Queue ID Offset is less than the Page Size, then the
8794 * hardware will infer the Absolute Queue ID simply from the writes to
8795 * the BAR2 Queue ID Offset within the BAR2 Page (and we need to use a
8796 * BAR2 Queue ID of 0 for those writes). Otherwise, we'll simply
8797 * write to the first BAR2 SGE Queue Area within the BAR2 Page with
8798 * the BAR2 Queue ID and the hardware will infer the Absolute Queue ID
8799 * from the BAR2 Page and BAR2 Queue ID.
8800 *
 8801	 * One important consequence of this is that some BAR2 SGE registers
8802 * have a "Queue ID" field and we can write the BAR2 SGE Queue ID
8803 * there. But other registers synthesize the SGE Queue ID purely
8804 * from the writes to the registers -- the Write Combined Doorbell
8805 * Buffer is a good example. These BAR2 SGE Registers are only
8806 * available for those BAR2 SGE Register areas where the SGE Absolute
8807 * Queue ID can be inferred from simple writes.
8808 */
8809 bar2_qoffset = bar2_page_offset;
8810 bar2_qinferred = (bar2_qid_offset < page_size);
8811 if (bar2_qinferred) {
8812 bar2_qoffset += bar2_qid_offset;
8813 bar2_qid = 0;
8814 }
8815
8816 *pbar2_qoffset = bar2_qoffset;
8817 *pbar2_qid = bar2_qid;
8818 return 0;
8819}
8820
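/* Worked example for t4_bar2_sge_qregs() (numbers illustrative, assuming
 * the usual 128-byte SGE_UDB_SIZE): with a 4KB page (page_shift = 12),
 * qpp_shift = 3 and qid = 10, we get
 * bar2_page_offset = (10 >> 3) << 12 = 0x1000, bar2_qid = 10 & 7 = 2 and
 * bar2_qid_offset = 2 * 128 = 0x100 < 4KB, so the Queue ID is inferable:
 * *pbar2_qoffset = 0x1100 and *pbar2_qid = 0.
 */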
ae469b68
HS
8821/**
8822 * t4_init_devlog_params - initialize adapter->params.devlog
8823 * @adap: the adapter
8824 *
8825 * Initialize various fields of the adapter's Firmware Device Log
8826 * Parameters structure.
8827 */
8828int t4_init_devlog_params(struct adapter *adap)
8829{
8830 struct devlog_params *dparams = &adap->params.devlog;
8831 u32 pf_dparams;
8832 unsigned int devlog_meminfo;
8833 struct fw_devlog_cmd devlog_cmd;
8834 int ret;
8835
 8836	/* If we're dealing with newer firmware, the Device Log Parameters
8837 * are stored in a designated register which allows us to access the
8838 * Device Log even if we can't talk to the firmware.
8839 */
8840 pf_dparams =
8841 t4_read_reg(adap, PCIE_FW_REG(PCIE_FW_PF_A, PCIE_FW_PF_DEVLOG));
8842 if (pf_dparams) {
8843 unsigned int nentries, nentries128;
8844
8845 dparams->memtype = PCIE_FW_PF_DEVLOG_MEMTYPE_G(pf_dparams);
8846 dparams->start = PCIE_FW_PF_DEVLOG_ADDR16_G(pf_dparams) << 4;
8847
8848 nentries128 = PCIE_FW_PF_DEVLOG_NENTRIES128_G(pf_dparams);
8849 nentries = (nentries128 + 1) * 128;
8850 dparams->size = nentries * sizeof(struct fw_devlog_e);
8851
8852 return 0;
8853 }
8854
 8855	/* Otherwise, ask the firmware for its Device Log Parameters.
8856 */
8857 memset(&devlog_cmd, 0, sizeof(devlog_cmd));
f404f80c
HS
8858 devlog_cmd.op_to_write = cpu_to_be32(FW_CMD_OP_V(FW_DEVLOG_CMD) |
8859 FW_CMD_REQUEST_F | FW_CMD_READ_F);
8860 devlog_cmd.retval_len16 = cpu_to_be32(FW_LEN16(devlog_cmd));
ae469b68
HS
8861 ret = t4_wr_mbox(adap, adap->mbox, &devlog_cmd, sizeof(devlog_cmd),
8862 &devlog_cmd);
8863 if (ret)
8864 return ret;
8865
f404f80c
HS
8866 devlog_meminfo =
8867 be32_to_cpu(devlog_cmd.memtype_devlog_memaddr16_devlog);
ae469b68
HS
8868 dparams->memtype = FW_DEVLOG_CMD_MEMTYPE_DEVLOG_G(devlog_meminfo);
8869 dparams->start = FW_DEVLOG_CMD_MEMADDR16_DEVLOG_G(devlog_meminfo) << 4;
f404f80c 8870 dparams->size = be32_to_cpu(devlog_cmd.memsize_devlog);
ae469b68
HS
8871
8872 return 0;
8873}
8874
e85c9a7a
HS
8875/**
8876 * t4_init_sge_params - initialize adap->params.sge
8877 * @adapter: the adapter
8878 *
8879 * Initialize various fields of the adapter's SGE Parameters structure.
8880 */
8881int t4_init_sge_params(struct adapter *adapter)
8882{
8883 struct sge_params *sge_params = &adapter->params.sge;
8884 u32 hps, qpp;
8885 unsigned int s_hps, s_qpp;
8886
8887 /* Extract the SGE Page Size for our PF.
8888 */
f612b815 8889 hps = t4_read_reg(adapter, SGE_HOST_PAGE_SIZE_A);
e85c9a7a 8890 s_hps = (HOSTPAGESIZEPF0_S +
b2612722 8891 (HOSTPAGESIZEPF1_S - HOSTPAGESIZEPF0_S) * adapter->pf);
e85c9a7a
HS
8892 sge_params->hps = ((hps >> s_hps) & HOSTPAGESIZEPF0_M);
8893
 8894	/* Extract the SGE Egress and Ingress Queues Per Page for our PF.
8895 */
8896 s_qpp = (QUEUESPERPAGEPF0_S +
b2612722 8897 (QUEUESPERPAGEPF1_S - QUEUESPERPAGEPF0_S) * adapter->pf);
f612b815
HS
8898 qpp = t4_read_reg(adapter, SGE_EGRESS_QUEUES_PER_PAGE_PF_A);
8899 sge_params->eq_qpp = ((qpp >> s_qpp) & QUEUESPERPAGEPF0_M);
f061de42 8900 qpp = t4_read_reg(adapter, SGE_INGRESS_QUEUES_PER_PAGE_PF_A);
f612b815 8901 sge_params->iq_qpp = ((qpp >> s_qpp) & QUEUESPERPAGEPF0_M);
e85c9a7a
HS
8902
8903 return 0;
8904}
8905
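/* Note on the values cached above (a reading aid, not new behaviour): hps
 * and the two *_qpp fields are log2 encodings, so hps = 2 means a
 * 1 << (2 + 10) = 4KB SGE Host Page Size and eq_qpp = 3 means 8 Egress
 * Queues per page; t4_bar2_sge_qregs() interprets them exactly this way.
 */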
dcf7b6f5
KS
8906/**
8907 * t4_init_tp_params - initialize adap->params.tp
8908 * @adap: the adapter
5ccf9d04 8909 * @sleep_ok: if true we may sleep while awaiting command completion
dcf7b6f5
KS
8910 *
8911 * Initialize various fields of the adapter's TP Parameters structure.
8912 */
5ccf9d04 8913int t4_init_tp_params(struct adapter *adap, bool sleep_ok)
dcf7b6f5
KS
8914{
8915 int chan;
8916 u32 v;
8917
837e4a42
HS
8918 v = t4_read_reg(adap, TP_TIMER_RESOLUTION_A);
8919 adap->params.tp.tre = TIMERRESOLUTION_G(v);
8920 adap->params.tp.dack_re = DELAYEDACKRESOLUTION_G(v);
dcf7b6f5
KS
8921
8922 /* MODQ_REQ_MAP defaults to setting queues 0-3 to chan 0-3 */
8923 for (chan = 0; chan < NCHAN; chan++)
8924 adap->params.tp.tx_modq[chan] = chan;
8925
 8926	/* Cache the adapter's Compressed Filter Mode and global Ingress
8927 * Configuration.
8928 */
5ccf9d04
RL
8929 t4_tp_pio_read(adap, &adap->params.tp.vlan_pri_map, 1,
8930 TP_VLAN_PRI_MAP_A, sleep_ok);
8931 t4_tp_pio_read(adap, &adap->params.tp.ingress_config, 1,
8932 TP_INGRESS_CONFIG_A, sleep_ok);
8933
8eb9f2f9
A
8934 /* For T6, cache the adapter's compressed error vector
8935 * and passing outer header info for encapsulated packets.
8936 */
8937 if (CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5) {
8938 v = t4_read_reg(adap, TP_OUT_CONFIG_A);
8939 adap->params.tp.rx_pkt_encap = (v & CRXPKTENC_F) ? 1 : 0;
8940 }
dcf7b6f5
KS
8941
8942 /* Now that we have TP_VLAN_PRI_MAP cached, we can calculate the field
8943 * shift positions of several elements of the Compressed Filter Tuple
8944 * for this adapter which we need frequently ...
8945 */
0ba9a3b6 8946 adap->params.tp.fcoe_shift = t4_filter_field_shift(adap, FCOE_F);
0d804338 8947 adap->params.tp.port_shift = t4_filter_field_shift(adap, PORT_F);
0ba9a3b6
KS
8948 adap->params.tp.vnic_shift = t4_filter_field_shift(adap, VNIC_ID_F);
8949 adap->params.tp.vlan_shift = t4_filter_field_shift(adap, VLAN_F);
8950 adap->params.tp.tos_shift = t4_filter_field_shift(adap, TOS_F);
dcf7b6f5 8951 adap->params.tp.protocol_shift = t4_filter_field_shift(adap,
0d804338 8952 PROTOCOL_F);
0ba9a3b6
KS
8953 adap->params.tp.ethertype_shift = t4_filter_field_shift(adap,
8954 ETHERTYPE_F);
8955 adap->params.tp.macmatch_shift = t4_filter_field_shift(adap,
8956 MACMATCH_F);
8957 adap->params.tp.matchtype_shift = t4_filter_field_shift(adap,
8958 MPSHITTYPE_F);
8959 adap->params.tp.frag_shift = t4_filter_field_shift(adap,
8960 FRAGMENTATION_F);
dcf7b6f5
KS
8961
8962 /* If TP_INGRESS_CONFIG.VNID == 0, then TP_VLAN_PRI_MAP.VNIC_ID
dbedd44e 8963 * represents the presence of an Outer VLAN instead of a VNIC ID.
dcf7b6f5 8964 */
0d804338 8965 if ((adap->params.tp.ingress_config & VNIC_F) == 0)
dcf7b6f5
KS
8966 adap->params.tp.vnic_shift = -1;
8967
0ba9a3b6
KS
8968 v = t4_read_reg(adap, LE_3_DB_HASH_MASK_GEN_IPV4_T6_A);
8969 adap->params.tp.hash_filter_mask = v;
8970 v = t4_read_reg(adap, LE_4_DB_HASH_MASK_GEN_IPV4_T6_A);
8971 adap->params.tp.hash_filter_mask |= ((u64)v << 32);
dcf7b6f5
KS
8972 return 0;
8973}
8974
8975/**
8976 * t4_filter_field_shift - calculate filter field shift
8977 * @adap: the adapter
8978 * @filter_sel: the desired field (from TP_VLAN_PRI_MAP bits)
8979 *
8980 * Return the shift position of a filter field within the Compressed
8981 * Filter Tuple. The filter field is specified via its selection bit
 8982 *	within TP_VLAN_PRI_MAP (filter mode). E.g. VLAN_F.
8983 */
8984int t4_filter_field_shift(const struct adapter *adap, int filter_sel)
8985{
8986 unsigned int filter_mode = adap->params.tp.vlan_pri_map;
8987 unsigned int sel;
8988 int field_shift;
8989
8990 if ((filter_mode & filter_sel) == 0)
8991 return -1;
8992
8993 for (sel = 1, field_shift = 0; sel < filter_sel; sel <<= 1) {
8994 switch (filter_mode & sel) {
0d804338
HS
8995 case FCOE_F:
8996 field_shift += FT_FCOE_W;
dcf7b6f5 8997 break;
0d804338
HS
8998 case PORT_F:
8999 field_shift += FT_PORT_W;
dcf7b6f5 9000 break;
0d804338
HS
9001 case VNIC_ID_F:
9002 field_shift += FT_VNIC_ID_W;
dcf7b6f5 9003 break;
0d804338
HS
9004 case VLAN_F:
9005 field_shift += FT_VLAN_W;
dcf7b6f5 9006 break;
0d804338
HS
9007 case TOS_F:
9008 field_shift += FT_TOS_W;
dcf7b6f5 9009 break;
0d804338
HS
9010 case PROTOCOL_F:
9011 field_shift += FT_PROTOCOL_W;
dcf7b6f5 9012 break;
0d804338
HS
9013 case ETHERTYPE_F:
9014 field_shift += FT_ETHERTYPE_W;
dcf7b6f5 9015 break;
0d804338
HS
9016 case MACMATCH_F:
9017 field_shift += FT_MACMATCH_W;
dcf7b6f5 9018 break;
0d804338
HS
9019 case MPSHITTYPE_F:
9020 field_shift += FT_MPSHITTYPE_W;
dcf7b6f5 9021 break;
0d804338
HS
9022 case FRAGMENTATION_F:
9023 field_shift += FT_FRAGMENTATION_W;
dcf7b6f5
KS
9024 break;
9025 }
9026 }
9027 return field_shift;
9028}
9029
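/* Example of the shift calculation above (illustrative filter mode): if
 * TP_VLAN_PRI_MAP has only PORT_F and PROTOCOL_F selected, then
 * t4_filter_field_shift(adap, PORT_F) returns 0 (no selected field below
 * it), t4_filter_field_shift(adap, PROTOCOL_F) returns FT_PORT_W (the
 * width of the only lower-order field present), and asking for a field
 * that isn't selected, e.g. VLAN_F, returns -1.
 */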
c035e183
HS
9030int t4_init_rss_mode(struct adapter *adap, int mbox)
9031{
9032 int i, ret;
9033 struct fw_rss_vi_config_cmd rvc;
9034
9035 memset(&rvc, 0, sizeof(rvc));
9036
9037 for_each_port(adap, i) {
9038 struct port_info *p = adap2pinfo(adap, i);
9039
f404f80c
HS
9040 rvc.op_to_viid =
9041 cpu_to_be32(FW_CMD_OP_V(FW_RSS_VI_CONFIG_CMD) |
9042 FW_CMD_REQUEST_F | FW_CMD_READ_F |
9043 FW_RSS_VI_CONFIG_CMD_VIID_V(p->viid));
9044 rvc.retval_len16 = cpu_to_be32(FW_LEN16(rvc));
c035e183
HS
9045 ret = t4_wr_mbox(adap, mbox, &rvc, sizeof(rvc), &rvc);
9046 if (ret)
9047 return ret;
f404f80c 9048 p->rss_mode = be32_to_cpu(rvc.u.basicvirtual.defaultq_to_udpen);
c035e183
HS
9049 }
9050 return 0;
9051}
9052
c3e324e3 9053/**
c3168cab 9054 * t4_init_portinfo - allocate a virtual interface and initialize port_info
c3e324e3
HS
9055 * @pi: the port_info
9056 * @mbox: mailbox to use for the FW command
9057 * @port: physical port associated with the VI
9058 * @pf: the PF owning the VI
9059 * @vf: the VF owning the VI
9060 * @mac: the MAC address of the VI
9061 *
9062 * Allocates a virtual interface for the given physical port. If @mac is
9063 * not %NULL it contains the MAC address of the VI as assigned by FW.
9064 * @mac should be large enough to hold an Ethernet address.
9065 * Returns < 0 on error.
9066 */
9067int t4_init_portinfo(struct port_info *pi, int mbox,
9068 int port, int pf, int vf, u8 mac[])
56d36be4 9069{
c3168cab
GG
9070 struct adapter *adapter = pi->adapter;
9071 unsigned int fw_caps = adapter->params.fw_caps_support;
9072 struct fw_port_cmd cmd;
c3e324e3 9073 unsigned int rss_size;
c3168cab
GG
9074 enum fw_port_type port_type;
9075 int mdio_addr;
9076 fw_port_cap32_t pcaps, acaps;
9077 int ret;
56d36be4 9078
c3168cab
GG
9079 /* If we haven't yet determined whether we're talking to Firmware
9080 * which knows the new 32-bit Port Capabilities, it's time to find
9081 * out now. This will also tell new Firmware to send us Port Status
9082 * Updates using the new 32-bit Port Capabilities version of the
9083 * Port Information message.
9084 */
9085 if (fw_caps == FW_CAPS_UNKNOWN) {
9086 u32 param, val;
9087
9088 param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_PFVF) |
9089 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_PFVF_PORT_CAPS32));
9090 val = 1;
9091 ret = t4_set_params(adapter, mbox, pf, vf, 1, &param, &val);
9092 fw_caps = (ret == 0 ? FW_CAPS32 : FW_CAPS16);
9093 adapter->params.fw_caps_support = fw_caps;
9094 }
9095
9096 memset(&cmd, 0, sizeof(cmd));
9097 cmd.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PORT_CMD) |
9098 FW_CMD_REQUEST_F | FW_CMD_READ_F |
9099 FW_PORT_CMD_PORTID_V(port));
9100 cmd.action_to_len16 = cpu_to_be32(
9101 FW_PORT_CMD_ACTION_V(fw_caps == FW_CAPS16
9102 ? FW_PORT_ACTION_GET_PORT_INFO
9103 : FW_PORT_ACTION_GET_PORT_INFO32) |
9104 FW_LEN16(cmd));
9105 ret = t4_wr_mbox(pi->adapter, mbox, &cmd, sizeof(cmd), &cmd);
c3e324e3
HS
9106 if (ret)
9107 return ret;
9108
c3168cab
GG
9109 /* Extract the various fields from the Port Information message.
9110 */
9111 if (fw_caps == FW_CAPS16) {
9112 u32 lstatus = be32_to_cpu(cmd.u.info.lstatus_to_modtype);
9113
9114 port_type = FW_PORT_CMD_PTYPE_G(lstatus);
9115 mdio_addr = ((lstatus & FW_PORT_CMD_MDIOCAP_F)
9116 ? FW_PORT_CMD_MDIOADDR_G(lstatus)
9117 : -1);
9118 pcaps = fwcaps16_to_caps32(be16_to_cpu(cmd.u.info.pcap));
9119 acaps = fwcaps16_to_caps32(be16_to_cpu(cmd.u.info.acap));
9120 } else {
9121 u32 lstatus32 = be32_to_cpu(cmd.u.info32.lstatus32_to_cbllen32);
9122
9123 port_type = FW_PORT_CMD_PORTTYPE32_G(lstatus32);
9124 mdio_addr = ((lstatus32 & FW_PORT_CMD_MDIOCAP32_F)
9125 ? FW_PORT_CMD_MDIOADDR32_G(lstatus32)
9126 : -1);
9127 pcaps = be32_to_cpu(cmd.u.info32.pcaps32);
9128 acaps = be32_to_cpu(cmd.u.info32.acaps32);
9129 }
9130
c3e324e3
HS
9131 ret = t4_alloc_vi(pi->adapter, mbox, port, pf, vf, 1, mac, &rss_size);
9132 if (ret < 0)
9133 return ret;
9134
9135 pi->viid = ret;
9136 pi->tx_chan = port;
9137 pi->lport = port;
9138 pi->rss_size = rss_size;
9139
c3168cab
GG
9140 pi->port_type = port_type;
9141 pi->mdio_addr = mdio_addr;
c3e324e3
HS
9142 pi->mod_type = FW_PORT_MOD_TYPE_NA;
9143
c3168cab 9144 init_link_config(&pi->link_cfg, pcaps, acaps);
c3e324e3
HS
9145 return 0;
9146}
9147
9148int t4_port_init(struct adapter *adap, int mbox, int pf, int vf)
9149{
9150 u8 addr[6];
9151 int ret, i, j = 0;
56d36be4
DM
9152
9153 for_each_port(adap, i) {
c3e324e3 9154 struct port_info *pi = adap2pinfo(adap, i);
56d36be4
DM
9155
9156 while ((adap->params.portvec & (1 << j)) == 0)
9157 j++;
9158
c3e324e3 9159 ret = t4_init_portinfo(pi, mbox, j, pf, vf, addr);
56d36be4
DM
9160 if (ret)
9161 return ret;
9162
56d36be4 9163 memcpy(adap->port[i]->dev_addr, addr, ETH_ALEN);
56d36be4
DM
9164 j++;
9165 }
9166 return 0;
9167}
f1ff24aa 9168
74b3092c
HS
9169/**
9170 * t4_read_cimq_cfg - read CIM queue configuration
9171 * @adap: the adapter
9172 * @base: holds the queue base addresses in bytes
9173 * @size: holds the queue sizes in bytes
9174 * @thres: holds the queue full thresholds in bytes
9175 *
9176 * Returns the current configuration of the CIM queues, starting with
9177 * the IBQs, then the OBQs.
9178 */
9179void t4_read_cimq_cfg(struct adapter *adap, u16 *base, u16 *size, u16 *thres)
9180{
9181 unsigned int i, v;
9182 int cim_num_obq = is_t4(adap->params.chip) ?
9183 CIM_NUM_OBQ : CIM_NUM_OBQ_T5;
9184
9185 for (i = 0; i < CIM_NUM_IBQ; i++) {
9186 t4_write_reg(adap, CIM_QUEUE_CONFIG_REF_A, IBQSELECT_F |
9187 QUENUMSELECT_V(i));
9188 v = t4_read_reg(adap, CIM_QUEUE_CONFIG_CTRL_A);
9189 /* value is in 256-byte units */
9190 *base++ = CIMQBASE_G(v) * 256;
9191 *size++ = CIMQSIZE_G(v) * 256;
9192 *thres++ = QUEFULLTHRSH_G(v) * 8; /* 8-byte unit */
9193 }
9194 for (i = 0; i < cim_num_obq; i++) {
9195 t4_write_reg(adap, CIM_QUEUE_CONFIG_REF_A, OBQSELECT_F |
9196 QUENUMSELECT_V(i));
9197 v = t4_read_reg(adap, CIM_QUEUE_CONFIG_CTRL_A);
9198 /* value is in 256-byte units */
9199 *base++ = CIMQBASE_G(v) * 256;
9200 *size++ = CIMQSIZE_G(v) * 256;
9201 }
9202}
9203
e5f0e43b
HS
9204/**
9205 * t4_read_cim_ibq - read the contents of a CIM inbound queue
9206 * @adap: the adapter
9207 * @qid: the queue index
9208 * @data: where to store the queue contents
9209 * @n: capacity of @data in 32-bit words
9210 *
9211 * Reads the contents of the selected CIM queue starting at address 0 up
9212 * to the capacity of @data. @n must be a multiple of 4. Returns < 0 on
9213 * error and the number of 32-bit words actually read on success.
9214 */
9215int t4_read_cim_ibq(struct adapter *adap, unsigned int qid, u32 *data, size_t n)
9216{
9217 int i, err, attempts;
9218 unsigned int addr;
9219 const unsigned int nwords = CIM_IBQ_SIZE * 4;
9220
9221 if (qid > 5 || (n & 3))
9222 return -EINVAL;
9223
9224 addr = qid * nwords;
9225 if (n > nwords)
9226 n = nwords;
9227
9228 /* It might take 3-10ms before the IBQ debug read access is allowed.
 9229	 * Wait for up to 1 second, polling with a 1 usec delay.
9230 */
9231 attempts = 1000000;
9232
9233 for (i = 0; i < n; i++, addr++) {
9234 t4_write_reg(adap, CIM_IBQ_DBG_CFG_A, IBQDBGADDR_V(addr) |
9235 IBQDBGEN_F);
9236 err = t4_wait_op_done(adap, CIM_IBQ_DBG_CFG_A, IBQDBGBUSY_F, 0,
9237 attempts, 1);
9238 if (err)
9239 return err;
9240 *data++ = t4_read_reg(adap, CIM_IBQ_DBG_DATA_A);
9241 }
9242 t4_write_reg(adap, CIM_IBQ_DBG_CFG_A, 0);
c778af7d
HS
9243 return i;
9244}
9245
9246/**
9247 * t4_read_cim_obq - read the contents of a CIM outbound queue
9248 * @adap: the adapter
9249 * @qid: the queue index
9250 * @data: where to store the queue contents
9251 * @n: capacity of @data in 32-bit words
9252 *
9253 * Reads the contents of the selected CIM queue starting at address 0 up
9254 * to the capacity of @data. @n must be a multiple of 4. Returns < 0 on
9255 * error and the number of 32-bit words actually read on success.
9256 */
9257int t4_read_cim_obq(struct adapter *adap, unsigned int qid, u32 *data, size_t n)
9258{
9259 int i, err;
9260 unsigned int addr, v, nwords;
9261 int cim_num_obq = is_t4(adap->params.chip) ?
9262 CIM_NUM_OBQ : CIM_NUM_OBQ_T5;
9263
9264 if ((qid > (cim_num_obq - 1)) || (n & 3))
9265 return -EINVAL;
9266
9267 t4_write_reg(adap, CIM_QUEUE_CONFIG_REF_A, OBQSELECT_F |
9268 QUENUMSELECT_V(qid));
9269 v = t4_read_reg(adap, CIM_QUEUE_CONFIG_CTRL_A);
9270
 9271	addr = CIMQBASE_G(v) * 64;  /* multiple of 256 -> multiple of 4 */
9272 nwords = CIMQSIZE_G(v) * 64; /* same */
9273 if (n > nwords)
9274 n = nwords;
9275
9276 for (i = 0; i < n; i++, addr++) {
9277 t4_write_reg(adap, CIM_OBQ_DBG_CFG_A, OBQDBGADDR_V(addr) |
9278 OBQDBGEN_F);
9279 err = t4_wait_op_done(adap, CIM_OBQ_DBG_CFG_A, OBQDBGBUSY_F, 0,
9280 2, 1);
9281 if (err)
9282 return err;
9283 *data++ = t4_read_reg(adap, CIM_OBQ_DBG_DATA_A);
9284 }
9285 t4_write_reg(adap, CIM_OBQ_DBG_CFG_A, 0);
e5f0e43b
HS
9286 return i;
9287}
9288
f1ff24aa
HS
9289/**
9290 * t4_cim_read - read a block from CIM internal address space
9291 * @adap: the adapter
9292 * @addr: the start address within the CIM address space
9293 * @n: number of words to read
9294 * @valp: where to store the result
9295 *
 9296 *	Reads a block of 4-byte words from the CIM internal address space.
9297 */
9298int t4_cim_read(struct adapter *adap, unsigned int addr, unsigned int n,
9299 unsigned int *valp)
9300{
9301 int ret = 0;
9302
9303 if (t4_read_reg(adap, CIM_HOST_ACC_CTRL_A) & HOSTBUSY_F)
9304 return -EBUSY;
9305
9306 for ( ; !ret && n--; addr += 4) {
9307 t4_write_reg(adap, CIM_HOST_ACC_CTRL_A, addr);
9308 ret = t4_wait_op_done(adap, CIM_HOST_ACC_CTRL_A, HOSTBUSY_F,
9309 0, 5, 2);
9310 if (!ret)
9311 *valp++ = t4_read_reg(adap, CIM_HOST_ACC_DATA_A);
9312 }
9313 return ret;
9314}
9315
9316/**
9317 * t4_cim_write - write a block into CIM internal address space
9318 * @adap: the adapter
9319 * @addr: the start address within the CIM address space
9320 * @n: number of words to write
9321 * @valp: set of values to write
9322 *
 9323 *	Writes a block of 4-byte words into the CIM internal address space.
9324 */
9325int t4_cim_write(struct adapter *adap, unsigned int addr, unsigned int n,
9326 const unsigned int *valp)
9327{
9328 int ret = 0;
9329
9330 if (t4_read_reg(adap, CIM_HOST_ACC_CTRL_A) & HOSTBUSY_F)
9331 return -EBUSY;
9332
9333 for ( ; !ret && n--; addr += 4) {
9334 t4_write_reg(adap, CIM_HOST_ACC_DATA_A, *valp++);
9335 t4_write_reg(adap, CIM_HOST_ACC_CTRL_A, addr | HOSTWRITE_F);
9336 ret = t4_wait_op_done(adap, CIM_HOST_ACC_CTRL_A, HOSTBUSY_F,
9337 0, 5, 2);
9338 }
9339 return ret;
9340}
9341
9342static int t4_cim_write1(struct adapter *adap, unsigned int addr,
9343 unsigned int val)
9344{
9345 return t4_cim_write(adap, addr, 1, &val);
9346}
9347
9348/**
9349 * t4_cim_read_la - read CIM LA capture buffer
9350 * @adap: the adapter
9351 * @la_buf: where to store the LA data
9352 * @wrptr: the HW write pointer within the capture buffer
9353 *
9354 * Reads the contents of the CIM LA buffer with the most recent entry at
9355 * the end of the returned data and with the entry at @wrptr first.
9356 * We try to leave the LA in the running state we find it in.
9357 */
9358int t4_cim_read_la(struct adapter *adap, u32 *la_buf, unsigned int *wrptr)
9359{
9360 int i, ret;
9361 unsigned int cfg, val, idx;
9362
9363 ret = t4_cim_read(adap, UP_UP_DBG_LA_CFG_A, 1, &cfg);
9364 if (ret)
9365 return ret;
9366
9367 if (cfg & UPDBGLAEN_F) { /* LA is running, freeze it */
9368 ret = t4_cim_write1(adap, UP_UP_DBG_LA_CFG_A, 0);
9369 if (ret)
9370 return ret;
9371 }
9372
9373 ret = t4_cim_read(adap, UP_UP_DBG_LA_CFG_A, 1, &val);
9374 if (ret)
9375 goto restart;
9376
9377 idx = UPDBGLAWRPTR_G(val);
9378 if (wrptr)
9379 *wrptr = idx;
9380
9381 for (i = 0; i < adap->params.cim_la_size; i++) {
9382 ret = t4_cim_write1(adap, UP_UP_DBG_LA_CFG_A,
9383 UPDBGLARDPTR_V(idx) | UPDBGLARDEN_F);
9384 if (ret)
9385 break;
9386 ret = t4_cim_read(adap, UP_UP_DBG_LA_CFG_A, 1, &val);
9387 if (ret)
9388 break;
9389 if (val & UPDBGLARDEN_F) {
9390 ret = -ETIMEDOUT;
9391 break;
9392 }
9393 ret = t4_cim_read(adap, UP_UP_DBG_LA_DATA_A, 1, &la_buf[i]);
9394 if (ret)
9395 break;
a97051f4
GG
9396
9397 /* Bits 0-3 of UpDbgLaRdPtr can be between 0000 to 1001 to
9398 * identify the 32-bit portion of the full 312-bit data
9399 */
9400 if (is_t6(adap->params.chip) && (idx & 0xf) >= 9)
9401 idx = (idx & 0xff0) + 0x10;
9402 else
9403 idx++;
9404 /* address can't exceed 0xfff */
9405 idx &= UPDBGLARDPTR_M;
f1ff24aa
HS
9406 }
9407restart:
9408 if (cfg & UPDBGLAEN_F) {
9409 int r = t4_cim_write1(adap, UP_UP_DBG_LA_CFG_A,
9410 cfg & ~UPDBGLARDEN_F);
9411 if (!ret)
9412 ret = r;
9413 }
9414 return ret;
9415}
2d277b3b
HS
9416
9417/**
9418 * t4_tp_read_la - read TP LA capture buffer
9419 * @adap: the adapter
9420 * @la_buf: where to store the LA data
9421 * @wrptr: the HW write pointer within the capture buffer
9422 *
9423 * Reads the contents of the TP LA buffer with the most recent entry at
9424 * the end of the returned data and with the entry at @wrptr first.
9425 * We leave the LA in the running state we find it in.
9426 */
9427void t4_tp_read_la(struct adapter *adap, u64 *la_buf, unsigned int *wrptr)
9428{
9429 bool last_incomplete;
9430 unsigned int i, cfg, val, idx;
9431
9432 cfg = t4_read_reg(adap, TP_DBG_LA_CONFIG_A) & 0xffff;
9433 if (cfg & DBGLAENABLE_F) /* freeze LA */
9434 t4_write_reg(adap, TP_DBG_LA_CONFIG_A,
9435 adap->params.tp.la_mask | (cfg ^ DBGLAENABLE_F));
9436
9437 val = t4_read_reg(adap, TP_DBG_LA_CONFIG_A);
9438 idx = DBGLAWPTR_G(val);
9439 last_incomplete = DBGLAMODE_G(val) >= 2 && (val & DBGLAWHLF_F) == 0;
9440 if (last_incomplete)
9441 idx = (idx + 1) & DBGLARPTR_M;
9442 if (wrptr)
9443 *wrptr = idx;
9444
9445 val &= 0xffff;
9446 val &= ~DBGLARPTR_V(DBGLARPTR_M);
9447 val |= adap->params.tp.la_mask;
9448
9449 for (i = 0; i < TPLA_SIZE; i++) {
9450 t4_write_reg(adap, TP_DBG_LA_CONFIG_A, DBGLARPTR_V(idx) | val);
9451 la_buf[i] = t4_read_reg64(adap, TP_DBG_LA_DATAL_A);
9452 idx = (idx + 1) & DBGLARPTR_M;
9453 }
9454
9455 /* Wipe out last entry if it isn't valid */
9456 if (last_incomplete)
9457 la_buf[TPLA_SIZE - 1] = ~0ULL;
9458
9459 if (cfg & DBGLAENABLE_F) /* restore running state */
9460 t4_write_reg(adap, TP_DBG_LA_CONFIG_A,
9461 cfg | adap->params.tp.la_mask);
9462}
a3bfb617
HS
9463
9464/* SGE Hung Ingress DMA Warning Threshold time and Warning Repeat Rate (in
9465 * seconds). If we find one of the SGE Ingress DMA State Machines in the same
9466 * state for more than the Warning Threshold then we'll issue a warning about
 9467 * a potential hang. We'll repeat the warning every Warning Repeat seconds
 9468 * for as long as the SGE Ingress DMA Channel appears to be hung. Once the
 9469 * situation clears, we'll note that as well.
9470 */
9471#define SGE_IDMA_WARN_THRESH 1
9472#define SGE_IDMA_WARN_REPEAT 300
9473
9474/**
9475 * t4_idma_monitor_init - initialize SGE Ingress DMA Monitor
9476 * @adapter: the adapter
9477 * @idma: the adapter IDMA Monitor state
9478 *
9479 * Initialize the state of an SGE Ingress DMA Monitor.
9480 */
9481void t4_idma_monitor_init(struct adapter *adapter,
9482 struct sge_idma_monitor_state *idma)
9483{
9484 /* Initialize the state variables for detecting an SGE Ingress DMA
9485 * hang. The SGE has internal counters which count up on each clock
9486 * tick whenever the SGE finds its Ingress DMA State Engines in the
9487 * same state they were on the previous clock tick. The clock used is
9488 * the Core Clock so we have a limit on the maximum "time" they can
9489 * record; typically a very small number of seconds. For instance,
9490 * with a 600MHz Core Clock, we can only count up to a bit more than
9491 * 7s. So we'll synthesize a larger counter in order to not run the
9492 * risk of having the "timers" overflow and give us the flexibility to
9493 * maintain a Hung SGE State Machine of our own which operates across
9494 * a longer time frame.
9495 */
9496 idma->idma_1s_thresh = core_ticks_per_usec(adapter) * 1000000; /* 1s */
9497 idma->idma_stalled[0] = 0;
9498 idma->idma_stalled[1] = 0;
9499}
9500
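/* Arithmetic behind the "bit more than 7s" figure above (a sanity check,
 * not new behaviour): the hardware Same State counters are 32 bits wide
 * and tick at the Core Clock, so at 600MHz they saturate after roughly
 * 2^32 / 600e6 ~= 7.16 seconds.
 */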
9501/**
9502 * t4_idma_monitor - monitor SGE Ingress DMA state
9503 * @adapter: the adapter
9504 * @idma: the adapter IDMA Monitor state
9505 * @hz: number of ticks/second
9506 * @ticks: number of ticks since the last IDMA Monitor call
9507 */
9508void t4_idma_monitor(struct adapter *adapter,
9509 struct sge_idma_monitor_state *idma,
9510 int hz, int ticks)
9511{
9512 int i, idma_same_state_cnt[2];
9513
9514 /* Read the SGE Debug Ingress DMA Same State Count registers. These
9515 * are counters inside the SGE which count up on each clock when the
9516 * SGE finds its Ingress DMA State Engines in the same states they
9517 * were in the previous clock. The counters will peg out at
9518 * 0xffffffff without wrapping around so once they pass the 1s
9519 * threshold they'll stay above that till the IDMA state changes.
9520 */
9521 t4_write_reg(adapter, SGE_DEBUG_INDEX_A, 13);
9522 idma_same_state_cnt[0] = t4_read_reg(adapter, SGE_DEBUG_DATA_HIGH_A);
9523 idma_same_state_cnt[1] = t4_read_reg(adapter, SGE_DEBUG_DATA_LOW_A);
9524
9525 for (i = 0; i < 2; i++) {
9526 u32 debug0, debug11;
9527
9528 /* If the Ingress DMA Same State Counter ("timer") is less
9529 * than 1s, then we can reset our synthesized Stall Timer and
9530 * continue. If we have previously emitted warnings about a
9531 * potential stalled Ingress Queue, issue a note indicating
9532 * that the Ingress Queue has resumed forward progress.
9533 */
9534 if (idma_same_state_cnt[i] < idma->idma_1s_thresh) {
9535 if (idma->idma_stalled[i] >= SGE_IDMA_WARN_THRESH * hz)
9536 dev_warn(adapter->pdev_dev, "SGE idma%d, queue %u, "
9537 "resumed after %d seconds\n",
9538 i, idma->idma_qid[i],
9539 idma->idma_stalled[i] / hz);
9540 idma->idma_stalled[i] = 0;
9541 continue;
9542 }
9543
9544 /* Synthesize an SGE Ingress DMA Same State Timer in the Hz
9545 * domain. The first time we get here it'll be because we
9546 * passed the 1s Threshold; each additional time it'll be
9547 * because the RX Timer Callback is being fired on its regular
9548 * schedule.
9549 *
9550 * If the stall is below our Potential Hung Ingress Queue
9551 * Warning Threshold, continue.
9552 */
9553 if (idma->idma_stalled[i] == 0) {
9554 idma->idma_stalled[i] = hz;
9555 idma->idma_warn[i] = 0;
9556 } else {
9557 idma->idma_stalled[i] += ticks;
9558 idma->idma_warn[i] -= ticks;
9559 }
9560
9561 if (idma->idma_stalled[i] < SGE_IDMA_WARN_THRESH * hz)
9562 continue;
9563
9564 /* We'll issue a warning every SGE_IDMA_WARN_REPEAT seconds.
9565 */
9566 if (idma->idma_warn[i] > 0)
9567 continue;
9568 idma->idma_warn[i] = SGE_IDMA_WARN_REPEAT * hz;
9569
9570 /* Read and save the SGE IDMA State and Queue ID information.
9571 * We do this every time in case it changes across time ...
9572 * can't be too careful ...
9573 */
9574 t4_write_reg(adapter, SGE_DEBUG_INDEX_A, 0);
9575 debug0 = t4_read_reg(adapter, SGE_DEBUG_DATA_LOW_A);
9576 idma->idma_state[i] = (debug0 >> (i * 9)) & 0x3f;
9577
9578 t4_write_reg(adapter, SGE_DEBUG_INDEX_A, 11);
9579 debug11 = t4_read_reg(adapter, SGE_DEBUG_DATA_LOW_A);
9580 idma->idma_qid[i] = (debug11 >> (i * 16)) & 0xffff;
9581
9582 dev_warn(adapter->pdev_dev, "SGE idma%u, queue %u, potentially stuck in "
9583 "state %u for %d seconds (debug0=%#x, debug11=%#x)\n",
9584 i, idma->idma_qid[i], idma->idma_state[i],
9585 idma->idma_stalled[i] / hz,
9586 debug0, debug11);
9587 t4_sge_decode_idma_state(adapter, idma->idma_state[i]);
9588 }
9589}
858aa65c 9590
4da18741
AV
9591/**
9592 * t4_load_cfg - download config file
9593 * @adap: the adapter
9594 * @cfg_data: the cfg text file to write
9595 * @size: text file size
9596 *
9597 * Write the supplied config text file to the card's serial flash.
9598 */
9599int t4_load_cfg(struct adapter *adap, const u8 *cfg_data, unsigned int size)
9600{
9601 int ret, i, n, cfg_addr;
9602 unsigned int addr;
9603 unsigned int flash_cfg_start_sec;
9604 unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
9605
9606 cfg_addr = t4_flash_cfg_addr(adap);
9607 if (cfg_addr < 0)
9608 return cfg_addr;
9609
9610 addr = cfg_addr;
9611 flash_cfg_start_sec = addr / SF_SEC_SIZE;
9612
9613 if (size > FLASH_CFG_MAX_SIZE) {
9614 dev_err(adap->pdev_dev, "cfg file too large, max is %u bytes\n",
9615 FLASH_CFG_MAX_SIZE);
9616 return -EFBIG;
9617 }
9618
9619 i = DIV_ROUND_UP(FLASH_CFG_MAX_SIZE, /* # of sectors spanned */
9620 sf_sec_size);
9621 ret = t4_flash_erase_sectors(adap, flash_cfg_start_sec,
9622 flash_cfg_start_sec + i - 1);
9623 /* If size == 0 then we're simply erasing the FLASH sectors associated
9624 * with the on-adapter Firmware Configuration File.
9625 */
9626 if (ret || size == 0)
9627 goto out;
9628
9629 /* this will write to the flash up to SF_PAGE_SIZE at a time */
9630 for (i = 0; i < size; i += SF_PAGE_SIZE) {
9631 if ((size - i) < SF_PAGE_SIZE)
9632 n = size - i;
9633 else
9634 n = SF_PAGE_SIZE;
9635 ret = t4_write_flash(adap, addr, n, cfg_data);
9636 if (ret)
9637 goto out;
9638
9639 addr += SF_PAGE_SIZE;
9640 cfg_data += SF_PAGE_SIZE;
9641 }
9642
9643out:
9644 if (ret)
9645 dev_err(adap->pdev_dev, "config file %s failed %d\n",
9646 (size == 0 ? "clear" : "download"), ret);
9647 return ret;
9648}
9649
858aa65c
HS
9650/**
 9651 *	t4_set_vf_mac_acl - Set MAC address(es) for the specified VF
9652 * @adapter: The adapter
9653 * @vf: one of the VFs instantiated by the specified PF
9654 * @naddr: the number of MAC addresses
9655 * @addr: the MAC address(es) to be set to the specified VF
9656 */
9657int t4_set_vf_mac_acl(struct adapter *adapter, unsigned int vf,
9658 unsigned int naddr, u8 *addr)
9659{
9660 struct fw_acl_mac_cmd cmd;
9661
9662 memset(&cmd, 0, sizeof(cmd));
9663 cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_ACL_MAC_CMD) |
9664 FW_CMD_REQUEST_F |
9665 FW_CMD_WRITE_F |
9666 FW_ACL_MAC_CMD_PFN_V(adapter->pf) |
9667 FW_ACL_MAC_CMD_VFN_V(vf));
9668
9669 /* Note: Do not enable the ACL */
9670 cmd.en_to_len16 = cpu_to_be32((unsigned int)FW_LEN16(cmd));
9671 cmd.nmac = naddr;
9672
9673 switch (adapter->pf) {
9674 case 3:
9675 memcpy(cmd.macaddr3, addr, sizeof(cmd.macaddr3));
9676 break;
9677 case 2:
9678 memcpy(cmd.macaddr2, addr, sizeof(cmd.macaddr2));
9679 break;
9680 case 1:
9681 memcpy(cmd.macaddr1, addr, sizeof(cmd.macaddr1));
9682 break;
9683 case 0:
9684 memcpy(cmd.macaddr0, addr, sizeof(cmd.macaddr0));
9685 break;
9686 }
9687
9688 return t4_wr_mbox(adapter, adapter->mbox, &cmd, sizeof(cmd), &cmd);
9689}
b72a32da 9690
08c4901b
RL
9691/**
9692 * t4_read_pace_tbl - read the pace table
9693 * @adap: the adapter
9694 * @pace_vals: holds the returned values
9695 *
9696 * Returns the values of TP's pace table in microseconds.
9697 */
9698void t4_read_pace_tbl(struct adapter *adap, unsigned int pace_vals[NTX_SCHED])
9699{
9700 unsigned int i, v;
9701
9702 for (i = 0; i < NTX_SCHED; i++) {
9703 t4_write_reg(adap, TP_PACE_TABLE_A, 0xffff0000 + i);
9704 v = t4_read_reg(adap, TP_PACE_TABLE_A);
9705 pace_vals[i] = dack_ticks_to_usec(adap, v);
9706 }
9707}
9708
9709/**
9710 * t4_get_tx_sched - get the configuration of a Tx HW traffic scheduler
9711 * @adap: the adapter
9712 * @sched: the scheduler index
 9713 *	@kbps: the transmit rate in Kbps (kilobits per second)
9714 * @ipg: the interpacket delay in tenths of nanoseconds
9715 * @sleep_ok: if true we may sleep while awaiting command completion
9716 *
9717 * Return the current configuration of a HW Tx scheduler.
9718 */
9719void t4_get_tx_sched(struct adapter *adap, unsigned int sched,
9720 unsigned int *kbps, unsigned int *ipg, bool sleep_ok)
9721{
9722 unsigned int v, addr, bpt, cpt;
9723
9724 if (kbps) {
9725 addr = TP_TX_MOD_Q1_Q0_RATE_LIMIT_A - sched / 2;
9726 t4_tp_tm_pio_read(adap, &v, 1, addr, sleep_ok);
9727 if (sched & 1)
9728 v >>= 16;
9729 bpt = (v >> 8) & 0xff;
9730 cpt = v & 0xff;
9731 if (!cpt) {
9732 *kbps = 0; /* scheduler disabled */
9733 } else {
9734 v = (adap->params.vpd.cclk * 1000) / cpt; /* ticks/s */
9735 *kbps = (v * bpt) / 125;
9736 }
9737 }
9738 if (ipg) {
9739 addr = TP_TX_MOD_Q1_Q0_TIMER_SEPARATOR_A - sched / 2;
9740 t4_tp_tm_pio_read(adap, &v, 1, addr, sleep_ok);
9741 if (sched & 1)
9742 v >>= 16;
9743 v &= 0xffff;
9744 *ipg = (10000 * v) / core_ticks_per_usec(adap);
9745 }
9746}
9747
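/* Worked example of the Kbps conversion in t4_get_tx_sched() (numbers
 * illustrative): with the default vpd.cclk of 50000 (i.e. a 50MHz Core
 * Clock), cpt = 200 and bpt = 5, v = 50,000,000 / 200 = 250,000 scheduler
 * ticks/s and *kbps = 250,000 * 5 / 125 = 10,000, i.e. a 10 Mb/s class.
 */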
9e5c598c
RL
9748/* t4_sge_ctxt_rd - read an SGE context through FW
9749 * @adap: the adapter
9750 * @mbox: mailbox to use for the FW command
9751 * @cid: the context id
9752 * @ctype: the context type
9753 * @data: where to store the context data
9754 *
9755 * Issues a FW command through the given mailbox to read an SGE context.
9756 */
9757int t4_sge_ctxt_rd(struct adapter *adap, unsigned int mbox, unsigned int cid,
9758 enum ctxt_type ctype, u32 *data)
9759{
9760 struct fw_ldst_cmd c;
9761 int ret;
9762
9763 if (ctype == CTXT_FLM)
9764 ret = FW_LDST_ADDRSPC_SGE_FLMC;
9765 else
9766 ret = FW_LDST_ADDRSPC_SGE_CONMC;
9767
9768 memset(&c, 0, sizeof(c));
9769 c.op_to_addrspace = cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) |
9770 FW_CMD_REQUEST_F | FW_CMD_READ_F |
9771 FW_LDST_CMD_ADDRSPACE_V(ret));
9772 c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
9773 c.u.idctxt.physid = cpu_to_be32(cid);
9774
9775 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
9776 if (ret == 0) {
9777 data[0] = be32_to_cpu(c.u.idctxt.ctxt_data0);
9778 data[1] = be32_to_cpu(c.u.idctxt.ctxt_data1);
9779 data[2] = be32_to_cpu(c.u.idctxt.ctxt_data2);
9780 data[3] = be32_to_cpu(c.u.idctxt.ctxt_data3);
9781 data[4] = be32_to_cpu(c.u.idctxt.ctxt_data4);
9782 data[5] = be32_to_cpu(c.u.idctxt.ctxt_data5);
9783 }
9784 return ret;
9785}
9786
9787/**
9788 * t4_sge_ctxt_rd_bd - read an SGE context bypassing FW
9789 * @adap: the adapter
9790 * @cid: the context id
9791 * @ctype: the context type
9792 * @data: where to store the context data
9793 *
9794 * Reads an SGE context directly, bypassing FW. This is only for
9795 * debugging when FW is unavailable.
9796 */
9797int t4_sge_ctxt_rd_bd(struct adapter *adap, unsigned int cid,
9798 enum ctxt_type ctype, u32 *data)
9799{
9800 int i, ret;
9801
9802 t4_write_reg(adap, SGE_CTXT_CMD_A, CTXTQID_V(cid) | CTXTTYPE_V(ctype));
9803 ret = t4_wait_op_done(adap, SGE_CTXT_CMD_A, BUSY_F, 0, 3, 1);
9804 if (!ret)
9805 for (i = SGE_CTXT_DATA0_A; i <= SGE_CTXT_DATA5_A; i += 4)
9806 *data++ = t4_read_reg(adap, i);
9807 return ret;
9808}
9809
b72a32da
RL
9810int t4_sched_params(struct adapter *adapter, int type, int level, int mode,
9811 int rateunit, int ratemode, int channel, int class,
9812 int minrate, int maxrate, int weight, int pktsize)
9813{
9814 struct fw_sched_cmd cmd;
9815
9816 memset(&cmd, 0, sizeof(cmd));
9817 cmd.op_to_write = cpu_to_be32(FW_CMD_OP_V(FW_SCHED_CMD) |
9818 FW_CMD_REQUEST_F |
9819 FW_CMD_WRITE_F);
9820 cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
9821
9822 cmd.u.params.sc = FW_SCHED_SC_PARAMS;
9823 cmd.u.params.type = type;
9824 cmd.u.params.level = level;
9825 cmd.u.params.mode = mode;
9826 cmd.u.params.ch = channel;
9827 cmd.u.params.cl = class;
9828 cmd.u.params.unit = rateunit;
9829 cmd.u.params.rate = ratemode;
9830 cmd.u.params.min = cpu_to_be32(minrate);
9831 cmd.u.params.max = cpu_to_be32(maxrate);
9832 cmd.u.params.weight = cpu_to_be16(weight);
9833 cmd.u.params.pktsize = cpu_to_be16(pktsize);
9834
9835 return t4_wr_mbox_meat(adapter, adapter->mbox, &cmd, sizeof(cmd),
9836 NULL, 1);
9837}
f56ec676
AV
9838
9839/**
9840 * t4_i2c_rd - read I2C data from adapter
 9841 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 9842 *	@port: Port number if per-port device; <0 if not
9843 * @devid: per-port device ID or absolute device ID
9844 * @offset: byte offset into device I2C space
9845 * @len: byte length of I2C space data
9846 * @buf: buffer in which to return I2C data
9847 *
9848 * Reads the I2C data from the indicated device and location.
9849 */
9850int t4_i2c_rd(struct adapter *adap, unsigned int mbox, int port,
9851 unsigned int devid, unsigned int offset,
9852 unsigned int len, u8 *buf)
9853{
9854 struct fw_ldst_cmd ldst_cmd, ldst_rpl;
9855 unsigned int i2c_max = sizeof(ldst_cmd.u.i2c.data);
9856 int ret = 0;
9857
9858 if (len > I2C_PAGE_SIZE)
9859 return -EINVAL;
9860
 9861	/* Don't allow reads that span multiple pages */
9862 if (offset < I2C_PAGE_SIZE && offset + len > I2C_PAGE_SIZE)
9863 return -EINVAL;
9864
9865 memset(&ldst_cmd, 0, sizeof(ldst_cmd));
9866 ldst_cmd.op_to_addrspace =
9867 cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) |
9868 FW_CMD_REQUEST_F |
9869 FW_CMD_READ_F |
9870 FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_I2C));
9871 ldst_cmd.cycles_to_len16 = cpu_to_be32(FW_LEN16(ldst_cmd));
9872 ldst_cmd.u.i2c.pid = (port < 0 ? 0xff : port);
9873 ldst_cmd.u.i2c.did = devid;
9874
9875 while (len > 0) {
9876 unsigned int i2c_len = (len < i2c_max) ? len : i2c_max;
9877
9878 ldst_cmd.u.i2c.boffset = offset;
9879 ldst_cmd.u.i2c.blen = i2c_len;
9880
9881 ret = t4_wr_mbox(adap, mbox, &ldst_cmd, sizeof(ldst_cmd),
9882 &ldst_rpl);
9883 if (ret)
9884 break;
9885
9886 memcpy(buf, ldst_rpl.u.i2c.data, i2c_len);
9887 offset += i2c_len;
9888 buf += i2c_len;
9889 len -= i2c_len;
9890 }
9891
9892 return ret;
9893}
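/* Illustrative usage sketch (not part of the driver proper): reading the
 * first 32 bytes of a port module's ID EEPROM, assuming the conventional
 * 0xa0 I2C device address used by SFP+/QSFP modules and a port_info "pi":
 *
 *	u8 eeprom[32];
 *	int err;
 *
 *	err = t4_i2c_rd(adap, adap->mbox, pi->port_id, 0xa0, 0,
 *			sizeof(eeprom), eeprom);
 */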
9d5fd927
GG
9894
9895/**
9896 * t4_set_vlan_acl - Set a VLAN id for the specified VF
9897 * @adapter: the adapter
9898 * @mbox: mailbox to use for the FW command
9899 * @vf: one of the VFs instantiated by the specified PF
9900 * @vlan: The vlanid to be set
9901 */
9902int t4_set_vlan_acl(struct adapter *adap, unsigned int mbox, unsigned int vf,
9903 u16 vlan)
9904{
9905 struct fw_acl_vlan_cmd vlan_cmd;
9906 unsigned int enable;
9907
9908 enable = (vlan ? FW_ACL_VLAN_CMD_EN_F : 0);
9909 memset(&vlan_cmd, 0, sizeof(vlan_cmd));
9910 vlan_cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_ACL_VLAN_CMD) |
9911 FW_CMD_REQUEST_F |
9912 FW_CMD_WRITE_F |
9913 FW_CMD_EXEC_F |
9914 FW_ACL_VLAN_CMD_PFN_V(adap->pf) |
9915 FW_ACL_VLAN_CMD_VFN_V(vf));
9916 vlan_cmd.en_to_len16 = cpu_to_be32(enable | FW_LEN16(vlan_cmd));
 9917	/* Drop all packets that do not match the VLAN id */
9918 vlan_cmd.dropnovlan_fm = FW_ACL_VLAN_CMD_FM_F;
9919 if (enable != 0) {
9920 vlan_cmd.nvlan = 1;
9921 vlan_cmd.vlanid[0] = cpu_to_be16(vlan);
9922 }
9923
9924 return t4_wr_mbox(adap, adap->mbox, &vlan_cmd, sizeof(vlan_cmd), NULL);
9925}