cxgb4: Add support for devlog
drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
/*
 * This file is part of the Chelsio T4 Ethernet driver for Linux.
 *
 * Copyright (c) 2003-2014 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/delay.h>
#include "cxgb4.h"
#include "t4_regs.h"
#include "t4_values.h"
#include "t4fw_api.h"

/**
 * t4_wait_op_done_val - wait until an operation is completed
 * @adapter: the adapter performing the operation
 * @reg: the register to check for completion
 * @mask: a single-bit field within @reg that indicates completion
 * @polarity: the value of the field when the operation is completed
 * @attempts: number of check iterations
 * @delay: delay in usecs between iterations
 * @valp: where to store the value of the register at completion time
 *
 * Wait until an operation is completed by checking a bit in a register
 * up to @attempts times.  If @valp is not NULL the value of the register
 * at the time it indicated completion is stored there.  Returns 0 if the
 * operation completes and -EAGAIN otherwise.
 */
static int t4_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
			       int polarity, int attempts, int delay, u32 *valp)
{
	while (1) {
		u32 val = t4_read_reg(adapter, reg);

		if (!!(val & mask) == polarity) {
			if (valp)
				*valp = val;
			return 0;
		}
		if (--attempts == 0)
			return -EAGAIN;
		if (delay)
			udelay(delay);
	}
}

static inline int t4_wait_op_done(struct adapter *adapter, int reg, u32 mask,
				  int polarity, int attempts, int delay)
{
	return t4_wait_op_done_val(adapter, reg, mask, polarity, attempts,
				   delay, NULL);
}
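
/* Usage sketch: the serial-flash helpers later in this file poll the
 * SF_OP_A register until SF_BUSY_F deasserts (polarity 0), checking up
 * to SF_ATTEMPTS times with a 5 us delay between polls:
 *
 *	ret = t4_wait_op_done(adapter, SF_OP_A, SF_BUSY_F, 0,
 *			      SF_ATTEMPTS, 5);
 *
 * A non-zero return (-EAGAIN) means the bit never reached the requested
 * polarity within the attempt budget.
 */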

/**
 * t4_set_reg_field - set a register field to a value
 * @adapter: the adapter to program
 * @addr: the register address
 * @mask: specifies the portion of the register to modify
 * @val: the new value for the register field
 *
 * Sets a register field specified by the supplied mask to the
 * given value.
 */
void t4_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask,
		      u32 val)
{
	u32 v = t4_read_reg(adapter, addr) & ~mask;

	t4_write_reg(adapter, addr, v | val);
	(void) t4_read_reg(adapter, addr);                  /* flush */
}
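
/* Usage sketch: replace only the bits selected by @mask in a single
 * read-modify-write, e.g. rewriting a hypothetical 2-bit field at
 * bits [5:4] of a register:
 *
 *	t4_set_reg_field(adap, addr, 0x3 << 4, new_val << 4);
 *
 * @val must already be shifted into the field's position; bits outside
 * @mask are preserved.
 */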

/**
 * t4_read_indirect - read indirectly addressed registers
 * @adap: the adapter
 * @addr_reg: register holding the indirect address
 * @data_reg: register holding the value of the indirect register
 * @vals: where the read register values are stored
 * @nregs: how many indirect registers to read
 * @start_idx: index of first indirect register to read
 *
 * Reads registers that are accessed indirectly through an address/data
 * register pair.
 */
void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
		      unsigned int data_reg, u32 *vals,
		      unsigned int nregs, unsigned int start_idx)
{
	while (nregs--) {
		t4_write_reg(adap, addr_reg, start_idx);
		*vals++ = t4_read_reg(adap, data_reg);
		start_idx++;
	}
}

/**
 * t4_write_indirect - write indirectly addressed registers
 * @adap: the adapter
 * @addr_reg: register holding the indirect addresses
 * @data_reg: register holding the value for the indirect registers
 * @vals: values to write
 * @nregs: how many indirect registers to write
 * @start_idx: address of first indirect register to write
 *
 * Writes a sequential block of registers that are accessed indirectly
 * through an address/data register pair.
 */
void t4_write_indirect(struct adapter *adap, unsigned int addr_reg,
		       unsigned int data_reg, const u32 *vals,
		       unsigned int nregs, unsigned int start_idx)
{
	while (nregs--) {
		t4_write_reg(adap, addr_reg, start_idx++);
		t4_write_reg(adap, data_reg, *vals++);
	}
}
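
/* Usage sketch (assuming the TP MIB index/data register pair used
 * elsewhere in this driver): read four consecutive indirectly-addressed
 * registers starting at index 0:
 *
 *	u32 vals[4];
 *
 *	t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, vals, 4, 0);
 */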

/*
 * Read a 32-bit PCI Configuration Space register via the PCI-E backdoor
 * mechanism.  This guarantees that we get the real value even if we're
 * operating within a Virtual Machine and the Hypervisor is trapping our
 * Configuration Space accesses.
 */
void t4_hw_pci_read_cfg4(struct adapter *adap, int reg, u32 *val)
{
	u32 req = ENABLE_F | FUNCTION_V(adap->fn) | REGISTER_V(reg);

	if (is_t4(adap->params.chip))
		req |= LOCALCFG_F;

	t4_write_reg(adap, PCIE_CFG_SPACE_REQ_A, req);
	*val = t4_read_reg(adap, PCIE_CFG_SPACE_DATA_A);

	/* Reset ENABLE to 0 so reads of PCIE_CFG_SPACE_DATA won't cause a
	 * Configuration Space read.  (None of the other fields matter when
	 * ENABLE is 0 so a simple register write is easier than a
	 * read-modify-write via t4_set_reg_field().)
	 */
	t4_write_reg(adap, PCIE_CFG_SPACE_REQ_A, 0);
}

/*
 * t4_report_fw_error - report firmware error
 * @adap: the adapter
 *
 * The adapter firmware can indicate error conditions to the host.
 * If the firmware has indicated an error, print out the reason for
 * the firmware error.
 */
static void t4_report_fw_error(struct adapter *adap)
{
	static const char *const reason[] = {
		"Crash",                        /* PCIE_FW_EVAL_CRASH */
		"During Device Preparation",    /* PCIE_FW_EVAL_PREP */
		"During Device Configuration",  /* PCIE_FW_EVAL_CONF */
		"During Device Initialization", /* PCIE_FW_EVAL_INIT */
		"Unexpected Event",             /* PCIE_FW_EVAL_UNEXPECTEDEVENT */
		"Insufficient Airflow",         /* PCIE_FW_EVAL_OVERHEAT */
		"Device Shutdown",              /* PCIE_FW_EVAL_DEVICESHUTDOWN */
		"Reserved",                     /* reserved */
	};
	u32 pcie_fw;

	pcie_fw = t4_read_reg(adap, PCIE_FW_A);
	if (pcie_fw & PCIE_FW_ERR_F)
		dev_err(adap->pdev_dev, "Firmware reports adapter error: %s\n",
			reason[PCIE_FW_EVAL_G(pcie_fw)]);
}

/*
 * Get the reply to a mailbox command and store it in @rpl in big-endian order.
 */
static void get_mbox_rpl(struct adapter *adap, __be64 *rpl, int nflit,
			 u32 mbox_addr)
{
	for ( ; nflit; nflit--, mbox_addr += 8)
		*rpl++ = cpu_to_be64(t4_read_reg64(adap, mbox_addr));
}

/*
 * Handle a FW assertion reported in a mailbox.
 */
static void fw_asrt(struct adapter *adap, u32 mbox_addr)
{
	struct fw_debug_cmd asrt;

	get_mbox_rpl(adap, (__be64 *)&asrt, sizeof(asrt) / 8, mbox_addr);
	dev_alert(adap->pdev_dev,
		  "FW assertion at %.16s:%u, val0 %#x, val1 %#x\n",
		  asrt.u.assert.filename_0_7, ntohl(asrt.u.assert.line),
		  ntohl(asrt.u.assert.x), ntohl(asrt.u.assert.y));
}

static void dump_mbox(struct adapter *adap, int mbox, u32 data_reg)
{
	dev_err(adap->pdev_dev,
		"mbox %d: %llx %llx %llx %llx %llx %llx %llx %llx\n", mbox,
		(unsigned long long)t4_read_reg64(adap, data_reg),
		(unsigned long long)t4_read_reg64(adap, data_reg + 8),
		(unsigned long long)t4_read_reg64(adap, data_reg + 16),
		(unsigned long long)t4_read_reg64(adap, data_reg + 24),
		(unsigned long long)t4_read_reg64(adap, data_reg + 32),
		(unsigned long long)t4_read_reg64(adap, data_reg + 40),
		(unsigned long long)t4_read_reg64(adap, data_reg + 48),
		(unsigned long long)t4_read_reg64(adap, data_reg + 56));
}

/**
 * t4_wr_mbox_meat - send a command to FW through the given mailbox
 * @adap: the adapter
 * @mbox: index of the mailbox to use
 * @cmd: the command to write
 * @size: command length in bytes
 * @rpl: where to optionally store the reply
 * @sleep_ok: if true we may sleep while awaiting command completion
 *
 * Sends the given command to FW through the selected mailbox and waits
 * for the FW to execute the command.  If @rpl is not %NULL it is used to
 * store the FW's reply to the command.  The command and its optional
 * reply are of the same length.  FW can take up to %FW_CMD_MAX_TIMEOUT ms
 * to respond.  @sleep_ok determines whether we may sleep while awaiting
 * the response.  If sleeping is allowed we use progressive backoff
 * otherwise we spin.
 *
 * The return value is 0 on success or a negative errno on failure.  A
 * failure can happen either because we are not able to execute the
 * command or FW executes it but signals an error.  In the latter case
 * the return value is the error code indicated by FW (negated).
 */
int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
		    void *rpl, bool sleep_ok)
{
	static const int delay[] = {
		1, 1, 3, 5, 10, 10, 20, 50, 100, 200
	};

	u32 v;
	u64 res;
	int i, ms, delay_idx;
	const __be64 *p = cmd;
	u32 data_reg = PF_REG(mbox, CIM_PF_MAILBOX_DATA_A);
	u32 ctl_reg = PF_REG(mbox, CIM_PF_MAILBOX_CTRL_A);

	if ((size & 15) || size > MBOX_LEN)
		return -EINVAL;

	/*
	 * If the device is off-line, as in EEH, commands will time out.
	 * Fail them early so we don't waste time waiting.
	 */
	if (adap->pdev->error_state != pci_channel_io_normal)
		return -EIO;

	v = MBOWNER_G(t4_read_reg(adap, ctl_reg));
	for (i = 0; v == MBOX_OWNER_NONE && i < 3; i++)
		v = MBOWNER_G(t4_read_reg(adap, ctl_reg));

	if (v != MBOX_OWNER_DRV)
		return v ? -EBUSY : -ETIMEDOUT;

	for (i = 0; i < size; i += 8)
		t4_write_reg64(adap, data_reg + i, be64_to_cpu(*p++));

	t4_write_reg(adap, ctl_reg, MBMSGVALID_F | MBOWNER_V(MBOX_OWNER_FW));
	t4_read_reg(adap, ctl_reg);          /* flush write */

	delay_idx = 0;
	ms = delay[0];

	for (i = 0; i < FW_CMD_MAX_TIMEOUT; i += ms) {
		if (sleep_ok) {
			ms = delay[delay_idx];  /* last element may repeat */
			if (delay_idx < ARRAY_SIZE(delay) - 1)
				delay_idx++;
			msleep(ms);
		} else
			mdelay(ms);

		v = t4_read_reg(adap, ctl_reg);
		if (MBOWNER_G(v) == MBOX_OWNER_DRV) {
			if (!(v & MBMSGVALID_F)) {
				t4_write_reg(adap, ctl_reg, 0);
				continue;
			}

			res = t4_read_reg64(adap, data_reg);
			if (FW_CMD_OP_G(res >> 32) == FW_DEBUG_CMD) {
				fw_asrt(adap, data_reg);
				res = FW_CMD_RETVAL_V(EIO);
			} else if (rpl) {
				get_mbox_rpl(adap, rpl, size / 8, data_reg);
			}

			if (FW_CMD_RETVAL_G((int)res))
				dump_mbox(adap, mbox, data_reg);
			t4_write_reg(adap, ctl_reg, 0);
			return -FW_CMD_RETVAL_G((int)res);
		}
	}

	dump_mbox(adap, mbox, data_reg);
	dev_err(adap->pdev_dev, "command %#x in mailbox %d timed out\n",
		*(const u8 *)cmd, mbox);
	t4_report_fw_error(adap);
	return -ETIMEDOUT;
}
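
/* Usage sketch: drivers normally go through the t4_wr_mbox() wrapper
 * (sleep_ok = true).  A minimal firmware command, modeled on
 * t4_fw_reset() later in this driver (PIORST_F/PIORSTMODE_F are the
 * reset flags it passes):
 *
 *	struct fw_reset_cmd c;
 *
 *	memset(&c, 0, sizeof(c));
 *	c.op_to_write = htonl(FW_CMD_OP_V(FW_RESET_CMD) |
 *			      FW_CMD_REQUEST_F | FW_CMD_WRITE_F);
 *	c.retval_len16 = htonl(FW_LEN16(c));
 *	c.val = htonl(PIORST_F | PIORSTMODE_F);
 *	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
 */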

/**
 * t4_mc_read - read from MC through backdoor accesses
 * @adap: the adapter
 * @idx: which MC to access
 * @addr: address of first byte requested
 * @data: 64 bytes of data containing the requested address
 * @ecc: where to store the corresponding 64-bit ECC word
 *
 * Read 64 bytes of data from MC starting at a 64-byte-aligned address
 * that covers the requested address @addr.  If @ecc is not %NULL it
 * is assigned the 64-bit ECC word for the read data.
 */
int t4_mc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
{
	int i;
	u32 mc_bist_cmd, mc_bist_cmd_addr, mc_bist_cmd_len;
	u32 mc_bist_status_rdata, mc_bist_data_pattern;

	if (is_t4(adap->params.chip)) {
		mc_bist_cmd = MC_BIST_CMD_A;
		mc_bist_cmd_addr = MC_BIST_CMD_ADDR_A;
		mc_bist_cmd_len = MC_BIST_CMD_LEN_A;
		mc_bist_status_rdata = MC_BIST_STATUS_RDATA_A;
		mc_bist_data_pattern = MC_BIST_DATA_PATTERN_A;
	} else {
		mc_bist_cmd = MC_REG(MC_P_BIST_CMD_A, idx);
		mc_bist_cmd_addr = MC_REG(MC_P_BIST_CMD_ADDR_A, idx);
		mc_bist_cmd_len = MC_REG(MC_P_BIST_CMD_LEN_A, idx);
		mc_bist_status_rdata = MC_REG(MC_P_BIST_STATUS_RDATA_A, idx);
		mc_bist_data_pattern = MC_REG(MC_P_BIST_DATA_PATTERN_A, idx);
	}

	if (t4_read_reg(adap, mc_bist_cmd) & START_BIST_F)
		return -EBUSY;
	t4_write_reg(adap, mc_bist_cmd_addr, addr & ~0x3fU);
	t4_write_reg(adap, mc_bist_cmd_len, 64);
	t4_write_reg(adap, mc_bist_data_pattern, 0xc);
	t4_write_reg(adap, mc_bist_cmd, BIST_OPCODE_V(1) | START_BIST_F |
		     BIST_CMD_GAP_V(1));
	i = t4_wait_op_done(adap, mc_bist_cmd, START_BIST_F, 0, 10, 1);
	if (i)
		return i;

#define MC_DATA(i) MC_BIST_STATUS_REG(mc_bist_status_rdata, i)

	for (i = 15; i >= 0; i--)
		*data++ = htonl(t4_read_reg(adap, MC_DATA(i)));
	if (ecc)
		*ecc = t4_read_reg64(adap, MC_DATA(16));
#undef MC_DATA
	return 0;
}

/**
 * t4_edc_read - read from EDC through backdoor accesses
 * @adap: the adapter
 * @idx: which EDC to access
 * @addr: address of first byte requested
 * @data: 64 bytes of data containing the requested address
 * @ecc: where to store the corresponding 64-bit ECC word
 *
 * Read 64 bytes of data from EDC starting at a 64-byte-aligned address
 * that covers the requested address @addr.  If @ecc is not %NULL it
 * is assigned the 64-bit ECC word for the read data.
 */
int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
{
	int i;
	u32 edc_bist_cmd, edc_bist_cmd_addr, edc_bist_cmd_len;
	u32 edc_bist_cmd_data_pattern, edc_bist_status_rdata;

	if (is_t4(adap->params.chip)) {
		edc_bist_cmd = EDC_REG(EDC_BIST_CMD_A, idx);
		edc_bist_cmd_addr = EDC_REG(EDC_BIST_CMD_ADDR_A, idx);
		edc_bist_cmd_len = EDC_REG(EDC_BIST_CMD_LEN_A, idx);
		edc_bist_cmd_data_pattern = EDC_REG(EDC_BIST_DATA_PATTERN_A,
						    idx);
		edc_bist_status_rdata = EDC_REG(EDC_BIST_STATUS_RDATA_A,
						idx);
	} else {
		edc_bist_cmd = EDC_REG_T5(EDC_H_BIST_CMD_A, idx);
		edc_bist_cmd_addr = EDC_REG_T5(EDC_H_BIST_CMD_ADDR_A, idx);
		edc_bist_cmd_len = EDC_REG_T5(EDC_H_BIST_CMD_LEN_A, idx);
		edc_bist_cmd_data_pattern =
			EDC_REG_T5(EDC_H_BIST_DATA_PATTERN_A, idx);
		edc_bist_status_rdata =
			EDC_REG_T5(EDC_H_BIST_STATUS_RDATA_A, idx);
	}

	if (t4_read_reg(adap, edc_bist_cmd) & START_BIST_F)
		return -EBUSY;
	t4_write_reg(adap, edc_bist_cmd_addr, addr & ~0x3fU);
	t4_write_reg(adap, edc_bist_cmd_len, 64);
	t4_write_reg(adap, edc_bist_cmd_data_pattern, 0xc);
	t4_write_reg(adap, edc_bist_cmd,
		     BIST_OPCODE_V(1) | BIST_CMD_GAP_V(1) | START_BIST_F);
	i = t4_wait_op_done(adap, edc_bist_cmd, START_BIST_F, 0, 10, 1);
	if (i)
		return i;

#define EDC_DATA(i) (EDC_BIST_STATUS_REG(edc_bist_status_rdata, i))

	for (i = 15; i >= 0; i--)
		*data++ = htonl(t4_read_reg(adap, EDC_DATA(i)));
	if (ecc)
		*ecc = t4_read_reg64(adap, EDC_DATA(16));
#undef EDC_DATA
	return 0;
}
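
/* Usage sketch: fetch the 64-byte EDC0 line covering byte address
 * addr (the helper aligns the address down to 64 bytes itself):
 *
 *	__be32 line[16];
 *	u64 ecc;
 *
 *	ret = t4_edc_read(adap, 0, addr, line, &ecc);
 */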

/**
 * t4_memory_rw - read/write EDC 0, EDC 1 or MC via PCIE memory window
 * @adap: the adapter
 * @win: PCI-E Memory Window to use
 * @mtype: memory type: MEM_EDC0, MEM_EDC1 or MEM_MC
 * @addr: address within indicated memory type
 * @len: amount of memory to transfer
 * @buf: host memory buffer
 * @dir: direction of transfer T4_MEMORY_READ (1) or T4_MEMORY_WRITE (0)
 *
 * Reads/writes an [almost] arbitrary memory region in the firmware: the
 * firmware memory address and host buffer must be aligned on 32-bit
 * boundaries; the length may be arbitrary.  The memory is transferred as
 * a raw byte sequence from/to the firmware's memory.  If this memory
 * contains data structures which contain multi-byte integers, it's the
 * caller's responsibility to perform appropriate byte order conversions.
 */
int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr,
		 u32 len, __be32 *buf, int dir)
{
	u32 pos, offset, resid, memoffset;
	u32 edc_size, mc_size, win_pf, mem_reg, mem_aperture, mem_base;

	/* Argument sanity checks ...
	 */
	if (addr & 0x3)
		return -EINVAL;

	/* It's convenient to be able to handle lengths which aren't a
	 * multiple of 32-bits because we often end up transferring files to
	 * the firmware.  So we'll handle that by normalizing the length here
	 * and then handling any residual transfer at the end.
	 */
	resid = len & 0x3;
	len -= resid;

	/* Offset into the region of memory which is being accessed
	 * MEM_EDC0 = 0
	 * MEM_EDC1 = 1
	 * MEM_MC   = 2 -- T4
	 * MEM_MC0  = 2 -- For T5
	 * MEM_MC1  = 3 -- For T5
	 */
	edc_size = EDRAM0_SIZE_G(t4_read_reg(adap, MA_EDRAM0_BAR_A));
	if (mtype != MEM_MC1)
		memoffset = (mtype * (edc_size * 1024 * 1024));
	else {
		mc_size = EXT_MEM0_SIZE_G(t4_read_reg(adap,
						      MA_EXT_MEMORY1_BAR_A));
		memoffset = (MEM_MC0 * edc_size + mc_size) * 1024 * 1024;
	}

	/* Determine the PCIE_MEM_ACCESS_OFFSET */
	addr = addr + memoffset;

	/* Each PCI-E Memory Window is programmed with a window size -- or
	 * "aperture" -- which controls the granularity of its mapping onto
	 * adapter memory.  We need to grab that aperture in order to know
	 * how to use the specified window.  The window is also programmed
	 * with the base address of the Memory Window in BAR0's address
	 * space.  For T4 this is an absolute PCI-E Bus Address.  For T5
	 * the address is relative to BAR0.
	 */
	mem_reg = t4_read_reg(adap,
			      PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A,
						  win));
	mem_aperture = 1 << (WINDOW_G(mem_reg) + WINDOW_SHIFT_X);
	mem_base = PCIEOFST_G(mem_reg) << PCIEOFST_SHIFT_X;
	if (is_t4(adap->params.chip))
		mem_base -= adap->t4_bar0;
	win_pf = is_t4(adap->params.chip) ? 0 : PFNUM_V(adap->fn);

	/* Calculate our initial PCI-E Memory Window Position and Offset into
	 * that Window.
	 */
	pos = addr & ~(mem_aperture-1);
	offset = addr - pos;

	/* Set up initial PCI-E Memory Window to cover the start of our
	 * transfer.  (Read it back to ensure that changes propagate before we
	 * attempt to use the new value.)
	 */
	t4_write_reg(adap,
		     PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A, win),
		     pos | win_pf);
	t4_read_reg(adap,
		    PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A, win));

	/* Transfer data to/from the adapter as long as there's an integral
	 * number of 32-bit transfers to complete.
	 */
	while (len > 0) {
		if (dir == T4_MEMORY_READ)
			*buf++ = (__force __be32) t4_read_reg(adap,
							      mem_base + offset);
		else
			t4_write_reg(adap, mem_base + offset,
				     (__force u32) *buf++);
		offset += sizeof(__be32);
		len -= sizeof(__be32);

		/* If we've reached the end of our current window aperture,
		 * move the PCI-E Memory Window on to the next.  Note that
		 * doing this here after "len" may be 0 allows us to set up
		 * the PCI-E Memory Window for a possible final residual
		 * transfer below ...
		 */
		if (offset == mem_aperture) {
			pos += mem_aperture;
			offset = 0;
			t4_write_reg(adap,
				PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A,
						    win), pos | win_pf);
			t4_read_reg(adap,
				PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A,
						    win));
		}
	}

	/* If the original transfer had a length which wasn't a multiple of
	 * 32-bits, now's where we need to finish off the transfer of the
	 * residual amount.  The PCI-E Memory Window has already been moved
	 * above (if necessary) to cover this final transfer.
	 */
	if (resid) {
		union {
			__be32 word;
			char byte[4];
		} last;
		unsigned char *bp;
		int i;

		if (dir == T4_MEMORY_READ) {
			last.word = (__force __be32) t4_read_reg(adap,
							mem_base + offset);
			for (bp = (unsigned char *)buf, i = resid; i < 4; i++)
				bp[i] = last.byte[i];
		} else {
			last.word = *buf;
			for (i = resid; i < 4; i++)
				last.byte[i] = 0;
			t4_write_reg(adap, mem_base + offset,
				     (__force u32) last.word);
		}
	}

	return 0;
}
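
/* Usage sketch: read 128 bytes from the start of EDC0 through PCI-E
 * memory window 0 (the window index is an assumption; callers pick the
 * window they own):
 *
 *	__be32 buf[32];
 *
 *	ret = t4_memory_rw(adap, 0, MEM_EDC0, 0, sizeof(buf), buf,
 *			   T4_MEMORY_READ);
 */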

#define EEPROM_STAT_ADDR   0x7bfc
#define VPD_BASE           0x400
#define VPD_BASE_OLD       0
#define VPD_LEN            1024
#define CHELSIO_VPD_UNIQUE_ID 0x82

/**
 * t4_seeprom_wp - enable/disable EEPROM write protection
 * @adapter: the adapter
 * @enable: whether to enable or disable write protection
 *
 * Enables or disables write protection on the serial EEPROM.
 */
int t4_seeprom_wp(struct adapter *adapter, bool enable)
{
	unsigned int v = enable ? 0xc : 0;
	int ret = pci_write_vpd(adapter->pdev, EEPROM_STAT_ADDR, 4, &v);
	return ret < 0 ? ret : 0;
}

/**
 * get_vpd_params - read VPD parameters from VPD EEPROM
 * @adapter: adapter to read
 * @p: where to store the parameters
 *
 * Reads card parameters stored in VPD EEPROM.
 */
int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
{
	u32 cclk_param, cclk_val;
	int i, ret, addr;
	int ec, sn, pn;
	u8 *vpd, csum;
	unsigned int vpdr_len, kw_offset, id_len;

	vpd = vmalloc(VPD_LEN);
	if (!vpd)
		return -ENOMEM;

	ret = pci_read_vpd(adapter->pdev, VPD_BASE, sizeof(u32), vpd);
	if (ret < 0)
		goto out;

	/* The VPD shall have a unique identifier specified by the PCI SIG.
	 * For chelsio adapters, the identifier is 0x82.  The first byte of a
	 * VPD shall be CHELSIO_VPD_UNIQUE_ID (0x82).  The VPD programming
	 * software is expected to automatically put this entry at the
	 * beginning of the VPD.
	 */
	addr = *vpd == CHELSIO_VPD_UNIQUE_ID ? VPD_BASE : VPD_BASE_OLD;

	ret = pci_read_vpd(adapter->pdev, addr, VPD_LEN, vpd);
	if (ret < 0)
		goto out;

	if (vpd[0] != PCI_VPD_LRDT_ID_STRING) {
		dev_err(adapter->pdev_dev, "missing VPD ID string\n");
		ret = -EINVAL;
		goto out;
	}

	id_len = pci_vpd_lrdt_size(vpd);
	if (id_len > ID_LEN)
		id_len = ID_LEN;

	i = pci_vpd_find_tag(vpd, 0, VPD_LEN, PCI_VPD_LRDT_RO_DATA);
	if (i < 0) {
		dev_err(adapter->pdev_dev, "missing VPD-R section\n");
		ret = -EINVAL;
		goto out;
	}

	vpdr_len = pci_vpd_lrdt_size(&vpd[i]);
	kw_offset = i + PCI_VPD_LRDT_TAG_SIZE;
	if (vpdr_len + kw_offset > VPD_LEN) {
		dev_err(adapter->pdev_dev, "bad VPD-R length %u\n", vpdr_len);
		ret = -EINVAL;
		goto out;
	}

#define FIND_VPD_KW(var, name) do { \
	var = pci_vpd_find_info_keyword(vpd, kw_offset, vpdr_len, name); \
	if (var < 0) { \
		dev_err(adapter->pdev_dev, "missing VPD keyword " name "\n"); \
		ret = -EINVAL; \
		goto out; \
	} \
	var += PCI_VPD_INFO_FLD_HDR_SIZE; \
} while (0)

	FIND_VPD_KW(i, "RV");
	for (csum = 0; i >= 0; i--)
		csum += vpd[i];

	if (csum) {
		dev_err(adapter->pdev_dev,
			"corrupted VPD EEPROM, actual csum %u\n", csum);
		ret = -EINVAL;
		goto out;
	}

	FIND_VPD_KW(ec, "EC");
	FIND_VPD_KW(sn, "SN");
	FIND_VPD_KW(pn, "PN");
#undef FIND_VPD_KW

	memcpy(p->id, vpd + PCI_VPD_LRDT_TAG_SIZE, id_len);
	strim(p->id);
	memcpy(p->ec, vpd + ec, EC_LEN);
	strim(p->ec);
	i = pci_vpd_info_field_size(vpd + sn - PCI_VPD_INFO_FLD_HDR_SIZE);
	memcpy(p->sn, vpd + sn, min(i, SERNUM_LEN));
	strim(p->sn);
	i = pci_vpd_info_field_size(vpd + pn - PCI_VPD_INFO_FLD_HDR_SIZE);
	memcpy(p->pn, vpd + pn, min(i, PN_LEN));
	strim(p->pn);

	/*
	 * Ask firmware for the Core Clock since it knows how to translate the
	 * Reference Clock ('V2') VPD field into a Core Clock value ...
	 */
	cclk_param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
		      FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_CCLK));
	ret = t4_query_params(adapter, adapter->mbox, 0, 0,
			      1, &cclk_param, &cclk_val);

out:
	vfree(vpd);
	if (ret)
		return ret;
	p->cclk = cclk_val;

	return 0;
}
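
/* Usage sketch: fill in the adapter's cached VPD parameters (ID, EC,
 * serial/part number, core clock) during probe, assuming the usual
 * adapter->params.vpd destination:
 *
 *	ret = get_vpd_params(adapter, &adapter->params.vpd);
 *	if (ret < 0)
 *		return ret;
 */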

/* serial flash and firmware constants */
enum {
	SF_ATTEMPTS = 10,             /* max retries for SF operations */

	/* flash command opcodes */
	SF_PROG_PAGE    = 2,          /* program page */
	SF_WR_DISABLE   = 4,          /* disable writes */
	SF_RD_STATUS    = 5,          /* read status register */
	SF_WR_ENABLE    = 6,          /* enable writes */
	SF_RD_DATA_FAST = 0xb,        /* read flash */
	SF_RD_ID        = 0x9f,       /* read ID */
	SF_ERASE_SECTOR = 0xd8,       /* erase sector */

	FW_MAX_SIZE = 16 * SF_SEC_SIZE,
};

/**
 * sf1_read - read data from the serial flash
 * @adapter: the adapter
 * @byte_cnt: number of bytes to read
 * @cont: whether another operation will be chained
 * @lock: whether to lock SF for PL access only
 * @valp: where to store the read data
 *
 * Reads up to 4 bytes of data from the serial flash.  The location of
 * the read needs to be specified prior to calling this by issuing the
 * appropriate commands to the serial flash.
 */
static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
		    int lock, u32 *valp)
{
	int ret;

	if (!byte_cnt || byte_cnt > 4)
		return -EINVAL;
	if (t4_read_reg(adapter, SF_OP_A) & SF_BUSY_F)
		return -EBUSY;
	t4_write_reg(adapter, SF_OP_A, SF_LOCK_V(lock) |
		     SF_CONT_V(cont) | BYTECNT_V(byte_cnt - 1));
	ret = t4_wait_op_done(adapter, SF_OP_A, SF_BUSY_F, 0, SF_ATTEMPTS, 5);
	if (!ret)
		*valp = t4_read_reg(adapter, SF_DATA_A);
	return ret;
}

/**
 * sf1_write - write data to the serial flash
 * @adapter: the adapter
 * @byte_cnt: number of bytes to write
 * @cont: whether another operation will be chained
 * @lock: whether to lock SF for PL access only
 * @val: value to write
 *
 * Writes up to 4 bytes of data to the serial flash.  The location of
 * the write needs to be specified prior to calling this by issuing the
 * appropriate commands to the serial flash.
 */
static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont,
		     int lock, u32 val)
{
	if (!byte_cnt || byte_cnt > 4)
		return -EINVAL;
	if (t4_read_reg(adapter, SF_OP_A) & SF_BUSY_F)
		return -EBUSY;
	t4_write_reg(adapter, SF_DATA_A, val);
	t4_write_reg(adapter, SF_OP_A, SF_LOCK_V(lock) |
		     SF_CONT_V(cont) | BYTECNT_V(byte_cnt - 1) | OP_V(1));
	return t4_wait_op_done(adapter, SF_OP_A, SF_BUSY_F, 0, SF_ATTEMPTS, 5);
}

/**
 * flash_wait_op - wait for a flash operation to complete
 * @adapter: the adapter
 * @attempts: max number of polls of the status register
 * @delay: delay between polls in ms
 *
 * Wait for a flash operation to complete by polling the status register.
 */
static int flash_wait_op(struct adapter *adapter, int attempts, int delay)
{
	int ret;
	u32 status;

	while (1) {
		if ((ret = sf1_write(adapter, 1, 1, 1, SF_RD_STATUS)) != 0 ||
		    (ret = sf1_read(adapter, 1, 0, 1, &status)) != 0)
			return ret;
		if (!(status & 1))
			return 0;
		if (--attempts == 0)
			return -EAGAIN;
		if (delay)
			msleep(delay);
	}
}

/**
 * t4_read_flash - read words from serial flash
 * @adapter: the adapter
 * @addr: the start address for the read
 * @nwords: how many 32-bit words to read
 * @data: where to store the read data
 * @byte_oriented: whether to store data as bytes or as words
 *
 * Read the specified number of 32-bit words from the serial flash.
 * If @byte_oriented is set the read data is stored as a byte array
 * (i.e., big-endian), otherwise as 32-bit words in the platform's
 * natural endianness.
 */
static int t4_read_flash(struct adapter *adapter, unsigned int addr,
			 unsigned int nwords, u32 *data, int byte_oriented)
{
	int ret;

	if (addr + nwords * sizeof(u32) > adapter->params.sf_size || (addr & 3))
		return -EINVAL;

	addr = swab32(addr) | SF_RD_DATA_FAST;

	if ((ret = sf1_write(adapter, 4, 1, 0, addr)) != 0 ||
	    (ret = sf1_read(adapter, 1, 1, 0, data)) != 0)
		return ret;

	for ( ; nwords; nwords--, data++) {
		ret = sf1_read(adapter, 4, nwords > 1, nwords == 1, data);
		if (nwords == 1)
			t4_write_reg(adapter, SF_OP_A, 0);    /* unlock SF */
		if (ret)
			return ret;
		if (byte_oriented)
			*data = (__force __u32) (htonl(*data));
	}
	return 0;
}
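
/* Usage sketch: read a firmware header from flash in byte order
 * (byte_oriented = 1), as t4_prep_fw() below does:
 *
 *	struct fw_hdr hdr;
 *
 *	ret = t4_read_flash(adapter, FLASH_FW_START,
 *			    sizeof(hdr) / sizeof(u32), (u32 *)&hdr, 1);
 */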

/**
 * t4_write_flash - write up to a page of data to the serial flash
 * @adapter: the adapter
 * @addr: the start address to write
 * @n: length of data to write in bytes
 * @data: the data to write
 *
 * Writes up to a page of data (256 bytes) to the serial flash starting
 * at the given address.  All the data must be written to the same page.
 */
static int t4_write_flash(struct adapter *adapter, unsigned int addr,
			  unsigned int n, const u8 *data)
{
	int ret;
	u32 buf[64];
	unsigned int i, c, left, val, offset = addr & 0xff;

	if (addr >= adapter->params.sf_size || offset + n > SF_PAGE_SIZE)
		return -EINVAL;

	val = swab32(addr) | SF_PROG_PAGE;

	if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
	    (ret = sf1_write(adapter, 4, 1, 1, val)) != 0)
		goto unlock;

	for (left = n; left; left -= c) {
		c = min(left, 4U);
		for (val = 0, i = 0; i < c; ++i)
			val = (val << 8) + *data++;

		ret = sf1_write(adapter, c, c != left, 1, val);
		if (ret)
			goto unlock;
	}
	ret = flash_wait_op(adapter, 8, 1);
	if (ret)
		goto unlock;

	t4_write_reg(adapter, SF_OP_A, 0);    /* unlock SF */

	/* Read the page to verify the write succeeded */
	ret = t4_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf, 1);
	if (ret)
		return ret;

	if (memcmp(data - n, (u8 *)buf + offset, n)) {
		dev_err(adapter->pdev_dev,
			"failed to correctly write the flash page at %#x\n",
			addr);
		return -EIO;
	}
	return 0;

unlock:
	t4_write_reg(adapter, SF_OP_A, 0);    /* unlock SF */
	return ret;
}

/**
 * t4_get_fw_version - read the firmware version
 * @adapter: the adapter
 * @vers: where to place the version
 *
 * Reads the FW version from flash.
 */
int t4_get_fw_version(struct adapter *adapter, u32 *vers)
{
	return t4_read_flash(adapter, FLASH_FW_START +
			     offsetof(struct fw_hdr, fw_ver), 1,
			     vers, 0);
}

/**
 * t4_get_tp_version - read the TP microcode version
 * @adapter: the adapter
 * @vers: where to place the version
 *
 * Reads the TP microcode version from flash.
 */
int t4_get_tp_version(struct adapter *adapter, u32 *vers)
{
	return t4_read_flash(adapter, FLASH_FW_START +
			     offsetof(struct fw_hdr, tp_microcode_ver),
			     1, vers, 0);
}

/* Is the given firmware API compatible with the one the driver was compiled
 * with?
 */
static int fw_compatible(const struct fw_hdr *hdr1, const struct fw_hdr *hdr2)
{
	/* short circuit if it's the exact same firmware version */
	if (hdr1->chip == hdr2->chip && hdr1->fw_ver == hdr2->fw_ver)
		return 1;

#define SAME_INTF(x) (hdr1->intfver_##x == hdr2->intfver_##x)
	if (hdr1->chip == hdr2->chip && SAME_INTF(nic) && SAME_INTF(vnic) &&
	    SAME_INTF(ri) && SAME_INTF(iscsi) && SAME_INTF(fcoe))
		return 1;
#undef SAME_INTF

	return 0;
}

/* The firmware in the filesystem is usable, but should it be installed?
 * This routine explains itself in detail if it indicates the filesystem
 * firmware should be installed.
 */
static int should_install_fs_fw(struct adapter *adap, int card_fw_usable,
				int k, int c)
{
	const char *reason;

	if (!card_fw_usable) {
		reason = "incompatible or unusable";
		goto install;
	}

	if (k > c) {
		reason = "older than the version supported with this driver";
		goto install;
	}

	return 0;

install:
	dev_err(adap->pdev_dev, "firmware on card (%u.%u.%u.%u) is %s, "
		"installing firmware %u.%u.%u.%u on card.\n",
		FW_HDR_FW_VER_MAJOR_G(c), FW_HDR_FW_VER_MINOR_G(c),
		FW_HDR_FW_VER_MICRO_G(c), FW_HDR_FW_VER_BUILD_G(c), reason,
		FW_HDR_FW_VER_MAJOR_G(k), FW_HDR_FW_VER_MINOR_G(k),
		FW_HDR_FW_VER_MICRO_G(k), FW_HDR_FW_VER_BUILD_G(k));

	return 1;
}

int t4_prep_fw(struct adapter *adap, struct fw_info *fw_info,
	       const u8 *fw_data, unsigned int fw_size,
	       struct fw_hdr *card_fw, enum dev_state state,
	       int *reset)
{
	int ret, card_fw_usable, fs_fw_usable;
	const struct fw_hdr *fs_fw;
	const struct fw_hdr *drv_fw;

	drv_fw = &fw_info->fw_hdr;

	/* Read the header of the firmware on the card */
	ret = -t4_read_flash(adap, FLASH_FW_START,
			     sizeof(*card_fw) / sizeof(uint32_t),
			     (uint32_t *)card_fw, 1);
	if (ret == 0) {
		card_fw_usable = fw_compatible(drv_fw, (const void *)card_fw);
	} else {
		dev_err(adap->pdev_dev,
			"Unable to read card's firmware header: %d\n", ret);
		card_fw_usable = 0;
	}

	if (fw_data != NULL) {
		fs_fw = (const void *)fw_data;
		fs_fw_usable = fw_compatible(drv_fw, fs_fw);
	} else {
		fs_fw = NULL;
		fs_fw_usable = 0;
	}

	if (card_fw_usable && card_fw->fw_ver == drv_fw->fw_ver &&
	    (!fs_fw_usable || fs_fw->fw_ver == drv_fw->fw_ver)) {
		/* Common case: the firmware on the card is an exact match and
		 * the filesystem one is an exact match too, or the filesystem
		 * one is absent/incompatible.
		 */
	} else if (fs_fw_usable && state == DEV_STATE_UNINIT &&
		   should_install_fs_fw(adap, card_fw_usable,
					be32_to_cpu(fs_fw->fw_ver),
					be32_to_cpu(card_fw->fw_ver))) {
		ret = -t4_fw_upgrade(adap, adap->mbox, fw_data,
				     fw_size, 0);
		if (ret != 0) {
			dev_err(adap->pdev_dev,
				"failed to install firmware: %d\n", ret);
			goto bye;
		}

		/* Installed successfully, update the cached header too. */
		memcpy(card_fw, fs_fw, sizeof(*card_fw));
		card_fw_usable = 1;
		*reset = 0;	/* already reset as part of load_fw */
	}

	if (!card_fw_usable) {
		uint32_t d, c, k;

		d = be32_to_cpu(drv_fw->fw_ver);
		c = be32_to_cpu(card_fw->fw_ver);
		k = fs_fw ? be32_to_cpu(fs_fw->fw_ver) : 0;

		dev_err(adap->pdev_dev, "Cannot find a usable firmware: "
			"chip state %d, "
			"driver compiled with %d.%d.%d.%d, "
			"card has %d.%d.%d.%d, filesystem has %d.%d.%d.%d\n",
			state,
			FW_HDR_FW_VER_MAJOR_G(d), FW_HDR_FW_VER_MINOR_G(d),
			FW_HDR_FW_VER_MICRO_G(d), FW_HDR_FW_VER_BUILD_G(d),
			FW_HDR_FW_VER_MAJOR_G(c), FW_HDR_FW_VER_MINOR_G(c),
			FW_HDR_FW_VER_MICRO_G(c), FW_HDR_FW_VER_BUILD_G(c),
			FW_HDR_FW_VER_MAJOR_G(k), FW_HDR_FW_VER_MINOR_G(k),
			FW_HDR_FW_VER_MICRO_G(k), FW_HDR_FW_VER_BUILD_G(k));
		ret = EINVAL;
		goto bye;
	}

	/* We're using whatever's on the card and it's known to be good. */
	adap->params.fw_vers = be32_to_cpu(card_fw->fw_ver);
	adap->params.tp_vers = be32_to_cpu(card_fw->tp_microcode_ver);

bye:
	return ret;
}

/**
 * t4_flash_erase_sectors - erase a range of flash sectors
 * @adapter: the adapter
 * @start: the first sector to erase
 * @end: the last sector to erase
 *
 * Erases the sectors in the given inclusive range.
 */
static int t4_flash_erase_sectors(struct adapter *adapter, int start, int end)
{
	int ret = 0;

	if (end >= adapter->params.sf_nsec)
		return -EINVAL;

	while (start <= end) {
		if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
		    (ret = sf1_write(adapter, 4, 0, 1,
				     SF_ERASE_SECTOR | (start << 8))) != 0 ||
		    (ret = flash_wait_op(adapter, 14, 500)) != 0) {
			dev_err(adapter->pdev_dev,
				"erase of flash sector %d failed, error %d\n",
				start, ret);
			break;
		}
		start++;
	}
	t4_write_reg(adapter, SF_OP_A, 0);    /* unlock SF */
	return ret;
}

/**
 * t4_flash_cfg_addr - return the address of the flash configuration file
 * @adapter: the adapter
 *
 * Return the address within the flash where the Firmware Configuration
 * File is stored.
 */
unsigned int t4_flash_cfg_addr(struct adapter *adapter)
{
	if (adapter->params.sf_size == 0x100000)
		return FLASH_FPGA_CFG_START;
	else
		return FLASH_CFG_START;
}

/* Return TRUE if the specified firmware matches the adapter.  I.e. T4
 * firmware for T4 adapters, T5 firmware for T5 adapters, etc.  We go ahead
 * and emit an error message for mismatched firmware to save our caller the
 * effort ...
 */
static bool t4_fw_matches_chip(const struct adapter *adap,
			       const struct fw_hdr *hdr)
{
	/* The expression below will return FALSE for any unsupported adapter
	 * which will keep us "honest" in the future ...
	 */
	if ((is_t4(adap->params.chip) && hdr->chip == FW_HDR_CHIP_T4) ||
	    (is_t5(adap->params.chip) && hdr->chip == FW_HDR_CHIP_T5))
		return true;

	dev_err(adap->pdev_dev,
		"FW image (%d) is not suitable for this adapter (%d)\n",
		hdr->chip, CHELSIO_CHIP_VERSION(adap->params.chip));
	return false;
}

/**
 * t4_load_fw - download firmware
 * @adap: the adapter
 * @fw_data: the firmware image to write
 * @size: image size
 *
 * Write the supplied firmware image to the card's serial flash.
 */
int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
{
	u32 csum;
	int ret, addr;
	unsigned int i;
	u8 first_page[SF_PAGE_SIZE];
	const __be32 *p = (const __be32 *)fw_data;
	const struct fw_hdr *hdr = (const struct fw_hdr *)fw_data;
	unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
	unsigned int fw_img_start = adap->params.sf_fw_start;
	unsigned int fw_start_sec = fw_img_start / sf_sec_size;

	if (!size) {
		dev_err(adap->pdev_dev, "FW image has no data\n");
		return -EINVAL;
	}
	if (size & 511) {
		dev_err(adap->pdev_dev,
			"FW image size not multiple of 512 bytes\n");
		return -EINVAL;
	}
	if (ntohs(hdr->len512) * 512 != size) {
		dev_err(adap->pdev_dev,
			"FW image size differs from size in FW header\n");
		return -EINVAL;
	}
	if (size > FW_MAX_SIZE) {
		dev_err(adap->pdev_dev, "FW image too large, max is %u bytes\n",
			FW_MAX_SIZE);
		return -EFBIG;
	}
	if (!t4_fw_matches_chip(adap, hdr))
		return -EINVAL;

	for (csum = 0, i = 0; i < size / sizeof(csum); i++)
		csum += ntohl(p[i]);

	if (csum != 0xffffffff) {
		dev_err(adap->pdev_dev,
			"corrupted firmware image, checksum %#x\n", csum);
		return -EINVAL;
	}

	i = DIV_ROUND_UP(size, sf_sec_size);        /* # of sectors spanned */
	ret = t4_flash_erase_sectors(adap, fw_start_sec, fw_start_sec + i - 1);
	if (ret)
		goto out;

	/*
	 * We write the correct version at the end so the driver can see a bad
	 * version if the FW write fails.  Start by writing a copy of the
	 * first page with a bad version.
	 */
	memcpy(first_page, fw_data, SF_PAGE_SIZE);
	((struct fw_hdr *)first_page)->fw_ver = htonl(0xffffffff);
	ret = t4_write_flash(adap, fw_img_start, SF_PAGE_SIZE, first_page);
	if (ret)
		goto out;

	addr = fw_img_start;
	for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
		addr += SF_PAGE_SIZE;
		fw_data += SF_PAGE_SIZE;
		ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, fw_data);
		if (ret)
			goto out;
	}

	ret = t4_write_flash(adap,
			     fw_img_start + offsetof(struct fw_hdr, fw_ver),
			     sizeof(hdr->fw_ver), (const u8 *)&hdr->fw_ver);
out:
	if (ret)
		dev_err(adap->pdev_dev, "firmware download failed, error %d\n",
			ret);
	else
		ret = t4_get_fw_version(adap, &adap->params.fw_vers);
	return ret;
}
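
/* Usage sketch (firmware file name is an assumption): fetch an image
 * with the standard firmware loader and flash it:
 *
 *	const struct firmware *fw;
 *
 *	ret = request_firmware(&fw, "cxgb4/t4fw.bin", adap->pdev_dev);
 *	if (ret == 0) {
 *		ret = t4_load_fw(adap, fw->data, fw->size);
 *		release_firmware(fw);
 *	}
 */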

#define ADVERT_MASK (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G |\
		     FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_SPEED_40G | \
		     FW_PORT_CAP_ANEG)

/**
 * t4_link_start - apply link configuration to MAC/PHY
 * @adap: the adapter
 * @mbox: the Firmware Mailbox to use
 * @port: the Port ID
 * @lc: the requested link configuration
 *
 * Set up a port's MAC and PHY according to a desired link configuration.
 * - If the PHY can auto-negotiate first decide what to advertise, then
 *   enable/disable auto-negotiation as desired, and reset.
 * - If the PHY does not auto-negotiate just reset it.
 * - If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
 *   otherwise do it later based on the outcome of auto-negotiation.
 */
int t4_link_start(struct adapter *adap, unsigned int mbox, unsigned int port,
		  struct link_config *lc)
{
	struct fw_port_cmd c;
	unsigned int fc = 0, mdi = FW_PORT_CAP_MDI_V(FW_PORT_CAP_MDI_AUTO);

	lc->link_ok = 0;
	if (lc->requested_fc & PAUSE_RX)
		fc |= FW_PORT_CAP_FC_RX;
	if (lc->requested_fc & PAUSE_TX)
		fc |= FW_PORT_CAP_FC_TX;

	memset(&c, 0, sizeof(c));
	c.op_to_portid = htonl(FW_CMD_OP_V(FW_PORT_CMD) | FW_CMD_REQUEST_F |
			       FW_CMD_EXEC_F | FW_PORT_CMD_PORTID_V(port));
	c.action_to_len16 = htonl(FW_PORT_CMD_ACTION_V(FW_PORT_ACTION_L1_CFG) |
				  FW_LEN16(c));

	if (!(lc->supported & FW_PORT_CAP_ANEG)) {
		c.u.l1cfg.rcap = htonl((lc->supported & ADVERT_MASK) | fc);
		lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
	} else if (lc->autoneg == AUTONEG_DISABLE) {
		c.u.l1cfg.rcap = htonl(lc->requested_speed | fc | mdi);
		lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
	} else
		c.u.l1cfg.rcap = htonl(lc->advertising | fc | mdi);

	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}

/**
 * t4_restart_aneg - restart autonegotiation
 * @adap: the adapter
 * @mbox: mbox to use for the FW command
 * @port: the port id
 *
 * Restarts autonegotiation for the selected port.
 */
int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port)
{
	struct fw_port_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_portid = htonl(FW_CMD_OP_V(FW_PORT_CMD) | FW_CMD_REQUEST_F |
			       FW_CMD_EXEC_F | FW_PORT_CMD_PORTID_V(port));
	c.action_to_len16 = htonl(FW_PORT_CMD_ACTION_V(FW_PORT_ACTION_L1_CFG) |
				  FW_LEN16(c));
	c.u.l1cfg.rcap = htonl(FW_PORT_CAP_ANEG);
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}

typedef void (*int_handler_t)(struct adapter *adap);

struct intr_info {
	unsigned int mask;         /* bits to check in interrupt status */
	const char *msg;           /* message to print or NULL */
	short stat_idx;            /* stat counter to increment or -1 */
	unsigned short fatal;      /* whether the condition reported is fatal */
	int_handler_t int_handler; /* platform-specific int handler */
};

/**
 * t4_handle_intr_status - table driven interrupt handler
 * @adapter: the adapter that generated the interrupt
 * @reg: the interrupt status register to process
 * @acts: table of interrupt actions
 *
 * A table driven interrupt handler that applies a set of masks to an
 * interrupt status word and performs the corresponding actions if the
 * interrupts described by the mask have occurred.  The actions include
 * optionally emitting a warning or alert message.  The table is terminated
 * by an entry specifying mask 0.  Returns the number of fatal interrupt
 * conditions.
 */
static int t4_handle_intr_status(struct adapter *adapter, unsigned int reg,
				 const struct intr_info *acts)
{
	int fatal = 0;
	unsigned int mask = 0;
	unsigned int status = t4_read_reg(adapter, reg);

	for ( ; acts->mask; ++acts) {
		if (!(status & acts->mask))
			continue;
		if (acts->fatal) {
			fatal++;
			dev_alert(adapter->pdev_dev, "%s (0x%x)\n", acts->msg,
				  status & acts->mask);
		} else if (acts->msg && printk_ratelimit())
			dev_warn(adapter->pdev_dev, "%s (0x%x)\n", acts->msg,
				 status & acts->mask);
		if (acts->int_handler)
			acts->int_handler(adapter);
		mask |= acts->mask;
	}
	status &= mask;
	if (status)                           /* clear processed interrupts */
		t4_write_reg(adapter, reg, status);
	return fatal;
}

/*
 * Interrupt handler for the PCIE module.
 */
static void pcie_intr_handler(struct adapter *adapter)
{
	static const struct intr_info sysbus_intr_info[] = {
		{ RNPP_F, "RXNP array parity error", -1, 1 },
		{ RPCP_F, "RXPC array parity error", -1, 1 },
		{ RCIP_F, "RXCIF array parity error", -1, 1 },
		{ RCCP_F, "Rx completions control array parity error", -1, 1 },
		{ RFTP_F, "RXFT array parity error", -1, 1 },
		{ 0 }
	};
	static const struct intr_info pcie_port_intr_info[] = {
		{ TPCP_F, "TXPC array parity error", -1, 1 },
		{ TNPP_F, "TXNP array parity error", -1, 1 },
		{ TFTP_F, "TXFT array parity error", -1, 1 },
		{ TCAP_F, "TXCA array parity error", -1, 1 },
		{ TCIP_F, "TXCIF array parity error", -1, 1 },
		{ RCAP_F, "RXCA array parity error", -1, 1 },
		{ OTDD_F, "outbound request TLP discarded", -1, 1 },
		{ RDPE_F, "Rx data parity error", -1, 1 },
		{ TDUE_F, "Tx uncorrectable data error", -1, 1 },
		{ 0 }
	};
	static const struct intr_info pcie_intr_info[] = {
		{ MSIADDRLPERR_F, "MSI AddrL parity error", -1, 1 },
		{ MSIADDRHPERR_F, "MSI AddrH parity error", -1, 1 },
		{ MSIDATAPERR_F, "MSI data parity error", -1, 1 },
		{ MSIXADDRLPERR_F, "MSI-X AddrL parity error", -1, 1 },
		{ MSIXADDRHPERR_F, "MSI-X AddrH parity error", -1, 1 },
		{ MSIXDATAPERR_F, "MSI-X data parity error", -1, 1 },
		{ MSIXDIPERR_F, "MSI-X DI parity error", -1, 1 },
		{ PIOCPLPERR_F, "PCI PIO completion FIFO parity error", -1, 1 },
		{ PIOREQPERR_F, "PCI PIO request FIFO parity error", -1, 1 },
		{ TARTAGPERR_F, "PCI PCI target tag FIFO parity error", -1, 1 },
		{ CCNTPERR_F, "PCI CMD channel count parity error", -1, 1 },
		{ CREQPERR_F, "PCI CMD channel request parity error", -1, 1 },
		{ CRSPPERR_F, "PCI CMD channel response parity error", -1, 1 },
		{ DCNTPERR_F, "PCI DMA channel count parity error", -1, 1 },
		{ DREQPERR_F, "PCI DMA channel request parity error", -1, 1 },
		{ DRSPPERR_F, "PCI DMA channel response parity error", -1, 1 },
		{ HCNTPERR_F, "PCI HMA channel count parity error", -1, 1 },
		{ HREQPERR_F, "PCI HMA channel request parity error", -1, 1 },
		{ HRSPPERR_F, "PCI HMA channel response parity error", -1, 1 },
		{ CFGSNPPERR_F, "PCI config snoop FIFO parity error", -1, 1 },
		{ FIDPERR_F, "PCI FID parity error", -1, 1 },
		{ INTXCLRPERR_F, "PCI INTx clear parity error", -1, 1 },
		{ MATAGPERR_F, "PCI MA tag parity error", -1, 1 },
		{ PIOTAGPERR_F, "PCI PIO tag parity error", -1, 1 },
		{ RXCPLPERR_F, "PCI Rx completion parity error", -1, 1 },
		{ RXWRPERR_F, "PCI Rx write parity error", -1, 1 },
		{ RPLPERR_F, "PCI replay buffer parity error", -1, 1 },
		{ PCIESINT_F, "PCI core secondary fault", -1, 1 },
		{ PCIEPINT_F, "PCI core primary fault", -1, 1 },
		{ UNXSPLCPLERR_F, "PCI unexpected split completion error",
		  -1, 0 },
		{ 0 }
	};

	static struct intr_info t5_pcie_intr_info[] = {
		{ MSTGRPPERR_F, "Master Response Read Queue parity error",
		  -1, 1 },
		{ MSTTIMEOUTPERR_F, "Master Timeout FIFO parity error", -1, 1 },
		{ MSIXSTIPERR_F, "MSI-X STI SRAM parity error", -1, 1 },
		{ MSIXADDRLPERR_F, "MSI-X AddrL parity error", -1, 1 },
		{ MSIXADDRHPERR_F, "MSI-X AddrH parity error", -1, 1 },
		{ MSIXDATAPERR_F, "MSI-X data parity error", -1, 1 },
		{ MSIXDIPERR_F, "MSI-X DI parity error", -1, 1 },
		{ PIOCPLGRPPERR_F, "PCI PIO completion Group FIFO parity error",
		  -1, 1 },
		{ PIOREQGRPPERR_F, "PCI PIO request Group FIFO parity error",
		  -1, 1 },
		{ TARTAGPERR_F, "PCI PCI target tag FIFO parity error", -1, 1 },
		{ MSTTAGQPERR_F, "PCI master tag queue parity error", -1, 1 },
		{ CREQPERR_F, "PCI CMD channel request parity error", -1, 1 },
		{ CRSPPERR_F, "PCI CMD channel response parity error", -1, 1 },
		{ DREQWRPERR_F, "PCI DMA channel write request parity error",
		  -1, 1 },
		{ DREQPERR_F, "PCI DMA channel request parity error", -1, 1 },
		{ DRSPPERR_F, "PCI DMA channel response parity error", -1, 1 },
		{ HREQWRPERR_F, "PCI HMA channel count parity error", -1, 1 },
		{ HREQPERR_F, "PCI HMA channel request parity error", -1, 1 },
		{ HRSPPERR_F, "PCI HMA channel response parity error", -1, 1 },
		{ CFGSNPPERR_F, "PCI config snoop FIFO parity error", -1, 1 },
		{ FIDPERR_F, "PCI FID parity error", -1, 1 },
		{ VFIDPERR_F, "PCI INTx clear parity error", -1, 1 },
		{ MAGRPPERR_F, "PCI MA group FIFO parity error", -1, 1 },
		{ PIOTAGPERR_F, "PCI PIO tag parity error", -1, 1 },
		{ IPRXHDRGRPPERR_F, "PCI IP Rx header group parity error",
		  -1, 1 },
		{ IPRXDATAGRPPERR_F, "PCI IP Rx data group parity error",
		  -1, 1 },
		{ RPLPERR_F, "PCI IP replay buffer parity error", -1, 1 },
		{ IPSOTPERR_F, "PCI IP SOT buffer parity error", -1, 1 },
		{ TRGT1GRPPERR_F, "PCI TRGT1 group FIFOs parity error", -1, 1 },
		{ READRSPERR_F, "Outbound read error", -1, 0 },
		{ 0 }
	};

	int fat;

	if (is_t4(adapter->params.chip))
		fat = t4_handle_intr_status(adapter,
				PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS_A,
				sysbus_intr_info) +
		      t4_handle_intr_status(adapter,
				PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS_A,
				pcie_port_intr_info) +
		      t4_handle_intr_status(adapter, PCIE_INT_CAUSE_A,
					    pcie_intr_info);
	else
		fat = t4_handle_intr_status(adapter, PCIE_INT_CAUSE_A,
					    t5_pcie_intr_info);

	if (fat)
		t4_fatal_err(adapter);
}

/*
 * TP interrupt handler.
 */
static void tp_intr_handler(struct adapter *adapter)
{
	static const struct intr_info tp_intr_info[] = {
		{ 0x3fffffff, "TP parity error", -1, 1 },
		{ FLMTXFLSTEMPTY_F, "TP out of Tx pages", -1, 1 },
		{ 0 }
	};

	if (t4_handle_intr_status(adapter, TP_INT_CAUSE_A, tp_intr_info))
		t4_fatal_err(adapter);
}
1493
1494/*
1495 * SGE interrupt handler.
1496 */
1497static void sge_intr_handler(struct adapter *adapter)
1498{
1499 u64 v;
1500
005b5717 1501 static const struct intr_info sge_intr_info[] = {
f612b815 1502 { ERR_CPL_EXCEED_IQE_SIZE_F,
56d36be4 1503 "SGE received CPL exceeding IQE size", -1, 1 },
f612b815 1504 { ERR_INVALID_CIDX_INC_F,
56d36be4 1505 "SGE GTS CIDX increment too large", -1, 0 },
f612b815
HS
1506 { ERR_CPL_OPCODE_0_F, "SGE received 0-length CPL", -1, 0 },
1507 { DBFIFO_LP_INT_F, NULL, -1, 0, t4_db_full },
1508 { DBFIFO_HP_INT_F, NULL, -1, 0, t4_db_full },
1509 { ERR_DROPPED_DB_F, NULL, -1, 0, t4_db_dropped },
1510 { ERR_DATA_CPL_ON_HIGH_QID1_F | ERR_DATA_CPL_ON_HIGH_QID0_F,
56d36be4 1511 "SGE IQID > 1023 received CPL for FL", -1, 0 },
f612b815 1512 { ERR_BAD_DB_PIDX3_F, "SGE DBP 3 pidx increment too large", -1,
56d36be4 1513 0 },
f612b815 1514 { ERR_BAD_DB_PIDX2_F, "SGE DBP 2 pidx increment too large", -1,
56d36be4 1515 0 },
f612b815 1516 { ERR_BAD_DB_PIDX1_F, "SGE DBP 1 pidx increment too large", -1,
56d36be4 1517 0 },
f612b815 1518 { ERR_BAD_DB_PIDX0_F, "SGE DBP 0 pidx increment too large", -1,
56d36be4 1519 0 },
f612b815 1520 { ERR_ING_CTXT_PRIO_F,
56d36be4 1521 "SGE too many priority ingress contexts", -1, 0 },
f612b815 1522 { ERR_EGR_CTXT_PRIO_F,
56d36be4 1523 "SGE too many priority egress contexts", -1, 0 },
f612b815
HS
1524 { INGRESS_SIZE_ERR_F, "SGE illegal ingress QID", -1, 0 },
1525 { EGRESS_SIZE_ERR_F, "SGE illegal egress QID", -1, 0 },
56d36be4
DM
1526 { 0 }
1527 };
1528
f612b815
HS
1529 v = (u64)t4_read_reg(adapter, SGE_INT_CAUSE1_A) |
1530 ((u64)t4_read_reg(adapter, SGE_INT_CAUSE2_A) << 32);
56d36be4
DM
1531 if (v) {
1532 dev_alert(adapter->pdev_dev, "SGE parity error (%#llx)\n",
8caa1e84 1533 (unsigned long long)v);
f612b815
HS
1534 t4_write_reg(adapter, SGE_INT_CAUSE1_A, v);
1535 t4_write_reg(adapter, SGE_INT_CAUSE2_A, v >> 32);
56d36be4
DM
1536 }
1537
 1538 if (t4_handle_intr_status(adapter, SGE_INT_CAUSE3_A, sge_intr_info) ||
1539 v != 0)
1540 t4_fatal_err(adapter);
1541}
1542
1543#define CIM_OBQ_INTR (OBQULP0PARERR_F | OBQULP1PARERR_F | OBQULP2PARERR_F |\
1544 OBQULP3PARERR_F | OBQSGEPARERR_F | OBQNCSIPARERR_F)
1545#define CIM_IBQ_INTR (IBQTP0PARERR_F | IBQTP1PARERR_F | IBQULPPARERR_F |\
1546 IBQSGEHIPARERR_F | IBQSGELOPARERR_F | IBQNCSIPARERR_F)
1547
1548/*
1549 * CIM interrupt handler.
1550 */
1551static void cim_intr_handler(struct adapter *adapter)
1552{
 1553 static const struct intr_info cim_intr_info[] = {
1554 { PREFDROPINT_F, "CIM control register prefetch drop", -1, 1 },
1555 { CIM_OBQ_INTR, "CIM OBQ parity error", -1, 1 },
1556 { CIM_IBQ_INTR, "CIM IBQ parity error", -1, 1 },
1557 { MBUPPARERR_F, "CIM mailbox uP parity error", -1, 1 },
1558 { MBHOSTPARERR_F, "CIM mailbox host parity error", -1, 1 },
1559 { TIEQINPARERRINT_F, "CIM TIEQ outgoing parity error", -1, 1 },
1560 { TIEQOUTPARERRINT_F, "CIM TIEQ incoming parity error", -1, 1 },
1561 { 0 }
1562 };
 1563 static const struct intr_info cim_upintr_info[] = {
1564 { RSVDSPACEINT_F, "CIM reserved space access", -1, 1 },
1565 { ILLTRANSINT_F, "CIM illegal transaction", -1, 1 },
1566 { ILLWRINT_F, "CIM illegal write", -1, 1 },
1567 { ILLRDINT_F, "CIM illegal read", -1, 1 },
1568 { ILLRDBEINT_F, "CIM illegal read BE", -1, 1 },
1569 { ILLWRBEINT_F, "CIM illegal write BE", -1, 1 },
1570 { SGLRDBOOTINT_F, "CIM single read from boot space", -1, 1 },
1571 { SGLWRBOOTINT_F, "CIM single write to boot space", -1, 1 },
1572 { BLKWRBOOTINT_F, "CIM block write to boot space", -1, 1 },
1573 { SGLRDFLASHINT_F, "CIM single read from flash space", -1, 1 },
1574 { SGLWRFLASHINT_F, "CIM single write to flash space", -1, 1 },
1575 { BLKWRFLASHINT_F, "CIM block write to flash space", -1, 1 },
1576 { SGLRDEEPROMINT_F, "CIM single EEPROM read", -1, 1 },
1577 { SGLWREEPROMINT_F, "CIM single EEPROM write", -1, 1 },
1578 { BLKRDEEPROMINT_F, "CIM block EEPROM read", -1, 1 },
1579 { BLKWREEPROMINT_F, "CIM block EEPROM write", -1, 1 },
1580 { SGLRDCTLINT_F, "CIM single read from CTL space", -1, 1 },
1581 { SGLWRCTLINT_F, "CIM single write to CTL space", -1, 1 },
1582 { BLKRDCTLINT_F, "CIM block read from CTL space", -1, 1 },
1583 { BLKWRCTLINT_F, "CIM block write to CTL space", -1, 1 },
1584 { SGLRDPLINT_F, "CIM single read from PL space", -1, 1 },
1585 { SGLWRPLINT_F, "CIM single write to PL space", -1, 1 },
1586 { BLKRDPLINT_F, "CIM block read from PL space", -1, 1 },
1587 { BLKWRPLINT_F, "CIM block write to PL space", -1, 1 },
1588 { REQOVRLOOKUPINT_F, "CIM request FIFO overwrite", -1, 1 },
1589 { RSPOVRLOOKUPINT_F, "CIM response FIFO overwrite", -1, 1 },
1590 { TIMEOUTINT_F, "CIM PIF timeout", -1, 1 },
1591 { TIMEOUTMAINT_F, "CIM PIF MA timeout", -1, 1 },
1592 { 0 }
1593 };
1594
1595 int fat;
1596
 1597 if (t4_read_reg(adapter, PCIE_FW_A) & PCIE_FW_ERR_F)
1598 t4_report_fw_error(adapter);
1599
 1600 fat = t4_handle_intr_status(adapter, CIM_HOST_INT_CAUSE_A,
 1601 cim_intr_info) +
 1602 t4_handle_intr_status(adapter, CIM_HOST_UPACC_INT_CAUSE_A,
1603 cim_upintr_info);
1604 if (fat)
1605 t4_fatal_err(adapter);
1606}
1607
1608/*
1609 * ULP RX interrupt handler.
1610 */
1611static void ulprx_intr_handler(struct adapter *adapter)
1612{
 1613 static const struct intr_info ulprx_intr_info[] = {
 1614 { 0x1800000, "ULPRX context error", -1, 1 },
1615 { 0x7fffff, "ULPRX parity error", -1, 1 },
1616 { 0 }
1617 };
1618
 1619 if (t4_handle_intr_status(adapter, ULP_RX_INT_CAUSE_A, ulprx_intr_info))
1620 t4_fatal_err(adapter);
1621}
1622
1623/*
1624 * ULP TX interrupt handler.
1625 */
1626static void ulptx_intr_handler(struct adapter *adapter)
1627{
 1628 static const struct intr_info ulptx_intr_info[] = {
 1629 { PBL_BOUND_ERR_CH3_F, "ULPTX channel 3 PBL out of bounds", -1,
 1630 0 },
 1631 { PBL_BOUND_ERR_CH2_F, "ULPTX channel 2 PBL out of bounds", -1,
 1632 0 },
 1633 { PBL_BOUND_ERR_CH1_F, "ULPTX channel 1 PBL out of bounds", -1,
 1634 0 },
 1635 { PBL_BOUND_ERR_CH0_F, "ULPTX channel 0 PBL out of bounds", -1,
1636 0 },
1637 { 0xfffffff, "ULPTX parity error", -1, 1 },
1638 { 0 }
1639 };
1640
 1641 if (t4_handle_intr_status(adapter, ULP_TX_INT_CAUSE_A, ulptx_intr_info))
1642 t4_fatal_err(adapter);
1643}
1644
1645/*
1646 * PM TX interrupt handler.
1647 */
1648static void pmtx_intr_handler(struct adapter *adapter)
1649{
 1650 static const struct intr_info pmtx_intr_info[] = {
1651 { PCMD_LEN_OVFL0_F, "PMTX channel 0 pcmd too large", -1, 1 },
1652 { PCMD_LEN_OVFL1_F, "PMTX channel 1 pcmd too large", -1, 1 },
1653 { PCMD_LEN_OVFL2_F, "PMTX channel 2 pcmd too large", -1, 1 },
1654 { ZERO_C_CMD_ERROR_F, "PMTX 0-length pcmd", -1, 1 },
1655 { PMTX_FRAMING_ERROR_F, "PMTX framing error", -1, 1 },
1656 { OESPI_PAR_ERROR_F, "PMTX oespi parity error", -1, 1 },
1657 { DB_OPTIONS_PAR_ERROR_F, "PMTX db_options parity error",
1658 -1, 1 },
1659 { ICSPI_PAR_ERROR_F, "PMTX icspi parity error", -1, 1 },
1660 { PMTX_C_PCMD_PAR_ERROR_F, "PMTX c_pcmd parity error", -1, 1},
1661 { 0 }
1662 };
1663
 1664 if (t4_handle_intr_status(adapter, PM_TX_INT_CAUSE_A, pmtx_intr_info))
1665 t4_fatal_err(adapter);
1666}
1667
1668/*
1669 * PM RX interrupt handler.
1670 */
1671static void pmrx_intr_handler(struct adapter *adapter)
1672{
 1673 static const struct intr_info pmrx_intr_info[] = {
1674 { ZERO_E_CMD_ERROR_F, "PMRX 0-length pcmd", -1, 1 },
1675 { PMRX_FRAMING_ERROR_F, "PMRX framing error", -1, 1 },
1676 { OCSPI_PAR_ERROR_F, "PMRX ocspi parity error", -1, 1 },
1677 { DB_OPTIONS_PAR_ERROR_F, "PMRX db_options parity error",
1678 -1, 1 },
1679 { IESPI_PAR_ERROR_F, "PMRX iespi parity error", -1, 1 },
1680 { PMRX_E_PCMD_PAR_ERROR_F, "PMRX e_pcmd parity error", -1, 1},
1681 { 0 }
1682 };
1683
 1684 if (t4_handle_intr_status(adapter, PM_RX_INT_CAUSE_A, pmrx_intr_info))
1685 t4_fatal_err(adapter);
1686}
1687
1688/*
1689 * CPL switch interrupt handler.
1690 */
1691static void cplsw_intr_handler(struct adapter *adapter)
1692{
 1693 static const struct intr_info cplsw_intr_info[] = {
1694 { CIM_OP_MAP_PERR_F, "CPLSW CIM op_map parity error", -1, 1 },
1695 { CIM_OVFL_ERROR_F, "CPLSW CIM overflow", -1, 1 },
1696 { TP_FRAMING_ERROR_F, "CPLSW TP framing error", -1, 1 },
1697 { SGE_FRAMING_ERROR_F, "CPLSW SGE framing error", -1, 1 },
1698 { CIM_FRAMING_ERROR_F, "CPLSW CIM framing error", -1, 1 },
1699 { ZERO_SWITCH_ERROR_F, "CPLSW no-switch error", -1, 1 },
1700 { 0 }
1701 };
1702
 1703 if (t4_handle_intr_status(adapter, CPL_INTR_CAUSE_A, cplsw_intr_info))
1704 t4_fatal_err(adapter);
1705}
1706
1707/*
1708 * LE interrupt handler.
1709 */
1710static void le_intr_handler(struct adapter *adap)
1711{
 1712 static const struct intr_info le_intr_info[] = {
1713 { LIPMISS_F, "LE LIP miss", -1, 0 },
1714 { LIP0_F, "LE 0 LIP error", -1, 0 },
1715 { PARITYERR_F, "LE parity error", -1, 1 },
1716 { UNKNOWNCMD_F, "LE unknown command", -1, 1 },
1717 { REQQPARERR_F, "LE request queue parity error", -1, 1 },
1718 { 0 }
1719 };
1720
 1721 if (t4_handle_intr_status(adap, LE_DB_INT_CAUSE_A, le_intr_info))
1722 t4_fatal_err(adap);
1723}
1724
1725/*
1726 * MPS interrupt handler.
1727 */
1728static void mps_intr_handler(struct adapter *adapter)
1729{
 1730 static const struct intr_info mps_rx_intr_info[] = {
1731 { 0xffffff, "MPS Rx parity error", -1, 1 },
1732 { 0 }
1733 };
 1734 static const struct intr_info mps_tx_intr_info[] = {
1735 { TPFIFO_V(TPFIFO_M), "MPS Tx TP FIFO parity error", -1, 1 },
1736 { NCSIFIFO_F, "MPS Tx NC-SI FIFO parity error", -1, 1 },
1737 { TXDATAFIFO_V(TXDATAFIFO_M), "MPS Tx data FIFO parity error",
1738 -1, 1 },
1739 { TXDESCFIFO_V(TXDESCFIFO_M), "MPS Tx desc FIFO parity error",
1740 -1, 1 },
1741 { BUBBLE_F, "MPS Tx underflow", -1, 1 },
1742 { SECNTERR_F, "MPS Tx SOP/EOP error", -1, 1 },
1743 { FRMERR_F, "MPS Tx framing error", -1, 1 },
1744 { 0 }
1745 };
 1746 static const struct intr_info mps_trc_intr_info[] = {
1747 { FILTMEM_V(FILTMEM_M), "MPS TRC filter parity error", -1, 1 },
1748 { PKTFIFO_V(PKTFIFO_M), "MPS TRC packet FIFO parity error",
1749 -1, 1 },
1750 { MISCPERR_F, "MPS TRC misc parity error", -1, 1 },
1751 { 0 }
1752 };
 1753 static const struct intr_info mps_stat_sram_intr_info[] = {
1754 { 0x1fffff, "MPS statistics SRAM parity error", -1, 1 },
1755 { 0 }
1756 };
 1757 static const struct intr_info mps_stat_tx_intr_info[] = {
1758 { 0xfffff, "MPS statistics Tx FIFO parity error", -1, 1 },
1759 { 0 }
1760 };
 1761 static const struct intr_info mps_stat_rx_intr_info[] = {
1762 { 0xffffff, "MPS statistics Rx FIFO parity error", -1, 1 },
1763 { 0 }
1764 };
 1765 static const struct intr_info mps_cls_intr_info[] = {
1766 { MATCHSRAM_F, "MPS match SRAM parity error", -1, 1 },
1767 { MATCHTCAM_F, "MPS match TCAM parity error", -1, 1 },
1768 { HASHSRAM_F, "MPS hash SRAM parity error", -1, 1 },
1769 { 0 }
1770 };
1771
1772 int fat;
1773
 1774 fat = t4_handle_intr_status(adapter, MPS_RX_PERR_INT_CAUSE_A,
 1775 mps_rx_intr_info) +
 1776 t4_handle_intr_status(adapter, MPS_TX_INT_CAUSE_A,
 1777 mps_tx_intr_info) +
 1778 t4_handle_intr_status(adapter, MPS_TRC_INT_CAUSE_A,
 1779 mps_trc_intr_info) +
 1780 t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_SRAM_A,
 1781 mps_stat_sram_intr_info) +
 1782 t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_TX_FIFO_A,
 1783 mps_stat_tx_intr_info) +
 1784 t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_RX_FIFO_A,
 1785 mps_stat_rx_intr_info) +
 1786 t4_handle_intr_status(adapter, MPS_CLS_INT_CAUSE_A,
1787 mps_cls_intr_info);
1788
1789 t4_write_reg(adapter, MPS_INT_CAUSE_A, 0);
1790 t4_read_reg(adapter, MPS_INT_CAUSE_A); /* flush */
1791 if (fat)
1792 t4_fatal_err(adapter);
1793}
1794
1795#define MEM_INT_MASK (PERR_INT_CAUSE_F | ECC_CE_INT_CAUSE_F | \
1796 ECC_UE_INT_CAUSE_F)
1797
1798/*
1799 * EDC/MC interrupt handler.
1800 */
1801static void mem_intr_handler(struct adapter *adapter, int idx)
1802{
 1803 static const char name[4][7] = { "EDC0", "EDC1", "MC/MC0", "MC1" };
1804
1805 unsigned int addr, cnt_addr, v;
1806
1807 if (idx <= MEM_EDC1) {
1808 addr = EDC_REG(EDC_INT_CAUSE_A, idx);
1809 cnt_addr = EDC_REG(EDC_ECC_STATUS_A, idx);
1810 } else if (idx == MEM_MC) {
1811 if (is_t4(adapter->params.chip)) {
1812 addr = MC_INT_CAUSE_A;
1813 cnt_addr = MC_ECC_STATUS_A;
 1814 } else {
1815 addr = MC_P_INT_CAUSE_A;
1816 cnt_addr = MC_P_ECC_STATUS_A;
 1817 }
 1818 } else {
1819 addr = MC_REG(MC_P_INT_CAUSE_A, 1);
1820 cnt_addr = MC_REG(MC_P_ECC_STATUS_A, 1);
1821 }
1822
1823 v = t4_read_reg(adapter, addr) & MEM_INT_MASK;
 1824 if (v & PERR_INT_CAUSE_F)
1825 dev_alert(adapter->pdev_dev, "%s FIFO parity error\n",
1826 name[idx]);
1827 if (v & ECC_CE_INT_CAUSE_F) {
1828 u32 cnt = ECC_CECNT_G(t4_read_reg(adapter, cnt_addr));
 1829
 1830 t4_write_reg(adapter, cnt_addr, ECC_CECNT_V(ECC_CECNT_M));
1831 if (printk_ratelimit())
1832 dev_warn(adapter->pdev_dev,
1833 "%u %s correctable ECC data error%s\n",
1834 cnt, name[idx], cnt > 1 ? "s" : "");
1835 }
 1836 if (v & ECC_UE_INT_CAUSE_F)
1837 dev_alert(adapter->pdev_dev,
1838 "%s uncorrectable ECC data error\n", name[idx]);
1839
1840 t4_write_reg(adapter, addr, v);
 1841 if (v & (PERR_INT_CAUSE_F | ECC_UE_INT_CAUSE_F))
1842 t4_fatal_err(adapter);
1843}
1844
1845/*
1846 * MA interrupt handler.
1847 */
1848static void ma_intr_handler(struct adapter *adap)
1849{
 1850 u32 v, status = t4_read_reg(adap, MA_INT_CAUSE_A);
 1851
 1852 if (status & MEM_PERR_INT_CAUSE_F) {
1853 dev_alert(adap->pdev_dev,
1854 "MA parity error, parity status %#x\n",
 1855 t4_read_reg(adap, MA_PARITY_ERROR_STATUS1_A));
1856 if (is_t5(adap->params.chip))
1857 dev_alert(adap->pdev_dev,
1858 "MA parity error, parity status %#x\n",
1859 t4_read_reg(adap,
 1860 MA_PARITY_ERROR_STATUS2_A));
 1861 }
1862 if (status & MEM_WRAP_INT_CAUSE_F) {
1863 v = t4_read_reg(adap, MA_INT_WRAP_STATUS_A);
1864 dev_alert(adap->pdev_dev, "MA address wrap-around error by "
1865 "client %u to address %#x\n",
1866 MEM_WRAP_CLIENT_NUM_G(v),
1867 MEM_WRAP_ADDRESS_G(v) << 4);
 1868 }
 1869 t4_write_reg(adap, MA_INT_CAUSE_A, status);
1870 t4_fatal_err(adap);
1871}
1872
1873/*
1874 * SMB interrupt handler.
1875 */
1876static void smb_intr_handler(struct adapter *adap)
1877{
 1878 static const struct intr_info smb_intr_info[] = {
1879 { MSTTXFIFOPARINT_F, "SMB master Tx FIFO parity error", -1, 1 },
1880 { MSTRXFIFOPARINT_F, "SMB master Rx FIFO parity error", -1, 1 },
1881 { SLVFIFOPARINT_F, "SMB slave FIFO parity error", -1, 1 },
1882 { 0 }
1883 };
1884
 1885 if (t4_handle_intr_status(adap, SMB_INT_CAUSE_A, smb_intr_info))
1886 t4_fatal_err(adap);
1887}
1888
1889/*
1890 * NC-SI interrupt handler.
1891 */
1892static void ncsi_intr_handler(struct adapter *adap)
1893{
 1894 static const struct intr_info ncsi_intr_info[] = {
1895 { CIM_DM_PRTY_ERR_F, "NC-SI CIM parity error", -1, 1 },
1896 { MPS_DM_PRTY_ERR_F, "NC-SI MPS parity error", -1, 1 },
1897 { TXFIFO_PRTY_ERR_F, "NC-SI Tx FIFO parity error", -1, 1 },
1898 { RXFIFO_PRTY_ERR_F, "NC-SI Rx FIFO parity error", -1, 1 },
1899 { 0 }
1900 };
1901
 1902 if (t4_handle_intr_status(adap, NCSI_INT_CAUSE_A, ncsi_intr_info))
1903 t4_fatal_err(adap);
1904}
1905
1906/*
1907 * XGMAC interrupt handler.
1908 */
1909static void xgmac_intr_handler(struct adapter *adap, int port)
1910{
1911 u32 v, int_cause_reg;
1912
 1913 if (is_t4(adap->params.chip))
 1914 int_cause_reg = PORT_REG(port, XGMAC_PORT_INT_CAUSE_A);
 1915 else
 1916 int_cause_reg = T5_PORT_REG(port, MAC_PORT_INT_CAUSE_A);
1917
1918 v = t4_read_reg(adap, int_cause_reg);
 1919
 1920 v &= TXFIFO_PRTY_ERR_F | RXFIFO_PRTY_ERR_F;
1921 if (!v)
1922 return;
1923
 1924 if (v & TXFIFO_PRTY_ERR_F)
1925 dev_alert(adap->pdev_dev, "XGMAC %d Tx FIFO parity error\n",
1926 port);
 1927 if (v & RXFIFO_PRTY_ERR_F)
1928 dev_alert(adap->pdev_dev, "XGMAC %d Rx FIFO parity error\n",
1929 port);
 1930 t4_write_reg(adap, PORT_REG(port, XGMAC_PORT_INT_CAUSE_A), v);
1931 t4_fatal_err(adap);
1932}
1933
1934/*
1935 * PL interrupt handler.
1936 */
1937static void pl_intr_handler(struct adapter *adap)
1938{
 1939 static const struct intr_info pl_intr_info[] = {
1940 { FATALPERR_F, "T4 fatal parity error", -1, 1 },
1941 { PERRVFID_F, "PL VFID_MAP parity error", -1, 1 },
1942 { 0 }
1943 };
1944
 1945 if (t4_handle_intr_status(adap, PL_PL_INT_CAUSE_A, pl_intr_info))
1946 t4_fatal_err(adap);
1947}
1948
1949#define PF_INTR_MASK (PFSW_F)
1950#define GLBL_INTR_MASK (CIM_F | MPS_F | PL_F | PCIE_F | MC_F | EDC0_F | \
1951 EDC1_F | LE_F | TP_F | MA_F | PM_TX_F | PM_RX_F | ULP_RX_F | \
1952 CPL_SWITCH_F | SGE_F | ULP_TX_F)
1953
1954/**
1955 * t4_slow_intr_handler - control path interrupt handler
1956 * @adapter: the adapter
1957 *
1958 * T4 interrupt handler for non-data global interrupt events, e.g., errors.
1959 * The designation 'slow' is because it involves register reads, while
1960 * data interrupts typically don't involve any MMIOs.
1961 */
1962int t4_slow_intr_handler(struct adapter *adapter)
1963{
 1964 u32 cause = t4_read_reg(adapter, PL_INT_CAUSE_A);
1965
1966 if (!(cause & GLBL_INTR_MASK))
1967 return 0;
 1968 if (cause & CIM_F)
 1969 cim_intr_handler(adapter);
 1970 if (cause & MPS_F)
 1971 mps_intr_handler(adapter);
 1972 if (cause & NCSI_F)
 1973 ncsi_intr_handler(adapter);
 1974 if (cause & PL_F)
 1975 pl_intr_handler(adapter);
 1976 if (cause & SMB_F)
 1977 smb_intr_handler(adapter);
 1978 if (cause & XGMAC0_F)
 1979 xgmac_intr_handler(adapter, 0);
 1980 if (cause & XGMAC1_F)
 1981 xgmac_intr_handler(adapter, 1);
 1982 if (cause & XGMAC_KR0_F)
 1983 xgmac_intr_handler(adapter, 2);
 1984 if (cause & XGMAC_KR1_F)
 1985 xgmac_intr_handler(adapter, 3);
 1986 if (cause & PCIE_F)
 1987 pcie_intr_handler(adapter);
 1988 if (cause & MC_F)
 1989 mem_intr_handler(adapter, MEM_MC);
 1990 if (!is_t4(adapter->params.chip) && (cause & MC1_S))
 1991 mem_intr_handler(adapter, MEM_MC1);
 1992 if (cause & EDC0_F)
 1993 mem_intr_handler(adapter, MEM_EDC0);
 1994 if (cause & EDC1_F)
 1995 mem_intr_handler(adapter, MEM_EDC1);
 1996 if (cause & LE_F)
 1997 le_intr_handler(adapter);
 1998 if (cause & TP_F)
 1999 tp_intr_handler(adapter);
 2000 if (cause & MA_F)
 2001 ma_intr_handler(adapter);
 2002 if (cause & PM_TX_F)
 2003 pmtx_intr_handler(adapter);
 2004 if (cause & PM_RX_F)
 2005 pmrx_intr_handler(adapter);
 2006 if (cause & ULP_RX_F)
 2007 ulprx_intr_handler(adapter);
 2008 if (cause & CPL_SWITCH_F)
 2009 cplsw_intr_handler(adapter);
 2010 if (cause & SGE_F)
 2011 sge_intr_handler(adapter);
 2012 if (cause & ULP_TX_F)
2013 ulptx_intr_handler(adapter);
2014
2015 /* Clear the interrupts just processed for which we are the master. */
2016 t4_write_reg(adapter, PL_INT_CAUSE_A, cause & GLBL_INTR_MASK);
2017 (void)t4_read_reg(adapter, PL_INT_CAUSE_A); /* flush */
2018 return 1;
2019}
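/*
 * Illustrative sketch (not part of this file): a top-level interrupt
 * handler would typically use the return value of t4_slow_intr_handler()
 * to tell the IRQ core whether the interrupt was ours. The function name
 * "example_slow_intr" below is hypothetical:
 *
 *	static irqreturn_t example_slow_intr(int irq, void *cookie)
 *	{
 *		struct adapter *adap = cookie;
 *
 *		return t4_slow_intr_handler(adap) ? IRQ_HANDLED : IRQ_NONE;
 *	}
 */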
2020
2021/**
2022 * t4_intr_enable - enable interrupts
2023 * @adapter: the adapter whose interrupts should be enabled
2024 *
2025 * Enable PF-specific interrupts for the calling function and the top-level
2026 * interrupt concentrator for global interrupts. Interrupts are already
2027 * enabled at each module, here we just enable the roots of the interrupt
2028 * hierarchies.
2029 *
2030 * Note: this function should be called only when the driver manages
2031 * non PF-specific interrupts from the various HW modules. Only one PCI
2032 * function at a time should be doing this.
2033 */
2034void t4_intr_enable(struct adapter *adapter)
2035{
 2036 u32 pf = SOURCEPF_G(t4_read_reg(adapter, PL_WHOAMI_A));
 2037
2038 t4_write_reg(adapter, SGE_INT_ENABLE3_A, ERR_CPL_EXCEED_IQE_SIZE_F |
2039 ERR_INVALID_CIDX_INC_F | ERR_CPL_OPCODE_0_F |
2040 ERR_DROPPED_DB_F | ERR_DATA_CPL_ON_HIGH_QID1_F |
2041 ERR_DATA_CPL_ON_HIGH_QID0_F | ERR_BAD_DB_PIDX3_F |
2042 ERR_BAD_DB_PIDX2_F | ERR_BAD_DB_PIDX1_F |
2043 ERR_BAD_DB_PIDX0_F | ERR_ING_CTXT_PRIO_F |
2044 ERR_EGR_CTXT_PRIO_F | INGRESS_SIZE_ERR_F |
2045 DBFIFO_HP_INT_F | DBFIFO_LP_INT_F |
2046 EGRESS_SIZE_ERR_F);
2047 t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE_A), PF_INTR_MASK);
2048 t4_set_reg_field(adapter, PL_INT_MAP0_A, 0, 1 << pf);
2049}
2050
2051/**
2052 * t4_intr_disable - disable interrupts
2053 * @adapter: the adapter whose interrupts should be disabled
2054 *
2055 * Disable interrupts. We only disable the top-level interrupt
2056 * concentrators. The caller must be a PCI function managing global
2057 * interrupts.
2058 */
2059void t4_intr_disable(struct adapter *adapter)
2060{
 2061 u32 pf = SOURCEPF_G(t4_read_reg(adapter, PL_WHOAMI_A));
 2062
2063 t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE_A), 0);
2064 t4_set_reg_field(adapter, PL_INT_MAP0_A, 1 << pf, 0);
2065}
2066
2067/**
2068 * hash_mac_addr - return the hash value of a MAC address
2069 * @addr: the 48-bit Ethernet MAC address
2070 *
2071 * Hashes a MAC address according to the hash function used by HW inexact
2072 * (hash) address matching.
2073 */
2074static int hash_mac_addr(const u8 *addr)
2075{
2076 u32 a = ((u32)addr[0] << 16) | ((u32)addr[1] << 8) | addr[2];
2077 u32 b = ((u32)addr[3] << 16) | ((u32)addr[4] << 8) | addr[5];
2078 a ^= b;
2079 a ^= (a >> 12);
2080 a ^= (a >> 6);
2081 return a & 0x3f;
2082}
2083
2084/**
2085 * t4_config_rss_range - configure a portion of the RSS mapping table
2086 * @adapter: the adapter
2087 * @mbox: mbox to use for the FW command
2088 * @viid: virtual interface whose RSS subtable is to be written
2089 * @start: start entry in the table to write
2090 * @n: how many table entries to write
2091 * @rspq: values for the response queue lookup table
2092 * @nrspq: number of values in @rspq
2093 *
2094 * Programs the selected part of the VI's RSS mapping table with the
2095 * provided values. If @nrspq < @n the supplied values are used repeatedly
2096 * until the full table range is populated.
2097 *
2098 * The caller must ensure the values in @rspq are in the range allowed for
2099 * @viid.
2100 */
2101int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
2102 int start, int n, const u16 *rspq, unsigned int nrspq)
2103{
2104 int ret;
2105 const u16 *rsp = rspq;
2106 const u16 *rsp_end = rspq + nrspq;
2107 struct fw_rss_ind_tbl_cmd cmd;
2108
2109 memset(&cmd, 0, sizeof(cmd));
2110 cmd.op_to_viid = htonl(FW_CMD_OP_V(FW_RSS_IND_TBL_CMD) |
2111 FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
 2112 FW_RSS_IND_TBL_CMD_VIID_V(viid));
2113 cmd.retval_len16 = htonl(FW_LEN16(cmd));
2114
2115 /* each fw_rss_ind_tbl_cmd takes up to 32 entries */
2116 while (n > 0) {
2117 int nq = min(n, 32);
2118 __be32 *qp = &cmd.iq0_to_iq2;
2119
2120 cmd.niqid = htons(nq);
2121 cmd.startidx = htons(start);
2122
2123 start += nq;
2124 n -= nq;
2125
2126 while (nq > 0) {
2127 unsigned int v;
2128
 2129 v = FW_RSS_IND_TBL_CMD_IQ0_V(*rsp);
2130 if (++rsp >= rsp_end)
2131 rsp = rspq;
 2132 v |= FW_RSS_IND_TBL_CMD_IQ1_V(*rsp);
2133 if (++rsp >= rsp_end)
2134 rsp = rspq;
 2135 v |= FW_RSS_IND_TBL_CMD_IQ2_V(*rsp);
2136 if (++rsp >= rsp_end)
2137 rsp = rspq;
2138
2139 *qp++ = htonl(v);
2140 nq -= 3;
2141 }
2142
2143 ret = t4_wr_mbox(adapter, mbox, &cmd, sizeof(cmd), NULL);
2144 if (ret)
2145 return ret;
2146 }
2147 return 0;
2148}
2149
2150/**
2151 * t4_config_glbl_rss - configure the global RSS mode
2152 * @adapter: the adapter
2153 * @mbox: mbox to use for the FW command
2154 * @mode: global RSS mode
2155 * @flags: mode-specific flags
2156 *
2157 * Sets the global RSS mode.
2158 */
2159int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode,
2160 unsigned int flags)
2161{
2162 struct fw_rss_glb_config_cmd c;
2163
2164 memset(&c, 0, sizeof(c));
2165 c.op_to_write = htonl(FW_CMD_OP_V(FW_RSS_GLB_CONFIG_CMD) |
2166 FW_CMD_REQUEST_F | FW_CMD_WRITE_F);
2167 c.retval_len16 = htonl(FW_LEN16(c));
2168 if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_MANUAL) {
 2169 c.u.manual.mode_pkd = htonl(FW_RSS_GLB_CONFIG_CMD_MODE_V(mode));
2170 } else if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL) {
2171 c.u.basicvirtual.mode_pkd =
 2172 htonl(FW_RSS_GLB_CONFIG_CMD_MODE_V(mode));
2173 c.u.basicvirtual.synmapen_to_hashtoeplitz = htonl(flags);
2174 } else
2175 return -EINVAL;
2176 return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
2177}
2178
2179/**
2180 * t4_tp_get_tcp_stats - read TP's TCP MIB counters
2181 * @adap: the adapter
2182 * @v4: holds the TCP/IP counter values
2183 * @v6: holds the TCP/IPv6 counter values
2184 *
2185 * Returns the values of TP's TCP/IP and TCP/IPv6 MIB counters.
2186 * Either @v4 or @v6 may be %NULL to skip the corresponding stats.
2187 */
2188void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4,
2189 struct tp_tcp_stats *v6)
2190{
 2191 u32 val[TP_MIB_TCP_RXT_SEG_LO_A - TP_MIB_TCP_OUT_RST_A + 1];
 2192
 2193#define STAT_IDX(x) ((TP_MIB_TCP_##x##_A) - TP_MIB_TCP_OUT_RST_A)
2194#define STAT(x) val[STAT_IDX(x)]
2195#define STAT64(x) (((u64)STAT(x##_HI) << 32) | STAT(x##_LO))
2196
2197 if (v4) {
2198 t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, val,
2199 ARRAY_SIZE(val), TP_MIB_TCP_OUT_RST_A);
2200 v4->tcpOutRsts = STAT(OUT_RST);
2201 v4->tcpInSegs = STAT64(IN_SEG);
2202 v4->tcpOutSegs = STAT64(OUT_SEG);
2203 v4->tcpRetransSegs = STAT64(RXT_SEG);
2204 }
2205 if (v6) {
2206 t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, val,
2207 ARRAY_SIZE(val), TP_MIB_TCP_V6OUT_RST_A);
2208 v6->tcpOutRsts = STAT(OUT_RST);
2209 v6->tcpInSegs = STAT64(IN_SEG);
2210 v6->tcpOutSegs = STAT64(OUT_SEG);
2211 v6->tcpRetransSegs = STAT64(RXT_SEG);
2212 }
2213#undef STAT64
2214#undef STAT
2215#undef STAT_IDX
2216}
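/*
 * The TP MIB exports 64-bit counters as HI/LO register pairs; STAT64()
 * above reassembles them. For example, if IN_SEG_HI reads 0x1 and
 * IN_SEG_LO reads 0x2, tcpInSegs becomes 0x100000002.
 */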
2217
2218/**
2219 * t4_read_mtu_tbl - returns the values in the HW path MTU table
2220 * @adap: the adapter
2221 * @mtus: where to store the MTU values
2222 * @mtu_log: where to store the MTU base-2 log (may be %NULL)
2223 *
2224 * Reads the HW path MTU table.
2225 */
2226void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log)
2227{
2228 u32 v;
2229 int i;
2230
2231 for (i = 0; i < NMTUS; ++i) {
2232 t4_write_reg(adap, TP_MTU_TABLE_A,
2233 MTUINDEX_V(0xff) | MTUVALUE_V(i));
2234 v = t4_read_reg(adap, TP_MTU_TABLE_A);
2235 mtus[i] = MTUVALUE_G(v);
 2236 if (mtu_log)
 2237 mtu_log[i] = MTUWIDTH_G(v);
2238 }
2239}
2240
2241/**
2242 * t4_tp_wr_bits_indirect - set/clear bits in an indirect TP register
2243 * @adap: the adapter
2244 * @addr: the indirect TP register address
2245 * @mask: specifies the field within the register to modify
2246 * @val: new value for the field
2247 *
2248 * Sets a field of an indirect TP register to the given value.
2249 */
2250void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr,
2251 unsigned int mask, unsigned int val)
2252{
2253 t4_write_reg(adap, TP_PIO_ADDR_A, addr);
2254 val |= t4_read_reg(adap, TP_PIO_DATA_A) & ~mask;
2255 t4_write_reg(adap, TP_PIO_DATA_A, val);
2256}
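/*
 * Illustrative call (the register address and field are hypothetical):
 * clearing bits 7:0 of an indirect TP register and writing 0x40 into the
 * field in one step:
 *
 *	t4_tp_wr_bits_indirect(adap, tp_reg_addr, 0xff, 0x40);
 *
 * The helper performs the read-modify-write through the TP_PIO_ADDR_A /
 * TP_PIO_DATA_A window shown above.
 */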
2257
2258/**
2259 * init_cong_ctrl - initialize congestion control parameters
2260 * @a: the alpha values for congestion control
2261 * @b: the beta values for congestion control
2262 *
2263 * Initialize the congestion control parameters.
2264 */
 2265 static void init_cong_ctrl(unsigned short *a, unsigned short *b)
2266{
2267 a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1;
2268 a[9] = 2;
2269 a[10] = 3;
2270 a[11] = 4;
2271 a[12] = 5;
2272 a[13] = 6;
2273 a[14] = 7;
2274 a[15] = 8;
2275 a[16] = 9;
2276 a[17] = 10;
2277 a[18] = 14;
2278 a[19] = 17;
2279 a[20] = 21;
2280 a[21] = 25;
2281 a[22] = 30;
2282 a[23] = 35;
2283 a[24] = 45;
2284 a[25] = 60;
2285 a[26] = 80;
2286 a[27] = 100;
2287 a[28] = 200;
2288 a[29] = 300;
2289 a[30] = 400;
2290 a[31] = 500;
2291
2292 b[0] = b[1] = b[2] = b[3] = b[4] = b[5] = b[6] = b[7] = b[8] = 0;
2293 b[9] = b[10] = 1;
2294 b[11] = b[12] = 2;
2295 b[13] = b[14] = b[15] = b[16] = 3;
2296 b[17] = b[18] = b[19] = b[20] = b[21] = 4;
2297 b[22] = b[23] = b[24] = b[25] = b[26] = b[27] = 5;
2298 b[28] = b[29] = 6;
2299 b[30] = b[31] = 7;
2300}
2301
2302/* The minimum additive increment value for the congestion control table */
2303#define CC_MIN_INCR 2U
2304
2305/**
2306 * t4_load_mtus - write the MTU and congestion control HW tables
2307 * @adap: the adapter
2308 * @mtus: the values for the MTU table
2309 * @alpha: the values for the congestion control alpha parameter
2310 * @beta: the values for the congestion control beta parameter
2311 *
2312 * Write the HW MTU table with the supplied MTUs and the high-speed
2313 * congestion control table with the supplied alpha, beta, and MTUs.
2314 * We write the two tables together because the additive increments
2315 * depend on the MTUs.
2316 */
2317void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
2318 const unsigned short *alpha, const unsigned short *beta)
2319{
2320 static const unsigned int avg_pkts[NCCTRL_WIN] = {
2321 2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
2322 896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
2323 28672, 40960, 57344, 81920, 114688, 163840, 229376
2324 };
2325
2326 unsigned int i, w;
2327
2328 for (i = 0; i < NMTUS; ++i) {
2329 unsigned int mtu = mtus[i];
2330 unsigned int log2 = fls(mtu);
2331
2332 if (!(mtu & ((1 << log2) >> 2))) /* round */
2333 log2--;
2334 t4_write_reg(adap, TP_MTU_TABLE_A, MTUINDEX_V(i) |
2335 MTUWIDTH_V(log2) | MTUVALUE_V(mtu));
2336
2337 for (w = 0; w < NCCTRL_WIN; ++w) {
2338 unsigned int inc;
2339
2340 inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
2341 CC_MIN_INCR);
2342
 2343 t4_write_reg(adap, TP_CCTRL_TABLE_A, (i << 21) |
2344 (w << 16) | (beta[w] << 13) | inc);
2345 }
2346 }
2347}
2348
2349/**
2350 * get_mps_bg_map - return the buffer groups associated with a port
2351 * @adap: the adapter
2352 * @idx: the port index
2353 *
2354 * Returns a bitmap indicating which MPS buffer groups are associated
2355 * with the given port. Bit i is set if buffer group i is used by the
2356 * port.
2357 */
2358static unsigned int get_mps_bg_map(struct adapter *adap, int idx)
2359{
 2360 u32 n = NUMPORTS_G(t4_read_reg(adap, MPS_CMN_CTL_A));
2361
2362 if (n == 0)
2363 return idx == 0 ? 0xf : 0;
2364 if (n == 1)
2365 return idx < 2 ? (3 << (2 * idx)) : 0;
2366 return 1 << idx;
2367}
2368
2369/**
2370 * t4_get_port_type_description - return Port Type string description
2371 * @port_type: firmware Port Type enumeration
2372 */
2373const char *t4_get_port_type_description(enum fw_port_type port_type)
2374{
2375 static const char *const port_type_description[] = {
2376 "R XFI",
2377 "R XAUI",
2378 "T SGMII",
2379 "T XFI",
2380 "T XAUI",
2381 "KX4",
2382 "CX4",
2383 "KX",
2384 "KR",
2385 "R SFP+",
2386 "KR/KX",
2387 "KR/KX/KX4",
2388 "R QSFP_10G",
 2389 "R QSA",
2390 "R QSFP",
2391 "R BP40_BA",
2392 };
2393
2394 if (port_type < ARRAY_SIZE(port_type_description))
2395 return port_type_description[port_type];
2396 return "UNKNOWN";
2397}
2398
2399/**
2400 * t4_get_port_stats - collect port statistics
2401 * @adap: the adapter
2402 * @idx: the port index
2403 * @p: the stats structure to fill
2404 *
2405 * Collect statistics related to the given port from HW.
2406 */
2407void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p)
2408{
2409 u32 bgmap = get_mps_bg_map(adap, idx);
2410
2411#define GET_STAT(name) \
 2412 t4_read_reg64(adap, \
 2413 (is_t4(adap->params.chip) ? PORT_REG(idx, MPS_PORT_STAT_##name##_L) : \
 2414 T5_PORT_REG(idx, MPS_PORT_STAT_##name##_L)))
2415#define GET_STAT_COM(name) t4_read_reg64(adap, MPS_STAT_##name##_L)
2416
2417 p->tx_octets = GET_STAT(TX_PORT_BYTES);
2418 p->tx_frames = GET_STAT(TX_PORT_FRAMES);
2419 p->tx_bcast_frames = GET_STAT(TX_PORT_BCAST);
2420 p->tx_mcast_frames = GET_STAT(TX_PORT_MCAST);
2421 p->tx_ucast_frames = GET_STAT(TX_PORT_UCAST);
2422 p->tx_error_frames = GET_STAT(TX_PORT_ERROR);
2423 p->tx_frames_64 = GET_STAT(TX_PORT_64B);
2424 p->tx_frames_65_127 = GET_STAT(TX_PORT_65B_127B);
2425 p->tx_frames_128_255 = GET_STAT(TX_PORT_128B_255B);
2426 p->tx_frames_256_511 = GET_STAT(TX_PORT_256B_511B);
2427 p->tx_frames_512_1023 = GET_STAT(TX_PORT_512B_1023B);
2428 p->tx_frames_1024_1518 = GET_STAT(TX_PORT_1024B_1518B);
2429 p->tx_frames_1519_max = GET_STAT(TX_PORT_1519B_MAX);
2430 p->tx_drop = GET_STAT(TX_PORT_DROP);
2431 p->tx_pause = GET_STAT(TX_PORT_PAUSE);
2432 p->tx_ppp0 = GET_STAT(TX_PORT_PPP0);
2433 p->tx_ppp1 = GET_STAT(TX_PORT_PPP1);
2434 p->tx_ppp2 = GET_STAT(TX_PORT_PPP2);
2435 p->tx_ppp3 = GET_STAT(TX_PORT_PPP3);
2436 p->tx_ppp4 = GET_STAT(TX_PORT_PPP4);
2437 p->tx_ppp5 = GET_STAT(TX_PORT_PPP5);
2438 p->tx_ppp6 = GET_STAT(TX_PORT_PPP6);
2439 p->tx_ppp7 = GET_STAT(TX_PORT_PPP7);
2440
2441 p->rx_octets = GET_STAT(RX_PORT_BYTES);
2442 p->rx_frames = GET_STAT(RX_PORT_FRAMES);
2443 p->rx_bcast_frames = GET_STAT(RX_PORT_BCAST);
2444 p->rx_mcast_frames = GET_STAT(RX_PORT_MCAST);
2445 p->rx_ucast_frames = GET_STAT(RX_PORT_UCAST);
2446 p->rx_too_long = GET_STAT(RX_PORT_MTU_ERROR);
2447 p->rx_jabber = GET_STAT(RX_PORT_MTU_CRC_ERROR);
2448 p->rx_fcs_err = GET_STAT(RX_PORT_CRC_ERROR);
2449 p->rx_len_err = GET_STAT(RX_PORT_LEN_ERROR);
2450 p->rx_symbol_err = GET_STAT(RX_PORT_SYM_ERROR);
2451 p->rx_runt = GET_STAT(RX_PORT_LESS_64B);
2452 p->rx_frames_64 = GET_STAT(RX_PORT_64B);
2453 p->rx_frames_65_127 = GET_STAT(RX_PORT_65B_127B);
2454 p->rx_frames_128_255 = GET_STAT(RX_PORT_128B_255B);
2455 p->rx_frames_256_511 = GET_STAT(RX_PORT_256B_511B);
2456 p->rx_frames_512_1023 = GET_STAT(RX_PORT_512B_1023B);
2457 p->rx_frames_1024_1518 = GET_STAT(RX_PORT_1024B_1518B);
2458 p->rx_frames_1519_max = GET_STAT(RX_PORT_1519B_MAX);
2459 p->rx_pause = GET_STAT(RX_PORT_PAUSE);
2460 p->rx_ppp0 = GET_STAT(RX_PORT_PPP0);
2461 p->rx_ppp1 = GET_STAT(RX_PORT_PPP1);
2462 p->rx_ppp2 = GET_STAT(RX_PORT_PPP2);
2463 p->rx_ppp3 = GET_STAT(RX_PORT_PPP3);
2464 p->rx_ppp4 = GET_STAT(RX_PORT_PPP4);
2465 p->rx_ppp5 = GET_STAT(RX_PORT_PPP5);
2466 p->rx_ppp6 = GET_STAT(RX_PORT_PPP6);
2467 p->rx_ppp7 = GET_STAT(RX_PORT_PPP7);
2468
2469 p->rx_ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_DROP_FRAME) : 0;
2470 p->rx_ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_DROP_FRAME) : 0;
2471 p->rx_ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_DROP_FRAME) : 0;
2472 p->rx_ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_DROP_FRAME) : 0;
2473 p->rx_trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_TRUNC_FRAME) : 0;
2474 p->rx_trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_TRUNC_FRAME) : 0;
2475 p->rx_trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_TRUNC_FRAME) : 0;
2476 p->rx_trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_TRUNC_FRAME) : 0;
2477
2478#undef GET_STAT
2479#undef GET_STAT_COM
2480}
2481
2482/**
2483 * t4_wol_magic_enable - enable/disable magic packet WoL
2484 * @adap: the adapter
2485 * @port: the physical port index
2486 * @addr: MAC address expected in magic packets, %NULL to disable
2487 *
2488 * Enables/disables magic packet wake-on-LAN for the selected port.
2489 */
2490void t4_wol_magic_enable(struct adapter *adap, unsigned int port,
2491 const u8 *addr)
2492{
2493 u32 mag_id_reg_l, mag_id_reg_h, port_cfg_reg;
2494
 2495 if (is_t4(adap->params.chip)) {
2496 mag_id_reg_l = PORT_REG(port, XGMAC_PORT_MAGIC_MACID_LO);
2497 mag_id_reg_h = PORT_REG(port, XGMAC_PORT_MAGIC_MACID_HI);
 2498 port_cfg_reg = PORT_REG(port, XGMAC_PORT_CFG2_A);
2499 } else {
2500 mag_id_reg_l = T5_PORT_REG(port, MAC_PORT_MAGIC_MACID_LO);
2501 mag_id_reg_h = T5_PORT_REG(port, MAC_PORT_MAGIC_MACID_HI);
 2502 port_cfg_reg = T5_PORT_REG(port, MAC_PORT_CFG2_A);
2503 }
2504
 2505 if (addr) {
 2506 t4_write_reg(adap, mag_id_reg_l,
2507 (addr[2] << 24) | (addr[3] << 16) |
2508 (addr[4] << 8) | addr[5]);
 2509 t4_write_reg(adap, mag_id_reg_h,
2510 (addr[0] << 8) | addr[1]);
2511 }
2512 t4_set_reg_field(adap, port_cfg_reg, MAGICEN_F,
2513 addr ? MAGICEN_F : 0);
2514}
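/*
 * Illustrative use: arming magic-packet WoL on port 0 for a netdev's
 * station address, then disarming it by passing NULL:
 *
 *	t4_wol_magic_enable(adap, 0, netdev->dev_addr);
 *	...
 *	t4_wol_magic_enable(adap, 0, NULL);
 */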
2515
2516/**
2517 * t4_wol_pat_enable - enable/disable pattern-based WoL
2518 * @adap: the adapter
2519 * @port: the physical port index
2520 * @map: bitmap of which HW pattern filters to set
2521 * @mask0: byte mask for bytes 0-63 of a packet
2522 * @mask1: byte mask for bytes 64-127 of a packet
2523 * @crc: Ethernet CRC for selected bytes
2524 * @enable: enable/disable switch
2525 *
2526 * Sets the pattern filters indicated in @map to mask out the bytes
2527 * specified in @mask0/@mask1 in received packets and compare the CRC of
2528 * the resulting packet against @crc. If @enable is %true pattern-based
2529 * WoL is enabled, otherwise disabled.
2530 */
2531int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map,
2532 u64 mask0, u64 mask1, unsigned int crc, bool enable)
2533{
2534 int i;
2535 u32 port_cfg_reg;
2536
 2537 if (is_t4(adap->params.chip))
 2538 port_cfg_reg = PORT_REG(port, XGMAC_PORT_CFG2_A);
 2539 else
 2540 port_cfg_reg = T5_PORT_REG(port, MAC_PORT_CFG2_A);
2541
2542 if (!enable) {
 2543 t4_set_reg_field(adap, port_cfg_reg, PATEN_F, 0);
2544 return 0;
2545 }
2546 if (map > 0xff)
2547 return -EINVAL;
2548
 2549#define EPIO_REG(name) \
2550 (is_t4(adap->params.chip) ? \
2551 PORT_REG(port, XGMAC_PORT_EPIO_##name##_A) : \
2552 T5_PORT_REG(port, MAC_PORT_EPIO_##name##_A))
2553
2554 t4_write_reg(adap, EPIO_REG(DATA1), mask0 >> 32);
2555 t4_write_reg(adap, EPIO_REG(DATA2), mask1);
2556 t4_write_reg(adap, EPIO_REG(DATA3), mask1 >> 32);
2557
2558 for (i = 0; i < NWOL_PAT; i++, map >>= 1) {
2559 if (!(map & 1))
2560 continue;
2561
2562 /* write byte masks */
2563 t4_write_reg(adap, EPIO_REG(DATA0), mask0);
 2564 t4_write_reg(adap, EPIO_REG(OP), ADDRESS_V(i) | EPIOWR_F);
 2565 t4_read_reg(adap, EPIO_REG(OP)); /* flush */
 2566 if (t4_read_reg(adap, EPIO_REG(OP)) & SF_BUSY_F)
2567 return -ETIMEDOUT;
2568
2569 /* write CRC */
2570 t4_write_reg(adap, EPIO_REG(DATA0), crc);
 2571 t4_write_reg(adap, EPIO_REG(OP), ADDRESS_V(i + 32) | EPIOWR_F);
 2572 t4_read_reg(adap, EPIO_REG(OP)); /* flush */
 2573 if (t4_read_reg(adap, EPIO_REG(OP)) & SF_BUSY_F)
2574 return -ETIMEDOUT;
2575 }
2576#undef EPIO_REG
2577
 2578 t4_set_reg_field(adap, PORT_REG(port, XGMAC_PORT_CFG2_A), 0, PATEN_F);
2579 return 0;
2580}
2581
2582/* t4_mk_filtdelwr - create a delete filter WR
2583 * @ftid: the filter ID
2584 * @wr: the filter work request to populate
2585 * @qid: ingress queue to receive the delete notification
2586 *
2587 * Creates a filter work request to delete the supplied filter. If @qid is
2588 * negative the delete notification is suppressed.
2589 */
2590void t4_mk_filtdelwr(unsigned int ftid, struct fw_filter_wr *wr, int qid)
2591{
2592 memset(wr, 0, sizeof(*wr));
2593 wr->op_pkd = htonl(FW_WR_OP_V(FW_FILTER_WR));
2594 wr->len16_pkd = htonl(FW_WR_LEN16_V(sizeof(*wr) / 16));
2595 wr->tid_to_iq = htonl(FW_FILTER_WR_TID_V(ftid) |
2596 FW_FILTER_WR_NOREPLY_V(qid < 0));
2597 wr->del_filter_to_l2tix = htonl(FW_FILTER_WR_DEL_FILTER_F);
 2598 if (qid >= 0)
 2599 wr->rx_chan_rx_rpl_iq = htons(FW_FILTER_WR_RX_RPL_IQ_V(qid));
2600}
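/*
 * Usage sketch (hypothetical IDs): building a work request that deletes
 * filter 17 and sends the completion to ingress queue 5; the WR would then
 * be handed to the firmware over the driver's control queue:
 *
 *	struct fw_filter_wr wr;
 *
 *	t4_mk_filtdelwr(17, &wr, 5);
 *
 * Passing a negative qid suppresses the delete notification instead.
 */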
2601
 2602#define INIT_CMD(var, cmd, rd_wr) do { \
2603 (var).op_to_write = htonl(FW_CMD_OP_V(FW_##cmd##_CMD) | \
2604 FW_CMD_REQUEST_F | FW_CMD_##rd_wr##_F); \
2605 (var).retval_len16 = htonl(FW_LEN16(var)); \
2606} while (0)
2607
2608int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox,
2609 u32 addr, u32 val)
2610{
2611 struct fw_ldst_cmd c;
2612
2613 memset(&c, 0, sizeof(c));
2614 c.op_to_addrspace = htonl(FW_CMD_OP_V(FW_LDST_CMD) | FW_CMD_REQUEST_F |
2615 FW_CMD_WRITE_F |
 2616 FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_FIRMWARE));
2617 c.cycles_to_len16 = htonl(FW_LEN16(c));
2618 c.u.addrval.addr = htonl(addr);
2619 c.u.addrval.val = htonl(val);
2620
2621 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2622}
2623
2624/**
2625 * t4_mdio_rd - read a PHY register through MDIO
2626 * @adap: the adapter
2627 * @mbox: mailbox to use for the FW command
2628 * @phy_addr: the PHY address
2629 * @mmd: the PHY MMD to access (0 for clause 22 PHYs)
2630 * @reg: the register to read
2631 * @valp: where to store the value
2632 *
2633 * Issues a FW command through the given mailbox to read a PHY register.
2634 */
2635int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
2636 unsigned int mmd, unsigned int reg, u16 *valp)
2637{
2638 int ret;
2639 struct fw_ldst_cmd c;
2640
2641 memset(&c, 0, sizeof(c));
 2642 c.op_to_addrspace = htonl(FW_CMD_OP_V(FW_LDST_CMD) | FW_CMD_REQUEST_F |
 2643 FW_CMD_READ_F | FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_MDIO));
 2644 c.cycles_to_len16 = htonl(FW_LEN16(c));
2645 c.u.mdio.paddr_mmd = htons(FW_LDST_CMD_PADDR_V(phy_addr) |
2646 FW_LDST_CMD_MMD_V(mmd));
2647 c.u.mdio.raddr = htons(reg);
2648
2649 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
2650 if (ret == 0)
2651 *valp = ntohs(c.u.mdio.rval);
2652 return ret;
2653}
2654
2655/**
2656 * t4_mdio_wr - write a PHY register through MDIO
2657 * @adap: the adapter
2658 * @mbox: mailbox to use for the FW command
2659 * @phy_addr: the PHY address
2660 * @mmd: the PHY MMD to access (0 for clause 22 PHYs)
2661 * @reg: the register to write
 2662 * @val: value to write
2663 *
2664 * Issues a FW command through the given mailbox to write a PHY register.
2665 */
2666int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
2667 unsigned int mmd, unsigned int reg, u16 val)
2668{
2669 struct fw_ldst_cmd c;
2670
2671 memset(&c, 0, sizeof(c));
 2672 c.op_to_addrspace = htonl(FW_CMD_OP_V(FW_LDST_CMD) | FW_CMD_REQUEST_F |
 2673 FW_CMD_WRITE_F | FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_MDIO));
 2674 c.cycles_to_len16 = htonl(FW_LEN16(c));
2675 c.u.mdio.paddr_mmd = htons(FW_LDST_CMD_PADDR_V(phy_addr) |
2676 FW_LDST_CMD_MMD_V(mmd));
2677 c.u.mdio.raddr = htons(reg);
2678 c.u.mdio.rval = htons(val);
2679
2680 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2681}
2682
2683/**
2684 * t4_sge_decode_idma_state - decode the idma state
 2685 * @adapter: the adapter
2686 * @state: the state idma is stuck in
2687 */
2688void t4_sge_decode_idma_state(struct adapter *adapter, int state)
2689{
2690 static const char * const t4_decode[] = {
2691 "IDMA_IDLE",
2692 "IDMA_PUSH_MORE_CPL_FIFO",
2693 "IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO",
2694 "Not used",
2695 "IDMA_PHYSADDR_SEND_PCIEHDR",
2696 "IDMA_PHYSADDR_SEND_PAYLOAD_FIRST",
2697 "IDMA_PHYSADDR_SEND_PAYLOAD",
2698 "IDMA_SEND_FIFO_TO_IMSG",
2699 "IDMA_FL_REQ_DATA_FL_PREP",
2700 "IDMA_FL_REQ_DATA_FL",
2701 "IDMA_FL_DROP",
2702 "IDMA_FL_H_REQ_HEADER_FL",
2703 "IDMA_FL_H_SEND_PCIEHDR",
2704 "IDMA_FL_H_PUSH_CPL_FIFO",
2705 "IDMA_FL_H_SEND_CPL",
2706 "IDMA_FL_H_SEND_IP_HDR_FIRST",
2707 "IDMA_FL_H_SEND_IP_HDR",
2708 "IDMA_FL_H_REQ_NEXT_HEADER_FL",
2709 "IDMA_FL_H_SEND_NEXT_PCIEHDR",
2710 "IDMA_FL_H_SEND_IP_HDR_PADDING",
2711 "IDMA_FL_D_SEND_PCIEHDR",
2712 "IDMA_FL_D_SEND_CPL_AND_IP_HDR",
2713 "IDMA_FL_D_REQ_NEXT_DATA_FL",
2714 "IDMA_FL_SEND_PCIEHDR",
2715 "IDMA_FL_PUSH_CPL_FIFO",
2716 "IDMA_FL_SEND_CPL",
2717 "IDMA_FL_SEND_PAYLOAD_FIRST",
2718 "IDMA_FL_SEND_PAYLOAD",
2719 "IDMA_FL_REQ_NEXT_DATA_FL",
2720 "IDMA_FL_SEND_NEXT_PCIEHDR",
2721 "IDMA_FL_SEND_PADDING",
2722 "IDMA_FL_SEND_COMPLETION_TO_IMSG",
2723 "IDMA_FL_SEND_FIFO_TO_IMSG",
2724 "IDMA_FL_REQ_DATAFL_DONE",
2725 "IDMA_FL_REQ_HEADERFL_DONE",
2726 };
2727 static const char * const t5_decode[] = {
2728 "IDMA_IDLE",
2729 "IDMA_ALMOST_IDLE",
2730 "IDMA_PUSH_MORE_CPL_FIFO",
2731 "IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO",
2732 "IDMA_SGEFLRFLUSH_SEND_PCIEHDR",
2733 "IDMA_PHYSADDR_SEND_PCIEHDR",
2734 "IDMA_PHYSADDR_SEND_PAYLOAD_FIRST",
2735 "IDMA_PHYSADDR_SEND_PAYLOAD",
2736 "IDMA_SEND_FIFO_TO_IMSG",
2737 "IDMA_FL_REQ_DATA_FL",
2738 "IDMA_FL_DROP",
2739 "IDMA_FL_DROP_SEND_INC",
2740 "IDMA_FL_H_REQ_HEADER_FL",
2741 "IDMA_FL_H_SEND_PCIEHDR",
2742 "IDMA_FL_H_PUSH_CPL_FIFO",
2743 "IDMA_FL_H_SEND_CPL",
2744 "IDMA_FL_H_SEND_IP_HDR_FIRST",
2745 "IDMA_FL_H_SEND_IP_HDR",
2746 "IDMA_FL_H_REQ_NEXT_HEADER_FL",
2747 "IDMA_FL_H_SEND_NEXT_PCIEHDR",
2748 "IDMA_FL_H_SEND_IP_HDR_PADDING",
2749 "IDMA_FL_D_SEND_PCIEHDR",
2750 "IDMA_FL_D_SEND_CPL_AND_IP_HDR",
2751 "IDMA_FL_D_REQ_NEXT_DATA_FL",
2752 "IDMA_FL_SEND_PCIEHDR",
2753 "IDMA_FL_PUSH_CPL_FIFO",
2754 "IDMA_FL_SEND_CPL",
2755 "IDMA_FL_SEND_PAYLOAD_FIRST",
2756 "IDMA_FL_SEND_PAYLOAD",
2757 "IDMA_FL_REQ_NEXT_DATA_FL",
2758 "IDMA_FL_SEND_NEXT_PCIEHDR",
2759 "IDMA_FL_SEND_PADDING",
2760 "IDMA_FL_SEND_COMPLETION_TO_IMSG",
2761 };
2762 static const u32 sge_regs[] = {
2763 SGE_DEBUG_DATA_LOW_INDEX_2_A,
2764 SGE_DEBUG_DATA_LOW_INDEX_3_A,
2765 SGE_DEBUG_DATA_HIGH_INDEX_10_A,
2766 };
2767 const char **sge_idma_decode;
2768 int sge_idma_decode_nstates;
2769 int i;
2770
2771 if (is_t4(adapter->params.chip)) {
2772 sge_idma_decode = (const char **)t4_decode;
2773 sge_idma_decode_nstates = ARRAY_SIZE(t4_decode);
2774 } else {
2775 sge_idma_decode = (const char **)t5_decode;
2776 sge_idma_decode_nstates = ARRAY_SIZE(t5_decode);
2777 }
2778
2779 if (state < sge_idma_decode_nstates)
2780 CH_WARN(adapter, "idma state %s\n", sge_idma_decode[state]);
2781 else
2782 CH_WARN(adapter, "idma state %d unknown\n", state);
2783
2784 for (i = 0; i < ARRAY_SIZE(sge_regs); i++)
2785 CH_WARN(adapter, "SGE register %#x value %#x\n",
2786 sge_regs[i], t4_read_reg(adapter, sge_regs[i]));
2787}
2788
 2789/**
2790 * t4_fw_hello - establish communication with FW
2791 * @adap: the adapter
2792 * @mbox: mailbox to use for the FW command
2793 * @evt_mbox: mailbox to receive async FW events
2794 * @master: specifies the caller's willingness to be the device master
2795 * @state: returns the current device state (if non-NULL)
 2796 *
2797 * Issues a command to establish communication with FW. Returns either
2798 * an error (negative integer) or the mailbox of the Master PF.
2799 */
2800int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox,
2801 enum dev_master master, enum dev_state *state)
2802{
2803 int ret;
2804 struct fw_hello_cmd c;
2805 u32 v;
2806 unsigned int master_mbox;
2807 int retries = FW_CMD_HELLO_RETRIES;
 2808
2809retry:
2810 memset(&c, 0, sizeof(c));
 2811 INIT_CMD(c, HELLO, WRITE);
 2812 c.err_to_clearinit = htonl(
2813 FW_HELLO_CMD_MASTERDIS_V(master == MASTER_CANT) |
2814 FW_HELLO_CMD_MASTERFORCE_V(master == MASTER_MUST) |
2815 FW_HELLO_CMD_MBMASTER_V(master == MASTER_MUST ? mbox :
2816 FW_HELLO_CMD_MBMASTER_M) |
2817 FW_HELLO_CMD_MBASYNCNOT_V(evt_mbox) |
2818 FW_HELLO_CMD_STAGE_V(fw_hello_cmd_stage_os) |
2819 FW_HELLO_CMD_CLEARINIT_F);
 2820
2821 /*
2822 * Issue the HELLO command to the firmware. If it's not successful
2823 * but indicates that we got a "busy" or "timeout" condition, retry
2824 * the HELLO until we exhaust our retry limit. If we do exceed our
2825 * retry limit, check to see if the firmware left us any error
2826 * information and report that if so.
 2827 */
 2828 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
2829 if (ret < 0) {
2830 if ((ret == -EBUSY || ret == -ETIMEDOUT) && retries-- > 0)
2831 goto retry;
 2832 if (t4_read_reg(adap, PCIE_FW_A) & PCIE_FW_ERR_F)
 2833 t4_report_fw_error(adap);
2834 return ret;
2835 }
2836
 2837 v = ntohl(c.err_to_clearinit);
 2838 master_mbox = FW_HELLO_CMD_MBMASTER_G(v);
 2839 if (state) {
 2840 if (v & FW_HELLO_CMD_ERR_F)
 2841 *state = DEV_STATE_ERR;
 2842 else if (v & FW_HELLO_CMD_INIT_F)
 2843 *state = DEV_STATE_INIT;
2844 else
2845 *state = DEV_STATE_UNINIT;
2846 }
636f9d37
VP
2847
2848 /*
2849 * If we're not the Master PF then we need to wait around for the
2850 * Master PF Driver to finish setting up the adapter.
2851 *
2852 * Note that we also do this wait if we're a non-Master-capable PF and
2853 * there is no current Master PF; a Master PF may show up momentarily
2854 * and we wouldn't want to fail pointlessly. (This can happen when an
2855 * OS loads lots of different drivers rapidly at the same time). In
2856 * this case, the Master PF returned by the firmware will be
 2857 * PCIE_FW_MASTER_M so the test below will work ...
 2858 */
 2859 if ((v & (FW_HELLO_CMD_ERR_F|FW_HELLO_CMD_INIT_F)) == 0 &&
2860 master_mbox != mbox) {
2861 int waiting = FW_CMD_HELLO_TIMEOUT;
2862
2863 /*
2864 * Wait for the firmware to either indicate an error or
2865 * initialized state. If we see either of these we bail out
2866 * and report the issue to the caller. If we exhaust the
2867 * "hello timeout" and we haven't exhausted our retries, try
2868 * again. Otherwise bail with a timeout error.
2869 */
2870 for (;;) {
2871 u32 pcie_fw;
2872
2873 msleep(50);
2874 waiting -= 50;
2875
2876 /*
 2877 * If neither Error nor Initialized is indicated
 2878 * by the firmware, keep waiting till we exhaust our
2879 * timeout ... and then retry if we haven't exhausted
2880 * our retries ...
2881 */
2882 pcie_fw = t4_read_reg(adap, PCIE_FW_A);
2883 if (!(pcie_fw & (PCIE_FW_ERR_F|PCIE_FW_INIT_F))) {
2884 if (waiting <= 0) {
2885 if (retries-- > 0)
2886 goto retry;
2887
2888 return -ETIMEDOUT;
2889 }
2890 continue;
2891 }
2892
2893 /*
 2894 * We either have an Error or Initialized condition;
 2895 * report errors preferentially.
2896 */
2897 if (state) {
 2898 if (pcie_fw & PCIE_FW_ERR_F)
 2899 *state = DEV_STATE_ERR;
 2900 else if (pcie_fw & PCIE_FW_INIT_F)
2901 *state = DEV_STATE_INIT;
2902 }
2903
2904 /*
2905 * If we arrived before a Master PF was selected and
2906 * there's not a valid Master PF, grab its identity
2907 * for our caller.
2908 */
 2909 if (master_mbox == PCIE_FW_MASTER_M &&
 2910 (pcie_fw & PCIE_FW_MASTER_VLD_F))
 2911 master_mbox = PCIE_FW_MASTER_G(pcie_fw);
2912 break;
2913 }
2914 }
2915
2916 return master_mbox;
2917}
2918
2919/**
2920 * t4_fw_bye - end communication with FW
2921 * @adap: the adapter
2922 * @mbox: mailbox to use for the FW command
2923 *
2924 * Issues a command to terminate communication with FW.
2925 */
2926int t4_fw_bye(struct adapter *adap, unsigned int mbox)
2927{
2928 struct fw_bye_cmd c;
2929
 2930 memset(&c, 0, sizeof(c));
2931 INIT_CMD(c, BYE, WRITE);
2932 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2933}
2934
2935/**
 2936 * t4_early_init - ask FW to initialize the device
2937 * @adap: the adapter
2938 * @mbox: mailbox to use for the FW command
2939 *
2940 * Issues a command to FW to partially initialize the device. This
2941 * performs initialization that generally doesn't depend on user input.
2942 */
2943int t4_early_init(struct adapter *adap, unsigned int mbox)
2944{
2945 struct fw_initialize_cmd c;
2946
 2947 memset(&c, 0, sizeof(c));
2948 INIT_CMD(c, INITIALIZE, WRITE);
2949 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2950}
2951
2952/**
2953 * t4_fw_reset - issue a reset to FW
2954 * @adap: the adapter
2955 * @mbox: mailbox to use for the FW command
2956 * @reset: specifies the type of reset to perform
2957 *
2958 * Issues a reset command of the specified type to FW.
2959 */
2960int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset)
2961{
2962 struct fw_reset_cmd c;
2963
 2964 memset(&c, 0, sizeof(c));
2965 INIT_CMD(c, RESET, WRITE);
2966 c.val = htonl(reset);
2967 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2968}
2969
2970/**
2971 * t4_fw_halt - issue a reset/halt to FW and put uP into RESET
2972 * @adap: the adapter
2973 * @mbox: mailbox to use for the FW RESET command (if desired)
2974 * @force: force uP into RESET even if FW RESET command fails
2975 *
2976 * Issues a RESET command to firmware (if desired) with a HALT indication
2977 * and then puts the microprocessor into RESET state. The RESET command
2978 * will only be issued if a legitimate mailbox is provided (mbox <=
 2979 * PCIE_FW_MASTER_M).
2980 *
2981 * This is generally used in order for the host to safely manipulate the
2982 * adapter without fear of conflicting with whatever the firmware might
2983 * be doing. The only way out of this state is to RESTART the firmware
2984 * ...
2985 */
 2986 static int t4_fw_halt(struct adapter *adap, unsigned int mbox, int force)
2987{
2988 int ret = 0;
2989
2990 /*
2991 * If a legitimate mailbox is provided, issue a RESET command
2992 * with a HALT indication.
2993 */
 2994 if (mbox <= PCIE_FW_MASTER_M) {
2995 struct fw_reset_cmd c;
2996
2997 memset(&c, 0, sizeof(c));
2998 INIT_CMD(c, RESET, WRITE);
 2999 c.val = htonl(PIORST_F | PIORSTMODE_F);
 3000 c.halt_pkd = htonl(FW_RESET_CMD_HALT_F);
3001 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
3002 }
3003
3004 /*
3005 * Normally we won't complete the operation if the firmware RESET
3006 * command fails but if our caller insists we'll go ahead and put the
3007 * uP into RESET. This can be useful if the firmware is hung or even
3008 * missing ... We'll have to take the risk of putting the uP into
3009 * RESET without the cooperation of firmware in that case.
3010 *
3011 * We also force the firmware's HALT flag to be on in case we bypassed
3012 * the firmware RESET command above or we're dealing with old firmware
3013 * which doesn't have the HALT capability. This will serve as a flag
3014 * for the incoming firmware to know that it's coming out of a HALT
3015 * rather than a RESET ... if it's new enough to understand that ...
3016 */
3017 if (ret == 0 || force) {
 3018 t4_set_reg_field(adap, CIM_BOOT_CFG_A, UPCRST_F, UPCRST_F);
 3019 t4_set_reg_field(adap, PCIE_FW_A, PCIE_FW_HALT_F,
 3020 PCIE_FW_HALT_F);
3021 }
3022
3023 /*
3024 * And we always return the result of the firmware RESET command
3025 * even when we force the uP into RESET ...
3026 */
3027 return ret;
3028}
3029
3030/**
3031 * t4_fw_restart - restart the firmware by taking the uP out of RESET
3032 * @adap: the adapter
3033 * @reset: if we want to do a RESET to restart things
3034 *
3035 * Restart firmware previously halted by t4_fw_halt(). On successful
3036 * return the previous PF Master remains as the new PF Master and there
3037 * is no need to issue a new HELLO command, etc.
3038 *
3039 * We do this in two ways:
3040 *
3041 * 1. If we're dealing with newer firmware we'll simply want to take
3042 * the chip's microprocessor out of RESET. This will cause the
3043 * firmware to start up from its start vector. And then we'll loop
3044 * until the firmware indicates it's started again (PCIE_FW.HALT
3045 * reset to 0) or we timeout.
3046 *
3047 * 2. If we're dealing with older firmware then we'll need to RESET
3048 * the chip since older firmware won't recognize the PCIE_FW.HALT
3049 * flag and automatically RESET itself on startup.
3050 */
 3051 static int t4_fw_restart(struct adapter *adap, unsigned int mbox, int reset)
3052{
3053 if (reset) {
3054 /*
3055 * Since we're directing the RESET instead of the firmware
3056 * doing it automatically, we need to clear the PCIE_FW.HALT
3057 * bit.
3058 */
 3059 t4_set_reg_field(adap, PCIE_FW_A, PCIE_FW_HALT_F, 0);
3060
3061 /*
3062 * If we've been given a valid mailbox, first try to get the
3063 * firmware to do the RESET. If that works, great and we can
3064 * return success. Otherwise, if we haven't been given a
3065 * valid mailbox or the RESET command failed, fall back to
3066 * hitting the chip with a hammer.
3067 */
 3068 if (mbox <= PCIE_FW_MASTER_M) {
 3069 t4_set_reg_field(adap, CIM_BOOT_CFG_A, UPCRST_F, 0);
3070 msleep(100);
3071 if (t4_fw_reset(adap, mbox,
 3072 PIORST_F | PIORSTMODE_F) == 0)
3073 return 0;
3074 }
3075
 3076 t4_write_reg(adap, PL_RST_A, PIORST_F | PIORSTMODE_F);
3077 msleep(2000);
3078 } else {
3079 int ms;
3080
 3081 t4_set_reg_field(adap, CIM_BOOT_CFG_A, UPCRST_F, 0);
 3082 for (ms = 0; ms < FW_CMD_MAX_TIMEOUT; ) {
 3083 if (!(t4_read_reg(adap, PCIE_FW_A) & PCIE_FW_HALT_F))
3084 return 0;
3085 msleep(100);
3086 ms += 100;
3087 }
3088 return -ETIMEDOUT;
3089 }
3090 return 0;
3091}

/**
 * t4_fw_upgrade - perform all of the steps necessary to upgrade FW
 * @adap: the adapter
 * @mbox: mailbox to use for the FW RESET command (if desired)
 * @fw_data: the firmware image to write
 * @size: image size
 * @force: force upgrade even if firmware doesn't cooperate
 *
 * Perform all of the steps necessary for upgrading an adapter's
 * firmware image.  Normally this requires the cooperation of the
 * existing firmware in order to halt all existing activities
 * but if an invalid mailbox token is passed in we skip that step
 * (though we'll still put the adapter microprocessor into RESET in
 * that case).
 *
 * On successful return the new firmware will have been loaded and
 * the adapter will have been fully RESET losing all previous setup
 * state.  On unsuccessful return the adapter may be completely hosed ...
 * positive errno indicates that the adapter is ~probably~ intact, a
 * negative errno indicates that things are looking bad ...
 */
int t4_fw_upgrade(struct adapter *adap, unsigned int mbox,
		  const u8 *fw_data, unsigned int size, int force)
{
	const struct fw_hdr *fw_hdr = (const struct fw_hdr *)fw_data;
	int reset, ret;

	if (!t4_fw_matches_chip(adap, fw_hdr))
		return -EINVAL;

	ret = t4_fw_halt(adap, mbox, force);
	if (ret < 0 && !force)
		return ret;

	ret = t4_load_fw(adap, fw_data, size);
	if (ret < 0)
		return ret;

	/*
	 * Older versions of the firmware don't understand the new
	 * PCIE_FW.HALT flag and so won't know to perform a RESET when they
	 * restart.  So for newly loaded older firmware we'll have to do the
	 * RESET for it so it starts up on a clean slate.  We can tell if
	 * the newly loaded firmware will handle this right by checking
	 * its header flags to see if it advertises the capability.
	 */
	reset = ((ntohl(fw_hdr->flags) & FW_HDR_FLAGS_RESET_HALT) == 0);
	return t4_fw_restart(adap, mbox, reset);
}
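
/*
 * Illustrative sketch only (not part of the driver): one way a caller
 * might drive t4_fw_upgrade() with an image obtained through the kernel
 * firmware loader.  The firmware file name and the pdev_dev field used
 * here are assumptions for the example, not a reference.
 */
static int example_upgrade_fw(struct adapter *adap, unsigned int mbox)
{
	const struct firmware *fw;
	int ret;

	ret = request_firmware(&fw, "cxgb4/t4fw.bin", adap->pdev_dev);
	if (ret < 0)
		return ret;

	/* force = 0: give up if the running firmware refuses to halt */
	ret = t4_fw_upgrade(adap, mbox, fw->data, fw->size, 0);
	release_firmware(fw);
	return ret;
}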

/**
 * t4_fixup_host_params - fix up host-dependent parameters
 * @adap: the adapter
 * @page_size: the host's Base Page Size
 * @cache_line_size: the host's Cache Line Size
 *
 * Various registers in T4 contain values which are dependent on the
 * host's Base Page and Cache Line Sizes.  This function will fix all of
 * those registers with the appropriate values as passed in ...
 */
int t4_fixup_host_params(struct adapter *adap, unsigned int page_size,
			 unsigned int cache_line_size)
{
	unsigned int page_shift = fls(page_size) - 1;
	unsigned int sge_hps = page_shift - 10;
	unsigned int stat_len = cache_line_size > 64 ? 128 : 64;
	unsigned int fl_align = cache_line_size < 32 ? 32 : cache_line_size;
	unsigned int fl_align_log = fls(fl_align) - 1;

	t4_write_reg(adap, SGE_HOST_PAGE_SIZE_A,
		     HOSTPAGESIZEPF0_V(sge_hps) |
		     HOSTPAGESIZEPF1_V(sge_hps) |
		     HOSTPAGESIZEPF2_V(sge_hps) |
		     HOSTPAGESIZEPF3_V(sge_hps) |
		     HOSTPAGESIZEPF4_V(sge_hps) |
		     HOSTPAGESIZEPF5_V(sge_hps) |
		     HOSTPAGESIZEPF6_V(sge_hps) |
		     HOSTPAGESIZEPF7_V(sge_hps));

	if (is_t4(adap->params.chip)) {
		t4_set_reg_field(adap, SGE_CONTROL_A,
				 INGPADBOUNDARY_V(INGPADBOUNDARY_M) |
				 EGRSTATUSPAGESIZE_F,
				 INGPADBOUNDARY_V(fl_align_log -
						  INGPADBOUNDARY_SHIFT_X) |
				 EGRSTATUSPAGESIZE_V(stat_len != 64));
	} else {
		/* T5 introduced the separation of the Free List Padding and
		 * Packing Boundaries.  Thus, we can select a smaller Padding
		 * Boundary to avoid uselessly chewing up PCIe Link and Memory
		 * Bandwidth, and use a Packing Boundary which is large enough
		 * to avoid false sharing between CPUs, etc.
		 *
		 * For the PCI Link, the smaller the Padding Boundary the
		 * better.  For the Memory Controller, a smaller Padding
		 * Boundary is better until we cross under the Memory Line
		 * Size (the minimum unit of transfer to/from Memory).  If we
		 * have a Padding Boundary which is smaller than the Memory
		 * Line Size, that'll involve a Read-Modify-Write cycle on the
		 * Memory Controller which is never good.  For T5 the smallest
		 * Padding Boundary which we can select is 32 bytes which is
		 * larger than any known Memory Controller Line Size so we'll
		 * use that.
		 *
		 * T5 has a different interpretation of the "0" value for the
		 * Packing Boundary.  This corresponds to 16 bytes instead of
		 * the expected 32 bytes.  We never have a Packing Boundary
		 * less than 32 bytes so we can't use that special value but
		 * on the other hand, if we wanted 32 bytes, the best we can
		 * really do is 64 bytes.
		 */
		if (fl_align <= 32) {
			fl_align = 64;
			fl_align_log = 6;
		}
		t4_set_reg_field(adap, SGE_CONTROL_A,
				 INGPADBOUNDARY_V(INGPADBOUNDARY_M) |
				 EGRSTATUSPAGESIZE_F,
				 INGPADBOUNDARY_V(INGPCIEBOUNDARY_32B_X) |
				 EGRSTATUSPAGESIZE_V(stat_len != 64));
		t4_set_reg_field(adap, SGE_CONTROL2_A,
				 INGPACKBOUNDARY_V(INGPACKBOUNDARY_M),
				 INGPACKBOUNDARY_V(fl_align_log -
						   INGPACKBOUNDARY_SHIFT_X));
	}
	/*
	 * Adjust various SGE Free List Host Buffer Sizes.
	 *
	 * This is something of a crock since we're using fixed indices into
	 * the array which are also known by the sge.c code and the T4
	 * Firmware Configuration File.  We need to come up with a much better
	 * approach to managing this array.  For now, the first four entries
	 * are:
	 *
	 * 0: Host Page Size
	 * 1: 64KB
	 * 2: Buffer size corresponding to 1500 byte MTU (unpacked mode)
	 * 3: Buffer size corresponding to 9000 byte MTU (unpacked mode)
	 *
	 * For the single-MTU buffers in unpacked mode we need to include
	 * space for the SGE Control Packet Shift, 14 byte Ethernet header,
	 * possible 4 byte VLAN tag, all rounded up to the next Ingress Packet
	 * Padding Boundary.  All of these are accommodated in the Factory
	 * Default Firmware Configuration File but we need to adjust it for
	 * this host's cache line size.
	 */
	t4_write_reg(adap, SGE_FL_BUFFER_SIZE0_A, page_size);
	t4_write_reg(adap, SGE_FL_BUFFER_SIZE2_A,
		     (t4_read_reg(adap, SGE_FL_BUFFER_SIZE2_A) + fl_align - 1)
		     & ~(fl_align - 1));
	t4_write_reg(adap, SGE_FL_BUFFER_SIZE3_A,
		     (t4_read_reg(adap, SGE_FL_BUFFER_SIZE3_A) + fl_align - 1)
		     & ~(fl_align - 1));

	t4_write_reg(adap, ULP_RX_TDDP_PSZ_A, HPZ0_V(page_shift - 12));

	return 0;
}
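
/*
 * A minimal sketch (not used by the driver) of the round-up idiom
 * applied to the free-list buffer sizes above: for a power-of-2
 * alignment a, (x + a - 1) & ~(a - 1) rounds x up to the next multiple
 * of a.  E.g. with a 64-byte cache line, 1601 rounds up to 1664 while
 * 1600 stays 1600.  The helper name is hypothetical.
 */
static inline unsigned int example_fl_align_up(unsigned int x,
					       unsigned int a)
{
	return (x + a - 1) & ~(a - 1);	/* a must be a power of 2 */
}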

/**
 * t4_fw_initialize - ask FW to initialize the device
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 *
 * Issues a command to FW to partially initialize the device.  This
 * performs initialization that generally doesn't depend on user input.
 */
int t4_fw_initialize(struct adapter *adap, unsigned int mbox)
{
	struct fw_initialize_cmd c;

	memset(&c, 0, sizeof(c));
	INIT_CMD(c, INITIALIZE, WRITE);
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}

/**
 * t4_query_params - query FW or device parameters
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @pf: the PF
 * @vf: the VF
 * @nparams: the number of parameters
 * @params: the parameter names
 * @val: the parameter values
 *
 * Reads the value of FW or device parameters.  Up to 7 parameters can be
 * queried at once.
 */
int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
		    unsigned int vf, unsigned int nparams, const u32 *params,
		    u32 *val)
{
	int i, ret;
	struct fw_params_cmd c;
	__be32 *p = &c.param[0].mnem;

	if (nparams > 7)
		return -EINVAL;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn = htonl(FW_CMD_OP_V(FW_PARAMS_CMD) | FW_CMD_REQUEST_F |
			    FW_CMD_READ_F | FW_PARAMS_CMD_PFN_V(pf) |
			    FW_PARAMS_CMD_VFN_V(vf));
	c.retval_len16 = htonl(FW_LEN16(c));
	for (i = 0; i < nparams; i++, p += 2)
		*p = htonl(*params++);

	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
	if (ret == 0)
		for (i = 0, p = &c.param[0].val; i < nparams; i++, p += 2)
			*val++ = ntohl(*p);
	return ret;
}
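
/*
 * Illustrative sketch only: querying a single device parameter (the
 * port vector) with t4_query_params().  The mnemonic is assumed to be
 * built from the FW_PARAMS_* macros of t4fw_api.h; treat the exact
 * encoding here as an example rather than a reference.
 */
static int example_query_portvec(struct adapter *adap, unsigned int mbox,
				 u32 *portvec)
{
	u32 param = FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
		    FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PORTVEC);

	/* pf = vf = 0: query on behalf of the physical function itself */
	return t4_query_params(adap, mbox, 0, 0, 1, &param, portvec);
}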

/**
 * t4_set_params_nosleep - sets FW or device parameters
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @pf: the PF
 * @vf: the VF
 * @nparams: the number of parameters
 * @params: the parameter names
 * @val: the parameter values
 *
 * Sets the value of FW or device parameters.  Up to 7 parameters can be
 * specified at once.  Unlike t4_set_params(), this variant never sleeps.
 */
int t4_set_params_nosleep(struct adapter *adap, unsigned int mbox,
			  unsigned int pf, unsigned int vf,
			  unsigned int nparams, const u32 *params,
			  const u32 *val)
{
	struct fw_params_cmd c;
	__be32 *p = &c.param[0].mnem;

	if (nparams > 7)
		return -EINVAL;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_PARAMS_CMD) |
				  FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
				  FW_PARAMS_CMD_PFN_V(pf) |
				  FW_PARAMS_CMD_VFN_V(vf));
	c.retval_len16 = cpu_to_be32(FW_LEN16(c));

	while (nparams--) {
		*p++ = cpu_to_be32(*params++);
		*p++ = cpu_to_be32(*val++);
	}

	return t4_wr_mbox_ns(adap, mbox, &c, sizeof(c), NULL);
}

/**
 * t4_set_params - sets FW or device parameters
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @pf: the PF
 * @vf: the VF
 * @nparams: the number of parameters
 * @params: the parameter names
 * @val: the parameter values
 *
 * Sets the value of FW or device parameters.  Up to 7 parameters can be
 * specified at once.
 */
int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
		  unsigned int vf, unsigned int nparams, const u32 *params,
		  const u32 *val)
{
	struct fw_params_cmd c;
	__be32 *p = &c.param[0].mnem;

	if (nparams > 7)
		return -EINVAL;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn = htonl(FW_CMD_OP_V(FW_PARAMS_CMD) | FW_CMD_REQUEST_F |
			    FW_CMD_WRITE_F | FW_PARAMS_CMD_PFN_V(pf) |
			    FW_PARAMS_CMD_VFN_V(vf));
	c.retval_len16 = htonl(FW_LEN16(c));
	while (nparams--) {
		*p++ = htonl(*params++);
		*p++ = htonl(*val++);
	}

	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}

/**
 * t4_cfg_pfvf - configure PF/VF resource limits
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @pf: the PF being configured
 * @vf: the VF being configured
 * @txq: the max number of egress queues
 * @txq_eth_ctrl: the max number of egress Ethernet or control queues
 * @rxqi: the max number of interrupt-capable ingress queues
 * @rxq: the max number of interruptless ingress queues
 * @tc: the PCI traffic class
 * @vi: the max number of virtual interfaces
 * @cmask: the channel access rights mask for the PF/VF
 * @pmask: the port access rights mask for the PF/VF
 * @nexact: the maximum number of exact MPS filters
 * @rcaps: read capabilities
 * @wxcaps: write/execute capabilities
 *
 * Configures resource limits and capabilities for a physical or virtual
 * function.
 */
int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf,
		unsigned int vf, unsigned int txq, unsigned int txq_eth_ctrl,
		unsigned int rxqi, unsigned int rxq, unsigned int tc,
		unsigned int vi, unsigned int cmask, unsigned int pmask,
		unsigned int nexact, unsigned int rcaps, unsigned int wxcaps)
{
	struct fw_pfvf_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn = htonl(FW_CMD_OP_V(FW_PFVF_CMD) | FW_CMD_REQUEST_F |
			    FW_CMD_WRITE_F | FW_PFVF_CMD_PFN_V(pf) |
			    FW_PFVF_CMD_VFN_V(vf));
	c.retval_len16 = htonl(FW_LEN16(c));
	c.niqflint_niq = htonl(FW_PFVF_CMD_NIQFLINT_V(rxqi) |
			       FW_PFVF_CMD_NIQ_V(rxq));
	c.type_to_neq = htonl(FW_PFVF_CMD_CMASK_V(cmask) |
			      FW_PFVF_CMD_PMASK_V(pmask) |
			      FW_PFVF_CMD_NEQ_V(txq));
	c.tc_to_nexactf = htonl(FW_PFVF_CMD_TC_V(tc) | FW_PFVF_CMD_NVI_V(vi) |
				FW_PFVF_CMD_NEXACTF_V(nexact));
	c.r_caps_to_nethctrl = htonl(FW_PFVF_CMD_R_CAPS_V(rcaps) |
				     FW_PFVF_CMD_WX_CAPS_V(wxcaps) |
				     FW_PFVF_CMD_NETHCTRL_V(txq_eth_ctrl));
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}

/**
 * t4_alloc_vi - allocate a virtual interface
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @port: physical port associated with the VI
 * @pf: the PF owning the VI
 * @vf: the VF owning the VI
 * @nmac: number of MAC addresses needed (1 to 5)
 * @mac: the MAC addresses of the VI
 * @rss_size: size of RSS table slice associated with this VI
 *
 * Allocates a virtual interface for the given physical port.  If @mac is
 * not %NULL it contains the MAC addresses of the VI as assigned by FW.
 * @mac should be large enough to hold @nmac Ethernet addresses, they are
 * stored consecutively so the space needed is @nmac * 6 bytes.
 * Returns a negative error number or the non-negative VI id.
 */
int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
		unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac,
		unsigned int *rss_size)
{
	int ret;
	struct fw_vi_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn = htonl(FW_CMD_OP_V(FW_VI_CMD) | FW_CMD_REQUEST_F |
			    FW_CMD_WRITE_F | FW_CMD_EXEC_F |
			    FW_VI_CMD_PFN_V(pf) | FW_VI_CMD_VFN_V(vf));
	c.alloc_to_len16 = htonl(FW_VI_CMD_ALLOC_F | FW_LEN16(c));
	c.portid_pkd = FW_VI_CMD_PORTID_V(port);
	c.nmac = nmac - 1;

	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
	if (ret)
		return ret;

	if (mac) {
		memcpy(mac, c.mac, sizeof(c.mac));
		switch (nmac) {
		case 5:
			memcpy(mac + 24, c.nmac3, sizeof(c.nmac3));
			/* fall through */
		case 4:
			memcpy(mac + 18, c.nmac2, sizeof(c.nmac2));
			/* fall through */
		case 3:
			memcpy(mac + 12, c.nmac1, sizeof(c.nmac1));
			/* fall through */
		case 2:
			memcpy(mac + 6, c.nmac0, sizeof(c.nmac0));
		}
	}
	if (rss_size)
		*rss_size = FW_VI_CMD_RSSSIZE_G(ntohs(c.rsssize_pkd));
	return FW_VI_CMD_VIID_G(ntohs(c.type_viid));
}
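
/*
 * Illustrative sketch only: allocating a VI with two MAC addresses.
 * Per the kernel-doc above, @mac must provide nmac * ETH_ALEN bytes,
 * with the addresses stored back to back.  The function name is
 * hypothetical.
 */
static int example_alloc_vi(struct adapter *adap, unsigned int mbox,
			    unsigned int port, unsigned int pf,
			    unsigned int vf)
{
	u8 mac[2 * ETH_ALEN];	/* mac[0..5] and mac[6..11] */
	unsigned int rss_size;
	int viid;

	viid = t4_alloc_vi(adap, mbox, port, pf, vf, 2, mac, &rss_size);
	if (viid < 0)
		return viid;	/* negative errno */

	/* viid identifies the new VI in subsequent FW commands */
	return 0;
}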

/**
 * t4_set_rxmode - set Rx properties of a virtual interface
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @viid: the VI id
 * @mtu: the new MTU or -1
 * @promisc: 1 to enable promiscuous mode, 0 to disable it, -1 no change
 * @all_multi: 1 to enable all-multi mode, 0 to disable it, -1 no change
 * @bcast: 1 to enable broadcast Rx, 0 to disable it, -1 no change
 * @vlanex: 1 to enable HW VLAN extraction, 0 to disable it, -1 no change
 * @sleep_ok: if true we may sleep while awaiting command completion
 *
 * Sets Rx properties of a virtual interface.
 */
int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
		  int mtu, int promisc, int all_multi, int bcast, int vlanex,
		  bool sleep_ok)
{
	struct fw_vi_rxmode_cmd c;

	/* convert to FW values */
	if (mtu < 0)
		mtu = FW_RXMODE_MTU_NO_CHG;
	if (promisc < 0)
		promisc = FW_VI_RXMODE_CMD_PROMISCEN_M;
	if (all_multi < 0)
		all_multi = FW_VI_RXMODE_CMD_ALLMULTIEN_M;
	if (bcast < 0)
		bcast = FW_VI_RXMODE_CMD_BROADCASTEN_M;
	if (vlanex < 0)
		vlanex = FW_VI_RXMODE_CMD_VLANEXEN_M;

	memset(&c, 0, sizeof(c));
	c.op_to_viid = htonl(FW_CMD_OP_V(FW_VI_RXMODE_CMD) | FW_CMD_REQUEST_F |
			     FW_CMD_WRITE_F | FW_VI_RXMODE_CMD_VIID_V(viid));
	c.retval_len16 = htonl(FW_LEN16(c));
	c.mtu_to_vlanexen = htonl(FW_VI_RXMODE_CMD_MTU_V(mtu) |
				  FW_VI_RXMODE_CMD_PROMISCEN_V(promisc) |
				  FW_VI_RXMODE_CMD_ALLMULTIEN_V(all_multi) |
				  FW_VI_RXMODE_CMD_BROADCASTEN_V(bcast) |
				  FW_VI_RXMODE_CMD_VLANEXEN_V(vlanex));
	return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
}
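
/*
 * Illustrative sketch only: the -1 "no change" convention of
 * t4_set_rxmode().  Here only promiscuous mode is enabled; the MTU,
 * all-multi, broadcast and VLAN-extraction settings are left alone.
 */
static int example_set_promisc(struct adapter *adap, unsigned int mbox,
			       unsigned int viid)
{
	return t4_set_rxmode(adap, mbox, viid, -1, 1, -1, -1, -1, true);
}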

/**
 * t4_alloc_mac_filt - allocates exact-match filters for MAC addresses
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @viid: the VI id
 * @free: if true any existing filters for this VI id are first removed
 * @naddr: the number of MAC addresses to allocate filters for (up to 7)
 * @addr: the MAC address(es)
 * @idx: where to store the index of each allocated filter
 * @hash: pointer to hash address filter bitmap
 * @sleep_ok: call is allowed to sleep
 *
 * Allocates an exact-match filter for each of the supplied addresses and
 * sets it to the corresponding address.  If @idx is not %NULL it should
 * have at least @naddr entries, each of which will be set to the index of
 * the filter allocated for the corresponding MAC address.  If a filter
 * could not be allocated for an address its index is set to 0xffff.
 * If @hash is not %NULL addresses that fail to allocate an exact filter
 * are hashed and update the hash filter bitmap pointed at by @hash.
 *
 * Returns a negative error number or the number of filters allocated.
 */
int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox,
		      unsigned int viid, bool free, unsigned int naddr,
		      const u8 **addr, u16 *idx, u64 *hash, bool sleep_ok)
{
	int i, ret;
	struct fw_vi_mac_cmd c;
	struct fw_vi_mac_exact *p;
	unsigned int max_naddr = is_t4(adap->params.chip) ?
				 NUM_MPS_CLS_SRAM_L_INSTANCES :
				 NUM_MPS_T5_CLS_SRAM_L_INSTANCES;

	if (naddr > 7)
		return -EINVAL;

	memset(&c, 0, sizeof(c));
	c.op_to_viid = htonl(FW_CMD_OP_V(FW_VI_MAC_CMD) | FW_CMD_REQUEST_F |
			     FW_CMD_WRITE_F | (free ? FW_CMD_EXEC_F : 0) |
			     FW_VI_MAC_CMD_VIID_V(viid));
	c.freemacs_to_len16 = htonl(FW_VI_MAC_CMD_FREEMACS_V(free) |
				    FW_CMD_LEN16_V((naddr + 2) / 2));

	for (i = 0, p = c.u.exact; i < naddr; i++, p++) {
		p->valid_to_idx = htons(FW_VI_MAC_CMD_VALID_F |
					FW_VI_MAC_CMD_IDX_V(FW_VI_MAC_ADD_MAC));
		memcpy(p->macaddr, addr[i], sizeof(p->macaddr));
	}

	ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok);
	if (ret)
		return ret;

	for (i = 0, p = c.u.exact; i < naddr; i++, p++) {
		u16 index = FW_VI_MAC_CMD_IDX_G(ntohs(p->valid_to_idx));

		if (idx)
			idx[i] = index >= max_naddr ? 0xffff : index;
		if (index < max_naddr)
			ret++;
		else if (hash)
			*hash |= (1ULL << hash_mac_addr(addr[i]));
	}
	return ret;
}

/**
 * t4_change_mac - modifies the exact-match filter for a MAC address
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @viid: the VI id
 * @idx: index of existing filter for old value of MAC address, or -1
 * @addr: the new MAC address value
 * @persist: whether a new MAC allocation should be persistent
 * @add_smt: if true also add the address to the HW SMT
 *
 * Modifies an exact-match filter and sets it to the new MAC address.
 * Note that in general it is not possible to modify the value of a given
 * filter so the generic way to modify an address filter is to free the one
 * being used by the old address value and allocate a new filter for the
 * new address value.  @idx can be -1 if the address is a new addition.
 *
 * Returns a negative error number or the index of the filter with the new
 * MAC value.
 */
int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
		  int idx, const u8 *addr, bool persist, bool add_smt)
{
	int ret, mode;
	struct fw_vi_mac_cmd c;
	struct fw_vi_mac_exact *p = c.u.exact;
	unsigned int max_mac_addr = is_t4(adap->params.chip) ?
				    NUM_MPS_CLS_SRAM_L_INSTANCES :
				    NUM_MPS_T5_CLS_SRAM_L_INSTANCES;

	if (idx < 0)		/* new allocation */
		idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC;
	mode = add_smt ? FW_VI_MAC_SMT_AND_MPSTCAM : FW_VI_MAC_MPS_TCAM_ENTRY;

	memset(&c, 0, sizeof(c));
	c.op_to_viid = htonl(FW_CMD_OP_V(FW_VI_MAC_CMD) | FW_CMD_REQUEST_F |
			     FW_CMD_WRITE_F | FW_VI_MAC_CMD_VIID_V(viid));
	c.freemacs_to_len16 = htonl(FW_CMD_LEN16_V(1));
	p->valid_to_idx = htons(FW_VI_MAC_CMD_VALID_F |
				FW_VI_MAC_CMD_SMAC_RESULT_V(mode) |
				FW_VI_MAC_CMD_IDX_V(idx));
	memcpy(p->macaddr, addr, sizeof(p->macaddr));

	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
	if (ret == 0) {
		ret = FW_VI_MAC_CMD_IDX_G(ntohs(p->valid_to_idx));
		if (ret >= max_mac_addr)
			ret = -ENOMEM;
	}
	return ret;
}
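
/*
 * Illustrative sketch only: updating a VI's primary unicast address
 * with t4_change_mac().  @old_idx is the index returned by a previous
 * call (or -1 on first use); a real caller would keep the returned
 * index for the next update.  The function name is hypothetical.
 */
static int example_update_mac(struct adapter *adap, unsigned int mbox,
			      unsigned int viid, int old_idx,
			      const u8 *new_addr)
{
	/* persist = true, add_smt = true per the kernel-doc above */
	return t4_change_mac(adap, mbox, viid, old_idx, new_addr,
			     true, true);
}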

/**
 * t4_set_addr_hash - program the MAC inexact-match hash filter
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @viid: the VI id
 * @ucast: whether the hash filter should also match unicast addresses
 * @vec: the value to be written to the hash filter
 * @sleep_ok: call is allowed to sleep
 *
 * Sets the 64-bit inexact-match hash filter for a virtual interface.
 */
int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid,
		     bool ucast, u64 vec, bool sleep_ok)
{
	struct fw_vi_mac_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_viid = htonl(FW_CMD_OP_V(FW_VI_MAC_CMD) | FW_CMD_REQUEST_F |
			     FW_CMD_WRITE_F | FW_VI_ENABLE_CMD_VIID_V(viid));
	c.freemacs_to_len16 = htonl(FW_VI_MAC_CMD_HASHVECEN_F |
				    FW_VI_MAC_CMD_HASHUNIEN_V(ucast) |
				    FW_CMD_LEN16_V(1));
	c.u.hash.hashvec = cpu_to_be64(vec);
	return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
}
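
/*
 * Illustrative sketch only: combining t4_alloc_mac_filt() and
 * t4_set_addr_hash() to program an address list.  Addresses that don't
 * get an exact-match filter end up as bits in @hash, which is then
 * written to the inexact-match filter.  The error handling and the
 * choice of ucast = false are example assumptions, not the driver's
 * actual policy.
 */
static int example_program_addr_list(struct adapter *adap, unsigned int mbox,
				     unsigned int viid, const u8 **addr,
				     unsigned int naddr)
{
	u64 hash = 0;
	int ret;

	/* free = true: drop any filters previously held by this VI */
	ret = t4_alloc_mac_filt(adap, mbox, viid, true, naddr, addr,
				NULL, &hash, true);
	if (ret < 0)
		return ret;

	return t4_set_addr_hash(adap, mbox, viid, false, hash, true);
}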

/**
 * t4_enable_vi_params - enable/disable a virtual interface
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @viid: the VI id
 * @rx_en: 1=enable Rx, 0=disable Rx
 * @tx_en: 1=enable Tx, 0=disable Tx
 * @dcb_en: 1=enable delivery of Data Center Bridging messages.
 *
 * Enables/disables a virtual interface.  Note that setting DCB Enable
 * only makes sense when enabling a Virtual Interface ...
 */
int t4_enable_vi_params(struct adapter *adap, unsigned int mbox,
			unsigned int viid, bool rx_en, bool tx_en, bool dcb_en)
{
	struct fw_vi_enable_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_viid = htonl(FW_CMD_OP_V(FW_VI_ENABLE_CMD) | FW_CMD_REQUEST_F |
			     FW_CMD_EXEC_F | FW_VI_ENABLE_CMD_VIID_V(viid));

	c.ien_to_len16 = htonl(FW_VI_ENABLE_CMD_IEN_V(rx_en) |
			       FW_VI_ENABLE_CMD_EEN_V(tx_en) | FW_LEN16(c) |
			       FW_VI_ENABLE_CMD_DCB_INFO_V(dcb_en));
	return t4_wr_mbox_ns(adap, mbox, &c, sizeof(c), NULL);
}

/**
 * t4_enable_vi - enable/disable a virtual interface
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @viid: the VI id
 * @rx_en: 1=enable Rx, 0=disable Rx
 * @tx_en: 1=enable Tx, 0=disable Tx
 *
 * Enables/disables a virtual interface.
 */
int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid,
		 bool rx_en, bool tx_en)
{
	return t4_enable_vi_params(adap, mbox, viid, rx_en, tx_en, 0);
}

/**
 * t4_identify_port - identify a VI's port by blinking its LED
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @viid: the VI id
 * @nblinks: how many times to blink LED at 2.5 Hz
 *
 * Identifies a VI's port by blinking its LED.
 */
int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid,
		     unsigned int nblinks)
{
	struct fw_vi_enable_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_viid = htonl(FW_CMD_OP_V(FW_VI_ENABLE_CMD) | FW_CMD_REQUEST_F |
			     FW_CMD_EXEC_F | FW_VI_ENABLE_CMD_VIID_V(viid));
	c.ien_to_len16 = htonl(FW_VI_ENABLE_CMD_LED_F | FW_LEN16(c));
	c.blinkdur = htons(nblinks);
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}

/**
 * t4_iq_free - free an ingress queue and its FLs
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @pf: the PF owning the queues
 * @vf: the VF owning the queues
 * @iqtype: the ingress queue type
 * @iqid: ingress queue id
 * @fl0id: FL0 queue id or 0xffff if no attached FL0
 * @fl1id: FL1 queue id or 0xffff if no attached FL1
 *
 * Frees an ingress queue and its associated FLs, if any.
 */
int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
	       unsigned int vf, unsigned int iqtype, unsigned int iqid,
	       unsigned int fl0id, unsigned int fl1id)
{
	struct fw_iq_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn = htonl(FW_CMD_OP_V(FW_IQ_CMD) | FW_CMD_REQUEST_F |
			    FW_CMD_EXEC_F | FW_IQ_CMD_PFN_V(pf) |
			    FW_IQ_CMD_VFN_V(vf));
	c.alloc_to_len16 = htonl(FW_IQ_CMD_FREE_F | FW_LEN16(c));
	c.type_to_iqandstindex = htonl(FW_IQ_CMD_TYPE_V(iqtype));
	c.iqid = htons(iqid);
	c.fl0id = htons(fl0id);
	c.fl1id = htons(fl1id);
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}

/**
 * t4_eth_eq_free - free an Ethernet egress queue
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @pf: the PF owning the queue
 * @vf: the VF owning the queue
 * @eqid: egress queue id
 *
 * Frees an Ethernet egress queue.
 */
int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
		   unsigned int vf, unsigned int eqid)
{
	struct fw_eq_eth_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn = htonl(FW_CMD_OP_V(FW_EQ_ETH_CMD) | FW_CMD_REQUEST_F |
			    FW_CMD_EXEC_F | FW_EQ_ETH_CMD_PFN_V(pf) |
			    FW_EQ_ETH_CMD_VFN_V(vf));
	c.alloc_to_len16 = htonl(FW_EQ_ETH_CMD_FREE_F | FW_LEN16(c));
	c.eqid_pkd = htonl(FW_EQ_ETH_CMD_EQID_V(eqid));
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}

/**
 * t4_ctrl_eq_free - free a control egress queue
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @pf: the PF owning the queue
 * @vf: the VF owning the queue
 * @eqid: egress queue id
 *
 * Frees a control egress queue.
 */
int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
		    unsigned int vf, unsigned int eqid)
{
	struct fw_eq_ctrl_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn = htonl(FW_CMD_OP_V(FW_EQ_CTRL_CMD) | FW_CMD_REQUEST_F |
			    FW_CMD_EXEC_F | FW_EQ_CTRL_CMD_PFN_V(pf) |
			    FW_EQ_CTRL_CMD_VFN_V(vf));
	c.alloc_to_len16 = htonl(FW_EQ_CTRL_CMD_FREE_F | FW_LEN16(c));
	c.cmpliqid_eqid = htonl(FW_EQ_CTRL_CMD_EQID_V(eqid));
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}

/**
 * t4_ofld_eq_free - free an offload egress queue
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @pf: the PF owning the queue
 * @vf: the VF owning the queue
 * @eqid: egress queue id
 *
 * Frees an offload egress queue.
 */
int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
		    unsigned int vf, unsigned int eqid)
{
	struct fw_eq_ofld_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn = htonl(FW_CMD_OP_V(FW_EQ_OFLD_CMD) | FW_CMD_REQUEST_F |
			    FW_CMD_EXEC_F | FW_EQ_OFLD_CMD_PFN_V(pf) |
			    FW_EQ_OFLD_CMD_VFN_V(vf));
	c.alloc_to_len16 = htonl(FW_EQ_OFLD_CMD_FREE_F | FW_LEN16(c));
	c.eqid_pkd = htonl(FW_EQ_OFLD_CMD_EQID_V(eqid));
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}

/**
 * t4_handle_fw_rpl - process a FW reply message
 * @adap: the adapter
 * @rpl: start of the FW message
 *
 * Processes a FW message, such as link state change messages.
 */
int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl)
{
	u8 opcode = *(const u8 *)rpl;

	if (opcode == FW_PORT_CMD) {	/* link/module state change message */
		int speed = 0, fc = 0;
		const struct fw_port_cmd *p = (void *)rpl;
		int chan = FW_PORT_CMD_PORTID_G(ntohl(p->op_to_portid));
		int port = adap->chan_map[chan];
		struct port_info *pi = adap2pinfo(adap, port);
		struct link_config *lc = &pi->link_cfg;
		u32 stat = ntohl(p->u.info.lstatus_to_modtype);
		int link_ok = (stat & FW_PORT_CMD_LSTATUS_F) != 0;
		u32 mod = FW_PORT_CMD_MODTYPE_G(stat);

		if (stat & FW_PORT_CMD_RXPAUSE_F)
			fc |= PAUSE_RX;
		if (stat & FW_PORT_CMD_TXPAUSE_F)
			fc |= PAUSE_TX;
		if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_100M))
			speed = 100;
		else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_1G))
			speed = 1000;
		else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_10G))
			speed = 10000;
		else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_40G))
			speed = 40000;

		if (link_ok != lc->link_ok || speed != lc->speed ||
		    fc != lc->fc) {	/* something changed */
			lc->link_ok = link_ok;
			lc->speed = speed;
			lc->fc = fc;
			lc->supported = be16_to_cpu(p->u.info.pcap);
			t4_os_link_changed(adap, port, link_ok);
		}
		if (mod != pi->mod_type) {
			pi->mod_type = mod;
			t4_os_portmod_changed(adap, port);
		}
	}
	return 0;
}

static void get_pci_mode(struct adapter *adapter, struct pci_params *p)
{
	u16 val;

	if (pci_is_pcie(adapter->pdev)) {
		pcie_capability_read_word(adapter->pdev, PCI_EXP_LNKSTA, &val);
		p->speed = val & PCI_EXP_LNKSTA_CLS;
		p->width = (val & PCI_EXP_LNKSTA_NLW) >> 4;
	}
}

/**
 * init_link_config - initialize a link's SW state
 * @lc: structure holding the link state
 * @caps: link capabilities
 *
 * Initializes the SW state maintained for each link, including the link's
 * capabilities and default speed/flow-control/autonegotiation settings.
 */
static void init_link_config(struct link_config *lc, unsigned int caps)
{
	lc->supported = caps;
	lc->requested_speed = 0;
	lc->speed = 0;
	lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
	if (lc->supported & FW_PORT_CAP_ANEG) {
		lc->advertising = lc->supported & ADVERT_MASK;
		lc->autoneg = AUTONEG_ENABLE;
		lc->requested_fc |= PAUSE_AUTONEG;
	} else {
		lc->advertising = 0;
		lc->autoneg = AUTONEG_DISABLE;
	}
}

#define CIM_PF_NOACCESS 0xeeeeeeee

int t4_wait_dev_ready(void __iomem *regs)
{
	u32 whoami;

	whoami = readl(regs + PL_WHOAMI_A);
	if (whoami != 0xffffffff && whoami != CIM_PF_NOACCESS)
		return 0;

	msleep(500);
	whoami = readl(regs + PL_WHOAMI_A);
	return (whoami != 0xffffffff && whoami != CIM_PF_NOACCESS ? 0 : -EIO);
}

struct flash_desc {
	u32 vendor_and_model_id;
	u32 size_mb;
};

static int get_flash_params(struct adapter *adap)
{
	/* Table for non-Numonix supported flash parts.  Numonix parts are
	 * left to the preexisting code.  All flash parts have 64KB sectors.
	 */
	static struct flash_desc supported_flash[] = {
		{ 0x150201, 4 << 20 },		/* Spansion 4MB S25FL032P */
	};

	int ret;
	u32 info;

	ret = sf1_write(adap, 1, 1, 0, SF_RD_ID);
	if (!ret)
		ret = sf1_read(adap, 3, 0, 1, &info);
	t4_write_reg(adap, SF_OP_A, 0);		/* unlock SF */
	if (ret)
		return ret;

	for (ret = 0; ret < ARRAY_SIZE(supported_flash); ++ret)
		if (supported_flash[ret].vendor_and_model_id == info) {
			adap->params.sf_size = supported_flash[ret].size_mb;
			adap->params.sf_nsec =
				adap->params.sf_size / SF_SEC_SIZE;
			return 0;
		}

	if ((info & 0xff) != 0x20)		/* not a Numonix flash */
		return -EINVAL;
	info >>= 16;				/* log2 of size */
	if (info >= 0x14 && info < 0x18)
		adap->params.sf_nsec = 1 << (info - 16);
	else if (info == 0x18)
		adap->params.sf_nsec = 64;
	else
		return -EINVAL;
	adap->params.sf_size = 1 << info;
	adap->params.sf_fw_start =
		t4_read_reg(adap, CIM_BOOT_CFG_A) & BOOTADDR_M;

	if (adap->params.sf_size < FLASH_MIN_SIZE)
		dev_warn(adap->pdev_dev, "WARNING!!! FLASH size %#x < %#x!!!\n",
			 adap->params.sf_size, FLASH_MIN_SIZE);
	return 0;
}
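
/*
 * Worked example of the Numonix decode above (values assumed for
 * illustration): a flash whose ID reports log2(size) = 0x16 is a
 * 1 << 0x16 = 4MB part with 1 << (0x16 - 16) = 64 sectors, which is
 * self-consistent at 64 sectors * 64KB (SF_SEC_SIZE) = 4MB.
 */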

/**
 * t4_prep_adapter - prepare SW and HW for operation
 * @adapter: the adapter
 *
 * Initialize adapter SW state for the various HW modules, set initial
 * values for some adapter tunables, take PHYs out of reset, and
 * initialize the MDIO interface.
 */
int t4_prep_adapter(struct adapter *adapter)
{
	int ret, ver;
	uint16_t device_id;
	u32 pl_rev;

	get_pci_mode(adapter, &adapter->params.pci);
	pl_rev = REV_G(t4_read_reg(adapter, PL_REV_A));

	ret = get_flash_params(adapter);
	if (ret < 0) {
		dev_err(adapter->pdev_dev, "error %d identifying flash\n", ret);
		return ret;
	}

	/* Retrieve adapter's device ID
	 */
	pci_read_config_word(adapter->pdev, PCI_DEVICE_ID, &device_id);
	ver = device_id >> 12;
	adapter->params.chip = 0;
	switch (ver) {
	case CHELSIO_T4:
		adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T4, pl_rev);
		break;
	case CHELSIO_T5:
		adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T5, pl_rev);
		break;
	default:
		dev_err(adapter->pdev_dev, "Device %d is not supported\n",
			device_id);
		return -EINVAL;
	}

	init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);

	/*
	 * Default port for debugging in case we can't reach FW.
	 */
	adapter->params.nports = 1;
	adapter->params.portvec = 1;
	adapter->params.vpd.cclk = 50000;
	return 0;
}

/**
 * cxgb4_t4_bar2_sge_qregs - return BAR2 SGE Queue register information
 * @adapter: the adapter
 * @qid: the Queue ID
 * @qtype: the Ingress or Egress type for @qid
 * @pbar2_qoffset: BAR2 Queue Offset
 * @pbar2_qid: BAR2 Queue ID or 0 for Queue ID inferred SGE Queues
 *
 * Returns the BAR2 SGE Queue Registers information associated with the
 * indicated Absolute Queue ID.  These are passed back in return value
 * pointers.  @qtype should be T4_BAR2_QTYPE_EGRESS for Egress Queue
 * and T4_BAR2_QTYPE_INGRESS for Ingress Queues.
 *
 * This may return an error which indicates that BAR2 SGE Queue
 * registers aren't available.  If an error is not returned, then the
 * following values are returned:
 *
 *   *@pbar2_qoffset: the BAR2 Offset of the @qid Registers
 *   *@pbar2_qid: the BAR2 SGE Queue ID or 0 of @qid
 *
 * If the returned BAR2 Queue ID is 0, then BAR2 SGE registers which
 * require the "Inferred Queue ID" ability may be used.  E.g. the
 * Write Combining Doorbell Buffer.  If the BAR2 Queue ID is not 0,
 * then these "Inferred Queue ID" registers may not be used.
 */
int cxgb4_t4_bar2_sge_qregs(struct adapter *adapter,
			    unsigned int qid,
			    enum t4_bar2_qtype qtype,
			    u64 *pbar2_qoffset,
			    unsigned int *pbar2_qid)
{
	unsigned int page_shift, page_size, qpp_shift, qpp_mask;
	u64 bar2_page_offset, bar2_qoffset;
	unsigned int bar2_qid, bar2_qid_offset, bar2_qinferred;

	/* T4 doesn't support BAR2 SGE Queue registers.
	 */
	if (is_t4(adapter->params.chip))
		return -EINVAL;

	/* Get our SGE Page Size parameters.
	 */
	page_shift = adapter->params.sge.hps + 10;
	page_size = 1 << page_shift;

	/* Get the right Queues per Page parameters for our Queue.
	 */
	qpp_shift = (qtype == T4_BAR2_QTYPE_EGRESS
		     ? adapter->params.sge.eq_qpp
		     : adapter->params.sge.iq_qpp);
	qpp_mask = (1 << qpp_shift) - 1;

	/* Calculate the basics of the BAR2 SGE Queue register area:
	 *  o The BAR2 page the Queue registers will be in.
	 *  o The BAR2 Queue ID.
	 *  o The BAR2 Queue ID Offset into the BAR2 page.
	 */
	bar2_page_offset = ((qid >> qpp_shift) << page_shift);
	bar2_qid = qid & qpp_mask;
	bar2_qid_offset = bar2_qid * SGE_UDB_SIZE;

	/* If the BAR2 Queue ID Offset is less than the Page Size, then the
	 * hardware will infer the Absolute Queue ID simply from the writes to
	 * the BAR2 Queue ID Offset within the BAR2 Page (and we need to use a
	 * BAR2 Queue ID of 0 for those writes).  Otherwise, we'll simply
	 * write to the first BAR2 SGE Queue Area within the BAR2 Page with
	 * the BAR2 Queue ID and the hardware will infer the Absolute Queue ID
	 * from the BAR2 Page and BAR2 Queue ID.
	 *
	 * One important consequence of this is that some BAR2 SGE registers
	 * have a "Queue ID" field and we can write the BAR2 SGE Queue ID
	 * there.  But other registers synthesize the SGE Queue ID purely
	 * from the writes to the registers -- the Write Combined Doorbell
	 * Buffer is a good example.  These BAR2 SGE Registers are only
	 * available for those BAR2 SGE Register areas where the SGE Absolute
	 * Queue ID can be inferred from simple writes.
	 */
	bar2_qoffset = bar2_page_offset;
	bar2_qinferred = (bar2_qid_offset < page_size);
	if (bar2_qinferred) {
		bar2_qoffset += bar2_qid_offset;
		bar2_qid = 0;
	}

	*pbar2_qoffset = bar2_qoffset;
	*pbar2_qid = bar2_qid;
	return 0;
}
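
/*
 * Worked example of the BAR2 mapping above (numbers assumed for
 * illustration): with a 4KB SGE page (page_shift = 12), 8 egress
 * queues per page (qpp_shift = 3) and a 128-byte SGE_UDB_SIZE,
 * Absolute Queue ID 21 maps to:
 *
 *	bar2_page_offset = (21 >> 3) << 12 = 0x2000
 *	bar2_qid         = 21 & 7          = 5
 *	bar2_qid_offset  = 5 * 128         = 640 (< 4096, so inferred)
 *
 * giving *pbar2_qoffset = 0x2000 + 640 = 0x2280 and *pbar2_qid = 0,
 * i.e. writes at that offset let the hardware infer the Queue ID.
 */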

/**
 * t4_init_sge_params - initialize adap->params.sge
 * @adapter: the adapter
 *
 * Initialize various fields of the adapter's SGE Parameters structure.
 */
int t4_init_sge_params(struct adapter *adapter)
{
	struct sge_params *sge_params = &adapter->params.sge;
	u32 hps, qpp;
	unsigned int s_hps, s_qpp;

	/* Extract the SGE Page Size for our PF.
	 */
	hps = t4_read_reg(adapter, SGE_HOST_PAGE_SIZE_A);
	s_hps = (HOSTPAGESIZEPF0_S +
		 (HOSTPAGESIZEPF1_S - HOSTPAGESIZEPF0_S) * adapter->fn);
	sge_params->hps = ((hps >> s_hps) & HOSTPAGESIZEPF0_M);

	/* Extract the SGE Egress and Ingress Queues Per Page for our PF.
	 */
	s_qpp = (QUEUESPERPAGEPF0_S +
		 (QUEUESPERPAGEPF1_S - QUEUESPERPAGEPF0_S) * adapter->fn);
	qpp = t4_read_reg(adapter, SGE_EGRESS_QUEUES_PER_PAGE_PF_A);
	sge_params->eq_qpp = ((qpp >> s_qpp) & QUEUESPERPAGEPF0_M);
	qpp = t4_read_reg(adapter, SGE_INGRESS_QUEUES_PER_PAGE_PF_A);
	sge_params->iq_qpp = ((qpp >> s_qpp) & QUEUESPERPAGEPF0_M);

	return 0;
}
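
/*
 * Worked example of the per-PF field extraction above: the PF0..PF7
 * fields are packed into a single register at a fixed stride, so the
 * shift for PF n is S0 + (S1 - S0) * n.  Assuming a 4-bit stride for
 * the Queues-Per-Page fields, PF2's field would occupy bits [11:8].
 */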

/**
 * t4_init_tp_params - initialize adap->params.tp
 * @adap: the adapter
 *
 * Initialize various fields of the adapter's TP Parameters structure.
 */
int t4_init_tp_params(struct adapter *adap)
{
	int chan;
	u32 v;

	v = t4_read_reg(adap, TP_TIMER_RESOLUTION_A);
	adap->params.tp.tre = TIMERRESOLUTION_G(v);
	adap->params.tp.dack_re = DELAYEDACKRESOLUTION_G(v);

	/* MODQ_REQ_MAP defaults to setting queues 0-3 to chan 0-3 */
	for (chan = 0; chan < NCHAN; chan++)
		adap->params.tp.tx_modq[chan] = chan;

	/* Cache the adapter's Compressed Filter Mode and global Ingress
	 * Configuration.
	 */
	t4_read_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A,
			 &adap->params.tp.vlan_pri_map, 1,
			 TP_VLAN_PRI_MAP_A);
	t4_read_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A,
			 &adap->params.tp.ingress_config, 1,
			 TP_INGRESS_CONFIG_A);

	/* Now that we have TP_VLAN_PRI_MAP cached, we can calculate the field
	 * shift positions of several elements of the Compressed Filter Tuple
	 * for this adapter which we need frequently ...
	 */
	adap->params.tp.vlan_shift = t4_filter_field_shift(adap, VLAN_F);
	adap->params.tp.vnic_shift = t4_filter_field_shift(adap, VNIC_ID_F);
	adap->params.tp.port_shift = t4_filter_field_shift(adap, PORT_F);
	adap->params.tp.protocol_shift = t4_filter_field_shift(adap,
							       PROTOCOL_F);

	/* If TP_INGRESS_CONFIG.VNID == 0, then TP_VLAN_PRI_MAP.VNIC_ID
	 * represents the presence of an Outer VLAN instead of a VNIC ID.
	 */
	if ((adap->params.tp.ingress_config & VNIC_F) == 0)
		adap->params.tp.vnic_shift = -1;

	return 0;
}

/**
 * t4_filter_field_shift - calculate filter field shift
 * @adap: the adapter
 * @filter_sel: the desired field (from TP_VLAN_PRI_MAP bits)
 *
 * Return the shift position of a filter field within the Compressed
 * Filter Tuple.  The filter field is specified via its selection bit
 * within TP_VLAN_PRI_MAP (filter mode).  E.g. VLAN_F.
 */
int t4_filter_field_shift(const struct adapter *adap, int filter_sel)
{
	unsigned int filter_mode = adap->params.tp.vlan_pri_map;
	unsigned int sel;
	int field_shift;

	if ((filter_mode & filter_sel) == 0)
		return -1;

	for (sel = 1, field_shift = 0; sel < filter_sel; sel <<= 1) {
		switch (filter_mode & sel) {
		case FCOE_F:
			field_shift += FT_FCOE_W;
			break;
		case PORT_F:
			field_shift += FT_PORT_W;
			break;
		case VNIC_ID_F:
			field_shift += FT_VNIC_ID_W;
			break;
		case VLAN_F:
			field_shift += FT_VLAN_W;
			break;
		case TOS_F:
			field_shift += FT_TOS_W;
			break;
		case PROTOCOL_F:
			field_shift += FT_PROTOCOL_W;
			break;
		case ETHERTYPE_F:
			field_shift += FT_ETHERTYPE_W;
			break;
		case MACMATCH_F:
			field_shift += FT_MACMATCH_W;
			break;
		case MPSHITTYPE_F:
			field_shift += FT_MPSHITTYPE_W;
			break;
		case FRAGMENTATION_F:
			field_shift += FT_FRAGMENTATION_W;
			break;
		}
	}
	return field_shift;
}
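
/*
 * Worked example for t4_filter_field_shift() (field widths assumed for
 * illustration): if the filter mode enables FCOE_F, PORT_F and VLAN_F,
 * the shift of VLAN_F is the sum of the widths of the enabled fields
 * below it, i.e. FT_FCOE_W + FT_PORT_W.  With a 1-bit FCoE field and a
 * 3-bit Port field, the VLAN field would start at bit 4 of the
 * Compressed Filter Tuple.
 */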

int t4_port_init(struct adapter *adap, int mbox, int pf, int vf)
{
	u8 addr[6];
	int ret, i, j = 0;
	struct fw_port_cmd c;
	struct fw_rss_vi_config_cmd rvc;

	memset(&c, 0, sizeof(c));
	memset(&rvc, 0, sizeof(rvc));

	for_each_port(adap, i) {
		unsigned int rss_size;
		struct port_info *p = adap2pinfo(adap, i);

		while ((adap->params.portvec & (1 << j)) == 0)
			j++;

		c.op_to_portid = htonl(FW_CMD_OP_V(FW_PORT_CMD) |
				       FW_CMD_REQUEST_F | FW_CMD_READ_F |
				       FW_PORT_CMD_PORTID_V(j));
		c.action_to_len16 = htonl(
			FW_PORT_CMD_ACTION_V(FW_PORT_ACTION_GET_PORT_INFO) |
			FW_LEN16(c));
		ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
		if (ret)
			return ret;

		ret = t4_alloc_vi(adap, mbox, j, pf, vf, 1, addr, &rss_size);
		if (ret < 0)
			return ret;

		p->viid = ret;
		p->tx_chan = j;
		p->lport = j;
		p->rss_size = rss_size;
		memcpy(adap->port[i]->dev_addr, addr, ETH_ALEN);
		adap->port[i]->dev_port = j;

		ret = ntohl(c.u.info.lstatus_to_modtype);
		p->mdio_addr = (ret & FW_PORT_CMD_MDIOCAP_F) ?
			FW_PORT_CMD_MDIOADDR_G(ret) : -1;
		p->port_type = FW_PORT_CMD_PTYPE_G(ret);
		p->mod_type = FW_PORT_MOD_TYPE_NA;

		rvc.op_to_viid = htonl(FW_CMD_OP_V(FW_RSS_VI_CONFIG_CMD) |
				       FW_CMD_REQUEST_F | FW_CMD_READ_F |
				       FW_RSS_VI_CONFIG_CMD_VIID(p->viid));
		rvc.retval_len16 = htonl(FW_LEN16(rvc));
		ret = t4_wr_mbox(adap, mbox, &rvc, sizeof(rvc), &rvc);
		if (ret)
			return ret;
		p->rss_mode = ntohl(rvc.u.basicvirtual.defaultq_to_udpen);

		init_link_config(&p->link_cfg, ntohs(c.u.info.pcap));
		j++;
	}
	return 0;
}