RDMA/cxgb4/cxgb4vf/csiostor: Cleanup SGE register defines
[linux-block.git] / drivers / net / ethernet / chelsio / cxgb4 / t4_hw.c
CommitLineData
56d36be4
DM
1/*
2 * This file is part of the Chelsio T4 Ethernet driver for Linux.
3 *
ce100b8b 4 * Copyright (c) 2003-2014 Chelsio Communications, Inc. All rights reserved.
56d36be4
DM
5 *
6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenIB.org BSD license below:
11 *
12 * Redistribution and use in source and binary forms, with or
13 * without modification, are permitted provided that the following
14 * conditions are met:
15 *
16 * - Redistributions of source code must retain the above
17 * copyright notice, this list of conditions and the following
18 * disclaimer.
19 *
20 * - Redistributions in binary form must reproduce the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer in the documentation and/or other materials
23 * provided with the distribution.
24 *
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 * SOFTWARE.
33 */
34
56d36be4
DM
35#include <linux/delay.h>
36#include "cxgb4.h"
37#include "t4_regs.h"
f612b815 38#include "t4_values.h"
56d36be4
DM
39#include "t4fw_api.h"
40
41/**
42 * t4_wait_op_done_val - wait until an operation is completed
43 * @adapter: the adapter performing the operation
44 * @reg: the register to check for completion
45 * @mask: a single-bit field within @reg that indicates completion
46 * @polarity: the value of the field when the operation is completed
47 * @attempts: number of check iterations
48 * @delay: delay in usecs between iterations
49 * @valp: where to store the value of the register at completion time
50 *
51 * Wait until an operation is completed by checking a bit in a register
52 * up to @attempts times. If @valp is not NULL the value of the register
53 * at the time it indicated completion is stored there. Returns 0 if the
54 * operation completes and -EAGAIN otherwise.
55 */
de498c89
RD
56static int t4_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
57 int polarity, int attempts, int delay, u32 *valp)
56d36be4
DM
58{
59 while (1) {
60 u32 val = t4_read_reg(adapter, reg);
61
62 if (!!(val & mask) == polarity) {
63 if (valp)
64 *valp = val;
65 return 0;
66 }
67 if (--attempts == 0)
68 return -EAGAIN;
69 if (delay)
70 udelay(delay);
71 }
72}
73
74static inline int t4_wait_op_done(struct adapter *adapter, int reg, u32 mask,
75 int polarity, int attempts, int delay)
76{
77 return t4_wait_op_done_val(adapter, reg, mask, polarity, attempts,
78 delay, NULL);
79}
80
81/**
82 * t4_set_reg_field - set a register field to a value
83 * @adapter: the adapter to program
84 * @addr: the register address
85 * @mask: specifies the portion of the register to modify
86 * @val: the new value for the register field
87 *
88 * Sets a register field specified by the supplied mask to the
89 * given value.
90 */
91void t4_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask,
92 u32 val)
93{
94 u32 v = t4_read_reg(adapter, addr) & ~mask;
95
96 t4_write_reg(adapter, addr, v | val);
97 (void) t4_read_reg(adapter, addr); /* flush */
98}
99
100/**
101 * t4_read_indirect - read indirectly addressed registers
102 * @adap: the adapter
103 * @addr_reg: register holding the indirect address
104 * @data_reg: register holding the value of the indirect register
105 * @vals: where the read register values are stored
106 * @nregs: how many indirect registers to read
107 * @start_idx: index of first indirect register to read
108 *
109 * Reads registers that are accessed indirectly through an address/data
110 * register pair.
111 */
f2b7e78d 112void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
de498c89
RD
113 unsigned int data_reg, u32 *vals,
114 unsigned int nregs, unsigned int start_idx)
56d36be4
DM
115{
116 while (nregs--) {
117 t4_write_reg(adap, addr_reg, start_idx);
118 *vals++ = t4_read_reg(adap, data_reg);
119 start_idx++;
120 }
121}
122
13ee15d3
VP
123/**
124 * t4_write_indirect - write indirectly addressed registers
125 * @adap: the adapter
126 * @addr_reg: register holding the indirect addresses
127 * @data_reg: register holding the value for the indirect registers
128 * @vals: values to write
129 * @nregs: how many indirect registers to write
130 * @start_idx: address of first indirect register to write
131 *
132 * Writes a sequential block of registers that are accessed indirectly
133 * through an address/data register pair.
134 */
135void t4_write_indirect(struct adapter *adap, unsigned int addr_reg,
136 unsigned int data_reg, const u32 *vals,
137 unsigned int nregs, unsigned int start_idx)
138{
139 while (nregs--) {
140 t4_write_reg(adap, addr_reg, start_idx++);
141 t4_write_reg(adap, data_reg, *vals++);
142 }
143}
144
0abfd152
HS
145/*
146 * Read a 32-bit PCI Configuration Space register via the PCI-E backdoor
147 * mechanism. This guarantees that we get the real value even if we're
148 * operating within a Virtual Machine and the Hypervisor is trapping our
149 * Configuration Space accesses.
150 */
151void t4_hw_pci_read_cfg4(struct adapter *adap, int reg, u32 *val)
152{
153 u32 req = ENABLE | FUNCTION(adap->fn) | reg;
154
155 if (is_t4(adap->params.chip))
156 req |= F_LOCALCFG;
157
158 t4_write_reg(adap, PCIE_CFG_SPACE_REQ, req);
159 *val = t4_read_reg(adap, PCIE_CFG_SPACE_DATA);
160
161 /* Reset ENABLE to 0 so reads of PCIE_CFG_SPACE_DATA won't cause a
162 * Configuration Space read. (None of the other fields matter when
163 * ENABLE is 0 so a simple register write is easier than a
164 * read-modify-write via t4_set_reg_field().)
165 */
166 t4_write_reg(adap, PCIE_CFG_SPACE_REQ, 0);
167}
168
31d55c2d
HS
169/*
170 * t4_report_fw_error - report firmware error
171 * @adap: the adapter
172 *
173 * The adapter firmware can indicate error conditions to the host.
174 * If the firmware has indicated an error, print out the reason for
175 * the firmware error.
176 */
177static void t4_report_fw_error(struct adapter *adap)
178{
179 static const char *const reason[] = {
180 "Crash", /* PCIE_FW_EVAL_CRASH */
181 "During Device Preparation", /* PCIE_FW_EVAL_PREP */
182 "During Device Configuration", /* PCIE_FW_EVAL_CONF */
183 "During Device Initialization", /* PCIE_FW_EVAL_INIT */
184 "Unexpected Event", /* PCIE_FW_EVAL_UNEXPECTEDEVENT */
185 "Insufficient Airflow", /* PCIE_FW_EVAL_OVERHEAT */
186 "Device Shutdown", /* PCIE_FW_EVAL_DEVICESHUTDOWN */
187 "Reserved", /* reserved */
188 };
189 u32 pcie_fw;
190
191 pcie_fw = t4_read_reg(adap, MA_PCIE_FW);
b2e1a3f0 192 if (pcie_fw & PCIE_FW_ERR)
31d55c2d 193 dev_err(adap->pdev_dev, "Firmware reports adapter error: %s\n",
b2e1a3f0 194 reason[PCIE_FW_EVAL_G(pcie_fw)]);
31d55c2d
HS
195}
196
56d36be4
DM
197/*
198 * Get the reply to a mailbox command and store it in @rpl in big-endian order.
199 */
200static void get_mbox_rpl(struct adapter *adap, __be64 *rpl, int nflit,
201 u32 mbox_addr)
202{
203 for ( ; nflit; nflit--, mbox_addr += 8)
204 *rpl++ = cpu_to_be64(t4_read_reg64(adap, mbox_addr));
205}
206
207/*
208 * Handle a FW assertion reported in a mailbox.
209 */
210static void fw_asrt(struct adapter *adap, u32 mbox_addr)
211{
212 struct fw_debug_cmd asrt;
213
214 get_mbox_rpl(adap, (__be64 *)&asrt, sizeof(asrt) / 8, mbox_addr);
215 dev_alert(adap->pdev_dev,
216 "FW assertion at %.16s:%u, val0 %#x, val1 %#x\n",
217 asrt.u.assert.filename_0_7, ntohl(asrt.u.assert.line),
218 ntohl(asrt.u.assert.x), ntohl(asrt.u.assert.y));
219}
220
221static void dump_mbox(struct adapter *adap, int mbox, u32 data_reg)
222{
223 dev_err(adap->pdev_dev,
224 "mbox %d: %llx %llx %llx %llx %llx %llx %llx %llx\n", mbox,
225 (unsigned long long)t4_read_reg64(adap, data_reg),
226 (unsigned long long)t4_read_reg64(adap, data_reg + 8),
227 (unsigned long long)t4_read_reg64(adap, data_reg + 16),
228 (unsigned long long)t4_read_reg64(adap, data_reg + 24),
229 (unsigned long long)t4_read_reg64(adap, data_reg + 32),
230 (unsigned long long)t4_read_reg64(adap, data_reg + 40),
231 (unsigned long long)t4_read_reg64(adap, data_reg + 48),
232 (unsigned long long)t4_read_reg64(adap, data_reg + 56));
233}
234
235/**
236 * t4_wr_mbox_meat - send a command to FW through the given mailbox
237 * @adap: the adapter
238 * @mbox: index of the mailbox to use
239 * @cmd: the command to write
240 * @size: command length in bytes
241 * @rpl: where to optionally store the reply
242 * @sleep_ok: if true we may sleep while awaiting command completion
243 *
244 * Sends the given command to FW through the selected mailbox and waits
245 * for the FW to execute the command. If @rpl is not %NULL it is used to
246 * store the FW's reply to the command. The command and its optional
247 * reply are of the same length. FW can take up to %FW_CMD_MAX_TIMEOUT ms
248 * to respond. @sleep_ok determines whether we may sleep while awaiting
249 * the response. If sleeping is allowed we use progressive backoff
250 * otherwise we spin.
251 *
252 * The return value is 0 on success or a negative errno on failure. A
253 * failure can happen either because we are not able to execute the
254 * command or FW executes it but signals an error. In the latter case
255 * the return value is the error code indicated by FW (negated).
256 */
257int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
258 void *rpl, bool sleep_ok)
259{
005b5717 260 static const int delay[] = {
56d36be4
DM
261 1, 1, 3, 5, 10, 10, 20, 50, 100, 200
262 };
263
264 u32 v;
265 u64 res;
266 int i, ms, delay_idx;
267 const __be64 *p = cmd;
268 u32 data_reg = PF_REG(mbox, CIM_PF_MAILBOX_DATA);
269 u32 ctl_reg = PF_REG(mbox, CIM_PF_MAILBOX_CTRL);
270
271 if ((size & 15) || size > MBOX_LEN)
272 return -EINVAL;
273
204dc3c0
DM
274 /*
275 * If the device is off-line, as in EEH, commands will time out.
276 * Fail them early so we don't waste time waiting.
277 */
278 if (adap->pdev->error_state != pci_channel_io_normal)
279 return -EIO;
280
56d36be4
DM
281 v = MBOWNER_GET(t4_read_reg(adap, ctl_reg));
282 for (i = 0; v == MBOX_OWNER_NONE && i < 3; i++)
283 v = MBOWNER_GET(t4_read_reg(adap, ctl_reg));
284
285 if (v != MBOX_OWNER_DRV)
286 return v ? -EBUSY : -ETIMEDOUT;
287
288 for (i = 0; i < size; i += 8)
289 t4_write_reg64(adap, data_reg + i, be64_to_cpu(*p++));
290
291 t4_write_reg(adap, ctl_reg, MBMSGVALID | MBOWNER(MBOX_OWNER_FW));
292 t4_read_reg(adap, ctl_reg); /* flush write */
293
294 delay_idx = 0;
295 ms = delay[0];
296
297 for (i = 0; i < FW_CMD_MAX_TIMEOUT; i += ms) {
298 if (sleep_ok) {
299 ms = delay[delay_idx]; /* last element may repeat */
300 if (delay_idx < ARRAY_SIZE(delay) - 1)
301 delay_idx++;
302 msleep(ms);
303 } else
304 mdelay(ms);
305
306 v = t4_read_reg(adap, ctl_reg);
307 if (MBOWNER_GET(v) == MBOX_OWNER_DRV) {
308 if (!(v & MBMSGVALID)) {
309 t4_write_reg(adap, ctl_reg, 0);
310 continue;
311 }
312
313 res = t4_read_reg64(adap, data_reg);
e2ac9628 314 if (FW_CMD_OP_G(res >> 32) == FW_DEBUG_CMD) {
56d36be4 315 fw_asrt(adap, data_reg);
e2ac9628
HS
316 res = FW_CMD_RETVAL_V(EIO);
317 } else if (rpl) {
56d36be4 318 get_mbox_rpl(adap, rpl, size / 8, data_reg);
e2ac9628 319 }
56d36be4 320
e2ac9628 321 if (FW_CMD_RETVAL_G((int)res))
56d36be4
DM
322 dump_mbox(adap, mbox, data_reg);
323 t4_write_reg(adap, ctl_reg, 0);
e2ac9628 324 return -FW_CMD_RETVAL_G((int)res);
56d36be4
DM
325 }
326 }
327
328 dump_mbox(adap, mbox, data_reg);
329 dev_err(adap->pdev_dev, "command %#x in mailbox %d timed out\n",
330 *(const u8 *)cmd, mbox);
31d55c2d 331 t4_report_fw_error(adap);
56d36be4
DM
332 return -ETIMEDOUT;
333}
334
335/**
336 * t4_mc_read - read from MC through backdoor accesses
337 * @adap: the adapter
338 * @addr: address of first byte requested
19dd37ba 339 * @idx: which MC to access
56d36be4
DM
340 * @data: 64 bytes of data containing the requested address
341 * @ecc: where to store the corresponding 64-bit ECC word
342 *
343 * Read 64 bytes of data from MC starting at a 64-byte-aligned address
344 * that covers the requested address @addr. If @parity is not %NULL it
345 * is assigned the 64-bit ECC word for the read data.
346 */
19dd37ba 347int t4_mc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
56d36be4
DM
348{
349 int i;
19dd37ba
SR
350 u32 mc_bist_cmd, mc_bist_cmd_addr, mc_bist_cmd_len;
351 u32 mc_bist_status_rdata, mc_bist_data_pattern;
56d36be4 352
d14807dd 353 if (is_t4(adap->params.chip)) {
19dd37ba
SR
354 mc_bist_cmd = MC_BIST_CMD;
355 mc_bist_cmd_addr = MC_BIST_CMD_ADDR;
356 mc_bist_cmd_len = MC_BIST_CMD_LEN;
357 mc_bist_status_rdata = MC_BIST_STATUS_RDATA;
358 mc_bist_data_pattern = MC_BIST_DATA_PATTERN;
359 } else {
360 mc_bist_cmd = MC_REG(MC_P_BIST_CMD, idx);
361 mc_bist_cmd_addr = MC_REG(MC_P_BIST_CMD_ADDR, idx);
362 mc_bist_cmd_len = MC_REG(MC_P_BIST_CMD_LEN, idx);
363 mc_bist_status_rdata = MC_REG(MC_P_BIST_STATUS_RDATA, idx);
364 mc_bist_data_pattern = MC_REG(MC_P_BIST_DATA_PATTERN, idx);
365 }
366
367 if (t4_read_reg(adap, mc_bist_cmd) & START_BIST)
56d36be4 368 return -EBUSY;
19dd37ba
SR
369 t4_write_reg(adap, mc_bist_cmd_addr, addr & ~0x3fU);
370 t4_write_reg(adap, mc_bist_cmd_len, 64);
371 t4_write_reg(adap, mc_bist_data_pattern, 0xc);
372 t4_write_reg(adap, mc_bist_cmd, BIST_OPCODE(1) | START_BIST |
56d36be4 373 BIST_CMD_GAP(1));
19dd37ba 374 i = t4_wait_op_done(adap, mc_bist_cmd, START_BIST, 0, 10, 1);
56d36be4
DM
375 if (i)
376 return i;
377
19dd37ba 378#define MC_DATA(i) MC_BIST_STATUS_REG(mc_bist_status_rdata, i)
56d36be4
DM
379
380 for (i = 15; i >= 0; i--)
381 *data++ = htonl(t4_read_reg(adap, MC_DATA(i)));
382 if (ecc)
383 *ecc = t4_read_reg64(adap, MC_DATA(16));
384#undef MC_DATA
385 return 0;
386}
387
388/**
389 * t4_edc_read - read from EDC through backdoor accesses
390 * @adap: the adapter
391 * @idx: which EDC to access
392 * @addr: address of first byte requested
393 * @data: 64 bytes of data containing the requested address
394 * @ecc: where to store the corresponding 64-bit ECC word
395 *
396 * Read 64 bytes of data from EDC starting at a 64-byte-aligned address
397 * that covers the requested address @addr. If @parity is not %NULL it
398 * is assigned the 64-bit ECC word for the read data.
399 */
400int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
401{
402 int i;
19dd37ba
SR
403 u32 edc_bist_cmd, edc_bist_cmd_addr, edc_bist_cmd_len;
404 u32 edc_bist_cmd_data_pattern, edc_bist_status_rdata;
56d36be4 405
d14807dd 406 if (is_t4(adap->params.chip)) {
19dd37ba
SR
407 edc_bist_cmd = EDC_REG(EDC_BIST_CMD, idx);
408 edc_bist_cmd_addr = EDC_REG(EDC_BIST_CMD_ADDR, idx);
409 edc_bist_cmd_len = EDC_REG(EDC_BIST_CMD_LEN, idx);
410 edc_bist_cmd_data_pattern = EDC_REG(EDC_BIST_DATA_PATTERN,
411 idx);
412 edc_bist_status_rdata = EDC_REG(EDC_BIST_STATUS_RDATA,
413 idx);
414 } else {
415 edc_bist_cmd = EDC_REG_T5(EDC_H_BIST_CMD, idx);
416 edc_bist_cmd_addr = EDC_REG_T5(EDC_H_BIST_CMD_ADDR, idx);
417 edc_bist_cmd_len = EDC_REG_T5(EDC_H_BIST_CMD_LEN, idx);
418 edc_bist_cmd_data_pattern =
419 EDC_REG_T5(EDC_H_BIST_DATA_PATTERN, idx);
420 edc_bist_status_rdata =
421 EDC_REG_T5(EDC_H_BIST_STATUS_RDATA, idx);
422 }
423
424 if (t4_read_reg(adap, edc_bist_cmd) & START_BIST)
56d36be4 425 return -EBUSY;
19dd37ba
SR
426 t4_write_reg(adap, edc_bist_cmd_addr, addr & ~0x3fU);
427 t4_write_reg(adap, edc_bist_cmd_len, 64);
428 t4_write_reg(adap, edc_bist_cmd_data_pattern, 0xc);
429 t4_write_reg(adap, edc_bist_cmd,
56d36be4 430 BIST_OPCODE(1) | BIST_CMD_GAP(1) | START_BIST);
19dd37ba 431 i = t4_wait_op_done(adap, edc_bist_cmd, START_BIST, 0, 10, 1);
56d36be4
DM
432 if (i)
433 return i;
434
19dd37ba 435#define EDC_DATA(i) (EDC_BIST_STATUS_REG(edc_bist_status_rdata, i))
56d36be4
DM
436
437 for (i = 15; i >= 0; i--)
438 *data++ = htonl(t4_read_reg(adap, EDC_DATA(i)));
439 if (ecc)
440 *ecc = t4_read_reg64(adap, EDC_DATA(16));
441#undef EDC_DATA
442 return 0;
443}
444
5afc8b84
VP
445/**
446 * t4_memory_rw - read/write EDC 0, EDC 1 or MC via PCIE memory window
447 * @adap: the adapter
fc5ab020 448 * @win: PCI-E Memory Window to use
5afc8b84
VP
449 * @mtype: memory type: MEM_EDC0, MEM_EDC1 or MEM_MC
450 * @addr: address within indicated memory type
451 * @len: amount of memory to transfer
452 * @buf: host memory buffer
fc5ab020 453 * @dir: direction of transfer T4_MEMORY_READ (1) or T4_MEMORY_WRITE (0)
5afc8b84
VP
454 *
455 * Reads/writes an [almost] arbitrary memory region in the firmware: the
fc5ab020
HS
456 * firmware memory address and host buffer must be aligned on 32-bit
457 * boudaries; the length may be arbitrary. The memory is transferred as
458 * a raw byte sequence from/to the firmware's memory. If this memory
459 * contains data structures which contain multi-byte integers, it's the
460 * caller's responsibility to perform appropriate byte order conversions.
5afc8b84 461 */
fc5ab020
HS
462int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr,
463 u32 len, __be32 *buf, int dir)
5afc8b84 464{
fc5ab020
HS
465 u32 pos, offset, resid, memoffset;
466 u32 edc_size, mc_size, win_pf, mem_reg, mem_aperture, mem_base;
5afc8b84 467
fc5ab020 468 /* Argument sanity checks ...
5afc8b84 469 */
fc5ab020 470 if (addr & 0x3)
5afc8b84
VP
471 return -EINVAL;
472
fc5ab020
HS
473 /* It's convenient to be able to handle lengths which aren't a
474 * multiple of 32-bits because we often end up transferring files to
475 * the firmware. So we'll handle that by normalizing the length here
476 * and then handling any residual transfer at the end.
477 */
478 resid = len & 0x3;
479 len -= resid;
8c357ebd 480
19dd37ba 481 /* Offset into the region of memory which is being accessed
5afc8b84
VP
482 * MEM_EDC0 = 0
483 * MEM_EDC1 = 1
19dd37ba
SR
484 * MEM_MC = 2 -- T4
485 * MEM_MC0 = 2 -- For T5
486 * MEM_MC1 = 3 -- For T5
5afc8b84 487 */
6559a7e8 488 edc_size = EDRAM0_SIZE_G(t4_read_reg(adap, MA_EDRAM0_BAR_A));
19dd37ba
SR
489 if (mtype != MEM_MC1)
490 memoffset = (mtype * (edc_size * 1024 * 1024));
491 else {
6559a7e8
HS
492 mc_size = EXT_MEM0_SIZE_G(t4_read_reg(adap,
493 MA_EXT_MEMORY1_BAR_A));
19dd37ba
SR
494 memoffset = (MEM_MC0 * edc_size + mc_size) * 1024 * 1024;
495 }
5afc8b84
VP
496
497 /* Determine the PCIE_MEM_ACCESS_OFFSET */
498 addr = addr + memoffset;
499
fc5ab020
HS
500 /* Each PCI-E Memory Window is programmed with a window size -- or
501 * "aperture" -- which controls the granularity of its mapping onto
502 * adapter memory. We need to grab that aperture in order to know
503 * how to use the specified window. The window is also programmed
504 * with the base address of the Memory Window in BAR0's address
505 * space. For T4 this is an absolute PCI-E Bus Address. For T5
506 * the address is relative to BAR0.
5afc8b84 507 */
fc5ab020
HS
508 mem_reg = t4_read_reg(adap,
509 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN,
510 win));
511 mem_aperture = 1 << (GET_WINDOW(mem_reg) + 10);
512 mem_base = GET_PCIEOFST(mem_reg) << 10;
513 if (is_t4(adap->params.chip))
514 mem_base -= adap->t4_bar0;
515 win_pf = is_t4(adap->params.chip) ? 0 : V_PFNUM(adap->fn);
5afc8b84 516
fc5ab020
HS
517 /* Calculate our initial PCI-E Memory Window Position and Offset into
518 * that Window.
519 */
520 pos = addr & ~(mem_aperture-1);
521 offset = addr - pos;
5afc8b84 522
fc5ab020
HS
523 /* Set up initial PCI-E Memory Window to cover the start of our
524 * transfer. (Read it back to ensure that changes propagate before we
525 * attempt to use the new value.)
526 */
527 t4_write_reg(adap,
528 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, win),
529 pos | win_pf);
530 t4_read_reg(adap,
531 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, win));
532
533 /* Transfer data to/from the adapter as long as there's an integral
534 * number of 32-bit transfers to complete.
535 */
536 while (len > 0) {
537 if (dir == T4_MEMORY_READ)
538 *buf++ = (__force __be32) t4_read_reg(adap,
539 mem_base + offset);
540 else
541 t4_write_reg(adap, mem_base + offset,
542 (__force u32) *buf++);
543 offset += sizeof(__be32);
544 len -= sizeof(__be32);
545
546 /* If we've reached the end of our current window aperture,
547 * move the PCI-E Memory Window on to the next. Note that
548 * doing this here after "len" may be 0 allows us to set up
549 * the PCI-E Memory Window for a possible final residual
550 * transfer below ...
5afc8b84 551 */
fc5ab020
HS
552 if (offset == mem_aperture) {
553 pos += mem_aperture;
554 offset = 0;
555 t4_write_reg(adap,
556 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET,
557 win), pos | win_pf);
558 t4_read_reg(adap,
559 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET,
560 win));
5afc8b84 561 }
5afc8b84
VP
562 }
563
fc5ab020
HS
564 /* If the original transfer had a length which wasn't a multiple of
565 * 32-bits, now's where we need to finish off the transfer of the
566 * residual amount. The PCI-E Memory Window has already been moved
567 * above (if necessary) to cover this final transfer.
568 */
569 if (resid) {
570 union {
571 __be32 word;
572 char byte[4];
573 } last;
574 unsigned char *bp;
575 int i;
576
c81576c2 577 if (dir == T4_MEMORY_READ) {
fc5ab020
HS
578 last.word = (__force __be32) t4_read_reg(adap,
579 mem_base + offset);
580 for (bp = (unsigned char *)buf, i = resid; i < 4; i++)
581 bp[i] = last.byte[i];
582 } else {
583 last.word = *buf;
584 for (i = resid; i < 4; i++)
585 last.byte[i] = 0;
586 t4_write_reg(adap, mem_base + offset,
587 (__force u32) last.word);
588 }
589 }
5afc8b84 590
fc5ab020 591 return 0;
5afc8b84
VP
592}
593
56d36be4 594#define EEPROM_STAT_ADDR 0x7bfc
47ce9c48
SR
595#define VPD_BASE 0x400
596#define VPD_BASE_OLD 0
0a57a536 597#define VPD_LEN 1024
63a92fe6 598#define CHELSIO_VPD_UNIQUE_ID 0x82
56d36be4
DM
599
600/**
601 * t4_seeprom_wp - enable/disable EEPROM write protection
602 * @adapter: the adapter
603 * @enable: whether to enable or disable write protection
604 *
605 * Enables or disables write protection on the serial EEPROM.
606 */
607int t4_seeprom_wp(struct adapter *adapter, bool enable)
608{
609 unsigned int v = enable ? 0xc : 0;
610 int ret = pci_write_vpd(adapter->pdev, EEPROM_STAT_ADDR, 4, &v);
611 return ret < 0 ? ret : 0;
612}
613
614/**
615 * get_vpd_params - read VPD parameters from VPD EEPROM
616 * @adapter: adapter to read
617 * @p: where to store the parameters
618 *
619 * Reads card parameters stored in VPD EEPROM.
620 */
636f9d37 621int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
56d36be4 622{
636f9d37 623 u32 cclk_param, cclk_val;
47ce9c48 624 int i, ret, addr;
a94cd705 625 int ec, sn, pn;
8c357ebd 626 u8 *vpd, csum;
23d88e1d 627 unsigned int vpdr_len, kw_offset, id_len;
56d36be4 628
8c357ebd
VP
629 vpd = vmalloc(VPD_LEN);
630 if (!vpd)
631 return -ENOMEM;
632
47ce9c48
SR
633 ret = pci_read_vpd(adapter->pdev, VPD_BASE, sizeof(u32), vpd);
634 if (ret < 0)
635 goto out;
63a92fe6
HS
636
637 /* The VPD shall have a unique identifier specified by the PCI SIG.
638 * For chelsio adapters, the identifier is 0x82. The first byte of a VPD
639 * shall be CHELSIO_VPD_UNIQUE_ID (0x82). The VPD programming software
640 * is expected to automatically put this entry at the
641 * beginning of the VPD.
642 */
643 addr = *vpd == CHELSIO_VPD_UNIQUE_ID ? VPD_BASE : VPD_BASE_OLD;
47ce9c48
SR
644
645 ret = pci_read_vpd(adapter->pdev, addr, VPD_LEN, vpd);
56d36be4 646 if (ret < 0)
8c357ebd 647 goto out;
56d36be4 648
23d88e1d
DM
649 if (vpd[0] != PCI_VPD_LRDT_ID_STRING) {
650 dev_err(adapter->pdev_dev, "missing VPD ID string\n");
8c357ebd
VP
651 ret = -EINVAL;
652 goto out;
23d88e1d
DM
653 }
654
655 id_len = pci_vpd_lrdt_size(vpd);
656 if (id_len > ID_LEN)
657 id_len = ID_LEN;
658
659 i = pci_vpd_find_tag(vpd, 0, VPD_LEN, PCI_VPD_LRDT_RO_DATA);
660 if (i < 0) {
661 dev_err(adapter->pdev_dev, "missing VPD-R section\n");
8c357ebd
VP
662 ret = -EINVAL;
663 goto out;
23d88e1d
DM
664 }
665
666 vpdr_len = pci_vpd_lrdt_size(&vpd[i]);
667 kw_offset = i + PCI_VPD_LRDT_TAG_SIZE;
668 if (vpdr_len + kw_offset > VPD_LEN) {
226ec5fd 669 dev_err(adapter->pdev_dev, "bad VPD-R length %u\n", vpdr_len);
8c357ebd
VP
670 ret = -EINVAL;
671 goto out;
226ec5fd
DM
672 }
673
674#define FIND_VPD_KW(var, name) do { \
23d88e1d 675 var = pci_vpd_find_info_keyword(vpd, kw_offset, vpdr_len, name); \
226ec5fd
DM
676 if (var < 0) { \
677 dev_err(adapter->pdev_dev, "missing VPD keyword " name "\n"); \
8c357ebd
VP
678 ret = -EINVAL; \
679 goto out; \
226ec5fd
DM
680 } \
681 var += PCI_VPD_INFO_FLD_HDR_SIZE; \
682} while (0)
683
684 FIND_VPD_KW(i, "RV");
685 for (csum = 0; i >= 0; i--)
686 csum += vpd[i];
56d36be4
DM
687
688 if (csum) {
689 dev_err(adapter->pdev_dev,
690 "corrupted VPD EEPROM, actual csum %u\n", csum);
8c357ebd
VP
691 ret = -EINVAL;
692 goto out;
56d36be4
DM
693 }
694
226ec5fd
DM
695 FIND_VPD_KW(ec, "EC");
696 FIND_VPD_KW(sn, "SN");
a94cd705 697 FIND_VPD_KW(pn, "PN");
226ec5fd
DM
698#undef FIND_VPD_KW
699
23d88e1d 700 memcpy(p->id, vpd + PCI_VPD_LRDT_TAG_SIZE, id_len);
56d36be4 701 strim(p->id);
226ec5fd 702 memcpy(p->ec, vpd + ec, EC_LEN);
56d36be4 703 strim(p->ec);
226ec5fd
DM
704 i = pci_vpd_info_field_size(vpd + sn - PCI_VPD_INFO_FLD_HDR_SIZE);
705 memcpy(p->sn, vpd + sn, min(i, SERNUM_LEN));
56d36be4 706 strim(p->sn);
63a92fe6 707 i = pci_vpd_info_field_size(vpd + pn - PCI_VPD_INFO_FLD_HDR_SIZE);
a94cd705
KS
708 memcpy(p->pn, vpd + pn, min(i, PN_LEN));
709 strim(p->pn);
636f9d37
VP
710
711 /*
712 * Ask firmware for the Core Clock since it knows how to translate the
713 * Reference Clock ('V2') VPD field into a Core Clock value ...
714 */
5167865a
HS
715 cclk_param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
716 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_CCLK));
636f9d37
VP
717 ret = t4_query_params(adapter, adapter->mbox, 0, 0,
718 1, &cclk_param, &cclk_val);
8c357ebd
VP
719
720out:
721 vfree(vpd);
636f9d37
VP
722 if (ret)
723 return ret;
724 p->cclk = cclk_val;
725
56d36be4
DM
726 return 0;
727}
728
729/* serial flash and firmware constants */
730enum {
731 SF_ATTEMPTS = 10, /* max retries for SF operations */
732
733 /* flash command opcodes */
734 SF_PROG_PAGE = 2, /* program page */
735 SF_WR_DISABLE = 4, /* disable writes */
736 SF_RD_STATUS = 5, /* read status register */
737 SF_WR_ENABLE = 6, /* enable writes */
738 SF_RD_DATA_FAST = 0xb, /* read flash */
900a6596 739 SF_RD_ID = 0x9f, /* read ID */
56d36be4
DM
740 SF_ERASE_SECTOR = 0xd8, /* erase sector */
741
6f1d7210 742 FW_MAX_SIZE = 16 * SF_SEC_SIZE,
56d36be4
DM
743};
744
745/**
746 * sf1_read - read data from the serial flash
747 * @adapter: the adapter
748 * @byte_cnt: number of bytes to read
749 * @cont: whether another operation will be chained
750 * @lock: whether to lock SF for PL access only
751 * @valp: where to store the read data
752 *
753 * Reads up to 4 bytes of data from the serial flash. The location of
754 * the read needs to be specified prior to calling this by issuing the
755 * appropriate commands to the serial flash.
756 */
757static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
758 int lock, u32 *valp)
759{
760 int ret;
761
762 if (!byte_cnt || byte_cnt > 4)
763 return -EINVAL;
ce91a923 764 if (t4_read_reg(adapter, SF_OP) & SF_BUSY)
56d36be4
DM
765 return -EBUSY;
766 cont = cont ? SF_CONT : 0;
767 lock = lock ? SF_LOCK : 0;
768 t4_write_reg(adapter, SF_OP, lock | cont | BYTECNT(byte_cnt - 1));
ce91a923 769 ret = t4_wait_op_done(adapter, SF_OP, SF_BUSY, 0, SF_ATTEMPTS, 5);
56d36be4
DM
770 if (!ret)
771 *valp = t4_read_reg(adapter, SF_DATA);
772 return ret;
773}
774
775/**
776 * sf1_write - write data to the serial flash
777 * @adapter: the adapter
778 * @byte_cnt: number of bytes to write
779 * @cont: whether another operation will be chained
780 * @lock: whether to lock SF for PL access only
781 * @val: value to write
782 *
783 * Writes up to 4 bytes of data to the serial flash. The location of
784 * the write needs to be specified prior to calling this by issuing the
785 * appropriate commands to the serial flash.
786 */
787static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont,
788 int lock, u32 val)
789{
790 if (!byte_cnt || byte_cnt > 4)
791 return -EINVAL;
ce91a923 792 if (t4_read_reg(adapter, SF_OP) & SF_BUSY)
56d36be4
DM
793 return -EBUSY;
794 cont = cont ? SF_CONT : 0;
795 lock = lock ? SF_LOCK : 0;
796 t4_write_reg(adapter, SF_DATA, val);
797 t4_write_reg(adapter, SF_OP, lock |
798 cont | BYTECNT(byte_cnt - 1) | OP_WR);
ce91a923 799 return t4_wait_op_done(adapter, SF_OP, SF_BUSY, 0, SF_ATTEMPTS, 5);
56d36be4
DM
800}
801
802/**
803 * flash_wait_op - wait for a flash operation to complete
804 * @adapter: the adapter
805 * @attempts: max number of polls of the status register
806 * @delay: delay between polls in ms
807 *
808 * Wait for a flash operation to complete by polling the status register.
809 */
810static int flash_wait_op(struct adapter *adapter, int attempts, int delay)
811{
812 int ret;
813 u32 status;
814
815 while (1) {
816 if ((ret = sf1_write(adapter, 1, 1, 1, SF_RD_STATUS)) != 0 ||
817 (ret = sf1_read(adapter, 1, 0, 1, &status)) != 0)
818 return ret;
819 if (!(status & 1))
820 return 0;
821 if (--attempts == 0)
822 return -EAGAIN;
823 if (delay)
824 msleep(delay);
825 }
826}
827
828/**
829 * t4_read_flash - read words from serial flash
830 * @adapter: the adapter
831 * @addr: the start address for the read
832 * @nwords: how many 32-bit words to read
833 * @data: where to store the read data
834 * @byte_oriented: whether to store data as bytes or as words
835 *
836 * Read the specified number of 32-bit words from the serial flash.
837 * If @byte_oriented is set the read data is stored as a byte array
838 * (i.e., big-endian), otherwise as 32-bit words in the platform's
839 * natural endianess.
840 */
de498c89
RD
841static int t4_read_flash(struct adapter *adapter, unsigned int addr,
842 unsigned int nwords, u32 *data, int byte_oriented)
56d36be4
DM
843{
844 int ret;
845
900a6596 846 if (addr + nwords * sizeof(u32) > adapter->params.sf_size || (addr & 3))
56d36be4
DM
847 return -EINVAL;
848
849 addr = swab32(addr) | SF_RD_DATA_FAST;
850
851 if ((ret = sf1_write(adapter, 4, 1, 0, addr)) != 0 ||
852 (ret = sf1_read(adapter, 1, 1, 0, data)) != 0)
853 return ret;
854
855 for ( ; nwords; nwords--, data++) {
856 ret = sf1_read(adapter, 4, nwords > 1, nwords == 1, data);
857 if (nwords == 1)
858 t4_write_reg(adapter, SF_OP, 0); /* unlock SF */
859 if (ret)
860 return ret;
861 if (byte_oriented)
404d9e3f 862 *data = (__force __u32) (htonl(*data));
56d36be4
DM
863 }
864 return 0;
865}
866
867/**
868 * t4_write_flash - write up to a page of data to the serial flash
869 * @adapter: the adapter
870 * @addr: the start address to write
871 * @n: length of data to write in bytes
872 * @data: the data to write
873 *
874 * Writes up to a page of data (256 bytes) to the serial flash starting
875 * at the given address. All the data must be written to the same page.
876 */
877static int t4_write_flash(struct adapter *adapter, unsigned int addr,
878 unsigned int n, const u8 *data)
879{
880 int ret;
881 u32 buf[64];
882 unsigned int i, c, left, val, offset = addr & 0xff;
883
900a6596 884 if (addr >= adapter->params.sf_size || offset + n > SF_PAGE_SIZE)
56d36be4
DM
885 return -EINVAL;
886
887 val = swab32(addr) | SF_PROG_PAGE;
888
889 if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
890 (ret = sf1_write(adapter, 4, 1, 1, val)) != 0)
891 goto unlock;
892
893 for (left = n; left; left -= c) {
894 c = min(left, 4U);
895 for (val = 0, i = 0; i < c; ++i)
896 val = (val << 8) + *data++;
897
898 ret = sf1_write(adapter, c, c != left, 1, val);
899 if (ret)
900 goto unlock;
901 }
900a6596 902 ret = flash_wait_op(adapter, 8, 1);
56d36be4
DM
903 if (ret)
904 goto unlock;
905
906 t4_write_reg(adapter, SF_OP, 0); /* unlock SF */
907
908 /* Read the page to verify the write succeeded */
909 ret = t4_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf, 1);
910 if (ret)
911 return ret;
912
913 if (memcmp(data - n, (u8 *)buf + offset, n)) {
914 dev_err(adapter->pdev_dev,
915 "failed to correctly write the flash page at %#x\n",
916 addr);
917 return -EIO;
918 }
919 return 0;
920
921unlock:
922 t4_write_reg(adapter, SF_OP, 0); /* unlock SF */
923 return ret;
924}
925
926/**
16e47624 927 * t4_get_fw_version - read the firmware version
56d36be4
DM
928 * @adapter: the adapter
929 * @vers: where to place the version
930 *
931 * Reads the FW version from flash.
932 */
16e47624 933int t4_get_fw_version(struct adapter *adapter, u32 *vers)
56d36be4 934{
16e47624
HS
935 return t4_read_flash(adapter, FLASH_FW_START +
936 offsetof(struct fw_hdr, fw_ver), 1,
937 vers, 0);
56d36be4
DM
938}
939
940/**
16e47624 941 * t4_get_tp_version - read the TP microcode version
56d36be4
DM
942 * @adapter: the adapter
943 * @vers: where to place the version
944 *
945 * Reads the TP microcode version from flash.
946 */
16e47624 947int t4_get_tp_version(struct adapter *adapter, u32 *vers)
56d36be4 948{
16e47624 949 return t4_read_flash(adapter, FLASH_FW_START +
900a6596 950 offsetof(struct fw_hdr, tp_microcode_ver),
56d36be4
DM
951 1, vers, 0);
952}
953
16e47624
HS
954/* Is the given firmware API compatible with the one the driver was compiled
955 * with?
56d36be4 956 */
16e47624 957static int fw_compatible(const struct fw_hdr *hdr1, const struct fw_hdr *hdr2)
56d36be4 958{
56d36be4 959
16e47624
HS
960 /* short circuit if it's the exact same firmware version */
961 if (hdr1->chip == hdr2->chip && hdr1->fw_ver == hdr2->fw_ver)
962 return 1;
56d36be4 963
16e47624
HS
964#define SAME_INTF(x) (hdr1->intfver_##x == hdr2->intfver_##x)
965 if (hdr1->chip == hdr2->chip && SAME_INTF(nic) && SAME_INTF(vnic) &&
966 SAME_INTF(ri) && SAME_INTF(iscsi) && SAME_INTF(fcoe))
967 return 1;
968#undef SAME_INTF
0a57a536 969
16e47624
HS
970 return 0;
971}
56d36be4 972
16e47624
HS
973/* The firmware in the filesystem is usable, but should it be installed?
974 * This routine explains itself in detail if it indicates the filesystem
975 * firmware should be installed.
976 */
977static int should_install_fs_fw(struct adapter *adap, int card_fw_usable,
978 int k, int c)
979{
980 const char *reason;
981
982 if (!card_fw_usable) {
983 reason = "incompatible or unusable";
984 goto install;
e69972f5
JH
985 }
986
16e47624
HS
987 if (k > c) {
988 reason = "older than the version supported with this driver";
989 goto install;
56d36be4
DM
990 }
991
16e47624
HS
992 return 0;
993
994install:
995 dev_err(adap->pdev_dev, "firmware on card (%u.%u.%u.%u) is %s, "
996 "installing firmware %u.%u.%u.%u on card.\n",
b2e1a3f0
HS
997 FW_HDR_FW_VER_MAJOR_G(c), FW_HDR_FW_VER_MINOR_G(c),
998 FW_HDR_FW_VER_MICRO_G(c), FW_HDR_FW_VER_BUILD_G(c), reason,
999 FW_HDR_FW_VER_MAJOR_G(k), FW_HDR_FW_VER_MINOR_G(k),
1000 FW_HDR_FW_VER_MICRO_G(k), FW_HDR_FW_VER_BUILD_G(k));
56d36be4 1001
56d36be4
DM
1002 return 1;
1003}
1004
16e47624
HS
1005int t4_prep_fw(struct adapter *adap, struct fw_info *fw_info,
1006 const u8 *fw_data, unsigned int fw_size,
1007 struct fw_hdr *card_fw, enum dev_state state,
1008 int *reset)
1009{
1010 int ret, card_fw_usable, fs_fw_usable;
1011 const struct fw_hdr *fs_fw;
1012 const struct fw_hdr *drv_fw;
1013
1014 drv_fw = &fw_info->fw_hdr;
1015
1016 /* Read the header of the firmware on the card */
1017 ret = -t4_read_flash(adap, FLASH_FW_START,
1018 sizeof(*card_fw) / sizeof(uint32_t),
1019 (uint32_t *)card_fw, 1);
1020 if (ret == 0) {
1021 card_fw_usable = fw_compatible(drv_fw, (const void *)card_fw);
1022 } else {
1023 dev_err(adap->pdev_dev,
1024 "Unable to read card's firmware header: %d\n", ret);
1025 card_fw_usable = 0;
1026 }
1027
1028 if (fw_data != NULL) {
1029 fs_fw = (const void *)fw_data;
1030 fs_fw_usable = fw_compatible(drv_fw, fs_fw);
1031 } else {
1032 fs_fw = NULL;
1033 fs_fw_usable = 0;
1034 }
1035
1036 if (card_fw_usable && card_fw->fw_ver == drv_fw->fw_ver &&
1037 (!fs_fw_usable || fs_fw->fw_ver == drv_fw->fw_ver)) {
1038 /* Common case: the firmware on the card is an exact match and
1039 * the filesystem one is an exact match too, or the filesystem
1040 * one is absent/incompatible.
1041 */
1042 } else if (fs_fw_usable && state == DEV_STATE_UNINIT &&
1043 should_install_fs_fw(adap, card_fw_usable,
1044 be32_to_cpu(fs_fw->fw_ver),
1045 be32_to_cpu(card_fw->fw_ver))) {
1046 ret = -t4_fw_upgrade(adap, adap->mbox, fw_data,
1047 fw_size, 0);
1048 if (ret != 0) {
1049 dev_err(adap->pdev_dev,
1050 "failed to install firmware: %d\n", ret);
1051 goto bye;
1052 }
1053
1054 /* Installed successfully, update the cached header too. */
1055 memcpy(card_fw, fs_fw, sizeof(*card_fw));
1056 card_fw_usable = 1;
1057 *reset = 0; /* already reset as part of load_fw */
1058 }
1059
1060 if (!card_fw_usable) {
1061 uint32_t d, c, k;
1062
1063 d = be32_to_cpu(drv_fw->fw_ver);
1064 c = be32_to_cpu(card_fw->fw_ver);
1065 k = fs_fw ? be32_to_cpu(fs_fw->fw_ver) : 0;
1066
1067 dev_err(adap->pdev_dev, "Cannot find a usable firmware: "
1068 "chip state %d, "
1069 "driver compiled with %d.%d.%d.%d, "
1070 "card has %d.%d.%d.%d, filesystem has %d.%d.%d.%d\n",
1071 state,
b2e1a3f0
HS
1072 FW_HDR_FW_VER_MAJOR_G(d), FW_HDR_FW_VER_MINOR_G(d),
1073 FW_HDR_FW_VER_MICRO_G(d), FW_HDR_FW_VER_BUILD_G(d),
1074 FW_HDR_FW_VER_MAJOR_G(c), FW_HDR_FW_VER_MINOR_G(c),
1075 FW_HDR_FW_VER_MICRO_G(c), FW_HDR_FW_VER_BUILD_G(c),
1076 FW_HDR_FW_VER_MAJOR_G(k), FW_HDR_FW_VER_MINOR_G(k),
1077 FW_HDR_FW_VER_MICRO_G(k), FW_HDR_FW_VER_BUILD_G(k));
16e47624
HS
1078 ret = EINVAL;
1079 goto bye;
1080 }
1081
1082 /* We're using whatever's on the card and it's known to be good. */
1083 adap->params.fw_vers = be32_to_cpu(card_fw->fw_ver);
1084 adap->params.tp_vers = be32_to_cpu(card_fw->tp_microcode_ver);
1085
1086bye:
1087 return ret;
1088}
1089
56d36be4
DM
1090/**
1091 * t4_flash_erase_sectors - erase a range of flash sectors
1092 * @adapter: the adapter
1093 * @start: the first sector to erase
1094 * @end: the last sector to erase
1095 *
1096 * Erases the sectors in the given inclusive range.
1097 */
1098static int t4_flash_erase_sectors(struct adapter *adapter, int start, int end)
1099{
1100 int ret = 0;
1101
c0d5b8cf
HS
1102 if (end >= adapter->params.sf_nsec)
1103 return -EINVAL;
1104
56d36be4
DM
1105 while (start <= end) {
1106 if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
1107 (ret = sf1_write(adapter, 4, 0, 1,
1108 SF_ERASE_SECTOR | (start << 8))) != 0 ||
900a6596 1109 (ret = flash_wait_op(adapter, 14, 500)) != 0) {
56d36be4
DM
1110 dev_err(adapter->pdev_dev,
1111 "erase of flash sector %d failed, error %d\n",
1112 start, ret);
1113 break;
1114 }
1115 start++;
1116 }
1117 t4_write_reg(adapter, SF_OP, 0); /* unlock SF */
1118 return ret;
1119}
1120
636f9d37
VP
1121/**
1122 * t4_flash_cfg_addr - return the address of the flash configuration file
1123 * @adapter: the adapter
1124 *
1125 * Return the address within the flash where the Firmware Configuration
1126 * File is stored.
1127 */
1128unsigned int t4_flash_cfg_addr(struct adapter *adapter)
1129{
1130 if (adapter->params.sf_size == 0x100000)
1131 return FLASH_FPGA_CFG_START;
1132 else
1133 return FLASH_CFG_START;
1134}
1135
79af221d
HS
1136/* Return TRUE if the specified firmware matches the adapter. I.e. T4
1137 * firmware for T4 adapters, T5 firmware for T5 adapters, etc. We go ahead
1138 * and emit an error message for mismatched firmware to save our caller the
1139 * effort ...
1140 */
1141static bool t4_fw_matches_chip(const struct adapter *adap,
1142 const struct fw_hdr *hdr)
1143{
1144 /* The expression below will return FALSE for any unsupported adapter
1145 * which will keep us "honest" in the future ...
1146 */
1147 if ((is_t4(adap->params.chip) && hdr->chip == FW_HDR_CHIP_T4) ||
1148 (is_t5(adap->params.chip) && hdr->chip == FW_HDR_CHIP_T5))
1149 return true;
1150
1151 dev_err(adap->pdev_dev,
1152 "FW image (%d) is not suitable for this adapter (%d)\n",
1153 hdr->chip, CHELSIO_CHIP_VERSION(adap->params.chip));
1154 return false;
1155}
1156
56d36be4
DM
1157/**
1158 * t4_load_fw - download firmware
1159 * @adap: the adapter
1160 * @fw_data: the firmware image to write
1161 * @size: image size
1162 *
1163 * Write the supplied firmware image to the card's serial flash.
1164 */
1165int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
1166{
1167 u32 csum;
1168 int ret, addr;
1169 unsigned int i;
1170 u8 first_page[SF_PAGE_SIZE];
404d9e3f 1171 const __be32 *p = (const __be32 *)fw_data;
56d36be4 1172 const struct fw_hdr *hdr = (const struct fw_hdr *)fw_data;
900a6596
DM
1173 unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
1174 unsigned int fw_img_start = adap->params.sf_fw_start;
1175 unsigned int fw_start_sec = fw_img_start / sf_sec_size;
56d36be4
DM
1176
1177 if (!size) {
1178 dev_err(adap->pdev_dev, "FW image has no data\n");
1179 return -EINVAL;
1180 }
1181 if (size & 511) {
1182 dev_err(adap->pdev_dev,
1183 "FW image size not multiple of 512 bytes\n");
1184 return -EINVAL;
1185 }
1186 if (ntohs(hdr->len512) * 512 != size) {
1187 dev_err(adap->pdev_dev,
1188 "FW image size differs from size in FW header\n");
1189 return -EINVAL;
1190 }
1191 if (size > FW_MAX_SIZE) {
1192 dev_err(adap->pdev_dev, "FW image too large, max is %u bytes\n",
1193 FW_MAX_SIZE);
1194 return -EFBIG;
1195 }
79af221d
HS
1196 if (!t4_fw_matches_chip(adap, hdr))
1197 return -EINVAL;
56d36be4
DM
1198
1199 for (csum = 0, i = 0; i < size / sizeof(csum); i++)
1200 csum += ntohl(p[i]);
1201
1202 if (csum != 0xffffffff) {
1203 dev_err(adap->pdev_dev,
1204 "corrupted firmware image, checksum %#x\n", csum);
1205 return -EINVAL;
1206 }
1207
900a6596
DM
1208 i = DIV_ROUND_UP(size, sf_sec_size); /* # of sectors spanned */
1209 ret = t4_flash_erase_sectors(adap, fw_start_sec, fw_start_sec + i - 1);
56d36be4
DM
1210 if (ret)
1211 goto out;
1212
1213 /*
1214 * We write the correct version at the end so the driver can see a bad
1215 * version if the FW write fails. Start by writing a copy of the
1216 * first page with a bad version.
1217 */
1218 memcpy(first_page, fw_data, SF_PAGE_SIZE);
1219 ((struct fw_hdr *)first_page)->fw_ver = htonl(0xffffffff);
900a6596 1220 ret = t4_write_flash(adap, fw_img_start, SF_PAGE_SIZE, first_page);
56d36be4
DM
1221 if (ret)
1222 goto out;
1223
900a6596 1224 addr = fw_img_start;
56d36be4
DM
1225 for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
1226 addr += SF_PAGE_SIZE;
1227 fw_data += SF_PAGE_SIZE;
1228 ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, fw_data);
1229 if (ret)
1230 goto out;
1231 }
1232
1233 ret = t4_write_flash(adap,
900a6596 1234 fw_img_start + offsetof(struct fw_hdr, fw_ver),
56d36be4
DM
1235 sizeof(hdr->fw_ver), (const u8 *)&hdr->fw_ver);
1236out:
1237 if (ret)
1238 dev_err(adap->pdev_dev, "firmware download failed, error %d\n",
1239 ret);
dff04bce
HS
1240 else
1241 ret = t4_get_fw_version(adap, &adap->params.fw_vers);
56d36be4
DM
1242 return ret;
1243}
1244
1245#define ADVERT_MASK (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G |\
72aca4bf
KS
1246 FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_SPEED_40G | \
1247 FW_PORT_CAP_ANEG)
56d36be4
DM
1248
1249/**
1250 * t4_link_start - apply link configuration to MAC/PHY
1251 * @phy: the PHY to setup
1252 * @mac: the MAC to setup
1253 * @lc: the requested link configuration
1254 *
1255 * Set up a port's MAC and PHY according to a desired link configuration.
1256 * - If the PHY can auto-negotiate first decide what to advertise, then
1257 * enable/disable auto-negotiation as desired, and reset.
1258 * - If the PHY does not auto-negotiate just reset it.
1259 * - If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
1260 * otherwise do it later based on the outcome of auto-negotiation.
1261 */
1262int t4_link_start(struct adapter *adap, unsigned int mbox, unsigned int port,
1263 struct link_config *lc)
1264{
1265 struct fw_port_cmd c;
2b5fb1f2 1266 unsigned int fc = 0, mdi = FW_PORT_CAP_MDI_V(FW_PORT_CAP_MDI_AUTO);
56d36be4
DM
1267
1268 lc->link_ok = 0;
1269 if (lc->requested_fc & PAUSE_RX)
1270 fc |= FW_PORT_CAP_FC_RX;
1271 if (lc->requested_fc & PAUSE_TX)
1272 fc |= FW_PORT_CAP_FC_TX;
1273
1274 memset(&c, 0, sizeof(c));
e2ac9628 1275 c.op_to_portid = htonl(FW_CMD_OP_V(FW_PORT_CMD) | FW_CMD_REQUEST_F |
2b5fb1f2
HS
1276 FW_CMD_EXEC_F | FW_PORT_CMD_PORTID_V(port));
1277 c.action_to_len16 = htonl(FW_PORT_CMD_ACTION_V(FW_PORT_ACTION_L1_CFG) |
56d36be4
DM
1278 FW_LEN16(c));
1279
1280 if (!(lc->supported & FW_PORT_CAP_ANEG)) {
1281 c.u.l1cfg.rcap = htonl((lc->supported & ADVERT_MASK) | fc);
1282 lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
1283 } else if (lc->autoneg == AUTONEG_DISABLE) {
1284 c.u.l1cfg.rcap = htonl(lc->requested_speed | fc | mdi);
1285 lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
1286 } else
1287 c.u.l1cfg.rcap = htonl(lc->advertising | fc | mdi);
1288
1289 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
1290}
1291
1292/**
1293 * t4_restart_aneg - restart autonegotiation
1294 * @adap: the adapter
1295 * @mbox: mbox to use for the FW command
1296 * @port: the port id
1297 *
1298 * Restarts autonegotiation for the selected port.
1299 */
1300int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port)
1301{
1302 struct fw_port_cmd c;
1303
1304 memset(&c, 0, sizeof(c));
e2ac9628 1305 c.op_to_portid = htonl(FW_CMD_OP_V(FW_PORT_CMD) | FW_CMD_REQUEST_F |
2b5fb1f2
HS
1306 FW_CMD_EXEC_F | FW_PORT_CMD_PORTID_V(port));
1307 c.action_to_len16 = htonl(FW_PORT_CMD_ACTION_V(FW_PORT_ACTION_L1_CFG) |
56d36be4
DM
1308 FW_LEN16(c));
1309 c.u.l1cfg.rcap = htonl(FW_PORT_CAP_ANEG);
1310 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
1311}
1312
8caa1e84
VP
1313typedef void (*int_handler_t)(struct adapter *adap);
1314
56d36be4
DM
1315struct intr_info {
1316 unsigned int mask; /* bits to check in interrupt status */
1317 const char *msg; /* message to print or NULL */
1318 short stat_idx; /* stat counter to increment or -1 */
1319 unsigned short fatal; /* whether the condition reported is fatal */
8caa1e84 1320 int_handler_t int_handler; /* platform-specific int handler */
56d36be4
DM
1321};
1322
1323/**
1324 * t4_handle_intr_status - table driven interrupt handler
1325 * @adapter: the adapter that generated the interrupt
1326 * @reg: the interrupt status register to process
1327 * @acts: table of interrupt actions
1328 *
1329 * A table driven interrupt handler that applies a set of masks to an
1330 * interrupt status word and performs the corresponding actions if the
25985edc 1331 * interrupts described by the mask have occurred. The actions include
56d36be4
DM
1332 * optionally emitting a warning or alert message. The table is terminated
1333 * by an entry specifying mask 0. Returns the number of fatal interrupt
1334 * conditions.
1335 */
1336static int t4_handle_intr_status(struct adapter *adapter, unsigned int reg,
1337 const struct intr_info *acts)
1338{
1339 int fatal = 0;
1340 unsigned int mask = 0;
1341 unsigned int status = t4_read_reg(adapter, reg);
1342
1343 for ( ; acts->mask; ++acts) {
1344 if (!(status & acts->mask))
1345 continue;
1346 if (acts->fatal) {
1347 fatal++;
1348 dev_alert(adapter->pdev_dev, "%s (0x%x)\n", acts->msg,
1349 status & acts->mask);
1350 } else if (acts->msg && printk_ratelimit())
1351 dev_warn(adapter->pdev_dev, "%s (0x%x)\n", acts->msg,
1352 status & acts->mask);
8caa1e84
VP
1353 if (acts->int_handler)
1354 acts->int_handler(adapter);
56d36be4
DM
1355 mask |= acts->mask;
1356 }
1357 status &= mask;
1358 if (status) /* clear processed interrupts */
1359 t4_write_reg(adapter, reg, status);
1360 return fatal;
1361}
1362
1363/*
1364 * Interrupt handler for the PCIE module.
1365 */
1366static void pcie_intr_handler(struct adapter *adapter)
1367{
005b5717 1368 static const struct intr_info sysbus_intr_info[] = {
56d36be4
DM
1369 { RNPP, "RXNP array parity error", -1, 1 },
1370 { RPCP, "RXPC array parity error", -1, 1 },
1371 { RCIP, "RXCIF array parity error", -1, 1 },
1372 { RCCP, "Rx completions control array parity error", -1, 1 },
1373 { RFTP, "RXFT array parity error", -1, 1 },
1374 { 0 }
1375 };
005b5717 1376 static const struct intr_info pcie_port_intr_info[] = {
56d36be4
DM
1377 { TPCP, "TXPC array parity error", -1, 1 },
1378 { TNPP, "TXNP array parity error", -1, 1 },
1379 { TFTP, "TXFT array parity error", -1, 1 },
1380 { TCAP, "TXCA array parity error", -1, 1 },
1381 { TCIP, "TXCIF array parity error", -1, 1 },
1382 { RCAP, "RXCA array parity error", -1, 1 },
1383 { OTDD, "outbound request TLP discarded", -1, 1 },
1384 { RDPE, "Rx data parity error", -1, 1 },
1385 { TDUE, "Tx uncorrectable data error", -1, 1 },
1386 { 0 }
1387 };
005b5717 1388 static const struct intr_info pcie_intr_info[] = {
56d36be4
DM
1389 { MSIADDRLPERR, "MSI AddrL parity error", -1, 1 },
1390 { MSIADDRHPERR, "MSI AddrH parity error", -1, 1 },
1391 { MSIDATAPERR, "MSI data parity error", -1, 1 },
1392 { MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 },
1393 { MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 },
1394 { MSIXDATAPERR, "MSI-X data parity error", -1, 1 },
1395 { MSIXDIPERR, "MSI-X DI parity error", -1, 1 },
1396 { PIOCPLPERR, "PCI PIO completion FIFO parity error", -1, 1 },
1397 { PIOREQPERR, "PCI PIO request FIFO parity error", -1, 1 },
1398 { TARTAGPERR, "PCI PCI target tag FIFO parity error", -1, 1 },
1399 { CCNTPERR, "PCI CMD channel count parity error", -1, 1 },
1400 { CREQPERR, "PCI CMD channel request parity error", -1, 1 },
1401 { CRSPPERR, "PCI CMD channel response parity error", -1, 1 },
1402 { DCNTPERR, "PCI DMA channel count parity error", -1, 1 },
1403 { DREQPERR, "PCI DMA channel request parity error", -1, 1 },
1404 { DRSPPERR, "PCI DMA channel response parity error", -1, 1 },
1405 { HCNTPERR, "PCI HMA channel count parity error", -1, 1 },
1406 { HREQPERR, "PCI HMA channel request parity error", -1, 1 },
1407 { HRSPPERR, "PCI HMA channel response parity error", -1, 1 },
1408 { CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 },
1409 { FIDPERR, "PCI FID parity error", -1, 1 },
1410 { INTXCLRPERR, "PCI INTx clear parity error", -1, 1 },
1411 { MATAGPERR, "PCI MA tag parity error", -1, 1 },
1412 { PIOTAGPERR, "PCI PIO tag parity error", -1, 1 },
1413 { RXCPLPERR, "PCI Rx completion parity error", -1, 1 },
1414 { RXWRPERR, "PCI Rx write parity error", -1, 1 },
1415 { RPLPERR, "PCI replay buffer parity error", -1, 1 },
1416 { PCIESINT, "PCI core secondary fault", -1, 1 },
1417 { PCIEPINT, "PCI core primary fault", -1, 1 },
1418 { UNXSPLCPLERR, "PCI unexpected split completion error", -1, 0 },
1419 { 0 }
1420 };
1421
0a57a536
SR
1422 static struct intr_info t5_pcie_intr_info[] = {
1423 { MSTGRPPERR, "Master Response Read Queue parity error",
1424 -1, 1 },
1425 { MSTTIMEOUTPERR, "Master Timeout FIFO parity error", -1, 1 },
1426 { MSIXSTIPERR, "MSI-X STI SRAM parity error", -1, 1 },
1427 { MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 },
1428 { MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 },
1429 { MSIXDATAPERR, "MSI-X data parity error", -1, 1 },
1430 { MSIXDIPERR, "MSI-X DI parity error", -1, 1 },
1431 { PIOCPLGRPPERR, "PCI PIO completion Group FIFO parity error",
1432 -1, 1 },
1433 { PIOREQGRPPERR, "PCI PIO request Group FIFO parity error",
1434 -1, 1 },
1435 { TARTAGPERR, "PCI PCI target tag FIFO parity error", -1, 1 },
1436 { MSTTAGQPERR, "PCI master tag queue parity error", -1, 1 },
1437 { CREQPERR, "PCI CMD channel request parity error", -1, 1 },
1438 { CRSPPERR, "PCI CMD channel response parity error", -1, 1 },
1439 { DREQWRPERR, "PCI DMA channel write request parity error",
1440 -1, 1 },
1441 { DREQPERR, "PCI DMA channel request parity error", -1, 1 },
1442 { DRSPPERR, "PCI DMA channel response parity error", -1, 1 },
1443 { HREQWRPERR, "PCI HMA channel count parity error", -1, 1 },
1444 { HREQPERR, "PCI HMA channel request parity error", -1, 1 },
1445 { HRSPPERR, "PCI HMA channel response parity error", -1, 1 },
1446 { CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 },
1447 { FIDPERR, "PCI FID parity error", -1, 1 },
1448 { VFIDPERR, "PCI INTx clear parity error", -1, 1 },
1449 { MAGRPPERR, "PCI MA group FIFO parity error", -1, 1 },
1450 { PIOTAGPERR, "PCI PIO tag parity error", -1, 1 },
1451 { IPRXHDRGRPPERR, "PCI IP Rx header group parity error",
1452 -1, 1 },
1453 { IPRXDATAGRPPERR, "PCI IP Rx data group parity error", -1, 1 },
1454 { RPLPERR, "PCI IP replay buffer parity error", -1, 1 },
1455 { IPSOTPERR, "PCI IP SOT buffer parity error", -1, 1 },
1456 { TRGT1GRPPERR, "PCI TRGT1 group FIFOs parity error", -1, 1 },
1457 { READRSPERR, "Outbound read error", -1, 0 },
1458 { 0 }
1459 };
1460
56d36be4
DM
1461 int fat;
1462
9bb59b96
HS
1463 if (is_t4(adapter->params.chip))
1464 fat = t4_handle_intr_status(adapter,
1465 PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS,
1466 sysbus_intr_info) +
1467 t4_handle_intr_status(adapter,
1468 PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
1469 pcie_port_intr_info) +
1470 t4_handle_intr_status(adapter, PCIE_INT_CAUSE,
1471 pcie_intr_info);
1472 else
1473 fat = t4_handle_intr_status(adapter, PCIE_INT_CAUSE,
1474 t5_pcie_intr_info);
0a57a536 1475
56d36be4
DM
1476 if (fat)
1477 t4_fatal_err(adapter);
1478}
1479
1480/*
1481 * TP interrupt handler.
1482 */
1483static void tp_intr_handler(struct adapter *adapter)
1484{
005b5717 1485 static const struct intr_info tp_intr_info[] = {
56d36be4
DM
1486 { 0x3fffffff, "TP parity error", -1, 1 },
1487 { FLMTXFLSTEMPTY, "TP out of Tx pages", -1, 1 },
1488 { 0 }
1489 };
1490
1491 if (t4_handle_intr_status(adapter, TP_INT_CAUSE, tp_intr_info))
1492 t4_fatal_err(adapter);
1493}
1494
1495/*
1496 * SGE interrupt handler.
1497 */
1498static void sge_intr_handler(struct adapter *adapter)
1499{
1500 u64 v;
1501
005b5717 1502 static const struct intr_info sge_intr_info[] = {
f612b815 1503 { ERR_CPL_EXCEED_IQE_SIZE_F,
56d36be4 1504 "SGE received CPL exceeding IQE size", -1, 1 },
f612b815 1505 { ERR_INVALID_CIDX_INC_F,
56d36be4 1506 "SGE GTS CIDX increment too large", -1, 0 },
f612b815
HS
1507 { ERR_CPL_OPCODE_0_F, "SGE received 0-length CPL", -1, 0 },
1508 { DBFIFO_LP_INT_F, NULL, -1, 0, t4_db_full },
1509 { DBFIFO_HP_INT_F, NULL, -1, 0, t4_db_full },
1510 { ERR_DROPPED_DB_F, NULL, -1, 0, t4_db_dropped },
1511 { ERR_DATA_CPL_ON_HIGH_QID1_F | ERR_DATA_CPL_ON_HIGH_QID0_F,
56d36be4 1512 "SGE IQID > 1023 received CPL for FL", -1, 0 },
f612b815 1513 { ERR_BAD_DB_PIDX3_F, "SGE DBP 3 pidx increment too large", -1,
56d36be4 1514 0 },
f612b815 1515 { ERR_BAD_DB_PIDX2_F, "SGE DBP 2 pidx increment too large", -1,
56d36be4 1516 0 },
f612b815 1517 { ERR_BAD_DB_PIDX1_F, "SGE DBP 1 pidx increment too large", -1,
56d36be4 1518 0 },
f612b815 1519 { ERR_BAD_DB_PIDX0_F, "SGE DBP 0 pidx increment too large", -1,
56d36be4 1520 0 },
f612b815 1521 { ERR_ING_CTXT_PRIO_F,
56d36be4 1522 "SGE too many priority ingress contexts", -1, 0 },
f612b815 1523 { ERR_EGR_CTXT_PRIO_F,
56d36be4 1524 "SGE too many priority egress contexts", -1, 0 },
f612b815
HS
1525 { INGRESS_SIZE_ERR_F, "SGE illegal ingress QID", -1, 0 },
1526 { EGRESS_SIZE_ERR_F, "SGE illegal egress QID", -1, 0 },
56d36be4
DM
1527 { 0 }
1528 };
1529
f612b815
HS
1530 v = (u64)t4_read_reg(adapter, SGE_INT_CAUSE1_A) |
1531 ((u64)t4_read_reg(adapter, SGE_INT_CAUSE2_A) << 32);
56d36be4
DM
1532 if (v) {
1533 dev_alert(adapter->pdev_dev, "SGE parity error (%#llx)\n",
8caa1e84 1534 (unsigned long long)v);
f612b815
HS
1535 t4_write_reg(adapter, SGE_INT_CAUSE1_A, v);
1536 t4_write_reg(adapter, SGE_INT_CAUSE2_A, v >> 32);
56d36be4
DM
1537 }
1538
f612b815 1539 if (t4_handle_intr_status(adapter, SGE_INT_CAUSE3_A, sge_intr_info) ||
56d36be4
DM
1540 v != 0)
1541 t4_fatal_err(adapter);
1542}
1543
1544/*
1545 * CIM interrupt handler.
1546 */
1547static void cim_intr_handler(struct adapter *adapter)
1548{
005b5717 1549 static const struct intr_info cim_intr_info[] = {
56d36be4
DM
1550 { PREFDROPINT, "CIM control register prefetch drop", -1, 1 },
1551 { OBQPARERR, "CIM OBQ parity error", -1, 1 },
1552 { IBQPARERR, "CIM IBQ parity error", -1, 1 },
1553 { MBUPPARERR, "CIM mailbox uP parity error", -1, 1 },
1554 { MBHOSTPARERR, "CIM mailbox host parity error", -1, 1 },
1555 { TIEQINPARERRINT, "CIM TIEQ outgoing parity error", -1, 1 },
1556 { TIEQOUTPARERRINT, "CIM TIEQ incoming parity error", -1, 1 },
1557 { 0 }
1558 };
005b5717 1559 static const struct intr_info cim_upintr_info[] = {
56d36be4
DM
1560 { RSVDSPACEINT, "CIM reserved space access", -1, 1 },
1561 { ILLTRANSINT, "CIM illegal transaction", -1, 1 },
1562 { ILLWRINT, "CIM illegal write", -1, 1 },
1563 { ILLRDINT, "CIM illegal read", -1, 1 },
1564 { ILLRDBEINT, "CIM illegal read BE", -1, 1 },
1565 { ILLWRBEINT, "CIM illegal write BE", -1, 1 },
1566 { SGLRDBOOTINT, "CIM single read from boot space", -1, 1 },
1567 { SGLWRBOOTINT, "CIM single write to boot space", -1, 1 },
1568 { BLKWRBOOTINT, "CIM block write to boot space", -1, 1 },
1569 { SGLRDFLASHINT, "CIM single read from flash space", -1, 1 },
1570 { SGLWRFLASHINT, "CIM single write to flash space", -1, 1 },
1571 { BLKWRFLASHINT, "CIM block write to flash space", -1, 1 },
1572 { SGLRDEEPROMINT, "CIM single EEPROM read", -1, 1 },
1573 { SGLWREEPROMINT, "CIM single EEPROM write", -1, 1 },
1574 { BLKRDEEPROMINT, "CIM block EEPROM read", -1, 1 },
1575 { BLKWREEPROMINT, "CIM block EEPROM write", -1, 1 },
1576 { SGLRDCTLINT , "CIM single read from CTL space", -1, 1 },
1577 { SGLWRCTLINT , "CIM single write to CTL space", -1, 1 },
1578 { BLKRDCTLINT , "CIM block read from CTL space", -1, 1 },
1579 { BLKWRCTLINT , "CIM block write to CTL space", -1, 1 },
1580 { SGLRDPLINT , "CIM single read from PL space", -1, 1 },
1581 { SGLWRPLINT , "CIM single write to PL space", -1, 1 },
1582 { BLKRDPLINT , "CIM block read from PL space", -1, 1 },
1583 { BLKWRPLINT , "CIM block write to PL space", -1, 1 },
1584 { REQOVRLOOKUPINT , "CIM request FIFO overwrite", -1, 1 },
1585 { RSPOVRLOOKUPINT , "CIM response FIFO overwrite", -1, 1 },
1586 { TIMEOUTINT , "CIM PIF timeout", -1, 1 },
1587 { TIMEOUTMAINT , "CIM PIF MA timeout", -1, 1 },
1588 { 0 }
1589 };
1590
1591 int fat;
1592
b2e1a3f0 1593 if (t4_read_reg(adapter, MA_PCIE_FW) & PCIE_FW_ERR)
31d55c2d
HS
1594 t4_report_fw_error(adapter);
1595
56d36be4
DM
1596 fat = t4_handle_intr_status(adapter, CIM_HOST_INT_CAUSE,
1597 cim_intr_info) +
1598 t4_handle_intr_status(adapter, CIM_HOST_UPACC_INT_CAUSE,
1599 cim_upintr_info);
1600 if (fat)
1601 t4_fatal_err(adapter);
1602}
1603
1604/*
1605 * ULP RX interrupt handler.
1606 */
1607static void ulprx_intr_handler(struct adapter *adapter)
1608{
005b5717 1609 static const struct intr_info ulprx_intr_info[] = {
91e9a1ec 1610 { 0x1800000, "ULPRX context error", -1, 1 },
56d36be4
DM
1611 { 0x7fffff, "ULPRX parity error", -1, 1 },
1612 { 0 }
1613 };
1614
1615 if (t4_handle_intr_status(adapter, ULP_RX_INT_CAUSE, ulprx_intr_info))
1616 t4_fatal_err(adapter);
1617}
1618
1619/*
1620 * ULP TX interrupt handler.
1621 */
1622static void ulptx_intr_handler(struct adapter *adapter)
1623{
005b5717 1624 static const struct intr_info ulptx_intr_info[] = {
56d36be4
DM
1625 { PBL_BOUND_ERR_CH3, "ULPTX channel 3 PBL out of bounds", -1,
1626 0 },
1627 { PBL_BOUND_ERR_CH2, "ULPTX channel 2 PBL out of bounds", -1,
1628 0 },
1629 { PBL_BOUND_ERR_CH1, "ULPTX channel 1 PBL out of bounds", -1,
1630 0 },
1631 { PBL_BOUND_ERR_CH0, "ULPTX channel 0 PBL out of bounds", -1,
1632 0 },
1633 { 0xfffffff, "ULPTX parity error", -1, 1 },
1634 { 0 }
1635 };
1636
1637 if (t4_handle_intr_status(adapter, ULP_TX_INT_CAUSE, ulptx_intr_info))
1638 t4_fatal_err(adapter);
1639}
1640
1641/*
1642 * PM TX interrupt handler.
1643 */
1644static void pmtx_intr_handler(struct adapter *adapter)
1645{
005b5717 1646 static const struct intr_info pmtx_intr_info[] = {
56d36be4
DM
1647 { PCMD_LEN_OVFL0, "PMTX channel 0 pcmd too large", -1, 1 },
1648 { PCMD_LEN_OVFL1, "PMTX channel 1 pcmd too large", -1, 1 },
1649 { PCMD_LEN_OVFL2, "PMTX channel 2 pcmd too large", -1, 1 },
1650 { ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1 },
1651 { PMTX_FRAMING_ERROR, "PMTX framing error", -1, 1 },
1652 { OESPI_PAR_ERROR, "PMTX oespi parity error", -1, 1 },
1653 { DB_OPTIONS_PAR_ERROR, "PMTX db_options parity error", -1, 1 },
1654 { ICSPI_PAR_ERROR, "PMTX icspi parity error", -1, 1 },
1655 { C_PCMD_PAR_ERROR, "PMTX c_pcmd parity error", -1, 1},
1656 { 0 }
1657 };
1658
1659 if (t4_handle_intr_status(adapter, PM_TX_INT_CAUSE, pmtx_intr_info))
1660 t4_fatal_err(adapter);
1661}
1662
1663/*
1664 * PM RX interrupt handler.
1665 */
1666static void pmrx_intr_handler(struct adapter *adapter)
1667{
005b5717 1668 static const struct intr_info pmrx_intr_info[] = {
56d36be4
DM
1669 { ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1 },
1670 { PMRX_FRAMING_ERROR, "PMRX framing error", -1, 1 },
1671 { OCSPI_PAR_ERROR, "PMRX ocspi parity error", -1, 1 },
1672 { DB_OPTIONS_PAR_ERROR, "PMRX db_options parity error", -1, 1 },
1673 { IESPI_PAR_ERROR, "PMRX iespi parity error", -1, 1 },
1674 { E_PCMD_PAR_ERROR, "PMRX e_pcmd parity error", -1, 1},
1675 { 0 }
1676 };
1677
1678 if (t4_handle_intr_status(adapter, PM_RX_INT_CAUSE, pmrx_intr_info))
1679 t4_fatal_err(adapter);
1680}
1681
1682/*
1683 * CPL switch interrupt handler.
1684 */
1685static void cplsw_intr_handler(struct adapter *adapter)
1686{
005b5717 1687 static const struct intr_info cplsw_intr_info[] = {
56d36be4
DM
1688 { CIM_OP_MAP_PERR, "CPLSW CIM op_map parity error", -1, 1 },
1689 { CIM_OVFL_ERROR, "CPLSW CIM overflow", -1, 1 },
1690 { TP_FRAMING_ERROR, "CPLSW TP framing error", -1, 1 },
1691 { SGE_FRAMING_ERROR, "CPLSW SGE framing error", -1, 1 },
1692 { CIM_FRAMING_ERROR, "CPLSW CIM framing error", -1, 1 },
1693 { ZERO_SWITCH_ERROR, "CPLSW no-switch error", -1, 1 },
1694 { 0 }
1695 };
1696
1697 if (t4_handle_intr_status(adapter, CPL_INTR_CAUSE, cplsw_intr_info))
1698 t4_fatal_err(adapter);
1699}
1700
1701/*
1702 * LE interrupt handler.
1703 */
1704static void le_intr_handler(struct adapter *adap)
1705{
005b5717 1706 static const struct intr_info le_intr_info[] = {
56d36be4
DM
1707 { LIPMISS, "LE LIP miss", -1, 0 },
1708 { LIP0, "LE 0 LIP error", -1, 0 },
1709 { PARITYERR, "LE parity error", -1, 1 },
1710 { UNKNOWNCMD, "LE unknown command", -1, 1 },
1711 { REQQPARERR, "LE request queue parity error", -1, 1 },
1712 { 0 }
1713 };
1714
1715 if (t4_handle_intr_status(adap, LE_DB_INT_CAUSE, le_intr_info))
1716 t4_fatal_err(adap);
1717}
1718
1719/*
1720 * MPS interrupt handler.
1721 */
1722static void mps_intr_handler(struct adapter *adapter)
1723{
005b5717 1724 static const struct intr_info mps_rx_intr_info[] = {
56d36be4
DM
1725 { 0xffffff, "MPS Rx parity error", -1, 1 },
1726 { 0 }
1727 };
005b5717 1728 static const struct intr_info mps_tx_intr_info[] = {
56d36be4
DM
1729 { TPFIFO, "MPS Tx TP FIFO parity error", -1, 1 },
1730 { NCSIFIFO, "MPS Tx NC-SI FIFO parity error", -1, 1 },
1731 { TXDATAFIFO, "MPS Tx data FIFO parity error", -1, 1 },
1732 { TXDESCFIFO, "MPS Tx desc FIFO parity error", -1, 1 },
1733 { BUBBLE, "MPS Tx underflow", -1, 1 },
1734 { SECNTERR, "MPS Tx SOP/EOP error", -1, 1 },
1735 { FRMERR, "MPS Tx framing error", -1, 1 },
1736 { 0 }
1737 };
005b5717 1738 static const struct intr_info mps_trc_intr_info[] = {
56d36be4
DM
1739 { FILTMEM, "MPS TRC filter parity error", -1, 1 },
1740 { PKTFIFO, "MPS TRC packet FIFO parity error", -1, 1 },
1741 { MISCPERR, "MPS TRC misc parity error", -1, 1 },
1742 { 0 }
1743 };
005b5717 1744 static const struct intr_info mps_stat_sram_intr_info[] = {
56d36be4
DM
1745 { 0x1fffff, "MPS statistics SRAM parity error", -1, 1 },
1746 { 0 }
1747 };
005b5717 1748 static const struct intr_info mps_stat_tx_intr_info[] = {
56d36be4
DM
1749 { 0xfffff, "MPS statistics Tx FIFO parity error", -1, 1 },
1750 { 0 }
1751 };
005b5717 1752 static const struct intr_info mps_stat_rx_intr_info[] = {
56d36be4
DM
1753 { 0xffffff, "MPS statistics Rx FIFO parity error", -1, 1 },
1754 { 0 }
1755 };
005b5717 1756 static const struct intr_info mps_cls_intr_info[] = {
56d36be4
DM
1757 { MATCHSRAM, "MPS match SRAM parity error", -1, 1 },
1758 { MATCHTCAM, "MPS match TCAM parity error", -1, 1 },
1759 { HASHSRAM, "MPS hash SRAM parity error", -1, 1 },
1760 { 0 }
1761 };
1762
1763 int fat;
1764
1765 fat = t4_handle_intr_status(adapter, MPS_RX_PERR_INT_CAUSE,
1766 mps_rx_intr_info) +
1767 t4_handle_intr_status(adapter, MPS_TX_INT_CAUSE,
1768 mps_tx_intr_info) +
1769 t4_handle_intr_status(adapter, MPS_TRC_INT_CAUSE,
1770 mps_trc_intr_info) +
1771 t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_SRAM,
1772 mps_stat_sram_intr_info) +
1773 t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_TX_FIFO,
1774 mps_stat_tx_intr_info) +
1775 t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_RX_FIFO,
1776 mps_stat_rx_intr_info) +
1777 t4_handle_intr_status(adapter, MPS_CLS_INT_CAUSE,
1778 mps_cls_intr_info);
1779
1780 t4_write_reg(adapter, MPS_INT_CAUSE, CLSINT | TRCINT |
1781 RXINT | TXINT | STATINT);
1782 t4_read_reg(adapter, MPS_INT_CAUSE); /* flush */
1783 if (fat)
1784 t4_fatal_err(adapter);
1785}
1786
1787#define MEM_INT_MASK (PERR_INT_CAUSE | ECC_CE_INT_CAUSE | ECC_UE_INT_CAUSE)
1788
1789/*
1790 * EDC/MC interrupt handler.
1791 */
1792static void mem_intr_handler(struct adapter *adapter, int idx)
1793{
822dd8a8 1794 static const char name[4][7] = { "EDC0", "EDC1", "MC/MC0", "MC1" };
56d36be4
DM
1795
1796 unsigned int addr, cnt_addr, v;
1797
1798 if (idx <= MEM_EDC1) {
1799 addr = EDC_REG(EDC_INT_CAUSE, idx);
1800 cnt_addr = EDC_REG(EDC_ECC_STATUS, idx);
822dd8a8
HS
1801 } else if (idx == MEM_MC) {
1802 if (is_t4(adapter->params.chip)) {
1803 addr = MC_INT_CAUSE;
1804 cnt_addr = MC_ECC_STATUS;
1805 } else {
1806 addr = MC_P_INT_CAUSE;
1807 cnt_addr = MC_P_ECC_STATUS;
1808 }
56d36be4 1809 } else {
822dd8a8
HS
1810 addr = MC_REG(MC_P_INT_CAUSE, 1);
1811 cnt_addr = MC_REG(MC_P_ECC_STATUS, 1);
56d36be4
DM
1812 }
1813
1814 v = t4_read_reg(adapter, addr) & MEM_INT_MASK;
1815 if (v & PERR_INT_CAUSE)
1816 dev_alert(adapter->pdev_dev, "%s FIFO parity error\n",
1817 name[idx]);
1818 if (v & ECC_CE_INT_CAUSE) {
1819 u32 cnt = ECC_CECNT_GET(t4_read_reg(adapter, cnt_addr));
1820
1821 t4_write_reg(adapter, cnt_addr, ECC_CECNT_MASK);
1822 if (printk_ratelimit())
1823 dev_warn(adapter->pdev_dev,
1824 "%u %s correctable ECC data error%s\n",
1825 cnt, name[idx], cnt > 1 ? "s" : "");
1826 }
1827 if (v & ECC_UE_INT_CAUSE)
1828 dev_alert(adapter->pdev_dev,
1829 "%s uncorrectable ECC data error\n", name[idx]);
1830
1831 t4_write_reg(adapter, addr, v);
1832 if (v & (PERR_INT_CAUSE | ECC_UE_INT_CAUSE))
1833 t4_fatal_err(adapter);
1834}
1835
1836/*
1837 * MA interrupt handler.
1838 */
1839static void ma_intr_handler(struct adapter *adap)
1840{
1841 u32 v, status = t4_read_reg(adap, MA_INT_CAUSE);
1842
9bb59b96 1843 if (status & MEM_PERR_INT_CAUSE) {
56d36be4
DM
1844 dev_alert(adap->pdev_dev,
1845 "MA parity error, parity status %#x\n",
1846 t4_read_reg(adap, MA_PARITY_ERROR_STATUS));
9bb59b96
HS
1847 if (is_t5(adap->params.chip))
1848 dev_alert(adap->pdev_dev,
1849 "MA parity error, parity status %#x\n",
1850 t4_read_reg(adap,
1851 MA_PARITY_ERROR_STATUS2));
1852 }
56d36be4
DM
1853 if (status & MEM_WRAP_INT_CAUSE) {
1854 v = t4_read_reg(adap, MA_INT_WRAP_STATUS);
1855 dev_alert(adap->pdev_dev, "MA address wrap-around error by "
1856 "client %u to address %#x\n",
1857 MEM_WRAP_CLIENT_NUM_GET(v),
1858 MEM_WRAP_ADDRESS_GET(v) << 4);
1859 }
1860 t4_write_reg(adap, MA_INT_CAUSE, status);
1861 t4_fatal_err(adap);
1862}
1863
1864/*
1865 * SMB interrupt handler.
1866 */
1867static void smb_intr_handler(struct adapter *adap)
1868{
005b5717 1869 static const struct intr_info smb_intr_info[] = {
56d36be4
DM
1870 { MSTTXFIFOPARINT, "SMB master Tx FIFO parity error", -1, 1 },
1871 { MSTRXFIFOPARINT, "SMB master Rx FIFO parity error", -1, 1 },
1872 { SLVFIFOPARINT, "SMB slave FIFO parity error", -1, 1 },
1873 { 0 }
1874 };
1875
1876 if (t4_handle_intr_status(adap, SMB_INT_CAUSE, smb_intr_info))
1877 t4_fatal_err(adap);
1878}
1879
1880/*
1881 * NC-SI interrupt handler.
1882 */
1883static void ncsi_intr_handler(struct adapter *adap)
1884{
005b5717 1885 static const struct intr_info ncsi_intr_info[] = {
56d36be4
DM
1886 { CIM_DM_PRTY_ERR, "NC-SI CIM parity error", -1, 1 },
1887 { MPS_DM_PRTY_ERR, "NC-SI MPS parity error", -1, 1 },
1888 { TXFIFO_PRTY_ERR, "NC-SI Tx FIFO parity error", -1, 1 },
1889 { RXFIFO_PRTY_ERR, "NC-SI Rx FIFO parity error", -1, 1 },
1890 { 0 }
1891 };
1892
1893 if (t4_handle_intr_status(adap, NCSI_INT_CAUSE, ncsi_intr_info))
1894 t4_fatal_err(adap);
1895}
1896
1897/*
1898 * XGMAC interrupt handler.
1899 */
1900static void xgmac_intr_handler(struct adapter *adap, int port)
1901{
0a57a536
SR
1902 u32 v, int_cause_reg;
1903
d14807dd 1904 if (is_t4(adap->params.chip))
0a57a536
SR
1905 int_cause_reg = PORT_REG(port, XGMAC_PORT_INT_CAUSE);
1906 else
1907 int_cause_reg = T5_PORT_REG(port, MAC_PORT_INT_CAUSE);
1908
1909 v = t4_read_reg(adap, int_cause_reg);
56d36be4
DM
1910
1911 v &= TXFIFO_PRTY_ERR | RXFIFO_PRTY_ERR;
1912 if (!v)
1913 return;
1914
1915 if (v & TXFIFO_PRTY_ERR)
1916 dev_alert(adap->pdev_dev, "XGMAC %d Tx FIFO parity error\n",
1917 port);
1918 if (v & RXFIFO_PRTY_ERR)
1919 dev_alert(adap->pdev_dev, "XGMAC %d Rx FIFO parity error\n",
1920 port);
1921 t4_write_reg(adap, PORT_REG(port, XGMAC_PORT_INT_CAUSE), v);
1922 t4_fatal_err(adap);
1923}
1924
1925/*
1926 * PL interrupt handler.
1927 */
1928static void pl_intr_handler(struct adapter *adap)
1929{
005b5717 1930 static const struct intr_info pl_intr_info[] = {
56d36be4
DM
1931 { FATALPERR, "T4 fatal parity error", -1, 1 },
1932 { PERRVFID, "PL VFID_MAP parity error", -1, 1 },
1933 { 0 }
1934 };
1935
1936 if (t4_handle_intr_status(adap, PL_PL_INT_CAUSE, pl_intr_info))
1937 t4_fatal_err(adap);
1938}
1939
63bcceec 1940#define PF_INTR_MASK (PFSW)
56d36be4
DM
1941#define GLBL_INTR_MASK (CIM | MPS | PL | PCIE | MC | EDC0 | \
1942 EDC1 | LE | TP | MA | PM_TX | PM_RX | ULP_RX | \
1943 CPL_SWITCH | SGE | ULP_TX)
1944
1945/**
1946 * t4_slow_intr_handler - control path interrupt handler
1947 * @adapter: the adapter
1948 *
1949 * T4 interrupt handler for non-data global interrupt events, e.g., errors.
1950 * The designation 'slow' is because it involves register reads, while
1951 * data interrupts typically don't involve any MMIOs.
1952 */
1953int t4_slow_intr_handler(struct adapter *adapter)
1954{
1955 u32 cause = t4_read_reg(adapter, PL_INT_CAUSE);
1956
1957 if (!(cause & GLBL_INTR_MASK))
1958 return 0;
1959 if (cause & CIM)
1960 cim_intr_handler(adapter);
1961 if (cause & MPS)
1962 mps_intr_handler(adapter);
1963 if (cause & NCSI)
1964 ncsi_intr_handler(adapter);
1965 if (cause & PL)
1966 pl_intr_handler(adapter);
1967 if (cause & SMB)
1968 smb_intr_handler(adapter);
1969 if (cause & XGMAC0)
1970 xgmac_intr_handler(adapter, 0);
1971 if (cause & XGMAC1)
1972 xgmac_intr_handler(adapter, 1);
1973 if (cause & XGMAC_KR0)
1974 xgmac_intr_handler(adapter, 2);
1975 if (cause & XGMAC_KR1)
1976 xgmac_intr_handler(adapter, 3);
1977 if (cause & PCIE)
1978 pcie_intr_handler(adapter);
1979 if (cause & MC)
1980 mem_intr_handler(adapter, MEM_MC);
822dd8a8
HS
1981 if (!is_t4(adapter->params.chip) && (cause & MC1))
1982 mem_intr_handler(adapter, MEM_MC1);
56d36be4
DM
1983 if (cause & EDC0)
1984 mem_intr_handler(adapter, MEM_EDC0);
1985 if (cause & EDC1)
1986 mem_intr_handler(adapter, MEM_EDC1);
1987 if (cause & LE)
1988 le_intr_handler(adapter);
1989 if (cause & TP)
1990 tp_intr_handler(adapter);
1991 if (cause & MA)
1992 ma_intr_handler(adapter);
1993 if (cause & PM_TX)
1994 pmtx_intr_handler(adapter);
1995 if (cause & PM_RX)
1996 pmrx_intr_handler(adapter);
1997 if (cause & ULP_RX)
1998 ulprx_intr_handler(adapter);
1999 if (cause & CPL_SWITCH)
2000 cplsw_intr_handler(adapter);
2001 if (cause & SGE)
2002 sge_intr_handler(adapter);
2003 if (cause & ULP_TX)
2004 ulptx_intr_handler(adapter);
2005
2006 /* Clear the interrupts just processed for which we are the master. */
2007 t4_write_reg(adapter, PL_INT_CAUSE, cause & GLBL_INTR_MASK);
2008 (void) t4_read_reg(adapter, PL_INT_CAUSE); /* flush */
2009 return 1;
2010}
2011
2012/**
2013 * t4_intr_enable - enable interrupts
2014 * @adapter: the adapter whose interrupts should be enabled
2015 *
2016 * Enable PF-specific interrupts for the calling function and the top-level
2017 * interrupt concentrator for global interrupts. Interrupts are already
2018 * enabled at each module, here we just enable the roots of the interrupt
2019 * hierarchies.
2020 *
2021 * Note: this function should be called only when the driver manages
2022 * non PF-specific interrupts from the various HW modules. Only one PCI
2023 * function at a time should be doing this.
2024 */
2025void t4_intr_enable(struct adapter *adapter)
2026{
2027 u32 pf = SOURCEPF_GET(t4_read_reg(adapter, PL_WHOAMI));
2028
f612b815
HS
2029 t4_write_reg(adapter, SGE_INT_ENABLE3_A, ERR_CPL_EXCEED_IQE_SIZE_F |
2030 ERR_INVALID_CIDX_INC_F | ERR_CPL_OPCODE_0_F |
2031 ERR_DROPPED_DB_F | ERR_DATA_CPL_ON_HIGH_QID1_F |
2032 ERR_DATA_CPL_ON_HIGH_QID0_F | ERR_BAD_DB_PIDX3_F |
2033 ERR_BAD_DB_PIDX2_F | ERR_BAD_DB_PIDX1_F |
2034 ERR_BAD_DB_PIDX0_F | ERR_ING_CTXT_PRIO_F |
2035 ERR_EGR_CTXT_PRIO_F | INGRESS_SIZE_ERR_F |
2036 DBFIFO_HP_INT_F | DBFIFO_LP_INT_F |
2037 EGRESS_SIZE_ERR_F);
56d36be4
DM
2038 t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE), PF_INTR_MASK);
2039 t4_set_reg_field(adapter, PL_INT_MAP0, 0, 1 << pf);
2040}
2041
2042/**
2043 * t4_intr_disable - disable interrupts
2044 * @adapter: the adapter whose interrupts should be disabled
2045 *
2046 * Disable interrupts. We only disable the top-level interrupt
2047 * concentrators. The caller must be a PCI function managing global
2048 * interrupts.
2049 */
2050void t4_intr_disable(struct adapter *adapter)
2051{
2052 u32 pf = SOURCEPF_GET(t4_read_reg(adapter, PL_WHOAMI));
2053
2054 t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE), 0);
2055 t4_set_reg_field(adapter, PL_INT_MAP0, 1 << pf, 0);
2056}
2057
56d36be4
DM
2058/**
2059 * hash_mac_addr - return the hash value of a MAC address
2060 * @addr: the 48-bit Ethernet MAC address
2061 *
2062 * Hashes a MAC address according to the hash function used by HW inexact
2063 * (hash) address matching.
2064 */
2065static int hash_mac_addr(const u8 *addr)
2066{
2067 u32 a = ((u32)addr[0] << 16) | ((u32)addr[1] << 8) | addr[2];
2068 u32 b = ((u32)addr[3] << 16) | ((u32)addr[4] << 8) | addr[5];
2069 a ^= b;
2070 a ^= (a >> 12);
2071 a ^= (a >> 6);
2072 return a & 0x3f;
2073}
2074
2075/**
2076 * t4_config_rss_range - configure a portion of the RSS mapping table
2077 * @adapter: the adapter
2078 * @mbox: mbox to use for the FW command
2079 * @viid: virtual interface whose RSS subtable is to be written
2080 * @start: start entry in the table to write
2081 * @n: how many table entries to write
2082 * @rspq: values for the response queue lookup table
2083 * @nrspq: number of values in @rspq
2084 *
2085 * Programs the selected part of the VI's RSS mapping table with the
2086 * provided values. If @nrspq < @n the supplied values are used repeatedly
2087 * until the full table range is populated.
2088 *
2089 * The caller must ensure the values in @rspq are in the range allowed for
2090 * @viid.
2091 */
2092int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
2093 int start, int n, const u16 *rspq, unsigned int nrspq)
2094{
2095 int ret;
2096 const u16 *rsp = rspq;
2097 const u16 *rsp_end = rspq + nrspq;
2098 struct fw_rss_ind_tbl_cmd cmd;
2099
2100 memset(&cmd, 0, sizeof(cmd));
e2ac9628
HS
2101 cmd.op_to_viid = htonl(FW_CMD_OP_V(FW_RSS_IND_TBL_CMD) |
2102 FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
b2e1a3f0 2103 FW_RSS_IND_TBL_CMD_VIID_V(viid));
56d36be4
DM
2104 cmd.retval_len16 = htonl(FW_LEN16(cmd));
2105
2106 /* each fw_rss_ind_tbl_cmd takes up to 32 entries */
2107 while (n > 0) {
2108 int nq = min(n, 32);
2109 __be32 *qp = &cmd.iq0_to_iq2;
2110
2111 cmd.niqid = htons(nq);
2112 cmd.startidx = htons(start);
2113
2114 start += nq;
2115 n -= nq;
2116
2117 while (nq > 0) {
2118 unsigned int v;
2119
b2e1a3f0 2120 v = FW_RSS_IND_TBL_CMD_IQ0_V(*rsp);
56d36be4
DM
2121 if (++rsp >= rsp_end)
2122 rsp = rspq;
b2e1a3f0 2123 v |= FW_RSS_IND_TBL_CMD_IQ1_V(*rsp);
56d36be4
DM
2124 if (++rsp >= rsp_end)
2125 rsp = rspq;
b2e1a3f0 2126 v |= FW_RSS_IND_TBL_CMD_IQ2_V(*rsp);
56d36be4
DM
2127 if (++rsp >= rsp_end)
2128 rsp = rspq;
2129
2130 *qp++ = htonl(v);
2131 nq -= 3;
2132 }
2133
2134 ret = t4_wr_mbox(adapter, mbox, &cmd, sizeof(cmd), NULL);
2135 if (ret)
2136 return ret;
2137 }
2138 return 0;
2139}
2140
2141/**
2142 * t4_config_glbl_rss - configure the global RSS mode
2143 * @adapter: the adapter
2144 * @mbox: mbox to use for the FW command
2145 * @mode: global RSS mode
2146 * @flags: mode-specific flags
2147 *
2148 * Sets the global RSS mode.
2149 */
2150int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode,
2151 unsigned int flags)
2152{
2153 struct fw_rss_glb_config_cmd c;
2154
2155 memset(&c, 0, sizeof(c));
e2ac9628
HS
2156 c.op_to_write = htonl(FW_CMD_OP_V(FW_RSS_GLB_CONFIG_CMD) |
2157 FW_CMD_REQUEST_F | FW_CMD_WRITE_F);
56d36be4
DM
2158 c.retval_len16 = htonl(FW_LEN16(c));
2159 if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_MANUAL) {
b2e1a3f0 2160 c.u.manual.mode_pkd = htonl(FW_RSS_GLB_CONFIG_CMD_MODE_V(mode));
56d36be4
DM
2161 } else if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL) {
2162 c.u.basicvirtual.mode_pkd =
b2e1a3f0 2163 htonl(FW_RSS_GLB_CONFIG_CMD_MODE_V(mode));
56d36be4
DM
2164 c.u.basicvirtual.synmapen_to_hashtoeplitz = htonl(flags);
2165 } else
2166 return -EINVAL;
2167 return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
2168}
2169
56d36be4
DM
2170/**
2171 * t4_tp_get_tcp_stats - read TP's TCP MIB counters
2172 * @adap: the adapter
2173 * @v4: holds the TCP/IP counter values
2174 * @v6: holds the TCP/IPv6 counter values
2175 *
2176 * Returns the values of TP's TCP/IP and TCP/IPv6 MIB counters.
2177 * Either @v4 or @v6 may be %NULL to skip the corresponding stats.
2178 */
2179void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4,
2180 struct tp_tcp_stats *v6)
2181{
2182 u32 val[TP_MIB_TCP_RXT_SEG_LO - TP_MIB_TCP_OUT_RST + 1];
2183
2184#define STAT_IDX(x) ((TP_MIB_TCP_##x) - TP_MIB_TCP_OUT_RST)
2185#define STAT(x) val[STAT_IDX(x)]
2186#define STAT64(x) (((u64)STAT(x##_HI) << 32) | STAT(x##_LO))
2187
2188 if (v4) {
2189 t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, val,
2190 ARRAY_SIZE(val), TP_MIB_TCP_OUT_RST);
2191 v4->tcpOutRsts = STAT(OUT_RST);
2192 v4->tcpInSegs = STAT64(IN_SEG);
2193 v4->tcpOutSegs = STAT64(OUT_SEG);
2194 v4->tcpRetransSegs = STAT64(RXT_SEG);
2195 }
2196 if (v6) {
2197 t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, val,
2198 ARRAY_SIZE(val), TP_MIB_TCP_V6OUT_RST);
2199 v6->tcpOutRsts = STAT(OUT_RST);
2200 v6->tcpInSegs = STAT64(IN_SEG);
2201 v6->tcpOutSegs = STAT64(OUT_SEG);
2202 v6->tcpRetransSegs = STAT64(RXT_SEG);
2203 }
2204#undef STAT64
2205#undef STAT
2206#undef STAT_IDX
2207}
2208
56d36be4
DM
2209/**
2210 * t4_read_mtu_tbl - returns the values in the HW path MTU table
2211 * @adap: the adapter
2212 * @mtus: where to store the MTU values
2213 * @mtu_log: where to store the MTU base-2 log (may be %NULL)
2214 *
2215 * Reads the HW path MTU table.
2216 */
2217void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log)
2218{
2219 u32 v;
2220 int i;
2221
2222 for (i = 0; i < NMTUS; ++i) {
2223 t4_write_reg(adap, TP_MTU_TABLE,
2224 MTUINDEX(0xff) | MTUVALUE(i));
2225 v = t4_read_reg(adap, TP_MTU_TABLE);
2226 mtus[i] = MTUVALUE_GET(v);
2227 if (mtu_log)
2228 mtu_log[i] = MTUWIDTH_GET(v);
2229 }
2230}
2231
636f9d37
VP
2232/**
2233 * t4_tp_wr_bits_indirect - set/clear bits in an indirect TP register
2234 * @adap: the adapter
2235 * @addr: the indirect TP register address
2236 * @mask: specifies the field within the register to modify
2237 * @val: new value for the field
2238 *
2239 * Sets a field of an indirect TP register to the given value.
2240 */
2241void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr,
2242 unsigned int mask, unsigned int val)
2243{
2244 t4_write_reg(adap, TP_PIO_ADDR, addr);
2245 val |= t4_read_reg(adap, TP_PIO_DATA) & ~mask;
2246 t4_write_reg(adap, TP_PIO_DATA, val);
2247}
2248
56d36be4
DM
2249/**
2250 * init_cong_ctrl - initialize congestion control parameters
2251 * @a: the alpha values for congestion control
2252 * @b: the beta values for congestion control
2253 *
2254 * Initialize the congestion control parameters.
2255 */
91744948 2256static void init_cong_ctrl(unsigned short *a, unsigned short *b)
56d36be4
DM
2257{
2258 a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1;
2259 a[9] = 2;
2260 a[10] = 3;
2261 a[11] = 4;
2262 a[12] = 5;
2263 a[13] = 6;
2264 a[14] = 7;
2265 a[15] = 8;
2266 a[16] = 9;
2267 a[17] = 10;
2268 a[18] = 14;
2269 a[19] = 17;
2270 a[20] = 21;
2271 a[21] = 25;
2272 a[22] = 30;
2273 a[23] = 35;
2274 a[24] = 45;
2275 a[25] = 60;
2276 a[26] = 80;
2277 a[27] = 100;
2278 a[28] = 200;
2279 a[29] = 300;
2280 a[30] = 400;
2281 a[31] = 500;
2282
2283 b[0] = b[1] = b[2] = b[3] = b[4] = b[5] = b[6] = b[7] = b[8] = 0;
2284 b[9] = b[10] = 1;
2285 b[11] = b[12] = 2;
2286 b[13] = b[14] = b[15] = b[16] = 3;
2287 b[17] = b[18] = b[19] = b[20] = b[21] = 4;
2288 b[22] = b[23] = b[24] = b[25] = b[26] = b[27] = 5;
2289 b[28] = b[29] = 6;
2290 b[30] = b[31] = 7;
2291}
2292
2293/* The minimum additive increment value for the congestion control table */
2294#define CC_MIN_INCR 2U
2295
2296/**
2297 * t4_load_mtus - write the MTU and congestion control HW tables
2298 * @adap: the adapter
2299 * @mtus: the values for the MTU table
2300 * @alpha: the values for the congestion control alpha parameter
2301 * @beta: the values for the congestion control beta parameter
2302 *
2303 * Write the HW MTU table with the supplied MTUs and the high-speed
2304 * congestion control table with the supplied alpha, beta, and MTUs.
2305 * We write the two tables together because the additive increments
2306 * depend on the MTUs.
2307 */
2308void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
2309 const unsigned short *alpha, const unsigned short *beta)
2310{
2311 static const unsigned int avg_pkts[NCCTRL_WIN] = {
2312 2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
2313 896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
2314 28672, 40960, 57344, 81920, 114688, 163840, 229376
2315 };
2316
2317 unsigned int i, w;
2318
2319 for (i = 0; i < NMTUS; ++i) {
2320 unsigned int mtu = mtus[i];
2321 unsigned int log2 = fls(mtu);
2322
2323 if (!(mtu & ((1 << log2) >> 2))) /* round */
2324 log2--;
2325 t4_write_reg(adap, TP_MTU_TABLE, MTUINDEX(i) |
2326 MTUWIDTH(log2) | MTUVALUE(mtu));
2327
2328 for (w = 0; w < NCCTRL_WIN; ++w) {
2329 unsigned int inc;
2330
2331 inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
2332 CC_MIN_INCR);
2333
2334 t4_write_reg(adap, TP_CCTRL_TABLE, (i << 21) |
2335 (w << 16) | (beta[w] << 13) | inc);
2336 }
2337 }
2338}
2339
56d36be4
DM
2340/**
2341 * get_mps_bg_map - return the buffer groups associated with a port
2342 * @adap: the adapter
2343 * @idx: the port index
2344 *
2345 * Returns a bitmap indicating which MPS buffer groups are associated
2346 * with the given port. Bit i is set if buffer group i is used by the
2347 * port.
2348 */
2349static unsigned int get_mps_bg_map(struct adapter *adap, int idx)
2350{
2351 u32 n = NUMPORTS_GET(t4_read_reg(adap, MPS_CMN_CTL));
2352
2353 if (n == 0)
2354 return idx == 0 ? 0xf : 0;
2355 if (n == 1)
2356 return idx < 2 ? (3 << (2 * idx)) : 0;
2357 return 1 << idx;
2358}
2359
72aca4bf
KS
2360/**
2361 * t4_get_port_type_description - return Port Type string description
2362 * @port_type: firmware Port Type enumeration
2363 */
2364const char *t4_get_port_type_description(enum fw_port_type port_type)
2365{
2366 static const char *const port_type_description[] = {
2367 "R XFI",
2368 "R XAUI",
2369 "T SGMII",
2370 "T XFI",
2371 "T XAUI",
2372 "KX4",
2373 "CX4",
2374 "KX",
2375 "KR",
2376 "R SFP+",
2377 "KR/KX",
2378 "KR/KX/KX4",
2379 "R QSFP_10G",
5aa80e51 2380 "R QSA",
72aca4bf
KS
2381 "R QSFP",
2382 "R BP40_BA",
2383 };
2384
2385 if (port_type < ARRAY_SIZE(port_type_description))
2386 return port_type_description[port_type];
2387 return "UNKNOWN";
2388}
2389
56d36be4
DM
2390/**
2391 * t4_get_port_stats - collect port statistics
2392 * @adap: the adapter
2393 * @idx: the port index
2394 * @p: the stats structure to fill
2395 *
2396 * Collect statistics related to the given port from HW.
2397 */
2398void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p)
2399{
2400 u32 bgmap = get_mps_bg_map(adap, idx);
2401
2402#define GET_STAT(name) \
0a57a536 2403 t4_read_reg64(adap, \
d14807dd 2404 (is_t4(adap->params.chip) ? PORT_REG(idx, MPS_PORT_STAT_##name##_L) : \
0a57a536 2405 T5_PORT_REG(idx, MPS_PORT_STAT_##name##_L)))
56d36be4
DM
2406#define GET_STAT_COM(name) t4_read_reg64(adap, MPS_STAT_##name##_L)
2407
2408 p->tx_octets = GET_STAT(TX_PORT_BYTES);
2409 p->tx_frames = GET_STAT(TX_PORT_FRAMES);
2410 p->tx_bcast_frames = GET_STAT(TX_PORT_BCAST);
2411 p->tx_mcast_frames = GET_STAT(TX_PORT_MCAST);
2412 p->tx_ucast_frames = GET_STAT(TX_PORT_UCAST);
2413 p->tx_error_frames = GET_STAT(TX_PORT_ERROR);
2414 p->tx_frames_64 = GET_STAT(TX_PORT_64B);
2415 p->tx_frames_65_127 = GET_STAT(TX_PORT_65B_127B);
2416 p->tx_frames_128_255 = GET_STAT(TX_PORT_128B_255B);
2417 p->tx_frames_256_511 = GET_STAT(TX_PORT_256B_511B);
2418 p->tx_frames_512_1023 = GET_STAT(TX_PORT_512B_1023B);
2419 p->tx_frames_1024_1518 = GET_STAT(TX_PORT_1024B_1518B);
2420 p->tx_frames_1519_max = GET_STAT(TX_PORT_1519B_MAX);
2421 p->tx_drop = GET_STAT(TX_PORT_DROP);
2422 p->tx_pause = GET_STAT(TX_PORT_PAUSE);
2423 p->tx_ppp0 = GET_STAT(TX_PORT_PPP0);
2424 p->tx_ppp1 = GET_STAT(TX_PORT_PPP1);
2425 p->tx_ppp2 = GET_STAT(TX_PORT_PPP2);
2426 p->tx_ppp3 = GET_STAT(TX_PORT_PPP3);
2427 p->tx_ppp4 = GET_STAT(TX_PORT_PPP4);
2428 p->tx_ppp5 = GET_STAT(TX_PORT_PPP5);
2429 p->tx_ppp6 = GET_STAT(TX_PORT_PPP6);
2430 p->tx_ppp7 = GET_STAT(TX_PORT_PPP7);
2431
2432 p->rx_octets = GET_STAT(RX_PORT_BYTES);
2433 p->rx_frames = GET_STAT(RX_PORT_FRAMES);
2434 p->rx_bcast_frames = GET_STAT(RX_PORT_BCAST);
2435 p->rx_mcast_frames = GET_STAT(RX_PORT_MCAST);
2436 p->rx_ucast_frames = GET_STAT(RX_PORT_UCAST);
2437 p->rx_too_long = GET_STAT(RX_PORT_MTU_ERROR);
2438 p->rx_jabber = GET_STAT(RX_PORT_MTU_CRC_ERROR);
2439 p->rx_fcs_err = GET_STAT(RX_PORT_CRC_ERROR);
2440 p->rx_len_err = GET_STAT(RX_PORT_LEN_ERROR);
2441 p->rx_symbol_err = GET_STAT(RX_PORT_SYM_ERROR);
2442 p->rx_runt = GET_STAT(RX_PORT_LESS_64B);
2443 p->rx_frames_64 = GET_STAT(RX_PORT_64B);
2444 p->rx_frames_65_127 = GET_STAT(RX_PORT_65B_127B);
2445 p->rx_frames_128_255 = GET_STAT(RX_PORT_128B_255B);
2446 p->rx_frames_256_511 = GET_STAT(RX_PORT_256B_511B);
2447 p->rx_frames_512_1023 = GET_STAT(RX_PORT_512B_1023B);
2448 p->rx_frames_1024_1518 = GET_STAT(RX_PORT_1024B_1518B);
2449 p->rx_frames_1519_max = GET_STAT(RX_PORT_1519B_MAX);
2450 p->rx_pause = GET_STAT(RX_PORT_PAUSE);
2451 p->rx_ppp0 = GET_STAT(RX_PORT_PPP0);
2452 p->rx_ppp1 = GET_STAT(RX_PORT_PPP1);
2453 p->rx_ppp2 = GET_STAT(RX_PORT_PPP2);
2454 p->rx_ppp3 = GET_STAT(RX_PORT_PPP3);
2455 p->rx_ppp4 = GET_STAT(RX_PORT_PPP4);
2456 p->rx_ppp5 = GET_STAT(RX_PORT_PPP5);
2457 p->rx_ppp6 = GET_STAT(RX_PORT_PPP6);
2458 p->rx_ppp7 = GET_STAT(RX_PORT_PPP7);
2459
2460 p->rx_ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_DROP_FRAME) : 0;
2461 p->rx_ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_DROP_FRAME) : 0;
2462 p->rx_ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_DROP_FRAME) : 0;
2463 p->rx_ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_DROP_FRAME) : 0;
2464 p->rx_trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_TRUNC_FRAME) : 0;
2465 p->rx_trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_TRUNC_FRAME) : 0;
2466 p->rx_trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_TRUNC_FRAME) : 0;
2467 p->rx_trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_TRUNC_FRAME) : 0;
2468
2469#undef GET_STAT
2470#undef GET_STAT_COM
2471}
2472
56d36be4
DM
2473/**
2474 * t4_wol_magic_enable - enable/disable magic packet WoL
2475 * @adap: the adapter
2476 * @port: the physical port index
2477 * @addr: MAC address expected in magic packets, %NULL to disable
2478 *
2479 * Enables/disables magic packet wake-on-LAN for the selected port.
2480 */
2481void t4_wol_magic_enable(struct adapter *adap, unsigned int port,
2482 const u8 *addr)
2483{
0a57a536
SR
2484 u32 mag_id_reg_l, mag_id_reg_h, port_cfg_reg;
2485
d14807dd 2486 if (is_t4(adap->params.chip)) {
0a57a536
SR
2487 mag_id_reg_l = PORT_REG(port, XGMAC_PORT_MAGIC_MACID_LO);
2488 mag_id_reg_h = PORT_REG(port, XGMAC_PORT_MAGIC_MACID_HI);
2489 port_cfg_reg = PORT_REG(port, XGMAC_PORT_CFG2);
2490 } else {
2491 mag_id_reg_l = T5_PORT_REG(port, MAC_PORT_MAGIC_MACID_LO);
2492 mag_id_reg_h = T5_PORT_REG(port, MAC_PORT_MAGIC_MACID_HI);
2493 port_cfg_reg = T5_PORT_REG(port, MAC_PORT_CFG2);
2494 }
2495
56d36be4 2496 if (addr) {
0a57a536 2497 t4_write_reg(adap, mag_id_reg_l,
56d36be4
DM
2498 (addr[2] << 24) | (addr[3] << 16) |
2499 (addr[4] << 8) | addr[5]);
0a57a536 2500 t4_write_reg(adap, mag_id_reg_h,
56d36be4
DM
2501 (addr[0] << 8) | addr[1]);
2502 }
0a57a536 2503 t4_set_reg_field(adap, port_cfg_reg, MAGICEN,
56d36be4
DM
2504 addr ? MAGICEN : 0);
2505}
2506
2507/**
2508 * t4_wol_pat_enable - enable/disable pattern-based WoL
2509 * @adap: the adapter
2510 * @port: the physical port index
2511 * @map: bitmap of which HW pattern filters to set
2512 * @mask0: byte mask for bytes 0-63 of a packet
2513 * @mask1: byte mask for bytes 64-127 of a packet
2514 * @crc: Ethernet CRC for selected bytes
2515 * @enable: enable/disable switch
2516 *
2517 * Sets the pattern filters indicated in @map to mask out the bytes
2518 * specified in @mask0/@mask1 in received packets and compare the CRC of
2519 * the resulting packet against @crc. If @enable is %true pattern-based
2520 * WoL is enabled, otherwise disabled.
2521 */
2522int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map,
2523 u64 mask0, u64 mask1, unsigned int crc, bool enable)
2524{
2525 int i;
0a57a536
SR
2526 u32 port_cfg_reg;
2527
d14807dd 2528 if (is_t4(adap->params.chip))
0a57a536
SR
2529 port_cfg_reg = PORT_REG(port, XGMAC_PORT_CFG2);
2530 else
2531 port_cfg_reg = T5_PORT_REG(port, MAC_PORT_CFG2);
56d36be4
DM
2532
2533 if (!enable) {
0a57a536 2534 t4_set_reg_field(adap, port_cfg_reg, PATEN, 0);
56d36be4
DM
2535 return 0;
2536 }
2537 if (map > 0xff)
2538 return -EINVAL;
2539
0a57a536 2540#define EPIO_REG(name) \
d14807dd 2541 (is_t4(adap->params.chip) ? PORT_REG(port, XGMAC_PORT_EPIO_##name) : \
0a57a536 2542 T5_PORT_REG(port, MAC_PORT_EPIO_##name))
56d36be4
DM
2543
2544 t4_write_reg(adap, EPIO_REG(DATA1), mask0 >> 32);
2545 t4_write_reg(adap, EPIO_REG(DATA2), mask1);
2546 t4_write_reg(adap, EPIO_REG(DATA3), mask1 >> 32);
2547
2548 for (i = 0; i < NWOL_PAT; i++, map >>= 1) {
2549 if (!(map & 1))
2550 continue;
2551
2552 /* write byte masks */
2553 t4_write_reg(adap, EPIO_REG(DATA0), mask0);
2554 t4_write_reg(adap, EPIO_REG(OP), ADDRESS(i) | EPIOWR);
2555 t4_read_reg(adap, EPIO_REG(OP)); /* flush */
ce91a923 2556 if (t4_read_reg(adap, EPIO_REG(OP)) & SF_BUSY)
56d36be4
DM
2557 return -ETIMEDOUT;
2558
2559 /* write CRC */
2560 t4_write_reg(adap, EPIO_REG(DATA0), crc);
2561 t4_write_reg(adap, EPIO_REG(OP), ADDRESS(i + 32) | EPIOWR);
2562 t4_read_reg(adap, EPIO_REG(OP)); /* flush */
ce91a923 2563 if (t4_read_reg(adap, EPIO_REG(OP)) & SF_BUSY)
56d36be4
DM
2564 return -ETIMEDOUT;
2565 }
2566#undef EPIO_REG
2567
2568 t4_set_reg_field(adap, PORT_REG(port, XGMAC_PORT_CFG2), 0, PATEN);
2569 return 0;
2570}
2571
f2b7e78d
VP
2572/* t4_mk_filtdelwr - create a delete filter WR
2573 * @ftid: the filter ID
2574 * @wr: the filter work request to populate
2575 * @qid: ingress queue to receive the delete notification
2576 *
2577 * Creates a filter work request to delete the supplied filter. If @qid is
2578 * negative the delete notification is suppressed.
2579 */
2580void t4_mk_filtdelwr(unsigned int ftid, struct fw_filter_wr *wr, int qid)
2581{
2582 memset(wr, 0, sizeof(*wr));
e2ac9628
HS
2583 wr->op_pkd = htonl(FW_WR_OP_V(FW_FILTER_WR));
2584 wr->len16_pkd = htonl(FW_WR_LEN16_V(sizeof(*wr) / 16));
77a80e23
HS
2585 wr->tid_to_iq = htonl(FW_FILTER_WR_TID_V(ftid) |
2586 FW_FILTER_WR_NOREPLY_V(qid < 0));
2587 wr->del_filter_to_l2tix = htonl(FW_FILTER_WR_DEL_FILTER_F);
f2b7e78d 2588 if (qid >= 0)
77a80e23 2589 wr->rx_chan_rx_rpl_iq = htons(FW_FILTER_WR_RX_RPL_IQ_V(qid));
f2b7e78d
VP
2590}
2591
56d36be4 2592#define INIT_CMD(var, cmd, rd_wr) do { \
e2ac9628
HS
2593 (var).op_to_write = htonl(FW_CMD_OP_V(FW_##cmd##_CMD) | \
2594 FW_CMD_REQUEST_F | FW_CMD_##rd_wr##_F); \
56d36be4
DM
2595 (var).retval_len16 = htonl(FW_LEN16(var)); \
2596} while (0)
2597
8caa1e84
VP
2598int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox,
2599 u32 addr, u32 val)
2600{
2601 struct fw_ldst_cmd c;
2602
2603 memset(&c, 0, sizeof(c));
e2ac9628
HS
2604 c.op_to_addrspace = htonl(FW_CMD_OP_V(FW_LDST_CMD) | FW_CMD_REQUEST_F |
2605 FW_CMD_WRITE_F |
5167865a 2606 FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_FIRMWARE));
8caa1e84
VP
2607 c.cycles_to_len16 = htonl(FW_LEN16(c));
2608 c.u.addrval.addr = htonl(addr);
2609 c.u.addrval.val = htonl(val);
2610
2611 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2612}
2613
56d36be4
DM
2614/**
2615 * t4_mdio_rd - read a PHY register through MDIO
2616 * @adap: the adapter
2617 * @mbox: mailbox to use for the FW command
2618 * @phy_addr: the PHY address
2619 * @mmd: the PHY MMD to access (0 for clause 22 PHYs)
2620 * @reg: the register to read
2621 * @valp: where to store the value
2622 *
2623 * Issues a FW command through the given mailbox to read a PHY register.
2624 */
2625int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
2626 unsigned int mmd, unsigned int reg, u16 *valp)
2627{
2628 int ret;
2629 struct fw_ldst_cmd c;
2630
2631 memset(&c, 0, sizeof(c));
e2ac9628 2632 c.op_to_addrspace = htonl(FW_CMD_OP_V(FW_LDST_CMD) | FW_CMD_REQUEST_F |
5167865a 2633 FW_CMD_READ_F | FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_MDIO));
56d36be4 2634 c.cycles_to_len16 = htonl(FW_LEN16(c));
5167865a
HS
2635 c.u.mdio.paddr_mmd = htons(FW_LDST_CMD_PADDR_V(phy_addr) |
2636 FW_LDST_CMD_MMD_V(mmd));
56d36be4
DM
2637 c.u.mdio.raddr = htons(reg);
2638
2639 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
2640 if (ret == 0)
2641 *valp = ntohs(c.u.mdio.rval);
2642 return ret;
2643}
2644
2645/**
2646 * t4_mdio_wr - write a PHY register through MDIO
2647 * @adap: the adapter
2648 * @mbox: mailbox to use for the FW command
2649 * @phy_addr: the PHY address
2650 * @mmd: the PHY MMD to access (0 for clause 22 PHYs)
2651 * @reg: the register to write
2652 * @valp: value to write
2653 *
2654 * Issues a FW command through the given mailbox to write a PHY register.
2655 */
2656int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
2657 unsigned int mmd, unsigned int reg, u16 val)
2658{
2659 struct fw_ldst_cmd c;
2660
2661 memset(&c, 0, sizeof(c));
e2ac9628 2662 c.op_to_addrspace = htonl(FW_CMD_OP_V(FW_LDST_CMD) | FW_CMD_REQUEST_F |
5167865a 2663 FW_CMD_WRITE_F | FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_MDIO));
56d36be4 2664 c.cycles_to_len16 = htonl(FW_LEN16(c));
5167865a
HS
2665 c.u.mdio.paddr_mmd = htons(FW_LDST_CMD_PADDR_V(phy_addr) |
2666 FW_LDST_CMD_MMD_V(mmd));
56d36be4
DM
2667 c.u.mdio.raddr = htons(reg);
2668 c.u.mdio.rval = htons(val);
2669
2670 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2671}
2672
68bce192
KS
2673/**
2674 * t4_sge_decode_idma_state - decode the idma state
2675 * @adap: the adapter
2676 * @state: the state idma is stuck in
2677 */
2678void t4_sge_decode_idma_state(struct adapter *adapter, int state)
2679{
2680 static const char * const t4_decode[] = {
2681 "IDMA_IDLE",
2682 "IDMA_PUSH_MORE_CPL_FIFO",
2683 "IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO",
2684 "Not used",
2685 "IDMA_PHYSADDR_SEND_PCIEHDR",
2686 "IDMA_PHYSADDR_SEND_PAYLOAD_FIRST",
2687 "IDMA_PHYSADDR_SEND_PAYLOAD",
2688 "IDMA_SEND_FIFO_TO_IMSG",
2689 "IDMA_FL_REQ_DATA_FL_PREP",
2690 "IDMA_FL_REQ_DATA_FL",
2691 "IDMA_FL_DROP",
2692 "IDMA_FL_H_REQ_HEADER_FL",
2693 "IDMA_FL_H_SEND_PCIEHDR",
2694 "IDMA_FL_H_PUSH_CPL_FIFO",
2695 "IDMA_FL_H_SEND_CPL",
2696 "IDMA_FL_H_SEND_IP_HDR_FIRST",
2697 "IDMA_FL_H_SEND_IP_HDR",
2698 "IDMA_FL_H_REQ_NEXT_HEADER_FL",
2699 "IDMA_FL_H_SEND_NEXT_PCIEHDR",
2700 "IDMA_FL_H_SEND_IP_HDR_PADDING",
2701 "IDMA_FL_D_SEND_PCIEHDR",
2702 "IDMA_FL_D_SEND_CPL_AND_IP_HDR",
2703 "IDMA_FL_D_REQ_NEXT_DATA_FL",
2704 "IDMA_FL_SEND_PCIEHDR",
2705 "IDMA_FL_PUSH_CPL_FIFO",
2706 "IDMA_FL_SEND_CPL",
2707 "IDMA_FL_SEND_PAYLOAD_FIRST",
2708 "IDMA_FL_SEND_PAYLOAD",
2709 "IDMA_FL_REQ_NEXT_DATA_FL",
2710 "IDMA_FL_SEND_NEXT_PCIEHDR",
2711 "IDMA_FL_SEND_PADDING",
2712 "IDMA_FL_SEND_COMPLETION_TO_IMSG",
2713 "IDMA_FL_SEND_FIFO_TO_IMSG",
2714 "IDMA_FL_REQ_DATAFL_DONE",
2715 "IDMA_FL_REQ_HEADERFL_DONE",
2716 };
2717 static const char * const t5_decode[] = {
2718 "IDMA_IDLE",
2719 "IDMA_ALMOST_IDLE",
2720 "IDMA_PUSH_MORE_CPL_FIFO",
2721 "IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO",
2722 "IDMA_SGEFLRFLUSH_SEND_PCIEHDR",
2723 "IDMA_PHYSADDR_SEND_PCIEHDR",
2724 "IDMA_PHYSADDR_SEND_PAYLOAD_FIRST",
2725 "IDMA_PHYSADDR_SEND_PAYLOAD",
2726 "IDMA_SEND_FIFO_TO_IMSG",
2727 "IDMA_FL_REQ_DATA_FL",
2728 "IDMA_FL_DROP",
2729 "IDMA_FL_DROP_SEND_INC",
2730 "IDMA_FL_H_REQ_HEADER_FL",
2731 "IDMA_FL_H_SEND_PCIEHDR",
2732 "IDMA_FL_H_PUSH_CPL_FIFO",
2733 "IDMA_FL_H_SEND_CPL",
2734 "IDMA_FL_H_SEND_IP_HDR_FIRST",
2735 "IDMA_FL_H_SEND_IP_HDR",
2736 "IDMA_FL_H_REQ_NEXT_HEADER_FL",
2737 "IDMA_FL_H_SEND_NEXT_PCIEHDR",
2738 "IDMA_FL_H_SEND_IP_HDR_PADDING",
2739 "IDMA_FL_D_SEND_PCIEHDR",
2740 "IDMA_FL_D_SEND_CPL_AND_IP_HDR",
2741 "IDMA_FL_D_REQ_NEXT_DATA_FL",
2742 "IDMA_FL_SEND_PCIEHDR",
2743 "IDMA_FL_PUSH_CPL_FIFO",
2744 "IDMA_FL_SEND_CPL",
2745 "IDMA_FL_SEND_PAYLOAD_FIRST",
2746 "IDMA_FL_SEND_PAYLOAD",
2747 "IDMA_FL_REQ_NEXT_DATA_FL",
2748 "IDMA_FL_SEND_NEXT_PCIEHDR",
2749 "IDMA_FL_SEND_PADDING",
2750 "IDMA_FL_SEND_COMPLETION_TO_IMSG",
2751 };
2752 static const u32 sge_regs[] = {
2753 SGE_DEBUG_DATA_LOW_INDEX_2,
2754 SGE_DEBUG_DATA_LOW_INDEX_3,
2755 SGE_DEBUG_DATA_HIGH_INDEX_10,
2756 };
2757 const char **sge_idma_decode;
2758 int sge_idma_decode_nstates;
2759 int i;
2760
2761 if (is_t4(adapter->params.chip)) {
2762 sge_idma_decode = (const char **)t4_decode;
2763 sge_idma_decode_nstates = ARRAY_SIZE(t4_decode);
2764 } else {
2765 sge_idma_decode = (const char **)t5_decode;
2766 sge_idma_decode_nstates = ARRAY_SIZE(t5_decode);
2767 }
2768
2769 if (state < sge_idma_decode_nstates)
2770 CH_WARN(adapter, "idma state %s\n", sge_idma_decode[state]);
2771 else
2772 CH_WARN(adapter, "idma state %d unknown\n", state);
2773
2774 for (i = 0; i < ARRAY_SIZE(sge_regs); i++)
2775 CH_WARN(adapter, "SGE register %#x value %#x\n",
2776 sge_regs[i], t4_read_reg(adapter, sge_regs[i]));
2777}
2778
56d36be4 2779/**
636f9d37
VP
2780 * t4_fw_hello - establish communication with FW
2781 * @adap: the adapter
2782 * @mbox: mailbox to use for the FW command
2783 * @evt_mbox: mailbox to receive async FW events
2784 * @master: specifies the caller's willingness to be the device master
2785 * @state: returns the current device state (if non-NULL)
56d36be4 2786 *
636f9d37
VP
2787 * Issues a command to establish communication with FW. Returns either
2788 * an error (negative integer) or the mailbox of the Master PF.
56d36be4
DM
2789 */
2790int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox,
2791 enum dev_master master, enum dev_state *state)
2792{
2793 int ret;
2794 struct fw_hello_cmd c;
636f9d37
VP
2795 u32 v;
2796 unsigned int master_mbox;
2797 int retries = FW_CMD_HELLO_RETRIES;
56d36be4 2798
636f9d37
VP
2799retry:
2800 memset(&c, 0, sizeof(c));
56d36be4 2801 INIT_CMD(c, HELLO, WRITE);
ce91a923 2802 c.err_to_clearinit = htonl(
5167865a
HS
2803 FW_HELLO_CMD_MASTERDIS_V(master == MASTER_CANT) |
2804 FW_HELLO_CMD_MASTERFORCE_V(master == MASTER_MUST) |
2805 FW_HELLO_CMD_MBMASTER_V(master == MASTER_MUST ? mbox :
2806 FW_HELLO_CMD_MBMASTER_M) |
2807 FW_HELLO_CMD_MBASYNCNOT_V(evt_mbox) |
2808 FW_HELLO_CMD_STAGE_V(fw_hello_cmd_stage_os) |
2809 FW_HELLO_CMD_CLEARINIT_F);
56d36be4 2810
636f9d37
VP
2811 /*
2812 * Issue the HELLO command to the firmware. If it's not successful
2813 * but indicates that we got a "busy" or "timeout" condition, retry
31d55c2d
HS
2814 * the HELLO until we exhaust our retry limit. If we do exceed our
2815 * retry limit, check to see if the firmware left us any error
2816 * information and report that if so.
636f9d37 2817 */
56d36be4 2818 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
636f9d37
VP
2819 if (ret < 0) {
2820 if ((ret == -EBUSY || ret == -ETIMEDOUT) && retries-- > 0)
2821 goto retry;
b2e1a3f0 2822 if (t4_read_reg(adap, MA_PCIE_FW) & PCIE_FW_ERR)
31d55c2d 2823 t4_report_fw_error(adap);
636f9d37
VP
2824 return ret;
2825 }
2826
ce91a923 2827 v = ntohl(c.err_to_clearinit);
5167865a 2828 master_mbox = FW_HELLO_CMD_MBMASTER_G(v);
636f9d37 2829 if (state) {
5167865a 2830 if (v & FW_HELLO_CMD_ERR_F)
56d36be4 2831 *state = DEV_STATE_ERR;
5167865a 2832 else if (v & FW_HELLO_CMD_INIT_F)
636f9d37 2833 *state = DEV_STATE_INIT;
56d36be4
DM
2834 else
2835 *state = DEV_STATE_UNINIT;
2836 }
636f9d37
VP
2837
2838 /*
2839 * If we're not the Master PF then we need to wait around for the
2840 * Master PF Driver to finish setting up the adapter.
2841 *
2842 * Note that we also do this wait if we're a non-Master-capable PF and
2843 * there is no current Master PF; a Master PF may show up momentarily
2844 * and we wouldn't want to fail pointlessly. (This can happen when an
2845 * OS loads lots of different drivers rapidly at the same time). In
2846 * this case, the Master PF returned by the firmware will be
b2e1a3f0 2847 * PCIE_FW_MASTER_M so the test below will work ...
636f9d37 2848 */
5167865a 2849 if ((v & (FW_HELLO_CMD_ERR_F|FW_HELLO_CMD_INIT_F)) == 0 &&
636f9d37
VP
2850 master_mbox != mbox) {
2851 int waiting = FW_CMD_HELLO_TIMEOUT;
2852
2853 /*
2854 * Wait for the firmware to either indicate an error or
2855 * initialized state. If we see either of these we bail out
2856 * and report the issue to the caller. If we exhaust the
2857 * "hello timeout" and we haven't exhausted our retries, try
2858 * again. Otherwise bail with a timeout error.
2859 */
2860 for (;;) {
2861 u32 pcie_fw;
2862
2863 msleep(50);
2864 waiting -= 50;
2865
2866 /*
2867 * If neither Error nor Initialialized are indicated
2868 * by the firmware keep waiting till we exaust our
2869 * timeout ... and then retry if we haven't exhausted
2870 * our retries ...
2871 */
2872 pcie_fw = t4_read_reg(adap, MA_PCIE_FW);
b2e1a3f0 2873 if (!(pcie_fw & (PCIE_FW_ERR|PCIE_FW_INIT))) {
636f9d37
VP
2874 if (waiting <= 0) {
2875 if (retries-- > 0)
2876 goto retry;
2877
2878 return -ETIMEDOUT;
2879 }
2880 continue;
2881 }
2882
2883 /*
2884 * We either have an Error or Initialized condition
2885 * report errors preferentially.
2886 */
2887 if (state) {
b2e1a3f0 2888 if (pcie_fw & PCIE_FW_ERR)
636f9d37 2889 *state = DEV_STATE_ERR;
b2e1a3f0 2890 else if (pcie_fw & PCIE_FW_INIT)
636f9d37
VP
2891 *state = DEV_STATE_INIT;
2892 }
2893
2894 /*
2895 * If we arrived before a Master PF was selected and
2896 * there's not a valid Master PF, grab its identity
2897 * for our caller.
2898 */
b2e1a3f0
HS
2899 if (master_mbox == PCIE_FW_MASTER_M &&
2900 (pcie_fw & PCIE_FW_MASTER_VLD))
2901 master_mbox = PCIE_FW_MASTER_G(pcie_fw);
636f9d37
VP
2902 break;
2903 }
2904 }
2905
2906 return master_mbox;
56d36be4
DM
2907}
2908
2909/**
2910 * t4_fw_bye - end communication with FW
2911 * @adap: the adapter
2912 * @mbox: mailbox to use for the FW command
2913 *
2914 * Issues a command to terminate communication with FW.
2915 */
2916int t4_fw_bye(struct adapter *adap, unsigned int mbox)
2917{
2918 struct fw_bye_cmd c;
2919
0062b15c 2920 memset(&c, 0, sizeof(c));
56d36be4
DM
2921 INIT_CMD(c, BYE, WRITE);
2922 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2923}
2924
2925/**
2926 * t4_init_cmd - ask FW to initialize the device
2927 * @adap: the adapter
2928 * @mbox: mailbox to use for the FW command
2929 *
2930 * Issues a command to FW to partially initialize the device. This
2931 * performs initialization that generally doesn't depend on user input.
2932 */
2933int t4_early_init(struct adapter *adap, unsigned int mbox)
2934{
2935 struct fw_initialize_cmd c;
2936
0062b15c 2937 memset(&c, 0, sizeof(c));
56d36be4
DM
2938 INIT_CMD(c, INITIALIZE, WRITE);
2939 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2940}
2941
2942/**
2943 * t4_fw_reset - issue a reset to FW
2944 * @adap: the adapter
2945 * @mbox: mailbox to use for the FW command
2946 * @reset: specifies the type of reset to perform
2947 *
2948 * Issues a reset command of the specified type to FW.
2949 */
2950int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset)
2951{
2952 struct fw_reset_cmd c;
2953
0062b15c 2954 memset(&c, 0, sizeof(c));
56d36be4
DM
2955 INIT_CMD(c, RESET, WRITE);
2956 c.val = htonl(reset);
2957 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2958}
2959
26f7cbc0
VP
2960/**
2961 * t4_fw_halt - issue a reset/halt to FW and put uP into RESET
2962 * @adap: the adapter
2963 * @mbox: mailbox to use for the FW RESET command (if desired)
2964 * @force: force uP into RESET even if FW RESET command fails
2965 *
2966 * Issues a RESET command to firmware (if desired) with a HALT indication
2967 * and then puts the microprocessor into RESET state. The RESET command
2968 * will only be issued if a legitimate mailbox is provided (mbox <=
b2e1a3f0 2969 * PCIE_FW_MASTER_M).
26f7cbc0
VP
2970 *
2971 * This is generally used in order for the host to safely manipulate the
2972 * adapter without fear of conflicting with whatever the firmware might
2973 * be doing. The only way out of this state is to RESTART the firmware
2974 * ...
2975 */
de5b8677 2976static int t4_fw_halt(struct adapter *adap, unsigned int mbox, int force)
26f7cbc0
VP
2977{
2978 int ret = 0;
2979
2980 /*
2981 * If a legitimate mailbox is provided, issue a RESET command
2982 * with a HALT indication.
2983 */
b2e1a3f0 2984 if (mbox <= PCIE_FW_MASTER_M) {
26f7cbc0
VP
2985 struct fw_reset_cmd c;
2986
2987 memset(&c, 0, sizeof(c));
2988 INIT_CMD(c, RESET, WRITE);
2989 c.val = htonl(PIORST | PIORSTMODE);
5167865a 2990 c.halt_pkd = htonl(FW_RESET_CMD_HALT_F);
26f7cbc0
VP
2991 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2992 }
2993
2994 /*
2995 * Normally we won't complete the operation if the firmware RESET
2996 * command fails but if our caller insists we'll go ahead and put the
2997 * uP into RESET. This can be useful if the firmware is hung or even
2998 * missing ... We'll have to take the risk of putting the uP into
2999 * RESET without the cooperation of firmware in that case.
3000 *
3001 * We also force the firmware's HALT flag to be on in case we bypassed
3002 * the firmware RESET command above or we're dealing with old firmware
3003 * which doesn't have the HALT capability. This will serve as a flag
3004 * for the incoming firmware to know that it's coming out of a HALT
3005 * rather than a RESET ... if it's new enough to understand that ...
3006 */
3007 if (ret == 0 || force) {
3008 t4_set_reg_field(adap, CIM_BOOT_CFG, UPCRST, UPCRST);
b2e1a3f0
HS
3009 t4_set_reg_field(adap, PCIE_FW, PCIE_FW_HALT_F,
3010 PCIE_FW_HALT_F);
26f7cbc0
VP
3011 }
3012
3013 /*
3014 * And we always return the result of the firmware RESET command
3015 * even when we force the uP into RESET ...
3016 */
3017 return ret;
3018}
3019
3020/**
3021 * t4_fw_restart - restart the firmware by taking the uP out of RESET
3022 * @adap: the adapter
3023 * @reset: if we want to do a RESET to restart things
3024 *
3025 * Restart firmware previously halted by t4_fw_halt(). On successful
3026 * return the previous PF Master remains as the new PF Master and there
3027 * is no need to issue a new HELLO command, etc.
3028 *
3029 * We do this in two ways:
3030 *
3031 * 1. If we're dealing with newer firmware we'll simply want to take
3032 * the chip's microprocessor out of RESET. This will cause the
3033 * firmware to start up from its start vector. And then we'll loop
3034 * until the firmware indicates it's started again (PCIE_FW.HALT
3035 * reset to 0) or we timeout.
3036 *
3037 * 2. If we're dealing with older firmware then we'll need to RESET
3038 * the chip since older firmware won't recognize the PCIE_FW.HALT
3039 * flag and automatically RESET itself on startup.
3040 */
de5b8677 3041static int t4_fw_restart(struct adapter *adap, unsigned int mbox, int reset)
26f7cbc0
VP
3042{
3043 if (reset) {
3044 /*
3045 * Since we're directing the RESET instead of the firmware
3046 * doing it automatically, we need to clear the PCIE_FW.HALT
3047 * bit.
3048 */
b2e1a3f0 3049 t4_set_reg_field(adap, PCIE_FW, PCIE_FW_HALT_F, 0);
26f7cbc0
VP
3050
3051 /*
3052 * If we've been given a valid mailbox, first try to get the
3053 * firmware to do the RESET. If that works, great and we can
3054 * return success. Otherwise, if we haven't been given a
3055 * valid mailbox or the RESET command failed, fall back to
3056 * hitting the chip with a hammer.
3057 */
b2e1a3f0 3058 if (mbox <= PCIE_FW_MASTER_M) {
26f7cbc0
VP
3059 t4_set_reg_field(adap, CIM_BOOT_CFG, UPCRST, 0);
3060 msleep(100);
3061 if (t4_fw_reset(adap, mbox,
3062 PIORST | PIORSTMODE) == 0)
3063 return 0;
3064 }
3065
3066 t4_write_reg(adap, PL_RST, PIORST | PIORSTMODE);
3067 msleep(2000);
3068 } else {
3069 int ms;
3070
3071 t4_set_reg_field(adap, CIM_BOOT_CFG, UPCRST, 0);
3072 for (ms = 0; ms < FW_CMD_MAX_TIMEOUT; ) {
b2e1a3f0 3073 if (!(t4_read_reg(adap, PCIE_FW) & PCIE_FW_HALT_F))
26f7cbc0
VP
3074 return 0;
3075 msleep(100);
3076 ms += 100;
3077 }
3078 return -ETIMEDOUT;
3079 }
3080 return 0;
3081}
3082
3083/**
3084 * t4_fw_upgrade - perform all of the steps necessary to upgrade FW
3085 * @adap: the adapter
3086 * @mbox: mailbox to use for the FW RESET command (if desired)
3087 * @fw_data: the firmware image to write
3088 * @size: image size
3089 * @force: force upgrade even if firmware doesn't cooperate
3090 *
3091 * Perform all of the steps necessary for upgrading an adapter's
3092 * firmware image. Normally this requires the cooperation of the
3093 * existing firmware in order to halt all existing activities
3094 * but if an invalid mailbox token is passed in we skip that step
3095 * (though we'll still put the adapter microprocessor into RESET in
3096 * that case).
3097 *
3098 * On successful return the new firmware will have been loaded and
3099 * the adapter will have been fully RESET losing all previous setup
3100 * state. On unsuccessful return the adapter may be completely hosed ...
3101 * positive errno indicates that the adapter is ~probably~ intact, a
3102 * negative errno indicates that things are looking bad ...
3103 */
22c0b963
HS
3104int t4_fw_upgrade(struct adapter *adap, unsigned int mbox,
3105 const u8 *fw_data, unsigned int size, int force)
26f7cbc0
VP
3106{
3107 const struct fw_hdr *fw_hdr = (const struct fw_hdr *)fw_data;
3108 int reset, ret;
3109
79af221d
HS
3110 if (!t4_fw_matches_chip(adap, fw_hdr))
3111 return -EINVAL;
3112
26f7cbc0
VP
3113 ret = t4_fw_halt(adap, mbox, force);
3114 if (ret < 0 && !force)
3115 return ret;
3116
3117 ret = t4_load_fw(adap, fw_data, size);
3118 if (ret < 0)
3119 return ret;
3120
3121 /*
3122 * Older versions of the firmware don't understand the new
3123 * PCIE_FW.HALT flag and so won't know to perform a RESET when they
3124 * restart. So for newly loaded older firmware we'll have to do the
3125 * RESET for it so it starts up on a clean slate. We can tell if
3126 * the newly loaded firmware will handle this right by checking
3127 * its header flags to see if it advertises the capability.
3128 */
3129 reset = ((ntohl(fw_hdr->flags) & FW_HDR_FLAGS_RESET_HALT) == 0);
3130 return t4_fw_restart(adap, mbox, reset);
3131}
3132
636f9d37
VP
3133/**
3134 * t4_fixup_host_params - fix up host-dependent parameters
3135 * @adap: the adapter
3136 * @page_size: the host's Base Page Size
3137 * @cache_line_size: the host's Cache Line Size
3138 *
3139 * Various registers in T4 contain values which are dependent on the
3140 * host's Base Page and Cache Line Sizes. This function will fix all of
3141 * those registers with the appropriate values as passed in ...
3142 */
3143int t4_fixup_host_params(struct adapter *adap, unsigned int page_size,
3144 unsigned int cache_line_size)
3145{
3146 unsigned int page_shift = fls(page_size) - 1;
3147 unsigned int sge_hps = page_shift - 10;
3148 unsigned int stat_len = cache_line_size > 64 ? 128 : 64;
3149 unsigned int fl_align = cache_line_size < 32 ? 32 : cache_line_size;
3150 unsigned int fl_align_log = fls(fl_align) - 1;
3151
f612b815
HS
3152 t4_write_reg(adap, SGE_HOST_PAGE_SIZE_A,
3153 HOSTPAGESIZEPF0_V(sge_hps) |
3154 HOSTPAGESIZEPF1_V(sge_hps) |
3155 HOSTPAGESIZEPF2_V(sge_hps) |
3156 HOSTPAGESIZEPF3_V(sge_hps) |
3157 HOSTPAGESIZEPF4_V(sge_hps) |
3158 HOSTPAGESIZEPF5_V(sge_hps) |
3159 HOSTPAGESIZEPF6_V(sge_hps) |
3160 HOSTPAGESIZEPF7_V(sge_hps));
636f9d37 3161
ce8f407a 3162 if (is_t4(adap->params.chip)) {
f612b815
HS
3163 t4_set_reg_field(adap, SGE_CONTROL_A,
3164 INGPADBOUNDARY_V(INGPADBOUNDARY_M) |
3165 EGRSTATUSPAGESIZE_F,
3166 INGPADBOUNDARY_V(fl_align_log -
3167 INGPADBOUNDARY_SHIFT_X) |
3168 EGRSTATUSPAGESIZE_V(stat_len != 64));
ce8f407a
HS
3169 } else {
3170 /* T5 introduced the separation of the Free List Padding and
3171 * Packing Boundaries. Thus, we can select a smaller Padding
3172 * Boundary to avoid uselessly chewing up PCIe Link and Memory
3173 * Bandwidth, and use a Packing Boundary which is large enough
3174 * to avoid false sharing between CPUs, etc.
3175 *
3176 * For the PCI Link, the smaller the Padding Boundary the
3177 * better. For the Memory Controller, a smaller Padding
3178 * Boundary is better until we cross under the Memory Line
3179 * Size (the minimum unit of transfer to/from Memory). If we
3180 * have a Padding Boundary which is smaller than the Memory
3181 * Line Size, that'll involve a Read-Modify-Write cycle on the
3182 * Memory Controller which is never good. For T5 the smallest
3183 * Padding Boundary which we can select is 32 bytes which is
3184 * larger than any known Memory Controller Line Size so we'll
3185 * use that.
3186 *
3187 * T5 has a different interpretation of the "0" value for the
3188 * Packing Boundary. This corresponds to 16 bytes instead of
3189 * the expected 32 bytes. We never have a Packing Boundary
3190 * less than 32 bytes so we can't use that special value but
3191 * on the other hand, if we wanted 32 bytes, the best we can
3192 * really do is 64 bytes.
3193 */
3194 if (fl_align <= 32) {
3195 fl_align = 64;
3196 fl_align_log = 6;
3197 }
f612b815
HS
3198 t4_set_reg_field(adap, SGE_CONTROL_A,
3199 INGPADBOUNDARY_V(INGPADBOUNDARY_M) |
3200 EGRSTATUSPAGESIZE_F,
3201 INGPADBOUNDARY_V(INGPCIEBOUNDARY_32B_X) |
3202 EGRSTATUSPAGESIZE_V(stat_len != 64));
ce8f407a
HS
3203 t4_set_reg_field(adap, SGE_CONTROL2_A,
3204 INGPACKBOUNDARY_V(INGPACKBOUNDARY_M),
3205 INGPACKBOUNDARY_V(fl_align_log -
f612b815 3206 INGPACKBOUNDARY_SHIFT_X));
ce8f407a 3207 }
636f9d37
VP
3208 /*
3209 * Adjust various SGE Free List Host Buffer Sizes.
3210 *
3211 * This is something of a crock since we're using fixed indices into
3212 * the array which are also known by the sge.c code and the T4
3213 * Firmware Configuration File. We need to come up with a much better
3214 * approach to managing this array. For now, the first four entries
3215 * are:
3216 *
3217 * 0: Host Page Size
3218 * 1: 64KB
3219 * 2: Buffer size corresponding to 1500 byte MTU (unpacked mode)
3220 * 3: Buffer size corresponding to 9000 byte MTU (unpacked mode)
3221 *
3222 * For the single-MTU buffers in unpacked mode we need to include
3223 * space for the SGE Control Packet Shift, 14 byte Ethernet header,
3224 * possible 4 byte VLAN tag, all rounded up to the next Ingress Packet
3225 * Padding boundry. All of these are accommodated in the Factory
3226 * Default Firmware Configuration File but we need to adjust it for
3227 * this host's cache line size.
3228 */
f612b815
HS
3229 t4_write_reg(adap, SGE_FL_BUFFER_SIZE0_A, page_size);
3230 t4_write_reg(adap, SGE_FL_BUFFER_SIZE2_A,
3231 (t4_read_reg(adap, SGE_FL_BUFFER_SIZE2_A) + fl_align-1)
636f9d37 3232 & ~(fl_align-1));
f612b815
HS
3233 t4_write_reg(adap, SGE_FL_BUFFER_SIZE3_A,
3234 (t4_read_reg(adap, SGE_FL_BUFFER_SIZE3_A) + fl_align-1)
636f9d37
VP
3235 & ~(fl_align-1));
3236
3237 t4_write_reg(adap, ULP_RX_TDDP_PSZ, HPZ0(page_shift - 12));
3238
3239 return 0;
3240}
3241
3242/**
3243 * t4_fw_initialize - ask FW to initialize the device
3244 * @adap: the adapter
3245 * @mbox: mailbox to use for the FW command
3246 *
3247 * Issues a command to FW to partially initialize the device. This
3248 * performs initialization that generally doesn't depend on user input.
3249 */
3250int t4_fw_initialize(struct adapter *adap, unsigned int mbox)
3251{
3252 struct fw_initialize_cmd c;
3253
3254 memset(&c, 0, sizeof(c));
3255 INIT_CMD(c, INITIALIZE, WRITE);
3256 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
3257}
3258
56d36be4
DM
3259/**
3260 * t4_query_params - query FW or device parameters
3261 * @adap: the adapter
3262 * @mbox: mailbox to use for the FW command
3263 * @pf: the PF
3264 * @vf: the VF
3265 * @nparams: the number of parameters
3266 * @params: the parameter names
3267 * @val: the parameter values
3268 *
3269 * Reads the value of FW or device parameters. Up to 7 parameters can be
3270 * queried at once.
3271 */
3272int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
3273 unsigned int vf, unsigned int nparams, const u32 *params,
3274 u32 *val)
3275{
3276 int i, ret;
3277 struct fw_params_cmd c;
3278 __be32 *p = &c.param[0].mnem;
3279
3280 if (nparams > 7)
3281 return -EINVAL;
3282
3283 memset(&c, 0, sizeof(c));
e2ac9628 3284 c.op_to_vfn = htonl(FW_CMD_OP_V(FW_PARAMS_CMD) | FW_CMD_REQUEST_F |
5167865a
HS
3285 FW_CMD_READ_F | FW_PARAMS_CMD_PFN_V(pf) |
3286 FW_PARAMS_CMD_VFN_V(vf));
56d36be4
DM
3287 c.retval_len16 = htonl(FW_LEN16(c));
3288 for (i = 0; i < nparams; i++, p += 2)
3289 *p = htonl(*params++);
3290
3291 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
3292 if (ret == 0)
3293 for (i = 0, p = &c.param[0].val; i < nparams; i++, p += 2)
3294 *val++ = ntohl(*p);
3295 return ret;
3296}
3297
688848b1
AB
3298/**
3299 * t4_set_params_nosleep - sets FW or device parameters
3300 * @adap: the adapter
3301 * @mbox: mailbox to use for the FW command
3302 * @pf: the PF
3303 * @vf: the VF
3304 * @nparams: the number of parameters
3305 * @params: the parameter names
3306 * @val: the parameter values
3307 *
3308 * Does not ever sleep
3309 * Sets the value of FW or device parameters. Up to 7 parameters can be
3310 * specified at once.
3311 */
3312int t4_set_params_nosleep(struct adapter *adap, unsigned int mbox,
3313 unsigned int pf, unsigned int vf,
3314 unsigned int nparams, const u32 *params,
3315 const u32 *val)
3316{
3317 struct fw_params_cmd c;
3318 __be32 *p = &c.param[0].mnem;
3319
3320 if (nparams > 7)
3321 return -EINVAL;
3322
3323 memset(&c, 0, sizeof(c));
e2ac9628
HS
3324 c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_PARAMS_CMD) |
3325 FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
5167865a
HS
3326 FW_PARAMS_CMD_PFN_V(pf) |
3327 FW_PARAMS_CMD_VFN_V(vf));
688848b1
AB
3328 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
3329
3330 while (nparams--) {
3331 *p++ = cpu_to_be32(*params++);
3332 *p++ = cpu_to_be32(*val++);
3333 }
3334
3335 return t4_wr_mbox_ns(adap, mbox, &c, sizeof(c), NULL);
3336}
3337
56d36be4
DM
3338/**
3339 * t4_set_params - sets FW or device parameters
3340 * @adap: the adapter
3341 * @mbox: mailbox to use for the FW command
3342 * @pf: the PF
3343 * @vf: the VF
3344 * @nparams: the number of parameters
3345 * @params: the parameter names
3346 * @val: the parameter values
3347 *
3348 * Sets the value of FW or device parameters. Up to 7 parameters can be
3349 * specified at once.
3350 */
3351int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
3352 unsigned int vf, unsigned int nparams, const u32 *params,
3353 const u32 *val)
3354{
3355 struct fw_params_cmd c;
3356 __be32 *p = &c.param[0].mnem;
3357
3358 if (nparams > 7)
3359 return -EINVAL;
3360
3361 memset(&c, 0, sizeof(c));
e2ac9628 3362 c.op_to_vfn = htonl(FW_CMD_OP_V(FW_PARAMS_CMD) | FW_CMD_REQUEST_F |
5167865a
HS
3363 FW_CMD_WRITE_F | FW_PARAMS_CMD_PFN_V(pf) |
3364 FW_PARAMS_CMD_VFN_V(vf));
56d36be4
DM
3365 c.retval_len16 = htonl(FW_LEN16(c));
3366 while (nparams--) {
3367 *p++ = htonl(*params++);
3368 *p++ = htonl(*val++);
3369 }
3370
3371 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
3372}
3373
3374/**
3375 * t4_cfg_pfvf - configure PF/VF resource limits
3376 * @adap: the adapter
3377 * @mbox: mailbox to use for the FW command
3378 * @pf: the PF being configured
3379 * @vf: the VF being configured
3380 * @txq: the max number of egress queues
3381 * @txq_eth_ctrl: the max number of egress Ethernet or control queues
3382 * @rxqi: the max number of interrupt-capable ingress queues
3383 * @rxq: the max number of interruptless ingress queues
3384 * @tc: the PCI traffic class
3385 * @vi: the max number of virtual interfaces
3386 * @cmask: the channel access rights mask for the PF/VF
3387 * @pmask: the port access rights mask for the PF/VF
3388 * @nexact: the maximum number of exact MPS filters
3389 * @rcaps: read capabilities
3390 * @wxcaps: write/execute capabilities
3391 *
3392 * Configures resource limits and capabilities for a physical or virtual
3393 * function.
3394 */
3395int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf,
3396 unsigned int vf, unsigned int txq, unsigned int txq_eth_ctrl,
3397 unsigned int rxqi, unsigned int rxq, unsigned int tc,
3398 unsigned int vi, unsigned int cmask, unsigned int pmask,
3399 unsigned int nexact, unsigned int rcaps, unsigned int wxcaps)
3400{
3401 struct fw_pfvf_cmd c;
3402
3403 memset(&c, 0, sizeof(c));
e2ac9628 3404 c.op_to_vfn = htonl(FW_CMD_OP_V(FW_PFVF_CMD) | FW_CMD_REQUEST_F |
5167865a
HS
3405 FW_CMD_WRITE_F | FW_PFVF_CMD_PFN_V(pf) |
3406 FW_PFVF_CMD_VFN_V(vf));
56d36be4 3407 c.retval_len16 = htonl(FW_LEN16(c));
5167865a
HS
3408 c.niqflint_niq = htonl(FW_PFVF_CMD_NIQFLINT_V(rxqi) |
3409 FW_PFVF_CMD_NIQ_V(rxq));
3410 c.type_to_neq = htonl(FW_PFVF_CMD_CMASK_V(cmask) |
3411 FW_PFVF_CMD_PMASK_V(pmask) |
3412 FW_PFVF_CMD_NEQ_V(txq));
3413 c.tc_to_nexactf = htonl(FW_PFVF_CMD_TC_V(tc) | FW_PFVF_CMD_NVI_V(vi) |
3414 FW_PFVF_CMD_NEXACTF_V(nexact));
3415 c.r_caps_to_nethctrl = htonl(FW_PFVF_CMD_R_CAPS_V(rcaps) |
3416 FW_PFVF_CMD_WX_CAPS_V(wxcaps) |
3417 FW_PFVF_CMD_NETHCTRL_V(txq_eth_ctrl));
56d36be4
DM
3418 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
3419}
3420
3421/**
3422 * t4_alloc_vi - allocate a virtual interface
3423 * @adap: the adapter
3424 * @mbox: mailbox to use for the FW command
3425 * @port: physical port associated with the VI
3426 * @pf: the PF owning the VI
3427 * @vf: the VF owning the VI
3428 * @nmac: number of MAC addresses needed (1 to 5)
3429 * @mac: the MAC addresses of the VI
3430 * @rss_size: size of RSS table slice associated with this VI
3431 *
3432 * Allocates a virtual interface for the given physical port. If @mac is
3433 * not %NULL it contains the MAC addresses of the VI as assigned by FW.
3434 * @mac should be large enough to hold @nmac Ethernet addresses, they are
3435 * stored consecutively so the space needed is @nmac * 6 bytes.
3436 * Returns a negative error number or the non-negative VI id.
3437 */
3438int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
3439 unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac,
3440 unsigned int *rss_size)
3441{
3442 int ret;
3443 struct fw_vi_cmd c;
3444
3445 memset(&c, 0, sizeof(c));
e2ac9628
HS
3446 c.op_to_vfn = htonl(FW_CMD_OP_V(FW_VI_CMD) | FW_CMD_REQUEST_F |
3447 FW_CMD_WRITE_F | FW_CMD_EXEC_F |
2b5fb1f2
HS
3448 FW_VI_CMD_PFN_V(pf) | FW_VI_CMD_VFN_V(vf));
3449 c.alloc_to_len16 = htonl(FW_VI_CMD_ALLOC_F | FW_LEN16(c));
3450 c.portid_pkd = FW_VI_CMD_PORTID_V(port);
56d36be4
DM
3451 c.nmac = nmac - 1;
3452
3453 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
3454 if (ret)
3455 return ret;
3456
3457 if (mac) {
3458 memcpy(mac, c.mac, sizeof(c.mac));
3459 switch (nmac) {
3460 case 5:
3461 memcpy(mac + 24, c.nmac3, sizeof(c.nmac3));
3462 case 4:
3463 memcpy(mac + 18, c.nmac2, sizeof(c.nmac2));
3464 case 3:
3465 memcpy(mac + 12, c.nmac1, sizeof(c.nmac1));
3466 case 2:
3467 memcpy(mac + 6, c.nmac0, sizeof(c.nmac0));
3468 }
3469 }
3470 if (rss_size)
2b5fb1f2
HS
3471 *rss_size = FW_VI_CMD_RSSSIZE_G(ntohs(c.rsssize_pkd));
3472 return FW_VI_CMD_VIID_G(ntohs(c.type_viid));
56d36be4
DM
3473}
3474
56d36be4
DM
3475/**
3476 * t4_set_rxmode - set Rx properties of a virtual interface
3477 * @adap: the adapter
3478 * @mbox: mailbox to use for the FW command
3479 * @viid: the VI id
3480 * @mtu: the new MTU or -1
3481 * @promisc: 1 to enable promiscuous mode, 0 to disable it, -1 no change
3482 * @all_multi: 1 to enable all-multi mode, 0 to disable it, -1 no change
3483 * @bcast: 1 to enable broadcast Rx, 0 to disable it, -1 no change
f8f5aafa 3484 * @vlanex: 1 to enable HW VLAN extraction, 0 to disable it, -1 no change
56d36be4
DM
3485 * @sleep_ok: if true we may sleep while awaiting command completion
3486 *
3487 * Sets Rx properties of a virtual interface.
3488 */
3489int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
f8f5aafa
DM
3490 int mtu, int promisc, int all_multi, int bcast, int vlanex,
3491 bool sleep_ok)
56d36be4
DM
3492{
3493 struct fw_vi_rxmode_cmd c;
3494
3495 /* convert to FW values */
3496 if (mtu < 0)
3497 mtu = FW_RXMODE_MTU_NO_CHG;
3498 if (promisc < 0)
2b5fb1f2 3499 promisc = FW_VI_RXMODE_CMD_PROMISCEN_M;
56d36be4 3500 if (all_multi < 0)
2b5fb1f2 3501 all_multi = FW_VI_RXMODE_CMD_ALLMULTIEN_M;
56d36be4 3502 if (bcast < 0)
2b5fb1f2 3503 bcast = FW_VI_RXMODE_CMD_BROADCASTEN_M;
f8f5aafa 3504 if (vlanex < 0)
2b5fb1f2 3505 vlanex = FW_VI_RXMODE_CMD_VLANEXEN_M;
56d36be4
DM
3506
3507 memset(&c, 0, sizeof(c));
e2ac9628 3508 c.op_to_viid = htonl(FW_CMD_OP_V(FW_VI_RXMODE_CMD) | FW_CMD_REQUEST_F |
2b5fb1f2 3509 FW_CMD_WRITE_F | FW_VI_RXMODE_CMD_VIID_V(viid));
56d36be4 3510 c.retval_len16 = htonl(FW_LEN16(c));
2b5fb1f2
HS
3511 c.mtu_to_vlanexen = htonl(FW_VI_RXMODE_CMD_MTU_V(mtu) |
3512 FW_VI_RXMODE_CMD_PROMISCEN_V(promisc) |
3513 FW_VI_RXMODE_CMD_ALLMULTIEN_V(all_multi) |
3514 FW_VI_RXMODE_CMD_BROADCASTEN_V(bcast) |
3515 FW_VI_RXMODE_CMD_VLANEXEN_V(vlanex));
56d36be4
DM
3516 return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
3517}
3518
3519/**
3520 * t4_alloc_mac_filt - allocates exact-match filters for MAC addresses
3521 * @adap: the adapter
3522 * @mbox: mailbox to use for the FW command
3523 * @viid: the VI id
3524 * @free: if true any existing filters for this VI id are first removed
3525 * @naddr: the number of MAC addresses to allocate filters for (up to 7)
3526 * @addr: the MAC address(es)
3527 * @idx: where to store the index of each allocated filter
3528 * @hash: pointer to hash address filter bitmap
3529 * @sleep_ok: call is allowed to sleep
3530 *
3531 * Allocates an exact-match filter for each of the supplied addresses and
3532 * sets it to the corresponding address. If @idx is not %NULL it should
3533 * have at least @naddr entries, each of which will be set to the index of
3534 * the filter allocated for the corresponding MAC address. If a filter
3535 * could not be allocated for an address its index is set to 0xffff.
3536 * If @hash is not %NULL addresses that fail to allocate an exact filter
3537 * are hashed and update the hash filter bitmap pointed at by @hash.
3538 *
3539 * Returns a negative error number or the number of filters allocated.
3540 */
3541int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox,
3542 unsigned int viid, bool free, unsigned int naddr,
3543 const u8 **addr, u16 *idx, u64 *hash, bool sleep_ok)
3544{
3545 int i, ret;
3546 struct fw_vi_mac_cmd c;
3547 struct fw_vi_mac_exact *p;
d14807dd 3548 unsigned int max_naddr = is_t4(adap->params.chip) ?
0a57a536
SR
3549 NUM_MPS_CLS_SRAM_L_INSTANCES :
3550 NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
56d36be4
DM
3551
3552 if (naddr > 7)
3553 return -EINVAL;
3554
3555 memset(&c, 0, sizeof(c));
e2ac9628
HS
3556 c.op_to_viid = htonl(FW_CMD_OP_V(FW_VI_MAC_CMD) | FW_CMD_REQUEST_F |
3557 FW_CMD_WRITE_F | (free ? FW_CMD_EXEC_F : 0) |
2b5fb1f2
HS
3558 FW_VI_MAC_CMD_VIID_V(viid));
3559 c.freemacs_to_len16 = htonl(FW_VI_MAC_CMD_FREEMACS_V(free) |
e2ac9628 3560 FW_CMD_LEN16_V((naddr + 2) / 2));
56d36be4
DM
3561
3562 for (i = 0, p = c.u.exact; i < naddr; i++, p++) {
2b5fb1f2
HS
3563 p->valid_to_idx = htons(FW_VI_MAC_CMD_VALID_F |
3564 FW_VI_MAC_CMD_IDX_V(FW_VI_MAC_ADD_MAC));
56d36be4
DM
3565 memcpy(p->macaddr, addr[i], sizeof(p->macaddr));
3566 }
3567
3568 ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok);
3569 if (ret)
3570 return ret;
3571
3572 for (i = 0, p = c.u.exact; i < naddr; i++, p++) {
2b5fb1f2 3573 u16 index = FW_VI_MAC_CMD_IDX_G(ntohs(p->valid_to_idx));
56d36be4
DM
3574
3575 if (idx)
0a57a536
SR
3576 idx[i] = index >= max_naddr ? 0xffff : index;
3577 if (index < max_naddr)
56d36be4
DM
3578 ret++;
3579 else if (hash)
ce9aeb58 3580 *hash |= (1ULL << hash_mac_addr(addr[i]));
56d36be4
DM
3581 }
3582 return ret;
3583}
3584
3585/**
3586 * t4_change_mac - modifies the exact-match filter for a MAC address
3587 * @adap: the adapter
3588 * @mbox: mailbox to use for the FW command
3589 * @viid: the VI id
3590 * @idx: index of existing filter for old value of MAC address, or -1
3591 * @addr: the new MAC address value
3592 * @persist: whether a new MAC allocation should be persistent
3593 * @add_smt: if true also add the address to the HW SMT
3594 *
3595 * Modifies an exact-match filter and sets it to the new MAC address.
3596 * Note that in general it is not possible to modify the value of a given
3597 * filter so the generic way to modify an address filter is to free the one
3598 * being used by the old address value and allocate a new filter for the
3599 * new address value. @idx can be -1 if the address is a new addition.
3600 *
3601 * Returns a negative error number or the index of the filter with the new
3602 * MAC value.
3603 */
3604int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
3605 int idx, const u8 *addr, bool persist, bool add_smt)
3606{
3607 int ret, mode;
3608 struct fw_vi_mac_cmd c;
3609 struct fw_vi_mac_exact *p = c.u.exact;
d14807dd 3610 unsigned int max_mac_addr = is_t4(adap->params.chip) ?
0a57a536
SR
3611 NUM_MPS_CLS_SRAM_L_INSTANCES :
3612 NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
56d36be4
DM
3613
3614 if (idx < 0) /* new allocation */
3615 idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC;
3616 mode = add_smt ? FW_VI_MAC_SMT_AND_MPSTCAM : FW_VI_MAC_MPS_TCAM_ENTRY;
3617
3618 memset(&c, 0, sizeof(c));
e2ac9628 3619 c.op_to_viid = htonl(FW_CMD_OP_V(FW_VI_MAC_CMD) | FW_CMD_REQUEST_F |
2b5fb1f2 3620 FW_CMD_WRITE_F | FW_VI_MAC_CMD_VIID_V(viid));
e2ac9628 3621 c.freemacs_to_len16 = htonl(FW_CMD_LEN16_V(1));
2b5fb1f2
HS
3622 p->valid_to_idx = htons(FW_VI_MAC_CMD_VALID_F |
3623 FW_VI_MAC_CMD_SMAC_RESULT_V(mode) |
3624 FW_VI_MAC_CMD_IDX_V(idx));
56d36be4
DM
3625 memcpy(p->macaddr, addr, sizeof(p->macaddr));
3626
3627 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
3628 if (ret == 0) {
2b5fb1f2 3629 ret = FW_VI_MAC_CMD_IDX_G(ntohs(p->valid_to_idx));
0a57a536 3630 if (ret >= max_mac_addr)
56d36be4
DM
3631 ret = -ENOMEM;
3632 }
3633 return ret;
3634}
3635
3636/**
3637 * t4_set_addr_hash - program the MAC inexact-match hash filter
3638 * @adap: the adapter
3639 * @mbox: mailbox to use for the FW command
3640 * @viid: the VI id
3641 * @ucast: whether the hash filter should also match unicast addresses
3642 * @vec: the value to be written to the hash filter
3643 * @sleep_ok: call is allowed to sleep
3644 *
3645 * Sets the 64-bit inexact-match hash filter for a virtual interface.
3646 */
3647int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid,
3648 bool ucast, u64 vec, bool sleep_ok)
3649{
3650 struct fw_vi_mac_cmd c;
3651
3652 memset(&c, 0, sizeof(c));
e2ac9628 3653 c.op_to_viid = htonl(FW_CMD_OP_V(FW_VI_MAC_CMD) | FW_CMD_REQUEST_F |
2b5fb1f2
HS
3654 FW_CMD_WRITE_F | FW_VI_ENABLE_CMD_VIID_V(viid));
3655 c.freemacs_to_len16 = htonl(FW_VI_MAC_CMD_HASHVECEN_F |
3656 FW_VI_MAC_CMD_HASHUNIEN_V(ucast) |
e2ac9628 3657 FW_CMD_LEN16_V(1));
56d36be4
DM
3658 c.u.hash.hashvec = cpu_to_be64(vec);
3659 return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
3660}
3661
688848b1
AB
3662/**
3663 * t4_enable_vi_params - enable/disable a virtual interface
3664 * @adap: the adapter
3665 * @mbox: mailbox to use for the FW command
3666 * @viid: the VI id
3667 * @rx_en: 1=enable Rx, 0=disable Rx
3668 * @tx_en: 1=enable Tx, 0=disable Tx
3669 * @dcb_en: 1=enable delivery of Data Center Bridging messages.
3670 *
3671 * Enables/disables a virtual interface. Note that setting DCB Enable
3672 * only makes sense when enabling a Virtual Interface ...
3673 */
3674int t4_enable_vi_params(struct adapter *adap, unsigned int mbox,
3675 unsigned int viid, bool rx_en, bool tx_en, bool dcb_en)
3676{
3677 struct fw_vi_enable_cmd c;
3678
3679 memset(&c, 0, sizeof(c));
e2ac9628 3680 c.op_to_viid = htonl(FW_CMD_OP_V(FW_VI_ENABLE_CMD) | FW_CMD_REQUEST_F |
2b5fb1f2 3681 FW_CMD_EXEC_F | FW_VI_ENABLE_CMD_VIID_V(viid));
688848b1 3682
2b5fb1f2
HS
3683 c.ien_to_len16 = htonl(FW_VI_ENABLE_CMD_IEN_V(rx_en) |
3684 FW_VI_ENABLE_CMD_EEN_V(tx_en) | FW_LEN16(c) |
3685 FW_VI_ENABLE_CMD_DCB_INFO_V(dcb_en));
30f00847 3686 return t4_wr_mbox_ns(adap, mbox, &c, sizeof(c), NULL);
688848b1
AB
3687}
3688
56d36be4
DM
3689/**
3690 * t4_enable_vi - enable/disable a virtual interface
3691 * @adap: the adapter
3692 * @mbox: mailbox to use for the FW command
3693 * @viid: the VI id
3694 * @rx_en: 1=enable Rx, 0=disable Rx
3695 * @tx_en: 1=enable Tx, 0=disable Tx
3696 *
3697 * Enables/disables a virtual interface.
3698 */
3699int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid,
3700 bool rx_en, bool tx_en)
3701{
688848b1 3702 return t4_enable_vi_params(adap, mbox, viid, rx_en, tx_en, 0);
56d36be4
DM
3703}
3704
3705/**
3706 * t4_identify_port - identify a VI's port by blinking its LED
3707 * @adap: the adapter
3708 * @mbox: mailbox to use for the FW command
3709 * @viid: the VI id
3710 * @nblinks: how many times to blink LED at 2.5 Hz
3711 *
3712 * Identifies a VI's port by blinking its LED.
3713 */
3714int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid,
3715 unsigned int nblinks)
3716{
3717 struct fw_vi_enable_cmd c;
3718
0062b15c 3719 memset(&c, 0, sizeof(c));
e2ac9628 3720 c.op_to_viid = htonl(FW_CMD_OP_V(FW_VI_ENABLE_CMD) | FW_CMD_REQUEST_F |
2b5fb1f2
HS
3721 FW_CMD_EXEC_F | FW_VI_ENABLE_CMD_VIID_V(viid));
3722 c.ien_to_len16 = htonl(FW_VI_ENABLE_CMD_LED_F | FW_LEN16(c));
56d36be4
DM
3723 c.blinkdur = htons(nblinks);
3724 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
56d36be4
DM
3725}
3726
3727/**
3728 * t4_iq_free - free an ingress queue and its FLs
3729 * @adap: the adapter
3730 * @mbox: mailbox to use for the FW command
3731 * @pf: the PF owning the queues
3732 * @vf: the VF owning the queues
3733 * @iqtype: the ingress queue type
3734 * @iqid: ingress queue id
3735 * @fl0id: FL0 queue id or 0xffff if no attached FL0
3736 * @fl1id: FL1 queue id or 0xffff if no attached FL1
3737 *
3738 * Frees an ingress queue and its associated FLs, if any.
3739 */
3740int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
3741 unsigned int vf, unsigned int iqtype, unsigned int iqid,
3742 unsigned int fl0id, unsigned int fl1id)
3743{
3744 struct fw_iq_cmd c;
3745
3746 memset(&c, 0, sizeof(c));
e2ac9628 3747 c.op_to_vfn = htonl(FW_CMD_OP_V(FW_IQ_CMD) | FW_CMD_REQUEST_F |
6e4b51a6
HS
3748 FW_CMD_EXEC_F | FW_IQ_CMD_PFN_V(pf) |
3749 FW_IQ_CMD_VFN_V(vf));
3750 c.alloc_to_len16 = htonl(FW_IQ_CMD_FREE_F | FW_LEN16(c));
3751 c.type_to_iqandstindex = htonl(FW_IQ_CMD_TYPE_V(iqtype));
56d36be4
DM
3752 c.iqid = htons(iqid);
3753 c.fl0id = htons(fl0id);
3754 c.fl1id = htons(fl1id);
3755 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
3756}
3757
3758/**
3759 * t4_eth_eq_free - free an Ethernet egress queue
3760 * @adap: the adapter
3761 * @mbox: mailbox to use for the FW command
3762 * @pf: the PF owning the queue
3763 * @vf: the VF owning the queue
3764 * @eqid: egress queue id
3765 *
3766 * Frees an Ethernet egress queue.
3767 */
3768int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
3769 unsigned int vf, unsigned int eqid)
3770{
3771 struct fw_eq_eth_cmd c;
3772
3773 memset(&c, 0, sizeof(c));
e2ac9628 3774 c.op_to_vfn = htonl(FW_CMD_OP_V(FW_EQ_ETH_CMD) | FW_CMD_REQUEST_F |
6e4b51a6
HS
3775 FW_CMD_EXEC_F | FW_EQ_ETH_CMD_PFN_V(pf) |
3776 FW_EQ_ETH_CMD_VFN_V(vf));
3777 c.alloc_to_len16 = htonl(FW_EQ_ETH_CMD_FREE_F | FW_LEN16(c));
3778 c.eqid_pkd = htonl(FW_EQ_ETH_CMD_EQID_V(eqid));
56d36be4
DM
3779 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
3780}
3781
3782/**
3783 * t4_ctrl_eq_free - free a control egress queue
3784 * @adap: the adapter
3785 * @mbox: mailbox to use for the FW command
3786 * @pf: the PF owning the queue
3787 * @vf: the VF owning the queue
3788 * @eqid: egress queue id
3789 *
3790 * Frees a control egress queue.
3791 */
3792int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
3793 unsigned int vf, unsigned int eqid)
3794{
3795 struct fw_eq_ctrl_cmd c;
3796
3797 memset(&c, 0, sizeof(c));
e2ac9628 3798 c.op_to_vfn = htonl(FW_CMD_OP_V(FW_EQ_CTRL_CMD) | FW_CMD_REQUEST_F |
6e4b51a6
HS
3799 FW_CMD_EXEC_F | FW_EQ_CTRL_CMD_PFN_V(pf) |
3800 FW_EQ_CTRL_CMD_VFN_V(vf));
3801 c.alloc_to_len16 = htonl(FW_EQ_CTRL_CMD_FREE_F | FW_LEN16(c));
3802 c.cmpliqid_eqid = htonl(FW_EQ_CTRL_CMD_EQID_V(eqid));
56d36be4
DM
3803 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
3804}
3805
3806/**
3807 * t4_ofld_eq_free - free an offload egress queue
3808 * @adap: the adapter
3809 * @mbox: mailbox to use for the FW command
3810 * @pf: the PF owning the queue
3811 * @vf: the VF owning the queue
3812 * @eqid: egress queue id
3813 *
3814 * Frees a control egress queue.
3815 */
3816int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
3817 unsigned int vf, unsigned int eqid)
3818{
3819 struct fw_eq_ofld_cmd c;
3820
3821 memset(&c, 0, sizeof(c));
e2ac9628 3822 c.op_to_vfn = htonl(FW_CMD_OP_V(FW_EQ_OFLD_CMD) | FW_CMD_REQUEST_F |
6e4b51a6
HS
3823 FW_CMD_EXEC_F | FW_EQ_OFLD_CMD_PFN_V(pf) |
3824 FW_EQ_OFLD_CMD_VFN_V(vf));
3825 c.alloc_to_len16 = htonl(FW_EQ_OFLD_CMD_FREE_F | FW_LEN16(c));
3826 c.eqid_pkd = htonl(FW_EQ_OFLD_CMD_EQID_V(eqid));
56d36be4
DM
3827 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
3828}
3829
3830/**
3831 * t4_handle_fw_rpl - process a FW reply message
3832 * @adap: the adapter
3833 * @rpl: start of the FW message
3834 *
3835 * Processes a FW message, such as link state change messages.
3836 */
3837int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl)
3838{
3839 u8 opcode = *(const u8 *)rpl;
3840
3841 if (opcode == FW_PORT_CMD) { /* link/module state change message */
3842 int speed = 0, fc = 0;
3843 const struct fw_port_cmd *p = (void *)rpl;
2b5fb1f2 3844 int chan = FW_PORT_CMD_PORTID_G(ntohl(p->op_to_portid));
56d36be4
DM
3845 int port = adap->chan_map[chan];
3846 struct port_info *pi = adap2pinfo(adap, port);
3847 struct link_config *lc = &pi->link_cfg;
3848 u32 stat = ntohl(p->u.info.lstatus_to_modtype);
2b5fb1f2
HS
3849 int link_ok = (stat & FW_PORT_CMD_LSTATUS_F) != 0;
3850 u32 mod = FW_PORT_CMD_MODTYPE_G(stat);
56d36be4 3851
2b5fb1f2 3852 if (stat & FW_PORT_CMD_RXPAUSE_F)
56d36be4 3853 fc |= PAUSE_RX;
2b5fb1f2 3854 if (stat & FW_PORT_CMD_TXPAUSE_F)
56d36be4 3855 fc |= PAUSE_TX;
2b5fb1f2 3856 if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_100M))
e8b39015 3857 speed = 100;
2b5fb1f2 3858 else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_1G))
e8b39015 3859 speed = 1000;
2b5fb1f2 3860 else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_10G))
e8b39015 3861 speed = 10000;
2b5fb1f2 3862 else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_40G))
e8b39015 3863 speed = 40000;
56d36be4
DM
3864
3865 if (link_ok != lc->link_ok || speed != lc->speed ||
3866 fc != lc->fc) { /* something changed */
3867 lc->link_ok = link_ok;
3868 lc->speed = speed;
3869 lc->fc = fc;
444018a7 3870 lc->supported = be16_to_cpu(p->u.info.pcap);
56d36be4
DM
3871 t4_os_link_changed(adap, port, link_ok);
3872 }
3873 if (mod != pi->mod_type) {
3874 pi->mod_type = mod;
3875 t4_os_portmod_changed(adap, port);
3876 }
3877 }
3878 return 0;
3879}
3880
1dd06ae8 3881static void get_pci_mode(struct adapter *adapter, struct pci_params *p)
56d36be4
DM
3882{
3883 u16 val;
56d36be4 3884
e5c8ae5f
JL
3885 if (pci_is_pcie(adapter->pdev)) {
3886 pcie_capability_read_word(adapter->pdev, PCI_EXP_LNKSTA, &val);
56d36be4
DM
3887 p->speed = val & PCI_EXP_LNKSTA_CLS;
3888 p->width = (val & PCI_EXP_LNKSTA_NLW) >> 4;
3889 }
3890}
3891
3892/**
3893 * init_link_config - initialize a link's SW state
3894 * @lc: structure holding the link state
3895 * @caps: link capabilities
3896 *
3897 * Initializes the SW state maintained for each link, including the link's
3898 * capabilities and default speed/flow-control/autonegotiation settings.
3899 */
1dd06ae8 3900static void init_link_config(struct link_config *lc, unsigned int caps)
56d36be4
DM
3901{
3902 lc->supported = caps;
3903 lc->requested_speed = 0;
3904 lc->speed = 0;
3905 lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
3906 if (lc->supported & FW_PORT_CAP_ANEG) {
3907 lc->advertising = lc->supported & ADVERT_MASK;
3908 lc->autoneg = AUTONEG_ENABLE;
3909 lc->requested_fc |= PAUSE_AUTONEG;
3910 } else {
3911 lc->advertising = 0;
3912 lc->autoneg = AUTONEG_DISABLE;
3913 }
3914}
3915
8203b509
HS
3916#define CIM_PF_NOACCESS 0xeeeeeeee
3917
3918int t4_wait_dev_ready(void __iomem *regs)
56d36be4 3919{
8203b509
HS
3920 u32 whoami;
3921
3922 whoami = readl(regs + PL_WHOAMI);
3923 if (whoami != 0xffffffff && whoami != CIM_PF_NOACCESS)
56d36be4 3924 return 0;
8203b509 3925
56d36be4 3926 msleep(500);
8203b509
HS
3927 whoami = readl(regs + PL_WHOAMI);
3928 return (whoami != 0xffffffff && whoami != CIM_PF_NOACCESS ? 0 : -EIO);
56d36be4
DM
3929}
3930
fe2ee139
HS
3931struct flash_desc {
3932 u32 vendor_and_model_id;
3933 u32 size_mb;
3934};
3935
91744948 3936static int get_flash_params(struct adapter *adap)
900a6596 3937{
fe2ee139
HS
3938 /* Table for non-Numonix supported flash parts. Numonix parts are left
3939 * to the preexisting code. All flash parts have 64KB sectors.
3940 */
3941 static struct flash_desc supported_flash[] = {
3942 { 0x150201, 4 << 20 }, /* Spansion 4MB S25FL032P */
3943 };
3944
900a6596
DM
3945 int ret;
3946 u32 info;
3947
3948 ret = sf1_write(adap, 1, 1, 0, SF_RD_ID);
3949 if (!ret)
3950 ret = sf1_read(adap, 3, 0, 1, &info);
3951 t4_write_reg(adap, SF_OP, 0); /* unlock SF */
3952 if (ret)
3953 return ret;
3954
fe2ee139
HS
3955 for (ret = 0; ret < ARRAY_SIZE(supported_flash); ++ret)
3956 if (supported_flash[ret].vendor_and_model_id == info) {
3957 adap->params.sf_size = supported_flash[ret].size_mb;
3958 adap->params.sf_nsec =
3959 adap->params.sf_size / SF_SEC_SIZE;
3960 return 0;
3961 }
3962
900a6596
DM
3963 if ((info & 0xff) != 0x20) /* not a Numonix flash */
3964 return -EINVAL;
3965 info >>= 16; /* log2 of size */
3966 if (info >= 0x14 && info < 0x18)
3967 adap->params.sf_nsec = 1 << (info - 16);
3968 else if (info == 0x18)
3969 adap->params.sf_nsec = 64;
3970 else
3971 return -EINVAL;
3972 adap->params.sf_size = 1 << info;
3973 adap->params.sf_fw_start =
3974 t4_read_reg(adap, CIM_BOOT_CFG) & BOOTADDR_MASK;
c290607e
HS
3975
3976 if (adap->params.sf_size < FLASH_MIN_SIZE)
3977 dev_warn(adap->pdev_dev, "WARNING!!! FLASH size %#x < %#x!!!\n",
3978 adap->params.sf_size, FLASH_MIN_SIZE);
900a6596
DM
3979 return 0;
3980}
3981
56d36be4
DM
3982/**
3983 * t4_prep_adapter - prepare SW and HW for operation
3984 * @adapter: the adapter
3985 * @reset: if true perform a HW reset
3986 *
3987 * Initialize adapter SW state for the various HW modules, set initial
3988 * values for some adapter tunables, take PHYs out of reset, and
3989 * initialize the MDIO interface.
3990 */
91744948 3991int t4_prep_adapter(struct adapter *adapter)
56d36be4 3992{
0a57a536
SR
3993 int ret, ver;
3994 uint16_t device_id;
d14807dd 3995 u32 pl_rev;
56d36be4 3996
56d36be4 3997 get_pci_mode(adapter, &adapter->params.pci);
d14807dd 3998 pl_rev = G_REV(t4_read_reg(adapter, PL_REV));
56d36be4 3999
900a6596
DM
4000 ret = get_flash_params(adapter);
4001 if (ret < 0) {
4002 dev_err(adapter->pdev_dev, "error %d identifying flash\n", ret);
4003 return ret;
4004 }
4005
0a57a536
SR
4006 /* Retrieve adapter's device ID
4007 */
4008 pci_read_config_word(adapter->pdev, PCI_DEVICE_ID, &device_id);
4009 ver = device_id >> 12;
d14807dd 4010 adapter->params.chip = 0;
0a57a536
SR
4011 switch (ver) {
4012 case CHELSIO_T4:
d14807dd 4013 adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T4, pl_rev);
0a57a536
SR
4014 break;
4015 case CHELSIO_T5:
d14807dd 4016 adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T5, pl_rev);
0a57a536
SR
4017 break;
4018 default:
4019 dev_err(adapter->pdev_dev, "Device %d is not supported\n",
4020 device_id);
4021 return -EINVAL;
4022 }
4023
56d36be4
DM
4024 init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);
4025
4026 /*
4027 * Default port for debugging in case we can't reach FW.
4028 */
4029 adapter->params.nports = 1;
4030 adapter->params.portvec = 1;
636f9d37 4031 adapter->params.vpd.cclk = 50000;
56d36be4
DM
4032 return 0;
4033}
4034
e85c9a7a 4035/**
dd0bcc0b 4036 * cxgb4_t4_bar2_sge_qregs - return BAR2 SGE Queue register information
e85c9a7a
HS
4037 * @adapter: the adapter
4038 * @qid: the Queue ID
4039 * @qtype: the Ingress or Egress type for @qid
4040 * @pbar2_qoffset: BAR2 Queue Offset
4041 * @pbar2_qid: BAR2 Queue ID or 0 for Queue ID inferred SGE Queues
4042 *
4043 * Returns the BAR2 SGE Queue Registers information associated with the
4044 * indicated Absolute Queue ID. These are passed back in return value
4045 * pointers. @qtype should be T4_BAR2_QTYPE_EGRESS for Egress Queue
4046 * and T4_BAR2_QTYPE_INGRESS for Ingress Queues.
4047 *
4048 * This may return an error which indicates that BAR2 SGE Queue
4049 * registers aren't available. If an error is not returned, then the
4050 * following values are returned:
4051 *
4052 * *@pbar2_qoffset: the BAR2 Offset of the @qid Registers
4053 * *@pbar2_qid: the BAR2 SGE Queue ID or 0 of @qid
4054 *
4055 * If the returned BAR2 Queue ID is 0, then BAR2 SGE registers which
4056 * require the "Inferred Queue ID" ability may be used. E.g. the
4057 * Write Combining Doorbell Buffer. If the BAR2 Queue ID is not 0,
4058 * then these "Inferred Queue ID" register may not be used.
4059 */
dd0bcc0b 4060int cxgb4_t4_bar2_sge_qregs(struct adapter *adapter,
e85c9a7a
HS
4061 unsigned int qid,
4062 enum t4_bar2_qtype qtype,
4063 u64 *pbar2_qoffset,
4064 unsigned int *pbar2_qid)
4065{
4066 unsigned int page_shift, page_size, qpp_shift, qpp_mask;
4067 u64 bar2_page_offset, bar2_qoffset;
4068 unsigned int bar2_qid, bar2_qid_offset, bar2_qinferred;
4069
4070 /* T4 doesn't support BAR2 SGE Queue registers.
4071 */
4072 if (is_t4(adapter->params.chip))
4073 return -EINVAL;
4074
4075 /* Get our SGE Page Size parameters.
4076 */
4077 page_shift = adapter->params.sge.hps + 10;
4078 page_size = 1 << page_shift;
4079
4080 /* Get the right Queues per Page parameters for our Queue.
4081 */
4082 qpp_shift = (qtype == T4_BAR2_QTYPE_EGRESS
4083 ? adapter->params.sge.eq_qpp
4084 : adapter->params.sge.iq_qpp);
4085 qpp_mask = (1 << qpp_shift) - 1;
4086
4087 /* Calculate the basics of the BAR2 SGE Queue register area:
4088 * o The BAR2 page the Queue registers will be in.
4089 * o The BAR2 Queue ID.
4090 * o The BAR2 Queue ID Offset into the BAR2 page.
4091 */
4092 bar2_page_offset = ((qid >> qpp_shift) << page_shift);
4093 bar2_qid = qid & qpp_mask;
4094 bar2_qid_offset = bar2_qid * SGE_UDB_SIZE;
4095
4096 /* If the BAR2 Queue ID Offset is less than the Page Size, then the
4097 * hardware will infer the Absolute Queue ID simply from the writes to
4098 * the BAR2 Queue ID Offset within the BAR2 Page (and we need to use a
4099 * BAR2 Queue ID of 0 for those writes). Otherwise, we'll simply
4100 * write to the first BAR2 SGE Queue Area within the BAR2 Page with
4101 * the BAR2 Queue ID and the hardware will infer the Absolute Queue ID
4102 * from the BAR2 Page and BAR2 Queue ID.
4103 *
4104 * One important censequence of this is that some BAR2 SGE registers
4105 * have a "Queue ID" field and we can write the BAR2 SGE Queue ID
4106 * there. But other registers synthesize the SGE Queue ID purely
4107 * from the writes to the registers -- the Write Combined Doorbell
4108 * Buffer is a good example. These BAR2 SGE Registers are only
4109 * available for those BAR2 SGE Register areas where the SGE Absolute
4110 * Queue ID can be inferred from simple writes.
4111 */
4112 bar2_qoffset = bar2_page_offset;
4113 bar2_qinferred = (bar2_qid_offset < page_size);
4114 if (bar2_qinferred) {
4115 bar2_qoffset += bar2_qid_offset;
4116 bar2_qid = 0;
4117 }
4118
4119 *pbar2_qoffset = bar2_qoffset;
4120 *pbar2_qid = bar2_qid;
4121 return 0;
4122}
4123
4124/**
4125 * t4_init_sge_params - initialize adap->params.sge
4126 * @adapter: the adapter
4127 *
4128 * Initialize various fields of the adapter's SGE Parameters structure.
4129 */
4130int t4_init_sge_params(struct adapter *adapter)
4131{
4132 struct sge_params *sge_params = &adapter->params.sge;
4133 u32 hps, qpp;
4134 unsigned int s_hps, s_qpp;
4135
4136 /* Extract the SGE Page Size for our PF.
4137 */
f612b815 4138 hps = t4_read_reg(adapter, SGE_HOST_PAGE_SIZE_A);
e85c9a7a
HS
4139 s_hps = (HOSTPAGESIZEPF0_S +
4140 (HOSTPAGESIZEPF1_S - HOSTPAGESIZEPF0_S) * adapter->fn);
4141 sge_params->hps = ((hps >> s_hps) & HOSTPAGESIZEPF0_M);
4142
4143 /* Extract the SGE Egress and Ingess Queues Per Page for our PF.
4144 */
4145 s_qpp = (QUEUESPERPAGEPF0_S +
4146 (QUEUESPERPAGEPF1_S - QUEUESPERPAGEPF0_S) * adapter->fn);
f612b815
HS
4147 qpp = t4_read_reg(adapter, SGE_EGRESS_QUEUES_PER_PAGE_PF_A);
4148 sge_params->eq_qpp = ((qpp >> s_qpp) & QUEUESPERPAGEPF0_M);
e85c9a7a 4149 qpp = t4_read_reg(adapter, SGE_INGRESS_QUEUES_PER_PAGE_PF);
f612b815 4150 sge_params->iq_qpp = ((qpp >> s_qpp) & QUEUESPERPAGEPF0_M);
e85c9a7a
HS
4151
4152 return 0;
4153}
4154
dcf7b6f5
KS
4155/**
4156 * t4_init_tp_params - initialize adap->params.tp
4157 * @adap: the adapter
4158 *
4159 * Initialize various fields of the adapter's TP Parameters structure.
4160 */
4161int t4_init_tp_params(struct adapter *adap)
4162{
4163 int chan;
4164 u32 v;
4165
4166 v = t4_read_reg(adap, TP_TIMER_RESOLUTION);
4167 adap->params.tp.tre = TIMERRESOLUTION_GET(v);
4168 adap->params.tp.dack_re = DELAYEDACKRESOLUTION_GET(v);
4169
4170 /* MODQ_REQ_MAP defaults to setting queues 0-3 to chan 0-3 */
4171 for (chan = 0; chan < NCHAN; chan++)
4172 adap->params.tp.tx_modq[chan] = chan;
4173
4174 /* Cache the adapter's Compressed Filter Mode and global Incress
4175 * Configuration.
4176 */
4177 t4_read_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
4178 &adap->params.tp.vlan_pri_map, 1,
4179 TP_VLAN_PRI_MAP);
4180 t4_read_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
4181 &adap->params.tp.ingress_config, 1,
4182 TP_INGRESS_CONFIG);
4183
4184 /* Now that we have TP_VLAN_PRI_MAP cached, we can calculate the field
4185 * shift positions of several elements of the Compressed Filter Tuple
4186 * for this adapter which we need frequently ...
4187 */
4188 adap->params.tp.vlan_shift = t4_filter_field_shift(adap, F_VLAN);
4189 adap->params.tp.vnic_shift = t4_filter_field_shift(adap, F_VNIC_ID);
4190 adap->params.tp.port_shift = t4_filter_field_shift(adap, F_PORT);
4191 adap->params.tp.protocol_shift = t4_filter_field_shift(adap,
4192 F_PROTOCOL);
4193
4194 /* If TP_INGRESS_CONFIG.VNID == 0, then TP_VLAN_PRI_MAP.VNIC_ID
4195 * represents the presense of an Outer VLAN instead of a VNIC ID.
4196 */
4197 if ((adap->params.tp.ingress_config & F_VNIC) == 0)
4198 adap->params.tp.vnic_shift = -1;
4199
4200 return 0;
4201}
4202
4203/**
4204 * t4_filter_field_shift - calculate filter field shift
4205 * @adap: the adapter
4206 * @filter_sel: the desired field (from TP_VLAN_PRI_MAP bits)
4207 *
4208 * Return the shift position of a filter field within the Compressed
4209 * Filter Tuple. The filter field is specified via its selection bit
4210 * within TP_VLAN_PRI_MAL (filter mode). E.g. F_VLAN.
4211 */
4212int t4_filter_field_shift(const struct adapter *adap, int filter_sel)
4213{
4214 unsigned int filter_mode = adap->params.tp.vlan_pri_map;
4215 unsigned int sel;
4216 int field_shift;
4217
4218 if ((filter_mode & filter_sel) == 0)
4219 return -1;
4220
4221 for (sel = 1, field_shift = 0; sel < filter_sel; sel <<= 1) {
4222 switch (filter_mode & sel) {
4223 case F_FCOE:
4224 field_shift += W_FT_FCOE;
4225 break;
4226 case F_PORT:
4227 field_shift += W_FT_PORT;
4228 break;
4229 case F_VNIC_ID:
4230 field_shift += W_FT_VNIC_ID;
4231 break;
4232 case F_VLAN:
4233 field_shift += W_FT_VLAN;
4234 break;
4235 case F_TOS:
4236 field_shift += W_FT_TOS;
4237 break;
4238 case F_PROTOCOL:
4239 field_shift += W_FT_PROTOCOL;
4240 break;
4241 case F_ETHERTYPE:
4242 field_shift += W_FT_ETHERTYPE;
4243 break;
4244 case F_MACMATCH:
4245 field_shift += W_FT_MACMATCH;
4246 break;
4247 case F_MPSHITTYPE:
4248 field_shift += W_FT_MPSHITTYPE;
4249 break;
4250 case F_FRAGMENTATION:
4251 field_shift += W_FT_FRAGMENTATION;
4252 break;
4253 }
4254 }
4255 return field_shift;
4256}
4257
91744948 4258int t4_port_init(struct adapter *adap, int mbox, int pf, int vf)
56d36be4
DM
4259{
4260 u8 addr[6];
4261 int ret, i, j = 0;
4262 struct fw_port_cmd c;
f796564a 4263 struct fw_rss_vi_config_cmd rvc;
56d36be4
DM
4264
4265 memset(&c, 0, sizeof(c));
f796564a 4266 memset(&rvc, 0, sizeof(rvc));
56d36be4
DM
4267
4268 for_each_port(adap, i) {
4269 unsigned int rss_size;
4270 struct port_info *p = adap2pinfo(adap, i);
4271
4272 while ((adap->params.portvec & (1 << j)) == 0)
4273 j++;
4274
e2ac9628
HS
4275 c.op_to_portid = htonl(FW_CMD_OP_V(FW_PORT_CMD) |
4276 FW_CMD_REQUEST_F | FW_CMD_READ_F |
2b5fb1f2 4277 FW_PORT_CMD_PORTID_V(j));
56d36be4 4278 c.action_to_len16 = htonl(
2b5fb1f2 4279 FW_PORT_CMD_ACTION_V(FW_PORT_ACTION_GET_PORT_INFO) |
56d36be4
DM
4280 FW_LEN16(c));
4281 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
4282 if (ret)
4283 return ret;
4284
4285 ret = t4_alloc_vi(adap, mbox, j, pf, vf, 1, addr, &rss_size);
4286 if (ret < 0)
4287 return ret;
4288
4289 p->viid = ret;
4290 p->tx_chan = j;
4291 p->lport = j;
4292 p->rss_size = rss_size;
4293 memcpy(adap->port[i]->dev_addr, addr, ETH_ALEN);
40c9f8ab 4294 adap->port[i]->dev_port = j;
56d36be4
DM
4295
4296 ret = ntohl(c.u.info.lstatus_to_modtype);
2b5fb1f2
HS
4297 p->mdio_addr = (ret & FW_PORT_CMD_MDIOCAP_F) ?
4298 FW_PORT_CMD_MDIOADDR_G(ret) : -1;
4299 p->port_type = FW_PORT_CMD_PTYPE_G(ret);
a0881cab 4300 p->mod_type = FW_PORT_MOD_TYPE_NA;
56d36be4 4301
e2ac9628
HS
4302 rvc.op_to_viid = htonl(FW_CMD_OP_V(FW_RSS_VI_CONFIG_CMD) |
4303 FW_CMD_REQUEST_F | FW_CMD_READ_F |
f796564a
DM
4304 FW_RSS_VI_CONFIG_CMD_VIID(p->viid));
4305 rvc.retval_len16 = htonl(FW_LEN16(rvc));
4306 ret = t4_wr_mbox(adap, mbox, &rvc, sizeof(rvc), &rvc);
4307 if (ret)
4308 return ret;
4309 p->rss_mode = ntohl(rvc.u.basicvirtual.defaultq_to_udpen);
4310
56d36be4
DM
4311 init_link_config(&p->link_cfg, ntohs(c.u.info.pcap));
4312 j++;
4313 }
4314 return 0;
4315}