/* drivers/net/ethernet/chelsio/cxgb4/t4_hw.c */

/*
 * This file is part of the Chelsio T4 Ethernet driver for Linux.
 *
 * Copyright (c) 2003-2014 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/delay.h>
#include "cxgb4.h"
#include "t4_regs.h"
#include "t4fw_api.h"

/**
 *	t4_wait_op_done_val - wait until an operation is completed
 *	@adapter: the adapter performing the operation
 *	@reg: the register to check for completion
 *	@mask: a single-bit field within @reg that indicates completion
 *	@polarity: the value of the field when the operation is completed
 *	@attempts: number of check iterations
 *	@delay: delay in usecs between iterations
 *	@valp: where to store the value of the register at completion time
 *
 *	Wait until an operation is completed by checking a bit in a register
 *	up to @attempts times.  If @valp is not NULL the value of the register
 *	at the time it indicated completion is stored there.  Returns 0 if the
 *	operation completes and -EAGAIN otherwise.
 */
static int t4_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
			       int polarity, int attempts, int delay, u32 *valp)
{
	while (1) {
		u32 val = t4_read_reg(adapter, reg);

		if (!!(val & mask) == polarity) {
			if (valp)
				*valp = val;
			return 0;
		}
		if (--attempts == 0)
			return -EAGAIN;
		if (delay)
			udelay(delay);
	}
}

static inline int t4_wait_op_done(struct adapter *adapter, int reg, u32 mask,
				  int polarity, int attempts, int delay)
{
	return t4_wait_op_done_val(adapter, reg, mask, polarity, attempts,
				   delay, NULL);
}

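/* Usage sketch (illustrative only, not part of the driver): poll a
 * hypothetical status register until its completion bit (bit 0) reads 1,
 * trying up to 100 times with 5 usecs between polls:
 *
 *	u32 val;
 *
 *	if (t4_wait_op_done_val(adap, STATUS_REG, 0x1, 1, 100, 5, &val))
 *		dev_warn(adap->pdev_dev, "operation timed out\n");
 *
 * STATUS_REG is a placeholder register name.
 */
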
/**
 *	t4_set_reg_field - set a register field to a value
 *	@adapter: the adapter to program
 *	@addr: the register address
 *	@mask: specifies the portion of the register to modify
 *	@val: the new value for the register field
 *
 *	Sets a register field specified by the supplied mask to the
 *	given value.
 */
void t4_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask,
		      u32 val)
{
	u32 v = t4_read_reg(adapter, addr) & ~mask;

	t4_write_reg(adapter, addr, v | val);
	(void) t4_read_reg(adapter, addr);                  /* flush */
}

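/* Example (illustrative): clear a 4-bit field at bits 7:4 of a hypothetical
 * control register and set it to 0x3.  Note that @val must be pre-shifted to
 * line up with @mask:
 *
 *	t4_set_reg_field(adap, CTRL_REG, 0xf0, 0x3 << 4);
 *
 * CTRL_REG is a placeholder register name.
 */
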
/**
 *	t4_read_indirect - read indirectly addressed registers
 *	@adap: the adapter
 *	@addr_reg: register holding the indirect address
 *	@data_reg: register holding the value of the indirect register
 *	@vals: where the read register values are stored
 *	@nregs: how many indirect registers to read
 *	@start_idx: index of first indirect register to read
 *
 *	Reads registers that are accessed indirectly through an address/data
 *	register pair.
 */
void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
		      unsigned int data_reg, u32 *vals,
		      unsigned int nregs, unsigned int start_idx)
{
	while (nregs--) {
		t4_write_reg(adap, addr_reg, start_idx);
		*vals++ = t4_read_reg(adap, data_reg);
		start_idx++;
	}
}

/**
 *	t4_write_indirect - write indirectly addressed registers
 *	@adap: the adapter
 *	@addr_reg: register holding the indirect addresses
 *	@data_reg: register holding the value for the indirect registers
 *	@vals: values to write
 *	@nregs: how many indirect registers to write
 *	@start_idx: address of first indirect register to write
 *
 *	Writes a sequential block of registers that are accessed indirectly
 *	through an address/data register pair.
 */
void t4_write_indirect(struct adapter *adap, unsigned int addr_reg,
		       unsigned int data_reg, const u32 *vals,
		       unsigned int nregs, unsigned int start_idx)
{
	while (nregs--) {
		t4_write_reg(adap, addr_reg, start_idx++);
		t4_write_reg(adap, data_reg, *vals++);
	}
}

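/* Illustrative use of the address/data pair: read four consecutive
 * indirectly addressed registers starting at index 8, assuming hypothetical
 * ADDR_REG/DATA_REG window registers:
 *
 *	u32 vals[4];
 *
 *	t4_read_indirect(adap, ADDR_REG, DATA_REG, vals, 4, 8);
 */
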
/*
 * Read a 32-bit PCI Configuration Space register via the PCI-E backdoor
 * mechanism.  This guarantees that we get the real value even if we're
 * operating within a Virtual Machine and the Hypervisor is trapping our
 * Configuration Space accesses.
 */
void t4_hw_pci_read_cfg4(struct adapter *adap, int reg, u32 *val)
{
	u32 req = ENABLE | FUNCTION(adap->fn) | reg;

	if (is_t4(adap->params.chip))
		req |= F_LOCALCFG;

	t4_write_reg(adap, PCIE_CFG_SPACE_REQ, req);
	*val = t4_read_reg(adap, PCIE_CFG_SPACE_DATA);

	/* Reset ENABLE to 0 so reads of PCIE_CFG_SPACE_DATA won't cause a
	 * Configuration Space read.  (None of the other fields matter when
	 * ENABLE is 0 so a simple register write is easier than a
	 * read-modify-write via t4_set_reg_field().)
	 */
	t4_write_reg(adap, PCIE_CFG_SPACE_REQ, 0);
}

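/* Illustrative call: fetch the vendor/device ID dword at config space
 * offset 0 through the backdoor, bypassing any hypervisor trapping:
 *
 *	u32 ids;
 *
 *	t4_hw_pci_read_cfg4(adap, PCI_VENDOR_ID, &ids);
 */
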
/*
 * t4_report_fw_error - report firmware error
 * @adap: the adapter
 *
 * The adapter firmware can indicate error conditions to the host.
 * If the firmware has indicated an error, print out the reason for
 * the firmware error.
 */
static void t4_report_fw_error(struct adapter *adap)
{
	static const char *const reason[] = {
		"Crash",                        /* PCIE_FW_EVAL_CRASH */
		"During Device Preparation",    /* PCIE_FW_EVAL_PREP */
		"During Device Configuration",  /* PCIE_FW_EVAL_CONF */
		"During Device Initialization", /* PCIE_FW_EVAL_INIT */
		"Unexpected Event",             /* PCIE_FW_EVAL_UNEXPECTEDEVENT */
		"Insufficient Airflow",         /* PCIE_FW_EVAL_OVERHEAT */
		"Device Shutdown",              /* PCIE_FW_EVAL_DEVICESHUTDOWN */
		"Reserved",                     /* reserved */
	};
	u32 pcie_fw;

	pcie_fw = t4_read_reg(adap, MA_PCIE_FW);
	if (pcie_fw & PCIE_FW_ERR)
		dev_err(adap->pdev_dev, "Firmware reports adapter error: %s\n",
			reason[PCIE_FW_EVAL_G(pcie_fw)]);
}

/*
 * Get the reply to a mailbox command and store it in @rpl in big-endian order.
 */
static void get_mbox_rpl(struct adapter *adap, __be64 *rpl, int nflit,
			 u32 mbox_addr)
{
	for ( ; nflit; nflit--, mbox_addr += 8)
		*rpl++ = cpu_to_be64(t4_read_reg64(adap, mbox_addr));
}

/*
 * Handle a FW assertion reported in a mailbox.
 */
static void fw_asrt(struct adapter *adap, u32 mbox_addr)
{
	struct fw_debug_cmd asrt;

	get_mbox_rpl(adap, (__be64 *)&asrt, sizeof(asrt) / 8, mbox_addr);
	dev_alert(adap->pdev_dev,
		  "FW assertion at %.16s:%u, val0 %#x, val1 %#x\n",
		  asrt.u.assert.filename_0_7, ntohl(asrt.u.assert.line),
		  ntohl(asrt.u.assert.x), ntohl(asrt.u.assert.y));
}

static void dump_mbox(struct adapter *adap, int mbox, u32 data_reg)
{
	dev_err(adap->pdev_dev,
		"mbox %d: %llx %llx %llx %llx %llx %llx %llx %llx\n", mbox,
		(unsigned long long)t4_read_reg64(adap, data_reg),
		(unsigned long long)t4_read_reg64(adap, data_reg + 8),
		(unsigned long long)t4_read_reg64(adap, data_reg + 16),
		(unsigned long long)t4_read_reg64(adap, data_reg + 24),
		(unsigned long long)t4_read_reg64(adap, data_reg + 32),
		(unsigned long long)t4_read_reg64(adap, data_reg + 40),
		(unsigned long long)t4_read_reg64(adap, data_reg + 48),
		(unsigned long long)t4_read_reg64(adap, data_reg + 56));
}

/**
 *	t4_wr_mbox_meat - send a command to FW through the given mailbox
 *	@adap: the adapter
 *	@mbox: index of the mailbox to use
 *	@cmd: the command to write
 *	@size: command length in bytes
 *	@rpl: where to optionally store the reply
 *	@sleep_ok: if true we may sleep while awaiting command completion
 *
 *	Sends the given command to FW through the selected mailbox and waits
 *	for the FW to execute the command.  If @rpl is not %NULL it is used to
 *	store the FW's reply to the command.  The command and its optional
 *	reply are of the same length.  FW can take up to %FW_CMD_MAX_TIMEOUT ms
 *	to respond.  @sleep_ok determines whether we may sleep while awaiting
 *	the response.  If sleeping is allowed we use progressive backoff
 *	otherwise we spin.
 *
 *	The return value is 0 on success or a negative errno on failure.  A
 *	failure can happen either because we are not able to execute the
 *	command or FW executes it but signals an error.  In the latter case
 *	the return value is the error code indicated by FW (negated).
 */
int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
		    void *rpl, bool sleep_ok)
{
	static const int delay[] = {
		1, 1, 3, 5, 10, 10, 20, 50, 100, 200
	};

	u32 v;
	u64 res;
	int i, ms, delay_idx;
	const __be64 *p = cmd;
	u32 data_reg = PF_REG(mbox, CIM_PF_MAILBOX_DATA);
	u32 ctl_reg = PF_REG(mbox, CIM_PF_MAILBOX_CTRL);

	if ((size & 15) || size > MBOX_LEN)
		return -EINVAL;

	/*
	 * If the device is off-line, as in EEH, commands will time out.
	 * Fail them early so we don't waste time waiting.
	 */
	if (adap->pdev->error_state != pci_channel_io_normal)
		return -EIO;

	v = MBOWNER_GET(t4_read_reg(adap, ctl_reg));
	for (i = 0; v == MBOX_OWNER_NONE && i < 3; i++)
		v = MBOWNER_GET(t4_read_reg(adap, ctl_reg));

	if (v != MBOX_OWNER_DRV)
		return v ? -EBUSY : -ETIMEDOUT;

	for (i = 0; i < size; i += 8)
		t4_write_reg64(adap, data_reg + i, be64_to_cpu(*p++));

	t4_write_reg(adap, ctl_reg, MBMSGVALID | MBOWNER(MBOX_OWNER_FW));
	t4_read_reg(adap, ctl_reg);          /* flush write */

	delay_idx = 0;
	ms = delay[0];

	for (i = 0; i < FW_CMD_MAX_TIMEOUT; i += ms) {
		if (sleep_ok) {
			ms = delay[delay_idx];  /* last element may repeat */
			if (delay_idx < ARRAY_SIZE(delay) - 1)
				delay_idx++;
			msleep(ms);
		} else
			mdelay(ms);

		v = t4_read_reg(adap, ctl_reg);
		if (MBOWNER_GET(v) == MBOX_OWNER_DRV) {
			if (!(v & MBMSGVALID)) {
				t4_write_reg(adap, ctl_reg, 0);
				continue;
			}

			res = t4_read_reg64(adap, data_reg);
			if (FW_CMD_OP_G(res >> 32) == FW_DEBUG_CMD) {
				fw_asrt(adap, data_reg);
				res = FW_CMD_RETVAL_V(EIO);
			} else if (rpl) {
				get_mbox_rpl(adap, rpl, size / 8, data_reg);
			}

			if (FW_CMD_RETVAL_G((int)res))
				dump_mbox(adap, mbox, data_reg);
			t4_write_reg(adap, ctl_reg, 0);
			return -FW_CMD_RETVAL_G((int)res);
		}
	}

	dump_mbox(adap, mbox, data_reg);
	dev_err(adap->pdev_dev, "command %#x in mailbox %d timed out\n",
		*(const u8 *)cmd, mbox);
	t4_report_fw_error(adap);
	return -ETIMEDOUT;
}

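/* Callers build a firmware command structure, fill it in big-endian order,
 * and hand it to the mailbox; see t4_restart_aneg() later in this file for
 * a minimal real example.  Sketch of the shape:
 *
 *	struct fw_port_cmd c;
 *
 *	memset(&c, 0, sizeof(c));
 *	... fill in c's fields in big-endian order ...
 *	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
 *
 * where t4_wr_mbox() is the sleeping wrapper around t4_wr_mbox_meat().
 */
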
/**
 *	t4_mc_read - read from MC through backdoor accesses
 *	@adap: the adapter
 *	@idx: which MC to access
 *	@addr: address of first byte requested
 *	@data: 64 bytes of data containing the requested address
 *	@ecc: where to store the corresponding 64-bit ECC word
 *
 *	Read 64 bytes of data from MC starting at a 64-byte-aligned address
 *	that covers the requested address @addr.  If @ecc is not %NULL it
 *	is assigned the 64-bit ECC word for the read data.
 */
int t4_mc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
{
	int i;
	u32 mc_bist_cmd, mc_bist_cmd_addr, mc_bist_cmd_len;
	u32 mc_bist_status_rdata, mc_bist_data_pattern;

	if (is_t4(adap->params.chip)) {
		mc_bist_cmd = MC_BIST_CMD;
		mc_bist_cmd_addr = MC_BIST_CMD_ADDR;
		mc_bist_cmd_len = MC_BIST_CMD_LEN;
		mc_bist_status_rdata = MC_BIST_STATUS_RDATA;
		mc_bist_data_pattern = MC_BIST_DATA_PATTERN;
	} else {
		mc_bist_cmd = MC_REG(MC_P_BIST_CMD, idx);
		mc_bist_cmd_addr = MC_REG(MC_P_BIST_CMD_ADDR, idx);
		mc_bist_cmd_len = MC_REG(MC_P_BIST_CMD_LEN, idx);
		mc_bist_status_rdata = MC_REG(MC_P_BIST_STATUS_RDATA, idx);
		mc_bist_data_pattern = MC_REG(MC_P_BIST_DATA_PATTERN, idx);
	}

	if (t4_read_reg(adap, mc_bist_cmd) & START_BIST)
		return -EBUSY;
	t4_write_reg(adap, mc_bist_cmd_addr, addr & ~0x3fU);
	t4_write_reg(adap, mc_bist_cmd_len, 64);
	t4_write_reg(adap, mc_bist_data_pattern, 0xc);
	t4_write_reg(adap, mc_bist_cmd, BIST_OPCODE(1) | START_BIST |
		     BIST_CMD_GAP(1));
	i = t4_wait_op_done(adap, mc_bist_cmd, START_BIST, 0, 10, 1);
	if (i)
		return i;

#define MC_DATA(i) MC_BIST_STATUS_REG(mc_bist_status_rdata, i)

	for (i = 15; i >= 0; i--)
		*data++ = htonl(t4_read_reg(adap, MC_DATA(i)));
	if (ecc)
		*ecc = t4_read_reg64(adap, MC_DATA(16));
#undef MC_DATA
	return 0;
}

/**
 *	t4_edc_read - read from EDC through backdoor accesses
 *	@adap: the adapter
 *	@idx: which EDC to access
 *	@addr: address of first byte requested
 *	@data: 64 bytes of data containing the requested address
 *	@ecc: where to store the corresponding 64-bit ECC word
 *
 *	Read 64 bytes of data from EDC starting at a 64-byte-aligned address
 *	that covers the requested address @addr.  If @ecc is not %NULL it
 *	is assigned the 64-bit ECC word for the read data.
 */
int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
{
	int i;
	u32 edc_bist_cmd, edc_bist_cmd_addr, edc_bist_cmd_len;
	u32 edc_bist_cmd_data_pattern, edc_bist_status_rdata;

	if (is_t4(adap->params.chip)) {
		edc_bist_cmd = EDC_REG(EDC_BIST_CMD, idx);
		edc_bist_cmd_addr = EDC_REG(EDC_BIST_CMD_ADDR, idx);
		edc_bist_cmd_len = EDC_REG(EDC_BIST_CMD_LEN, idx);
		edc_bist_cmd_data_pattern = EDC_REG(EDC_BIST_DATA_PATTERN,
						    idx);
		edc_bist_status_rdata = EDC_REG(EDC_BIST_STATUS_RDATA,
						idx);
	} else {
		edc_bist_cmd = EDC_REG_T5(EDC_H_BIST_CMD, idx);
		edc_bist_cmd_addr = EDC_REG_T5(EDC_H_BIST_CMD_ADDR, idx);
		edc_bist_cmd_len = EDC_REG_T5(EDC_H_BIST_CMD_LEN, idx);
		edc_bist_cmd_data_pattern =
			EDC_REG_T5(EDC_H_BIST_DATA_PATTERN, idx);
		edc_bist_status_rdata =
			EDC_REG_T5(EDC_H_BIST_STATUS_RDATA, idx);
	}

	if (t4_read_reg(adap, edc_bist_cmd) & START_BIST)
		return -EBUSY;
	t4_write_reg(adap, edc_bist_cmd_addr, addr & ~0x3fU);
	t4_write_reg(adap, edc_bist_cmd_len, 64);
	t4_write_reg(adap, edc_bist_cmd_data_pattern, 0xc);
	t4_write_reg(adap, edc_bist_cmd,
		     BIST_OPCODE(1) | BIST_CMD_GAP(1) | START_BIST);
	i = t4_wait_op_done(adap, edc_bist_cmd, START_BIST, 0, 10, 1);
	if (i)
		return i;

#define EDC_DATA(i) (EDC_BIST_STATUS_REG(edc_bist_status_rdata, i))

	for (i = 15; i >= 0; i--)
		*data++ = htonl(t4_read_reg(adap, EDC_DATA(i)));
	if (ecc)
		*ecc = t4_read_reg64(adap, EDC_DATA(16));
#undef EDC_DATA
	return 0;
}

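/* Both backdoor readers return one 64-byte, 64-byte-aligned burst.  An
 * illustrative read of the cache line covering byte address 0x100 of EDC 0:
 *
 *	__be32 data[16];
 *	u64 ecc;
 *
 *	if (!t4_edc_read(adap, 0, 0x100, data, &ecc))
 *		... data[] now holds the 64 bytes, big-endian ...
 */
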
/**
 *	t4_memory_rw - read/write EDC 0, EDC 1 or MC via PCIE memory window
 *	@adap: the adapter
 *	@win: PCI-E Memory Window to use
 *	@mtype: memory type: MEM_EDC0, MEM_EDC1 or MEM_MC
 *	@addr: address within indicated memory type
 *	@len: amount of memory to transfer
 *	@buf: host memory buffer
 *	@dir: direction of transfer T4_MEMORY_READ (1) or T4_MEMORY_WRITE (0)
 *
 *	Reads/writes an [almost] arbitrary memory region in the firmware: the
 *	firmware memory address and host buffer must be aligned on 32-bit
 *	boundaries; the length may be arbitrary.  The memory is transferred as
 *	a raw byte sequence from/to the firmware's memory.  If this memory
 *	contains data structures which contain multi-byte integers, it's the
 *	caller's responsibility to perform appropriate byte order conversions.
 */
int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr,
		 u32 len, __be32 *buf, int dir)
{
	u32 pos, offset, resid, memoffset;
	u32 edc_size, mc_size, win_pf, mem_reg, mem_aperture, mem_base;

	/* Argument sanity checks ...
	 */
	if (addr & 0x3)
		return -EINVAL;

	/* It's convenient to be able to handle lengths which aren't a
	 * multiple of 32-bits because we often end up transferring files to
	 * the firmware.  So we'll handle that by normalizing the length here
	 * and then handling any residual transfer at the end.
	 */
	resid = len & 0x3;
	len -= resid;

	/* Offset into the region of memory which is being accessed
	 * MEM_EDC0 = 0
	 * MEM_EDC1 = 1
	 * MEM_MC   = 2 -- T4
	 * MEM_MC0  = 2 -- For T5
	 * MEM_MC1  = 3 -- For T5
	 */
	edc_size = EDRAM0_SIZE_G(t4_read_reg(adap, MA_EDRAM0_BAR_A));
	if (mtype != MEM_MC1)
		memoffset = (mtype * (edc_size * 1024 * 1024));
	else {
		mc_size = EXT_MEM0_SIZE_G(t4_read_reg(adap,
						      MA_EXT_MEMORY1_BAR_A));
		memoffset = (MEM_MC0 * edc_size + mc_size) * 1024 * 1024;
	}

	/* Determine the PCIE_MEM_ACCESS_OFFSET */
	addr = addr + memoffset;

	/* Each PCI-E Memory Window is programmed with a window size -- or
	 * "aperture" -- which controls the granularity of its mapping onto
	 * adapter memory.  We need to grab that aperture in order to know
	 * how to use the specified window.  The window is also programmed
	 * with the base address of the Memory Window in BAR0's address
	 * space.  For T4 this is an absolute PCI-E Bus Address.  For T5
	 * the address is relative to BAR0.
	 */
	mem_reg = t4_read_reg(adap,
			      PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN,
						  win));
	mem_aperture = 1 << (GET_WINDOW(mem_reg) + 10);
	mem_base = GET_PCIEOFST(mem_reg) << 10;
	if (is_t4(adap->params.chip))
		mem_base -= adap->t4_bar0;
	win_pf = is_t4(adap->params.chip) ? 0 : V_PFNUM(adap->fn);

	/* Calculate our initial PCI-E Memory Window Position and Offset into
	 * that Window.
	 */
	pos = addr & ~(mem_aperture-1);
	offset = addr - pos;

	/* Set up initial PCI-E Memory Window to cover the start of our
	 * transfer.  (Read it back to ensure that changes propagate before we
	 * attempt to use the new value.)
	 */
	t4_write_reg(adap,
		     PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, win),
		     pos | win_pf);
	t4_read_reg(adap,
		    PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, win));

	/* Transfer data to/from the adapter as long as there's an integral
	 * number of 32-bit transfers to complete.
	 */
	while (len > 0) {
		if (dir == T4_MEMORY_READ)
			*buf++ = (__force __be32) t4_read_reg(adap,
							      mem_base + offset);
		else
			t4_write_reg(adap, mem_base + offset,
				     (__force u32) *buf++);
		offset += sizeof(__be32);
		len -= sizeof(__be32);

		/* If we've reached the end of our current window aperture,
		 * move the PCI-E Memory Window on to the next.  Note that
		 * doing this here after "len" may be 0 allows us to set up
		 * the PCI-E Memory Window for a possible final residual
		 * transfer below ...
		 */
		if (offset == mem_aperture) {
			pos += mem_aperture;
			offset = 0;
			t4_write_reg(adap,
				     PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET,
							 win), pos | win_pf);
			t4_read_reg(adap,
				    PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET,
							win));
		}
	}

	/* If the original transfer had a length which wasn't a multiple of
	 * 32-bits, now's where we need to finish off the transfer of the
	 * residual amount.  The PCI-E Memory Window has already been moved
	 * above (if necessary) to cover this final transfer.
	 */
	if (resid) {
		union {
			__be32 word;
			char byte[4];
		} last;
		unsigned char *bp;
		int i;

		if (dir == T4_MEMORY_READ) {
			last.word = (__force __be32) t4_read_reg(adap,
								 mem_base + offset);
			for (bp = (unsigned char *)buf, i = resid; i < 4; i++)
				bp[i] = last.byte[i];
		} else {
			last.word = *buf;
			for (i = resid; i < 4; i++)
				last.byte[i] = 0;
			t4_write_reg(adap, mem_base + offset,
				     (__force u32) last.word);
		}
	}

	return 0;
}

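/* Illustrative transfer: read the first 128 bytes of EDC 0 through memory
 * window 0 (assumed free for this sketch).  The buffer holds raw big-endian
 * data; callers do any needed byte-order conversion themselves:
 *
 *	__be32 buf[32];
 *	int ret = t4_memory_rw(adap, 0, MEM_EDC0, 0, sizeof(buf), buf,
 *			       T4_MEMORY_READ);
 */
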
#define EEPROM_STAT_ADDR   0x7bfc
#define VPD_BASE           0x400
#define VPD_BASE_OLD       0
#define VPD_LEN            1024
#define CHELSIO_VPD_UNIQUE_ID 0x82

/**
 *	t4_seeprom_wp - enable/disable EEPROM write protection
 *	@adapter: the adapter
 *	@enable: whether to enable or disable write protection
 *
 *	Enables or disables write protection on the serial EEPROM.
 */
int t4_seeprom_wp(struct adapter *adapter, bool enable)
{
	unsigned int v = enable ? 0xc : 0;
	int ret = pci_write_vpd(adapter->pdev, EEPROM_STAT_ADDR, 4, &v);
	return ret < 0 ? ret : 0;
}

/**
 *	get_vpd_params - read VPD parameters from VPD EEPROM
 *	@adapter: adapter to read
 *	@p: where to store the parameters
 *
 *	Reads card parameters stored in VPD EEPROM.
 */
int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
{
	u32 cclk_param, cclk_val;
	int i, ret, addr;
	int ec, sn, pn;
	u8 *vpd, csum;
	unsigned int vpdr_len, kw_offset, id_len;

	vpd = vmalloc(VPD_LEN);
	if (!vpd)
		return -ENOMEM;

	ret = pci_read_vpd(adapter->pdev, VPD_BASE, sizeof(u32), vpd);
	if (ret < 0)
		goto out;

	/* The VPD shall have a unique identifier specified by the PCI SIG.
	 * For Chelsio adapters the first byte of the VPD shall be
	 * CHELSIO_VPD_UNIQUE_ID (0x82).  The VPD programming software is
	 * expected to automatically put this entry at the beginning of
	 * the VPD.
	 */
	addr = *vpd == CHELSIO_VPD_UNIQUE_ID ? VPD_BASE : VPD_BASE_OLD;

	ret = pci_read_vpd(adapter->pdev, addr, VPD_LEN, vpd);
	if (ret < 0)
		goto out;

	if (vpd[0] != PCI_VPD_LRDT_ID_STRING) {
		dev_err(adapter->pdev_dev, "missing VPD ID string\n");
		ret = -EINVAL;
		goto out;
	}

	id_len = pci_vpd_lrdt_size(vpd);
	if (id_len > ID_LEN)
		id_len = ID_LEN;

	i = pci_vpd_find_tag(vpd, 0, VPD_LEN, PCI_VPD_LRDT_RO_DATA);
	if (i < 0) {
		dev_err(adapter->pdev_dev, "missing VPD-R section\n");
		ret = -EINVAL;
		goto out;
	}

	vpdr_len = pci_vpd_lrdt_size(&vpd[i]);
	kw_offset = i + PCI_VPD_LRDT_TAG_SIZE;
	if (vpdr_len + kw_offset > VPD_LEN) {
		dev_err(adapter->pdev_dev, "bad VPD-R length %u\n", vpdr_len);
		ret = -EINVAL;
		goto out;
	}

#define FIND_VPD_KW(var, name) do { \
	var = pci_vpd_find_info_keyword(vpd, kw_offset, vpdr_len, name); \
	if (var < 0) { \
		dev_err(adapter->pdev_dev, "missing VPD keyword " name "\n"); \
		ret = -EINVAL; \
		goto out; \
	} \
	var += PCI_VPD_INFO_FLD_HDR_SIZE; \
} while (0)

	FIND_VPD_KW(i, "RV");
	for (csum = 0; i >= 0; i--)
		csum += vpd[i];

	if (csum) {
		dev_err(adapter->pdev_dev,
			"corrupted VPD EEPROM, actual csum %u\n", csum);
		ret = -EINVAL;
		goto out;
	}

	FIND_VPD_KW(ec, "EC");
	FIND_VPD_KW(sn, "SN");
	FIND_VPD_KW(pn, "PN");
#undef FIND_VPD_KW

	memcpy(p->id, vpd + PCI_VPD_LRDT_TAG_SIZE, id_len);
	strim(p->id);
	memcpy(p->ec, vpd + ec, EC_LEN);
	strim(p->ec);
	i = pci_vpd_info_field_size(vpd + sn - PCI_VPD_INFO_FLD_HDR_SIZE);
	memcpy(p->sn, vpd + sn, min(i, SERNUM_LEN));
	strim(p->sn);
	i = pci_vpd_info_field_size(vpd + pn - PCI_VPD_INFO_FLD_HDR_SIZE);
	memcpy(p->pn, vpd + pn, min(i, PN_LEN));
	strim(p->pn);

	/*
	 * Ask firmware for the Core Clock since it knows how to translate the
	 * Reference Clock ('V2') VPD field into a Core Clock value ...
	 */
	cclk_param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
		      FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_CCLK));
	ret = t4_query_params(adapter, adapter->mbox, 0, 0,
			      1, &cclk_param, &cclk_val);

out:
	vfree(vpd);
	if (ret)
		return ret;
	p->cclk = cclk_val;

	return 0;
}

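/* After a successful call the caller's vpd_params hold the trimmed strings
 * and the core clock, e.g. (illustrative):
 *
 *	struct vpd_params vpd;
 *
 *	if (!get_vpd_params(adap, &vpd))
 *		dev_info(adap->pdev_dev, "S/N %s, P/N %s, cclk %u\n",
 *			 vpd.sn, vpd.pn, vpd.cclk);
 */
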
/* serial flash and firmware constants */
enum {
	SF_ATTEMPTS = 10,             /* max retries for SF operations */

	/* flash command opcodes */
	SF_PROG_PAGE    = 2,          /* program page */
	SF_WR_DISABLE   = 4,          /* disable writes */
	SF_RD_STATUS    = 5,          /* read status register */
	SF_WR_ENABLE    = 6,          /* enable writes */
	SF_RD_DATA_FAST = 0xb,        /* read flash */
	SF_RD_ID        = 0x9f,       /* read ID */
	SF_ERASE_SECTOR = 0xd8,       /* erase sector */

	FW_MAX_SIZE = 16 * SF_SEC_SIZE,
};

/**
 *	sf1_read - read data from the serial flash
 *	@adapter: the adapter
 *	@byte_cnt: number of bytes to read
 *	@cont: whether another operation will be chained
 *	@lock: whether to lock SF for PL access only
 *	@valp: where to store the read data
 *
 *	Reads up to 4 bytes of data from the serial flash.  The location of
 *	the read needs to be specified prior to calling this by issuing the
 *	appropriate commands to the serial flash.
 */
static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
		    int lock, u32 *valp)
{
	int ret;

	if (!byte_cnt || byte_cnt > 4)
		return -EINVAL;
	if (t4_read_reg(adapter, SF_OP) & SF_BUSY)
		return -EBUSY;
	cont = cont ? SF_CONT : 0;
	lock = lock ? SF_LOCK : 0;
	t4_write_reg(adapter, SF_OP, lock | cont | BYTECNT(byte_cnt - 1));
	ret = t4_wait_op_done(adapter, SF_OP, SF_BUSY, 0, SF_ATTEMPTS, 5);
	if (!ret)
		*valp = t4_read_reg(adapter, SF_DATA);
	return ret;
}

/**
 *	sf1_write - write data to the serial flash
 *	@adapter: the adapter
 *	@byte_cnt: number of bytes to write
 *	@cont: whether another operation will be chained
 *	@lock: whether to lock SF for PL access only
 *	@val: value to write
 *
 *	Writes up to 4 bytes of data to the serial flash.  The location of
 *	the write needs to be specified prior to calling this by issuing the
 *	appropriate commands to the serial flash.
 */
static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont,
		     int lock, u32 val)
{
	if (!byte_cnt || byte_cnt > 4)
		return -EINVAL;
	if (t4_read_reg(adapter, SF_OP) & SF_BUSY)
		return -EBUSY;
	cont = cont ? SF_CONT : 0;
	lock = lock ? SF_LOCK : 0;
	t4_write_reg(adapter, SF_DATA, val);
	t4_write_reg(adapter, SF_OP, lock |
		     cont | BYTECNT(byte_cnt - 1) | OP_WR);
	return t4_wait_op_done(adapter, SF_OP, SF_BUSY, 0, SF_ATTEMPTS, 5);
}

/**
 *	flash_wait_op - wait for a flash operation to complete
 *	@adapter: the adapter
 *	@attempts: max number of polls of the status register
 *	@delay: delay between polls in ms
 *
 *	Wait for a flash operation to complete by polling the status register.
 */
static int flash_wait_op(struct adapter *adapter, int attempts, int delay)
{
	int ret;
	u32 status;

	while (1) {
		if ((ret = sf1_write(adapter, 1, 1, 1, SF_RD_STATUS)) != 0 ||
		    (ret = sf1_read(adapter, 1, 0, 1, &status)) != 0)
			return ret;
		if (!(status & 1))
			return 0;
		if (--attempts == 0)
			return -EAGAIN;
		if (delay)
			msleep(delay);
	}
}

/**
 *	t4_read_flash - read words from serial flash
 *	@adapter: the adapter
 *	@addr: the start address for the read
 *	@nwords: how many 32-bit words to read
 *	@data: where to store the read data
 *	@byte_oriented: whether to store data as bytes or as words
 *
 *	Read the specified number of 32-bit words from the serial flash.
 *	If @byte_oriented is set the read data is stored as a byte array
 *	(i.e., big-endian), otherwise as 32-bit words in the platform's
 *	natural endianness.
 */
static int t4_read_flash(struct adapter *adapter, unsigned int addr,
			 unsigned int nwords, u32 *data, int byte_oriented)
{
	int ret;

	if (addr + nwords * sizeof(u32) > adapter->params.sf_size || (addr & 3))
		return -EINVAL;

	addr = swab32(addr) | SF_RD_DATA_FAST;

	if ((ret = sf1_write(adapter, 4, 1, 0, addr)) != 0 ||
	    (ret = sf1_read(adapter, 1, 1, 0, data)) != 0)
		return ret;

	for ( ; nwords; nwords--, data++) {
		ret = sf1_read(adapter, 4, nwords > 1, nwords == 1, data);
		if (nwords == 1)
			t4_write_reg(adapter, SF_OP, 0);    /* unlock SF */
		if (ret)
			return ret;
		if (byte_oriented)
			*data = (__force __u32) (htonl(*data));
	}
	return 0;
}

/**
 *	t4_write_flash - write up to a page of data to the serial flash
 *	@adapter: the adapter
 *	@addr: the start address to write
 *	@n: length of data to write in bytes
 *	@data: the data to write
 *
 *	Writes up to a page of data (256 bytes) to the serial flash starting
 *	at the given address.  All the data must be written to the same page.
 */
static int t4_write_flash(struct adapter *adapter, unsigned int addr,
			  unsigned int n, const u8 *data)
{
	int ret;
	u32 buf[64];
	unsigned int i, c, left, val, offset = addr & 0xff;

	if (addr >= adapter->params.sf_size || offset + n > SF_PAGE_SIZE)
		return -EINVAL;

	val = swab32(addr) | SF_PROG_PAGE;

	if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
	    (ret = sf1_write(adapter, 4, 1, 1, val)) != 0)
		goto unlock;

	for (left = n; left; left -= c) {
		c = min(left, 4U);
		for (val = 0, i = 0; i < c; ++i)
			val = (val << 8) + *data++;

		ret = sf1_write(adapter, c, c != left, 1, val);
		if (ret)
			goto unlock;
	}
	ret = flash_wait_op(adapter, 8, 1);
	if (ret)
		goto unlock;

	t4_write_reg(adapter, SF_OP, 0);    /* unlock SF */

	/* Read the page to verify the write succeeded */
	ret = t4_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf, 1);
	if (ret)
		return ret;

	if (memcmp(data - n, (u8 *)buf + offset, n)) {
		dev_err(adapter->pdev_dev,
			"failed to correctly write the flash page at %#x\n",
			addr);
		return -EIO;
	}
	return 0;

unlock:
	t4_write_reg(adapter, SF_OP, 0);    /* unlock SF */
	return ret;
}

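/* A page is 256 bytes, so a write that crosses a 256-byte boundary must be
 * split by the caller; t4_load_fw() below does exactly this, advancing
 * SF_PAGE_SIZE bytes per call.  Illustrative single-page write:
 *
 *	ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, fw_data);
 */
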
/**
 *	t4_get_fw_version - read the firmware version
 *	@adapter: the adapter
 *	@vers: where to place the version
 *
 *	Reads the FW version from flash.
 */
int t4_get_fw_version(struct adapter *adapter, u32 *vers)
{
	return t4_read_flash(adapter, FLASH_FW_START +
			     offsetof(struct fw_hdr, fw_ver), 1,
			     vers, 0);
}

/**
 *	t4_get_tp_version - read the TP microcode version
 *	@adapter: the adapter
 *	@vers: where to place the version
 *
 *	Reads the TP microcode version from flash.
 */
int t4_get_tp_version(struct adapter *adapter, u32 *vers)
{
	return t4_read_flash(adapter, FLASH_FW_START +
			     offsetof(struct fw_hdr, tp_microcode_ver),
			     1, vers, 0);
}

/* Is the given firmware API compatible with the one the driver was compiled
 * with?
 */
static int fw_compatible(const struct fw_hdr *hdr1, const struct fw_hdr *hdr2)
{
	/* short circuit if it's the exact same firmware version */
	if (hdr1->chip == hdr2->chip && hdr1->fw_ver == hdr2->fw_ver)
		return 1;

#define SAME_INTF(x) (hdr1->intfver_##x == hdr2->intfver_##x)
	if (hdr1->chip == hdr2->chip && SAME_INTF(nic) && SAME_INTF(vnic) &&
	    SAME_INTF(ri) && SAME_INTF(iscsi) && SAME_INTF(fcoe))
		return 1;
#undef SAME_INTF

	return 0;
}

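/* fw_ver packs major.minor.micro.build into one 32-bit word; the
 * FW_HDR_FW_VER_*_G() macros used below pull the fields back out, e.g.
 * (illustrative):
 *
 *	u32 vers = be32_to_cpu(hdr->fw_ver);
 *
 *	pr_info("%u.%u.%u.%u\n",
 *		FW_HDR_FW_VER_MAJOR_G(vers), FW_HDR_FW_VER_MINOR_G(vers),
 *		FW_HDR_FW_VER_MICRO_G(vers), FW_HDR_FW_VER_BUILD_G(vers));
 */
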
/* The firmware in the filesystem is usable, but should it be installed?
 * This routine explains itself in detail if it indicates the filesystem
 * firmware should be installed.
 */
static int should_install_fs_fw(struct adapter *adap, int card_fw_usable,
				int k, int c)
{
	const char *reason;

	if (!card_fw_usable) {
		reason = "incompatible or unusable";
		goto install;
	}

	if (k > c) {
		reason = "older than the version supported with this driver";
		goto install;
	}

	return 0;

install:
	dev_err(adap->pdev_dev, "firmware on card (%u.%u.%u.%u) is %s, "
		"installing firmware %u.%u.%u.%u on card.\n",
		FW_HDR_FW_VER_MAJOR_G(c), FW_HDR_FW_VER_MINOR_G(c),
		FW_HDR_FW_VER_MICRO_G(c), FW_HDR_FW_VER_BUILD_G(c), reason,
		FW_HDR_FW_VER_MAJOR_G(k), FW_HDR_FW_VER_MINOR_G(k),
		FW_HDR_FW_VER_MICRO_G(k), FW_HDR_FW_VER_BUILD_G(k));

	return 1;
}

int t4_prep_fw(struct adapter *adap, struct fw_info *fw_info,
	       const u8 *fw_data, unsigned int fw_size,
	       struct fw_hdr *card_fw, enum dev_state state,
	       int *reset)
{
	int ret, card_fw_usable, fs_fw_usable;
	const struct fw_hdr *fs_fw;
	const struct fw_hdr *drv_fw;

	drv_fw = &fw_info->fw_hdr;

	/* Read the header of the firmware on the card */
	ret = -t4_read_flash(adap, FLASH_FW_START,
			     sizeof(*card_fw) / sizeof(uint32_t),
			     (uint32_t *)card_fw, 1);
	if (ret == 0) {
		card_fw_usable = fw_compatible(drv_fw, (const void *)card_fw);
	} else {
		dev_err(adap->pdev_dev,
			"Unable to read card's firmware header: %d\n", ret);
		card_fw_usable = 0;
	}

	if (fw_data != NULL) {
		fs_fw = (const void *)fw_data;
		fs_fw_usable = fw_compatible(drv_fw, fs_fw);
	} else {
		fs_fw = NULL;
		fs_fw_usable = 0;
	}

	if (card_fw_usable && card_fw->fw_ver == drv_fw->fw_ver &&
	    (!fs_fw_usable || fs_fw->fw_ver == drv_fw->fw_ver)) {
		/* Common case: the firmware on the card is an exact match and
		 * the filesystem one is an exact match too, or the filesystem
		 * one is absent/incompatible.
		 */
	} else if (fs_fw_usable && state == DEV_STATE_UNINIT &&
		   should_install_fs_fw(adap, card_fw_usable,
					be32_to_cpu(fs_fw->fw_ver),
					be32_to_cpu(card_fw->fw_ver))) {
		ret = -t4_fw_upgrade(adap, adap->mbox, fw_data,
				     fw_size, 0);
		if (ret != 0) {
			dev_err(adap->pdev_dev,
				"failed to install firmware: %d\n", ret);
			goto bye;
		}

		/* Installed successfully, update the cached header too. */
		memcpy(card_fw, fs_fw, sizeof(*card_fw));
		card_fw_usable = 1;
		*reset = 0;	/* already reset as part of load_fw */
	}

	if (!card_fw_usable) {
		uint32_t d, c, k;

		d = be32_to_cpu(drv_fw->fw_ver);
		c = be32_to_cpu(card_fw->fw_ver);
		k = fs_fw ? be32_to_cpu(fs_fw->fw_ver) : 0;

		dev_err(adap->pdev_dev, "Cannot find a usable firmware: "
			"chip state %d, "
			"driver compiled with %d.%d.%d.%d, "
			"card has %d.%d.%d.%d, filesystem has %d.%d.%d.%d\n",
			state,
			FW_HDR_FW_VER_MAJOR_G(d), FW_HDR_FW_VER_MINOR_G(d),
			FW_HDR_FW_VER_MICRO_G(d), FW_HDR_FW_VER_BUILD_G(d),
			FW_HDR_FW_VER_MAJOR_G(c), FW_HDR_FW_VER_MINOR_G(c),
			FW_HDR_FW_VER_MICRO_G(c), FW_HDR_FW_VER_BUILD_G(c),
			FW_HDR_FW_VER_MAJOR_G(k), FW_HDR_FW_VER_MINOR_G(k),
			FW_HDR_FW_VER_MICRO_G(k), FW_HDR_FW_VER_BUILD_G(k));
		ret = EINVAL;
		goto bye;
	}

	/* We're using whatever's on the card and it's known to be good. */
	adap->params.fw_vers = be32_to_cpu(card_fw->fw_ver);
	adap->params.tp_vers = be32_to_cpu(card_fw->tp_microcode_ver);

bye:
	return ret;
}

/**
 *	t4_flash_erase_sectors - erase a range of flash sectors
 *	@adapter: the adapter
 *	@start: the first sector to erase
 *	@end: the last sector to erase
 *
 *	Erases the sectors in the given inclusive range.
 */
static int t4_flash_erase_sectors(struct adapter *adapter, int start, int end)
{
	int ret = 0;

	if (end >= adapter->params.sf_nsec)
		return -EINVAL;

	while (start <= end) {
		if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
		    (ret = sf1_write(adapter, 4, 0, 1,
				     SF_ERASE_SECTOR | (start << 8))) != 0 ||
		    (ret = flash_wait_op(adapter, 14, 500)) != 0) {
			dev_err(adapter->pdev_dev,
				"erase of flash sector %d failed, error %d\n",
				start, ret);
			break;
		}
		start++;
	}
	t4_write_reg(adapter, SF_OP, 0);    /* unlock SF */
	return ret;
}

/**
 *	t4_flash_cfg_addr - return the address of the flash configuration file
 *	@adapter: the adapter
 *
 *	Return the address within the flash where the Firmware Configuration
 *	File is stored.
 */
unsigned int t4_flash_cfg_addr(struct adapter *adapter)
{
	if (adapter->params.sf_size == 0x100000)
		return FLASH_FPGA_CFG_START;
	else
		return FLASH_CFG_START;
}

/* Return TRUE if the specified firmware matches the adapter.  I.e. T4
 * firmware for T4 adapters, T5 firmware for T5 adapters, etc.  We go ahead
 * and emit an error message for mismatched firmware to save our caller the
 * effort ...
 */
static bool t4_fw_matches_chip(const struct adapter *adap,
			       const struct fw_hdr *hdr)
{
	/* The expression below will return FALSE for any unsupported adapter
	 * which will keep us "honest" in the future ...
	 */
	if ((is_t4(adap->params.chip) && hdr->chip == FW_HDR_CHIP_T4) ||
	    (is_t5(adap->params.chip) && hdr->chip == FW_HDR_CHIP_T5))
		return true;

	dev_err(adap->pdev_dev,
		"FW image (%d) is not suitable for this adapter (%d)\n",
		hdr->chip, CHELSIO_CHIP_VERSION(adap->params.chip));
	return false;
}

/**
 *	t4_load_fw - download firmware
 *	@adap: the adapter
 *	@fw_data: the firmware image to write
 *	@size: image size
 *
 *	Write the supplied firmware image to the card's serial flash.
 */
int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
{
	u32 csum;
	int ret, addr;
	unsigned int i;
	u8 first_page[SF_PAGE_SIZE];
	const __be32 *p = (const __be32 *)fw_data;
	const struct fw_hdr *hdr = (const struct fw_hdr *)fw_data;
	unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
	unsigned int fw_img_start = adap->params.sf_fw_start;
	unsigned int fw_start_sec = fw_img_start / sf_sec_size;

	if (!size) {
		dev_err(adap->pdev_dev, "FW image has no data\n");
		return -EINVAL;
	}
	if (size & 511) {
		dev_err(adap->pdev_dev,
			"FW image size not multiple of 512 bytes\n");
		return -EINVAL;
	}
	if (ntohs(hdr->len512) * 512 != size) {
		dev_err(adap->pdev_dev,
			"FW image size differs from size in FW header\n");
		return -EINVAL;
	}
	if (size > FW_MAX_SIZE) {
		dev_err(adap->pdev_dev, "FW image too large, max is %u bytes\n",
			FW_MAX_SIZE);
		return -EFBIG;
	}
	if (!t4_fw_matches_chip(adap, hdr))
		return -EINVAL;

	for (csum = 0, i = 0; i < size / sizeof(csum); i++)
		csum += ntohl(p[i]);

	if (csum != 0xffffffff) {
		dev_err(adap->pdev_dev,
			"corrupted firmware image, checksum %#x\n", csum);
		return -EINVAL;
	}

	i = DIV_ROUND_UP(size, sf_sec_size);        /* # of sectors spanned */
	ret = t4_flash_erase_sectors(adap, fw_start_sec, fw_start_sec + i - 1);
	if (ret)
		goto out;

	/*
	 * We write the correct version at the end so the driver can see a bad
	 * version if the FW write fails.  Start by writing a copy of the
	 * first page with a bad version.
	 */
	memcpy(first_page, fw_data, SF_PAGE_SIZE);
	((struct fw_hdr *)first_page)->fw_ver = htonl(0xffffffff);
	ret = t4_write_flash(adap, fw_img_start, SF_PAGE_SIZE, first_page);
	if (ret)
		goto out;

	addr = fw_img_start;
	for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
		addr += SF_PAGE_SIZE;
		fw_data += SF_PAGE_SIZE;
		ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, fw_data);
		if (ret)
			goto out;
	}

	ret = t4_write_flash(adap,
			     fw_img_start + offsetof(struct fw_hdr, fw_ver),
			     sizeof(hdr->fw_ver), (const u8 *)&hdr->fw_ver);
out:
	if (ret)
		dev_err(adap->pdev_dev, "firmware download failed, error %d\n",
			ret);
	else
		ret = t4_get_fw_version(adap, &adap->params.fw_vers);
	return ret;
}

#define ADVERT_MASK (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G |\
		     FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_SPEED_40G | \
		     FW_PORT_CAP_ANEG)

/**
 *	t4_link_start - apply link configuration to MAC/PHY
 *	@adap: the adapter
 *	@mbox: mbox to use for the FW command
 *	@port: the port id
 *	@lc: the requested link configuration
 *
 *	Set up a port's MAC and PHY according to a desired link configuration.
 *	- If the PHY can auto-negotiate first decide what to advertise, then
 *	  enable/disable auto-negotiation as desired, and reset.
 *	- If the PHY does not auto-negotiate just reset it.
 *	- If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
 *	  otherwise do it later based on the outcome of auto-negotiation.
 */
int t4_link_start(struct adapter *adap, unsigned int mbox, unsigned int port,
		  struct link_config *lc)
{
	struct fw_port_cmd c;
	unsigned int fc = 0, mdi = FW_PORT_CAP_MDI_V(FW_PORT_CAP_MDI_AUTO);

	lc->link_ok = 0;
	if (lc->requested_fc & PAUSE_RX)
		fc |= FW_PORT_CAP_FC_RX;
	if (lc->requested_fc & PAUSE_TX)
		fc |= FW_PORT_CAP_FC_TX;

	memset(&c, 0, sizeof(c));
	c.op_to_portid = htonl(FW_CMD_OP_V(FW_PORT_CMD) | FW_CMD_REQUEST_F |
			       FW_CMD_EXEC_F | FW_PORT_CMD_PORTID_V(port));
	c.action_to_len16 = htonl(FW_PORT_CMD_ACTION_V(FW_PORT_ACTION_L1_CFG) |
				  FW_LEN16(c));

	if (!(lc->supported & FW_PORT_CAP_ANEG)) {
		c.u.l1cfg.rcap = htonl((lc->supported & ADVERT_MASK) | fc);
		lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
	} else if (lc->autoneg == AUTONEG_DISABLE) {
		c.u.l1cfg.rcap = htonl(lc->requested_speed | fc | mdi);
		lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
	} else
		c.u.l1cfg.rcap = htonl(lc->advertising | fc | mdi);

	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}

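/* Illustrative use: request both pause-frame directions and reissue the L1
 * configure command, assuming the usual port_info pointer pi:
 *
 *	lc->requested_fc = PAUSE_RX | PAUSE_TX;
 *	ret = t4_link_start(adap, adap->mbox, pi->port_id, lc);
 */
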
/**
 *	t4_restart_aneg - restart autonegotiation
 *	@adap: the adapter
 *	@mbox: mbox to use for the FW command
 *	@port: the port id
 *
 *	Restarts autonegotiation for the selected port.
 */
int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port)
{
	struct fw_port_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_portid = htonl(FW_CMD_OP_V(FW_PORT_CMD) | FW_CMD_REQUEST_F |
			       FW_CMD_EXEC_F | FW_PORT_CMD_PORTID_V(port));
	c.action_to_len16 = htonl(FW_PORT_CMD_ACTION_V(FW_PORT_ACTION_L1_CFG) |
				  FW_LEN16(c));
	c.u.l1cfg.rcap = htonl(FW_PORT_CAP_ANEG);
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}

typedef void (*int_handler_t)(struct adapter *adap);

struct intr_info {
	unsigned int mask;         /* bits to check in interrupt status */
	const char *msg;           /* message to print or NULL */
	short stat_idx;            /* stat counter to increment or -1 */
	unsigned short fatal;      /* whether the condition reported is fatal */
	int_handler_t int_handler; /* platform-specific int handler */
};

/**
 *	t4_handle_intr_status - table driven interrupt handler
 *	@adapter: the adapter that generated the interrupt
 *	@reg: the interrupt status register to process
 *	@acts: table of interrupt actions
 *
 *	A table driven interrupt handler that applies a set of masks to an
 *	interrupt status word and performs the corresponding actions if the
 *	interrupts described by the mask have occurred.  The actions include
 *	optionally emitting a warning or alert message.  The table is
 *	terminated by an entry specifying mask 0.  Returns the number of fatal
 *	interrupt conditions.
 */
static int t4_handle_intr_status(struct adapter *adapter, unsigned int reg,
				 const struct intr_info *acts)
{
	int fatal = 0;
	unsigned int mask = 0;
	unsigned int status = t4_read_reg(adapter, reg);

	for ( ; acts->mask; ++acts) {
		if (!(status & acts->mask))
			continue;
		if (acts->fatal) {
			fatal++;
			dev_alert(adapter->pdev_dev, "%s (0x%x)\n", acts->msg,
				  status & acts->mask);
		} else if (acts->msg && printk_ratelimit())
			dev_warn(adapter->pdev_dev, "%s (0x%x)\n", acts->msg,
				 status & acts->mask);
		if (acts->int_handler)
			acts->int_handler(adapter);
		mask |= acts->mask;
	}
	status &= mask;
	if (status)                 /* clear processed interrupts */
		t4_write_reg(adapter, reg, status);
	return fatal;
}

/*
 * Interrupt handler for the PCIE module.
 */
static void pcie_intr_handler(struct adapter *adapter)
{
	static const struct intr_info sysbus_intr_info[] = {
		{ RNPP, "RXNP array parity error", -1, 1 },
		{ RPCP, "RXPC array parity error", -1, 1 },
		{ RCIP, "RXCIF array parity error", -1, 1 },
		{ RCCP, "Rx completions control array parity error", -1, 1 },
		{ RFTP, "RXFT array parity error", -1, 1 },
		{ 0 }
	};
	static const struct intr_info pcie_port_intr_info[] = {
		{ TPCP, "TXPC array parity error", -1, 1 },
		{ TNPP, "TXNP array parity error", -1, 1 },
		{ TFTP, "TXFT array parity error", -1, 1 },
		{ TCAP, "TXCA array parity error", -1, 1 },
		{ TCIP, "TXCIF array parity error", -1, 1 },
		{ RCAP, "RXCA array parity error", -1, 1 },
		{ OTDD, "outbound request TLP discarded", -1, 1 },
		{ RDPE, "Rx data parity error", -1, 1 },
		{ TDUE, "Tx uncorrectable data error", -1, 1 },
		{ 0 }
	};
	static const struct intr_info pcie_intr_info[] = {
		{ MSIADDRLPERR, "MSI AddrL parity error", -1, 1 },
		{ MSIADDRHPERR, "MSI AddrH parity error", -1, 1 },
		{ MSIDATAPERR, "MSI data parity error", -1, 1 },
		{ MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 },
		{ MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 },
		{ MSIXDATAPERR, "MSI-X data parity error", -1, 1 },
		{ MSIXDIPERR, "MSI-X DI parity error", -1, 1 },
		{ PIOCPLPERR, "PCI PIO completion FIFO parity error", -1, 1 },
		{ PIOREQPERR, "PCI PIO request FIFO parity error", -1, 1 },
		{ TARTAGPERR, "PCI PCI target tag FIFO parity error", -1, 1 },
		{ CCNTPERR, "PCI CMD channel count parity error", -1, 1 },
		{ CREQPERR, "PCI CMD channel request parity error", -1, 1 },
		{ CRSPPERR, "PCI CMD channel response parity error", -1, 1 },
		{ DCNTPERR, "PCI DMA channel count parity error", -1, 1 },
		{ DREQPERR, "PCI DMA channel request parity error", -1, 1 },
		{ DRSPPERR, "PCI DMA channel response parity error", -1, 1 },
		{ HCNTPERR, "PCI HMA channel count parity error", -1, 1 },
		{ HREQPERR, "PCI HMA channel request parity error", -1, 1 },
		{ HRSPPERR, "PCI HMA channel response parity error", -1, 1 },
		{ CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 },
		{ FIDPERR, "PCI FID parity error", -1, 1 },
		{ INTXCLRPERR, "PCI INTx clear parity error", -1, 1 },
		{ MATAGPERR, "PCI MA tag parity error", -1, 1 },
		{ PIOTAGPERR, "PCI PIO tag parity error", -1, 1 },
		{ RXCPLPERR, "PCI Rx completion parity error", -1, 1 },
		{ RXWRPERR, "PCI Rx write parity error", -1, 1 },
		{ RPLPERR, "PCI replay buffer parity error", -1, 1 },
		{ PCIESINT, "PCI core secondary fault", -1, 1 },
		{ PCIEPINT, "PCI core primary fault", -1, 1 },
		{ UNXSPLCPLERR, "PCI unexpected split completion error",
		  -1, 0 },
		{ 0 }
	};

	static struct intr_info t5_pcie_intr_info[] = {
		{ MSTGRPPERR, "Master Response Read Queue parity error",
		  -1, 1 },
		{ MSTTIMEOUTPERR, "Master Timeout FIFO parity error", -1, 1 },
		{ MSIXSTIPERR, "MSI-X STI SRAM parity error", -1, 1 },
		{ MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 },
		{ MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 },
		{ MSIXDATAPERR, "MSI-X data parity error", -1, 1 },
		{ MSIXDIPERR, "MSI-X DI parity error", -1, 1 },
		{ PIOCPLGRPPERR, "PCI PIO completion Group FIFO parity error",
		  -1, 1 },
		{ PIOREQGRPPERR, "PCI PIO request Group FIFO parity error",
		  -1, 1 },
		{ TARTAGPERR, "PCI PCI target tag FIFO parity error", -1, 1 },
		{ MSTTAGQPERR, "PCI master tag queue parity error", -1, 1 },
		{ CREQPERR, "PCI CMD channel request parity error", -1, 1 },
		{ CRSPPERR, "PCI CMD channel response parity error", -1, 1 },
		{ DREQWRPERR, "PCI DMA channel write request parity error",
		  -1, 1 },
		{ DREQPERR, "PCI DMA channel request parity error", -1, 1 },
		{ DRSPPERR, "PCI DMA channel response parity error", -1, 1 },
		{ HREQWRPERR, "PCI HMA channel count parity error", -1, 1 },
		{ HREQPERR, "PCI HMA channel request parity error", -1, 1 },
		{ HRSPPERR, "PCI HMA channel response parity error", -1, 1 },
		{ CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 },
		{ FIDPERR, "PCI FID parity error", -1, 1 },
		{ VFIDPERR, "PCI INTx clear parity error", -1, 1 },
		{ MAGRPPERR, "PCI MA group FIFO parity error", -1, 1 },
		{ PIOTAGPERR, "PCI PIO tag parity error", -1, 1 },
		{ IPRXHDRGRPPERR, "PCI IP Rx header group parity error",
		  -1, 1 },
		{ IPRXDATAGRPPERR, "PCI IP Rx data group parity error",
		  -1, 1 },
		{ RPLPERR, "PCI IP replay buffer parity error", -1, 1 },
		{ IPSOTPERR, "PCI IP SOT buffer parity error", -1, 1 },
		{ TRGT1GRPPERR, "PCI TRGT1 group FIFOs parity error", -1, 1 },
		{ READRSPERR, "Outbound read error", -1, 0 },
		{ 0 }
	};

	int fat;

	if (is_t4(adapter->params.chip))
		fat = t4_handle_intr_status(adapter,
					    PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS,
					    sysbus_intr_info) +
		      t4_handle_intr_status(adapter,
					    PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
					    pcie_port_intr_info) +
		      t4_handle_intr_status(adapter, PCIE_INT_CAUSE,
					    pcie_intr_info);
	else
		fat = t4_handle_intr_status(adapter, PCIE_INT_CAUSE,
					    t5_pcie_intr_info);

	if (fat)
		t4_fatal_err(adapter);
}

/*
 * TP interrupt handler.
 */
static void tp_intr_handler(struct adapter *adapter)
{
	static const struct intr_info tp_intr_info[] = {
		{ 0x3fffffff, "TP parity error", -1, 1 },
		{ FLMTXFLSTEMPTY, "TP out of Tx pages", -1, 1 },
		{ 0 }
	};

	if (t4_handle_intr_status(adapter, TP_INT_CAUSE, tp_intr_info))
		t4_fatal_err(adapter);
}

/*
 * SGE interrupt handler.
 */
static void sge_intr_handler(struct adapter *adapter)
{
	u64 v;

	static const struct intr_info sge_intr_info[] = {
		{ ERR_CPL_EXCEED_IQE_SIZE,
		  "SGE received CPL exceeding IQE size", -1, 1 },
		{ ERR_INVALID_CIDX_INC,
		  "SGE GTS CIDX increment too large", -1, 0 },
		{ ERR_CPL_OPCODE_0, "SGE received 0-length CPL", -1, 0 },
		{ DBFIFO_LP_INT, NULL, -1, 0, t4_db_full },
		{ DBFIFO_HP_INT, NULL, -1, 0, t4_db_full },
		{ ERR_DROPPED_DB, NULL, -1, 0, t4_db_dropped },
		{ ERR_DATA_CPL_ON_HIGH_QID1 | ERR_DATA_CPL_ON_HIGH_QID0,
		  "SGE IQID > 1023 received CPL for FL", -1, 0 },
		{ ERR_BAD_DB_PIDX3, "SGE DBP 3 pidx increment too large", -1,
		  0 },
		{ ERR_BAD_DB_PIDX2, "SGE DBP 2 pidx increment too large", -1,
		  0 },
		{ ERR_BAD_DB_PIDX1, "SGE DBP 1 pidx increment too large", -1,
		  0 },
		{ ERR_BAD_DB_PIDX0, "SGE DBP 0 pidx increment too large", -1,
		  0 },
		{ ERR_ING_CTXT_PRIO,
		  "SGE too many priority ingress contexts", -1, 0 },
		{ ERR_EGR_CTXT_PRIO,
		  "SGE too many priority egress contexts", -1, 0 },
		{ INGRESS_SIZE_ERR, "SGE illegal ingress QID", -1, 0 },
		{ EGRESS_SIZE_ERR, "SGE illegal egress QID", -1, 0 },
		{ 0 }
	};

	v = (u64)t4_read_reg(adapter, SGE_INT_CAUSE1) |
	    ((u64)t4_read_reg(adapter, SGE_INT_CAUSE2) << 32);
	if (v) {
		dev_alert(adapter->pdev_dev, "SGE parity error (%#llx)\n",
			  (unsigned long long)v);
		t4_write_reg(adapter, SGE_INT_CAUSE1, v);
		t4_write_reg(adapter, SGE_INT_CAUSE2, v >> 32);
	}

	if (t4_handle_intr_status(adapter, SGE_INT_CAUSE3, sge_intr_info) ||
	    v != 0)
		t4_fatal_err(adapter);
}

/*
 * CIM interrupt handler.
 */
static void cim_intr_handler(struct adapter *adapter)
{
	static const struct intr_info cim_intr_info[] = {
		{ PREFDROPINT, "CIM control register prefetch drop", -1, 1 },
		{ OBQPARERR, "CIM OBQ parity error", -1, 1 },
		{ IBQPARERR, "CIM IBQ parity error", -1, 1 },
		{ MBUPPARERR, "CIM mailbox uP parity error", -1, 1 },
		{ MBHOSTPARERR, "CIM mailbox host parity error", -1, 1 },
		{ TIEQINPARERRINT, "CIM TIEQ outgoing parity error", -1, 1 },
		{ TIEQOUTPARERRINT, "CIM TIEQ incoming parity error", -1, 1 },
		{ 0 }
	};
	static const struct intr_info cim_upintr_info[] = {
		{ RSVDSPACEINT, "CIM reserved space access", -1, 1 },
		{ ILLTRANSINT, "CIM illegal transaction", -1, 1 },
		{ ILLWRINT, "CIM illegal write", -1, 1 },
		{ ILLRDINT, "CIM illegal read", -1, 1 },
		{ ILLRDBEINT, "CIM illegal read BE", -1, 1 },
		{ ILLWRBEINT, "CIM illegal write BE", -1, 1 },
		{ SGLRDBOOTINT, "CIM single read from boot space", -1, 1 },
		{ SGLWRBOOTINT, "CIM single write to boot space", -1, 1 },
		{ BLKWRBOOTINT, "CIM block write to boot space", -1, 1 },
		{ SGLRDFLASHINT, "CIM single read from flash space", -1, 1 },
		{ SGLWRFLASHINT, "CIM single write to flash space", -1, 1 },
		{ BLKWRFLASHINT, "CIM block write to flash space", -1, 1 },
		{ SGLRDEEPROMINT, "CIM single EEPROM read", -1, 1 },
		{ SGLWREEPROMINT, "CIM single EEPROM write", -1, 1 },
		{ BLKRDEEPROMINT, "CIM block EEPROM read", -1, 1 },
		{ BLKWREEPROMINT, "CIM block EEPROM write", -1, 1 },
		{ SGLRDCTLINT, "CIM single read from CTL space", -1, 1 },
		{ SGLWRCTLINT, "CIM single write to CTL space", -1, 1 },
		{ BLKRDCTLINT, "CIM block read from CTL space", -1, 1 },
		{ BLKWRCTLINT, "CIM block write to CTL space", -1, 1 },
		{ SGLRDPLINT, "CIM single read from PL space", -1, 1 },
		{ SGLWRPLINT, "CIM single write to PL space", -1, 1 },
		{ BLKRDPLINT, "CIM block read from PL space", -1, 1 },
		{ BLKWRPLINT, "CIM block write to PL space", -1, 1 },
		{ REQOVRLOOKUPINT, "CIM request FIFO overwrite", -1, 1 },
		{ RSPOVRLOOKUPINT, "CIM response FIFO overwrite", -1, 1 },
		{ TIMEOUTINT, "CIM PIF timeout", -1, 1 },
		{ TIMEOUTMAINT, "CIM PIF MA timeout", -1, 1 },
		{ 0 }
	};

	int fat;

	if (t4_read_reg(adapter, MA_PCIE_FW) & PCIE_FW_ERR)
		t4_report_fw_error(adapter);

	fat = t4_handle_intr_status(adapter, CIM_HOST_INT_CAUSE,
				    cim_intr_info) +
	      t4_handle_intr_status(adapter, CIM_HOST_UPACC_INT_CAUSE,
				    cim_upintr_info);
	if (fat)
		t4_fatal_err(adapter);
}

1603/*
1604 * ULP RX interrupt handler.
1605 */
1606static void ulprx_intr_handler(struct adapter *adapter)
1607{
005b5717 1608 static const struct intr_info ulprx_intr_info[] = {
91e9a1ec 1609 { 0x1800000, "ULPRX context error", -1, 1 },
56d36be4
DM
1610 { 0x7fffff, "ULPRX parity error", -1, 1 },
1611 { 0 }
1612 };
1613
1614 if (t4_handle_intr_status(adapter, ULP_RX_INT_CAUSE, ulprx_intr_info))
1615 t4_fatal_err(adapter);
1616}

/*
 * ULP TX interrupt handler.
 */
static void ulptx_intr_handler(struct adapter *adapter)
{
	static const struct intr_info ulptx_intr_info[] = {
		{ PBL_BOUND_ERR_CH3, "ULPTX channel 3 PBL out of bounds", -1,
		  0 },
		{ PBL_BOUND_ERR_CH2, "ULPTX channel 2 PBL out of bounds", -1,
		  0 },
		{ PBL_BOUND_ERR_CH1, "ULPTX channel 1 PBL out of bounds", -1,
		  0 },
		{ PBL_BOUND_ERR_CH0, "ULPTX channel 0 PBL out of bounds", -1,
		  0 },
		{ 0xfffffff, "ULPTX parity error", -1, 1 },
		{ 0 }
	};

	if (t4_handle_intr_status(adapter, ULP_TX_INT_CAUSE, ulptx_intr_info))
		t4_fatal_err(adapter);
}

/*
 * PM TX interrupt handler.
 */
static void pmtx_intr_handler(struct adapter *adapter)
{
	static const struct intr_info pmtx_intr_info[] = {
		{ PCMD_LEN_OVFL0, "PMTX channel 0 pcmd too large", -1, 1 },
		{ PCMD_LEN_OVFL1, "PMTX channel 1 pcmd too large", -1, 1 },
		{ PCMD_LEN_OVFL2, "PMTX channel 2 pcmd too large", -1, 1 },
		{ ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1 },
		{ PMTX_FRAMING_ERROR, "PMTX framing error", -1, 1 },
		{ OESPI_PAR_ERROR, "PMTX oespi parity error", -1, 1 },
		{ DB_OPTIONS_PAR_ERROR, "PMTX db_options parity error", -1, 1 },
		{ ICSPI_PAR_ERROR, "PMTX icspi parity error", -1, 1 },
		{ C_PCMD_PAR_ERROR, "PMTX c_pcmd parity error", -1, 1 },
		{ 0 }
	};

	if (t4_handle_intr_status(adapter, PM_TX_INT_CAUSE, pmtx_intr_info))
		t4_fatal_err(adapter);
}

/*
 * PM RX interrupt handler.
 */
static void pmrx_intr_handler(struct adapter *adapter)
{
	static const struct intr_info pmrx_intr_info[] = {
		{ ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1 },
		{ PMRX_FRAMING_ERROR, "PMRX framing error", -1, 1 },
		{ OCSPI_PAR_ERROR, "PMRX ocspi parity error", -1, 1 },
		{ DB_OPTIONS_PAR_ERROR, "PMRX db_options parity error", -1, 1 },
		{ IESPI_PAR_ERROR, "PMRX iespi parity error", -1, 1 },
		{ E_PCMD_PAR_ERROR, "PMRX e_pcmd parity error", -1, 1 },
		{ 0 }
	};

	if (t4_handle_intr_status(adapter, PM_RX_INT_CAUSE, pmrx_intr_info))
		t4_fatal_err(adapter);
}

/*
 * CPL switch interrupt handler.
 */
static void cplsw_intr_handler(struct adapter *adapter)
{
	static const struct intr_info cplsw_intr_info[] = {
		{ CIM_OP_MAP_PERR, "CPLSW CIM op_map parity error", -1, 1 },
		{ CIM_OVFL_ERROR, "CPLSW CIM overflow", -1, 1 },
		{ TP_FRAMING_ERROR, "CPLSW TP framing error", -1, 1 },
		{ SGE_FRAMING_ERROR, "CPLSW SGE framing error", -1, 1 },
		{ CIM_FRAMING_ERROR, "CPLSW CIM framing error", -1, 1 },
		{ ZERO_SWITCH_ERROR, "CPLSW no-switch error", -1, 1 },
		{ 0 }
	};

	if (t4_handle_intr_status(adapter, CPL_INTR_CAUSE, cplsw_intr_info))
		t4_fatal_err(adapter);
}

/*
 * LE interrupt handler.
 */
static void le_intr_handler(struct adapter *adap)
{
	static const struct intr_info le_intr_info[] = {
		{ LIPMISS, "LE LIP miss", -1, 0 },
		{ LIP0, "LE 0 LIP error", -1, 0 },
		{ PARITYERR, "LE parity error", -1, 1 },
		{ UNKNOWNCMD, "LE unknown command", -1, 1 },
		{ REQQPARERR, "LE request queue parity error", -1, 1 },
		{ 0 }
	};

	if (t4_handle_intr_status(adap, LE_DB_INT_CAUSE, le_intr_info))
		t4_fatal_err(adap);
}

1718/*
1719 * MPS interrupt handler.
1720 */
1721static void mps_intr_handler(struct adapter *adapter)
1722{
005b5717 1723 static const struct intr_info mps_rx_intr_info[] = {
56d36be4
DM
1724 { 0xffffff, "MPS Rx parity error", -1, 1 },
1725 { 0 }
1726 };
005b5717 1727 static const struct intr_info mps_tx_intr_info[] = {
56d36be4
DM
1728 { TPFIFO, "MPS Tx TP FIFO parity error", -1, 1 },
1729 { NCSIFIFO, "MPS Tx NC-SI FIFO parity error", -1, 1 },
1730 { TXDATAFIFO, "MPS Tx data FIFO parity error", -1, 1 },
1731 { TXDESCFIFO, "MPS Tx desc FIFO parity error", -1, 1 },
1732 { BUBBLE, "MPS Tx underflow", -1, 1 },
1733 { SECNTERR, "MPS Tx SOP/EOP error", -1, 1 },
1734 { FRMERR, "MPS Tx framing error", -1, 1 },
1735 { 0 }
1736 };
005b5717 1737 static const struct intr_info mps_trc_intr_info[] = {
56d36be4
DM
1738 { FILTMEM, "MPS TRC filter parity error", -1, 1 },
1739 { PKTFIFO, "MPS TRC packet FIFO parity error", -1, 1 },
1740 { MISCPERR, "MPS TRC misc parity error", -1, 1 },
1741 { 0 }
1742 };
005b5717 1743 static const struct intr_info mps_stat_sram_intr_info[] = {
56d36be4
DM
1744 { 0x1fffff, "MPS statistics SRAM parity error", -1, 1 },
1745 { 0 }
1746 };
005b5717 1747 static const struct intr_info mps_stat_tx_intr_info[] = {
56d36be4
DM
1748 { 0xfffff, "MPS statistics Tx FIFO parity error", -1, 1 },
1749 { 0 }
1750 };
005b5717 1751 static const struct intr_info mps_stat_rx_intr_info[] = {
56d36be4
DM
1752 { 0xffffff, "MPS statistics Rx FIFO parity error", -1, 1 },
1753 { 0 }
1754 };
005b5717 1755 static const struct intr_info mps_cls_intr_info[] = {
56d36be4
DM
1756 { MATCHSRAM, "MPS match SRAM parity error", -1, 1 },
1757 { MATCHTCAM, "MPS match TCAM parity error", -1, 1 },
1758 { HASHSRAM, "MPS hash SRAM parity error", -1, 1 },
1759 { 0 }
1760 };
1761
1762 int fat;
1763
1764 fat = t4_handle_intr_status(adapter, MPS_RX_PERR_INT_CAUSE,
1765 mps_rx_intr_info) +
1766 t4_handle_intr_status(adapter, MPS_TX_INT_CAUSE,
1767 mps_tx_intr_info) +
1768 t4_handle_intr_status(adapter, MPS_TRC_INT_CAUSE,
1769 mps_trc_intr_info) +
1770 t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_SRAM,
1771 mps_stat_sram_intr_info) +
1772 t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_TX_FIFO,
1773 mps_stat_tx_intr_info) +
1774 t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_RX_FIFO,
1775 mps_stat_rx_intr_info) +
1776 t4_handle_intr_status(adapter, MPS_CLS_INT_CAUSE,
1777 mps_cls_intr_info);
1778
1779 t4_write_reg(adapter, MPS_INT_CAUSE, CLSINT | TRCINT |
1780 RXINT | TXINT | STATINT);
1781 t4_read_reg(adapter, MPS_INT_CAUSE); /* flush */
1782 if (fat)
1783 t4_fatal_err(adapter);
1784}

#define MEM_INT_MASK (PERR_INT_CAUSE | ECC_CE_INT_CAUSE | ECC_UE_INT_CAUSE)

/*
 * EDC/MC interrupt handler.
 */
static void mem_intr_handler(struct adapter *adapter, int idx)
{
	static const char name[4][7] = { "EDC0", "EDC1", "MC/MC0", "MC1" };

	unsigned int addr, cnt_addr, v;

	if (idx <= MEM_EDC1) {
		addr = EDC_REG(EDC_INT_CAUSE, idx);
		cnt_addr = EDC_REG(EDC_ECC_STATUS, idx);
	} else if (idx == MEM_MC) {
		if (is_t4(adapter->params.chip)) {
			addr = MC_INT_CAUSE;
			cnt_addr = MC_ECC_STATUS;
		} else {
			addr = MC_P_INT_CAUSE;
			cnt_addr = MC_P_ECC_STATUS;
		}
	} else {
		addr = MC_REG(MC_P_INT_CAUSE, 1);
		cnt_addr = MC_REG(MC_P_ECC_STATUS, 1);
	}

	v = t4_read_reg(adapter, addr) & MEM_INT_MASK;
	if (v & PERR_INT_CAUSE)
		dev_alert(adapter->pdev_dev, "%s FIFO parity error\n",
			  name[idx]);
	if (v & ECC_CE_INT_CAUSE) {
		u32 cnt = ECC_CECNT_GET(t4_read_reg(adapter, cnt_addr));

		t4_write_reg(adapter, cnt_addr, ECC_CECNT_MASK);
		if (printk_ratelimit())
			dev_warn(adapter->pdev_dev,
				 "%u %s correctable ECC data error%s\n",
				 cnt, name[idx], cnt > 1 ? "s" : "");
	}
	if (v & ECC_UE_INT_CAUSE)
		dev_alert(adapter->pdev_dev,
			  "%s uncorrectable ECC data error\n", name[idx]);

	t4_write_reg(adapter, addr, v);
	if (v & (PERR_INT_CAUSE | ECC_UE_INT_CAUSE))
		t4_fatal_err(adapter);
}

/*
 * MA interrupt handler.
 */
static void ma_intr_handler(struct adapter *adap)
{
	u32 v, status = t4_read_reg(adap, MA_INT_CAUSE);

	if (status & MEM_PERR_INT_CAUSE) {
		dev_alert(adap->pdev_dev,
			  "MA parity error, parity status %#x\n",
			  t4_read_reg(adap, MA_PARITY_ERROR_STATUS));
		if (is_t5(adap->params.chip))
			dev_alert(adap->pdev_dev,
				  "MA parity error, parity status %#x\n",
				  t4_read_reg(adap,
					      MA_PARITY_ERROR_STATUS2));
	}
	if (status & MEM_WRAP_INT_CAUSE) {
		v = t4_read_reg(adap, MA_INT_WRAP_STATUS);
		dev_alert(adap->pdev_dev, "MA address wrap-around error by "
			  "client %u to address %#x\n",
			  MEM_WRAP_CLIENT_NUM_GET(v),
			  MEM_WRAP_ADDRESS_GET(v) << 4);
	}
	t4_write_reg(adap, MA_INT_CAUSE, status);
	t4_fatal_err(adap);
}

/*
 * SMB interrupt handler.
 */
static void smb_intr_handler(struct adapter *adap)
{
	static const struct intr_info smb_intr_info[] = {
		{ MSTTXFIFOPARINT, "SMB master Tx FIFO parity error", -1, 1 },
		{ MSTRXFIFOPARINT, "SMB master Rx FIFO parity error", -1, 1 },
		{ SLVFIFOPARINT, "SMB slave FIFO parity error", -1, 1 },
		{ 0 }
	};

	if (t4_handle_intr_status(adap, SMB_INT_CAUSE, smb_intr_info))
		t4_fatal_err(adap);
}

/*
 * NC-SI interrupt handler.
 */
static void ncsi_intr_handler(struct adapter *adap)
{
	static const struct intr_info ncsi_intr_info[] = {
		{ CIM_DM_PRTY_ERR, "NC-SI CIM parity error", -1, 1 },
		{ MPS_DM_PRTY_ERR, "NC-SI MPS parity error", -1, 1 },
		{ TXFIFO_PRTY_ERR, "NC-SI Tx FIFO parity error", -1, 1 },
		{ RXFIFO_PRTY_ERR, "NC-SI Rx FIFO parity error", -1, 1 },
		{ 0 }
	};

	if (t4_handle_intr_status(adap, NCSI_INT_CAUSE, ncsi_intr_info))
		t4_fatal_err(adap);
}

/*
 * XGMAC interrupt handler.
 */
static void xgmac_intr_handler(struct adapter *adap, int port)
{
	u32 v, int_cause_reg;

	if (is_t4(adap->params.chip))
		int_cause_reg = PORT_REG(port, XGMAC_PORT_INT_CAUSE);
	else
		int_cause_reg = T5_PORT_REG(port, MAC_PORT_INT_CAUSE);

	v = t4_read_reg(adap, int_cause_reg);

	v &= TXFIFO_PRTY_ERR | RXFIFO_PRTY_ERR;
	if (!v)
		return;

	if (v & TXFIFO_PRTY_ERR)
		dev_alert(adap->pdev_dev, "XGMAC %d Tx FIFO parity error\n",
			  port);
	if (v & RXFIFO_PRTY_ERR)
		dev_alert(adap->pdev_dev, "XGMAC %d Rx FIFO parity error\n",
			  port);
	t4_write_reg(adap, PORT_REG(port, XGMAC_PORT_INT_CAUSE), v);
	t4_fatal_err(adap);
}

/*
 * PL interrupt handler.
 */
static void pl_intr_handler(struct adapter *adap)
{
	static const struct intr_info pl_intr_info[] = {
		{ FATALPERR, "T4 fatal parity error", -1, 1 },
		{ PERRVFID, "PL VFID_MAP parity error", -1, 1 },
		{ 0 }
	};

	if (t4_handle_intr_status(adap, PL_PL_INT_CAUSE, pl_intr_info))
		t4_fatal_err(adap);
}

#define PF_INTR_MASK (PFSW)
#define GLBL_INTR_MASK (CIM | MPS | PL | PCIE | MC | EDC0 | \
		EDC1 | LE | TP | MA | PM_TX | PM_RX | ULP_RX | \
		CPL_SWITCH | SGE | ULP_TX)

/**
 * t4_slow_intr_handler - control path interrupt handler
 * @adapter: the adapter
 *
 * T4 interrupt handler for non-data global interrupt events, e.g., errors.
 * The designation 'slow' is because it involves register reads, while
 * data interrupts typically don't involve any MMIOs.
 */
int t4_slow_intr_handler(struct adapter *adapter)
{
	u32 cause = t4_read_reg(adapter, PL_INT_CAUSE);

	if (!(cause & GLBL_INTR_MASK))
		return 0;
	if (cause & CIM)
		cim_intr_handler(adapter);
	if (cause & MPS)
		mps_intr_handler(adapter);
	if (cause & NCSI)
		ncsi_intr_handler(adapter);
	if (cause & PL)
		pl_intr_handler(adapter);
	if (cause & SMB)
		smb_intr_handler(adapter);
	if (cause & XGMAC0)
		xgmac_intr_handler(adapter, 0);
	if (cause & XGMAC1)
		xgmac_intr_handler(adapter, 1);
	if (cause & XGMAC_KR0)
		xgmac_intr_handler(adapter, 2);
	if (cause & XGMAC_KR1)
		xgmac_intr_handler(adapter, 3);
	if (cause & PCIE)
		pcie_intr_handler(adapter);
	if (cause & MC)
		mem_intr_handler(adapter, MEM_MC);
	if (!is_t4(adapter->params.chip) && (cause & MC1))
		mem_intr_handler(adapter, MEM_MC1);
	if (cause & EDC0)
		mem_intr_handler(adapter, MEM_EDC0);
	if (cause & EDC1)
		mem_intr_handler(adapter, MEM_EDC1);
	if (cause & LE)
		le_intr_handler(adapter);
	if (cause & TP)
		tp_intr_handler(adapter);
	if (cause & MA)
		ma_intr_handler(adapter);
	if (cause & PM_TX)
		pmtx_intr_handler(adapter);
	if (cause & PM_RX)
		pmrx_intr_handler(adapter);
	if (cause & ULP_RX)
		ulprx_intr_handler(adapter);
	if (cause & CPL_SWITCH)
		cplsw_intr_handler(adapter);
	if (cause & SGE)
		sge_intr_handler(adapter);
	if (cause & ULP_TX)
		ulptx_intr_handler(adapter);

	/* Clear the interrupts just processed for which we are the master. */
	t4_write_reg(adapter, PL_INT_CAUSE, cause & GLBL_INTR_MASK);
	(void) t4_read_reg(adapter, PL_INT_CAUSE); /* flush */
	return 1;
}

/**
 * t4_intr_enable - enable interrupts
 * @adapter: the adapter whose interrupts should be enabled
 *
 * Enable PF-specific interrupts for the calling function and the top-level
 * interrupt concentrator for global interrupts.  Interrupts are already
 * enabled at each module; here we just enable the roots of the interrupt
 * hierarchies.
 *
 * Note: this function should be called only when the driver manages
 * non PF-specific interrupts from the various HW modules.  Only one PCI
 * function at a time should be doing this.
 */
void t4_intr_enable(struct adapter *adapter)
{
	u32 pf = SOURCEPF_GET(t4_read_reg(adapter, PL_WHOAMI));

	t4_write_reg(adapter, SGE_INT_ENABLE3, ERR_CPL_EXCEED_IQE_SIZE |
		     ERR_INVALID_CIDX_INC | ERR_CPL_OPCODE_0 |
		     ERR_DROPPED_DB | ERR_DATA_CPL_ON_HIGH_QID1 |
		     ERR_DATA_CPL_ON_HIGH_QID0 | ERR_BAD_DB_PIDX3 |
		     ERR_BAD_DB_PIDX2 | ERR_BAD_DB_PIDX1 |
		     ERR_BAD_DB_PIDX0 | ERR_ING_CTXT_PRIO |
		     ERR_EGR_CTXT_PRIO | INGRESS_SIZE_ERR |
		     DBFIFO_HP_INT | DBFIFO_LP_INT |
		     EGRESS_SIZE_ERR);
	t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE), PF_INTR_MASK);
	t4_set_reg_field(adapter, PL_INT_MAP0, 0, 1 << pf);
}

/**
 * t4_intr_disable - disable interrupts
 * @adapter: the adapter whose interrupts should be disabled
 *
 * Disable interrupts.  We only disable the top-level interrupt
 * concentrators.  The caller must be a PCI function managing global
 * interrupts.
 */
void t4_intr_disable(struct adapter *adapter)
{
	u32 pf = SOURCEPF_GET(t4_read_reg(adapter, PL_WHOAMI));

	t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE), 0);
	t4_set_reg_field(adapter, PL_INT_MAP0, 1 << pf, 0);
}

/**
 * hash_mac_addr - return the hash value of a MAC address
 * @addr: the 48-bit Ethernet MAC address
 *
 * Hashes a MAC address according to the hash function used by HW inexact
 * (hash) address matching.
 */
static int hash_mac_addr(const u8 *addr)
{
	u32 a = ((u32)addr[0] << 16) | ((u32)addr[1] << 8) | addr[2];
	u32 b = ((u32)addr[3] << 16) | ((u32)addr[4] << 8) | addr[5];
	a ^= b;
	a ^= (a >> 12);
	a ^= (a >> 6);
	return a & 0x3f;
}

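/*
 * Illustrative worked example (not part of the driver): for the MAC
 * address 00:07:43:12:34:56 the fold above computes
 *
 *	a = 0x000743, b = 0x123456
 *	a ^= b        ->  0x123315
 *	a ^= a >> 12  ->  0x123236
 *	a ^= a >> 6   ->  0x127afe
 *	a & 0x3f      ->  0x3e  (hash bucket 62)
 *
 * i.e. the 48-bit address is folded down to a 6-bit index into the
 * 64-entry inexact-match hash table.
 */
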
2074/**
2075 * t4_config_rss_range - configure a portion of the RSS mapping table
2076 * @adapter: the adapter
2077 * @mbox: mbox to use for the FW command
2078 * @viid: virtual interface whose RSS subtable is to be written
2079 * @start: start entry in the table to write
2080 * @n: how many table entries to write
2081 * @rspq: values for the response queue lookup table
2082 * @nrspq: number of values in @rspq
2083 *
2084 * Programs the selected part of the VI's RSS mapping table with the
2085 * provided values. If @nrspq < @n the supplied values are used repeatedly
2086 * until the full table range is populated.
2087 *
2088 * The caller must ensure the values in @rspq are in the range allowed for
2089 * @viid.
2090 */
2091int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
2092 int start, int n, const u16 *rspq, unsigned int nrspq)
2093{
2094 int ret;
2095 const u16 *rsp = rspq;
2096 const u16 *rsp_end = rspq + nrspq;
2097 struct fw_rss_ind_tbl_cmd cmd;
2098
2099 memset(&cmd, 0, sizeof(cmd));
e2ac9628
HS
2100 cmd.op_to_viid = htonl(FW_CMD_OP_V(FW_RSS_IND_TBL_CMD) |
2101 FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
b2e1a3f0 2102 FW_RSS_IND_TBL_CMD_VIID_V(viid));
56d36be4
DM
2103 cmd.retval_len16 = htonl(FW_LEN16(cmd));
2104
2105 /* each fw_rss_ind_tbl_cmd takes up to 32 entries */
2106 while (n > 0) {
2107 int nq = min(n, 32);
2108 __be32 *qp = &cmd.iq0_to_iq2;
2109
2110 cmd.niqid = htons(nq);
2111 cmd.startidx = htons(start);
2112
2113 start += nq;
2114 n -= nq;
2115
2116 while (nq > 0) {
2117 unsigned int v;
2118
b2e1a3f0 2119 v = FW_RSS_IND_TBL_CMD_IQ0_V(*rsp);
56d36be4
DM
2120 if (++rsp >= rsp_end)
2121 rsp = rspq;
b2e1a3f0 2122 v |= FW_RSS_IND_TBL_CMD_IQ1_V(*rsp);
56d36be4
DM
2123 if (++rsp >= rsp_end)
2124 rsp = rspq;
b2e1a3f0 2125 v |= FW_RSS_IND_TBL_CMD_IQ2_V(*rsp);
56d36be4
DM
2126 if (++rsp >= rsp_end)
2127 rsp = rspq;
2128
2129 *qp++ = htonl(v);
2130 nq -= 3;
2131 }
2132
2133 ret = t4_wr_mbox(adapter, mbox, &cmd, sizeof(cmd), NULL);
2134 if (ret)
2135 return ret;
2136 }
2137 return 0;
2138}
2139
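/*
 * Usage sketch (illustrative only; the VIID and queue IDs below are made
 * up).  Spreading four response queues round-robin across a 128-entry RSS
 * slice of a VI might look like:
 *
 *	static const u16 rspq[] = { 64, 65, 66, 67 };
 *	int ret = t4_config_rss_range(adap, adap->mbox, viid, 0, 128,
 *				      rspq, ARRAY_SIZE(rspq));
 *
 * Because nrspq (4) is smaller than n (128), the four queue IDs are simply
 * repeated until all 128 slots are written, 32 slots per mailbox command.
 */
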
2140/**
2141 * t4_config_glbl_rss - configure the global RSS mode
2142 * @adapter: the adapter
2143 * @mbox: mbox to use for the FW command
2144 * @mode: global RSS mode
2145 * @flags: mode-specific flags
2146 *
2147 * Sets the global RSS mode.
2148 */
2149int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode,
2150 unsigned int flags)
2151{
2152 struct fw_rss_glb_config_cmd c;
2153
2154 memset(&c, 0, sizeof(c));
e2ac9628
HS
2155 c.op_to_write = htonl(FW_CMD_OP_V(FW_RSS_GLB_CONFIG_CMD) |
2156 FW_CMD_REQUEST_F | FW_CMD_WRITE_F);
56d36be4
DM
2157 c.retval_len16 = htonl(FW_LEN16(c));
2158 if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_MANUAL) {
b2e1a3f0 2159 c.u.manual.mode_pkd = htonl(FW_RSS_GLB_CONFIG_CMD_MODE_V(mode));
56d36be4
DM
2160 } else if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL) {
2161 c.u.basicvirtual.mode_pkd =
b2e1a3f0 2162 htonl(FW_RSS_GLB_CONFIG_CMD_MODE_V(mode));
56d36be4
DM
2163 c.u.basicvirtual.synmapen_to_hashtoeplitz = htonl(flags);
2164 } else
2165 return -EINVAL;
2166 return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
2167}
2168
56d36be4
DM
2169/**
2170 * t4_tp_get_tcp_stats - read TP's TCP MIB counters
2171 * @adap: the adapter
2172 * @v4: holds the TCP/IP counter values
2173 * @v6: holds the TCP/IPv6 counter values
2174 *
2175 * Returns the values of TP's TCP/IP and TCP/IPv6 MIB counters.
2176 * Either @v4 or @v6 may be %NULL to skip the corresponding stats.
2177 */
2178void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4,
2179 struct tp_tcp_stats *v6)
2180{
2181 u32 val[TP_MIB_TCP_RXT_SEG_LO - TP_MIB_TCP_OUT_RST + 1];
2182
2183#define STAT_IDX(x) ((TP_MIB_TCP_##x) - TP_MIB_TCP_OUT_RST)
2184#define STAT(x) val[STAT_IDX(x)]
2185#define STAT64(x) (((u64)STAT(x##_HI) << 32) | STAT(x##_LO))
2186
2187 if (v4) {
2188 t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, val,
2189 ARRAY_SIZE(val), TP_MIB_TCP_OUT_RST);
2190 v4->tcpOutRsts = STAT(OUT_RST);
2191 v4->tcpInSegs = STAT64(IN_SEG);
2192 v4->tcpOutSegs = STAT64(OUT_SEG);
2193 v4->tcpRetransSegs = STAT64(RXT_SEG);
2194 }
2195 if (v6) {
2196 t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, val,
2197 ARRAY_SIZE(val), TP_MIB_TCP_V6OUT_RST);
2198 v6->tcpOutRsts = STAT(OUT_RST);
2199 v6->tcpInSegs = STAT64(IN_SEG);
2200 v6->tcpOutSegs = STAT64(OUT_SEG);
2201 v6->tcpRetransSegs = STAT64(RXT_SEG);
2202 }
2203#undef STAT64
2204#undef STAT
2205#undef STAT_IDX
2206}
2207
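/*
 * For illustration, STAT64(IN_SEG) above expands (before the #undefs) to
 *
 *	(((u64)val[STAT_IDX(IN_SEG_HI)] << 32) | val[STAT_IDX(IN_SEG_LO)])
 *
 * i.e. each 64-bit MIB counter is read as two adjacent 32-bit words from
 * the TP_MIB_DATA window and recombined.
 */
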
/**
 * t4_read_mtu_tbl - returns the values in the HW path MTU table
 * @adap: the adapter
 * @mtus: where to store the MTU values
 * @mtu_log: where to store the MTU base-2 log (may be %NULL)
 *
 * Reads the HW path MTU table.
 */
void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log)
{
	u32 v;
	int i;

	for (i = 0; i < NMTUS; ++i) {
		t4_write_reg(adap, TP_MTU_TABLE,
			     MTUINDEX(0xff) | MTUVALUE(i));
		v = t4_read_reg(adap, TP_MTU_TABLE);
		mtus[i] = MTUVALUE_GET(v);
		if (mtu_log)
			mtu_log[i] = MTUWIDTH_GET(v);
	}
}

/**
 * t4_tp_wr_bits_indirect - set/clear bits in an indirect TP register
 * @adap: the adapter
 * @addr: the indirect TP register address
 * @mask: specifies the field within the register to modify
 * @val: new value for the field
 *
 * Sets a field of an indirect TP register to the given value.
 */
void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr,
			    unsigned int mask, unsigned int val)
{
	t4_write_reg(adap, TP_PIO_ADDR, addr);
	val |= t4_read_reg(adap, TP_PIO_DATA) & ~mask;
	t4_write_reg(adap, TP_PIO_DATA, val);
}

2248/**
2249 * init_cong_ctrl - initialize congestion control parameters
2250 * @a: the alpha values for congestion control
2251 * @b: the beta values for congestion control
2252 *
2253 * Initialize the congestion control parameters.
2254 */
91744948 2255static void init_cong_ctrl(unsigned short *a, unsigned short *b)
56d36be4
DM
2256{
2257 a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1;
2258 a[9] = 2;
2259 a[10] = 3;
2260 a[11] = 4;
2261 a[12] = 5;
2262 a[13] = 6;
2263 a[14] = 7;
2264 a[15] = 8;
2265 a[16] = 9;
2266 a[17] = 10;
2267 a[18] = 14;
2268 a[19] = 17;
2269 a[20] = 21;
2270 a[21] = 25;
2271 a[22] = 30;
2272 a[23] = 35;
2273 a[24] = 45;
2274 a[25] = 60;
2275 a[26] = 80;
2276 a[27] = 100;
2277 a[28] = 200;
2278 a[29] = 300;
2279 a[30] = 400;
2280 a[31] = 500;
2281
2282 b[0] = b[1] = b[2] = b[3] = b[4] = b[5] = b[6] = b[7] = b[8] = 0;
2283 b[9] = b[10] = 1;
2284 b[11] = b[12] = 2;
2285 b[13] = b[14] = b[15] = b[16] = 3;
2286 b[17] = b[18] = b[19] = b[20] = b[21] = 4;
2287 b[22] = b[23] = b[24] = b[25] = b[26] = b[27] = 5;
2288 b[28] = b[29] = 6;
2289 b[30] = b[31] = 7;
2290}
2291
2292/* The minimum additive increment value for the congestion control table */
2293#define CC_MIN_INCR 2U
2294
2295/**
2296 * t4_load_mtus - write the MTU and congestion control HW tables
2297 * @adap: the adapter
2298 * @mtus: the values for the MTU table
2299 * @alpha: the values for the congestion control alpha parameter
2300 * @beta: the values for the congestion control beta parameter
2301 *
2302 * Write the HW MTU table with the supplied MTUs and the high-speed
2303 * congestion control table with the supplied alpha, beta, and MTUs.
2304 * We write the two tables together because the additive increments
2305 * depend on the MTUs.
2306 */
2307void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
2308 const unsigned short *alpha, const unsigned short *beta)
2309{
2310 static const unsigned int avg_pkts[NCCTRL_WIN] = {
2311 2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
2312 896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
2313 28672, 40960, 57344, 81920, 114688, 163840, 229376
2314 };
2315
2316 unsigned int i, w;
2317
2318 for (i = 0; i < NMTUS; ++i) {
2319 unsigned int mtu = mtus[i];
2320 unsigned int log2 = fls(mtu);
2321
2322 if (!(mtu & ((1 << log2) >> 2))) /* round */
2323 log2--;
2324 t4_write_reg(adap, TP_MTU_TABLE, MTUINDEX(i) |
2325 MTUWIDTH(log2) | MTUVALUE(mtu));
2326
2327 for (w = 0; w < NCCTRL_WIN; ++w) {
2328 unsigned int inc;
2329
2330 inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
2331 CC_MIN_INCR);
2332
2333 t4_write_reg(adap, TP_CCTRL_TABLE, (i << 21) |
2334 (w << 16) | (beta[w] << 13) | inc);
2335 }
2336 }
2337}
2338
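/*
 * Worked example (illustrative): for mtu = 1500 in a window where
 * avg_pkts[w] = 80 and alpha[w] = 2, the additive increment is
 *
 *	inc = max(((1500 - 40) * 2) / 80, CC_MIN_INCR)
 *	    = max(36, 2) = 36
 *
 * and the MTU's base-2 log is rounded as follows: fls(1500) = 11, but
 * since bit 9 (512) of 1500 is clear the width is rounded down to
 * MTUWIDTH = 10.
 */
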
/**
 * get_mps_bg_map - return the buffer groups associated with a port
 * @adap: the adapter
 * @idx: the port index
 *
 * Returns a bitmap indicating which MPS buffer groups are associated
 * with the given port.  Bit i is set if buffer group i is used by the
 * port.
 */
static unsigned int get_mps_bg_map(struct adapter *adap, int idx)
{
	u32 n = NUMPORTS_GET(t4_read_reg(adap, MPS_CMN_CTL));

	if (n == 0)
		return idx == 0 ? 0xf : 0;
	if (n == 1)
		return idx < 2 ? (3 << (2 * idx)) : 0;
	return 1 << idx;
}

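/*
 * Example (illustrative): with the NUMPORTS encoding n == 1 (a 2-port
 * configuration) port 0 gets buffer groups 0-1 (map 0x3) and port 1 gets
 * groups 2-3 (map 0xc); with n == 0 the single port owns all four groups
 * (0xf), and otherwise each port owns exactly one group (1 << idx).
 */
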
/**
 * t4_get_port_type_description - return Port Type string description
 * @port_type: firmware Port Type enumeration
 */
const char *t4_get_port_type_description(enum fw_port_type port_type)
{
	static const char *const port_type_description[] = {
		"R XFI",
		"R XAUI",
		"T SGMII",
		"T XFI",
		"T XAUI",
		"KX4",
		"CX4",
		"KX",
		"KR",
		"R SFP+",
		"KR/KX",
		"KR/KX/KX4",
		"R QSFP_10G",
		"R QSA",
		"R QSFP",
		"R BP40_BA",
	};

	if (port_type < ARRAY_SIZE(port_type_description))
		return port_type_description[port_type];
	return "UNKNOWN";
}

/**
 * t4_get_port_stats - collect port statistics
 * @adap: the adapter
 * @idx: the port index
 * @p: the stats structure to fill
 *
 * Collect statistics related to the given port from HW.
 */
void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p)
{
	u32 bgmap = get_mps_bg_map(adap, idx);

#define GET_STAT(name) \
	t4_read_reg64(adap, \
	(is_t4(adap->params.chip) ? PORT_REG(idx, MPS_PORT_STAT_##name##_L) : \
	T5_PORT_REG(idx, MPS_PORT_STAT_##name##_L)))
#define GET_STAT_COM(name) t4_read_reg64(adap, MPS_STAT_##name##_L)

	p->tx_octets           = GET_STAT(TX_PORT_BYTES);
	p->tx_frames           = GET_STAT(TX_PORT_FRAMES);
	p->tx_bcast_frames     = GET_STAT(TX_PORT_BCAST);
	p->tx_mcast_frames     = GET_STAT(TX_PORT_MCAST);
	p->tx_ucast_frames     = GET_STAT(TX_PORT_UCAST);
	p->tx_error_frames     = GET_STAT(TX_PORT_ERROR);
	p->tx_frames_64        = GET_STAT(TX_PORT_64B);
	p->tx_frames_65_127    = GET_STAT(TX_PORT_65B_127B);
	p->tx_frames_128_255   = GET_STAT(TX_PORT_128B_255B);
	p->tx_frames_256_511   = GET_STAT(TX_PORT_256B_511B);
	p->tx_frames_512_1023  = GET_STAT(TX_PORT_512B_1023B);
	p->tx_frames_1024_1518 = GET_STAT(TX_PORT_1024B_1518B);
	p->tx_frames_1519_max  = GET_STAT(TX_PORT_1519B_MAX);
	p->tx_drop             = GET_STAT(TX_PORT_DROP);
	p->tx_pause            = GET_STAT(TX_PORT_PAUSE);
	p->tx_ppp0             = GET_STAT(TX_PORT_PPP0);
	p->tx_ppp1             = GET_STAT(TX_PORT_PPP1);
	p->tx_ppp2             = GET_STAT(TX_PORT_PPP2);
	p->tx_ppp3             = GET_STAT(TX_PORT_PPP3);
	p->tx_ppp4             = GET_STAT(TX_PORT_PPP4);
	p->tx_ppp5             = GET_STAT(TX_PORT_PPP5);
	p->tx_ppp6             = GET_STAT(TX_PORT_PPP6);
	p->tx_ppp7             = GET_STAT(TX_PORT_PPP7);

	p->rx_octets           = GET_STAT(RX_PORT_BYTES);
	p->rx_frames           = GET_STAT(RX_PORT_FRAMES);
	p->rx_bcast_frames     = GET_STAT(RX_PORT_BCAST);
	p->rx_mcast_frames     = GET_STAT(RX_PORT_MCAST);
	p->rx_ucast_frames     = GET_STAT(RX_PORT_UCAST);
	p->rx_too_long         = GET_STAT(RX_PORT_MTU_ERROR);
	p->rx_jabber           = GET_STAT(RX_PORT_MTU_CRC_ERROR);
	p->rx_fcs_err          = GET_STAT(RX_PORT_CRC_ERROR);
	p->rx_len_err          = GET_STAT(RX_PORT_LEN_ERROR);
	p->rx_symbol_err       = GET_STAT(RX_PORT_SYM_ERROR);
	p->rx_runt             = GET_STAT(RX_PORT_LESS_64B);
	p->rx_frames_64        = GET_STAT(RX_PORT_64B);
	p->rx_frames_65_127    = GET_STAT(RX_PORT_65B_127B);
	p->rx_frames_128_255   = GET_STAT(RX_PORT_128B_255B);
	p->rx_frames_256_511   = GET_STAT(RX_PORT_256B_511B);
	p->rx_frames_512_1023  = GET_STAT(RX_PORT_512B_1023B);
	p->rx_frames_1024_1518 = GET_STAT(RX_PORT_1024B_1518B);
	p->rx_frames_1519_max  = GET_STAT(RX_PORT_1519B_MAX);
	p->rx_pause            = GET_STAT(RX_PORT_PAUSE);
	p->rx_ppp0             = GET_STAT(RX_PORT_PPP0);
	p->rx_ppp1             = GET_STAT(RX_PORT_PPP1);
	p->rx_ppp2             = GET_STAT(RX_PORT_PPP2);
	p->rx_ppp3             = GET_STAT(RX_PORT_PPP3);
	p->rx_ppp4             = GET_STAT(RX_PORT_PPP4);
	p->rx_ppp5             = GET_STAT(RX_PORT_PPP5);
	p->rx_ppp6             = GET_STAT(RX_PORT_PPP6);
	p->rx_ppp7             = GET_STAT(RX_PORT_PPP7);

	p->rx_ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_DROP_FRAME) : 0;
	p->rx_ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_DROP_FRAME) : 0;
	p->rx_ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_DROP_FRAME) : 0;
	p->rx_ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_DROP_FRAME) : 0;
	p->rx_trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_TRUNC_FRAME) : 0;
	p->rx_trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_TRUNC_FRAME) : 0;
	p->rx_trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_TRUNC_FRAME) : 0;
	p->rx_trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_TRUNC_FRAME) : 0;

#undef GET_STAT
#undef GET_STAT_COM
}

/**
 * t4_wol_magic_enable - enable/disable magic packet WoL
 * @adap: the adapter
 * @port: the physical port index
 * @addr: MAC address expected in magic packets, %NULL to disable
 *
 * Enables/disables magic packet wake-on-LAN for the selected port.
 */
void t4_wol_magic_enable(struct adapter *adap, unsigned int port,
			 const u8 *addr)
{
	u32 mag_id_reg_l, mag_id_reg_h, port_cfg_reg;

	if (is_t4(adap->params.chip)) {
		mag_id_reg_l = PORT_REG(port, XGMAC_PORT_MAGIC_MACID_LO);
		mag_id_reg_h = PORT_REG(port, XGMAC_PORT_MAGIC_MACID_HI);
		port_cfg_reg = PORT_REG(port, XGMAC_PORT_CFG2);
	} else {
		mag_id_reg_l = T5_PORT_REG(port, MAC_PORT_MAGIC_MACID_LO);
		mag_id_reg_h = T5_PORT_REG(port, MAC_PORT_MAGIC_MACID_HI);
		port_cfg_reg = T5_PORT_REG(port, MAC_PORT_CFG2);
	}

	if (addr) {
		t4_write_reg(adap, mag_id_reg_l,
			     (addr[2] << 24) | (addr[3] << 16) |
			     (addr[4] << 8) | addr[5]);
		t4_write_reg(adap, mag_id_reg_h,
			     (addr[0] << 8) | addr[1]);
	}
	t4_set_reg_field(adap, port_cfg_reg, MAGICEN,
			 addr ? MAGICEN : 0);
}

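/*
 * Usage sketch (illustrative): enabling magic-packet WoL for port 0 with
 * the netdev's own address, and disabling it again, could look like
 *
 *	t4_wol_magic_enable(adap, 0, netdev->dev_addr);
 *	...
 *	t4_wol_magic_enable(adap, 0, NULL);
 *
 * Passing NULL leaves the MAC-ID registers untouched and simply clears
 * the MAGICEN bit in the port config register.
 */
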
2506/**
2507 * t4_wol_pat_enable - enable/disable pattern-based WoL
2508 * @adap: the adapter
2509 * @port: the physical port index
2510 * @map: bitmap of which HW pattern filters to set
2511 * @mask0: byte mask for bytes 0-63 of a packet
2512 * @mask1: byte mask for bytes 64-127 of a packet
2513 * @crc: Ethernet CRC for selected bytes
2514 * @enable: enable/disable switch
2515 *
2516 * Sets the pattern filters indicated in @map to mask out the bytes
2517 * specified in @mask0/@mask1 in received packets and compare the CRC of
2518 * the resulting packet against @crc. If @enable is %true pattern-based
2519 * WoL is enabled, otherwise disabled.
2520 */
2521int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map,
2522 u64 mask0, u64 mask1, unsigned int crc, bool enable)
2523{
2524 int i;
0a57a536
SR
2525 u32 port_cfg_reg;
2526
d14807dd 2527 if (is_t4(adap->params.chip))
0a57a536
SR
2528 port_cfg_reg = PORT_REG(port, XGMAC_PORT_CFG2);
2529 else
2530 port_cfg_reg = T5_PORT_REG(port, MAC_PORT_CFG2);
56d36be4
DM
2531
2532 if (!enable) {
0a57a536 2533 t4_set_reg_field(adap, port_cfg_reg, PATEN, 0);
56d36be4
DM
2534 return 0;
2535 }
2536 if (map > 0xff)
2537 return -EINVAL;
2538
0a57a536 2539#define EPIO_REG(name) \
d14807dd 2540 (is_t4(adap->params.chip) ? PORT_REG(port, XGMAC_PORT_EPIO_##name) : \
0a57a536 2541 T5_PORT_REG(port, MAC_PORT_EPIO_##name))
56d36be4
DM
2542
2543 t4_write_reg(adap, EPIO_REG(DATA1), mask0 >> 32);
2544 t4_write_reg(adap, EPIO_REG(DATA2), mask1);
2545 t4_write_reg(adap, EPIO_REG(DATA3), mask1 >> 32);
2546
2547 for (i = 0; i < NWOL_PAT; i++, map >>= 1) {
2548 if (!(map & 1))
2549 continue;
2550
2551 /* write byte masks */
2552 t4_write_reg(adap, EPIO_REG(DATA0), mask0);
2553 t4_write_reg(adap, EPIO_REG(OP), ADDRESS(i) | EPIOWR);
2554 t4_read_reg(adap, EPIO_REG(OP)); /* flush */
ce91a923 2555 if (t4_read_reg(adap, EPIO_REG(OP)) & SF_BUSY)
56d36be4
DM
2556 return -ETIMEDOUT;
2557
2558 /* write CRC */
2559 t4_write_reg(adap, EPIO_REG(DATA0), crc);
2560 t4_write_reg(adap, EPIO_REG(OP), ADDRESS(i + 32) | EPIOWR);
2561 t4_read_reg(adap, EPIO_REG(OP)); /* flush */
ce91a923 2562 if (t4_read_reg(adap, EPIO_REG(OP)) & SF_BUSY)
56d36be4
DM
2563 return -ETIMEDOUT;
2564 }
2565#undef EPIO_REG
2566
2567 t4_set_reg_field(adap, PORT_REG(port, XGMAC_PORT_CFG2), 0, PATEN);
2568 return 0;
2569}
2570
/* t4_mk_filtdelwr - create a delete filter WR
 * @ftid: the filter ID
 * @wr: the filter work request to populate
 * @qid: ingress queue to receive the delete notification
 *
 * Creates a filter work request to delete the supplied filter.  If @qid is
 * negative the delete notification is suppressed.
 */
void t4_mk_filtdelwr(unsigned int ftid, struct fw_filter_wr *wr, int qid)
{
	memset(wr, 0, sizeof(*wr));
	wr->op_pkd = htonl(FW_WR_OP_V(FW_FILTER_WR));
	wr->len16_pkd = htonl(FW_WR_LEN16_V(sizeof(*wr) / 16));
	wr->tid_to_iq = htonl(FW_FILTER_WR_TID_V(ftid) |
			      FW_FILTER_WR_NOREPLY_V(qid < 0));
	wr->del_filter_to_l2tix = htonl(FW_FILTER_WR_DEL_FILTER_F);
	if (qid >= 0)
		wr->rx_chan_rx_rpl_iq = htons(FW_FILTER_WR_RX_RPL_IQ_V(qid));
}

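/*
 * Usage sketch (illustrative; the ftid and qid are made up): to delete
 * filter 100 and steer the delete notification to ingress queue 6, a
 * caller would do
 *
 *	struct fw_filter_wr wr;
 *
 *	t4_mk_filtdelwr(100, &wr, 6);
 *
 * and then hand the work request to the firmware.  Passing a negative qid
 * instead sets the NOREPLY flag so no notification is generated.
 */
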
#define INIT_CMD(var, cmd, rd_wr) do { \
	(var).op_to_write = htonl(FW_CMD_OP_V(FW_##cmd##_CMD) | \
				  FW_CMD_REQUEST_F | FW_CMD_##rd_wr##_F); \
	(var).retval_len16 = htonl(FW_LEN16(var)); \
} while (0)

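/*
 * For illustration, INIT_CMD(c, BYE, WRITE) expands to
 *
 *	(c).op_to_write = htonl(FW_CMD_OP_V(FW_BYE_CMD) |
 *				FW_CMD_REQUEST_F | FW_CMD_WRITE_F);
 *	(c).retval_len16 = htonl(FW_LEN16(c));
 *
 * i.e. it fills in the common opcode/length header shared by the simple
 * firmware commands issued below.
 */
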
int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox,
			 u32 addr, u32 val)
{
	struct fw_ldst_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_addrspace = htonl(FW_CMD_OP_V(FW_LDST_CMD) | FW_CMD_REQUEST_F |
				  FW_CMD_WRITE_F |
				  FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_FIRMWARE));
	c.cycles_to_len16 = htonl(FW_LEN16(c));
	c.u.addrval.addr = htonl(addr);
	c.u.addrval.val = htonl(val);

	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}

/**
 * t4_mdio_rd - read a PHY register through MDIO
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @phy_addr: the PHY address
 * @mmd: the PHY MMD to access (0 for clause 22 PHYs)
 * @reg: the register to read
 * @valp: where to store the value
 *
 * Issues a FW command through the given mailbox to read a PHY register.
 */
int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
	       unsigned int mmd, unsigned int reg, u16 *valp)
{
	int ret;
	struct fw_ldst_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_addrspace = htonl(FW_CMD_OP_V(FW_LDST_CMD) | FW_CMD_REQUEST_F |
		FW_CMD_READ_F | FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_MDIO));
	c.cycles_to_len16 = htonl(FW_LEN16(c));
	c.u.mdio.paddr_mmd = htons(FW_LDST_CMD_PADDR_V(phy_addr) |
				   FW_LDST_CMD_MMD_V(mmd));
	c.u.mdio.raddr = htons(reg);

	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
	if (ret == 0)
		*valp = ntohs(c.u.mdio.rval);
	return ret;
}

/**
 * t4_mdio_wr - write a PHY register through MDIO
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @phy_addr: the PHY address
 * @mmd: the PHY MMD to access (0 for clause 22 PHYs)
 * @reg: the register to write
 * @val: value to write
 *
 * Issues a FW command through the given mailbox to write a PHY register.
 */
int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
	       unsigned int mmd, unsigned int reg, u16 val)
{
	struct fw_ldst_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_addrspace = htonl(FW_CMD_OP_V(FW_LDST_CMD) | FW_CMD_REQUEST_F |
		FW_CMD_WRITE_F | FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_MDIO));
	c.cycles_to_len16 = htonl(FW_LEN16(c));
	c.u.mdio.paddr_mmd = htons(FW_LDST_CMD_PADDR_V(phy_addr) |
				   FW_LDST_CMD_MMD_V(mmd));
	c.u.mdio.raddr = htons(reg);
	c.u.mdio.rval = htons(val);

	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}

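/*
 * Usage sketch (illustrative): a clause-45 read of the status register in
 * the PMA/PMD MMD of the PHY at address 0 might look like
 *
 *	u16 stat;
 *	int ret = t4_mdio_rd(adap, adap->mbox, 0, MDIO_MMD_PMAPMD,
 *			     MDIO_STAT1, &stat);
 *
 * For clause-22 PHYs, pass mmd == 0.  (MDIO_MMD_PMAPMD and MDIO_STAT1 are
 * the generic constants from <linux/mdio.h>.)
 */
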
/**
 * t4_sge_decode_idma_state - decode the idma state
 * @adapter: the adapter
 * @state: the state idma is stuck in
 */
void t4_sge_decode_idma_state(struct adapter *adapter, int state)
{
	static const char * const t4_decode[] = {
		"IDMA_IDLE",
		"IDMA_PUSH_MORE_CPL_FIFO",
		"IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO",
		"Not used",
		"IDMA_PHYSADDR_SEND_PCIEHDR",
		"IDMA_PHYSADDR_SEND_PAYLOAD_FIRST",
		"IDMA_PHYSADDR_SEND_PAYLOAD",
		"IDMA_SEND_FIFO_TO_IMSG",
		"IDMA_FL_REQ_DATA_FL_PREP",
		"IDMA_FL_REQ_DATA_FL",
		"IDMA_FL_DROP",
		"IDMA_FL_H_REQ_HEADER_FL",
		"IDMA_FL_H_SEND_PCIEHDR",
		"IDMA_FL_H_PUSH_CPL_FIFO",
		"IDMA_FL_H_SEND_CPL",
		"IDMA_FL_H_SEND_IP_HDR_FIRST",
		"IDMA_FL_H_SEND_IP_HDR",
		"IDMA_FL_H_REQ_NEXT_HEADER_FL",
		"IDMA_FL_H_SEND_NEXT_PCIEHDR",
		"IDMA_FL_H_SEND_IP_HDR_PADDING",
		"IDMA_FL_D_SEND_PCIEHDR",
		"IDMA_FL_D_SEND_CPL_AND_IP_HDR",
		"IDMA_FL_D_REQ_NEXT_DATA_FL",
		"IDMA_FL_SEND_PCIEHDR",
		"IDMA_FL_PUSH_CPL_FIFO",
		"IDMA_FL_SEND_CPL",
		"IDMA_FL_SEND_PAYLOAD_FIRST",
		"IDMA_FL_SEND_PAYLOAD",
		"IDMA_FL_REQ_NEXT_DATA_FL",
		"IDMA_FL_SEND_NEXT_PCIEHDR",
		"IDMA_FL_SEND_PADDING",
		"IDMA_FL_SEND_COMPLETION_TO_IMSG",
		"IDMA_FL_SEND_FIFO_TO_IMSG",
		"IDMA_FL_REQ_DATAFL_DONE",
		"IDMA_FL_REQ_HEADERFL_DONE",
	};
	static const char * const t5_decode[] = {
		"IDMA_IDLE",
		"IDMA_ALMOST_IDLE",
		"IDMA_PUSH_MORE_CPL_FIFO",
		"IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO",
		"IDMA_SGEFLRFLUSH_SEND_PCIEHDR",
		"IDMA_PHYSADDR_SEND_PCIEHDR",
		"IDMA_PHYSADDR_SEND_PAYLOAD_FIRST",
		"IDMA_PHYSADDR_SEND_PAYLOAD",
		"IDMA_SEND_FIFO_TO_IMSG",
		"IDMA_FL_REQ_DATA_FL",
		"IDMA_FL_DROP",
		"IDMA_FL_DROP_SEND_INC",
		"IDMA_FL_H_REQ_HEADER_FL",
		"IDMA_FL_H_SEND_PCIEHDR",
		"IDMA_FL_H_PUSH_CPL_FIFO",
		"IDMA_FL_H_SEND_CPL",
		"IDMA_FL_H_SEND_IP_HDR_FIRST",
		"IDMA_FL_H_SEND_IP_HDR",
		"IDMA_FL_H_REQ_NEXT_HEADER_FL",
		"IDMA_FL_H_SEND_NEXT_PCIEHDR",
		"IDMA_FL_H_SEND_IP_HDR_PADDING",
		"IDMA_FL_D_SEND_PCIEHDR",
		"IDMA_FL_D_SEND_CPL_AND_IP_HDR",
		"IDMA_FL_D_REQ_NEXT_DATA_FL",
		"IDMA_FL_SEND_PCIEHDR",
		"IDMA_FL_PUSH_CPL_FIFO",
		"IDMA_FL_SEND_CPL",
		"IDMA_FL_SEND_PAYLOAD_FIRST",
		"IDMA_FL_SEND_PAYLOAD",
		"IDMA_FL_REQ_NEXT_DATA_FL",
		"IDMA_FL_SEND_NEXT_PCIEHDR",
		"IDMA_FL_SEND_PADDING",
		"IDMA_FL_SEND_COMPLETION_TO_IMSG",
	};
	static const u32 sge_regs[] = {
		SGE_DEBUG_DATA_LOW_INDEX_2,
		SGE_DEBUG_DATA_LOW_INDEX_3,
		SGE_DEBUG_DATA_HIGH_INDEX_10,
	};
	const char **sge_idma_decode;
	int sge_idma_decode_nstates;
	int i;

	if (is_t4(adapter->params.chip)) {
		sge_idma_decode = (const char **)t4_decode;
		sge_idma_decode_nstates = ARRAY_SIZE(t4_decode);
	} else {
		sge_idma_decode = (const char **)t5_decode;
		sge_idma_decode_nstates = ARRAY_SIZE(t5_decode);
	}

	if (state < sge_idma_decode_nstates)
		CH_WARN(adapter, "idma state %s\n", sge_idma_decode[state]);
	else
		CH_WARN(adapter, "idma state %d unknown\n", state);

	for (i = 0; i < ARRAY_SIZE(sge_regs); i++)
		CH_WARN(adapter, "SGE register %#x value %#x\n",
			sge_regs[i], t4_read_reg(adapter, sge_regs[i]));
}

/**
 * t4_fw_hello - establish communication with FW
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @evt_mbox: mailbox to receive async FW events
 * @master: specifies the caller's willingness to be the device master
 * @state: returns the current device state (if non-NULL)
 *
 * Issues a command to establish communication with FW.  Returns either
 * an error (negative integer) or the mailbox of the Master PF.
 */
int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox,
		enum dev_master master, enum dev_state *state)
{
	int ret;
	struct fw_hello_cmd c;
	u32 v;
	unsigned int master_mbox;
	int retries = FW_CMD_HELLO_RETRIES;

retry:
	memset(&c, 0, sizeof(c));
	INIT_CMD(c, HELLO, WRITE);
	c.err_to_clearinit = htonl(
		FW_HELLO_CMD_MASTERDIS_V(master == MASTER_CANT) |
		FW_HELLO_CMD_MASTERFORCE_V(master == MASTER_MUST) |
		FW_HELLO_CMD_MBMASTER_V(master == MASTER_MUST ? mbox :
					FW_HELLO_CMD_MBMASTER_M) |
		FW_HELLO_CMD_MBASYNCNOT_V(evt_mbox) |
		FW_HELLO_CMD_STAGE_V(fw_hello_cmd_stage_os) |
		FW_HELLO_CMD_CLEARINIT_F);

	/*
	 * Issue the HELLO command to the firmware.  If it's not successful
	 * but indicates that we got a "busy" or "timeout" condition, retry
	 * the HELLO until we exhaust our retry limit.  If we do exceed our
	 * retry limit, check to see if the firmware left us any error
	 * information and report that if so.
	 */
	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
	if (ret < 0) {
		if ((ret == -EBUSY || ret == -ETIMEDOUT) && retries-- > 0)
			goto retry;
		if (t4_read_reg(adap, MA_PCIE_FW) & PCIE_FW_ERR)
			t4_report_fw_error(adap);
		return ret;
	}

	v = ntohl(c.err_to_clearinit);
	master_mbox = FW_HELLO_CMD_MBMASTER_G(v);
	if (state) {
		if (v & FW_HELLO_CMD_ERR_F)
			*state = DEV_STATE_ERR;
		else if (v & FW_HELLO_CMD_INIT_F)
			*state = DEV_STATE_INIT;
		else
			*state = DEV_STATE_UNINIT;
	}

	/*
	 * If we're not the Master PF then we need to wait around for the
	 * Master PF Driver to finish setting up the adapter.
	 *
	 * Note that we also do this wait if we're a non-Master-capable PF and
	 * there is no current Master PF; a Master PF may show up momentarily
	 * and we wouldn't want to fail pointlessly.  (This can happen when an
	 * OS loads lots of different drivers rapidly at the same time.)  In
	 * this case, the Master PF returned by the firmware will be
	 * PCIE_FW_MASTER_M so the test below will work ...
	 */
	if ((v & (FW_HELLO_CMD_ERR_F|FW_HELLO_CMD_INIT_F)) == 0 &&
	    master_mbox != mbox) {
		int waiting = FW_CMD_HELLO_TIMEOUT;

		/*
		 * Wait for the firmware to either indicate an error or
		 * initialized state.  If we see either of these we bail out
		 * and report the issue to the caller.  If we exhaust the
		 * "hello timeout" and we haven't exhausted our retries, try
		 * again.  Otherwise bail with a timeout error.
		 */
		for (;;) {
			u32 pcie_fw;

			msleep(50);
			waiting -= 50;

			/*
			 * If neither Error nor Initialized are indicated
			 * by the firmware keep waiting till we exhaust our
			 * timeout ... and then retry if we haven't exhausted
			 * our retries ...
			 */
			pcie_fw = t4_read_reg(adap, MA_PCIE_FW);
			if (!(pcie_fw & (PCIE_FW_ERR|PCIE_FW_INIT))) {
				if (waiting <= 0) {
					if (retries-- > 0)
						goto retry;

					return -ETIMEDOUT;
				}
				continue;
			}

			/*
			 * We either have an Error or Initialized condition;
			 * report errors preferentially.
			 */
			if (state) {
				if (pcie_fw & PCIE_FW_ERR)
					*state = DEV_STATE_ERR;
				else if (pcie_fw & PCIE_FW_INIT)
					*state = DEV_STATE_INIT;
			}

			/*
			 * If we arrived before a Master PF was selected and
			 * there's now a valid Master PF, grab its identity
			 * for our caller.
			 */
			if (master_mbox == PCIE_FW_MASTER_M &&
			    (pcie_fw & PCIE_FW_MASTER_VLD))
				master_mbox = PCIE_FW_MASTER_G(pcie_fw);
			break;
		}
	}

	return master_mbox;
}

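/*
 * Usage sketch (illustrative): a PF driver typically negotiates mastership
 * right after resetting the chip, e.g.
 *
 *	enum dev_state state;
 *	int master_mbox = t4_fw_hello(adap, adap->mbox, adap->mbox,
 *				      MASTER_MAY, &state);
 *	if (master_mbox < 0)
 *		return master_mbox;	(no contact with the firmware)
 *
 * On success the return value is the mailbox of the Master PF (which may
 * or may not be ours) and *state reports whether the device is
 * uninitialized, initialized, or in error.
 */
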
2908/**
2909 * t4_fw_bye - end communication with FW
2910 * @adap: the adapter
2911 * @mbox: mailbox to use for the FW command
2912 *
2913 * Issues a command to terminate communication with FW.
2914 */
2915int t4_fw_bye(struct adapter *adap, unsigned int mbox)
2916{
2917 struct fw_bye_cmd c;
2918
0062b15c 2919 memset(&c, 0, sizeof(c));
56d36be4
DM
2920 INIT_CMD(c, BYE, WRITE);
2921 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2922}
2923
2924/**
2925 * t4_init_cmd - ask FW to initialize the device
2926 * @adap: the adapter
2927 * @mbox: mailbox to use for the FW command
2928 *
2929 * Issues a command to FW to partially initialize the device. This
2930 * performs initialization that generally doesn't depend on user input.
2931 */
2932int t4_early_init(struct adapter *adap, unsigned int mbox)
2933{
2934 struct fw_initialize_cmd c;
2935
0062b15c 2936 memset(&c, 0, sizeof(c));
56d36be4
DM
2937 INIT_CMD(c, INITIALIZE, WRITE);
2938 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2939}
2940
2941/**
2942 * t4_fw_reset - issue a reset to FW
2943 * @adap: the adapter
2944 * @mbox: mailbox to use for the FW command
2945 * @reset: specifies the type of reset to perform
2946 *
2947 * Issues a reset command of the specified type to FW.
2948 */
2949int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset)
2950{
2951 struct fw_reset_cmd c;
2952
0062b15c 2953 memset(&c, 0, sizeof(c));
56d36be4
DM
2954 INIT_CMD(c, RESET, WRITE);
2955 c.val = htonl(reset);
2956 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2957}
2958
/**
 * t4_fw_halt - issue a reset/halt to FW and put uP into RESET
 * @adap: the adapter
 * @mbox: mailbox to use for the FW RESET command (if desired)
 * @force: force uP into RESET even if FW RESET command fails
 *
 * Issues a RESET command to firmware (if desired) with a HALT indication
 * and then puts the microprocessor into RESET state.  The RESET command
 * will only be issued if a legitimate mailbox is provided (mbox <=
 * PCIE_FW_MASTER_M).
 *
 * This is generally used in order for the host to safely manipulate the
 * adapter without fear of conflicting with whatever the firmware might
 * be doing.  The only way out of this state is to RESTART the firmware
 * ...
 */
static int t4_fw_halt(struct adapter *adap, unsigned int mbox, int force)
{
	int ret = 0;

	/*
	 * If a legitimate mailbox is provided, issue a RESET command
	 * with a HALT indication.
	 */
	if (mbox <= PCIE_FW_MASTER_M) {
		struct fw_reset_cmd c;

		memset(&c, 0, sizeof(c));
		INIT_CMD(c, RESET, WRITE);
		c.val = htonl(PIORST | PIORSTMODE);
		c.halt_pkd = htonl(FW_RESET_CMD_HALT_F);
		ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
	}

	/*
	 * Normally we won't complete the operation if the firmware RESET
	 * command fails but if our caller insists we'll go ahead and put the
	 * uP into RESET.  This can be useful if the firmware is hung or even
	 * missing ...  We'll have to take the risk of putting the uP into
	 * RESET without the cooperation of firmware in that case.
	 *
	 * We also force the firmware's HALT flag to be on in case we bypassed
	 * the firmware RESET command above or we're dealing with old firmware
	 * which doesn't have the HALT capability.  This will serve as a flag
	 * for the incoming firmware to know that it's coming out of a HALT
	 * rather than a RESET ... if it's new enough to understand that ...
	 */
	if (ret == 0 || force) {
		t4_set_reg_field(adap, CIM_BOOT_CFG, UPCRST, UPCRST);
		t4_set_reg_field(adap, PCIE_FW, PCIE_FW_HALT_F,
				 PCIE_FW_HALT_F);
	}

	/*
	 * And we always return the result of the firmware RESET command
	 * even when we force the uP into RESET ...
	 */
	return ret;
}

3019/**
3020 * t4_fw_restart - restart the firmware by taking the uP out of RESET
3021 * @adap: the adapter
3022 * @reset: if we want to do a RESET to restart things
3023 *
3024 * Restart firmware previously halted by t4_fw_halt(). On successful
3025 * return the previous PF Master remains as the new PF Master and there
3026 * is no need to issue a new HELLO command, etc.
3027 *
3028 * We do this in two ways:
3029 *
3030 * 1. If we're dealing with newer firmware we'll simply want to take
3031 * the chip's microprocessor out of RESET. This will cause the
3032 * firmware to start up from its start vector. And then we'll loop
3033 * until the firmware indicates it's started again (PCIE_FW.HALT
3034 * reset to 0) or we timeout.
3035 *
3036 * 2. If we're dealing with older firmware then we'll need to RESET
3037 * the chip since older firmware won't recognize the PCIE_FW.HALT
3038 * flag and automatically RESET itself on startup.
3039 */
de5b8677 3040static int t4_fw_restart(struct adapter *adap, unsigned int mbox, int reset)
26f7cbc0
VP
3041{
3042 if (reset) {
3043 /*
3044 * Since we're directing the RESET instead of the firmware
3045 * doing it automatically, we need to clear the PCIE_FW.HALT
3046 * bit.
3047 */
b2e1a3f0 3048 t4_set_reg_field(adap, PCIE_FW, PCIE_FW_HALT_F, 0);
26f7cbc0
VP
3049
3050 /*
3051 * If we've been given a valid mailbox, first try to get the
3052 * firmware to do the RESET. If that works, great and we can
3053 * return success. Otherwise, if we haven't been given a
3054 * valid mailbox or the RESET command failed, fall back to
3055 * hitting the chip with a hammer.
3056 */
b2e1a3f0 3057 if (mbox <= PCIE_FW_MASTER_M) {
26f7cbc0
VP
3058 t4_set_reg_field(adap, CIM_BOOT_CFG, UPCRST, 0);
3059 msleep(100);
3060 if (t4_fw_reset(adap, mbox,
3061 PIORST | PIORSTMODE) == 0)
3062 return 0;
3063 }
3064
3065 t4_write_reg(adap, PL_RST, PIORST | PIORSTMODE);
3066 msleep(2000);
3067 } else {
3068 int ms;
3069
3070 t4_set_reg_field(adap, CIM_BOOT_CFG, UPCRST, 0);
3071 for (ms = 0; ms < FW_CMD_MAX_TIMEOUT; ) {
b2e1a3f0 3072 if (!(t4_read_reg(adap, PCIE_FW) & PCIE_FW_HALT_F))
26f7cbc0
VP
3073 return 0;
3074 msleep(100);
3075 ms += 100;
3076 }
3077 return -ETIMEDOUT;
3078 }
3079 return 0;
3080}
3081
3082/**
3083 * t4_fw_upgrade - perform all of the steps necessary to upgrade FW
3084 * @adap: the adapter
3085 * @mbox: mailbox to use for the FW RESET command (if desired)
3086 * @fw_data: the firmware image to write
3087 * @size: image size
3088 * @force: force upgrade even if firmware doesn't cooperate
3089 *
3090 * Perform all of the steps necessary for upgrading an adapter's
3091 * firmware image. Normally this requires the cooperation of the
3092 * existing firmware in order to halt all existing activities
3093 * but if an invalid mailbox token is passed in we skip that step
3094 * (though we'll still put the adapter microprocessor into RESET in
3095 * that case).
3096 *
3097 * On successful return the new firmware will have been loaded and
3098 * the adapter will have been fully RESET losing all previous setup
3099 * state. On unsuccessful return the adapter may be completely hosed ...
3100 * positive errno indicates that the adapter is ~probably~ intact, a
3101 * negative errno indicates that things are looking bad ...
3102 */
22c0b963
HS
3103int t4_fw_upgrade(struct adapter *adap, unsigned int mbox,
3104 const u8 *fw_data, unsigned int size, int force)
26f7cbc0
VP
3105{
3106 const struct fw_hdr *fw_hdr = (const struct fw_hdr *)fw_data;
3107 int reset, ret;
3108
79af221d
HS
3109 if (!t4_fw_matches_chip(adap, fw_hdr))
3110 return -EINVAL;
3111
26f7cbc0
VP
3112 ret = t4_fw_halt(adap, mbox, force);
3113 if (ret < 0 && !force)
3114 return ret;
3115
3116 ret = t4_load_fw(adap, fw_data, size);
3117 if (ret < 0)
3118 return ret;
3119
3120 /*
3121 * Older versions of the firmware don't understand the new
3122 * PCIE_FW.HALT flag and so won't know to perform a RESET when they
3123 * restart. So for newly loaded older firmware we'll have to do the
3124 * RESET for it so it starts up on a clean slate. We can tell if
3125 * the newly loaded firmware will handle this right by checking
3126 * its header flags to see if it advertises the capability.
3127 */
3128 reset = ((ntohl(fw_hdr->flags) & FW_HDR_FLAGS_RESET_HALT) == 0);
3129 return t4_fw_restart(adap, mbox, reset);
3130}
3131
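/*
 * Usage sketch (illustrative): upgrading from an image obtained with
 * request_firmware() might look like
 *
 *	const struct firmware *fw;	(from request_firmware())
 *	ret = t4_fw_upgrade(adap, adap->mbox, fw->data, fw->size, 0);
 *
 * t4_fw_upgrade() halts the running firmware, flashes the new image via
 * t4_load_fw(), and restarts the uP, issuing a full RESET first when the
 * new image doesn't advertise FW_HDR_FLAGS_RESET_HALT.
 */
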
636f9d37
VP
/**
 * t4_fixup_host_params - fix up host-dependent parameters
 * @adap: the adapter
 * @page_size: the host's Base Page Size
 * @cache_line_size: the host's Cache Line Size
 *
 * Various registers in T4 contain values which are dependent on the
 * host's Base Page and Cache Line Sizes.  This function will fix all of
 * those registers with the appropriate values as passed in ...
 */
int t4_fixup_host_params(struct adapter *adap, unsigned int page_size,
			 unsigned int cache_line_size)
{
	unsigned int page_shift = fls(page_size) - 1;
	unsigned int sge_hps = page_shift - 10;
	unsigned int stat_len = cache_line_size > 64 ? 128 : 64;
	unsigned int fl_align = cache_line_size < 32 ? 32 : cache_line_size;
	unsigned int fl_align_log = fls(fl_align) - 1;

	t4_write_reg(adap, SGE_HOST_PAGE_SIZE,
		     HOSTPAGESIZEPF0(sge_hps) |
		     HOSTPAGESIZEPF1(sge_hps) |
		     HOSTPAGESIZEPF2(sge_hps) |
		     HOSTPAGESIZEPF3(sge_hps) |
		     HOSTPAGESIZEPF4(sge_hps) |
		     HOSTPAGESIZEPF5(sge_hps) |
		     HOSTPAGESIZEPF6(sge_hps) |
		     HOSTPAGESIZEPF7(sge_hps));

	if (is_t4(adap->params.chip)) {
		t4_set_reg_field(adap, SGE_CONTROL,
				 INGPADBOUNDARY_MASK |
				 EGRSTATUSPAGESIZE_MASK,
				 INGPADBOUNDARY(fl_align_log - 5) |
				 EGRSTATUSPAGESIZE(stat_len != 64));
	} else {
		/* T5 introduced the separation of the Free List Padding and
		 * Packing Boundaries.  Thus, we can select a smaller Padding
		 * Boundary to avoid uselessly chewing up PCIe Link and Memory
		 * Bandwidth, and use a Packing Boundary which is large enough
		 * to avoid false sharing between CPUs, etc.
		 *
		 * For the PCI Link, the smaller the Padding Boundary the
		 * better.  For the Memory Controller, a smaller Padding
		 * Boundary is better until we cross under the Memory Line
		 * Size (the minimum unit of transfer to/from Memory).  If we
		 * have a Padding Boundary which is smaller than the Memory
		 * Line Size, that'll involve a Read-Modify-Write cycle on the
		 * Memory Controller which is never good.  For T5 the smallest
		 * Padding Boundary which we can select is 32 bytes which is
		 * larger than any known Memory Controller Line Size so we'll
		 * use that.
		 *
		 * T5 has a different interpretation of the "0" value for the
		 * Packing Boundary.  This corresponds to 16 bytes instead of
		 * the expected 32 bytes.  We never have a Packing Boundary
		 * less than 32 bytes so we can't use that special value but
		 * on the other hand, if we wanted 32 bytes, the best we can
		 * really do is 64 bytes.
		 */
		if (fl_align <= 32) {
			fl_align = 64;
			fl_align_log = 6;
		}
		t4_set_reg_field(adap, SGE_CONTROL,
				 INGPADBOUNDARY_MASK |
				 EGRSTATUSPAGESIZE_MASK,
				 INGPADBOUNDARY(INGPCIEBOUNDARY_32B_X) |
				 EGRSTATUSPAGESIZE(stat_len != 64));
		t4_set_reg_field(adap, SGE_CONTROL2_A,
				 INGPACKBOUNDARY_V(INGPACKBOUNDARY_M),
				 INGPACKBOUNDARY_V(fl_align_log -
						   INGPACKBOUNDARY_SHIFT_X));
	}
	/*
	 * Adjust various SGE Free List Host Buffer Sizes.
	 *
	 * This is something of a crock since we're using fixed indices into
	 * the array which are also known by the sge.c code and the T4
	 * Firmware Configuration File.  We need to come up with a much better
	 * approach to managing this array.  For now, the first four entries
	 * are:
	 *
	 *   0: Host Page Size
	 *   1: 64KB
	 *   2: Buffer size corresponding to 1500 byte MTU (unpacked mode)
	 *   3: Buffer size corresponding to 9000 byte MTU (unpacked mode)
	 *
	 * For the single-MTU buffers in unpacked mode we need to include
	 * space for the SGE Control Packet Shift, 14 byte Ethernet header,
	 * possible 4 byte VLAN tag, all rounded up to the next Ingress Packet
	 * Padding Boundary.  All of these are accommodated in the Factory
	 * Default Firmware Configuration File but we need to adjust it for
	 * this host's cache line size.
	 */
	t4_write_reg(adap, SGE_FL_BUFFER_SIZE0, page_size);
	t4_write_reg(adap, SGE_FL_BUFFER_SIZE2,
		     (t4_read_reg(adap, SGE_FL_BUFFER_SIZE2) + fl_align - 1)
		     & ~(fl_align - 1));
	t4_write_reg(adap, SGE_FL_BUFFER_SIZE3,
		     (t4_read_reg(adap, SGE_FL_BUFFER_SIZE3) + fl_align - 1)
		     & ~(fl_align - 1));

	t4_write_reg(adap, ULP_RX_TDDP_PSZ, HPZ0(page_shift - 12));

	return 0;
}
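
/*
 * Worked example (not part of the driver): on a typical x86-64 host with
 * 4KB pages and 64-byte cache lines the math above gives page_shift = 12,
 * sge_hps = 2, stat_len = 64 and fl_align = 64.  A caller would normally
 * just hand in the host kernel's own constants:
 */
static int example_fixup_host_params(struct adapter *adap)
{
	/* PAGE_SIZE and L1_CACHE_BYTES come from the host kernel config */
	return t4_fixup_host_params(adap, PAGE_SIZE, L1_CACHE_BYTES);
}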

/**
 * t4_fw_initialize - ask FW to initialize the device
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 *
 * Issues a command to FW to partially initialize the device.  This
 * performs initialization that generally doesn't depend on user input.
 */
int t4_fw_initialize(struct adapter *adap, unsigned int mbox)
{
	struct fw_initialize_cmd c;

	memset(&c, 0, sizeof(c));
	INIT_CMD(c, INITIALIZE, WRITE);
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}
/**
 * t4_query_params - query FW or device parameters
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @pf: the PF
 * @vf: the VF
 * @nparams: the number of parameters
 * @params: the parameter names
 * @val: the parameter values
 *
 * Reads the value of FW or device parameters.  Up to 7 parameters can be
 * queried at once.
 */
int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
		    unsigned int vf, unsigned int nparams, const u32 *params,
		    u32 *val)
{
	int i, ret;
	struct fw_params_cmd c;
	__be32 *p = &c.param[0].mnem;

	if (nparams > 7)
		return -EINVAL;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn = htonl(FW_CMD_OP_V(FW_PARAMS_CMD) | FW_CMD_REQUEST_F |
			    FW_CMD_READ_F | FW_PARAMS_CMD_PFN_V(pf) |
			    FW_PARAMS_CMD_VFN_V(vf));
	c.retval_len16 = htonl(FW_LEN16(c));
	for (i = 0; i < nparams; i++, p += 2)
		*p = htonl(*params++);

	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
	if (ret == 0)
		for (i = 0, p = &c.param[0].val; i < nparams; i++, p += 2)
			*val++ = ntohl(*p);
	return ret;
}
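
/*
 * Usage sketch (not part of the driver): reading a single FW parameter.
 * The 32-bit parameter name is normally assembled from the FW_PARAMS_*
 * macros in t4fw_api.h; it is passed in directly here so the sketch
 * doesn't depend on any particular macro.  adap->fn as both mailbox and
 * PF follows this file's per-PF indexing in t4_init_sge_params().
 */
static int example_query_one_param(struct adapter *adap, u32 param, u32 *val)
{
	return t4_query_params(adap, adap->fn, adap->fn, 0, 1, &param, val);
}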
/**
 * t4_set_params_nosleep - sets FW or device parameters
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @pf: the PF
 * @vf: the VF
 * @nparams: the number of parameters
 * @params: the parameter names
 * @val: the parameter values
 *
 * Sets the value of FW or device parameters.  Up to 7 parameters can be
 * specified at once.  Unlike t4_set_params(), this variant never sleeps.
 */
int t4_set_params_nosleep(struct adapter *adap, unsigned int mbox,
			  unsigned int pf, unsigned int vf,
			  unsigned int nparams, const u32 *params,
			  const u32 *val)
{
	struct fw_params_cmd c;
	__be32 *p = &c.param[0].mnem;

	if (nparams > 7)
		return -EINVAL;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_PARAMS_CMD) |
				  FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
				  FW_PARAMS_CMD_PFN_V(pf) |
				  FW_PARAMS_CMD_VFN_V(vf));
	c.retval_len16 = cpu_to_be32(FW_LEN16(c));

	while (nparams--) {
		*p++ = cpu_to_be32(*params++);
		*p++ = cpu_to_be32(*val++);
	}

	return t4_wr_mbox_ns(adap, mbox, &c, sizeof(c), NULL);
}
/**
 * t4_set_params - sets FW or device parameters
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @pf: the PF
 * @vf: the VF
 * @nparams: the number of parameters
 * @params: the parameter names
 * @val: the parameter values
 *
 * Sets the value of FW or device parameters.  Up to 7 parameters can be
 * specified at once.
 */
int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
		  unsigned int vf, unsigned int nparams, const u32 *params,
		  const u32 *val)
{
	struct fw_params_cmd c;
	__be32 *p = &c.param[0].mnem;

	if (nparams > 7)
		return -EINVAL;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn = htonl(FW_CMD_OP_V(FW_PARAMS_CMD) | FW_CMD_REQUEST_F |
			    FW_CMD_WRITE_F | FW_PARAMS_CMD_PFN_V(pf) |
			    FW_PARAMS_CMD_VFN_V(vf));
	c.retval_len16 = htonl(FW_LEN16(c));
	while (nparams--) {
		*p++ = htonl(*params++);
		*p++ = htonl(*val++);
	}

	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}
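
/*
 * Usage sketch (not part of the driver): writing an arbitrary number of
 * FW parameters by splitting them into chunks of at most 7, the
 * per-command limit enforced above.
 */
static int example_set_many_params(struct adapter *adap, unsigned int n,
				   const u32 *params, const u32 *vals)
{
	while (n) {
		unsigned int chunk = n > 7 ? 7 : n;
		int ret = t4_set_params(adap, adap->fn, adap->fn, 0,
					chunk, params, vals);

		if (ret)
			return ret;
		params += chunk;
		vals += chunk;
		n -= chunk;
	}
	return 0;
}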

/**
 * t4_cfg_pfvf - configure PF/VF resource limits
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @pf: the PF being configured
 * @vf: the VF being configured
 * @txq: the max number of egress queues
 * @txq_eth_ctrl: the max number of egress Ethernet or control queues
 * @rxqi: the max number of interrupt-capable ingress queues
 * @rxq: the max number of interruptless ingress queues
 * @tc: the PCI traffic class
 * @vi: the max number of virtual interfaces
 * @cmask: the channel access rights mask for the PF/VF
 * @pmask: the port access rights mask for the PF/VF
 * @nexact: the maximum number of exact MPS filters
 * @rcaps: read capabilities
 * @wxcaps: write/execute capabilities
 *
 * Configures resource limits and capabilities for a physical or virtual
 * function.
 */
int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf,
		unsigned int vf, unsigned int txq, unsigned int txq_eth_ctrl,
		unsigned int rxqi, unsigned int rxq, unsigned int tc,
		unsigned int vi, unsigned int cmask, unsigned int pmask,
		unsigned int nexact, unsigned int rcaps, unsigned int wxcaps)
{
	struct fw_pfvf_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn = htonl(FW_CMD_OP_V(FW_PFVF_CMD) | FW_CMD_REQUEST_F |
			    FW_CMD_WRITE_F | FW_PFVF_CMD_PFN_V(pf) |
			    FW_PFVF_CMD_VFN_V(vf));
	c.retval_len16 = htonl(FW_LEN16(c));
	c.niqflint_niq = htonl(FW_PFVF_CMD_NIQFLINT_V(rxqi) |
			       FW_PFVF_CMD_NIQ_V(rxq));
	c.type_to_neq = htonl(FW_PFVF_CMD_CMASK_V(cmask) |
			      FW_PFVF_CMD_PMASK_V(pmask) |
			      FW_PFVF_CMD_NEQ_V(txq));
	c.tc_to_nexactf = htonl(FW_PFVF_CMD_TC_V(tc) | FW_PFVF_CMD_NVI_V(vi) |
				FW_PFVF_CMD_NEXACTF_V(nexact));
	c.r_caps_to_nethctrl = htonl(FW_PFVF_CMD_R_CAPS_V(rcaps) |
				     FW_PFVF_CMD_WX_CAPS_V(wxcaps) |
				     FW_PFVF_CMD_NETHCTRL_V(txq_eth_ctrl));
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}

/**
 * t4_alloc_vi - allocate a virtual interface
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @port: physical port associated with the VI
 * @pf: the PF owning the VI
 * @vf: the VF owning the VI
 * @nmac: number of MAC addresses needed (1 to 5)
 * @mac: the MAC addresses of the VI
 * @rss_size: size of RSS table slice associated with this VI
 *
 * Allocates a virtual interface for the given physical port.  If @mac is
 * not %NULL it contains the MAC addresses of the VI as assigned by FW.
 * @mac should be large enough to hold @nmac Ethernet addresses; they are
 * stored consecutively, so the space needed is @nmac * 6 bytes.
 * Returns a negative error number or the non-negative VI id.
 */
int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
		unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac,
		unsigned int *rss_size)
{
	int ret;
	struct fw_vi_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn = htonl(FW_CMD_OP_V(FW_VI_CMD) | FW_CMD_REQUEST_F |
			    FW_CMD_WRITE_F | FW_CMD_EXEC_F |
			    FW_VI_CMD_PFN_V(pf) | FW_VI_CMD_VFN_V(vf));
	c.alloc_to_len16 = htonl(FW_VI_CMD_ALLOC_F | FW_LEN16(c));
	c.portid_pkd = FW_VI_CMD_PORTID_V(port);
	c.nmac = nmac - 1;

	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
	if (ret)
		return ret;

	if (mac) {
		memcpy(mac, c.mac, sizeof(c.mac));
		switch (nmac) {
		case 5:
			memcpy(mac + 24, c.nmac3, sizeof(c.nmac3));
		case 4:
			memcpy(mac + 18, c.nmac2, sizeof(c.nmac2));
		case 3:
			memcpy(mac + 12, c.nmac1, sizeof(c.nmac1));
		case 2:
			memcpy(mac + 6, c.nmac0, sizeof(c.nmac0));
		}
	}
	if (rss_size)
		*rss_size = FW_VI_CMD_RSSSIZE_G(ntohs(c.rsssize_pkd));
	return FW_VI_CMD_VIID_G(ntohs(c.type_viid));
}
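
/*
 * Usage sketch (not part of the driver): allocating a VI on a physical
 * port with one FW-assigned MAC address.  On success the returned VI id
 * can be handed to t4_set_rxmode(), t4_enable_vi() and friends.
 */
static int example_alloc_vi(struct adapter *adap, unsigned int port,
			    u8 *mac, unsigned int *rss_size)
{
	/* mac must have room for 1 * 6 bytes; a negative return is an errno */
	return t4_alloc_vi(adap, adap->fn, port, adap->fn, 0, 1,
			   mac, rss_size);
}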
/**
 * t4_set_rxmode - set Rx properties of a virtual interface
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @viid: the VI id
 * @mtu: the new MTU or -1
 * @promisc: 1 to enable promiscuous mode, 0 to disable it, -1 no change
 * @all_multi: 1 to enable all-multi mode, 0 to disable it, -1 no change
 * @bcast: 1 to enable broadcast Rx, 0 to disable it, -1 no change
 * @vlanex: 1 to enable HW VLAN extraction, 0 to disable it, -1 no change
 * @sleep_ok: if true we may sleep while awaiting command completion
 *
 * Sets Rx properties of a virtual interface.
 */
int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
		  int mtu, int promisc, int all_multi, int bcast, int vlanex,
		  bool sleep_ok)
{
	struct fw_vi_rxmode_cmd c;

	/* convert to FW values */
	if (mtu < 0)
		mtu = FW_RXMODE_MTU_NO_CHG;
	if (promisc < 0)
		promisc = FW_VI_RXMODE_CMD_PROMISCEN_M;
	if (all_multi < 0)
		all_multi = FW_VI_RXMODE_CMD_ALLMULTIEN_M;
	if (bcast < 0)
		bcast = FW_VI_RXMODE_CMD_BROADCASTEN_M;
	if (vlanex < 0)
		vlanex = FW_VI_RXMODE_CMD_VLANEXEN_M;

	memset(&c, 0, sizeof(c));
	c.op_to_viid = htonl(FW_CMD_OP_V(FW_VI_RXMODE_CMD) | FW_CMD_REQUEST_F |
			     FW_CMD_WRITE_F | FW_VI_RXMODE_CMD_VIID_V(viid));
	c.retval_len16 = htonl(FW_LEN16(c));
	c.mtu_to_vlanexen = htonl(FW_VI_RXMODE_CMD_MTU_V(mtu) |
				  FW_VI_RXMODE_CMD_PROMISCEN_V(promisc) |
				  FW_VI_RXMODE_CMD_ALLMULTIEN_V(all_multi) |
				  FW_VI_RXMODE_CMD_BROADCASTEN_V(bcast) |
				  FW_VI_RXMODE_CMD_VLANEXEN_V(vlanex));
	return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
}
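
/*
 * Usage sketch (not part of the driver): toggling promiscuous mode on a
 * VI while leaving MTU, all-multi, broadcast and VLAN extraction alone
 * (-1 means "no change" for each of those fields).
 */
static int example_set_promisc(struct adapter *adap, unsigned int viid,
			       bool on)
{
	return t4_set_rxmode(adap, adap->fn, viid, -1, on ? 1 : 0,
			     -1, -1, -1, true);
}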

/**
 * t4_alloc_mac_filt - allocates exact-match filters for MAC addresses
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @viid: the VI id
 * @free: if true any existing filters for this VI id are first removed
 * @naddr: the number of MAC addresses to allocate filters for (up to 7)
 * @addr: the MAC address(es)
 * @idx: where to store the index of each allocated filter
 * @hash: pointer to hash address filter bitmap
 * @sleep_ok: call is allowed to sleep
 *
 * Allocates an exact-match filter for each of the supplied addresses and
 * sets it to the corresponding address.  If @idx is not %NULL it should
 * have at least @naddr entries, each of which will be set to the index of
 * the filter allocated for the corresponding MAC address.  If a filter
 * could not be allocated for an address its index is set to 0xffff.
 * If @hash is not %NULL addresses that fail to allocate an exact filter
 * are hashed and update the hash filter bitmap pointed at by @hash.
 *
 * Returns a negative error number or the number of filters allocated.
 */
int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox,
		      unsigned int viid, bool free, unsigned int naddr,
		      const u8 **addr, u16 *idx, u64 *hash, bool sleep_ok)
{
	int i, ret;
	struct fw_vi_mac_cmd c;
	struct fw_vi_mac_exact *p;
	unsigned int max_naddr = is_t4(adap->params.chip) ?
				       NUM_MPS_CLS_SRAM_L_INSTANCES :
				       NUM_MPS_T5_CLS_SRAM_L_INSTANCES;

	if (naddr > 7)
		return -EINVAL;

	memset(&c, 0, sizeof(c));
	c.op_to_viid = htonl(FW_CMD_OP_V(FW_VI_MAC_CMD) | FW_CMD_REQUEST_F |
			     FW_CMD_WRITE_F | (free ? FW_CMD_EXEC_F : 0) |
			     FW_VI_MAC_CMD_VIID_V(viid));
	c.freemacs_to_len16 = htonl(FW_VI_MAC_CMD_FREEMACS_V(free) |
				    FW_CMD_LEN16_V((naddr + 2) / 2));

	for (i = 0, p = c.u.exact; i < naddr; i++, p++) {
		p->valid_to_idx = htons(FW_VI_MAC_CMD_VALID_F |
					FW_VI_MAC_CMD_IDX_V(FW_VI_MAC_ADD_MAC));
		memcpy(p->macaddr, addr[i], sizeof(p->macaddr));
	}

	ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok);
	if (ret)
		return ret;

	for (i = 0, p = c.u.exact; i < naddr; i++, p++) {
		u16 index = FW_VI_MAC_CMD_IDX_G(ntohs(p->valid_to_idx));

		if (idx)
			idx[i] = index >= max_naddr ? 0xffff : index;
		if (index < max_naddr)
			ret++;
		else if (hash)
			*hash |= (1ULL << hash_mac_addr(addr[i]));
	}
	return ret;
}

/**
 * t4_change_mac - modifies the exact-match filter for a MAC address
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @viid: the VI id
 * @idx: index of existing filter for old value of MAC address, or -1
 * @addr: the new MAC address value
 * @persist: whether a new MAC allocation should be persistent
 * @add_smt: if true also add the address to the HW SMT
 *
 * Modifies an exact-match filter and sets it to the new MAC address.
 * Note that in general it is not possible to modify the value of a given
 * filter so the generic way to modify an address filter is to free the one
 * being used by the old address value and allocate a new filter for the
 * new address value.  @idx can be -1 if the address is a new addition.
 *
 * Returns a negative error number or the index of the filter with the new
 * MAC value.
 */
int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
		  int idx, const u8 *addr, bool persist, bool add_smt)
{
	int ret, mode;
	struct fw_vi_mac_cmd c;
	struct fw_vi_mac_exact *p = c.u.exact;
	unsigned int max_mac_addr = is_t4(adap->params.chip) ?
				    NUM_MPS_CLS_SRAM_L_INSTANCES :
				    NUM_MPS_T5_CLS_SRAM_L_INSTANCES;

	if (idx < 0)                             /* new allocation */
		idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC;
	mode = add_smt ? FW_VI_MAC_SMT_AND_MPSTCAM : FW_VI_MAC_MPS_TCAM_ENTRY;

	memset(&c, 0, sizeof(c));
	c.op_to_viid = htonl(FW_CMD_OP_V(FW_VI_MAC_CMD) | FW_CMD_REQUEST_F |
			     FW_CMD_WRITE_F | FW_VI_MAC_CMD_VIID_V(viid));
	c.freemacs_to_len16 = htonl(FW_CMD_LEN16_V(1));
	p->valid_to_idx = htons(FW_VI_MAC_CMD_VALID_F |
				FW_VI_MAC_CMD_SMAC_RESULT_V(mode) |
				FW_VI_MAC_CMD_IDX_V(idx));
	memcpy(p->macaddr, addr, sizeof(p->macaddr));

	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
	if (ret == 0) {
		ret = FW_VI_MAC_CMD_IDX_G(ntohs(p->valid_to_idx));
		if (ret >= max_mac_addr)
			ret = -ENOMEM;
	}
	return ret;
}
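
/*
 * Usage sketch (not part of the driver): replacing a port's primary MAC
 * address.  The caller is assumed to have cached the filter index from
 * the previous call; the driver proper keeps it in its port state.
 */
static int example_change_mac(struct adapter *adap, unsigned int viid,
			      int old_idx, const u8 *new_mac)
{
	/* persistent allocation, no SMT update */
	int idx = t4_change_mac(adap, adap->fn, viid, old_idx, new_mac,
				true, false);

	return idx < 0 ? idx : 0;	/* idx is the new filter index */
}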

/**
 * t4_set_addr_hash - program the MAC inexact-match hash filter
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @viid: the VI id
 * @ucast: whether the hash filter should also match unicast addresses
 * @vec: the value to be written to the hash filter
 * @sleep_ok: call is allowed to sleep
 *
 * Sets the 64-bit inexact-match hash filter for a virtual interface.
 */
int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid,
		     bool ucast, u64 vec, bool sleep_ok)
{
	struct fw_vi_mac_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_viid = htonl(FW_CMD_OP_V(FW_VI_MAC_CMD) | FW_CMD_REQUEST_F |
			     FW_CMD_WRITE_F | FW_VI_ENABLE_CMD_VIID_V(viid));
	c.freemacs_to_len16 = htonl(FW_VI_MAC_CMD_HASHVECEN_F |
				    FW_VI_MAC_CMD_HASHUNIEN_V(ucast) |
				    FW_CMD_LEN16_V(1));
	c.u.hash.hashvec = cpu_to_be64(vec);
	return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
}
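
/*
 * Usage sketch (not part of the driver): programming a short address
 * list with t4_alloc_mac_filt() and letting any overflow fall back to
 * the inexact-match hash filter above.  @naddr must be at most 7 per
 * call; hash_mac_addr() is the hashing helper used earlier in this file.
 */
static int example_program_addr_list(struct adapter *adap, unsigned int viid,
				     unsigned int naddr, const u8 **addr)
{
	u64 hash = 0;
	int ret = t4_alloc_mac_filt(adap, adap->fn, viid, true, naddr,
				    addr, NULL, &hash, true);

	if (ret < 0)
		return ret;
	/* ret exact filters were allocated; the rest landed in 'hash' */
	return t4_set_addr_hash(adap, adap->fn, viid, hash != 0, hash, true);
}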
/**
 * t4_enable_vi_params - enable/disable a virtual interface
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @viid: the VI id
 * @rx_en: 1=enable Rx, 0=disable Rx
 * @tx_en: 1=enable Tx, 0=disable Tx
 * @dcb_en: 1=enable delivery of Data Center Bridging messages
 *
 * Enables/disables a virtual interface.  Note that setting DCB Enable
 * only makes sense when enabling a Virtual Interface ...
 */
int t4_enable_vi_params(struct adapter *adap, unsigned int mbox,
			unsigned int viid, bool rx_en, bool tx_en, bool dcb_en)
{
	struct fw_vi_enable_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_viid = htonl(FW_CMD_OP_V(FW_VI_ENABLE_CMD) | FW_CMD_REQUEST_F |
			     FW_CMD_EXEC_F | FW_VI_ENABLE_CMD_VIID_V(viid));

	c.ien_to_len16 = htonl(FW_VI_ENABLE_CMD_IEN_V(rx_en) |
			       FW_VI_ENABLE_CMD_EEN_V(tx_en) | FW_LEN16(c) |
			       FW_VI_ENABLE_CMD_DCB_INFO_V(dcb_en));
	return t4_wr_mbox_ns(adap, mbox, &c, sizeof(c), NULL);
}
/**
 * t4_enable_vi - enable/disable a virtual interface
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @viid: the VI id
 * @rx_en: 1=enable Rx, 0=disable Rx
 * @tx_en: 1=enable Tx, 0=disable Tx
 *
 * Enables/disables a virtual interface.
 */
int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid,
		 bool rx_en, bool tx_en)
{
	return t4_enable_vi_params(adap, mbox, viid, rx_en, tx_en, 0);
}

/**
 * t4_identify_port - identify a VI's port by blinking its LED
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @viid: the VI id
 * @nblinks: how many times to blink LED at 2.5 Hz
 *
 * Identifies a VI's port by blinking its LED.
 */
int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid,
		     unsigned int nblinks)
{
	struct fw_vi_enable_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_viid = htonl(FW_CMD_OP_V(FW_VI_ENABLE_CMD) | FW_CMD_REQUEST_F |
			     FW_CMD_EXEC_F | FW_VI_ENABLE_CMD_VIID_V(viid));
	c.ien_to_len16 = htonl(FW_VI_ENABLE_CMD_LED_F | FW_LEN16(c));
	c.blinkdur = htons(nblinks);
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}

/**
 * t4_iq_free - free an ingress queue and its FLs
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @pf: the PF owning the queues
 * @vf: the VF owning the queues
 * @iqtype: the ingress queue type
 * @iqid: ingress queue id
 * @fl0id: FL0 queue id or 0xffff if no attached FL0
 * @fl1id: FL1 queue id or 0xffff if no attached FL1
 *
 * Frees an ingress queue and its associated FLs, if any.
 */
int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
	       unsigned int vf, unsigned int iqtype, unsigned int iqid,
	       unsigned int fl0id, unsigned int fl1id)
{
	struct fw_iq_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn = htonl(FW_CMD_OP_V(FW_IQ_CMD) | FW_CMD_REQUEST_F |
			    FW_CMD_EXEC_F | FW_IQ_CMD_PFN_V(pf) |
			    FW_IQ_CMD_VFN_V(vf));
	c.alloc_to_len16 = htonl(FW_IQ_CMD_FREE_F | FW_LEN16(c));
	c.type_to_iqandstindex = htonl(FW_IQ_CMD_TYPE_V(iqtype));
	c.iqid = htons(iqid);
	c.fl0id = htons(fl0id);
	c.fl1id = htons(fl1id);
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}

/**
 * t4_eth_eq_free - free an Ethernet egress queue
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @pf: the PF owning the queue
 * @vf: the VF owning the queue
 * @eqid: egress queue id
 *
 * Frees an Ethernet egress queue.
 */
int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
		   unsigned int vf, unsigned int eqid)
{
	struct fw_eq_eth_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn = htonl(FW_CMD_OP_V(FW_EQ_ETH_CMD) | FW_CMD_REQUEST_F |
			    FW_CMD_EXEC_F | FW_EQ_ETH_CMD_PFN_V(pf) |
			    FW_EQ_ETH_CMD_VFN_V(vf));
	c.alloc_to_len16 = htonl(FW_EQ_ETH_CMD_FREE_F | FW_LEN16(c));
	c.eqid_pkd = htonl(FW_EQ_ETH_CMD_EQID_V(eqid));
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}

/**
 * t4_ctrl_eq_free - free a control egress queue
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @pf: the PF owning the queue
 * @vf: the VF owning the queue
 * @eqid: egress queue id
 *
 * Frees a control egress queue.
 */
int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
		    unsigned int vf, unsigned int eqid)
{
	struct fw_eq_ctrl_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn = htonl(FW_CMD_OP_V(FW_EQ_CTRL_CMD) | FW_CMD_REQUEST_F |
			    FW_CMD_EXEC_F | FW_EQ_CTRL_CMD_PFN_V(pf) |
			    FW_EQ_CTRL_CMD_VFN_V(vf));
	c.alloc_to_len16 = htonl(FW_EQ_CTRL_CMD_FREE_F | FW_LEN16(c));
	c.cmpliqid_eqid = htonl(FW_EQ_CTRL_CMD_EQID_V(eqid));
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}

/**
 * t4_ofld_eq_free - free an offload egress queue
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @pf: the PF owning the queue
 * @vf: the VF owning the queue
 * @eqid: egress queue id
 *
 * Frees an offload egress queue.
 */
int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
		    unsigned int vf, unsigned int eqid)
{
	struct fw_eq_ofld_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn = htonl(FW_CMD_OP_V(FW_EQ_OFLD_CMD) | FW_CMD_REQUEST_F |
			    FW_CMD_EXEC_F | FW_EQ_OFLD_CMD_PFN_V(pf) |
			    FW_EQ_OFLD_CMD_VFN_V(vf));
	c.alloc_to_len16 = htonl(FW_EQ_OFLD_CMD_FREE_F | FW_LEN16(c));
	c.eqid_pkd = htonl(FW_EQ_OFLD_CMD_EQID_V(eqid));
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}

/**
 * t4_handle_fw_rpl - process a FW reply message
 * @adap: the adapter
 * @rpl: start of the FW message
 *
 * Processes a FW message, such as link state change messages.
 */
int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl)
{
	u8 opcode = *(const u8 *)rpl;

	if (opcode == FW_PORT_CMD) {    /* link/module state change message */
		int speed = 0, fc = 0;
		const struct fw_port_cmd *p = (void *)rpl;
		int chan = FW_PORT_CMD_PORTID_G(ntohl(p->op_to_portid));
		int port = adap->chan_map[chan];
		struct port_info *pi = adap2pinfo(adap, port);
		struct link_config *lc = &pi->link_cfg;
		u32 stat = ntohl(p->u.info.lstatus_to_modtype);
		int link_ok = (stat & FW_PORT_CMD_LSTATUS_F) != 0;
		u32 mod = FW_PORT_CMD_MODTYPE_G(stat);

		if (stat & FW_PORT_CMD_RXPAUSE_F)
			fc |= PAUSE_RX;
		if (stat & FW_PORT_CMD_TXPAUSE_F)
			fc |= PAUSE_TX;
		if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_100M))
			speed = 100;
		else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_1G))
			speed = 1000;
		else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_10G))
			speed = 10000;
		else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_40G))
			speed = 40000;

		if (link_ok != lc->link_ok || speed != lc->speed ||
		    fc != lc->fc) {                    /* something changed */
			lc->link_ok = link_ok;
			lc->speed = speed;
			lc->fc = fc;
			lc->supported = be16_to_cpu(p->u.info.pcap);
			t4_os_link_changed(adap, port, link_ok);
		}
		if (mod != pi->mod_type) {
			pi->mod_type = mod;
			t4_os_portmod_changed(adap, port);
		}
	}
	return 0;
}

static void get_pci_mode(struct adapter *adapter, struct pci_params *p)
{
	u16 val;

	if (pci_is_pcie(adapter->pdev)) {
		pcie_capability_read_word(adapter->pdev, PCI_EXP_LNKSTA, &val);
		p->speed = val & PCI_EXP_LNKSTA_CLS;
		p->width = (val & PCI_EXP_LNKSTA_NLW) >> 4;
	}
}

/**
 * init_link_config - initialize a link's SW state
 * @lc: structure holding the link state
 * @caps: link capabilities
 *
 * Initializes the SW state maintained for each link, including the link's
 * capabilities and default speed/flow-control/autonegotiation settings.
 */
static void init_link_config(struct link_config *lc, unsigned int caps)
{
	lc->supported = caps;
	lc->requested_speed = 0;
	lc->speed = 0;
	lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
	if (lc->supported & FW_PORT_CAP_ANEG) {
		lc->advertising = lc->supported & ADVERT_MASK;
		lc->autoneg = AUTONEG_ENABLE;
		lc->requested_fc |= PAUSE_AUTONEG;
	} else {
		lc->advertising = 0;
		lc->autoneg = AUTONEG_DISABLE;
	}
}

#define CIM_PF_NOACCESS 0xeeeeeeee

int t4_wait_dev_ready(void __iomem *regs)
{
	u32 whoami;

	whoami = readl(regs + PL_WHOAMI);
	if (whoami != 0xffffffff && whoami != CIM_PF_NOACCESS)
		return 0;

	msleep(500);
	whoami = readl(regs + PL_WHOAMI);
	return (whoami != 0xffffffff && whoami != CIM_PF_NOACCESS ? 0 : -EIO);
}

struct flash_desc {
	u32 vendor_and_model_id;
	u32 size_mb;
};

static int get_flash_params(struct adapter *adap)
{
	/* Table for non-Numonix supported flash parts.  Numonix parts are
	 * left to the preexisting code.  All flash parts have 64KB sectors.
	 */
	static struct flash_desc supported_flash[] = {
		{ 0x150201, 4 << 20 },       /* Spansion 4MB S25FL032P */
	};

	int ret;
	u32 info;

	ret = sf1_write(adap, 1, 1, 0, SF_RD_ID);
	if (!ret)
		ret = sf1_read(adap, 3, 0, 1, &info);
	t4_write_reg(adap, SF_OP, 0);                    /* unlock SF */
	if (ret)
		return ret;

	for (ret = 0; ret < ARRAY_SIZE(supported_flash); ++ret)
		if (supported_flash[ret].vendor_and_model_id == info) {
			adap->params.sf_size = supported_flash[ret].size_mb;
			adap->params.sf_nsec =
				adap->params.sf_size / SF_SEC_SIZE;
			return 0;
		}

	if ((info & 0xff) != 0x20)             /* not a Numonix flash */
		return -EINVAL;
	info >>= 16;                           /* log2 of size */
	if (info >= 0x14 && info < 0x18)
		adap->params.sf_nsec = 1 << (info - 16);
	else if (info == 0x18)
		adap->params.sf_nsec = 64;
	else
		return -EINVAL;
	adap->params.sf_size = 1 << info;
	adap->params.sf_fw_start =
		t4_read_reg(adap, CIM_BOOT_CFG) & BOOTADDR_MASK;

	if (adap->params.sf_size < FLASH_MIN_SIZE)
		dev_warn(adap->pdev_dev, "WARNING!!! FLASH size %#x < %#x!!!\n",
			 adap->params.sf_size, FLASH_MIN_SIZE);
	return 0;
}

/**
 * t4_prep_adapter - prepare SW and HW for operation
 * @adapter: the adapter
 *
 * Initialize adapter SW state for the various HW modules, set initial
 * values for some adapter tunables, take PHYs out of reset, and
 * initialize the MDIO interface.
 */
int t4_prep_adapter(struct adapter *adapter)
{
	int ret, ver;
	uint16_t device_id;
	u32 pl_rev;

	get_pci_mode(adapter, &adapter->params.pci);
	pl_rev = G_REV(t4_read_reg(adapter, PL_REV));

	ret = get_flash_params(adapter);
	if (ret < 0) {
		dev_err(adapter->pdev_dev, "error %d identifying flash\n", ret);
		return ret;
	}

	/* Retrieve adapter's device ID
	 */
	pci_read_config_word(adapter->pdev, PCI_DEVICE_ID, &device_id);
	ver = device_id >> 12;
	adapter->params.chip = 0;
	switch (ver) {
	case CHELSIO_T4:
		adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T4, pl_rev);
		break;
	case CHELSIO_T5:
		adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T5, pl_rev);
		break;
	default:
		dev_err(adapter->pdev_dev, "Device %d is not supported\n",
			device_id);
		return -EINVAL;
	}

	init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);

	/*
	 * Default port for debugging in case we can't reach FW.
	 */
	adapter->params.nports = 1;
	adapter->params.portvec = 1;
	adapter->params.vpd.cclk = 50000;
	return 0;
}

/**
 * cxgb4_t4_bar2_sge_qregs - return BAR2 SGE Queue register information
 * @adapter: the adapter
 * @qid: the Queue ID
 * @qtype: the Ingress or Egress type for @qid
 * @pbar2_qoffset: BAR2 Queue Offset
 * @pbar2_qid: BAR2 Queue ID or 0 for Queue ID inferred SGE Queues
 *
 * Returns the BAR2 SGE Queue Registers information associated with the
 * indicated Absolute Queue ID.  These are passed back in return value
 * pointers.  @qtype should be T4_BAR2_QTYPE_EGRESS for Egress Queue
 * and T4_BAR2_QTYPE_INGRESS for Ingress Queues.
 *
 * This may return an error which indicates that BAR2 SGE Queue
 * registers aren't available.  If an error is not returned, then the
 * following values are returned:
 *
 *   *@pbar2_qoffset: the BAR2 Offset of the @qid Registers
 *   *@pbar2_qid: the BAR2 SGE Queue ID or 0 of @qid
 *
 * If the returned BAR2 Queue ID is 0, then BAR2 SGE registers which
 * require the "Inferred Queue ID" ability may be used.  E.g. the
 * Write Combining Doorbell Buffer.  If the BAR2 Queue ID is not 0,
 * then these "Inferred Queue ID" registers may not be used.
 */
int cxgb4_t4_bar2_sge_qregs(struct adapter *adapter,
			    unsigned int qid,
			    enum t4_bar2_qtype qtype,
			    u64 *pbar2_qoffset,
			    unsigned int *pbar2_qid)
{
	unsigned int page_shift, page_size, qpp_shift, qpp_mask;
	u64 bar2_page_offset, bar2_qoffset;
	unsigned int bar2_qid, bar2_qid_offset, bar2_qinferred;

	/* T4 doesn't support BAR2 SGE Queue registers.
	 */
	if (is_t4(adapter->params.chip))
		return -EINVAL;

	/* Get our SGE Page Size parameters.
	 */
	page_shift = adapter->params.sge.hps + 10;
	page_size = 1 << page_shift;

	/* Get the right Queues per Page parameters for our Queue.
	 */
	qpp_shift = (qtype == T4_BAR2_QTYPE_EGRESS
		     ? adapter->params.sge.eq_qpp
		     : adapter->params.sge.iq_qpp);
	qpp_mask = (1 << qpp_shift) - 1;

	/* Calculate the basics of the BAR2 SGE Queue register area:
	 *  o The BAR2 page the Queue registers will be in.
	 *  o The BAR2 Queue ID.
	 *  o The BAR2 Queue ID Offset into the BAR2 page.
	 */
	bar2_page_offset = ((qid >> qpp_shift) << page_shift);
	bar2_qid = qid & qpp_mask;
	bar2_qid_offset = bar2_qid * SGE_UDB_SIZE;

	/* If the BAR2 Queue ID Offset is less than the Page Size, then the
	 * hardware will infer the Absolute Queue ID simply from the writes to
	 * the BAR2 Queue ID Offset within the BAR2 Page (and we need to use a
	 * BAR2 Queue ID of 0 for those writes).  Otherwise, we'll simply
	 * write to the first BAR2 SGE Queue Area within the BAR2 Page with
	 * the BAR2 Queue ID and the hardware will infer the Absolute Queue ID
	 * from the BAR2 Page and BAR2 Queue ID.
	 *
	 * One important consequence of this is that some BAR2 SGE registers
	 * have a "Queue ID" field and we can write the BAR2 SGE Queue ID
	 * there.  But other registers synthesize the SGE Queue ID purely
	 * from the writes to the registers -- the Write Combined Doorbell
	 * Buffer is a good example.  These BAR2 SGE Registers are only
	 * available for those BAR2 SGE Register areas where the SGE Absolute
	 * Queue ID can be inferred from simple writes.
	 */
	bar2_qoffset = bar2_page_offset;
	bar2_qinferred = (bar2_qid_offset < page_size);
	if (bar2_qinferred) {
		bar2_qoffset += bar2_qid_offset;
		bar2_qid = 0;
	}

	*pbar2_qoffset = bar2_qoffset;
	*pbar2_qid = bar2_qid;
	return 0;
}
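
/*
 * Usage sketch (not part of the driver): resolving the BAR2 doorbell
 * location for an egress queue.  Treating adapter->bar2 as the
 * ioremapped BAR2 base pointer is an assumption about the surrounding
 * driver state.
 */
static void __iomem *example_egress_db(struct adapter *adapter,
				       unsigned int qid,
				       unsigned int *bar2_qid)
{
	u64 off;

	if (cxgb4_t4_bar2_sge_qregs(adapter, qid, T4_BAR2_QTYPE_EGRESS,
				    &off, bar2_qid))
		return NULL;	/* e.g. T4: no BAR2 SGE Queue registers */
	return adapter->bar2 + off;
}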

/**
 * t4_init_sge_params - initialize adap->params.sge
 * @adapter: the adapter
 *
 * Initialize various fields of the adapter's SGE Parameters structure.
 */
int t4_init_sge_params(struct adapter *adapter)
{
	struct sge_params *sge_params = &adapter->params.sge;
	u32 hps, qpp;
	unsigned int s_hps, s_qpp;

	/* Extract the SGE Page Size for our PF.
	 */
	hps = t4_read_reg(adapter, SGE_HOST_PAGE_SIZE);
	s_hps = (HOSTPAGESIZEPF0_S +
		 (HOSTPAGESIZEPF1_S - HOSTPAGESIZEPF0_S) * adapter->fn);
	sge_params->hps = ((hps >> s_hps) & HOSTPAGESIZEPF0_M);

	/* Extract the SGE Egress and Ingress Queues Per Page for our PF.
	 */
	s_qpp = (QUEUESPERPAGEPF0_S +
		 (QUEUESPERPAGEPF1_S - QUEUESPERPAGEPF0_S) * adapter->fn);
	qpp = t4_read_reg(adapter, SGE_EGRESS_QUEUES_PER_PAGE_PF);
	sge_params->eq_qpp = ((qpp >> s_qpp) & QUEUESPERPAGEPF0_MASK);
	qpp = t4_read_reg(adapter, SGE_INGRESS_QUEUES_PER_PAGE_PF);
	sge_params->iq_qpp = ((qpp >> s_qpp) & QUEUESPERPAGEPF0_MASK);

	return 0;
}

/**
 * t4_init_tp_params - initialize adap->params.tp
 * @adap: the adapter
 *
 * Initialize various fields of the adapter's TP Parameters structure.
 */
int t4_init_tp_params(struct adapter *adap)
{
	int chan;
	u32 v;

	v = t4_read_reg(adap, TP_TIMER_RESOLUTION);
	adap->params.tp.tre = TIMERRESOLUTION_GET(v);
	adap->params.tp.dack_re = DELAYEDACKRESOLUTION_GET(v);

	/* MODQ_REQ_MAP defaults to setting queues 0-3 to chan 0-3 */
	for (chan = 0; chan < NCHAN; chan++)
		adap->params.tp.tx_modq[chan] = chan;

	/* Cache the adapter's Compressed Filter Mode and global Ingress
	 * Configuration.
	 */
	t4_read_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
			 &adap->params.tp.vlan_pri_map, 1,
			 TP_VLAN_PRI_MAP);
	t4_read_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
			 &adap->params.tp.ingress_config, 1,
			 TP_INGRESS_CONFIG);

	/* Now that we have TP_VLAN_PRI_MAP cached, we can calculate the field
	 * shift positions of several elements of the Compressed Filter Tuple
	 * for this adapter which we need frequently ...
	 */
	adap->params.tp.vlan_shift = t4_filter_field_shift(adap, F_VLAN);
	adap->params.tp.vnic_shift = t4_filter_field_shift(adap, F_VNIC_ID);
	adap->params.tp.port_shift = t4_filter_field_shift(adap, F_PORT);
	adap->params.tp.protocol_shift = t4_filter_field_shift(adap,
							       F_PROTOCOL);

	/* If TP_INGRESS_CONFIG.VNID == 0, then TP_VLAN_PRI_MAP.VNIC_ID
	 * represents the presence of an Outer VLAN instead of a VNIC ID.
	 */
	if ((adap->params.tp.ingress_config & F_VNIC) == 0)
		adap->params.tp.vnic_shift = -1;

	return 0;
}

/**
 * t4_filter_field_shift - calculate filter field shift
 * @adap: the adapter
 * @filter_sel: the desired field (from TP_VLAN_PRI_MAP bits)
 *
 * Return the shift position of a filter field within the Compressed
 * Filter Tuple.  The filter field is specified via its selection bit
 * within TP_VLAN_PRI_MAP (filter mode).  E.g. F_VLAN.
 */
int t4_filter_field_shift(const struct adapter *adap, int filter_sel)
{
	unsigned int filter_mode = adap->params.tp.vlan_pri_map;
	unsigned int sel;
	int field_shift;

	if ((filter_mode & filter_sel) == 0)
		return -1;

	for (sel = 1, field_shift = 0; sel < filter_sel; sel <<= 1) {
		switch (filter_mode & sel) {
		case F_FCOE:
			field_shift += W_FT_FCOE;
			break;
		case F_PORT:
			field_shift += W_FT_PORT;
			break;
		case F_VNIC_ID:
			field_shift += W_FT_VNIC_ID;
			break;
		case F_VLAN:
			field_shift += W_FT_VLAN;
			break;
		case F_TOS:
			field_shift += W_FT_TOS;
			break;
		case F_PROTOCOL:
			field_shift += W_FT_PROTOCOL;
			break;
		case F_ETHERTYPE:
			field_shift += W_FT_ETHERTYPE;
			break;
		case F_MACMATCH:
			field_shift += W_FT_MACMATCH;
			break;
		case F_MPSHITTYPE:
			field_shift += W_FT_MPSHITTYPE;
			break;
		case F_FRAGMENTATION:
			field_shift += W_FT_FRAGMENTATION;
			break;
		}
	}
	return field_shift;
}
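
/*
 * Worked example (not part of the driver): with a filter mode of
 * F_PORT | F_VLAN | F_PROTOCOL and the usual ascending TP_VLAN_PRI_MAP
 * bit order, the loop above sums the widths of the enabled fields below
 * the requested one: F_VLAN lands at shift W_FT_PORT and F_PROTOCOL at
 * W_FT_PORT + W_FT_VLAN.
 */
static void example_filter_shifts(const struct adapter *adap)
{
	int vlan_shift = t4_filter_field_shift(adap, F_VLAN);
	int prot_shift = t4_filter_field_shift(adap, F_PROTOCOL);

	/* -1 means the field isn't part of the current filter mode */
	dev_info(adap->pdev_dev, "VLAN shift %d, protocol shift %d\n",
		 vlan_shift, prot_shift);
}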

int t4_port_init(struct adapter *adap, int mbox, int pf, int vf)
{
	u8 addr[6];
	int ret, i, j = 0;
	struct fw_port_cmd c;
	struct fw_rss_vi_config_cmd rvc;

	memset(&c, 0, sizeof(c));
	memset(&rvc, 0, sizeof(rvc));

	for_each_port(adap, i) {
		unsigned int rss_size;
		struct port_info *p = adap2pinfo(adap, i);

		while ((adap->params.portvec & (1 << j)) == 0)
			j++;

		c.op_to_portid = htonl(FW_CMD_OP_V(FW_PORT_CMD) |
				       FW_CMD_REQUEST_F | FW_CMD_READ_F |
				       FW_PORT_CMD_PORTID_V(j));
		c.action_to_len16 = htonl(
			FW_PORT_CMD_ACTION_V(FW_PORT_ACTION_GET_PORT_INFO) |
			FW_LEN16(c));
		ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
		if (ret)
			return ret;

		ret = t4_alloc_vi(adap, mbox, j, pf, vf, 1, addr, &rss_size);
		if (ret < 0)
			return ret;

		p->viid = ret;
		p->tx_chan = j;
		p->lport = j;
		p->rss_size = rss_size;
		memcpy(adap->port[i]->dev_addr, addr, ETH_ALEN);
		adap->port[i]->dev_port = j;

		ret = ntohl(c.u.info.lstatus_to_modtype);
		p->mdio_addr = (ret & FW_PORT_CMD_MDIOCAP_F) ?
			FW_PORT_CMD_MDIOADDR_G(ret) : -1;
		p->port_type = FW_PORT_CMD_PTYPE_G(ret);
		p->mod_type = FW_PORT_MOD_TYPE_NA;

		rvc.op_to_viid = htonl(FW_CMD_OP_V(FW_RSS_VI_CONFIG_CMD) |
				       FW_CMD_REQUEST_F | FW_CMD_READ_F |
				       FW_RSS_VI_CONFIG_CMD_VIID(p->viid));
		rvc.retval_len16 = htonl(FW_LEN16(rvc));
		ret = t4_wr_mbox(adap, mbox, &rvc, sizeof(rvc), &rvc);
		if (ret)
			return ret;
		p->rss_mode = ntohl(rvc.u.basicvirtual.defaultq_to_udpen);

		init_link_config(&p->link_cfg, ntohs(c.u.info.pcap));
		j++;
	}
	return 0;
}