drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
/*
 * This file is part of the Chelsio T4 Ethernet driver for Linux.
 *
 * Copyright (c) 2003-2014 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/delay.h>
#include "cxgb4.h"
#include "t4_regs.h"
#include "t4fw_api.h"

static int t4_fw_upgrade(struct adapter *adap, unsigned int mbox,
			 const u8 *fw_data, unsigned int size, int force);
/**
 * t4_wait_op_done_val - wait until an operation is completed
 * @adapter: the adapter performing the operation
 * @reg: the register to check for completion
 * @mask: a single-bit field within @reg that indicates completion
 * @polarity: the value of the field when the operation is completed
 * @attempts: number of check iterations
 * @delay: delay in usecs between iterations
 * @valp: where to store the value of the register at completion time
 *
 * Wait until an operation is completed by checking a bit in a register
 * up to @attempts times.  If @valp is not NULL the value of the register
 * at the time it indicated completion is stored there.  Returns 0 if the
 * operation completes and -EAGAIN otherwise.
 */
static int t4_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
			       int polarity, int attempts, int delay, u32 *valp)
{
	while (1) {
		u32 val = t4_read_reg(adapter, reg);

		if (!!(val & mask) == polarity) {
			if (valp)
				*valp = val;
			return 0;
		}
		if (--attempts == 0)
			return -EAGAIN;
		if (delay)
			udelay(delay);
	}
}

static inline int t4_wait_op_done(struct adapter *adapter, int reg, u32 mask,
				  int polarity, int attempts, int delay)
{
	return t4_wait_op_done_val(adapter, reg, mask, polarity, attempts,
				   delay, NULL);
}
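
/* Usage sketch (illustrative only, not part of the driver): polling a
 * single busy bit with t4_wait_op_done().  SF_OP and SF_BUSY are real
 * constants from t4_regs.h used elsewhere in this file; the helper
 * itself is hypothetical.
 */
static inline int example_wait_sf_idle(struct adapter *adap)
{
	/* Up to 10 polls, 5 us apart, waiting for SF_BUSY to read 0
	 * (polarity 0).  Returns 0 on success, -EAGAIN on timeout.
	 */
	return t4_wait_op_done(adap, SF_OP, SF_BUSY, 0, 10, 5);
}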

/**
 * t4_set_reg_field - set a register field to a value
 * @adapter: the adapter to program
 * @addr: the register address
 * @mask: specifies the portion of the register to modify
 * @val: the new value for the register field
 *
 * Sets a register field specified by the supplied mask to the
 * given value.
 */
void t4_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask,
		      u32 val)
{
	u32 v = t4_read_reg(adapter, addr) & ~mask;

	t4_write_reg(adapter, addr, v | val);
	(void) t4_read_reg(adapter, addr);                   /* flush */
}
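
/* Illustrative sketch (hypothetical helper): t4_set_reg_field() is a
 * read-modify-write, so only the bits in @mask change and @val must
 * already be shifted into the mask position.
 */
static inline void example_set_field(struct adapter *adap, unsigned int addr)
{
	/* Clear bits 15:8 of the register at @addr and set them to 0x3a. */
	t4_set_reg_field(adap, addr, 0xffU << 8, 0x3aU << 8);
}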

/**
 * t4_read_indirect - read indirectly addressed registers
 * @adap: the adapter
 * @addr_reg: register holding the indirect address
 * @data_reg: register holding the value of the indirect register
 * @vals: where the read register values are stored
 * @nregs: how many indirect registers to read
 * @start_idx: index of first indirect register to read
 *
 * Reads registers that are accessed indirectly through an address/data
 * register pair.
 */
void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
		      unsigned int data_reg, u32 *vals,
		      unsigned int nregs, unsigned int start_idx)
{
	while (nregs--) {
		t4_write_reg(adap, addr_reg, start_idx);
		*vals++ = t4_read_reg(adap, data_reg);
		start_idx++;
	}
}

/**
 * t4_write_indirect - write indirectly addressed registers
 * @adap: the adapter
 * @addr_reg: register holding the indirect addresses
 * @data_reg: register holding the value for the indirect registers
 * @vals: values to write
 * @nregs: how many indirect registers to write
 * @start_idx: address of first indirect register to write
 *
 * Writes a sequential block of registers that are accessed indirectly
 * through an address/data register pair.
 */
void t4_write_indirect(struct adapter *adap, unsigned int addr_reg,
		       unsigned int data_reg, const u32 *vals,
		       unsigned int nregs, unsigned int start_idx)
{
	while (nregs--) {
		t4_write_reg(adap, addr_reg, start_idx++);
		t4_write_reg(adap, data_reg, *vals++);
	}
}
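
/* Illustrative sketch (hypothetical helper): reading a block of
 * registers through an address/data pair.  The TP_PIO_ADDR/TP_PIO_DATA
 * pair is assumed here to be the TP module's indirect access pair from
 * t4_regs.h; the buffer size is arbitrary.
 */
static inline void example_read_tp_pio(struct adapter *adap, u32 *buf)
{
	/* Read 4 consecutive TP PIO registers starting at index 0. */
	t4_read_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA, buf, 4, 0);
}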

/*
 * Read a 32-bit PCI Configuration Space register via the PCI-E backdoor
 * mechanism.  This guarantees that we get the real value even if we're
 * operating within a Virtual Machine and the Hypervisor is trapping our
 * Configuration Space accesses.
 */
void t4_hw_pci_read_cfg4(struct adapter *adap, int reg, u32 *val)
{
	u32 req = ENABLE | FUNCTION(adap->fn) | reg;

	if (is_t4(adap->params.chip))
		req |= F_LOCALCFG;

	t4_write_reg(adap, PCIE_CFG_SPACE_REQ, req);
	*val = t4_read_reg(adap, PCIE_CFG_SPACE_DATA);

	/* Reset ENABLE to 0 so reads of PCIE_CFG_SPACE_DATA won't cause a
	 * Configuration Space read.  (None of the other fields matter when
	 * ENABLE is 0 so a simple register write is easier than a
	 * read-modify-write via t4_set_reg_field().)
	 */
	t4_write_reg(adap, PCIE_CFG_SPACE_REQ, 0);
}
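
/* Usage sketch (hypothetical helper): the backdoor read takes a byte
 * offset into PCI config space.  Offset 0 holds the standard 16-bit
 * vendor and device IDs packed into one dword.
 */
static inline u32 example_read_pci_ids(struct adapter *adap)
{
	u32 ids;

	t4_hw_pci_read_cfg4(adap, 0, &ids);	/* PCI_VENDOR_ID/PCI_DEVICE_ID */
	return ids;
}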

/*
 * t4_report_fw_error - report firmware error
 * @adap: the adapter
 *
 * The adapter firmware can indicate error conditions to the host.
 * If the firmware has indicated an error, print out the reason for
 * the firmware error.
 */
static void t4_report_fw_error(struct adapter *adap)
{
	static const char *const reason[] = {
		"Crash",                        /* PCIE_FW_EVAL_CRASH */
		"During Device Preparation",    /* PCIE_FW_EVAL_PREP */
		"During Device Configuration",  /* PCIE_FW_EVAL_CONF */
		"During Device Initialization", /* PCIE_FW_EVAL_INIT */
		"Unexpected Event",             /* PCIE_FW_EVAL_UNEXPECTEDEVENT */
		"Insufficient Airflow",         /* PCIE_FW_EVAL_OVERHEAT */
		"Device Shutdown",              /* PCIE_FW_EVAL_DEVICESHUTDOWN */
		"Reserved",                     /* reserved */
	};
	u32 pcie_fw;

	pcie_fw = t4_read_reg(adap, MA_PCIE_FW);
	if (pcie_fw & FW_PCIE_FW_ERR)
		dev_err(adap->pdev_dev, "Firmware reports adapter error: %s\n",
			reason[FW_PCIE_FW_EVAL_GET(pcie_fw)]);
}

/*
 * Get the reply to a mailbox command and store it in @rpl in big-endian order.
 */
static void get_mbox_rpl(struct adapter *adap, __be64 *rpl, int nflit,
			 u32 mbox_addr)
{
	for ( ; nflit; nflit--, mbox_addr += 8)
		*rpl++ = cpu_to_be64(t4_read_reg64(adap, mbox_addr));
}

/*
 * Handle a FW assertion reported in a mailbox.
 */
static void fw_asrt(struct adapter *adap, u32 mbox_addr)
{
	struct fw_debug_cmd asrt;

	get_mbox_rpl(adap, (__be64 *)&asrt, sizeof(asrt) / 8, mbox_addr);
	dev_alert(adap->pdev_dev,
		  "FW assertion at %.16s:%u, val0 %#x, val1 %#x\n",
		  asrt.u.assert.filename_0_7, ntohl(asrt.u.assert.line),
		  ntohl(asrt.u.assert.x), ntohl(asrt.u.assert.y));
}

static void dump_mbox(struct adapter *adap, int mbox, u32 data_reg)
{
	dev_err(adap->pdev_dev,
		"mbox %d: %llx %llx %llx %llx %llx %llx %llx %llx\n", mbox,
		(unsigned long long)t4_read_reg64(adap, data_reg),
		(unsigned long long)t4_read_reg64(adap, data_reg + 8),
		(unsigned long long)t4_read_reg64(adap, data_reg + 16),
		(unsigned long long)t4_read_reg64(adap, data_reg + 24),
		(unsigned long long)t4_read_reg64(adap, data_reg + 32),
		(unsigned long long)t4_read_reg64(adap, data_reg + 40),
		(unsigned long long)t4_read_reg64(adap, data_reg + 48),
		(unsigned long long)t4_read_reg64(adap, data_reg + 56));
}

/**
 * t4_wr_mbox_meat - send a command to FW through the given mailbox
 * @adap: the adapter
 * @mbox: index of the mailbox to use
 * @cmd: the command to write
 * @size: command length in bytes
 * @rpl: where to optionally store the reply
 * @sleep_ok: if true we may sleep while awaiting command completion
 *
 * Sends the given command to FW through the selected mailbox and waits
 * for the FW to execute the command.  If @rpl is not %NULL it is used to
 * store the FW's reply to the command.  The command and its optional
 * reply are of the same length.  FW can take up to %FW_CMD_MAX_TIMEOUT ms
 * to respond.  @sleep_ok determines whether we may sleep while awaiting
 * the response; if sleeping is allowed we use progressive backoff,
 * otherwise we spin.
 *
 * The return value is 0 on success or a negative errno on failure.  A
 * failure can happen either because we are not able to execute the
 * command or FW executes it but signals an error.  In the latter case
 * the return value is the error code indicated by FW (negated).
 */
int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
		    void *rpl, bool sleep_ok)
{
	static const int delay[] = {
		1, 1, 3, 5, 10, 10, 20, 50, 100, 200
	};

	u32 v;
	u64 res;
	int i, ms, delay_idx;
	const __be64 *p = cmd;
	u32 data_reg = PF_REG(mbox, CIM_PF_MAILBOX_DATA);
	u32 ctl_reg = PF_REG(mbox, CIM_PF_MAILBOX_CTRL);

	if ((size & 15) || size > MBOX_LEN)
		return -EINVAL;

	/*
	 * If the device is off-line, as in EEH, commands will time out.
	 * Fail them early so we don't waste time waiting.
	 */
	if (adap->pdev->error_state != pci_channel_io_normal)
		return -EIO;

	v = MBOWNER_GET(t4_read_reg(adap, ctl_reg));
	for (i = 0; v == MBOX_OWNER_NONE && i < 3; i++)
		v = MBOWNER_GET(t4_read_reg(adap, ctl_reg));

	if (v != MBOX_OWNER_DRV)
		return v ? -EBUSY : -ETIMEDOUT;

	for (i = 0; i < size; i += 8)
		t4_write_reg64(adap, data_reg + i, be64_to_cpu(*p++));

	t4_write_reg(adap, ctl_reg, MBMSGVALID | MBOWNER(MBOX_OWNER_FW));
	t4_read_reg(adap, ctl_reg);          /* flush write */

	delay_idx = 0;
	ms = delay[0];

	for (i = 0; i < FW_CMD_MAX_TIMEOUT; i += ms) {
		if (sleep_ok) {
			ms = delay[delay_idx];  /* last element may repeat */
			if (delay_idx < ARRAY_SIZE(delay) - 1)
				delay_idx++;
			msleep(ms);
		} else
			mdelay(ms);

		v = t4_read_reg(adap, ctl_reg);
		if (MBOWNER_GET(v) == MBOX_OWNER_DRV) {
			if (!(v & MBMSGVALID)) {
				t4_write_reg(adap, ctl_reg, 0);
				continue;
			}

			res = t4_read_reg64(adap, data_reg);
			if (FW_CMD_OP_GET(res >> 32) == FW_DEBUG_CMD) {
				fw_asrt(adap, data_reg);
				res = FW_CMD_RETVAL(EIO);
			} else if (rpl)
				get_mbox_rpl(adap, rpl, size / 8, data_reg);

			if (FW_CMD_RETVAL_GET((int)res))
				dump_mbox(adap, mbox, data_reg);
			t4_write_reg(adap, ctl_reg, 0);
			return -FW_CMD_RETVAL_GET((int)res);
		}
	}

	dump_mbox(adap, mbox, data_reg);
	dev_err(adap->pdev_dev, "command %#x in mailbox %d timed out\n",
		*(const u8 *)cmd, mbox);
	t4_report_fw_error(adap);
	return -ETIMEDOUT;
}
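
/* Illustrative sketch (assumption: t4_wr_mbox() in cxgb4.h is the
 * sleeping wrapper around t4_wr_mbox_meat(), and struct fw_reset_cmd
 * with PIORST/PIORSTMODE come from the firmware API headers): a typical
 * caller builds a big-endian command and sends it through mailbox
 * @mbox, getting back 0 or the negated FW error.  See t4_restart_aneg()
 * further down for a real in-tree caller following the same pattern.
 */
static inline int example_fw_reset(struct adapter *adap, unsigned int mbox)
{
	struct fw_reset_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_write = htonl(FW_CMD_OP(FW_RESET_CMD) | FW_CMD_REQUEST |
			      FW_CMD_WRITE);
	c.retval_len16 = htonl(FW_LEN16(c));
	c.val = htonl(PIORST | PIORSTMODE);	/* reset everything */
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}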

/**
 * t4_mc_read - read from MC through backdoor accesses
 * @adap: the adapter
 * @idx: which MC to access
 * @addr: address of first byte requested
 * @data: 64 bytes of data containing the requested address
 * @ecc: where to store the corresponding 64-bit ECC word
 *
 * Read 64 bytes of data from MC starting at a 64-byte-aligned address
 * that covers the requested address @addr.  If @ecc is not %NULL it is
 * assigned the 64-bit ECC word for the read data.
 */
int t4_mc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
{
	int i;
	u32 mc_bist_cmd, mc_bist_cmd_addr, mc_bist_cmd_len;
	u32 mc_bist_status_rdata, mc_bist_data_pattern;

	if (is_t4(adap->params.chip)) {
		mc_bist_cmd = MC_BIST_CMD;
		mc_bist_cmd_addr = MC_BIST_CMD_ADDR;
		mc_bist_cmd_len = MC_BIST_CMD_LEN;
		mc_bist_status_rdata = MC_BIST_STATUS_RDATA;
		mc_bist_data_pattern = MC_BIST_DATA_PATTERN;
	} else {
		mc_bist_cmd = MC_REG(MC_P_BIST_CMD, idx);
		mc_bist_cmd_addr = MC_REG(MC_P_BIST_CMD_ADDR, idx);
		mc_bist_cmd_len = MC_REG(MC_P_BIST_CMD_LEN, idx);
		mc_bist_status_rdata = MC_REG(MC_P_BIST_STATUS_RDATA, idx);
		mc_bist_data_pattern = MC_REG(MC_P_BIST_DATA_PATTERN, idx);
	}

	if (t4_read_reg(adap, mc_bist_cmd) & START_BIST)
		return -EBUSY;
	t4_write_reg(adap, mc_bist_cmd_addr, addr & ~0x3fU);
	t4_write_reg(adap, mc_bist_cmd_len, 64);
	t4_write_reg(adap, mc_bist_data_pattern, 0xc);
	t4_write_reg(adap, mc_bist_cmd, BIST_OPCODE(1) | START_BIST |
		     BIST_CMD_GAP(1));
	i = t4_wait_op_done(adap, mc_bist_cmd, START_BIST, 0, 10, 1);
	if (i)
		return i;

#define MC_DATA(i) MC_BIST_STATUS_REG(mc_bist_status_rdata, i)

	for (i = 15; i >= 0; i--)
		*data++ = htonl(t4_read_reg(adap, MC_DATA(i)));
	if (ecc)
		*ecc = t4_read_reg64(adap, MC_DATA(16));
#undef MC_DATA
	return 0;
}

/**
 * t4_edc_read - read from EDC through backdoor accesses
 * @adap: the adapter
 * @idx: which EDC to access
 * @addr: address of first byte requested
 * @data: 64 bytes of data containing the requested address
 * @ecc: where to store the corresponding 64-bit ECC word
 *
 * Read 64 bytes of data from EDC starting at a 64-byte-aligned address
 * that covers the requested address @addr.  If @ecc is not %NULL it is
 * assigned the 64-bit ECC word for the read data.
 */
int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
{
	int i;
	u32 edc_bist_cmd, edc_bist_cmd_addr, edc_bist_cmd_len;
	u32 edc_bist_cmd_data_pattern, edc_bist_status_rdata;

	if (is_t4(adap->params.chip)) {
		edc_bist_cmd = EDC_REG(EDC_BIST_CMD, idx);
		edc_bist_cmd_addr = EDC_REG(EDC_BIST_CMD_ADDR, idx);
		edc_bist_cmd_len = EDC_REG(EDC_BIST_CMD_LEN, idx);
		edc_bist_cmd_data_pattern = EDC_REG(EDC_BIST_DATA_PATTERN,
						    idx);
		edc_bist_status_rdata = EDC_REG(EDC_BIST_STATUS_RDATA,
						idx);
	} else {
		edc_bist_cmd = EDC_REG_T5(EDC_H_BIST_CMD, idx);
		edc_bist_cmd_addr = EDC_REG_T5(EDC_H_BIST_CMD_ADDR, idx);
		edc_bist_cmd_len = EDC_REG_T5(EDC_H_BIST_CMD_LEN, idx);
		edc_bist_cmd_data_pattern =
			EDC_REG_T5(EDC_H_BIST_DATA_PATTERN, idx);
		edc_bist_status_rdata =
			EDC_REG_T5(EDC_H_BIST_STATUS_RDATA, idx);
	}

	if (t4_read_reg(adap, edc_bist_cmd) & START_BIST)
		return -EBUSY;
	t4_write_reg(adap, edc_bist_cmd_addr, addr & ~0x3fU);
	t4_write_reg(adap, edc_bist_cmd_len, 64);
	t4_write_reg(adap, edc_bist_cmd_data_pattern, 0xc);
	t4_write_reg(adap, edc_bist_cmd,
		     BIST_OPCODE(1) | BIST_CMD_GAP(1) | START_BIST);
	i = t4_wait_op_done(adap, edc_bist_cmd, START_BIST, 0, 10, 1);
	if (i)
		return i;

#define EDC_DATA(i) (EDC_BIST_STATUS_REG(edc_bist_status_rdata, i))

	for (i = 15; i >= 0; i--)
		*data++ = htonl(t4_read_reg(adap, EDC_DATA(i)));
	if (ecc)
		*ecc = t4_read_reg64(adap, EDC_DATA(16));
#undef EDC_DATA
	return 0;
}
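
/* Usage sketch (hypothetical helper): both backdoor readers return one
 * 64-byte, 64-byte-aligned burst covering @addr, so the caller must
 * supply a 16-word buffer.  The ECC word is not requested here.
 */
static inline int example_edc_block(struct adapter *adap, int idx, u32 addr,
				    __be32 data[16])
{
	return t4_edc_read(adap, idx, addr, data, NULL);
}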

/**
 * t4_memory_rw - read/write EDC 0, EDC 1 or MC via PCIE memory window
 * @adap: the adapter
 * @win: PCI-E Memory Window to use
 * @mtype: memory type: MEM_EDC0, MEM_EDC1 or MEM_MC
 * @addr: address within indicated memory type
 * @len: amount of memory to transfer
 * @buf: host memory buffer
 * @dir: direction of transfer T4_MEMORY_READ (1) or T4_MEMORY_WRITE (0)
 *
 * Reads/writes an [almost] arbitrary memory region in the firmware: the
 * firmware memory address and host buffer must be aligned on 32-bit
 * boundaries; the length may be arbitrary.  The memory is transferred as
 * a raw byte sequence from/to the firmware's memory.  If this memory
 * contains data structures which contain multi-byte integers, it's the
 * caller's responsibility to perform appropriate byte order conversions.
 */
int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr,
		 u32 len, __be32 *buf, int dir)
{
	u32 pos, offset, resid, memoffset;
	u32 edc_size, mc_size, win_pf, mem_reg, mem_aperture, mem_base;

	/* Argument sanity checks ...
	 */
	if (addr & 0x3)
		return -EINVAL;

	/* It's convenient to be able to handle lengths which aren't a
	 * multiple of 32-bits because we often end up transferring files to
	 * the firmware.  So we'll handle that by normalizing the length here
	 * and then handling any residual transfer at the end.
	 */
	resid = len & 0x3;
	len -= resid;

	/* Offset into the region of memory which is being accessed
	 * MEM_EDC0 = 0
	 * MEM_EDC1 = 1
	 * MEM_MC   = 2 -- T4
	 * MEM_MC0  = 2 -- For T5
	 * MEM_MC1  = 3 -- For T5
	 */
	edc_size = EDRAM_SIZE_GET(t4_read_reg(adap, MA_EDRAM0_BAR));
	if (mtype != MEM_MC1)
		memoffset = (mtype * (edc_size * 1024 * 1024));
	else {
		mc_size = EXT_MEM_SIZE_GET(t4_read_reg(adap,
						       MA_EXT_MEMORY_BAR));
		memoffset = (MEM_MC0 * edc_size + mc_size) * 1024 * 1024;
	}

	/* Determine the PCIE_MEM_ACCESS_OFFSET */
	addr = addr + memoffset;

	/* Each PCI-E Memory Window is programmed with a window size -- or
	 * "aperture" -- which controls the granularity of its mapping onto
	 * adapter memory.  We need to grab that aperture in order to know
	 * how to use the specified window.  The window is also programmed
	 * with the base address of the Memory Window in BAR0's address
	 * space.  For T4 this is an absolute PCI-E Bus Address.  For T5
	 * the address is relative to BAR0.
	 */
	mem_reg = t4_read_reg(adap,
			      PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN,
						  win));
	mem_aperture = 1 << (GET_WINDOW(mem_reg) + 10);
	mem_base = GET_PCIEOFST(mem_reg) << 10;
	if (is_t4(adap->params.chip))
		mem_base -= adap->t4_bar0;
	win_pf = is_t4(adap->params.chip) ? 0 : V_PFNUM(adap->fn);

	/* Calculate our initial PCI-E Memory Window Position and Offset into
	 * that Window.
	 */
	pos = addr & ~(mem_aperture-1);
	offset = addr - pos;

	/* Set up initial PCI-E Memory Window to cover the start of our
	 * transfer.  (Read it back to ensure that changes propagate before we
	 * attempt to use the new value.)
	 */
	t4_write_reg(adap,
		     PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, win),
		     pos | win_pf);
	t4_read_reg(adap,
		    PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, win));

	/* Transfer data to/from the adapter as long as there's an integral
	 * number of 32-bit transfers to complete.
	 */
	while (len > 0) {
		if (dir == T4_MEMORY_READ)
			*buf++ = (__force __be32) t4_read_reg(adap,
							      mem_base + offset);
		else
			t4_write_reg(adap, mem_base + offset,
				     (__force u32) *buf++);
		offset += sizeof(__be32);
		len -= sizeof(__be32);

		/* If we've reached the end of our current window aperture,
		 * move the PCI-E Memory Window on to the next.  Note that
		 * doing this here after "len" may be 0 allows us to set up
		 * the PCI-E Memory Window for a possible final residual
		 * transfer below ...
		 */
		if (offset == mem_aperture) {
			pos += mem_aperture;
			offset = 0;
			t4_write_reg(adap,
				     PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET,
							 win), pos | win_pf);
			t4_read_reg(adap,
				    PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET,
							win));
		}
	}

	/* If the original transfer had a length which wasn't a multiple of
	 * 32-bits, now's where we need to finish off the transfer of the
	 * residual amount.  The PCI-E Memory Window has already been moved
	 * above (if necessary) to cover this final transfer.
	 */
	if (resid) {
		union {
			__be32 word;
			char byte[4];
		} last;
		unsigned char *bp;
		int i;

		if (dir == T4_MEMORY_READ) {
			last.word = (__force __be32) t4_read_reg(adap,
								 mem_base + offset);
			for (bp = (unsigned char *)buf, i = resid; i < 4; i++)
				bp[i] = last.byte[i];
		} else {
			last.word = *buf;
			for (i = resid; i < 4; i++)
				last.byte[i] = 0;
			t4_write_reg(adap, mem_base + offset,
				     (__force u32) last.word);
		}
	}

	return 0;
}
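
/* Usage sketch (hypothetical helper): read @len bytes of EDC0 starting
 * at @addr through memory window 0.  MEM_EDC0 and T4_MEMORY_READ come
 * from the driver headers, as referenced in the kernel-doc above; both
 * @addr and @buf must be 32-bit aligned.
 */
static inline int example_read_edc0(struct adapter *adap, u32 addr,
				    __be32 *buf, u32 len)
{
	return t4_memory_rw(adap, 0, MEM_EDC0, addr, len, buf,
			    T4_MEMORY_READ);
}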

#define EEPROM_STAT_ADDR   0x7bfc
#define VPD_BASE           0x400
#define VPD_BASE_OLD       0
#define VPD_LEN            1024
#define CHELSIO_VPD_UNIQUE_ID 0x82

/**
 * t4_seeprom_wp - enable/disable EEPROM write protection
 * @adapter: the adapter
 * @enable: whether to enable or disable write protection
 *
 * Enables or disables write protection on the serial EEPROM.
 */
int t4_seeprom_wp(struct adapter *adapter, bool enable)
{
	unsigned int v = enable ? 0xc : 0;
	int ret = pci_write_vpd(adapter->pdev, EEPROM_STAT_ADDR, 4, &v);
	return ret < 0 ? ret : 0;
}

/**
 * get_vpd_params - read VPD parameters from VPD EEPROM
 * @adapter: adapter to read
 * @p: where to store the parameters
 *
 * Reads card parameters stored in VPD EEPROM.
 */
int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
{
	u32 cclk_param, cclk_val;
	int i, ret, addr;
	int ec, sn, pn;
	u8 *vpd, csum;
	unsigned int vpdr_len, kw_offset, id_len;

	vpd = vmalloc(VPD_LEN);
	if (!vpd)
		return -ENOMEM;

	ret = pci_read_vpd(adapter->pdev, VPD_BASE, sizeof(u32), vpd);
	if (ret < 0)
		goto out;

	/* The VPD shall have a unique identifier specified by the PCI SIG.
	 * For Chelsio adapters the identifier is CHELSIO_VPD_UNIQUE_ID
	 * (0x82), which must be the first byte of the VPD.  The VPD
	 * programming software is expected to automatically put this entry
	 * at the beginning of the VPD.
	 */
	addr = *vpd == CHELSIO_VPD_UNIQUE_ID ? VPD_BASE : VPD_BASE_OLD;

	ret = pci_read_vpd(adapter->pdev, addr, VPD_LEN, vpd);
	if (ret < 0)
		goto out;

	if (vpd[0] != PCI_VPD_LRDT_ID_STRING) {
		dev_err(adapter->pdev_dev, "missing VPD ID string\n");
		ret = -EINVAL;
		goto out;
	}

	id_len = pci_vpd_lrdt_size(vpd);
	if (id_len > ID_LEN)
		id_len = ID_LEN;

	i = pci_vpd_find_tag(vpd, 0, VPD_LEN, PCI_VPD_LRDT_RO_DATA);
	if (i < 0) {
		dev_err(adapter->pdev_dev, "missing VPD-R section\n");
		ret = -EINVAL;
		goto out;
	}

	vpdr_len = pci_vpd_lrdt_size(&vpd[i]);
	kw_offset = i + PCI_VPD_LRDT_TAG_SIZE;
	if (vpdr_len + kw_offset > VPD_LEN) {
		dev_err(adapter->pdev_dev, "bad VPD-R length %u\n", vpdr_len);
		ret = -EINVAL;
		goto out;
	}

#define FIND_VPD_KW(var, name) do { \
	var = pci_vpd_find_info_keyword(vpd, kw_offset, vpdr_len, name); \
	if (var < 0) { \
		dev_err(adapter->pdev_dev, "missing VPD keyword " name "\n"); \
		ret = -EINVAL; \
		goto out; \
	} \
	var += PCI_VPD_INFO_FLD_HDR_SIZE; \
} while (0)

	FIND_VPD_KW(i, "RV");
	for (csum = 0; i >= 0; i--)
		csum += vpd[i];

	if (csum) {
		dev_err(adapter->pdev_dev,
			"corrupted VPD EEPROM, actual csum %u\n", csum);
		ret = -EINVAL;
		goto out;
	}

	FIND_VPD_KW(ec, "EC");
	FIND_VPD_KW(sn, "SN");
	FIND_VPD_KW(pn, "PN");
#undef FIND_VPD_KW

	memcpy(p->id, vpd + PCI_VPD_LRDT_TAG_SIZE, id_len);
	strim(p->id);
	memcpy(p->ec, vpd + ec, EC_LEN);
	strim(p->ec);
	i = pci_vpd_info_field_size(vpd + sn - PCI_VPD_INFO_FLD_HDR_SIZE);
	memcpy(p->sn, vpd + sn, min(i, SERNUM_LEN));
	strim(p->sn);
	i = pci_vpd_info_field_size(vpd + pn - PCI_VPD_INFO_FLD_HDR_SIZE);
	memcpy(p->pn, vpd + pn, min(i, PN_LEN));
	strim(p->pn);

	/*
	 * Ask firmware for the Core Clock since it knows how to translate the
	 * Reference Clock ('V2') VPD field into a Core Clock value ...
	 */
	cclk_param = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
		      FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CCLK));
	ret = t4_query_params(adapter, adapter->mbox, 0, 0,
			      1, &cclk_param, &cclk_val);

out:
	vfree(vpd);
	if (ret)
		return ret;
	p->cclk = cclk_val;

	return 0;
}
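
/* Rough sketch of the VPD layout consumed by get_vpd_params() above
 * (byte-level framing and lengths simplified; labels are assumptions
 * based on the keywords the parser looks for):
 *
 *	0x82 <len> <ID string>		- Identifier String tag
 *	0x90 <len>			- VPD-R (read-only data) tag
 *	     "EC" <len> <data>		- engineering change level
 *	     "SN" <len> <data>		- serial number
 *	     "PN" <len> <data>		- part number
 *	     "V2" <len> <data>		- reference clock (translated by FW)
 *	     "RV" <len> <csum>		- checksum byte: everything from the
 *					  start of the VPD through this byte
 *					  must sum to zero
 */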

/* serial flash and firmware constants */
enum {
	SF_ATTEMPTS = 10,             /* max retries for SF operations */

	/* flash command opcodes */
	SF_PROG_PAGE    = 2,          /* program page */
	SF_WR_DISABLE   = 4,          /* disable writes */
	SF_RD_STATUS    = 5,          /* read status register */
	SF_WR_ENABLE    = 6,          /* enable writes */
	SF_RD_DATA_FAST = 0xb,        /* read flash */
	SF_RD_ID        = 0x9f,       /* read ID */
	SF_ERASE_SECTOR = 0xd8,       /* erase sector */

	FW_MAX_SIZE = 16 * SF_SEC_SIZE,
};

/**
 * sf1_read - read data from the serial flash
 * @adapter: the adapter
 * @byte_cnt: number of bytes to read
 * @cont: whether another operation will be chained
 * @lock: whether to lock SF for PL access only
 * @valp: where to store the read data
 *
 * Reads up to 4 bytes of data from the serial flash.  The location of
 * the read needs to be specified prior to calling this by issuing the
 * appropriate commands to the serial flash.
 */
static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
		    int lock, u32 *valp)
{
	int ret;

	if (!byte_cnt || byte_cnt > 4)
		return -EINVAL;
	if (t4_read_reg(adapter, SF_OP) & SF_BUSY)
		return -EBUSY;
	cont = cont ? SF_CONT : 0;
	lock = lock ? SF_LOCK : 0;
	t4_write_reg(adapter, SF_OP, lock | cont | BYTECNT(byte_cnt - 1));
	ret = t4_wait_op_done(adapter, SF_OP, SF_BUSY, 0, SF_ATTEMPTS, 5);
	if (!ret)
		*valp = t4_read_reg(adapter, SF_DATA);
	return ret;
}

/**
 * sf1_write - write data to the serial flash
 * @adapter: the adapter
 * @byte_cnt: number of bytes to write
 * @cont: whether another operation will be chained
 * @lock: whether to lock SF for PL access only
 * @val: value to write
 *
 * Writes up to 4 bytes of data to the serial flash.  The location of
 * the write needs to be specified prior to calling this by issuing the
 * appropriate commands to the serial flash.
 */
static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont,
		     int lock, u32 val)
{
	if (!byte_cnt || byte_cnt > 4)
		return -EINVAL;
	if (t4_read_reg(adapter, SF_OP) & SF_BUSY)
		return -EBUSY;
	cont = cont ? SF_CONT : 0;
	lock = lock ? SF_LOCK : 0;
	t4_write_reg(adapter, SF_DATA, val);
	t4_write_reg(adapter, SF_OP, lock |
		     cont | BYTECNT(byte_cnt - 1) | OP_WR);
	return t4_wait_op_done(adapter, SF_OP, SF_BUSY, 0, SF_ATTEMPTS, 5);
}

/**
 * flash_wait_op - wait for a flash operation to complete
 * @adapter: the adapter
 * @attempts: max number of polls of the status register
 * @delay: delay between polls in ms
 *
 * Wait for a flash operation to complete by polling the status register.
 */
static int flash_wait_op(struct adapter *adapter, int attempts, int delay)
{
	int ret;
	u32 status;

	while (1) {
		if ((ret = sf1_write(adapter, 1, 1, 1, SF_RD_STATUS)) != 0 ||
		    (ret = sf1_read(adapter, 1, 0, 1, &status)) != 0)
			return ret;
		if (!(status & 1))
			return 0;
		if (--attempts == 0)
			return -EAGAIN;
		if (delay)
			msleep(delay);
	}
}

/**
 * t4_read_flash - read words from serial flash
 * @adapter: the adapter
 * @addr: the start address for the read
 * @nwords: how many 32-bit words to read
 * @data: where to store the read data
 * @byte_oriented: whether to store data as bytes or as words
 *
 * Read the specified number of 32-bit words from the serial flash.
 * If @byte_oriented is set the read data is stored as a byte array
 * (i.e., big-endian), otherwise as 32-bit words in the platform's
 * natural endianness.
 */
static int t4_read_flash(struct adapter *adapter, unsigned int addr,
			 unsigned int nwords, u32 *data, int byte_oriented)
{
	int ret;

	if (addr + nwords * sizeof(u32) > adapter->params.sf_size || (addr & 3))
		return -EINVAL;

	addr = swab32(addr) | SF_RD_DATA_FAST;

	if ((ret = sf1_write(adapter, 4, 1, 0, addr)) != 0 ||
	    (ret = sf1_read(adapter, 1, 1, 0, data)) != 0)
		return ret;

	for ( ; nwords; nwords--, data++) {
		ret = sf1_read(adapter, 4, nwords > 1, nwords == 1, data);
		if (nwords == 1)
			t4_write_reg(adapter, SF_OP, 0);        /* unlock SF */
		if (ret)
			return ret;
		if (byte_oriented)
			*data = (__force __u32) (htonl(*data));
	}
	return 0;
}

/**
 * t4_write_flash - write up to a page of data to the serial flash
 * @adapter: the adapter
 * @addr: the start address to write
 * @n: length of data to write in bytes
 * @data: the data to write
 *
 * Writes up to a page of data (256 bytes) to the serial flash starting
 * at the given address.  All the data must be written to the same page.
 */
static int t4_write_flash(struct adapter *adapter, unsigned int addr,
			  unsigned int n, const u8 *data)
{
	int ret;
	u32 buf[64];
	unsigned int i, c, left, val, offset = addr & 0xff;

	if (addr >= adapter->params.sf_size || offset + n > SF_PAGE_SIZE)
		return -EINVAL;

	val = swab32(addr) | SF_PROG_PAGE;

	if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
	    (ret = sf1_write(adapter, 4, 1, 1, val)) != 0)
		goto unlock;

	for (left = n; left; left -= c) {
		c = min(left, 4U);
		for (val = 0, i = 0; i < c; ++i)
			val = (val << 8) + *data++;

		ret = sf1_write(adapter, c, c != left, 1, val);
		if (ret)
			goto unlock;
	}
	ret = flash_wait_op(adapter, 8, 1);
	if (ret)
		goto unlock;

	t4_write_reg(adapter, SF_OP, 0);        /* unlock SF */

	/* Read the page to verify the write succeeded */
	ret = t4_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf, 1);
	if (ret)
		return ret;

	if (memcmp(data - n, (u8 *)buf + offset, n)) {
		dev_err(adapter->pdev_dev,
			"failed to correctly write the flash page at %#x\n",
			addr);
		return -EIO;
	}
	return 0;

unlock:
	t4_write_reg(adapter, SF_OP, 0);        /* unlock SF */
	return ret;
}

/**
 * t4_get_fw_version - read the firmware version
 * @adapter: the adapter
 * @vers: where to place the version
 *
 * Reads the FW version from flash.
 */
int t4_get_fw_version(struct adapter *adapter, u32 *vers)
{
	return t4_read_flash(adapter, FLASH_FW_START +
			     offsetof(struct fw_hdr, fw_ver), 1,
			     vers, 0);
}

/**
 * t4_get_tp_version - read the TP microcode version
 * @adapter: the adapter
 * @vers: where to place the version
 *
 * Reads the TP microcode version from flash.
 */
int t4_get_tp_version(struct adapter *adapter, u32 *vers)
{
	return t4_read_flash(adapter, FLASH_FW_START +
			     offsetof(struct fw_hdr, tp_microcode_ver),
			     1, vers, 0);
}

/* Is the given firmware API compatible with the one the driver was compiled
 * with?
 */
static int fw_compatible(const struct fw_hdr *hdr1, const struct fw_hdr *hdr2)
{
	/* short circuit if it's the exact same firmware version */
	if (hdr1->chip == hdr2->chip && hdr1->fw_ver == hdr2->fw_ver)
		return 1;

#define SAME_INTF(x) (hdr1->intfver_##x == hdr2->intfver_##x)
	if (hdr1->chip == hdr2->chip && SAME_INTF(nic) && SAME_INTF(vnic) &&
	    SAME_INTF(ri) && SAME_INTF(iscsi) && SAME_INTF(fcoe))
		return 1;
#undef SAME_INTF

	return 0;
}

/* The firmware in the filesystem is usable, but should it be installed?
 * This routine explains itself in detail if it indicates the filesystem
 * firmware should be installed.
 */
static int should_install_fs_fw(struct adapter *adap, int card_fw_usable,
				int k, int c)
{
	const char *reason;

	if (!card_fw_usable) {
		reason = "incompatible or unusable";
		goto install;
	}

	if (k > c) {
		reason = "older than the version supported with this driver";
		goto install;
	}

	return 0;

install:
	dev_err(adap->pdev_dev, "firmware on card (%u.%u.%u.%u) is %s, "
		"installing firmware %u.%u.%u.%u on card.\n",
		FW_HDR_FW_VER_MAJOR_GET(c), FW_HDR_FW_VER_MINOR_GET(c),
		FW_HDR_FW_VER_MICRO_GET(c), FW_HDR_FW_VER_BUILD_GET(c), reason,
		FW_HDR_FW_VER_MAJOR_GET(k), FW_HDR_FW_VER_MINOR_GET(k),
		FW_HDR_FW_VER_MICRO_GET(k), FW_HDR_FW_VER_BUILD_GET(k));

	return 1;
}

int t4_prep_fw(struct adapter *adap, struct fw_info *fw_info,
	       const u8 *fw_data, unsigned int fw_size,
	       struct fw_hdr *card_fw, enum dev_state state,
	       int *reset)
{
	int ret, card_fw_usable, fs_fw_usable;
	const struct fw_hdr *fs_fw;
	const struct fw_hdr *drv_fw;

	drv_fw = &fw_info->fw_hdr;

	/* Read the header of the firmware on the card */
	ret = -t4_read_flash(adap, FLASH_FW_START,
			     sizeof(*card_fw) / sizeof(uint32_t),
			     (uint32_t *)card_fw, 1);
	if (ret == 0) {
		card_fw_usable = fw_compatible(drv_fw, (const void *)card_fw);
	} else {
		dev_err(adap->pdev_dev,
			"Unable to read card's firmware header: %d\n", ret);
		card_fw_usable = 0;
	}

	if (fw_data != NULL) {
		fs_fw = (const void *)fw_data;
		fs_fw_usable = fw_compatible(drv_fw, fs_fw);
	} else {
		fs_fw = NULL;
		fs_fw_usable = 0;
	}

	if (card_fw_usable && card_fw->fw_ver == drv_fw->fw_ver &&
	    (!fs_fw_usable || fs_fw->fw_ver == drv_fw->fw_ver)) {
		/* Common case: the firmware on the card is an exact match and
		 * the filesystem one is an exact match too, or the filesystem
		 * one is absent/incompatible.
		 */
	} else if (fs_fw_usable && state == DEV_STATE_UNINIT &&
		   should_install_fs_fw(adap, card_fw_usable,
					be32_to_cpu(fs_fw->fw_ver),
					be32_to_cpu(card_fw->fw_ver))) {
		ret = -t4_fw_upgrade(adap, adap->mbox, fw_data,
				     fw_size, 0);
		if (ret != 0) {
			dev_err(adap->pdev_dev,
				"failed to install firmware: %d\n", ret);
			goto bye;
		}

		/* Installed successfully, update the cached header too. */
		memcpy(card_fw, fs_fw, sizeof(*card_fw));
		card_fw_usable = 1;
		*reset = 0;     /* already reset as part of load_fw */
	}

	if (!card_fw_usable) {
		uint32_t d, c, k;

		d = be32_to_cpu(drv_fw->fw_ver);
		c = be32_to_cpu(card_fw->fw_ver);
		k = fs_fw ? be32_to_cpu(fs_fw->fw_ver) : 0;

		dev_err(adap->pdev_dev, "Cannot find a usable firmware: "
			"chip state %d, "
			"driver compiled with %d.%d.%d.%d, "
			"card has %d.%d.%d.%d, filesystem has %d.%d.%d.%d\n",
			state,
			FW_HDR_FW_VER_MAJOR_GET(d), FW_HDR_FW_VER_MINOR_GET(d),
			FW_HDR_FW_VER_MICRO_GET(d), FW_HDR_FW_VER_BUILD_GET(d),
			FW_HDR_FW_VER_MAJOR_GET(c), FW_HDR_FW_VER_MINOR_GET(c),
			FW_HDR_FW_VER_MICRO_GET(c), FW_HDR_FW_VER_BUILD_GET(c),
			FW_HDR_FW_VER_MAJOR_GET(k), FW_HDR_FW_VER_MINOR_GET(k),
			FW_HDR_FW_VER_MICRO_GET(k), FW_HDR_FW_VER_BUILD_GET(k));
		ret = EINVAL;
		goto bye;
	}

	/* We're using whatever's on the card and it's known to be good. */
	adap->params.fw_vers = be32_to_cpu(card_fw->fw_ver);
	adap->params.tp_vers = be32_to_cpu(card_fw->tp_microcode_ver);

bye:
	return ret;
}

/**
 * t4_flash_erase_sectors - erase a range of flash sectors
 * @adapter: the adapter
 * @start: the first sector to erase
 * @end: the last sector to erase
 *
 * Erases the sectors in the given inclusive range.
 */
static int t4_flash_erase_sectors(struct adapter *adapter, int start, int end)
{
	int ret = 0;

	while (start <= end) {
		if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
		    (ret = sf1_write(adapter, 4, 0, 1,
				     SF_ERASE_SECTOR | (start << 8))) != 0 ||
		    (ret = flash_wait_op(adapter, 14, 500)) != 0) {
			dev_err(adapter->pdev_dev,
				"erase of flash sector %d failed, error %d\n",
				start, ret);
			break;
		}
		start++;
	}
	t4_write_reg(adapter, SF_OP, 0);        /* unlock SF */
	return ret;
}

/**
 * t4_flash_cfg_addr - return the address of the flash configuration file
 * @adapter: the adapter
 *
 * Return the address within the flash where the Firmware Configuration
 * File is stored.
 */
unsigned int t4_flash_cfg_addr(struct adapter *adapter)
{
	if (adapter->params.sf_size == 0x100000)
		return FLASH_FPGA_CFG_START;
	else
		return FLASH_CFG_START;
}

/**
 * t4_load_fw - download firmware
 * @adap: the adapter
 * @fw_data: the firmware image to write
 * @size: image size
 *
 * Write the supplied firmware image to the card's serial flash.
 */
int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
{
	u32 csum;
	int ret, addr;
	unsigned int i;
	u8 first_page[SF_PAGE_SIZE];
	const __be32 *p = (const __be32 *)fw_data;
	const struct fw_hdr *hdr = (const struct fw_hdr *)fw_data;
	unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
	unsigned int fw_img_start = adap->params.sf_fw_start;
	unsigned int fw_start_sec = fw_img_start / sf_sec_size;

	if (!size) {
		dev_err(adap->pdev_dev, "FW image has no data\n");
		return -EINVAL;
	}
	if (size & 511) {
		dev_err(adap->pdev_dev,
			"FW image size not multiple of 512 bytes\n");
		return -EINVAL;
	}
	if (ntohs(hdr->len512) * 512 != size) {
		dev_err(adap->pdev_dev,
			"FW image size differs from size in FW header\n");
		return -EINVAL;
	}
	if (size > FW_MAX_SIZE) {
		dev_err(adap->pdev_dev, "FW image too large, max is %u bytes\n",
			FW_MAX_SIZE);
		return -EFBIG;
	}

	for (csum = 0, i = 0; i < size / sizeof(csum); i++)
		csum += ntohl(p[i]);

	if (csum != 0xffffffff) {
		dev_err(adap->pdev_dev,
			"corrupted firmware image, checksum %#x\n", csum);
		return -EINVAL;
	}

	i = DIV_ROUND_UP(size, sf_sec_size);        /* # of sectors spanned */
	ret = t4_flash_erase_sectors(adap, fw_start_sec, fw_start_sec + i - 1);
	if (ret)
		goto out;

	/*
	 * We write the correct version at the end so the driver can see a bad
	 * version if the FW write fails.  Start by writing a copy of the
	 * first page with a bad version.
	 */
	memcpy(first_page, fw_data, SF_PAGE_SIZE);
	((struct fw_hdr *)first_page)->fw_ver = htonl(0xffffffff);
	ret = t4_write_flash(adap, fw_img_start, SF_PAGE_SIZE, first_page);
	if (ret)
		goto out;

	addr = fw_img_start;
	for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
		addr += SF_PAGE_SIZE;
		fw_data += SF_PAGE_SIZE;
		ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, fw_data);
		if (ret)
			goto out;
	}

	ret = t4_write_flash(adap,
			     fw_img_start + offsetof(struct fw_hdr, fw_ver),
			     sizeof(hdr->fw_ver), (const u8 *)&hdr->fw_ver);
out:
	if (ret)
		dev_err(adap->pdev_dev, "firmware download failed, error %d\n",
			ret);
	return ret;
}
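
/* Illustrative sketch (assumption: images follow the checksum rule
 * enforced above, i.e. the 32-bit mod-2^32 sum of every big-endian word
 * in the image, including the header's checksum field, must equal
 * 0xffffffff).  This hypothetical helper computes the value to place in
 * that field when the field itself currently holds 0.
 */
static inline u32 example_fw_csum_value(const __be32 *p, unsigned int nwords)
{
	u32 sum = 0;
	unsigned int i;

	for (i = 0; i < nwords; i++)
		sum += ntohl(p[i]);	/* checksum word assumed to be 0 here */
	return 0xffffffff - sum;	/* makes the total sum 0xffffffff */
}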

#define ADVERT_MASK (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G |\
		     FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_SPEED_40G | \
		     FW_PORT_CAP_ANEG)

/**
 * t4_link_start - apply link configuration to MAC/PHY
 * @adap: the adapter
 * @mbox: mbox to use for the FW command
 * @port: the port id
 * @lc: the requested link configuration
 *
 * Set up a port's MAC and PHY according to a desired link configuration.
 * - If the PHY can auto-negotiate first decide what to advertise, then
 *   enable/disable auto-negotiation as desired, and reset.
 * - If the PHY does not auto-negotiate just reset it.
 * - If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
 *   otherwise do it later based on the outcome of auto-negotiation.
 */
int t4_link_start(struct adapter *adap, unsigned int mbox, unsigned int port,
		  struct link_config *lc)
{
	struct fw_port_cmd c;
	unsigned int fc = 0, mdi = FW_PORT_MDI(FW_PORT_MDI_AUTO);

	lc->link_ok = 0;
	if (lc->requested_fc & PAUSE_RX)
		fc |= FW_PORT_CAP_FC_RX;
	if (lc->requested_fc & PAUSE_TX)
		fc |= FW_PORT_CAP_FC_TX;

	memset(&c, 0, sizeof(c));
	c.op_to_portid = htonl(FW_CMD_OP(FW_PORT_CMD) | FW_CMD_REQUEST |
			       FW_CMD_EXEC | FW_PORT_CMD_PORTID(port));
	c.action_to_len16 = htonl(FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
				  FW_LEN16(c));

	if (!(lc->supported & FW_PORT_CAP_ANEG)) {
		c.u.l1cfg.rcap = htonl((lc->supported & ADVERT_MASK) | fc);
		lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
	} else if (lc->autoneg == AUTONEG_DISABLE) {
		c.u.l1cfg.rcap = htonl(lc->requested_speed | fc | mdi);
		lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
	} else
		c.u.l1cfg.rcap = htonl(lc->advertising | fc | mdi);

	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}

/**
 * t4_restart_aneg - restart autonegotiation
 * @adap: the adapter
 * @mbox: mbox to use for the FW command
 * @port: the port id
 *
 * Restarts autonegotiation for the selected port.
 */
int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port)
{
	struct fw_port_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_portid = htonl(FW_CMD_OP(FW_PORT_CMD) | FW_CMD_REQUEST |
			       FW_CMD_EXEC | FW_PORT_CMD_PORTID(port));
	c.action_to_len16 = htonl(FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
				  FW_LEN16(c));
	c.u.l1cfg.rcap = htonl(FW_PORT_CAP_ANEG);
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}

typedef void (*int_handler_t)(struct adapter *adap);

struct intr_info {
	unsigned int mask;       /* bits to check in interrupt status */
	const char *msg;         /* message to print or NULL */
	short stat_idx;          /* stat counter to increment or -1 */
	unsigned short fatal;    /* whether the condition reported is fatal */
	int_handler_t int_handler; /* platform-specific int handler */
};

/**
 * t4_handle_intr_status - table driven interrupt handler
 * @adapter: the adapter that generated the interrupt
 * @reg: the interrupt status register to process
 * @acts: table of interrupt actions
 *
 * A table driven interrupt handler that applies a set of masks to an
 * interrupt status word and performs the corresponding actions if the
 * interrupts described by the mask have occurred.  The actions include
 * optionally emitting a warning or alert message.  The table is terminated
 * by an entry specifying mask 0.  Returns the number of fatal interrupt
 * conditions.
 */
static int t4_handle_intr_status(struct adapter *adapter, unsigned int reg,
				 const struct intr_info *acts)
{
	int fatal = 0;
	unsigned int mask = 0;
	unsigned int status = t4_read_reg(adapter, reg);

	for ( ; acts->mask; ++acts) {
		if (!(status & acts->mask))
			continue;
		if (acts->fatal) {
			fatal++;
			dev_alert(adapter->pdev_dev, "%s (0x%x)\n", acts->msg,
				  status & acts->mask);
		} else if (acts->msg && printk_ratelimit())
			dev_warn(adapter->pdev_dev, "%s (0x%x)\n", acts->msg,
				 status & acts->mask);
		if (acts->int_handler)
			acts->int_handler(adapter);
		mask |= acts->mask;
	}
	status &= mask;
	if (status)                           /* clear processed interrupts */
		t4_write_reg(adapter, reg, status);
	return fatal;
}
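
/* Illustrative sketch: the per-module handlers below all follow the
 * same pattern -- a { mask, message, stat_idx, fatal[, handler] } table
 * with a zero-mask terminator, handed to t4_handle_intr_status().  A
 * minimal, hypothetical handler parameterized by its cause register:
 */
static inline void example_intr_handler(struct adapter *adapter,
					unsigned int cause_reg)
{
	static const struct intr_info example_intr_info[] = {
		{ 0x1, "example module parity error", -1, 1 },
		{ 0x2, "example module FIFO overflow", -1, 0 },
		{ 0 }	/* mask 0 terminates the table */
	};

	if (t4_handle_intr_status(adapter, cause_reg, example_intr_info))
		t4_fatal_err(adapter);
}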
1334
1335/*
1336 * Interrupt handler for the PCIE module.
1337 */
1338static void pcie_intr_handler(struct adapter *adapter)
1339{
005b5717 1340 static const struct intr_info sysbus_intr_info[] = {
56d36be4
DM
1341 { RNPP, "RXNP array parity error", -1, 1 },
1342 { RPCP, "RXPC array parity error", -1, 1 },
1343 { RCIP, "RXCIF array parity error", -1, 1 },
1344 { RCCP, "Rx completions control array parity error", -1, 1 },
1345 { RFTP, "RXFT array parity error", -1, 1 },
1346 { 0 }
1347 };
005b5717 1348 static const struct intr_info pcie_port_intr_info[] = {
56d36be4
DM
1349 { TPCP, "TXPC array parity error", -1, 1 },
1350 { TNPP, "TXNP array parity error", -1, 1 },
1351 { TFTP, "TXFT array parity error", -1, 1 },
1352 { TCAP, "TXCA array parity error", -1, 1 },
1353 { TCIP, "TXCIF array parity error", -1, 1 },
1354 { RCAP, "RXCA array parity error", -1, 1 },
1355 { OTDD, "outbound request TLP discarded", -1, 1 },
1356 { RDPE, "Rx data parity error", -1, 1 },
1357 { TDUE, "Tx uncorrectable data error", -1, 1 },
1358 { 0 }
1359 };
005b5717 1360 static const struct intr_info pcie_intr_info[] = {
56d36be4
DM
1361 { MSIADDRLPERR, "MSI AddrL parity error", -1, 1 },
1362 { MSIADDRHPERR, "MSI AddrH parity error", -1, 1 },
1363 { MSIDATAPERR, "MSI data parity error", -1, 1 },
1364 { MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 },
1365 { MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 },
1366 { MSIXDATAPERR, "MSI-X data parity error", -1, 1 },
1367 { MSIXDIPERR, "MSI-X DI parity error", -1, 1 },
1368 { PIOCPLPERR, "PCI PIO completion FIFO parity error", -1, 1 },
1369 { PIOREQPERR, "PCI PIO request FIFO parity error", -1, 1 },
1370 { TARTAGPERR, "PCI PCI target tag FIFO parity error", -1, 1 },
1371 { CCNTPERR, "PCI CMD channel count parity error", -1, 1 },
1372 { CREQPERR, "PCI CMD channel request parity error", -1, 1 },
1373 { CRSPPERR, "PCI CMD channel response parity error", -1, 1 },
1374 { DCNTPERR, "PCI DMA channel count parity error", -1, 1 },
1375 { DREQPERR, "PCI DMA channel request parity error", -1, 1 },
1376 { DRSPPERR, "PCI DMA channel response parity error", -1, 1 },
1377 { HCNTPERR, "PCI HMA channel count parity error", -1, 1 },
1378 { HREQPERR, "PCI HMA channel request parity error", -1, 1 },
1379 { HRSPPERR, "PCI HMA channel response parity error", -1, 1 },
1380 { CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 },
1381 { FIDPERR, "PCI FID parity error", -1, 1 },
1382 { INTXCLRPERR, "PCI INTx clear parity error", -1, 1 },
1383 { MATAGPERR, "PCI MA tag parity error", -1, 1 },
1384 { PIOTAGPERR, "PCI PIO tag parity error", -1, 1 },
1385 { RXCPLPERR, "PCI Rx completion parity error", -1, 1 },
1386 { RXWRPERR, "PCI Rx write parity error", -1, 1 },
1387 { RPLPERR, "PCI replay buffer parity error", -1, 1 },
1388 { PCIESINT, "PCI core secondary fault", -1, 1 },
1389 { PCIEPINT, "PCI core primary fault", -1, 1 },
1390 { UNXSPLCPLERR, "PCI unexpected split completion error", -1, 0 },
1391 { 0 }
1392 };
1393
0a57a536
SR
1394 static struct intr_info t5_pcie_intr_info[] = {
1395 { MSTGRPPERR, "Master Response Read Queue parity error",
1396 -1, 1 },
1397 { MSTTIMEOUTPERR, "Master Timeout FIFO parity error", -1, 1 },
1398 { MSIXSTIPERR, "MSI-X STI SRAM parity error", -1, 1 },
1399 { MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 },
1400 { MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 },
1401 { MSIXDATAPERR, "MSI-X data parity error", -1, 1 },
1402 { MSIXDIPERR, "MSI-X DI parity error", -1, 1 },
1403 { PIOCPLGRPPERR, "PCI PIO completion Group FIFO parity error",
1404 -1, 1 },
1405 { PIOREQGRPPERR, "PCI PIO request Group FIFO parity error",
1406 -1, 1 },
1407 { TARTAGPERR, "PCI PCI target tag FIFO parity error", -1, 1 },
1408 { MSTTAGQPERR, "PCI master tag queue parity error", -1, 1 },
1409 { CREQPERR, "PCI CMD channel request parity error", -1, 1 },
1410 { CRSPPERR, "PCI CMD channel response parity error", -1, 1 },
1411 { DREQWRPERR, "PCI DMA channel write request parity error",
1412 -1, 1 },
1413 { DREQPERR, "PCI DMA channel request parity error", -1, 1 },
1414 { DRSPPERR, "PCI DMA channel response parity error", -1, 1 },
1415 { HREQWRPERR, "PCI HMA channel count parity error", -1, 1 },
1416 { HREQPERR, "PCI HMA channel request parity error", -1, 1 },
1417 { HRSPPERR, "PCI HMA channel response parity error", -1, 1 },
1418 { CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 },
1419 { FIDPERR, "PCI FID parity error", -1, 1 },
1420 { VFIDPERR, "PCI INTx clear parity error", -1, 1 },
1421 { MAGRPPERR, "PCI MA group FIFO parity error", -1, 1 },
1422 { PIOTAGPERR, "PCI PIO tag parity error", -1, 1 },
1423 { IPRXHDRGRPPERR, "PCI IP Rx header group parity error",
1424 -1, 1 },
1425 { IPRXDATAGRPPERR, "PCI IP Rx data group parity error", -1, 1 },
1426 { RPLPERR, "PCI IP replay buffer parity error", -1, 1 },
1427 { IPSOTPERR, "PCI IP SOT buffer parity error", -1, 1 },
1428 { TRGT1GRPPERR, "PCI TRGT1 group FIFOs parity error", -1, 1 },
1429 { READRSPERR, "Outbound read error", -1, 0 },
1430 { 0 }
1431 };
1432
56d36be4
DM
1433 int fat;
1434
9bb59b96
HS
1435 if (is_t4(adapter->params.chip))
1436 fat = t4_handle_intr_status(adapter,
1437 PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS,
1438 sysbus_intr_info) +
1439 t4_handle_intr_status(adapter,
1440 PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
1441 pcie_port_intr_info) +
1442 t4_handle_intr_status(adapter, PCIE_INT_CAUSE,
1443 pcie_intr_info);
1444 else
1445 fat = t4_handle_intr_status(adapter, PCIE_INT_CAUSE,
1446 t5_pcie_intr_info);
0a57a536 1447
56d36be4
DM
1448 if (fat)
1449 t4_fatal_err(adapter);
1450}
1451
1452/*
1453 * TP interrupt handler.
1454 */
1455static void tp_intr_handler(struct adapter *adapter)
1456{
005b5717 1457 static const struct intr_info tp_intr_info[] = {
56d36be4
DM
1458 { 0x3fffffff, "TP parity error", -1, 1 },
1459 { FLMTXFLSTEMPTY, "TP out of Tx pages", -1, 1 },
1460 { 0 }
1461 };
1462
1463 if (t4_handle_intr_status(adapter, TP_INT_CAUSE, tp_intr_info))
1464 t4_fatal_err(adapter);
1465}
1466
1467/*
1468 * SGE interrupt handler.
1469 */
1470static void sge_intr_handler(struct adapter *adapter)
1471{
1472 u64 v;
1473
005b5717 1474 static const struct intr_info sge_intr_info[] = {
56d36be4
DM
1475 { ERR_CPL_EXCEED_IQE_SIZE,
1476 "SGE received CPL exceeding IQE size", -1, 1 },
1477 { ERR_INVALID_CIDX_INC,
1478 "SGE GTS CIDX increment too large", -1, 0 },
1479 { ERR_CPL_OPCODE_0, "SGE received 0-length CPL", -1, 0 },
840f3000
VP
1480 { DBFIFO_LP_INT, NULL, -1, 0, t4_db_full },
1481 { DBFIFO_HP_INT, NULL, -1, 0, t4_db_full },
1482 { ERR_DROPPED_DB, NULL, -1, 0, t4_db_dropped },
56d36be4
DM
1483 { ERR_DATA_CPL_ON_HIGH_QID1 | ERR_DATA_CPL_ON_HIGH_QID0,
1484 "SGE IQID > 1023 received CPL for FL", -1, 0 },
1485 { ERR_BAD_DB_PIDX3, "SGE DBP 3 pidx increment too large", -1,
1486 0 },
1487 { ERR_BAD_DB_PIDX2, "SGE DBP 2 pidx increment too large", -1,
1488 0 },
1489 { ERR_BAD_DB_PIDX1, "SGE DBP 1 pidx increment too large", -1,
1490 0 },
1491 { ERR_BAD_DB_PIDX0, "SGE DBP 0 pidx increment too large", -1,
1492 0 },
1493 { ERR_ING_CTXT_PRIO,
1494 "SGE too many priority ingress contexts", -1, 0 },
1495 { ERR_EGR_CTXT_PRIO,
1496 "SGE too many priority egress contexts", -1, 0 },
1497 { INGRESS_SIZE_ERR, "SGE illegal ingress QID", -1, 0 },
1498 { EGRESS_SIZE_ERR, "SGE illegal egress QID", -1, 0 },
1499 { 0 }
1500 };
1501
1502 v = (u64)t4_read_reg(adapter, SGE_INT_CAUSE1) |
8caa1e84 1503 ((u64)t4_read_reg(adapter, SGE_INT_CAUSE2) << 32);
56d36be4
DM
1504 if (v) {
1505 dev_alert(adapter->pdev_dev, "SGE parity error (%#llx)\n",
8caa1e84 1506 (unsigned long long)v);
56d36be4
DM
1507 t4_write_reg(adapter, SGE_INT_CAUSE1, v);
1508 t4_write_reg(adapter, SGE_INT_CAUSE2, v >> 32);
1509 }
1510
1511 if (t4_handle_intr_status(adapter, SGE_INT_CAUSE3, sge_intr_info) ||
1512 v != 0)
1513 t4_fatal_err(adapter);
1514}
1515
1516/*
1517 * CIM interrupt handler.
1518 */
1519static void cim_intr_handler(struct adapter *adapter)
1520{
005b5717 1521 static const struct intr_info cim_intr_info[] = {
56d36be4
DM
1522 { PREFDROPINT, "CIM control register prefetch drop", -1, 1 },
1523 { OBQPARERR, "CIM OBQ parity error", -1, 1 },
1524 { IBQPARERR, "CIM IBQ parity error", -1, 1 },
1525 { MBUPPARERR, "CIM mailbox uP parity error", -1, 1 },
1526 { MBHOSTPARERR, "CIM mailbox host parity error", -1, 1 },
1527 { TIEQINPARERRINT, "CIM TIEQ outgoing parity error", -1, 1 },
1528 { TIEQOUTPARERRINT, "CIM TIEQ incoming parity error", -1, 1 },
1529 { 0 }
1530 };
005b5717 1531 static const struct intr_info cim_upintr_info[] = {
56d36be4
DM
1532 { RSVDSPACEINT, "CIM reserved space access", -1, 1 },
1533 { ILLTRANSINT, "CIM illegal transaction", -1, 1 },
1534 { ILLWRINT, "CIM illegal write", -1, 1 },
1535 { ILLRDINT, "CIM illegal read", -1, 1 },
1536 { ILLRDBEINT, "CIM illegal read BE", -1, 1 },
1537 { ILLWRBEINT, "CIM illegal write BE", -1, 1 },
1538 { SGLRDBOOTINT, "CIM single read from boot space", -1, 1 },
1539 { SGLWRBOOTINT, "CIM single write to boot space", -1, 1 },
1540 { BLKWRBOOTINT, "CIM block write to boot space", -1, 1 },
1541 { SGLRDFLASHINT, "CIM single read from flash space", -1, 1 },
1542 { SGLWRFLASHINT, "CIM single write to flash space", -1, 1 },
1543 { BLKWRFLASHINT, "CIM block write to flash space", -1, 1 },
1544 { SGLRDEEPROMINT, "CIM single EEPROM read", -1, 1 },
1545 { SGLWREEPROMINT, "CIM single EEPROM write", -1, 1 },
1546 { BLKRDEEPROMINT, "CIM block EEPROM read", -1, 1 },
1547 { BLKWREEPROMINT, "CIM block EEPROM write", -1, 1 },
1548 { SGLRDCTLINT , "CIM single read from CTL space", -1, 1 },
1549 { SGLWRCTLINT , "CIM single write to CTL space", -1, 1 },
1550 { BLKRDCTLINT , "CIM block read from CTL space", -1, 1 },
1551 { BLKWRCTLINT , "CIM block write to CTL space", -1, 1 },
1552 { SGLRDPLINT , "CIM single read from PL space", -1, 1 },
1553 { SGLWRPLINT , "CIM single write to PL space", -1, 1 },
1554 { BLKRDPLINT , "CIM block read from PL space", -1, 1 },
1555 { BLKWRPLINT , "CIM block write to PL space", -1, 1 },
1556 { REQOVRLOOKUPINT , "CIM request FIFO overwrite", -1, 1 },
1557 { RSPOVRLOOKUPINT , "CIM response FIFO overwrite", -1, 1 },
1558 { TIMEOUTINT , "CIM PIF timeout", -1, 1 },
1559 { TIMEOUTMAINT , "CIM PIF MA timeout", -1, 1 },
1560 { 0 }
1561 };
1562
1563 int fat;
1564
1565 if (t4_read_reg(adapter, MA_PCIE_FW) & FW_PCIE_FW_ERR)
1566 t4_report_fw_error(adapter);
1567
1568 fat = t4_handle_intr_status(adapter, CIM_HOST_INT_CAUSE,
1569 cim_intr_info) +
1570 t4_handle_intr_status(adapter, CIM_HOST_UPACC_INT_CAUSE,
1571 cim_upintr_info);
1572 if (fat)
1573 t4_fatal_err(adapter);
1574}
1575
1576/*
1577 * ULP RX interrupt handler.
1578 */
1579static void ulprx_intr_handler(struct adapter *adapter)
1580{
 1581	static const struct intr_info ulprx_intr_info[] = {
 1582		{ 0x1800000, "ULPRX context error", -1, 1 },
1583 { 0x7fffff, "ULPRX parity error", -1, 1 },
1584 { 0 }
1585 };
1586
1587 if (t4_handle_intr_status(adapter, ULP_RX_INT_CAUSE, ulprx_intr_info))
1588 t4_fatal_err(adapter);
1589}
1590
1591/*
1592 * ULP TX interrupt handler.
1593 */
1594static void ulptx_intr_handler(struct adapter *adapter)
1595{
 1596	static const struct intr_info ulptx_intr_info[] = {
1597 { PBL_BOUND_ERR_CH3, "ULPTX channel 3 PBL out of bounds", -1,
1598 0 },
1599 { PBL_BOUND_ERR_CH2, "ULPTX channel 2 PBL out of bounds", -1,
1600 0 },
1601 { PBL_BOUND_ERR_CH1, "ULPTX channel 1 PBL out of bounds", -1,
1602 0 },
1603 { PBL_BOUND_ERR_CH0, "ULPTX channel 0 PBL out of bounds", -1,
1604 0 },
1605 { 0xfffffff, "ULPTX parity error", -1, 1 },
1606 { 0 }
1607 };
1608
1609 if (t4_handle_intr_status(adapter, ULP_TX_INT_CAUSE, ulptx_intr_info))
1610 t4_fatal_err(adapter);
1611}
1612
1613/*
1614 * PM TX interrupt handler.
1615 */
1616static void pmtx_intr_handler(struct adapter *adapter)
1617{
 1618	static const struct intr_info pmtx_intr_info[] = {
1619 { PCMD_LEN_OVFL0, "PMTX channel 0 pcmd too large", -1, 1 },
1620 { PCMD_LEN_OVFL1, "PMTX channel 1 pcmd too large", -1, 1 },
1621 { PCMD_LEN_OVFL2, "PMTX channel 2 pcmd too large", -1, 1 },
1622 { ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1 },
1623 { PMTX_FRAMING_ERROR, "PMTX framing error", -1, 1 },
1624 { OESPI_PAR_ERROR, "PMTX oespi parity error", -1, 1 },
1625 { DB_OPTIONS_PAR_ERROR, "PMTX db_options parity error", -1, 1 },
1626 { ICSPI_PAR_ERROR, "PMTX icspi parity error", -1, 1 },
1627 { C_PCMD_PAR_ERROR, "PMTX c_pcmd parity error", -1, 1},
1628 { 0 }
1629 };
1630
1631 if (t4_handle_intr_status(adapter, PM_TX_INT_CAUSE, pmtx_intr_info))
1632 t4_fatal_err(adapter);
1633}
1634
1635/*
1636 * PM RX interrupt handler.
1637 */
1638static void pmrx_intr_handler(struct adapter *adapter)
1639{
 1640	static const struct intr_info pmrx_intr_info[] = {
1641 { ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1 },
1642 { PMRX_FRAMING_ERROR, "PMRX framing error", -1, 1 },
1643 { OCSPI_PAR_ERROR, "PMRX ocspi parity error", -1, 1 },
1644 { DB_OPTIONS_PAR_ERROR, "PMRX db_options parity error", -1, 1 },
1645 { IESPI_PAR_ERROR, "PMRX iespi parity error", -1, 1 },
1646 { E_PCMD_PAR_ERROR, "PMRX e_pcmd parity error", -1, 1},
1647 { 0 }
1648 };
1649
1650 if (t4_handle_intr_status(adapter, PM_RX_INT_CAUSE, pmrx_intr_info))
1651 t4_fatal_err(adapter);
1652}
1653
1654/*
1655 * CPL switch interrupt handler.
1656 */
1657static void cplsw_intr_handler(struct adapter *adapter)
1658{
 1659	static const struct intr_info cplsw_intr_info[] = {
1660 { CIM_OP_MAP_PERR, "CPLSW CIM op_map parity error", -1, 1 },
1661 { CIM_OVFL_ERROR, "CPLSW CIM overflow", -1, 1 },
1662 { TP_FRAMING_ERROR, "CPLSW TP framing error", -1, 1 },
1663 { SGE_FRAMING_ERROR, "CPLSW SGE framing error", -1, 1 },
1664 { CIM_FRAMING_ERROR, "CPLSW CIM framing error", -1, 1 },
1665 { ZERO_SWITCH_ERROR, "CPLSW no-switch error", -1, 1 },
1666 { 0 }
1667 };
1668
1669 if (t4_handle_intr_status(adapter, CPL_INTR_CAUSE, cplsw_intr_info))
1670 t4_fatal_err(adapter);
1671}
1672
1673/*
1674 * LE interrupt handler.
1675 */
1676static void le_intr_handler(struct adapter *adap)
1677{
 1678	static const struct intr_info le_intr_info[] = {
1679 { LIPMISS, "LE LIP miss", -1, 0 },
1680 { LIP0, "LE 0 LIP error", -1, 0 },
1681 { PARITYERR, "LE parity error", -1, 1 },
1682 { UNKNOWNCMD, "LE unknown command", -1, 1 },
1683 { REQQPARERR, "LE request queue parity error", -1, 1 },
1684 { 0 }
1685 };
1686
1687 if (t4_handle_intr_status(adap, LE_DB_INT_CAUSE, le_intr_info))
1688 t4_fatal_err(adap);
1689}
1690
1691/*
1692 * MPS interrupt handler.
1693 */
1694static void mps_intr_handler(struct adapter *adapter)
1695{
 1696	static const struct intr_info mps_rx_intr_info[] = {
1697 { 0xffffff, "MPS Rx parity error", -1, 1 },
1698 { 0 }
1699 };
 1700	static const struct intr_info mps_tx_intr_info[] = {
1701 { TPFIFO, "MPS Tx TP FIFO parity error", -1, 1 },
1702 { NCSIFIFO, "MPS Tx NC-SI FIFO parity error", -1, 1 },
1703 { TXDATAFIFO, "MPS Tx data FIFO parity error", -1, 1 },
1704 { TXDESCFIFO, "MPS Tx desc FIFO parity error", -1, 1 },
1705 { BUBBLE, "MPS Tx underflow", -1, 1 },
1706 { SECNTERR, "MPS Tx SOP/EOP error", -1, 1 },
1707 { FRMERR, "MPS Tx framing error", -1, 1 },
1708 { 0 }
1709 };
 1710	static const struct intr_info mps_trc_intr_info[] = {
1711 { FILTMEM, "MPS TRC filter parity error", -1, 1 },
1712 { PKTFIFO, "MPS TRC packet FIFO parity error", -1, 1 },
1713 { MISCPERR, "MPS TRC misc parity error", -1, 1 },
1714 { 0 }
1715 };
 1716	static const struct intr_info mps_stat_sram_intr_info[] = {
1717 { 0x1fffff, "MPS statistics SRAM parity error", -1, 1 },
1718 { 0 }
1719 };
 1720	static const struct intr_info mps_stat_tx_intr_info[] = {
1721 { 0xfffff, "MPS statistics Tx FIFO parity error", -1, 1 },
1722 { 0 }
1723 };
 1724	static const struct intr_info mps_stat_rx_intr_info[] = {
1725 { 0xffffff, "MPS statistics Rx FIFO parity error", -1, 1 },
1726 { 0 }
1727 };
 1728	static const struct intr_info mps_cls_intr_info[] = {
1729 { MATCHSRAM, "MPS match SRAM parity error", -1, 1 },
1730 { MATCHTCAM, "MPS match TCAM parity error", -1, 1 },
1731 { HASHSRAM, "MPS hash SRAM parity error", -1, 1 },
1732 { 0 }
1733 };
1734
1735 int fat;
1736
1737 fat = t4_handle_intr_status(adapter, MPS_RX_PERR_INT_CAUSE,
1738 mps_rx_intr_info) +
1739 t4_handle_intr_status(adapter, MPS_TX_INT_CAUSE,
1740 mps_tx_intr_info) +
1741 t4_handle_intr_status(adapter, MPS_TRC_INT_CAUSE,
1742 mps_trc_intr_info) +
1743 t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_SRAM,
1744 mps_stat_sram_intr_info) +
1745 t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_TX_FIFO,
1746 mps_stat_tx_intr_info) +
1747 t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_RX_FIFO,
1748 mps_stat_rx_intr_info) +
1749 t4_handle_intr_status(adapter, MPS_CLS_INT_CAUSE,
1750 mps_cls_intr_info);
1751
1752 t4_write_reg(adapter, MPS_INT_CAUSE, CLSINT | TRCINT |
1753 RXINT | TXINT | STATINT);
1754 t4_read_reg(adapter, MPS_INT_CAUSE); /* flush */
1755 if (fat)
1756 t4_fatal_err(adapter);
1757}
1758
1759#define MEM_INT_MASK (PERR_INT_CAUSE | ECC_CE_INT_CAUSE | ECC_UE_INT_CAUSE)
1760
1761/*
1762 * EDC/MC interrupt handler.
1763 */
1764static void mem_intr_handler(struct adapter *adapter, int idx)
1765{
 1766	static const char name[4][7] = { "EDC0", "EDC1", "MC/MC0", "MC1" };
1767
1768 unsigned int addr, cnt_addr, v;
1769
1770 if (idx <= MEM_EDC1) {
1771 addr = EDC_REG(EDC_INT_CAUSE, idx);
1772 cnt_addr = EDC_REG(EDC_ECC_STATUS, idx);
1773 } else if (idx == MEM_MC) {
1774 if (is_t4(adapter->params.chip)) {
1775 addr = MC_INT_CAUSE;
1776 cnt_addr = MC_ECC_STATUS;
1777 } else {
1778 addr = MC_P_INT_CAUSE;
1779 cnt_addr = MC_P_ECC_STATUS;
1780 }
 1781	} else {
1782 addr = MC_REG(MC_P_INT_CAUSE, 1);
1783 cnt_addr = MC_REG(MC_P_ECC_STATUS, 1);
1784 }
1785
1786 v = t4_read_reg(adapter, addr) & MEM_INT_MASK;
1787 if (v & PERR_INT_CAUSE)
1788 dev_alert(adapter->pdev_dev, "%s FIFO parity error\n",
1789 name[idx]);
1790 if (v & ECC_CE_INT_CAUSE) {
1791 u32 cnt = ECC_CECNT_GET(t4_read_reg(adapter, cnt_addr));
1792
1793 t4_write_reg(adapter, cnt_addr, ECC_CECNT_MASK);
1794 if (printk_ratelimit())
1795 dev_warn(adapter->pdev_dev,
1796 "%u %s correctable ECC data error%s\n",
1797 cnt, name[idx], cnt > 1 ? "s" : "");
1798 }
1799 if (v & ECC_UE_INT_CAUSE)
1800 dev_alert(adapter->pdev_dev,
1801 "%s uncorrectable ECC data error\n", name[idx]);
1802
1803 t4_write_reg(adapter, addr, v);
1804 if (v & (PERR_INT_CAUSE | ECC_UE_INT_CAUSE))
1805 t4_fatal_err(adapter);
1806}
1807
1808/*
1809 * MA interrupt handler.
1810 */
1811static void ma_intr_handler(struct adapter *adap)
1812{
1813 u32 v, status = t4_read_reg(adap, MA_INT_CAUSE);
1814
 1815	if (status & MEM_PERR_INT_CAUSE) {
1816 dev_alert(adap->pdev_dev,
1817 "MA parity error, parity status %#x\n",
1818 t4_read_reg(adap, MA_PARITY_ERROR_STATUS));
1819 if (is_t5(adap->params.chip))
1820 dev_alert(adap->pdev_dev,
1821 "MA parity error, parity status %#x\n",
1822 t4_read_reg(adap,
1823 MA_PARITY_ERROR_STATUS2));
1824 }
1825 if (status & MEM_WRAP_INT_CAUSE) {
1826 v = t4_read_reg(adap, MA_INT_WRAP_STATUS);
1827 dev_alert(adap->pdev_dev, "MA address wrap-around error by "
1828 "client %u to address %#x\n",
1829 MEM_WRAP_CLIENT_NUM_GET(v),
1830 MEM_WRAP_ADDRESS_GET(v) << 4);
1831 }
1832 t4_write_reg(adap, MA_INT_CAUSE, status);
1833 t4_fatal_err(adap);
1834}
1835
1836/*
1837 * SMB interrupt handler.
1838 */
1839static void smb_intr_handler(struct adapter *adap)
1840{
 1841	static const struct intr_info smb_intr_info[] = {
1842 { MSTTXFIFOPARINT, "SMB master Tx FIFO parity error", -1, 1 },
1843 { MSTRXFIFOPARINT, "SMB master Rx FIFO parity error", -1, 1 },
1844 { SLVFIFOPARINT, "SMB slave FIFO parity error", -1, 1 },
1845 { 0 }
1846 };
1847
1848 if (t4_handle_intr_status(adap, SMB_INT_CAUSE, smb_intr_info))
1849 t4_fatal_err(adap);
1850}
1851
1852/*
1853 * NC-SI interrupt handler.
1854 */
1855static void ncsi_intr_handler(struct adapter *adap)
1856{
 1857	static const struct intr_info ncsi_intr_info[] = {
1858 { CIM_DM_PRTY_ERR, "NC-SI CIM parity error", -1, 1 },
1859 { MPS_DM_PRTY_ERR, "NC-SI MPS parity error", -1, 1 },
1860 { TXFIFO_PRTY_ERR, "NC-SI Tx FIFO parity error", -1, 1 },
1861 { RXFIFO_PRTY_ERR, "NC-SI Rx FIFO parity error", -1, 1 },
1862 { 0 }
1863 };
1864
1865 if (t4_handle_intr_status(adap, NCSI_INT_CAUSE, ncsi_intr_info))
1866 t4_fatal_err(adap);
1867}
1868
1869/*
1870 * XGMAC interrupt handler.
1871 */
1872static void xgmac_intr_handler(struct adapter *adap, int port)
1873{
1874 u32 v, int_cause_reg;
1875
 1876	if (is_t4(adap->params.chip))
1877 int_cause_reg = PORT_REG(port, XGMAC_PORT_INT_CAUSE);
1878 else
1879 int_cause_reg = T5_PORT_REG(port, MAC_PORT_INT_CAUSE);
1880
1881 v = t4_read_reg(adap, int_cause_reg);
1882
1883 v &= TXFIFO_PRTY_ERR | RXFIFO_PRTY_ERR;
1884 if (!v)
1885 return;
1886
1887 if (v & TXFIFO_PRTY_ERR)
1888 dev_alert(adap->pdev_dev, "XGMAC %d Tx FIFO parity error\n",
1889 port);
1890 if (v & RXFIFO_PRTY_ERR)
1891 dev_alert(adap->pdev_dev, "XGMAC %d Rx FIFO parity error\n",
1892 port);
 1893	t4_write_reg(adap, int_cause_reg, v);
1894 t4_fatal_err(adap);
1895}
1896
1897/*
1898 * PL interrupt handler.
1899 */
1900static void pl_intr_handler(struct adapter *adap)
1901{
 1902	static const struct intr_info pl_intr_info[] = {
1903 { FATALPERR, "T4 fatal parity error", -1, 1 },
1904 { PERRVFID, "PL VFID_MAP parity error", -1, 1 },
1905 { 0 }
1906 };
1907
1908 if (t4_handle_intr_status(adap, PL_PL_INT_CAUSE, pl_intr_info))
1909 t4_fatal_err(adap);
1910}
1911
 1912#define PF_INTR_MASK (PFSW)
1913#define GLBL_INTR_MASK (CIM | MPS | PL | PCIE | MC | EDC0 | \
1914 EDC1 | LE | TP | MA | PM_TX | PM_RX | ULP_RX | \
1915 CPL_SWITCH | SGE | ULP_TX)
1916
1917/**
1918 * t4_slow_intr_handler - control path interrupt handler
1919 * @adapter: the adapter
1920 *
1921 * T4 interrupt handler for non-data global interrupt events, e.g., errors.
1922 * The designation 'slow' is because it involves register reads, while
1923 * data interrupts typically don't involve any MMIOs.
1924 */
1925int t4_slow_intr_handler(struct adapter *adapter)
1926{
1927 u32 cause = t4_read_reg(adapter, PL_INT_CAUSE);
1928
1929 if (!(cause & GLBL_INTR_MASK))
1930 return 0;
1931 if (cause & CIM)
1932 cim_intr_handler(adapter);
1933 if (cause & MPS)
1934 mps_intr_handler(adapter);
1935 if (cause & NCSI)
1936 ncsi_intr_handler(adapter);
1937 if (cause & PL)
1938 pl_intr_handler(adapter);
1939 if (cause & SMB)
1940 smb_intr_handler(adapter);
1941 if (cause & XGMAC0)
1942 xgmac_intr_handler(adapter, 0);
1943 if (cause & XGMAC1)
1944 xgmac_intr_handler(adapter, 1);
1945 if (cause & XGMAC_KR0)
1946 xgmac_intr_handler(adapter, 2);
1947 if (cause & XGMAC_KR1)
1948 xgmac_intr_handler(adapter, 3);
1949 if (cause & PCIE)
1950 pcie_intr_handler(adapter);
1951 if (cause & MC)
1952 mem_intr_handler(adapter, MEM_MC);
1953 if (!is_t4(adapter->params.chip) && (cause & MC1))
1954 mem_intr_handler(adapter, MEM_MC1);
1955 if (cause & EDC0)
1956 mem_intr_handler(adapter, MEM_EDC0);
1957 if (cause & EDC1)
1958 mem_intr_handler(adapter, MEM_EDC1);
1959 if (cause & LE)
1960 le_intr_handler(adapter);
1961 if (cause & TP)
1962 tp_intr_handler(adapter);
1963 if (cause & MA)
1964 ma_intr_handler(adapter);
1965 if (cause & PM_TX)
1966 pmtx_intr_handler(adapter);
1967 if (cause & PM_RX)
1968 pmrx_intr_handler(adapter);
1969 if (cause & ULP_RX)
1970 ulprx_intr_handler(adapter);
1971 if (cause & CPL_SWITCH)
1972 cplsw_intr_handler(adapter);
1973 if (cause & SGE)
1974 sge_intr_handler(adapter);
1975 if (cause & ULP_TX)
1976 ulptx_intr_handler(adapter);
1977
1978 /* Clear the interrupts just processed for which we are the master. */
1979 t4_write_reg(adapter, PL_INT_CAUSE, cause & GLBL_INTR_MASK);
1980 (void) t4_read_reg(adapter, PL_INT_CAUSE); /* flush */
1981 return 1;
1982}
1983
1984/**
1985 * t4_intr_enable - enable interrupts
1986 * @adapter: the adapter whose interrupts should be enabled
1987 *
1988 * Enable PF-specific interrupts for the calling function and the top-level
1989 * interrupt concentrator for global interrupts. Interrupts are already
1990 * enabled at each module, here we just enable the roots of the interrupt
1991 * hierarchies.
1992 *
1993 * Note: this function should be called only when the driver manages
1994 * non PF-specific interrupts from the various HW modules. Only one PCI
1995 * function at a time should be doing this.
1996 */
1997void t4_intr_enable(struct adapter *adapter)
1998{
1999 u32 pf = SOURCEPF_GET(t4_read_reg(adapter, PL_WHOAMI));
2000
2001 t4_write_reg(adapter, SGE_INT_ENABLE3, ERR_CPL_EXCEED_IQE_SIZE |
2002 ERR_INVALID_CIDX_INC | ERR_CPL_OPCODE_0 |
2003 ERR_DROPPED_DB | ERR_DATA_CPL_ON_HIGH_QID1 |
2004 ERR_DATA_CPL_ON_HIGH_QID0 | ERR_BAD_DB_PIDX3 |
2005 ERR_BAD_DB_PIDX2 | ERR_BAD_DB_PIDX1 |
2006 ERR_BAD_DB_PIDX0 | ERR_ING_CTXT_PRIO |
2007 ERR_EGR_CTXT_PRIO | INGRESS_SIZE_ERR |
 2008		     DBFIFO_HP_INT | DBFIFO_LP_INT |
2009 EGRESS_SIZE_ERR);
2010 t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE), PF_INTR_MASK);
2011 t4_set_reg_field(adapter, PL_INT_MAP0, 0, 1 << pf);
2012}
2013
2014/**
2015 * t4_intr_disable - disable interrupts
2016 * @adapter: the adapter whose interrupts should be disabled
2017 *
2018 * Disable interrupts. We only disable the top-level interrupt
2019 * concentrators. The caller must be a PCI function managing global
2020 * interrupts.
2021 */
2022void t4_intr_disable(struct adapter *adapter)
2023{
2024 u32 pf = SOURCEPF_GET(t4_read_reg(adapter, PL_WHOAMI));
2025
2026 t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE), 0);
2027 t4_set_reg_field(adapter, PL_INT_MAP0, 1 << pf, 0);
2028}
2029
2030/**
2031 * hash_mac_addr - return the hash value of a MAC address
2032 * @addr: the 48-bit Ethernet MAC address
2033 *
2034 * Hashes a MAC address according to the hash function used by HW inexact
2035 * (hash) address matching.
2036 */
2037static int hash_mac_addr(const u8 *addr)
2038{
2039 u32 a = ((u32)addr[0] << 16) | ((u32)addr[1] << 8) | addr[2];
2040 u32 b = ((u32)addr[3] << 16) | ((u32)addr[4] << 8) | addr[5];
2041 a ^= b;
2042 a ^= (a >> 12);
2043 a ^= (a >> 6);
2044 return a & 0x3f;
2045}
2046
2047/**
2048 * t4_config_rss_range - configure a portion of the RSS mapping table
2049 * @adapter: the adapter
2050 * @mbox: mbox to use for the FW command
2051 * @viid: virtual interface whose RSS subtable is to be written
2052 * @start: start entry in the table to write
2053 * @n: how many table entries to write
2054 * @rspq: values for the response queue lookup table
2055 * @nrspq: number of values in @rspq
2056 *
2057 * Programs the selected part of the VI's RSS mapping table with the
2058 * provided values. If @nrspq < @n the supplied values are used repeatedly
2059 * until the full table range is populated.
2060 *
2061 * The caller must ensure the values in @rspq are in the range allowed for
2062 * @viid.
2063 */
2064int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
2065 int start, int n, const u16 *rspq, unsigned int nrspq)
2066{
2067 int ret;
2068 const u16 *rsp = rspq;
2069 const u16 *rsp_end = rspq + nrspq;
2070 struct fw_rss_ind_tbl_cmd cmd;
2071
2072 memset(&cmd, 0, sizeof(cmd));
2073 cmd.op_to_viid = htonl(FW_CMD_OP(FW_RSS_IND_TBL_CMD) |
2074 FW_CMD_REQUEST | FW_CMD_WRITE |
2075 FW_RSS_IND_TBL_CMD_VIID(viid));
2076 cmd.retval_len16 = htonl(FW_LEN16(cmd));
2077
2078 /* each fw_rss_ind_tbl_cmd takes up to 32 entries */
2079 while (n > 0) {
2080 int nq = min(n, 32);
2081 __be32 *qp = &cmd.iq0_to_iq2;
2082
2083 cmd.niqid = htons(nq);
2084 cmd.startidx = htons(start);
2085
2086 start += nq;
2087 n -= nq;
2088
2089 while (nq > 0) {
2090 unsigned int v;
2091
2092 v = FW_RSS_IND_TBL_CMD_IQ0(*rsp);
2093 if (++rsp >= rsp_end)
2094 rsp = rspq;
2095 v |= FW_RSS_IND_TBL_CMD_IQ1(*rsp);
2096 if (++rsp >= rsp_end)
2097 rsp = rspq;
2098 v |= FW_RSS_IND_TBL_CMD_IQ2(*rsp);
2099 if (++rsp >= rsp_end)
2100 rsp = rspq;
2101
2102 *qp++ = htonl(v);
2103 nq -= 3;
2104 }
2105
2106 ret = t4_wr_mbox(adapter, mbox, &cmd, sizeof(cmd), NULL);
2107 if (ret)
2108 return ret;
2109 }
2110 return 0;
2111}
2112
2113/**
2114 * t4_config_glbl_rss - configure the global RSS mode
2115 * @adapter: the adapter
2116 * @mbox: mbox to use for the FW command
2117 * @mode: global RSS mode
2118 * @flags: mode-specific flags
2119 *
2120 * Sets the global RSS mode.
2121 */
2122int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode,
2123 unsigned int flags)
2124{
2125 struct fw_rss_glb_config_cmd c;
2126
2127 memset(&c, 0, sizeof(c));
2128 c.op_to_write = htonl(FW_CMD_OP(FW_RSS_GLB_CONFIG_CMD) |
2129 FW_CMD_REQUEST | FW_CMD_WRITE);
2130 c.retval_len16 = htonl(FW_LEN16(c));
2131 if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_MANUAL) {
2132 c.u.manual.mode_pkd = htonl(FW_RSS_GLB_CONFIG_CMD_MODE(mode));
2133 } else if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL) {
2134 c.u.basicvirtual.mode_pkd =
2135 htonl(FW_RSS_GLB_CONFIG_CMD_MODE(mode));
2136 c.u.basicvirtual.synmapen_to_hashtoeplitz = htonl(flags);
2137 } else
2138 return -EINVAL;
2139 return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
2140}
2141
2142/**
2143 * t4_tp_get_tcp_stats - read TP's TCP MIB counters
2144 * @adap: the adapter
2145 * @v4: holds the TCP/IP counter values
2146 * @v6: holds the TCP/IPv6 counter values
2147 *
2148 * Returns the values of TP's TCP/IP and TCP/IPv6 MIB counters.
2149 * Either @v4 or @v6 may be %NULL to skip the corresponding stats.
2150 */
2151void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4,
2152 struct tp_tcp_stats *v6)
2153{
2154 u32 val[TP_MIB_TCP_RXT_SEG_LO - TP_MIB_TCP_OUT_RST + 1];
2155
2156#define STAT_IDX(x) ((TP_MIB_TCP_##x) - TP_MIB_TCP_OUT_RST)
2157#define STAT(x) val[STAT_IDX(x)]
2158#define STAT64(x) (((u64)STAT(x##_HI) << 32) | STAT(x##_LO))
2159
2160 if (v4) {
2161 t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, val,
2162 ARRAY_SIZE(val), TP_MIB_TCP_OUT_RST);
2163 v4->tcpOutRsts = STAT(OUT_RST);
2164 v4->tcpInSegs = STAT64(IN_SEG);
2165 v4->tcpOutSegs = STAT64(OUT_SEG);
2166 v4->tcpRetransSegs = STAT64(RXT_SEG);
2167 }
2168 if (v6) {
2169 t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, val,
2170 ARRAY_SIZE(val), TP_MIB_TCP_V6OUT_RST);
2171 v6->tcpOutRsts = STAT(OUT_RST);
2172 v6->tcpInSegs = STAT64(IN_SEG);
2173 v6->tcpOutSegs = STAT64(OUT_SEG);
2174 v6->tcpRetransSegs = STAT64(RXT_SEG);
2175 }
2176#undef STAT64
2177#undef STAT
2178#undef STAT_IDX
2179}
2180
2181/**
2182 * t4_read_mtu_tbl - returns the values in the HW path MTU table
2183 * @adap: the adapter
2184 * @mtus: where to store the MTU values
2185 * @mtu_log: where to store the MTU base-2 log (may be %NULL)
2186 *
2187 * Reads the HW path MTU table.
2188 */
2189void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log)
2190{
2191 u32 v;
2192 int i;
2193
2194 for (i = 0; i < NMTUS; ++i) {
2195 t4_write_reg(adap, TP_MTU_TABLE,
2196 MTUINDEX(0xff) | MTUVALUE(i));
2197 v = t4_read_reg(adap, TP_MTU_TABLE);
2198 mtus[i] = MTUVALUE_GET(v);
2199 if (mtu_log)
2200 mtu_log[i] = MTUWIDTH_GET(v);
2201 }
2202}
2203
2204/**
2205 * t4_tp_wr_bits_indirect - set/clear bits in an indirect TP register
2206 * @adap: the adapter
2207 * @addr: the indirect TP register address
2208 * @mask: specifies the field within the register to modify
2209 * @val: new value for the field
2210 *
2211 * Sets a field of an indirect TP register to the given value.
2212 */
2213void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr,
2214 unsigned int mask, unsigned int val)
2215{
2216 t4_write_reg(adap, TP_PIO_ADDR, addr);
2217 val |= t4_read_reg(adap, TP_PIO_DATA) & ~mask;
2218 t4_write_reg(adap, TP_PIO_DATA, val);
2219}
2220
2221/**
2222 * init_cong_ctrl - initialize congestion control parameters
2223 * @a: the alpha values for congestion control
2224 * @b: the beta values for congestion control
2225 *
2226 * Initialize the congestion control parameters.
2227 */
 2228static void init_cong_ctrl(unsigned short *a, unsigned short *b)
2229{
2230 a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1;
2231 a[9] = 2;
2232 a[10] = 3;
2233 a[11] = 4;
2234 a[12] = 5;
2235 a[13] = 6;
2236 a[14] = 7;
2237 a[15] = 8;
2238 a[16] = 9;
2239 a[17] = 10;
2240 a[18] = 14;
2241 a[19] = 17;
2242 a[20] = 21;
2243 a[21] = 25;
2244 a[22] = 30;
2245 a[23] = 35;
2246 a[24] = 45;
2247 a[25] = 60;
2248 a[26] = 80;
2249 a[27] = 100;
2250 a[28] = 200;
2251 a[29] = 300;
2252 a[30] = 400;
2253 a[31] = 500;
2254
2255 b[0] = b[1] = b[2] = b[3] = b[4] = b[5] = b[6] = b[7] = b[8] = 0;
2256 b[9] = b[10] = 1;
2257 b[11] = b[12] = 2;
2258 b[13] = b[14] = b[15] = b[16] = 3;
2259 b[17] = b[18] = b[19] = b[20] = b[21] = 4;
2260 b[22] = b[23] = b[24] = b[25] = b[26] = b[27] = 5;
2261 b[28] = b[29] = 6;
2262 b[30] = b[31] = 7;
2263}
2264
2265/* The minimum additive increment value for the congestion control table */
2266#define CC_MIN_INCR 2U
2267
2268/**
2269 * t4_load_mtus - write the MTU and congestion control HW tables
2270 * @adap: the adapter
2271 * @mtus: the values for the MTU table
2272 * @alpha: the values for the congestion control alpha parameter
2273 * @beta: the values for the congestion control beta parameter
2274 *
2275 * Write the HW MTU table with the supplied MTUs and the high-speed
2276 * congestion control table with the supplied alpha, beta, and MTUs.
2277 * We write the two tables together because the additive increments
2278 * depend on the MTUs.
2279 */
2280void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
2281 const unsigned short *alpha, const unsigned short *beta)
2282{
2283 static const unsigned int avg_pkts[NCCTRL_WIN] = {
2284 2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
2285 896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
2286 28672, 40960, 57344, 81920, 114688, 163840, 229376
2287 };
2288
2289 unsigned int i, w;
2290
2291 for (i = 0; i < NMTUS; ++i) {
2292 unsigned int mtu = mtus[i];
2293 unsigned int log2 = fls(mtu);
2294
2295 if (!(mtu & ((1 << log2) >> 2))) /* round */
2296 log2--;
2297 t4_write_reg(adap, TP_MTU_TABLE, MTUINDEX(i) |
2298 MTUWIDTH(log2) | MTUVALUE(mtu));
2299
2300 for (w = 0; w < NCCTRL_WIN; ++w) {
2301 unsigned int inc;
2302
2303 inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
2304 CC_MIN_INCR);
2305
2306 t4_write_reg(adap, TP_CCTRL_TABLE, (i << 21) |
2307 (w << 16) | (beta[w] << 13) | inc);
2308 }
2309 }
2310}
2311
2312/**
2313 * get_mps_bg_map - return the buffer groups associated with a port
2314 * @adap: the adapter
2315 * @idx: the port index
2316 *
2317 * Returns a bitmap indicating which MPS buffer groups are associated
2318 * with the given port. Bit i is set if buffer group i is used by the
2319 * port.
2320 */
2321static unsigned int get_mps_bg_map(struct adapter *adap, int idx)
2322{
2323 u32 n = NUMPORTS_GET(t4_read_reg(adap, MPS_CMN_CTL));
2324
2325 if (n == 0)
2326 return idx == 0 ? 0xf : 0;
2327 if (n == 1)
2328 return idx < 2 ? (3 << (2 * idx)) : 0;
2329 return 1 << idx;
2330}
2331
2332/**
2333 * t4_get_port_type_description - return Port Type string description
2334 * @port_type: firmware Port Type enumeration
2335 */
2336const char *t4_get_port_type_description(enum fw_port_type port_type)
2337{
2338 static const char *const port_type_description[] = {
2339 "R XFI",
2340 "R XAUI",
2341 "T SGMII",
2342 "T XFI",
2343 "T XAUI",
2344 "KX4",
2345 "CX4",
2346 "KX",
2347 "KR",
2348 "R SFP+",
2349 "KR/KX",
2350 "KR/KX/KX4",
2351 "R QSFP_10G",
2352 "",
2353 "R QSFP",
2354 "R BP40_BA",
2355 };
2356
2357 if (port_type < ARRAY_SIZE(port_type_description))
2358 return port_type_description[port_type];
2359 return "UNKNOWN";
2360}
2361
2362/**
2363 * t4_get_port_stats - collect port statistics
2364 * @adap: the adapter
2365 * @idx: the port index
2366 * @p: the stats structure to fill
2367 *
2368 * Collect statistics related to the given port from HW.
2369 */
2370void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p)
2371{
2372 u32 bgmap = get_mps_bg_map(adap, idx);
2373
2374#define GET_STAT(name) \
 2375	t4_read_reg64(adap, \
 2376	(is_t4(adap->params.chip) ? PORT_REG(idx, MPS_PORT_STAT_##name##_L) : \
 2377	T5_PORT_REG(idx, MPS_PORT_STAT_##name##_L)))
2378#define GET_STAT_COM(name) t4_read_reg64(adap, MPS_STAT_##name##_L)
2379
2380 p->tx_octets = GET_STAT(TX_PORT_BYTES);
2381 p->tx_frames = GET_STAT(TX_PORT_FRAMES);
2382 p->tx_bcast_frames = GET_STAT(TX_PORT_BCAST);
2383 p->tx_mcast_frames = GET_STAT(TX_PORT_MCAST);
2384 p->tx_ucast_frames = GET_STAT(TX_PORT_UCAST);
2385 p->tx_error_frames = GET_STAT(TX_PORT_ERROR);
2386 p->tx_frames_64 = GET_STAT(TX_PORT_64B);
2387 p->tx_frames_65_127 = GET_STAT(TX_PORT_65B_127B);
2388 p->tx_frames_128_255 = GET_STAT(TX_PORT_128B_255B);
2389 p->tx_frames_256_511 = GET_STAT(TX_PORT_256B_511B);
2390 p->tx_frames_512_1023 = GET_STAT(TX_PORT_512B_1023B);
2391 p->tx_frames_1024_1518 = GET_STAT(TX_PORT_1024B_1518B);
2392 p->tx_frames_1519_max = GET_STAT(TX_PORT_1519B_MAX);
2393 p->tx_drop = GET_STAT(TX_PORT_DROP);
2394 p->tx_pause = GET_STAT(TX_PORT_PAUSE);
2395 p->tx_ppp0 = GET_STAT(TX_PORT_PPP0);
2396 p->tx_ppp1 = GET_STAT(TX_PORT_PPP1);
2397 p->tx_ppp2 = GET_STAT(TX_PORT_PPP2);
2398 p->tx_ppp3 = GET_STAT(TX_PORT_PPP3);
2399 p->tx_ppp4 = GET_STAT(TX_PORT_PPP4);
2400 p->tx_ppp5 = GET_STAT(TX_PORT_PPP5);
2401 p->tx_ppp6 = GET_STAT(TX_PORT_PPP6);
2402 p->tx_ppp7 = GET_STAT(TX_PORT_PPP7);
2403
2404 p->rx_octets = GET_STAT(RX_PORT_BYTES);
2405 p->rx_frames = GET_STAT(RX_PORT_FRAMES);
2406 p->rx_bcast_frames = GET_STAT(RX_PORT_BCAST);
2407 p->rx_mcast_frames = GET_STAT(RX_PORT_MCAST);
2408 p->rx_ucast_frames = GET_STAT(RX_PORT_UCAST);
2409 p->rx_too_long = GET_STAT(RX_PORT_MTU_ERROR);
2410 p->rx_jabber = GET_STAT(RX_PORT_MTU_CRC_ERROR);
2411 p->rx_fcs_err = GET_STAT(RX_PORT_CRC_ERROR);
2412 p->rx_len_err = GET_STAT(RX_PORT_LEN_ERROR);
2413 p->rx_symbol_err = GET_STAT(RX_PORT_SYM_ERROR);
2414 p->rx_runt = GET_STAT(RX_PORT_LESS_64B);
2415 p->rx_frames_64 = GET_STAT(RX_PORT_64B);
2416 p->rx_frames_65_127 = GET_STAT(RX_PORT_65B_127B);
2417 p->rx_frames_128_255 = GET_STAT(RX_PORT_128B_255B);
2418 p->rx_frames_256_511 = GET_STAT(RX_PORT_256B_511B);
2419 p->rx_frames_512_1023 = GET_STAT(RX_PORT_512B_1023B);
2420 p->rx_frames_1024_1518 = GET_STAT(RX_PORT_1024B_1518B);
2421 p->rx_frames_1519_max = GET_STAT(RX_PORT_1519B_MAX);
2422 p->rx_pause = GET_STAT(RX_PORT_PAUSE);
2423 p->rx_ppp0 = GET_STAT(RX_PORT_PPP0);
2424 p->rx_ppp1 = GET_STAT(RX_PORT_PPP1);
2425 p->rx_ppp2 = GET_STAT(RX_PORT_PPP2);
2426 p->rx_ppp3 = GET_STAT(RX_PORT_PPP3);
2427 p->rx_ppp4 = GET_STAT(RX_PORT_PPP4);
2428 p->rx_ppp5 = GET_STAT(RX_PORT_PPP5);
2429 p->rx_ppp6 = GET_STAT(RX_PORT_PPP6);
2430 p->rx_ppp7 = GET_STAT(RX_PORT_PPP7);
2431
2432 p->rx_ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_DROP_FRAME) : 0;
2433 p->rx_ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_DROP_FRAME) : 0;
2434 p->rx_ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_DROP_FRAME) : 0;
2435 p->rx_ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_DROP_FRAME) : 0;
2436 p->rx_trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_TRUNC_FRAME) : 0;
2437 p->rx_trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_TRUNC_FRAME) : 0;
2438 p->rx_trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_TRUNC_FRAME) : 0;
2439 p->rx_trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_TRUNC_FRAME) : 0;
2440
2441#undef GET_STAT
2442#undef GET_STAT_COM
2443}
2444
2445/**
2446 * t4_wol_magic_enable - enable/disable magic packet WoL
2447 * @adap: the adapter
2448 * @port: the physical port index
2449 * @addr: MAC address expected in magic packets, %NULL to disable
2450 *
2451 * Enables/disables magic packet wake-on-LAN for the selected port.
2452 */
2453void t4_wol_magic_enable(struct adapter *adap, unsigned int port,
2454 const u8 *addr)
2455{
2456 u32 mag_id_reg_l, mag_id_reg_h, port_cfg_reg;
2457
 2458	if (is_t4(adap->params.chip)) {
2459 mag_id_reg_l = PORT_REG(port, XGMAC_PORT_MAGIC_MACID_LO);
2460 mag_id_reg_h = PORT_REG(port, XGMAC_PORT_MAGIC_MACID_HI);
2461 port_cfg_reg = PORT_REG(port, XGMAC_PORT_CFG2);
2462 } else {
2463 mag_id_reg_l = T5_PORT_REG(port, MAC_PORT_MAGIC_MACID_LO);
2464 mag_id_reg_h = T5_PORT_REG(port, MAC_PORT_MAGIC_MACID_HI);
2465 port_cfg_reg = T5_PORT_REG(port, MAC_PORT_CFG2);
2466 }
2467
 2468	if (addr) {
 2469		t4_write_reg(adap, mag_id_reg_l,
2470 (addr[2] << 24) | (addr[3] << 16) |
2471 (addr[4] << 8) | addr[5]);
 2472		t4_write_reg(adap, mag_id_reg_h,
2473 (addr[0] << 8) | addr[1]);
2474 }
 2475	t4_set_reg_field(adap, port_cfg_reg, MAGICEN,
2476 addr ? MAGICEN : 0);
2477}
2478
2479/**
2480 * t4_wol_pat_enable - enable/disable pattern-based WoL
2481 * @adap: the adapter
2482 * @port: the physical port index
2483 * @map: bitmap of which HW pattern filters to set
2484 * @mask0: byte mask for bytes 0-63 of a packet
2485 * @mask1: byte mask for bytes 64-127 of a packet
2486 * @crc: Ethernet CRC for selected bytes
2487 * @enable: enable/disable switch
2488 *
2489 * Sets the pattern filters indicated in @map to mask out the bytes
2490 * specified in @mask0/@mask1 in received packets and compare the CRC of
2491 * the resulting packet against @crc. If @enable is %true pattern-based
2492 * WoL is enabled, otherwise disabled.
2493 */
2494int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map,
2495 u64 mask0, u64 mask1, unsigned int crc, bool enable)
2496{
2497 int i;
2498 u32 port_cfg_reg;
2499
 2500	if (is_t4(adap->params.chip))
2501 port_cfg_reg = PORT_REG(port, XGMAC_PORT_CFG2);
2502 else
2503 port_cfg_reg = T5_PORT_REG(port, MAC_PORT_CFG2);
2504
2505 if (!enable) {
 2506		t4_set_reg_field(adap, port_cfg_reg, PATEN, 0);
2507 return 0;
2508 }
2509 if (map > 0xff)
2510 return -EINVAL;
2511
 2512#define EPIO_REG(name) \
 2513	(is_t4(adap->params.chip) ? PORT_REG(port, XGMAC_PORT_EPIO_##name) : \
 2514	T5_PORT_REG(port, MAC_PORT_EPIO_##name))
2515
2516 t4_write_reg(adap, EPIO_REG(DATA1), mask0 >> 32);
2517 t4_write_reg(adap, EPIO_REG(DATA2), mask1);
2518 t4_write_reg(adap, EPIO_REG(DATA3), mask1 >> 32);
2519
2520 for (i = 0; i < NWOL_PAT; i++, map >>= 1) {
2521 if (!(map & 1))
2522 continue;
2523
2524 /* write byte masks */
2525 t4_write_reg(adap, EPIO_REG(DATA0), mask0);
2526 t4_write_reg(adap, EPIO_REG(OP), ADDRESS(i) | EPIOWR);
2527 t4_read_reg(adap, EPIO_REG(OP)); /* flush */
 2528		if (t4_read_reg(adap, EPIO_REG(OP)) & SF_BUSY)
2529 return -ETIMEDOUT;
2530
2531 /* write CRC */
2532 t4_write_reg(adap, EPIO_REG(DATA0), crc);
2533 t4_write_reg(adap, EPIO_REG(OP), ADDRESS(i + 32) | EPIOWR);
2534 t4_read_reg(adap, EPIO_REG(OP)); /* flush */
 2535		if (t4_read_reg(adap, EPIO_REG(OP)) & SF_BUSY)
2536 return -ETIMEDOUT;
2537 }
2538#undef EPIO_REG
2539
 2540	t4_set_reg_field(adap, port_cfg_reg, 0, PATEN);
2541 return 0;
2542}
2543
2544/* t4_mk_filtdelwr - create a delete filter WR
2545 * @ftid: the filter ID
2546 * @wr: the filter work request to populate
2547 * @qid: ingress queue to receive the delete notification
2548 *
2549 * Creates a filter work request to delete the supplied filter. If @qid is
2550 * negative the delete notification is suppressed.
2551 */
2552void t4_mk_filtdelwr(unsigned int ftid, struct fw_filter_wr *wr, int qid)
2553{
2554 memset(wr, 0, sizeof(*wr));
2555 wr->op_pkd = htonl(FW_WR_OP(FW_FILTER_WR));
2556 wr->len16_pkd = htonl(FW_WR_LEN16(sizeof(*wr) / 16));
2557 wr->tid_to_iq = htonl(V_FW_FILTER_WR_TID(ftid) |
2558 V_FW_FILTER_WR_NOREPLY(qid < 0));
2559 wr->del_filter_to_l2tix = htonl(F_FW_FILTER_WR_DEL_FILTER);
2560 if (qid >= 0)
2561 wr->rx_chan_rx_rpl_iq = htons(V_FW_FILTER_WR_RX_RPL_IQ(qid));
2562}
2563
2564#define INIT_CMD(var, cmd, rd_wr) do { \
2565 (var).op_to_write = htonl(FW_CMD_OP(FW_##cmd##_CMD) | \
2566 FW_CMD_REQUEST | FW_CMD_##rd_wr); \
2567 (var).retval_len16 = htonl(FW_LEN16(var)); \
2568} while (0)
2569
2570int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox,
2571 u32 addr, u32 val)
2572{
2573 struct fw_ldst_cmd c;
2574
2575 memset(&c, 0, sizeof(c));
2576 c.op_to_addrspace = htonl(FW_CMD_OP(FW_LDST_CMD) | FW_CMD_REQUEST |
2577 FW_CMD_WRITE |
2578 FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_FIRMWARE));
2579 c.cycles_to_len16 = htonl(FW_LEN16(c));
2580 c.u.addrval.addr = htonl(addr);
2581 c.u.addrval.val = htonl(val);
2582
2583 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2584}
2585
2586/**
2587 * t4_mdio_rd - read a PHY register through MDIO
2588 * @adap: the adapter
2589 * @mbox: mailbox to use for the FW command
2590 * @phy_addr: the PHY address
2591 * @mmd: the PHY MMD to access (0 for clause 22 PHYs)
2592 * @reg: the register to read
2593 * @valp: where to store the value
2594 *
2595 * Issues a FW command through the given mailbox to read a PHY register.
2596 */
2597int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
2598 unsigned int mmd, unsigned int reg, u16 *valp)
2599{
2600 int ret;
2601 struct fw_ldst_cmd c;
2602
2603 memset(&c, 0, sizeof(c));
2604 c.op_to_addrspace = htonl(FW_CMD_OP(FW_LDST_CMD) | FW_CMD_REQUEST |
2605 FW_CMD_READ | FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO));
2606 c.cycles_to_len16 = htonl(FW_LEN16(c));
2607 c.u.mdio.paddr_mmd = htons(FW_LDST_CMD_PADDR(phy_addr) |
2608 FW_LDST_CMD_MMD(mmd));
2609 c.u.mdio.raddr = htons(reg);
2610
2611 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
2612 if (ret == 0)
2613 *valp = ntohs(c.u.mdio.rval);
2614 return ret;
2615}
2616
2617/**
2618 * t4_mdio_wr - write a PHY register through MDIO
2619 * @adap: the adapter
2620 * @mbox: mailbox to use for the FW command
2621 * @phy_addr: the PHY address
2622 * @mmd: the PHY MMD to access (0 for clause 22 PHYs)
2623 * @reg: the register to write
2624 * @valp: value to write
2625 *
2626 * Issues a FW command through the given mailbox to write a PHY register.
2627 */
2628int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
2629 unsigned int mmd, unsigned int reg, u16 val)
2630{
2631 struct fw_ldst_cmd c;
2632
2633 memset(&c, 0, sizeof(c));
2634 c.op_to_addrspace = htonl(FW_CMD_OP(FW_LDST_CMD) | FW_CMD_REQUEST |
2635 FW_CMD_WRITE | FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO));
2636 c.cycles_to_len16 = htonl(FW_LEN16(c));
2637 c.u.mdio.paddr_mmd = htons(FW_LDST_CMD_PADDR(phy_addr) |
2638 FW_LDST_CMD_MMD(mmd));
2639 c.u.mdio.raddr = htons(reg);
2640 c.u.mdio.rval = htons(val);
2641
2642 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2643}
2644
2645/**
2646 * t4_sge_decode_idma_state - decode the idma state
 2647 * @adapter: the adapter
2648 * @state: the state idma is stuck in
2649 */
2650void t4_sge_decode_idma_state(struct adapter *adapter, int state)
2651{
2652 static const char * const t4_decode[] = {
2653 "IDMA_IDLE",
2654 "IDMA_PUSH_MORE_CPL_FIFO",
2655 "IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO",
2656 "Not used",
2657 "IDMA_PHYSADDR_SEND_PCIEHDR",
2658 "IDMA_PHYSADDR_SEND_PAYLOAD_FIRST",
2659 "IDMA_PHYSADDR_SEND_PAYLOAD",
2660 "IDMA_SEND_FIFO_TO_IMSG",
2661 "IDMA_FL_REQ_DATA_FL_PREP",
2662 "IDMA_FL_REQ_DATA_FL",
2663 "IDMA_FL_DROP",
2664 "IDMA_FL_H_REQ_HEADER_FL",
2665 "IDMA_FL_H_SEND_PCIEHDR",
2666 "IDMA_FL_H_PUSH_CPL_FIFO",
2667 "IDMA_FL_H_SEND_CPL",
2668 "IDMA_FL_H_SEND_IP_HDR_FIRST",
2669 "IDMA_FL_H_SEND_IP_HDR",
2670 "IDMA_FL_H_REQ_NEXT_HEADER_FL",
2671 "IDMA_FL_H_SEND_NEXT_PCIEHDR",
2672 "IDMA_FL_H_SEND_IP_HDR_PADDING",
2673 "IDMA_FL_D_SEND_PCIEHDR",
2674 "IDMA_FL_D_SEND_CPL_AND_IP_HDR",
2675 "IDMA_FL_D_REQ_NEXT_DATA_FL",
2676 "IDMA_FL_SEND_PCIEHDR",
2677 "IDMA_FL_PUSH_CPL_FIFO",
2678 "IDMA_FL_SEND_CPL",
2679 "IDMA_FL_SEND_PAYLOAD_FIRST",
2680 "IDMA_FL_SEND_PAYLOAD",
2681 "IDMA_FL_REQ_NEXT_DATA_FL",
2682 "IDMA_FL_SEND_NEXT_PCIEHDR",
2683 "IDMA_FL_SEND_PADDING",
2684 "IDMA_FL_SEND_COMPLETION_TO_IMSG",
2685 "IDMA_FL_SEND_FIFO_TO_IMSG",
2686 "IDMA_FL_REQ_DATAFL_DONE",
2687 "IDMA_FL_REQ_HEADERFL_DONE",
2688 };
2689 static const char * const t5_decode[] = {
2690 "IDMA_IDLE",
2691 "IDMA_ALMOST_IDLE",
2692 "IDMA_PUSH_MORE_CPL_FIFO",
2693 "IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO",
2694 "IDMA_SGEFLRFLUSH_SEND_PCIEHDR",
2695 "IDMA_PHYSADDR_SEND_PCIEHDR",
2696 "IDMA_PHYSADDR_SEND_PAYLOAD_FIRST",
2697 "IDMA_PHYSADDR_SEND_PAYLOAD",
2698 "IDMA_SEND_FIFO_TO_IMSG",
2699 "IDMA_FL_REQ_DATA_FL",
2700 "IDMA_FL_DROP",
2701 "IDMA_FL_DROP_SEND_INC",
2702 "IDMA_FL_H_REQ_HEADER_FL",
2703 "IDMA_FL_H_SEND_PCIEHDR",
2704 "IDMA_FL_H_PUSH_CPL_FIFO",
2705 "IDMA_FL_H_SEND_CPL",
2706 "IDMA_FL_H_SEND_IP_HDR_FIRST",
2707 "IDMA_FL_H_SEND_IP_HDR",
2708 "IDMA_FL_H_REQ_NEXT_HEADER_FL",
2709 "IDMA_FL_H_SEND_NEXT_PCIEHDR",
2710 "IDMA_FL_H_SEND_IP_HDR_PADDING",
2711 "IDMA_FL_D_SEND_PCIEHDR",
2712 "IDMA_FL_D_SEND_CPL_AND_IP_HDR",
2713 "IDMA_FL_D_REQ_NEXT_DATA_FL",
2714 "IDMA_FL_SEND_PCIEHDR",
2715 "IDMA_FL_PUSH_CPL_FIFO",
2716 "IDMA_FL_SEND_CPL",
2717 "IDMA_FL_SEND_PAYLOAD_FIRST",
2718 "IDMA_FL_SEND_PAYLOAD",
2719 "IDMA_FL_REQ_NEXT_DATA_FL",
2720 "IDMA_FL_SEND_NEXT_PCIEHDR",
2721 "IDMA_FL_SEND_PADDING",
2722 "IDMA_FL_SEND_COMPLETION_TO_IMSG",
2723 };
2724 static const u32 sge_regs[] = {
2725 SGE_DEBUG_DATA_LOW_INDEX_2,
2726 SGE_DEBUG_DATA_LOW_INDEX_3,
2727 SGE_DEBUG_DATA_HIGH_INDEX_10,
2728 };
2729 const char **sge_idma_decode;
2730 int sge_idma_decode_nstates;
2731 int i;
2732
2733 if (is_t4(adapter->params.chip)) {
2734 sge_idma_decode = (const char **)t4_decode;
2735 sge_idma_decode_nstates = ARRAY_SIZE(t4_decode);
2736 } else {
2737 sge_idma_decode = (const char **)t5_decode;
2738 sge_idma_decode_nstates = ARRAY_SIZE(t5_decode);
2739 }
2740
2741 if (state < sge_idma_decode_nstates)
2742 CH_WARN(adapter, "idma state %s\n", sge_idma_decode[state]);
2743 else
2744 CH_WARN(adapter, "idma state %d unknown\n", state);
2745
2746 for (i = 0; i < ARRAY_SIZE(sge_regs); i++)
2747 CH_WARN(adapter, "SGE register %#x value %#x\n",
2748 sge_regs[i], t4_read_reg(adapter, sge_regs[i]));
2749}
2750
 2751/**
2752 * t4_fw_hello - establish communication with FW
2753 * @adap: the adapter
2754 * @mbox: mailbox to use for the FW command
2755 * @evt_mbox: mailbox to receive async FW events
2756 * @master: specifies the caller's willingness to be the device master
2757 * @state: returns the current device state (if non-NULL)
 2758 *
2759 * Issues a command to establish communication with FW. Returns either
2760 * an error (negative integer) or the mailbox of the Master PF.
2761 */
2762int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox,
2763 enum dev_master master, enum dev_state *state)
2764{
2765 int ret;
2766 struct fw_hello_cmd c;
2767 u32 v;
2768 unsigned int master_mbox;
2769 int retries = FW_CMD_HELLO_RETRIES;
 2770
2771retry:
2772 memset(&c, 0, sizeof(c));
 2773	INIT_CMD(c, HELLO, WRITE);
 2774	c.err_to_clearinit = htonl(
2775 FW_HELLO_CMD_MASTERDIS(master == MASTER_CANT) |
2776 FW_HELLO_CMD_MASTERFORCE(master == MASTER_MUST) |
2777 FW_HELLO_CMD_MBMASTER(master == MASTER_MUST ? mbox :
2778 FW_HELLO_CMD_MBMASTER_MASK) |
2779 FW_HELLO_CMD_MBASYNCNOT(evt_mbox) |
2780 FW_HELLO_CMD_STAGE(fw_hello_cmd_stage_os) |
2781 FW_HELLO_CMD_CLEARINIT);
 2782
2783 /*
2784 * Issue the HELLO command to the firmware. If it's not successful
2785 * but indicates that we got a "busy" or "timeout" condition, retry
2786 * the HELLO until we exhaust our retry limit. If we do exceed our
2787 * retry limit, check to see if the firmware left us any error
2788 * information and report that if so.
 2789	 */
 2790	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
2791 if (ret < 0) {
2792 if ((ret == -EBUSY || ret == -ETIMEDOUT) && retries-- > 0)
2793 goto retry;
2794 if (t4_read_reg(adap, MA_PCIE_FW) & FW_PCIE_FW_ERR)
2795 t4_report_fw_error(adap);
2796 return ret;
2797 }
2798
 2799	v = ntohl(c.err_to_clearinit);
2800 master_mbox = FW_HELLO_CMD_MBMASTER_GET(v);
2801 if (state) {
2802 if (v & FW_HELLO_CMD_ERR)
 2803			*state = DEV_STATE_ERR;
2804 else if (v & FW_HELLO_CMD_INIT)
2805 *state = DEV_STATE_INIT;
2806 else
2807 *state = DEV_STATE_UNINIT;
2808 }
2809
2810 /*
2811 * If we're not the Master PF then we need to wait around for the
2812 * Master PF Driver to finish setting up the adapter.
2813 *
2814 * Note that we also do this wait if we're a non-Master-capable PF and
2815 * there is no current Master PF; a Master PF may show up momentarily
2816 * and we wouldn't want to fail pointlessly. (This can happen when an
2817 * OS loads lots of different drivers rapidly at the same time). In
2818 * this case, the Master PF returned by the firmware will be
2819 * FW_PCIE_FW_MASTER_MASK so the test below will work ...
2820 */
2821 if ((v & (FW_HELLO_CMD_ERR|FW_HELLO_CMD_INIT)) == 0 &&
2822 master_mbox != mbox) {
2823 int waiting = FW_CMD_HELLO_TIMEOUT;
2824
2825 /*
2826 * Wait for the firmware to either indicate an error or
2827 * initialized state. If we see either of these we bail out
2828 * and report the issue to the caller. If we exhaust the
2829 * "hello timeout" and we haven't exhausted our retries, try
2830 * again. Otherwise bail with a timeout error.
2831 */
2832 for (;;) {
2833 u32 pcie_fw;
2834
2835 msleep(50);
2836 waiting -= 50;
2837
2838 /*
 2839			 * If neither Error nor Initialized is indicated
 2840			 * by the firmware, keep waiting till we exhaust our
2841 * timeout ... and then retry if we haven't exhausted
2842 * our retries ...
2843 */
2844 pcie_fw = t4_read_reg(adap, MA_PCIE_FW);
2845 if (!(pcie_fw & (FW_PCIE_FW_ERR|FW_PCIE_FW_INIT))) {
2846 if (waiting <= 0) {
2847 if (retries-- > 0)
2848 goto retry;
2849
2850 return -ETIMEDOUT;
2851 }
2852 continue;
2853 }
2854
2855 /*
 2856			 * We either have an Error or Initialized condition;
 2857			 * report errors preferentially.
2858 */
2859 if (state) {
2860 if (pcie_fw & FW_PCIE_FW_ERR)
2861 *state = DEV_STATE_ERR;
2862 else if (pcie_fw & FW_PCIE_FW_INIT)
2863 *state = DEV_STATE_INIT;
2864 }
2865
2866 /*
 2867			 * If we arrived before a Master PF was selected and
 2868			 * one has since become valid, grab its identity
 2869			 * for our caller.
2870 */
2871 if (master_mbox == FW_PCIE_FW_MASTER_MASK &&
2872 (pcie_fw & FW_PCIE_FW_MASTER_VLD))
2873 master_mbox = FW_PCIE_FW_MASTER_GET(pcie_fw);
2874 break;
2875 }
2876 }
2877
2878 return master_mbox;
2879}
2880
2881/**
2882 * t4_fw_bye - end communication with FW
2883 * @adap: the adapter
2884 * @mbox: mailbox to use for the FW command
2885 *
2886 * Issues a command to terminate communication with FW.
2887 */
2888int t4_fw_bye(struct adapter *adap, unsigned int mbox)
2889{
2890 struct fw_bye_cmd c;
2891
 2892	memset(&c, 0, sizeof(c));
2893 INIT_CMD(c, BYE, WRITE);
2894 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2895}
2896
2897/**
 2898 * t4_early_init - ask FW to initialize the device
2899 * @adap: the adapter
2900 * @mbox: mailbox to use for the FW command
2901 *
2902 * Issues a command to FW to partially initialize the device. This
2903 * performs initialization that generally doesn't depend on user input.
2904 */
2905int t4_early_init(struct adapter *adap, unsigned int mbox)
2906{
2907 struct fw_initialize_cmd c;
2908
 2909	memset(&c, 0, sizeof(c));
2910 INIT_CMD(c, INITIALIZE, WRITE);
2911 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2912}
2913
2914/**
2915 * t4_fw_reset - issue a reset to FW
2916 * @adap: the adapter
2917 * @mbox: mailbox to use for the FW command
2918 * @reset: specifies the type of reset to perform
2919 *
2920 * Issues a reset command of the specified type to FW.
2921 */
2922int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset)
2923{
2924 struct fw_reset_cmd c;
2925
 2926	memset(&c, 0, sizeof(c));
2927 INIT_CMD(c, RESET, WRITE);
2928 c.val = htonl(reset);
2929 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2930}
2931
2932/**
2933 * t4_fw_halt - issue a reset/halt to FW and put uP into RESET
2934 * @adap: the adapter
2935 * @mbox: mailbox to use for the FW RESET command (if desired)
2936 * @force: force uP into RESET even if FW RESET command fails
2937 *
2938 * Issues a RESET command to firmware (if desired) with a HALT indication
2939 * and then puts the microprocessor into RESET state. The RESET command
2940 * will only be issued if a legitimate mailbox is provided (mbox <=
2941 * FW_PCIE_FW_MASTER_MASK).
2942 *
2943 * This is generally used in order for the host to safely manipulate the
2944 * adapter without fear of conflicting with whatever the firmware might
2945 * be doing. The only way out of this state is to RESTART the firmware
2946 * ...
2947 */
 2948static int t4_fw_halt(struct adapter *adap, unsigned int mbox, int force)
2949{
2950 int ret = 0;
2951
2952 /*
2953 * If a legitimate mailbox is provided, issue a RESET command
2954 * with a HALT indication.
2955 */
2956 if (mbox <= FW_PCIE_FW_MASTER_MASK) {
2957 struct fw_reset_cmd c;
2958
2959 memset(&c, 0, sizeof(c));
2960 INIT_CMD(c, RESET, WRITE);
2961 c.val = htonl(PIORST | PIORSTMODE);
2962 c.halt_pkd = htonl(FW_RESET_CMD_HALT(1U));
2963 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2964 }
2965
2966 /*
2967 * Normally we won't complete the operation if the firmware RESET
2968 * command fails but if our caller insists we'll go ahead and put the
2969 * uP into RESET. This can be useful if the firmware is hung or even
2970 * missing ... We'll have to take the risk of putting the uP into
2971 * RESET without the cooperation of firmware in that case.
2972 *
2973 * We also force the firmware's HALT flag to be on in case we bypassed
2974 * the firmware RESET command above or we're dealing with old firmware
2975 * which doesn't have the HALT capability. This will serve as a flag
2976 * for the incoming firmware to know that it's coming out of a HALT
2977 * rather than a RESET ... if it's new enough to understand that ...
2978 */
2979 if (ret == 0 || force) {
2980 t4_set_reg_field(adap, CIM_BOOT_CFG, UPCRST, UPCRST);
2981 t4_set_reg_field(adap, PCIE_FW, FW_PCIE_FW_HALT,
2982 FW_PCIE_FW_HALT);
2983 }
2984
2985 /*
2986 * And we always return the result of the firmware RESET command
2987 * even when we force the uP into RESET ...
2988 */
2989 return ret;
2990}
2991
2992/**
2993 * t4_fw_restart - restart the firmware by taking the uP out of RESET
2994 * @adap: the adapter
2995 * @reset: if we want to do a RESET to restart things
2996 *
2997 * Restart firmware previously halted by t4_fw_halt(). On successful
2998 * return the previous PF Master remains as the new PF Master and there
2999 * is no need to issue a new HELLO command, etc.
3000 *
3001 * We do this in two ways:
3002 *
3003 * 1. If we're dealing with newer firmware we'll simply want to take
3004 * the chip's microprocessor out of RESET. This will cause the
3005 * firmware to start up from its start vector. And then we'll loop
3006 * until the firmware indicates it's started again (PCIE_FW.HALT
3007 * reset to 0) or we timeout.
3008 *
3009 * 2. If we're dealing with older firmware then we'll need to RESET
3010 * the chip since older firmware won't recognize the PCIE_FW.HALT
3011 * flag and automatically RESET itself on startup.
3012 */
 3013static int t4_fw_restart(struct adapter *adap, unsigned int mbox, int reset)
3014{
3015 if (reset) {
3016 /*
3017 * Since we're directing the RESET instead of the firmware
3018 * doing it automatically, we need to clear the PCIE_FW.HALT
3019 * bit.
3020 */
3021 t4_set_reg_field(adap, PCIE_FW, FW_PCIE_FW_HALT, 0);
3022
3023 /*
3024 * If we've been given a valid mailbox, first try to get the
3025 * firmware to do the RESET. If that works, great and we can
3026 * return success. Otherwise, if we haven't been given a
3027 * valid mailbox or the RESET command failed, fall back to
3028 * hitting the chip with a hammer.
3029 */
3030 if (mbox <= FW_PCIE_FW_MASTER_MASK) {
3031 t4_set_reg_field(adap, CIM_BOOT_CFG, UPCRST, 0);
3032 msleep(100);
3033 if (t4_fw_reset(adap, mbox,
3034 PIORST | PIORSTMODE) == 0)
3035 return 0;
3036 }
3037
3038 t4_write_reg(adap, PL_RST, PIORST | PIORSTMODE);
3039 msleep(2000);
3040 } else {
3041 int ms;
3042
3043 t4_set_reg_field(adap, CIM_BOOT_CFG, UPCRST, 0);
3044 for (ms = 0; ms < FW_CMD_MAX_TIMEOUT; ) {
3045 if (!(t4_read_reg(adap, PCIE_FW) & FW_PCIE_FW_HALT))
3046 return 0;
3047 msleep(100);
3048 ms += 100;
3049 }
3050 return -ETIMEDOUT;
3051 }
3052 return 0;
3053}
3054
3055/**
3056 * t4_fw_upgrade - perform all of the steps necessary to upgrade FW
3057 * @adap: the adapter
3058 * @mbox: mailbox to use for the FW RESET command (if desired)
3059 * @fw_data: the firmware image to write
3060 * @size: image size
3061 * @force: force upgrade even if firmware doesn't cooperate
3062 *
3063 * Perform all of the steps necessary for upgrading an adapter's
3064 * firmware image. Normally this requires the cooperation of the
3065 * existing firmware in order to halt all existing activities
3066 * but if an invalid mailbox token is passed in we skip that step
3067 * (though we'll still put the adapter microprocessor into RESET in
3068 * that case).
3069 *
3070 * On successful return the new firmware will have been loaded and
3071 * the adapter will have been fully RESET losing all previous setup
3072 * state. On unsuccessful return the adapter may be completely hosed ...
3073 * positive errno indicates that the adapter is ~probably~ intact, a
3074 * negative errno indicates that things are looking bad ...
3075 */
 3076static int t4_fw_upgrade(struct adapter *adap, unsigned int mbox,
 3077			 const u8 *fw_data, unsigned int size, int force)
3078{
3079 const struct fw_hdr *fw_hdr = (const struct fw_hdr *)fw_data;
3080 int reset, ret;
3081
3082 ret = t4_fw_halt(adap, mbox, force);
3083 if (ret < 0 && !force)
3084 return ret;
3085
3086 ret = t4_load_fw(adap, fw_data, size);
3087 if (ret < 0)
3088 return ret;
3089
3090 /*
3091 * Older versions of the firmware don't understand the new
3092 * PCIE_FW.HALT flag and so won't know to perform a RESET when they
3093 * restart. So for newly loaded older firmware we'll have to do the
3094 * RESET for it so it starts up on a clean slate. We can tell if
3095 * the newly loaded firmware will handle this right by checking
3096 * its header flags to see if it advertises the capability.
3097 */
3098 reset = ((ntohl(fw_hdr->flags) & FW_HDR_FLAGS_RESET_HALT) == 0);
3099 return t4_fw_restart(adap, mbox, reset);
3100}
3101
3102/**
3103 * t4_fixup_host_params - fix up host-dependent parameters
3104 * @adap: the adapter
3105 * @page_size: the host's Base Page Size
3106 * @cache_line_size: the host's Cache Line Size
3107 *
3108 * Various registers in T4 contain values which are dependent on the
3109 * host's Base Page and Cache Line Sizes. This function will fix all of
3110 * those registers with the appropriate values as passed in ...
3111 */
3112int t4_fixup_host_params(struct adapter *adap, unsigned int page_size,
3113 unsigned int cache_line_size)
3114{
3115 unsigned int page_shift = fls(page_size) - 1;
3116 unsigned int sge_hps = page_shift - 10;
3117 unsigned int stat_len = cache_line_size > 64 ? 128 : 64;
3118 unsigned int fl_align = cache_line_size < 32 ? 32 : cache_line_size;
3119 unsigned int fl_align_log = fls(fl_align) - 1;
3120
3121 t4_write_reg(adap, SGE_HOST_PAGE_SIZE,
3122 HOSTPAGESIZEPF0(sge_hps) |
3123 HOSTPAGESIZEPF1(sge_hps) |
3124 HOSTPAGESIZEPF2(sge_hps) |
3125 HOSTPAGESIZEPF3(sge_hps) |
3126 HOSTPAGESIZEPF4(sge_hps) |
3127 HOSTPAGESIZEPF5(sge_hps) |
3128 HOSTPAGESIZEPF6(sge_hps) |
3129 HOSTPAGESIZEPF7(sge_hps));
3130
3131 t4_set_reg_field(adap, SGE_CONTROL,
 3132			 INGPADBOUNDARY_MASK |
3133 EGRSTATUSPAGESIZE_MASK,
3134 INGPADBOUNDARY(fl_align_log - 5) |
3135 EGRSTATUSPAGESIZE(stat_len != 64));
3136
3137 /*
3138 * Adjust various SGE Free List Host Buffer Sizes.
3139 *
3140 * This is something of a crock since we're using fixed indices into
3141 * the array which are also known by the sge.c code and the T4
3142 * Firmware Configuration File. We need to come up with a much better
3143 * approach to managing this array. For now, the first four entries
3144 * are:
3145 *
3146 * 0: Host Page Size
3147 * 1: 64KB
3148 * 2: Buffer size corresponding to 1500 byte MTU (unpacked mode)
3149 * 3: Buffer size corresponding to 9000 byte MTU (unpacked mode)
3150 *
3151 * For the single-MTU buffers in unpacked mode we need to include
3152 * space for the SGE Control Packet Shift, 14 byte Ethernet header,
3153 * possible 4 byte VLAN tag, all rounded up to the next Ingress Packet
 3154	 * Padding boundary. All of these are accommodated in the Factory
3155 * Default Firmware Configuration File but we need to adjust it for
3156 * this host's cache line size.
3157 */
3158 t4_write_reg(adap, SGE_FL_BUFFER_SIZE0, page_size);
3159 t4_write_reg(adap, SGE_FL_BUFFER_SIZE2,
3160 (t4_read_reg(adap, SGE_FL_BUFFER_SIZE2) + fl_align-1)
3161 & ~(fl_align-1));
3162 t4_write_reg(adap, SGE_FL_BUFFER_SIZE3,
3163 (t4_read_reg(adap, SGE_FL_BUFFER_SIZE3) + fl_align-1)
3164 & ~(fl_align-1));
3165
3166 t4_write_reg(adap, ULP_RX_TDDP_PSZ, HPZ0(page_shift - 12));
3167
3168 return 0;
3169}
3170
3171/**
3172 * t4_fw_initialize - ask FW to initialize the device
3173 * @adap: the adapter
3174 * @mbox: mailbox to use for the FW command
3175 *
3176 * Issues a command to FW to partially initialize the device. This
3177 * performs initialization that generally doesn't depend on user input.
3178 */
3179int t4_fw_initialize(struct adapter *adap, unsigned int mbox)
3180{
3181 struct fw_initialize_cmd c;
3182
3183 memset(&c, 0, sizeof(c));
3184 INIT_CMD(c, INITIALIZE, WRITE);
3185 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
3186}
3187
3188/**
3189 * t4_query_params - query FW or device parameters
3190 * @adap: the adapter
3191 * @mbox: mailbox to use for the FW command
3192 * @pf: the PF
3193 * @vf: the VF
3194 * @nparams: the number of parameters
3195 * @params: the parameter names
3196 * @val: the parameter values
3197 *
3198 * Reads the value of FW or device parameters. Up to 7 parameters can be
3199 * queried at once.
3200 */
3201int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
3202 unsigned int vf, unsigned int nparams, const u32 *params,
3203 u32 *val)
3204{
3205 int i, ret;
3206 struct fw_params_cmd c;
3207 __be32 *p = &c.param[0].mnem;
3208
3209 if (nparams > 7)
3210 return -EINVAL;
3211
3212 memset(&c, 0, sizeof(c));
3213 c.op_to_vfn = htonl(FW_CMD_OP(FW_PARAMS_CMD) | FW_CMD_REQUEST |
3214 FW_CMD_READ | FW_PARAMS_CMD_PFN(pf) |
3215 FW_PARAMS_CMD_VFN(vf));
3216 c.retval_len16 = htonl(FW_LEN16(c));
3217 for (i = 0; i < nparams; i++, p += 2)
3218 *p = htonl(*params++);
3219
3220 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
3221 if (ret == 0)
3222 for (i = 0, p = &c.param[0].val; i < nparams; i++, p += 2)
3223 *val++ = ntohl(*p);
3224 return ret;
3225}
3226
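
/*
 * Usage sketch (editorial): reading a single device parameter, here the
 * firmware revision, assuming the FW_PARAMS_MNEM()/FW_PARAMS_PARAM_X()
 * encoding macros and mnemonics from t4fw_api.h. Up to 7 name/value
 * pairs fit in one FW_PARAMS_CMD mailbox command.
 */
static int example_query_fw_rev(struct adapter *adap, u32 *fw_rev)
{
	u32 param = FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
		    FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_FWREV);

	return t4_query_params(adap, adap->mbox, adap->fn, 0, 1,
			       &param, fw_rev);
}
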
3227/**
3228 * t4_set_params_nosleep - sets FW or device parameters
3229 * @adap: the adapter
3230 * @mbox: mailbox to use for the FW command
3231 * @pf: the PF
3232 * @vf: the VF
3233 * @nparams: the number of parameters
3234 * @params: the parameter names
3235 * @val: the parameter values
3236 *
3237 * Sets the value of FW or device parameters, without ever sleeping.
3238 * Up to 7 parameters can be specified at once; safe to call from
3239 * contexts that cannot block.
3240 */
3241int t4_set_params_nosleep(struct adapter *adap, unsigned int mbox,
3242 unsigned int pf, unsigned int vf,
3243 unsigned int nparams, const u32 *params,
3244 const u32 *val)
3245{
3246 struct fw_params_cmd c;
3247 __be32 *p = &c.param[0].mnem;
3248
3249 if (nparams > 7)
3250 return -EINVAL;
3251
3252 memset(&c, 0, sizeof(c));
3253 c.op_to_vfn = cpu_to_be32(FW_CMD_OP(FW_PARAMS_CMD) |
3254 FW_CMD_REQUEST | FW_CMD_WRITE |
3255 FW_PARAMS_CMD_PFN(pf) |
3256 FW_PARAMS_CMD_VFN(vf));
3257 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
3258
3259 while (nparams--) {
3260 *p++ = cpu_to_be32(*params++);
3261 *p++ = cpu_to_be32(*val++);
3262 }
3263
3264 return t4_wr_mbox_ns(adap, mbox, &c, sizeof(c), NULL);
3265}
3266
3267/**
3268 * t4_set_params - sets FW or device parameters
3269 * @adap: the adapter
3270 * @mbox: mailbox to use for the FW command
3271 * @pf: the PF
3272 * @vf: the VF
3273 * @nparams: the number of parameters
3274 * @params: the parameter names
3275 * @val: the parameter values
3276 *
3277 * Sets the value of FW or device parameters. Up to 7 parameters can be
3278 * specified at once.
3279 */
3280int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
3281 unsigned int vf, unsigned int nparams, const u32 *params,
3282 const u32 *val)
3283{
3284 struct fw_params_cmd c;
3285 __be32 *p = &c.param[0].mnem;
3286
3287 if (nparams > 7)
3288 return -EINVAL;
3289
3290 memset(&c, 0, sizeof(c));
3291 c.op_to_vfn = htonl(FW_CMD_OP(FW_PARAMS_CMD) | FW_CMD_REQUEST |
3292 FW_CMD_WRITE | FW_PARAMS_CMD_PFN(pf) |
3293 FW_PARAMS_CMD_VFN(vf));
3294 c.retval_len16 = htonl(FW_LEN16(c));
3295 while (nparams--) {
3296 *p++ = htonl(*params++);
3297 *p++ = htonl(*val++);
3298 }
3299
3300 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
3301}
3302
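
/*
 * Usage sketch (editorial): writing one FW parameter. The name encoding
 * mirrors t4_query_params() above; @param stands in for any FW_PARAMS_*
 * mnemonic the caller has built.
 */
static int example_set_one_param(struct adapter *adap, u32 param, u32 val)
{
	return t4_set_params(adap, adap->mbox, adap->fn, 0, 1, &param, &val);
}
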
3303/**
3304 * t4_cfg_pfvf - configure PF/VF resource limits
3305 * @adap: the adapter
3306 * @mbox: mailbox to use for the FW command
3307 * @pf: the PF being configured
3308 * @vf: the VF being configured
3309 * @txq: the max number of egress queues
3310 * @txq_eth_ctrl: the max number of egress Ethernet or control queues
3311 * @rxqi: the max number of interrupt-capable ingress queues
3312 * @rxq: the max number of interruptless ingress queues
3313 * @tc: the PCI traffic class
3314 * @vi: the max number of virtual interfaces
3315 * @cmask: the channel access rights mask for the PF/VF
3316 * @pmask: the port access rights mask for the PF/VF
3317 * @nexact: the maximum number of exact MPS filters
3318 * @rcaps: read capabilities
3319 * @wxcaps: write/execute capabilities
3320 *
3321 * Configures resource limits and capabilities for a physical or virtual
3322 * function.
3323 */
3324int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf,
3325 unsigned int vf, unsigned int txq, unsigned int txq_eth_ctrl,
3326 unsigned int rxqi, unsigned int rxq, unsigned int tc,
3327 unsigned int vi, unsigned int cmask, unsigned int pmask,
3328 unsigned int nexact, unsigned int rcaps, unsigned int wxcaps)
3329{
3330 struct fw_pfvf_cmd c;
3331
3332 memset(&c, 0, sizeof(c));
3333 c.op_to_vfn = htonl(FW_CMD_OP(FW_PFVF_CMD) | FW_CMD_REQUEST |
3334 FW_CMD_WRITE | FW_PFVF_CMD_PFN(pf) |
3335 FW_PFVF_CMD_VFN(vf));
3336 c.retval_len16 = htonl(FW_LEN16(c));
3337 c.niqflint_niq = htonl(FW_PFVF_CMD_NIQFLINT(rxqi) |
3338 FW_PFVF_CMD_NIQ(rxq));
3339 c.type_to_neq = htonl(FW_PFVF_CMD_CMASK(cmask) |
3340 FW_PFVF_CMD_PMASK(pmask) |
3341 FW_PFVF_CMD_NEQ(txq));
3342 c.tc_to_nexactf = htonl(FW_PFVF_CMD_TC(tc) | FW_PFVF_CMD_NVI(vi) |
3343 FW_PFVF_CMD_NEXACTF(nexact));
3344 c.r_caps_to_nethctrl = htonl(FW_PFVF_CMD_R_CAPS(rcaps) |
3345 FW_PFVF_CMD_WX_CAPS(wxcaps) |
3346 FW_PFVF_CMD_NETHCTRL(txq_eth_ctrl));
3347 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
3348}
3349
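
/*
 * Usage sketch (editorial, with illustrative resource counts only):
 * provisioning a VF with a small queue footprint. The FW_CMD_CAP_*
 * capability masks come from t4fw_api.h; real drivers derive these
 * limits from their resource budget rather than hard-coding them.
 */
static int example_provision_vf(struct adapter *adap, unsigned int pf,
				unsigned int vf)
{
	return t4_cfg_pfvf(adap, adap->mbox, pf, vf,
			   4,		/* max egress queues */
			   4,		/* max Ethernet/control egress queues */
			   4,		/* max interrupt-capable ingress queues */
			   4,		/* max interruptless ingress queues */
			   0,		/* PCI traffic class */
			   1,		/* max virtual interfaces */
			   0xf, 0xf,	/* channel and port access masks */
			   16,		/* max exact MPS filters */
			   FW_CMD_CAP_DMAQ | FW_CMD_CAP_VF | FW_CMD_CAP_PORT,
			   FW_CMD_CAP_DMAQ | FW_CMD_CAP_VF);
}
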
3350/**
3351 * t4_alloc_vi - allocate a virtual interface
3352 * @adap: the adapter
3353 * @mbox: mailbox to use for the FW command
3354 * @port: physical port associated with the VI
3355 * @pf: the PF owning the VI
3356 * @vf: the VF owning the VI
3357 * @nmac: number of MAC addresses needed (1 to 5)
3358 * @mac: the MAC addresses of the VI
3359 * @rss_size: size of RSS table slice associated with this VI
3360 *
3361 * Allocates a virtual interface for the given physical port. If @mac is
3362 * not %NULL it contains the MAC addresses of the VI as assigned by FW.
3363 * @mac should be large enough to hold @nmac Ethernet addresses, they are
3364 * stored consecutively so the space needed is @nmac * 6 bytes.
3365 * Returns a negative error number or the non-negative VI id.
3366 */
3367int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
3368 unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac,
3369 unsigned int *rss_size)
3370{
3371 int ret;
3372 struct fw_vi_cmd c;
3373
3374 memset(&c, 0, sizeof(c));
3375 c.op_to_vfn = htonl(FW_CMD_OP(FW_VI_CMD) | FW_CMD_REQUEST |
3376 FW_CMD_WRITE | FW_CMD_EXEC |
3377 FW_VI_CMD_PFN(pf) | FW_VI_CMD_VFN(vf));
3378 c.alloc_to_len16 = htonl(FW_VI_CMD_ALLOC | FW_LEN16(c));
3379 c.portid_pkd = FW_VI_CMD_PORTID(port);
3380 c.nmac = nmac - 1;
3381
3382 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
3383 if (ret)
3384 return ret;
3385
3386 if (mac) {
3387 memcpy(mac, c.mac, sizeof(c.mac));
3388 switch (nmac) {
3389 case 5:
3390 memcpy(mac + 24, c.nmac3, sizeof(c.nmac3)); /* fall through */
3391 case 4:
3392 memcpy(mac + 18, c.nmac2, sizeof(c.nmac2)); /* fall through */
3393 case 3:
3394 memcpy(mac + 12, c.nmac1, sizeof(c.nmac1)); /* fall through */
3395 case 2:
3396 memcpy(mac + 6, c.nmac0, sizeof(c.nmac0));
3397 }
3398 }
3399 if (rss_size)
3400 *rss_size = FW_VI_CMD_RSSSIZE_GET(ntohs(c.rsssize_pkd));
3401 return FW_VI_CMD_VIID_GET(ntohs(c.type_viid));
3402}
3403
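
/*
 * Usage sketch (editorial): allocating a VI with one MAC address on
 * physical port 0. On success the non-negative return value is the VI
 * id and @mac holds the FW-assigned station address (6 bytes for
 * nmac == 1).
 */
static int example_alloc_vi(struct adapter *adap, u8 *mac)
{
	unsigned int rss_size;

	return t4_alloc_vi(adap, adap->mbox, 0, adap->fn, 0, 1,
			   mac, &rss_size);
}
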
3404/**
3405 * t4_set_rxmode - set Rx properties of a virtual interface
3406 * @adap: the adapter
3407 * @mbox: mailbox to use for the FW command
3408 * @viid: the VI id
3409 * @mtu: the new MTU or -1
3410 * @promisc: 1 to enable promiscuous mode, 0 to disable it, -1 no change
3411 * @all_multi: 1 to enable all-multi mode, 0 to disable it, -1 no change
3412 * @bcast: 1 to enable broadcast Rx, 0 to disable it, -1 no change
3413 * @vlanex: 1 to enable HW VLAN extraction, 0 to disable it, -1 no change
3414 * @sleep_ok: if true we may sleep while awaiting command completion
3415 *
3416 * Sets Rx properties of a virtual interface.
3417 */
3418int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
3419 int mtu, int promisc, int all_multi, int bcast, int vlanex,
3420 bool sleep_ok)
3421{
3422 struct fw_vi_rxmode_cmd c;
3423
3424 /* convert to FW values */
3425 if (mtu < 0)
3426 mtu = FW_RXMODE_MTU_NO_CHG;
3427 if (promisc < 0)
3428 promisc = FW_VI_RXMODE_CMD_PROMISCEN_MASK;
3429 if (all_multi < 0)
3430 all_multi = FW_VI_RXMODE_CMD_ALLMULTIEN_MASK;
3431 if (bcast < 0)
3432 bcast = FW_VI_RXMODE_CMD_BROADCASTEN_MASK;
3433 if (vlanex < 0)
3434 vlanex = FW_VI_RXMODE_CMD_VLANEXEN_MASK;
3435
3436 memset(&c, 0, sizeof(c));
3437 c.op_to_viid = htonl(FW_CMD_OP(FW_VI_RXMODE_CMD) | FW_CMD_REQUEST |
3438 FW_CMD_WRITE | FW_VI_RXMODE_CMD_VIID(viid));
3439 c.retval_len16 = htonl(FW_LEN16(c));
3440 c.mtu_to_vlanexen = htonl(FW_VI_RXMODE_CMD_MTU(mtu) |
3441 FW_VI_RXMODE_CMD_PROMISCEN(promisc) |
3442 FW_VI_RXMODE_CMD_ALLMULTIEN(all_multi) |
3443 FW_VI_RXMODE_CMD_BROADCASTEN(bcast) |
3444 FW_VI_RXMODE_CMD_VLANEXEN(vlanex));
3445 return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
3446}
3447
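
/*
 * Usage sketch (editorial): toggling only promiscuous mode on a VI.
 * Passing -1 for the remaining knobs leaves the MTU, all-multi,
 * broadcast and VLAN-extraction settings unchanged.
 */
static int example_set_promisc(struct adapter *adap, unsigned int viid,
			       bool on)
{
	return t4_set_rxmode(adap, adap->mbox, viid, -1, on ? 1 : 0,
			     -1, -1, -1, true);
}
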
3448/**
3449 * t4_alloc_mac_filt - allocates exact-match filters for MAC addresses
3450 * @adap: the adapter
3451 * @mbox: mailbox to use for the FW command
3452 * @viid: the VI id
3453 * @free: if true any existing filters for this VI id are first removed
3454 * @naddr: the number of MAC addresses to allocate filters for (up to 7)
3455 * @addr: the MAC address(es)
3456 * @idx: where to store the index of each allocated filter
3457 * @hash: pointer to hash address filter bitmap
3458 * @sleep_ok: call is allowed to sleep
3459 *
3460 * Allocates an exact-match filter for each of the supplied addresses and
3461 * sets it to the corresponding address. If @idx is not %NULL it should
3462 * have at least @naddr entries, each of which will be set to the index of
3463 * the filter allocated for the corresponding MAC address. If a filter
3464 * could not be allocated for an address its index is set to 0xffff.
3465 * If @hash is not %NULL, addresses that fail to allocate an exact
3466 * filter are hashed and used to update the hash filter bitmap at @hash.
3467 *
3468 * Returns a negative error number or the number of filters allocated.
3469 */
3470int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox,
3471 unsigned int viid, bool free, unsigned int naddr,
3472 const u8 **addr, u16 *idx, u64 *hash, bool sleep_ok)
3473{
3474 int i, ret;
3475 struct fw_vi_mac_cmd c;
3476 struct fw_vi_mac_exact *p;
3477 unsigned int max_naddr = is_t4(adap->params.chip) ?
3478 NUM_MPS_CLS_SRAM_L_INSTANCES :
3479 NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
3480
3481 if (naddr > 7)
3482 return -EINVAL;
3483
3484 memset(&c, 0, sizeof(c));
3485 c.op_to_viid = htonl(FW_CMD_OP(FW_VI_MAC_CMD) | FW_CMD_REQUEST |
3486 FW_CMD_WRITE | (free ? FW_CMD_EXEC : 0) |
3487 FW_VI_MAC_CMD_VIID(viid));
3488 c.freemacs_to_len16 = htonl(FW_VI_MAC_CMD_FREEMACS(free) |
3489 FW_CMD_LEN16((naddr + 2) / 2));
3490
3491 for (i = 0, p = c.u.exact; i < naddr; i++, p++) {
3492 p->valid_to_idx = htons(FW_VI_MAC_CMD_VALID |
3493 FW_VI_MAC_CMD_IDX(FW_VI_MAC_ADD_MAC));
3494 memcpy(p->macaddr, addr[i], sizeof(p->macaddr));
3495 }
3496
3497 ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok);
3498 if (ret)
3499 return ret;
3500
3501 for (i = 0, p = c.u.exact; i < naddr; i++, p++) {
3502 u16 index = FW_VI_MAC_CMD_IDX_GET(ntohs(p->valid_to_idx));
3503
3504 if (idx)
3505 idx[i] = index >= max_naddr ? 0xffff : index;
3506 if (index < max_naddr)
3507 ret++;
3508 else if (hash)
3509 *hash |= (1ULL << hash_mac_addr(addr[i]));
3510 }
3511 return ret;
3512}
3513
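
/*
 * Usage sketch (editorial): installing exact-match filters for a short
 * unicast address list. Addresses that overflow the exact-match table
 * land in @uhash, which the caller then programs with t4_set_addr_hash()
 * (defined below).
 */
static int example_add_addrs(struct adapter *adap, unsigned int viid,
			     const u8 **addrs, unsigned int n)
{
	u16 idx[7];
	u64 uhash = 0;
	int nfilters;

	if (n > 7)
		return -EINVAL;
	nfilters = t4_alloc_mac_filt(adap, adap->mbox, viid, false, n,
				     addrs, idx, &uhash, true);
	if (nfilters < 0)
		return nfilters;
	if (uhash)	/* some addresses fell back to the hash filter */
		return t4_set_addr_hash(adap, adap->mbox, viid, true,
					uhash, true);
	return 0;
}
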
3514/**
3515 * t4_change_mac - modifies the exact-match filter for a MAC address
3516 * @adap: the adapter
3517 * @mbox: mailbox to use for the FW command
3518 * @viid: the VI id
3519 * @idx: index of existing filter for old value of MAC address, or -1
3520 * @addr: the new MAC address value
3521 * @persist: whether a new MAC allocation should be persistent
3522 * @add_smt: if true also add the address to the HW SMT
3523 *
3524 * Modifies an exact-match filter and sets it to the new MAC address.
3525 * Note that in general it is not possible to modify the value of a given
3526 * filter so the generic way to modify an address filter is to free the one
3527 * being used by the old address value and allocate a new filter for the
3528 * new address value. @idx can be -1 if the address is a new addition.
3529 *
3530 * Returns a negative error number or the index of the filter with the new
3531 * MAC value.
3532 */
3533int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
3534 int idx, const u8 *addr, bool persist, bool add_smt)
3535{
3536 int ret, mode;
3537 struct fw_vi_mac_cmd c;
3538 struct fw_vi_mac_exact *p = c.u.exact;
3539 unsigned int max_mac_addr = is_t4(adap->params.chip) ?
3540 NUM_MPS_CLS_SRAM_L_INSTANCES :
3541 NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
3542
3543 if (idx < 0) /* new allocation */
3544 idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC;
3545 mode = add_smt ? FW_VI_MAC_SMT_AND_MPSTCAM : FW_VI_MAC_MPS_TCAM_ENTRY;
3546
3547 memset(&c, 0, sizeof(c));
3548 c.op_to_viid = htonl(FW_CMD_OP(FW_VI_MAC_CMD) | FW_CMD_REQUEST |
3549 FW_CMD_WRITE | FW_VI_MAC_CMD_VIID(viid));
3550 c.freemacs_to_len16 = htonl(FW_CMD_LEN16(1));
3551 p->valid_to_idx = htons(FW_VI_MAC_CMD_VALID |
3552 FW_VI_MAC_CMD_SMAC_RESULT(mode) |
3553 FW_VI_MAC_CMD_IDX(idx));
3554 memcpy(p->macaddr, addr, sizeof(p->macaddr));
3555
3556 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
3557 if (ret == 0) {
3558 ret = FW_VI_MAC_CMD_IDX_GET(ntohs(p->valid_to_idx));
3559 if (ret >= max_mac_addr)
3560 ret = -ENOMEM;
3561 }
3562 return ret;
3563}
3564
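
/*
 * Usage sketch (editorial): replacing a VI's primary unicast address.
 * The index returned by the previous t4_change_mac() call is passed back
 * so the existing filter slot is rewritten; -1 would request a fresh,
 * persistent allocation instead.
 */
static int example_replace_mac(struct adapter *adap, unsigned int viid,
			       int old_idx, const u8 *new_addr)
{
	/* remember the returned index for the next address change */
	return t4_change_mac(adap, adap->mbox, viid, old_idx,
			     new_addr, true, true);
}
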
3565/**
3566 * t4_set_addr_hash - program the MAC inexact-match hash filter
3567 * @adap: the adapter
3568 * @mbox: mailbox to use for the FW command
3569 * @viid: the VI id
3570 * @ucast: whether the hash filter should also match unicast addresses
3571 * @vec: the value to be written to the hash filter
3572 * @sleep_ok: call is allowed to sleep
3573 *
3574 * Sets the 64-bit inexact-match hash filter for a virtual interface.
3575 */
3576int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid,
3577 bool ucast, u64 vec, bool sleep_ok)
3578{
3579 struct fw_vi_mac_cmd c;
3580
3581 memset(&c, 0, sizeof(c));
3582 c.op_to_viid = htonl(FW_CMD_OP(FW_VI_MAC_CMD) | FW_CMD_REQUEST |
3583 FW_CMD_WRITE | FW_VI_ENABLE_CMD_VIID(viid));
3584 c.freemacs_to_len16 = htonl(FW_VI_MAC_CMD_HASHVECEN |
3585 FW_VI_MAC_CMD_HASHUNIEN(ucast) |
3586 FW_CMD_LEN16(1));
3587 c.u.hash.hashvec = cpu_to_be64(vec);
3588 return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
3589}
3590
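
/*
 * Usage sketch (editorial): building the 64-bit inexact-match vector for
 * a multicast list with the same hash_mac_addr() mapping that
 * t4_alloc_mac_filt() applies to its overflow addresses, then programming
 * it with unicast matching disabled.
 */
static int example_set_mcast_hash(struct adapter *adap, unsigned int viid,
				  const u8 **mcaddrs, unsigned int n)
{
	u64 vec = 0;
	unsigned int i;

	for (i = 0; i < n; i++)
		vec |= 1ULL << hash_mac_addr(mcaddrs[i]);
	return t4_set_addr_hash(adap, adap->mbox, viid, false, vec, true);
}
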
3591/**
3592 * t4_enable_vi_params - enable/disable a virtual interface
3593 * @adap: the adapter
3594 * @mbox: mailbox to use for the FW command
3595 * @viid: the VI id
3596 * @rx_en: 1=enable Rx, 0=disable Rx
3597 * @tx_en: 1=enable Tx, 0=disable Tx
3598 * @dcb_en: 1=enable delivery of Data Center Bridging messages.
3599 *
3600 * Enables/disables a virtual interface. Note that setting DCB Enable
3601 * only makes sense when enabling a Virtual Interface ...
3602 */
3603int t4_enable_vi_params(struct adapter *adap, unsigned int mbox,
3604 unsigned int viid, bool rx_en, bool tx_en, bool dcb_en)
3605{
3606 struct fw_vi_enable_cmd c;
3607
3608 memset(&c, 0, sizeof(c));
3609 c.op_to_viid = htonl(FW_CMD_OP(FW_VI_ENABLE_CMD) | FW_CMD_REQUEST |
3610 FW_CMD_EXEC | FW_VI_ENABLE_CMD_VIID(viid));
3611
3612 c.ien_to_len16 = htonl(FW_VI_ENABLE_CMD_IEN(rx_en) |
3613 FW_VI_ENABLE_CMD_EEN(tx_en) | FW_LEN16(c) |
3614 FW_VI_ENABLE_CMD_DCB_INFO(dcb_en));
3615 return t4_wr_mbox_ns(adap, mbox, &c, sizeof(c), NULL);
3616}
3617
3618/**
3619 * t4_enable_vi - enable/disable a virtual interface
3620 * @adap: the adapter
3621 * @mbox: mailbox to use for the FW command
3622 * @viid: the VI id
3623 * @rx_en: 1=enable Rx, 0=disable Rx
3624 * @tx_en: 1=enable Tx, 0=disable Tx
3625 *
3626 * Enables/disables a virtual interface.
3627 */
3628int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid,
3629 bool rx_en, bool tx_en)
3630{
3631 return t4_enable_vi_params(adap, mbox, viid, rx_en, tx_en, 0);
3632}
3633
3634/**
3635 * t4_identify_port - identify a VI's port by blinking its LED
3636 * @adap: the adapter
3637 * @mbox: mailbox to use for the FW command
3638 * @viid: the VI id
3639 * @nblinks: how many times to blink LED at 2.5 Hz
3640 *
3641 * Identifies a VI's port by blinking its LED.
3642 */
3643int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid,
3644 unsigned int nblinks)
3645{
3646 struct fw_vi_enable_cmd c;
3647
3648 memset(&c, 0, sizeof(c));
3649 c.op_to_viid = htonl(FW_CMD_OP(FW_VI_ENABLE_CMD) | FW_CMD_REQUEST |
3650 FW_CMD_EXEC | FW_VI_ENABLE_CMD_VIID(viid));
3651 c.ien_to_len16 = htonl(FW_VI_ENABLE_CMD_LED | FW_LEN16(c));
3652 c.blinkdur = htons(nblinks);
3653 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
3654}
3655
3656/**
3657 * t4_iq_free - free an ingress queue and its FLs
3658 * @adap: the adapter
3659 * @mbox: mailbox to use for the FW command
3660 * @pf: the PF owning the queues
3661 * @vf: the VF owning the queues
3662 * @iqtype: the ingress queue type
3663 * @iqid: ingress queue id
3664 * @fl0id: FL0 queue id or 0xffff if no attached FL0
3665 * @fl1id: FL1 queue id or 0xffff if no attached FL1
3666 *
3667 * Frees an ingress queue and its associated FLs, if any.
3668 */
3669int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
3670 unsigned int vf, unsigned int iqtype, unsigned int iqid,
3671 unsigned int fl0id, unsigned int fl1id)
3672{
3673 struct fw_iq_cmd c;
3674
3675 memset(&c, 0, sizeof(c));
3676 c.op_to_vfn = htonl(FW_CMD_OP(FW_IQ_CMD) | FW_CMD_REQUEST |
3677 FW_CMD_EXEC | FW_IQ_CMD_PFN(pf) |
3678 FW_IQ_CMD_VFN(vf));
3679 c.alloc_to_len16 = htonl(FW_IQ_CMD_FREE | FW_LEN16(c));
3680 c.type_to_iqandstindex = htonl(FW_IQ_CMD_TYPE(iqtype));
3681 c.iqid = htons(iqid);
3682 c.fl0id = htons(fl0id);
3683 c.fl1id = htons(fl1id);
3684 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
3685}
3686
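
/*
 * Usage sketch (editorial): freeing an interrupt-capable ingress queue
 * that has no attached free lists; 0xffff marks an absent FL.
 * FW_IQ_TYPE_FL_INT_CAP is the usual type for NIC ingress queues.
 */
static int example_free_iq(struct adapter *adap, unsigned int iqid)
{
	return t4_iq_free(adap, adap->mbox, adap->fn, 0,
			  FW_IQ_TYPE_FL_INT_CAP, iqid, 0xffff, 0xffff);
}
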
3687/**
3688 * t4_eth_eq_free - free an Ethernet egress queue
3689 * @adap: the adapter
3690 * @mbox: mailbox to use for the FW command
3691 * @pf: the PF owning the queue
3692 * @vf: the VF owning the queue
3693 * @eqid: egress queue id
3694 *
3695 * Frees an Ethernet egress queue.
3696 */
3697int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
3698 unsigned int vf, unsigned int eqid)
3699{
3700 struct fw_eq_eth_cmd c;
3701
3702 memset(&c, 0, sizeof(c));
3703 c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_ETH_CMD) | FW_CMD_REQUEST |
3704 FW_CMD_EXEC | FW_EQ_ETH_CMD_PFN(pf) |
3705 FW_EQ_ETH_CMD_VFN(vf));
3706 c.alloc_to_len16 = htonl(FW_EQ_ETH_CMD_FREE | FW_LEN16(c));
3707 c.eqid_pkd = htonl(FW_EQ_ETH_CMD_EQID(eqid));
3708 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
3709}
3710
3711/**
3712 * t4_ctrl_eq_free - free a control egress queue
3713 * @adap: the adapter
3714 * @mbox: mailbox to use for the FW command
3715 * @pf: the PF owning the queue
3716 * @vf: the VF owning the queue
3717 * @eqid: egress queue id
3718 *
3719 * Frees a control egress queue.
3720 */
3721int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
3722 unsigned int vf, unsigned int eqid)
3723{
3724 struct fw_eq_ctrl_cmd c;
3725
3726 memset(&c, 0, sizeof(c));
3727 c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_CTRL_CMD) | FW_CMD_REQUEST |
3728 FW_CMD_EXEC | FW_EQ_CTRL_CMD_PFN(pf) |
3729 FW_EQ_CTRL_CMD_VFN(vf));
3730 c.alloc_to_len16 = htonl(FW_EQ_CTRL_CMD_FREE | FW_LEN16(c));
3731 c.cmpliqid_eqid = htonl(FW_EQ_CTRL_CMD_EQID(eqid));
3732 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
3733}
3734
3735/**
3736 * t4_ofld_eq_free - free an offload egress queue
3737 * @adap: the adapter
3738 * @mbox: mailbox to use for the FW command
3739 * @pf: the PF owning the queue
3740 * @vf: the VF owning the queue
3741 * @eqid: egress queue id
3742 *
3743 * Frees an offload egress queue.
3744 */
3745int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
3746 unsigned int vf, unsigned int eqid)
3747{
3748 struct fw_eq_ofld_cmd c;
3749
3750 memset(&c, 0, sizeof(c));
3751 c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_OFLD_CMD) | FW_CMD_REQUEST |
3752 FW_CMD_EXEC | FW_EQ_OFLD_CMD_PFN(pf) |
3753 FW_EQ_OFLD_CMD_VFN(vf));
3754 c.alloc_to_len16 = htonl(FW_EQ_OFLD_CMD_FREE | FW_LEN16(c));
3755 c.eqid_pkd = htonl(FW_EQ_OFLD_CMD_EQID(eqid));
3756 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
3757}
3758
3759/**
3760 * t4_handle_fw_rpl - process a FW reply message
3761 * @adap: the adapter
3762 * @rpl: start of the FW message
3763 *
3764 * Processes a FW message, such as link state change messages.
3765 */
3766int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl)
3767{
3768 u8 opcode = *(const u8 *)rpl;
3769
3770 if (opcode == FW_PORT_CMD) { /* link/module state change message */
3771 int speed = 0, fc = 0;
3772 const struct fw_port_cmd *p = (void *)rpl;
3773 int chan = FW_PORT_CMD_PORTID_GET(ntohl(p->op_to_portid));
3774 int port = adap->chan_map[chan];
3775 struct port_info *pi = adap2pinfo(adap, port);
3776 struct link_config *lc = &pi->link_cfg;
3777 u32 stat = ntohl(p->u.info.lstatus_to_modtype);
3778 int link_ok = (stat & FW_PORT_CMD_LSTATUS) != 0;
3779 u32 mod = FW_PORT_CMD_MODTYPE_GET(stat);
3780
3781 if (stat & FW_PORT_CMD_RXPAUSE)
3782 fc |= PAUSE_RX;
3783 if (stat & FW_PORT_CMD_TXPAUSE)
3784 fc |= PAUSE_TX;
3785 if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100M))
3786 speed = 100;
3787 else if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_1G))
3788 speed = 1000;
3789 else if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_10G))
3790 speed = 10000;
3791 else if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_40G))
3792 speed = 40000;
3793
3794 if (link_ok != lc->link_ok || speed != lc->speed ||
3795 fc != lc->fc) { /* something changed */
3796 lc->link_ok = link_ok;
3797 lc->speed = speed;
3798 lc->fc = fc;
3799 lc->supported = be16_to_cpu(p->u.info.pcap);
3800 t4_os_link_changed(adap, port, link_ok);
3801 }
3802 if (mod != pi->mod_type) {
3803 pi->mod_type = mod;
3804 t4_os_portmod_changed(adap, port);
3805 }
3806 }
3807 return 0;
3808}
3809
3810 static void get_pci_mode(struct adapter *adapter, struct pci_params *p)
3811{
3812 u16 val;
3813
3814 if (pci_is_pcie(adapter->pdev)) {
3815 pcie_capability_read_word(adapter->pdev, PCI_EXP_LNKSTA, &val);
3816 p->speed = val & PCI_EXP_LNKSTA_CLS;
3817 p->width = (val & PCI_EXP_LNKSTA_NLW) >> 4;
3818 }
3819}
3820
3821/**
3822 * init_link_config - initialize a link's SW state
3823 * @lc: structure holding the link state
3824 * @caps: link capabilities
3825 *
3826 * Initializes the SW state maintained for each link, including the link's
3827 * capabilities and default speed/flow-control/autonegotiation settings.
3828 */
3829 static void init_link_config(struct link_config *lc, unsigned int caps)
3830{
3831 lc->supported = caps;
3832 lc->requested_speed = 0;
3833 lc->speed = 0;
3834 lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
3835 if (lc->supported & FW_PORT_CAP_ANEG) {
3836 lc->advertising = lc->supported & ADVERT_MASK;
3837 lc->autoneg = AUTONEG_ENABLE;
3838 lc->requested_fc |= PAUSE_AUTONEG;
3839 } else {
3840 lc->advertising = 0;
3841 lc->autoneg = AUTONEG_DISABLE;
3842 }
3843}
3844
3845 int t4_wait_dev_ready(struct adapter *adap)
3846{
3847 if (t4_read_reg(adap, PL_WHOAMI) != 0xffffffff)
3848 return 0;
3849 msleep(500);
3850 return t4_read_reg(adap, PL_WHOAMI) != 0xffffffff ? 0 : -EIO;
3851}
3852
3853 static int get_flash_params(struct adapter *adap)
3854{
3855 int ret;
3856 u32 info;
3857
3858 ret = sf1_write(adap, 1, 1, 0, SF_RD_ID);
3859 if (!ret)
3860 ret = sf1_read(adap, 3, 0, 1, &info);
3861 t4_write_reg(adap, SF_OP, 0); /* unlock SF */
3862 if (ret)
3863 return ret;
3864
3865 if ((info & 0xff) != 0x20) /* not a Numonyx flash */
3866 return -EINVAL;
3867 info >>= 16; /* log2 of size */
3868 if (info >= 0x14 && info < 0x18)
3869 adap->params.sf_nsec = 1 << (info - 16);
3870 else if (info == 0x18)
3871 adap->params.sf_nsec = 64;
3872 else
3873 return -EINVAL;
3874 adap->params.sf_size = 1 << info;
3875 adap->params.sf_fw_start =
3876 t4_read_reg(adap, CIM_BOOT_CFG) & BOOTADDR_MASK;
3877 return 0;
3878}
3879
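
/*
 * Worked example (editorial): a 4 MB serial flash reports manufacturer
 * 0x20 in the low ID byte and log2(size) = 0x16 in bits 23:16, so the
 * code above sets sf_size = 1 << 0x16 = 4 MB and sf_nsec =
 * 1 << (0x16 - 16) = 64 sectors of 64 KB each.
 */
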
3880/**
3881 * t4_prep_adapter - prepare SW and HW for operation
3882 * @adapter: the adapter
3883 * @reset: if true perform a HW reset
3884 *
3885 * Initialize adapter SW state for the various HW modules, set initial
3886 * values for some adapter tunables, take PHYs out of reset, and
3887 * initialize the MDIO interface.
3888 */
3889 int t4_prep_adapter(struct adapter *adapter)
3890 {
3891 int ret, ver;
3892 uint16_t device_id;
3893 u32 pl_rev;
3894
3895 ret = t4_wait_dev_ready(adapter);
3896 if (ret < 0)
3897 return ret;
3898
3899 get_pci_mode(adapter, &adapter->params.pci);
3900 pl_rev = G_REV(t4_read_reg(adapter, PL_REV));
3901
3902 ret = get_flash_params(adapter);
3903 if (ret < 0) {
3904 dev_err(adapter->pdev_dev, "error %d identifying flash\n", ret);
3905 return ret;
3906 }
3907
3908 /* Retrieve adapter's device ID
3909 */
3910 pci_read_config_word(adapter->pdev, PCI_DEVICE_ID, &device_id);
3911 ver = device_id >> 12;
3912 adapter->params.chip = 0;
3913 switch (ver) {
3914 case CHELSIO_T4:
3915 adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T4, pl_rev);
3916 break;
3917 case CHELSIO_T5:
3918 adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T5, pl_rev);
3919 break;
3920 default:
3921 dev_err(adapter->pdev_dev, "Device %d is not supported\n",
3922 device_id);
3923 return -EINVAL;
3924 }
3925
3926 init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);
3927
3928 /*
3929 * Default port for debugging in case we can't reach FW.
3930 */
3931 adapter->params.nports = 1;
3932 adapter->params.portvec = 1;
3933 adapter->params.vpd.cclk = 50000;
3934 return 0;
3935}
3936
3937/**
3938 * t4_init_tp_params - initialize adap->params.tp
3939 * @adap: the adapter
3940 *
3941 * Initialize various fields of the adapter's TP Parameters structure.
3942 */
3943int t4_init_tp_params(struct adapter *adap)
3944{
3945 int chan;
3946 u32 v;
3947
3948 v = t4_read_reg(adap, TP_TIMER_RESOLUTION);
3949 adap->params.tp.tre = TIMERRESOLUTION_GET(v);
3950 adap->params.tp.dack_re = DELAYEDACKRESOLUTION_GET(v);
3951
3952 /* MODQ_REQ_MAP defaults to setting queues 0-3 to chan 0-3 */
3953 for (chan = 0; chan < NCHAN; chan++)
3954 adap->params.tp.tx_modq[chan] = chan;
3955
3956 /* Cache the adapter's Compressed Filter Mode and global Ingress
3957 * Configuration.
3958 */
3959 t4_read_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
3960 &adap->params.tp.vlan_pri_map, 1,
3961 TP_VLAN_PRI_MAP);
3962 t4_read_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
3963 &adap->params.tp.ingress_config, 1,
3964 TP_INGRESS_CONFIG);
3965
3966 /* Now that we have TP_VLAN_PRI_MAP cached, we can calculate the field
3967 * shift positions of several elements of the Compressed Filter Tuple
3968 * for this adapter which we need frequently ...
3969 */
3970 adap->params.tp.vlan_shift = t4_filter_field_shift(adap, F_VLAN);
3971 adap->params.tp.vnic_shift = t4_filter_field_shift(adap, F_VNIC_ID);
3972 adap->params.tp.port_shift = t4_filter_field_shift(adap, F_PORT);
3973 adap->params.tp.protocol_shift = t4_filter_field_shift(adap,
3974 F_PROTOCOL);
3975
3976 /* If TP_INGRESS_CONFIG.VNID == 0, then TP_VLAN_PRI_MAP.VNIC_ID
3977 * represents the presence of an Outer VLAN instead of a VNIC ID.
3978 */
3979 if ((adap->params.tp.ingress_config & F_VNIC) == 0)
3980 adap->params.tp.vnic_shift = -1;
3981
3982 return 0;
3983}
3984
3985/**
3986 * t4_filter_field_shift - calculate filter field shift
3987 * @adap: the adapter
3988 * @filter_sel: the desired field (from TP_VLAN_PRI_MAP bits)
3989 *
3990 * Return the shift position of a filter field within the Compressed
3991 * Filter Tuple. The filter field is specified via its selection bit
3992 * within TP_VLAN_PRI_MAP (filter mode). E.g. F_VLAN.
3993 */
3994int t4_filter_field_shift(const struct adapter *adap, int filter_sel)
3995{
3996 unsigned int filter_mode = adap->params.tp.vlan_pri_map;
3997 unsigned int sel;
3998 int field_shift;
3999
4000 if ((filter_mode & filter_sel) == 0)
4001 return -1;
4002
4003 for (sel = 1, field_shift = 0; sel < filter_sel; sel <<= 1) {
4004 switch (filter_mode & sel) {
4005 case F_FCOE:
4006 field_shift += W_FT_FCOE;
4007 break;
4008 case F_PORT:
4009 field_shift += W_FT_PORT;
4010 break;
4011 case F_VNIC_ID:
4012 field_shift += W_FT_VNIC_ID;
4013 break;
4014 case F_VLAN:
4015 field_shift += W_FT_VLAN;
4016 break;
4017 case F_TOS:
4018 field_shift += W_FT_TOS;
4019 break;
4020 case F_PROTOCOL:
4021 field_shift += W_FT_PROTOCOL;
4022 break;
4023 case F_ETHERTYPE:
4024 field_shift += W_FT_ETHERTYPE;
4025 break;
4026 case F_MACMATCH:
4027 field_shift += W_FT_MACMATCH;
4028 break;
4029 case F_MPSHITTYPE:
4030 field_shift += W_FT_MPSHITTYPE;
4031 break;
4032 case F_FRAGMENTATION:
4033 field_shift += W_FT_FRAGMENTATION;
4034 break;
4035 }
4036 }
4037 return field_shift;
4038}
4039
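
/*
 * Worked example (editorial, assuming the usual T4 selector ordering
 * with FCOE in the lowest TP_VLAN_PRI_MAP bit): if the filter mode
 * enables only F_PORT, F_VLAN and F_PROTOCOL, then
 * t4_filter_field_shift(adap, F_PORT) returns 0,
 * t4_filter_field_shift(adap, F_VLAN) returns W_FT_PORT, and
 * t4_filter_field_shift(adap, F_PROTOCOL) returns W_FT_PORT + W_FT_VLAN,
 * since each field is packed immediately above the enabled fields with
 * lower selector bits.
 */
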
4040 int t4_port_init(struct adapter *adap, int mbox, int pf, int vf)
4041{
4042 u8 addr[6];
4043 int ret, i, j = 0;
4044 struct fw_port_cmd c;
4045 struct fw_rss_vi_config_cmd rvc;
4046
4047 memset(&c, 0, sizeof(c));
4048 memset(&rvc, 0, sizeof(rvc));
4049
4050 for_each_port(adap, i) {
4051 unsigned int rss_size;
4052 struct port_info *p = adap2pinfo(adap, i);
4053
4054 while ((adap->params.portvec & (1 << j)) == 0)
4055 j++;
4056
4057 c.op_to_portid = htonl(FW_CMD_OP(FW_PORT_CMD) |
4058 FW_CMD_REQUEST | FW_CMD_READ |
4059 FW_PORT_CMD_PORTID(j));
4060 c.action_to_len16 = htonl(
4061 FW_PORT_CMD_ACTION(FW_PORT_ACTION_GET_PORT_INFO) |
4062 FW_LEN16(c));
4063 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
4064 if (ret)
4065 return ret;
4066
4067 ret = t4_alloc_vi(adap, mbox, j, pf, vf, 1, addr, &rss_size);
4068 if (ret < 0)
4069 return ret;
4070
4071 p->viid = ret;
4072 p->tx_chan = j;
4073 p->lport = j;
4074 p->rss_size = rss_size;
4075 memcpy(adap->port[i]->dev_addr, addr, ETH_ALEN);
4076 adap->port[i]->dev_port = j;
4077
4078 ret = ntohl(c.u.info.lstatus_to_modtype);
4079 p->mdio_addr = (ret & FW_PORT_CMD_MDIOCAP) ?
4080 FW_PORT_CMD_MDIOADDR_GET(ret) : -1;
4081 p->port_type = FW_PORT_CMD_PTYPE_GET(ret);
4082 p->mod_type = FW_PORT_MOD_TYPE_NA;
4083
4084 rvc.op_to_viid = htonl(FW_CMD_OP(FW_RSS_VI_CONFIG_CMD) |
4085 FW_CMD_REQUEST | FW_CMD_READ |
4086 FW_RSS_VI_CONFIG_CMD_VIID(p->viid));
4087 rvc.retval_len16 = htonl(FW_LEN16(rvc));
4088 ret = t4_wr_mbox(adap, mbox, &rvc, sizeof(rvc), &rvc);
4089 if (ret)
4090 return ret;
4091 p->rss_mode = ntohl(rvc.u.basicvirtual.defaultq_to_udpen);
4092
4093 init_link_config(&p->link_cfg, ntohs(c.u.info.pcap));
4094 j++;
4095 }
4096 return 0;
4097}