/*
 * This file is part of the Chelsio T4 Ethernet driver for Linux.
 *
 * Copyright (c) 2003-2014 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/delay.h>
#include "cxgb4.h"
#include "t4_regs.h"
#include "t4fw_api.h"

/**
 * t4_wait_op_done_val - wait until an operation is completed
 * @adapter: the adapter performing the operation
 * @reg: the register to check for completion
 * @mask: a single-bit field within @reg that indicates completion
 * @polarity: the value of the field when the operation is completed
 * @attempts: number of check iterations
 * @delay: delay in usecs between iterations
 * @valp: where to store the value of the register at completion time
 *
 * Wait until an operation is completed by checking a bit in a register
 * up to @attempts times. If @valp is not NULL the value of the register
 * at the time it indicated completion is stored there. Returns 0 if the
 * operation completes and -EAGAIN otherwise.
 */
static int t4_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
			       int polarity, int attempts, int delay, u32 *valp)
{
	while (1) {
		u32 val = t4_read_reg(adapter, reg);

		if (!!(val & mask) == polarity) {
			if (valp)
				*valp = val;
			return 0;
		}
		if (--attempts == 0)
			return -EAGAIN;
		if (delay)
			udelay(delay);
	}
}

static inline int t4_wait_op_done(struct adapter *adapter, int reg, u32 mask,
				  int polarity, int attempts, int delay)
{
	return t4_wait_op_done_val(adapter, reg, mask, polarity, attempts,
				   delay, NULL);
}
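
/* Usage sketch (illustrative, not part of the driver): callers typically
 * start a hardware operation and then poll for a busy bit to deassert.
 * sf1_read()/sf1_write() later in this file do exactly that for the serial
 * flash:
 *
 *	ret = t4_wait_op_done(adapter, SF_OP, SF_BUSY, 0, SF_ATTEMPTS, 5);
 *	if (ret)
 *		return ret;
 *
 * A non-zero return here is -EAGAIN, meaning SF_BUSY never reached the
 * requested polarity (0) within SF_ATTEMPTS polls spaced 5 usecs apart.
 */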

/**
 * t4_set_reg_field - set a register field to a value
 * @adapter: the adapter to program
 * @addr: the register address
 * @mask: specifies the portion of the register to modify
 * @val: the new value for the register field
 *
 * Sets a register field specified by the supplied mask to the
 * given value.
 */
void t4_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask,
		      u32 val)
{
	u32 v = t4_read_reg(adapter, addr) & ~mask;

	t4_write_reg(adapter, addr, v | val);
	(void) t4_read_reg(adapter, addr);                  /* flush */
}
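
/* Usage sketch (illustrative; CTRL_REG, FIELD_MASK and FIELD_V() are
 * made-up stand-ins for a real register and its field macros from
 * t4_regs.h): update a single field with a read-modify-write, leaving
 * the register's other bits untouched:
 *
 *	t4_set_reg_field(adap, CTRL_REG, FIELD_MASK, FIELD_V(new_val));
 *
 * Note that @val must already be positioned within @mask; this helper
 * masks and ORs but does not shift.
 */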

/**
 * t4_read_indirect - read indirectly addressed registers
 * @adap: the adapter
 * @addr_reg: register holding the indirect address
 * @data_reg: register holding the value of the indirect register
 * @vals: where the read register values are stored
 * @nregs: how many indirect registers to read
 * @start_idx: index of first indirect register to read
 *
 * Reads registers that are accessed indirectly through an address/data
 * register pair.
 */
void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
		      unsigned int data_reg, u32 *vals,
		      unsigned int nregs, unsigned int start_idx)
{
	while (nregs--) {
		t4_write_reg(adap, addr_reg, start_idx);
		*vals++ = t4_read_reg(adap, data_reg);
		start_idx++;
	}
}

/**
 * t4_write_indirect - write indirectly addressed registers
 * @adap: the adapter
 * @addr_reg: register holding the indirect addresses
 * @data_reg: register holding the value for the indirect registers
 * @vals: values to write
 * @nregs: how many indirect registers to write
 * @start_idx: address of first indirect register to write
 *
 * Writes a sequential block of registers that are accessed indirectly
 * through an address/data register pair.
 */
void t4_write_indirect(struct adapter *adap, unsigned int addr_reg,
		       unsigned int data_reg, const u32 *vals,
		       unsigned int nregs, unsigned int start_idx)
{
	while (nregs--) {
		t4_write_reg(adap, addr_reg, start_idx++);
		t4_write_reg(adap, data_reg, *vals++);
	}
}
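
/* Usage sketch (illustrative; ADDR_REG/DATA_REG are stand-ins for a real
 * address/data pair such as TP's PIO registers): read 16 consecutive
 * indirect registers starting at index 0:
 *
 *	u32 vals[16];
 *
 *	t4_read_indirect(adap, ADDR_REG, DATA_REG, vals, ARRAY_SIZE(vals), 0);
 *
 * Nothing here serializes use of the address register, so callers sharing
 * a pair must provide their own locking.
 */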

/*
 * Read a 32-bit PCI Configuration Space register via the PCI-E backdoor
 * mechanism. This guarantees that we get the real value even if we're
 * operating within a Virtual Machine and the Hypervisor is trapping our
 * Configuration Space accesses.
 */
void t4_hw_pci_read_cfg4(struct adapter *adap, int reg, u32 *val)
{
	u32 req = ENABLE | FUNCTION(adap->fn) | reg;

	if (is_t4(adap->params.chip))
		req |= F_LOCALCFG;

	t4_write_reg(adap, PCIE_CFG_SPACE_REQ, req);
	*val = t4_read_reg(adap, PCIE_CFG_SPACE_DATA);

	/* Reset ENABLE to 0 so reads of PCIE_CFG_SPACE_DATA won't cause a
	 * Configuration Space read. (None of the other fields matter when
	 * ENABLE is 0 so a simple register write is easier than a
	 * read-modify-write via t4_set_reg_field().)
	 */
	t4_write_reg(adap, PCIE_CFG_SPACE_REQ, 0);
}

/*
 * t4_report_fw_error - report firmware error
 * @adap: the adapter
 *
 * The adapter firmware can indicate error conditions to the host.
 * If the firmware has indicated an error, print out the reason for
 * the firmware error.
 */
static void t4_report_fw_error(struct adapter *adap)
{
	static const char *const reason[] = {
		"Crash",                        /* PCIE_FW_EVAL_CRASH */
		"During Device Preparation",    /* PCIE_FW_EVAL_PREP */
		"During Device Configuration",  /* PCIE_FW_EVAL_CONF */
		"During Device Initialization", /* PCIE_FW_EVAL_INIT */
		"Unexpected Event",             /* PCIE_FW_EVAL_UNEXPECTEDEVENT */
		"Insufficient Airflow",         /* PCIE_FW_EVAL_OVERHEAT */
		"Device Shutdown",              /* PCIE_FW_EVAL_DEVICESHUTDOWN */
		"Reserved",                     /* reserved */
	};
	u32 pcie_fw;

	pcie_fw = t4_read_reg(adap, MA_PCIE_FW);
	if (pcie_fw & FW_PCIE_FW_ERR)
		dev_err(adap->pdev_dev, "Firmware reports adapter error: %s\n",
			reason[FW_PCIE_FW_EVAL_GET(pcie_fw)]);
}

/*
 * Get the reply to a mailbox command and store it in @rpl in big-endian order.
 */
static void get_mbox_rpl(struct adapter *adap, __be64 *rpl, int nflit,
			 u32 mbox_addr)
{
	for ( ; nflit; nflit--, mbox_addr += 8)
		*rpl++ = cpu_to_be64(t4_read_reg64(adap, mbox_addr));
}

/*
 * Handle a FW assertion reported in a mailbox.
 */
static void fw_asrt(struct adapter *adap, u32 mbox_addr)
{
	struct fw_debug_cmd asrt;

	get_mbox_rpl(adap, (__be64 *)&asrt, sizeof(asrt) / 8, mbox_addr);
	dev_alert(adap->pdev_dev,
		  "FW assertion at %.16s:%u, val0 %#x, val1 %#x\n",
		  asrt.u.assert.filename_0_7, ntohl(asrt.u.assert.line),
		  ntohl(asrt.u.assert.x), ntohl(asrt.u.assert.y));
}

static void dump_mbox(struct adapter *adap, int mbox, u32 data_reg)
{
	dev_err(adap->pdev_dev,
		"mbox %d: %llx %llx %llx %llx %llx %llx %llx %llx\n", mbox,
		(unsigned long long)t4_read_reg64(adap, data_reg),
		(unsigned long long)t4_read_reg64(adap, data_reg + 8),
		(unsigned long long)t4_read_reg64(adap, data_reg + 16),
		(unsigned long long)t4_read_reg64(adap, data_reg + 24),
		(unsigned long long)t4_read_reg64(adap, data_reg + 32),
		(unsigned long long)t4_read_reg64(adap, data_reg + 40),
		(unsigned long long)t4_read_reg64(adap, data_reg + 48),
		(unsigned long long)t4_read_reg64(adap, data_reg + 56));
}

/**
 * t4_wr_mbox_meat - send a command to FW through the given mailbox
 * @adap: the adapter
 * @mbox: index of the mailbox to use
 * @cmd: the command to write
 * @size: command length in bytes
 * @rpl: where to optionally store the reply
 * @sleep_ok: if true we may sleep while awaiting command completion
 *
 * Sends the given command to FW through the selected mailbox and waits
 * for the FW to execute the command. If @rpl is not %NULL it is used to
 * store the FW's reply to the command. The command and its optional
 * reply are of the same length. FW can take up to %FW_CMD_MAX_TIMEOUT ms
 * to respond. @sleep_ok determines whether we may sleep while awaiting
 * the response. If sleeping is allowed we use progressive backoff,
 * otherwise we spin.
 *
 * The return value is 0 on success or a negative errno on failure. A
 * failure can happen either because we are not able to execute the
 * command or FW executes it but signals an error. In the latter case
 * the return value is the error code indicated by FW (negated).
 */
int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
		    void *rpl, bool sleep_ok)
{
	static const int delay[] = {
		1, 1, 3, 5, 10, 10, 20, 50, 100, 200
	};

	u32 v;
	u64 res;
	int i, ms, delay_idx;
	const __be64 *p = cmd;
	u32 data_reg = PF_REG(mbox, CIM_PF_MAILBOX_DATA);
	u32 ctl_reg = PF_REG(mbox, CIM_PF_MAILBOX_CTRL);

	if ((size & 15) || size > MBOX_LEN)
		return -EINVAL;

	/*
	 * If the device is off-line, as in EEH, commands will time out.
	 * Fail them early so we don't waste time waiting.
	 */
	if (adap->pdev->error_state != pci_channel_io_normal)
		return -EIO;

	v = MBOWNER_GET(t4_read_reg(adap, ctl_reg));
	for (i = 0; v == MBOX_OWNER_NONE && i < 3; i++)
		v = MBOWNER_GET(t4_read_reg(adap, ctl_reg));

	if (v != MBOX_OWNER_DRV)
		return v ? -EBUSY : -ETIMEDOUT;

	for (i = 0; i < size; i += 8)
		t4_write_reg64(adap, data_reg + i, be64_to_cpu(*p++));

	t4_write_reg(adap, ctl_reg, MBMSGVALID | MBOWNER(MBOX_OWNER_FW));
	t4_read_reg(adap, ctl_reg);          /* flush write */

	delay_idx = 0;
	ms = delay[0];

	for (i = 0; i < FW_CMD_MAX_TIMEOUT; i += ms) {
		if (sleep_ok) {
			ms = delay[delay_idx];  /* last element may repeat */
			if (delay_idx < ARRAY_SIZE(delay) - 1)
				delay_idx++;
			msleep(ms);
		} else
			mdelay(ms);

		v = t4_read_reg(adap, ctl_reg);
		if (MBOWNER_GET(v) == MBOX_OWNER_DRV) {
			if (!(v & MBMSGVALID)) {
				t4_write_reg(adap, ctl_reg, 0);
				continue;
			}

			res = t4_read_reg64(adap, data_reg);
			if (FW_CMD_OP_G(res >> 32) == FW_DEBUG_CMD) {
				fw_asrt(adap, data_reg);
				res = FW_CMD_RETVAL_V(EIO);
			} else if (rpl) {
				get_mbox_rpl(adap, rpl, size / 8, data_reg);
			}

			if (FW_CMD_RETVAL_G((int)res))
				dump_mbox(adap, mbox, data_reg);
			t4_write_reg(adap, ctl_reg, 0);
			return -FW_CMD_RETVAL_G((int)res);
		}
	}

	dump_mbox(adap, mbox, data_reg);
	dev_err(adap->pdev_dev, "command %#x in mailbox %d timed out\n",
		*(const u8 *)cmd, mbox);
	t4_report_fw_error(adap);
	return -ETIMEDOUT;
}
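
/* Usage sketch (illustrative): firmware commands are big-endian structures
 * from t4fw_api.h, usually sent via the t4_wr_mbox() wrapper (this routine
 * with sleep_ok = true). A full-chip RESET, similar to what t4_fw_reset()
 * elsewhere in the driver sends, looks like:
 *
 *	struct fw_reset_cmd c;
 *
 *	memset(&c, 0, sizeof(c));
 *	INIT_CMD(c, RESET, WRITE);
 *	c.val = htonl(PIORST | PIORSTMODE);
 *	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
 *
 * Pass a non-NULL @rpl of the same size as the command when the reply
 * payload is needed.
 */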

/**
 * t4_mc_read - read from MC through backdoor accesses
 * @adap: the adapter
 * @idx: which MC to access
 * @addr: address of first byte requested
 * @data: 64 bytes of data containing the requested address
 * @ecc: where to store the corresponding 64-bit ECC word
 *
 * Read 64 bytes of data from MC starting at a 64-byte-aligned address
 * that covers the requested address @addr. If @ecc is not %NULL it
 * is assigned the 64-bit ECC word for the read data.
 */
int t4_mc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
{
	int i;
	u32 mc_bist_cmd, mc_bist_cmd_addr, mc_bist_cmd_len;
	u32 mc_bist_status_rdata, mc_bist_data_pattern;

	if (is_t4(adap->params.chip)) {
		mc_bist_cmd = MC_BIST_CMD;
		mc_bist_cmd_addr = MC_BIST_CMD_ADDR;
		mc_bist_cmd_len = MC_BIST_CMD_LEN;
		mc_bist_status_rdata = MC_BIST_STATUS_RDATA;
		mc_bist_data_pattern = MC_BIST_DATA_PATTERN;
	} else {
		mc_bist_cmd = MC_REG(MC_P_BIST_CMD, idx);
		mc_bist_cmd_addr = MC_REG(MC_P_BIST_CMD_ADDR, idx);
		mc_bist_cmd_len = MC_REG(MC_P_BIST_CMD_LEN, idx);
		mc_bist_status_rdata = MC_REG(MC_P_BIST_STATUS_RDATA, idx);
		mc_bist_data_pattern = MC_REG(MC_P_BIST_DATA_PATTERN, idx);
	}

	if (t4_read_reg(adap, mc_bist_cmd) & START_BIST)
		return -EBUSY;
	t4_write_reg(adap, mc_bist_cmd_addr, addr & ~0x3fU);
	t4_write_reg(adap, mc_bist_cmd_len, 64);
	t4_write_reg(adap, mc_bist_data_pattern, 0xc);
	t4_write_reg(adap, mc_bist_cmd, BIST_OPCODE(1) | START_BIST |
		     BIST_CMD_GAP(1));
	i = t4_wait_op_done(adap, mc_bist_cmd, START_BIST, 0, 10, 1);
	if (i)
		return i;

#define MC_DATA(i) MC_BIST_STATUS_REG(mc_bist_status_rdata, i)

	for (i = 15; i >= 0; i--)
		*data++ = htonl(t4_read_reg(adap, MC_DATA(i)));
	if (ecc)
		*ecc = t4_read_reg64(adap, MC_DATA(16));
#undef MC_DATA
	return 0;
}

/**
 * t4_edc_read - read from EDC through backdoor accesses
 * @adap: the adapter
 * @idx: which EDC to access
 * @addr: address of first byte requested
 * @data: 64 bytes of data containing the requested address
 * @ecc: where to store the corresponding 64-bit ECC word
 *
 * Read 64 bytes of data from EDC starting at a 64-byte-aligned address
 * that covers the requested address @addr. If @ecc is not %NULL it
 * is assigned the 64-bit ECC word for the read data.
 */
int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
{
	int i;
	u32 edc_bist_cmd, edc_bist_cmd_addr, edc_bist_cmd_len;
	u32 edc_bist_cmd_data_pattern, edc_bist_status_rdata;

	if (is_t4(adap->params.chip)) {
		edc_bist_cmd = EDC_REG(EDC_BIST_CMD, idx);
		edc_bist_cmd_addr = EDC_REG(EDC_BIST_CMD_ADDR, idx);
		edc_bist_cmd_len = EDC_REG(EDC_BIST_CMD_LEN, idx);
		edc_bist_cmd_data_pattern = EDC_REG(EDC_BIST_DATA_PATTERN,
						    idx);
		edc_bist_status_rdata = EDC_REG(EDC_BIST_STATUS_RDATA,
						idx);
	} else {
		edc_bist_cmd = EDC_REG_T5(EDC_H_BIST_CMD, idx);
		edc_bist_cmd_addr = EDC_REG_T5(EDC_H_BIST_CMD_ADDR, idx);
		edc_bist_cmd_len = EDC_REG_T5(EDC_H_BIST_CMD_LEN, idx);
		edc_bist_cmd_data_pattern =
			EDC_REG_T5(EDC_H_BIST_DATA_PATTERN, idx);
		edc_bist_status_rdata =
			EDC_REG_T5(EDC_H_BIST_STATUS_RDATA, idx);
	}

	if (t4_read_reg(adap, edc_bist_cmd) & START_BIST)
		return -EBUSY;
	t4_write_reg(adap, edc_bist_cmd_addr, addr & ~0x3fU);
	t4_write_reg(adap, edc_bist_cmd_len, 64);
	t4_write_reg(adap, edc_bist_cmd_data_pattern, 0xc);
	t4_write_reg(adap, edc_bist_cmd,
		     BIST_OPCODE(1) | BIST_CMD_GAP(1) | START_BIST);
	i = t4_wait_op_done(adap, edc_bist_cmd, START_BIST, 0, 10, 1);
	if (i)
		return i;

#define EDC_DATA(i) (EDC_BIST_STATUS_REG(edc_bist_status_rdata, i))

	for (i = 15; i >= 0; i--)
		*data++ = htonl(t4_read_reg(adap, EDC_DATA(i)));
	if (ecc)
		*ecc = t4_read_reg64(adap, EDC_DATA(16));
#undef EDC_DATA
	return 0;
}
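
/* Usage sketch (illustrative): both backdoor readers return one 64-byte,
 * 64-byte-aligned block, so a diagnostic path wanting the 32-bit word at
 * an arbitrary EDC address might mask the address down and index into the
 * result:
 *
 *	__be32 data[16];
 *	u32 word;
 *
 *	ret = t4_edc_read(adap, 0, addr & ~0x3f, data, NULL);
 *	if (!ret)
 *		word = ntohl(data[(addr & 0x3f) / 4]);
 *
 * Pass a non-NULL final argument to collect the ECC word as well.
 */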

/**
 * t4_memory_rw - read/write EDC 0, EDC 1 or MC via PCIE memory window
 * @adap: the adapter
 * @win: PCI-E Memory Window to use
 * @mtype: memory type: MEM_EDC0, MEM_EDC1 or MEM_MC
 * @addr: address within indicated memory type
 * @len: amount of memory to transfer
 * @buf: host memory buffer
 * @dir: direction of transfer T4_MEMORY_READ (1) or T4_MEMORY_WRITE (0)
 *
 * Reads/writes an [almost] arbitrary memory region in the firmware: the
 * firmware memory address and host buffer must be aligned on 32-bit
 * boundaries; the length may be arbitrary. The memory is transferred as
 * a raw byte sequence from/to the firmware's memory. If this memory
 * contains data structures which contain multi-byte integers, it's the
 * caller's responsibility to perform appropriate byte order conversions.
 */
int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr,
		 u32 len, __be32 *buf, int dir)
{
	u32 pos, offset, resid, memoffset;
	u32 edc_size, mc_size, win_pf, mem_reg, mem_aperture, mem_base;

	/* Argument sanity checks ...
	 */
	if (addr & 0x3)
		return -EINVAL;

	/* It's convenient to be able to handle lengths which aren't a
	 * multiple of 32-bits because we often end up transferring files to
	 * the firmware. So we'll handle that by normalizing the length here
	 * and then handling any residual transfer at the end.
	 */
	resid = len & 0x3;
	len -= resid;

	/* Offset into the region of memory which is being accessed
	 * MEM_EDC0 = 0
	 * MEM_EDC1 = 1
	 * MEM_MC   = 2 -- T4
	 * MEM_MC0  = 2 -- For T5
	 * MEM_MC1  = 3 -- For T5
	 */
	edc_size = EDRAM0_SIZE_G(t4_read_reg(adap, MA_EDRAM0_BAR_A));
	if (mtype != MEM_MC1)
		memoffset = (mtype * (edc_size * 1024 * 1024));
	else {
		mc_size = EXT_MEM0_SIZE_G(t4_read_reg(adap,
						      MA_EXT_MEMORY1_BAR_A));
		memoffset = (MEM_MC0 * edc_size + mc_size) * 1024 * 1024;
	}

	/* Determine the PCIE_MEM_ACCESS_OFFSET */
	addr = addr + memoffset;

	/* Each PCI-E Memory Window is programmed with a window size -- or
	 * "aperture" -- which controls the granularity of its mapping onto
	 * adapter memory. We need to grab that aperture in order to know
	 * how to use the specified window. The window is also programmed
	 * with the base address of the Memory Window in BAR0's address
	 * space. For T4 this is an absolute PCI-E Bus Address. For T5
	 * the address is relative to BAR0.
	 */
	mem_reg = t4_read_reg(adap,
			      PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN,
						  win));
	mem_aperture = 1 << (GET_WINDOW(mem_reg) + 10);
	mem_base = GET_PCIEOFST(mem_reg) << 10;
	if (is_t4(adap->params.chip))
		mem_base -= adap->t4_bar0;
	win_pf = is_t4(adap->params.chip) ? 0 : V_PFNUM(adap->fn);

	/* Calculate our initial PCI-E Memory Window Position and Offset into
	 * that Window.
	 */
	pos = addr & ~(mem_aperture-1);
	offset = addr - pos;

	/* Set up initial PCI-E Memory Window to cover the start of our
	 * transfer. (Read it back to ensure that changes propagate before we
	 * attempt to use the new value.)
	 */
	t4_write_reg(adap,
		     PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, win),
		     pos | win_pf);
	t4_read_reg(adap,
		    PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, win));

	/* Transfer data to/from the adapter as long as there's an integral
	 * number of 32-bit transfers to complete.
	 */
	while (len > 0) {
		if (dir == T4_MEMORY_READ)
			*buf++ = (__force __be32) t4_read_reg(adap,
							      mem_base + offset);
		else
			t4_write_reg(adap, mem_base + offset,
				     (__force u32) *buf++);
		offset += sizeof(__be32);
		len -= sizeof(__be32);

		/* If we've reached the end of our current window aperture,
		 * move the PCI-E Memory Window on to the next. Note that
		 * doing this here after "len" may be 0 allows us to set up
		 * the PCI-E Memory Window for a possible final residual
		 * transfer below ...
		 */
		if (offset == mem_aperture) {
			pos += mem_aperture;
			offset = 0;
			t4_write_reg(adap,
				     PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET,
							 win), pos | win_pf);
			t4_read_reg(adap,
				    PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET,
							win));
		}
	}

	/* If the original transfer had a length which wasn't a multiple of
	 * 32-bits, now's where we need to finish off the transfer of the
	 * residual amount. The PCI-E Memory Window has already been moved
	 * above (if necessary) to cover this final transfer.
	 */
	if (resid) {
		union {
			__be32 word;
			char byte[4];
		} last;
		unsigned char *bp;
		int i;

		if (dir == T4_MEMORY_READ) {
			last.word = (__force __be32) t4_read_reg(adap,
								 mem_base + offset);
			for (bp = (unsigned char *)buf, i = resid; i < 4; i++)
				bp[i] = last.byte[i];
		} else {
			last.word = *buf;
			for (i = resid; i < 4; i++)
				last.byte[i] = 0;
			t4_write_reg(adap, mem_base + offset,
				     (__force u32) last.word);
		}
	}

	return 0;
}
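
/* Usage sketch (illustrative; cfg_addr, cfg_data and size are hypothetical
 * caller-supplied values): write a firmware configuration image into MC
 * through PCI-E memory window 2:
 *
 *	ret = t4_memory_rw(adap, 2, MEM_MC, cfg_addr, size,
 *			   (__be32 *)cfg_data, T4_MEMORY_WRITE);
 *
 * Data moves as a raw byte stream, so any multi-byte integers in the image
 * must already be in the byte order the firmware expects; the residual
 * handling above takes care of lengths that aren't multiples of 4.
 */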

#define EEPROM_STAT_ADDR      0x7bfc
#define VPD_BASE              0x400
#define VPD_BASE_OLD          0
#define VPD_LEN               1024
#define CHELSIO_VPD_UNIQUE_ID 0x82

/**
 * t4_seeprom_wp - enable/disable EEPROM write protection
 * @adapter: the adapter
 * @enable: whether to enable or disable write protection
 *
 * Enables or disables write protection on the serial EEPROM.
 */
int t4_seeprom_wp(struct adapter *adapter, bool enable)
{
	unsigned int v = enable ? 0xc : 0;
	int ret = pci_write_vpd(adapter->pdev, EEPROM_STAT_ADDR, 4, &v);
	return ret < 0 ? ret : 0;
}

/**
 * get_vpd_params - read VPD parameters from VPD EEPROM
 * @adapter: adapter to read
 * @p: where to store the parameters
 *
 * Reads card parameters stored in VPD EEPROM.
 */
int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
{
	u32 cclk_param, cclk_val;
	int i, ret, addr;
	int ec, sn, pn;
	u8 *vpd, csum;
	unsigned int vpdr_len, kw_offset, id_len;

	vpd = vmalloc(VPD_LEN);
	if (!vpd)
		return -ENOMEM;

	ret = pci_read_vpd(adapter->pdev, VPD_BASE, sizeof(u32), vpd);
	if (ret < 0)
		goto out;

	/* The VPD shall have a unique identifier specified by the PCI SIG.
	 * For chelsio adapters, the identifier is 0x82. The first byte of a VPD
	 * shall be CHELSIO_VPD_UNIQUE_ID (0x82). The VPD programming software
	 * is expected to automatically put this entry at the
	 * beginning of the VPD.
	 */
	addr = *vpd == CHELSIO_VPD_UNIQUE_ID ? VPD_BASE : VPD_BASE_OLD;

	ret = pci_read_vpd(adapter->pdev, addr, VPD_LEN, vpd);
	if (ret < 0)
		goto out;

	if (vpd[0] != PCI_VPD_LRDT_ID_STRING) {
		dev_err(adapter->pdev_dev, "missing VPD ID string\n");
		ret = -EINVAL;
		goto out;
	}

	id_len = pci_vpd_lrdt_size(vpd);
	if (id_len > ID_LEN)
		id_len = ID_LEN;

	i = pci_vpd_find_tag(vpd, 0, VPD_LEN, PCI_VPD_LRDT_RO_DATA);
	if (i < 0) {
		dev_err(adapter->pdev_dev, "missing VPD-R section\n");
		ret = -EINVAL;
		goto out;
	}

	vpdr_len = pci_vpd_lrdt_size(&vpd[i]);
	kw_offset = i + PCI_VPD_LRDT_TAG_SIZE;
	if (vpdr_len + kw_offset > VPD_LEN) {
		dev_err(adapter->pdev_dev, "bad VPD-R length %u\n", vpdr_len);
		ret = -EINVAL;
		goto out;
	}

#define FIND_VPD_KW(var, name) do { \
	var = pci_vpd_find_info_keyword(vpd, kw_offset, vpdr_len, name); \
	if (var < 0) { \
		dev_err(adapter->pdev_dev, "missing VPD keyword " name "\n"); \
		ret = -EINVAL; \
		goto out; \
	} \
	var += PCI_VPD_INFO_FLD_HDR_SIZE; \
} while (0)

	FIND_VPD_KW(i, "RV");
	for (csum = 0; i >= 0; i--)
		csum += vpd[i];

	if (csum) {
		dev_err(adapter->pdev_dev,
			"corrupted VPD EEPROM, actual csum %u\n", csum);
		ret = -EINVAL;
		goto out;
	}

	FIND_VPD_KW(ec, "EC");
	FIND_VPD_KW(sn, "SN");
	FIND_VPD_KW(pn, "PN");
#undef FIND_VPD_KW

	memcpy(p->id, vpd + PCI_VPD_LRDT_TAG_SIZE, id_len);
	strim(p->id);
	memcpy(p->ec, vpd + ec, EC_LEN);
	strim(p->ec);
	i = pci_vpd_info_field_size(vpd + sn - PCI_VPD_INFO_FLD_HDR_SIZE);
	memcpy(p->sn, vpd + sn, min(i, SERNUM_LEN));
	strim(p->sn);
	i = pci_vpd_info_field_size(vpd + pn - PCI_VPD_INFO_FLD_HDR_SIZE);
	memcpy(p->pn, vpd + pn, min(i, PN_LEN));
	strim(p->pn);

	/*
	 * Ask firmware for the Core Clock since it knows how to translate the
	 * Reference Clock ('V2') VPD field into a Core Clock value ...
	 */
	cclk_param = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
		      FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CCLK));
	ret = t4_query_params(adapter, adapter->mbox, 0, 0,
			      1, &cclk_param, &cclk_val);

out:
	vfree(vpd);
	if (ret)
		return ret;
	p->cclk = cclk_val;

	return 0;
}

/* serial flash and firmware constants */
enum {
	SF_ATTEMPTS = 10,             /* max retries for SF operations */

	/* flash command opcodes */
	SF_PROG_PAGE    = 2,          /* program page */
	SF_WR_DISABLE   = 4,          /* disable writes */
	SF_RD_STATUS    = 5,          /* read status register */
	SF_WR_ENABLE    = 6,          /* enable writes */
	SF_RD_DATA_FAST = 0xb,        /* read flash */
	SF_RD_ID        = 0x9f,       /* read ID */
	SF_ERASE_SECTOR = 0xd8,       /* erase sector */

	FW_MAX_SIZE = 16 * SF_SEC_SIZE,
};

/**
 * sf1_read - read data from the serial flash
 * @adapter: the adapter
 * @byte_cnt: number of bytes to read
 * @cont: whether another operation will be chained
 * @lock: whether to lock SF for PL access only
 * @valp: where to store the read data
 *
 * Reads up to 4 bytes of data from the serial flash. The location of
 * the read needs to be specified prior to calling this by issuing the
 * appropriate commands to the serial flash.
 */
static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
		    int lock, u32 *valp)
{
	int ret;

	if (!byte_cnt || byte_cnt > 4)
		return -EINVAL;
	if (t4_read_reg(adapter, SF_OP) & SF_BUSY)
		return -EBUSY;
	cont = cont ? SF_CONT : 0;
	lock = lock ? SF_LOCK : 0;
	t4_write_reg(adapter, SF_OP, lock | cont | BYTECNT(byte_cnt - 1));
	ret = t4_wait_op_done(adapter, SF_OP, SF_BUSY, 0, SF_ATTEMPTS, 5);
	if (!ret)
		*valp = t4_read_reg(adapter, SF_DATA);
	return ret;
}

/**
 * sf1_write - write data to the serial flash
 * @adapter: the adapter
 * @byte_cnt: number of bytes to write
 * @cont: whether another operation will be chained
 * @lock: whether to lock SF for PL access only
 * @val: value to write
 *
 * Writes up to 4 bytes of data to the serial flash. The location of
 * the write needs to be specified prior to calling this by issuing the
 * appropriate commands to the serial flash.
 */
static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont,
		     int lock, u32 val)
{
	if (!byte_cnt || byte_cnt > 4)
		return -EINVAL;
	if (t4_read_reg(adapter, SF_OP) & SF_BUSY)
		return -EBUSY;
	cont = cont ? SF_CONT : 0;
	lock = lock ? SF_LOCK : 0;
	t4_write_reg(adapter, SF_DATA, val);
	t4_write_reg(adapter, SF_OP, lock |
		     cont | BYTECNT(byte_cnt - 1) | OP_WR);
	return t4_wait_op_done(adapter, SF_OP, SF_BUSY, 0, SF_ATTEMPTS, 5);
}

/**
 * flash_wait_op - wait for a flash operation to complete
 * @adapter: the adapter
 * @attempts: max number of polls of the status register
 * @delay: delay between polls in ms
 *
 * Wait for a flash operation to complete by polling the status register.
 */
static int flash_wait_op(struct adapter *adapter, int attempts, int delay)
{
	int ret;
	u32 status;

	while (1) {
		if ((ret = sf1_write(adapter, 1, 1, 1, SF_RD_STATUS)) != 0 ||
		    (ret = sf1_read(adapter, 1, 0, 1, &status)) != 0)
			return ret;
		if (!(status & 1))
			return 0;
		if (--attempts == 0)
			return -EAGAIN;
		if (delay)
			msleep(delay);
	}
}

/**
 * t4_read_flash - read words from serial flash
 * @adapter: the adapter
 * @addr: the start address for the read
 * @nwords: how many 32-bit words to read
 * @data: where to store the read data
 * @byte_oriented: whether to store data as bytes or as words
 *
 * Read the specified number of 32-bit words from the serial flash.
 * If @byte_oriented is set the read data is stored as a byte array
 * (i.e., big-endian), otherwise as 32-bit words in the platform's
 * natural endianness.
 */
static int t4_read_flash(struct adapter *adapter, unsigned int addr,
			 unsigned int nwords, u32 *data, int byte_oriented)
{
	int ret;

	if (addr + nwords * sizeof(u32) > adapter->params.sf_size || (addr & 3))
		return -EINVAL;

	addr = swab32(addr) | SF_RD_DATA_FAST;

	if ((ret = sf1_write(adapter, 4, 1, 0, addr)) != 0 ||
	    (ret = sf1_read(adapter, 1, 1, 0, data)) != 0)
		return ret;

	for ( ; nwords; nwords--, data++) {
		ret = sf1_read(adapter, 4, nwords > 1, nwords == 1, data);
		if (nwords == 1)
			t4_write_reg(adapter, SF_OP, 0);    /* unlock SF */
		if (ret)
			return ret;
		if (byte_oriented)
			*data = (__force __u32) (htonl(*data));
	}
	return 0;
}
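
/* Usage sketch (illustrative): @byte_oriented selects how flash contents
 * land in memory. Firmware images are parsed byte-wise, so they are read
 * with byte_oriented = 1; a single numeric field is read with 0 so the
 * value arrives in host order. t4_get_fw_version() below is exactly the
 * latter:
 *
 *	u32 vers;
 *
 *	ret = t4_read_flash(adapter, FLASH_FW_START +
 *			    offsetof(struct fw_hdr, fw_ver), 1, &vers, 0);
 */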

/**
 * t4_write_flash - write up to a page of data to the serial flash
 * @adapter: the adapter
 * @addr: the start address to write
 * @n: length of data to write in bytes
 * @data: the data to write
 *
 * Writes up to a page of data (256 bytes) to the serial flash starting
 * at the given address. All the data must be written to the same page.
 */
static int t4_write_flash(struct adapter *adapter, unsigned int addr,
			  unsigned int n, const u8 *data)
{
	int ret;
	u32 buf[64];
	unsigned int i, c, left, val, offset = addr & 0xff;

	if (addr >= adapter->params.sf_size || offset + n > SF_PAGE_SIZE)
		return -EINVAL;

	val = swab32(addr) | SF_PROG_PAGE;

	if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
	    (ret = sf1_write(adapter, 4, 1, 1, val)) != 0)
		goto unlock;

	for (left = n; left; left -= c) {
		c = min(left, 4U);
		for (val = 0, i = 0; i < c; ++i)
			val = (val << 8) + *data++;

		ret = sf1_write(adapter, c, c != left, 1, val);
		if (ret)
			goto unlock;
	}
	ret = flash_wait_op(adapter, 8, 1);
	if (ret)
		goto unlock;

	t4_write_reg(adapter, SF_OP, 0);    /* unlock SF */

	/* Read the page to verify the write succeeded */
	ret = t4_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf, 1);
	if (ret)
		return ret;

	if (memcmp(data - n, (u8 *)buf + offset, n)) {
		dev_err(adapter->pdev_dev,
			"failed to correctly write the flash page at %#x\n",
			addr);
		return -EIO;
	}
	return 0;

unlock:
	t4_write_reg(adapter, SF_OP, 0);    /* unlock SF */
	return ret;
}

/**
 * t4_get_fw_version - read the firmware version
 * @adapter: the adapter
 * @vers: where to place the version
 *
 * Reads the FW version from flash.
 */
int t4_get_fw_version(struct adapter *adapter, u32 *vers)
{
	return t4_read_flash(adapter, FLASH_FW_START +
			     offsetof(struct fw_hdr, fw_ver), 1,
			     vers, 0);
}

/**
 * t4_get_tp_version - read the TP microcode version
 * @adapter: the adapter
 * @vers: where to place the version
 *
 * Reads the TP microcode version from flash.
 */
int t4_get_tp_version(struct adapter *adapter, u32 *vers)
{
	return t4_read_flash(adapter, FLASH_FW_START +
			     offsetof(struct fw_hdr, tp_microcode_ver),
			     1, vers, 0);
}

/* Is the given firmware API compatible with the one the driver was compiled
 * with?
 */
static int fw_compatible(const struct fw_hdr *hdr1, const struct fw_hdr *hdr2)
{
	/* short circuit if it's the exact same firmware version */
	if (hdr1->chip == hdr2->chip && hdr1->fw_ver == hdr2->fw_ver)
		return 1;

#define SAME_INTF(x) (hdr1->intfver_##x == hdr2->intfver_##x)
	if (hdr1->chip == hdr2->chip && SAME_INTF(nic) && SAME_INTF(vnic) &&
	    SAME_INTF(ri) && SAME_INTF(iscsi) && SAME_INTF(fcoe))
		return 1;
#undef SAME_INTF

	return 0;
}

/* The firmware in the filesystem is usable, but should it be installed?
 * This routine explains itself in detail if it indicates the filesystem
 * firmware should be installed.
 */
static int should_install_fs_fw(struct adapter *adap, int card_fw_usable,
				int k, int c)
{
	const char *reason;

	if (!card_fw_usable) {
		reason = "incompatible or unusable";
		goto install;
	}

	if (k > c) {
		reason = "older than the version supported with this driver";
		goto install;
	}

	return 0;

install:
	dev_err(adap->pdev_dev, "firmware on card (%u.%u.%u.%u) is %s, "
		"installing firmware %u.%u.%u.%u on card.\n",
		FW_HDR_FW_VER_MAJOR_GET(c), FW_HDR_FW_VER_MINOR_GET(c),
		FW_HDR_FW_VER_MICRO_GET(c), FW_HDR_FW_VER_BUILD_GET(c), reason,
		FW_HDR_FW_VER_MAJOR_GET(k), FW_HDR_FW_VER_MINOR_GET(k),
		FW_HDR_FW_VER_MICRO_GET(k), FW_HDR_FW_VER_BUILD_GET(k));

	return 1;
}

int t4_prep_fw(struct adapter *adap, struct fw_info *fw_info,
	       const u8 *fw_data, unsigned int fw_size,
	       struct fw_hdr *card_fw, enum dev_state state,
	       int *reset)
{
	int ret, card_fw_usable, fs_fw_usable;
	const struct fw_hdr *fs_fw;
	const struct fw_hdr *drv_fw;

	drv_fw = &fw_info->fw_hdr;

	/* Read the header of the firmware on the card */
	ret = -t4_read_flash(adap, FLASH_FW_START,
			     sizeof(*card_fw) / sizeof(uint32_t),
			     (uint32_t *)card_fw, 1);
	if (ret == 0) {
		card_fw_usable = fw_compatible(drv_fw, (const void *)card_fw);
	} else {
		dev_err(adap->pdev_dev,
			"Unable to read card's firmware header: %d\n", ret);
		card_fw_usable = 0;
	}

	if (fw_data != NULL) {
		fs_fw = (const void *)fw_data;
		fs_fw_usable = fw_compatible(drv_fw, fs_fw);
	} else {
		fs_fw = NULL;
		fs_fw_usable = 0;
	}

	if (card_fw_usable && card_fw->fw_ver == drv_fw->fw_ver &&
	    (!fs_fw_usable || fs_fw->fw_ver == drv_fw->fw_ver)) {
		/* Common case: the firmware on the card is an exact match and
		 * the filesystem one is an exact match too, or the filesystem
		 * one is absent/incompatible.
		 */
	} else if (fs_fw_usable && state == DEV_STATE_UNINIT &&
		   should_install_fs_fw(adap, card_fw_usable,
					be32_to_cpu(fs_fw->fw_ver),
					be32_to_cpu(card_fw->fw_ver))) {
		ret = -t4_fw_upgrade(adap, adap->mbox, fw_data,
				     fw_size, 0);
		if (ret != 0) {
			dev_err(adap->pdev_dev,
				"failed to install firmware: %d\n", ret);
			goto bye;
		}

		/* Installed successfully, update the cached header too. */
		memcpy(card_fw, fs_fw, sizeof(*card_fw));
		card_fw_usable = 1;
		*reset = 0;	/* already reset as part of load_fw */
	}

	if (!card_fw_usable) {
		uint32_t d, c, k;

		d = be32_to_cpu(drv_fw->fw_ver);
		c = be32_to_cpu(card_fw->fw_ver);
		k = fs_fw ? be32_to_cpu(fs_fw->fw_ver) : 0;

		dev_err(adap->pdev_dev, "Cannot find a usable firmware: "
			"chip state %d, "
			"driver compiled with %d.%d.%d.%d, "
			"card has %d.%d.%d.%d, filesystem has %d.%d.%d.%d\n",
			state,
			FW_HDR_FW_VER_MAJOR_GET(d), FW_HDR_FW_VER_MINOR_GET(d),
			FW_HDR_FW_VER_MICRO_GET(d), FW_HDR_FW_VER_BUILD_GET(d),
			FW_HDR_FW_VER_MAJOR_GET(c), FW_HDR_FW_VER_MINOR_GET(c),
			FW_HDR_FW_VER_MICRO_GET(c), FW_HDR_FW_VER_BUILD_GET(c),
			FW_HDR_FW_VER_MAJOR_GET(k), FW_HDR_FW_VER_MINOR_GET(k),
			FW_HDR_FW_VER_MICRO_GET(k), FW_HDR_FW_VER_BUILD_GET(k));
		ret = EINVAL;
		goto bye;
	}

	/* We're using whatever's on the card and it's known to be good. */
	adap->params.fw_vers = be32_to_cpu(card_fw->fw_ver);
	adap->params.tp_vers = be32_to_cpu(card_fw->tp_microcode_ver);

bye:
	return ret;
}

/**
 * t4_flash_erase_sectors - erase a range of flash sectors
 * @adapter: the adapter
 * @start: the first sector to erase
 * @end: the last sector to erase
 *
 * Erases the sectors in the given inclusive range.
 */
static int t4_flash_erase_sectors(struct adapter *adapter, int start, int end)
{
	int ret = 0;

	if (end >= adapter->params.sf_nsec)
		return -EINVAL;

	while (start <= end) {
		if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
		    (ret = sf1_write(adapter, 4, 0, 1,
				     SF_ERASE_SECTOR | (start << 8))) != 0 ||
		    (ret = flash_wait_op(adapter, 14, 500)) != 0) {
			dev_err(adapter->pdev_dev,
				"erase of flash sector %d failed, error %d\n",
				start, ret);
			break;
		}
		start++;
	}
	t4_write_reg(adapter, SF_OP, 0);    /* unlock SF */
	return ret;
}

/**
 * t4_flash_cfg_addr - return the address of the flash configuration file
 * @adapter: the adapter
 *
 * Return the address within the flash where the Firmware Configuration
 * File is stored.
 */
unsigned int t4_flash_cfg_addr(struct adapter *adapter)
{
	if (adapter->params.sf_size == 0x100000)
		return FLASH_FPGA_CFG_START;
	else
		return FLASH_CFG_START;
}

/**
 * t4_load_fw - download firmware
 * @adap: the adapter
 * @fw_data: the firmware image to write
 * @size: image size
 *
 * Write the supplied firmware image to the card's serial flash.
 */
int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
{
	u32 csum;
	int ret, addr;
	unsigned int i;
	u8 first_page[SF_PAGE_SIZE];
	const __be32 *p = (const __be32 *)fw_data;
	const struct fw_hdr *hdr = (const struct fw_hdr *)fw_data;
	unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
	unsigned int fw_img_start = adap->params.sf_fw_start;
	unsigned int fw_start_sec = fw_img_start / sf_sec_size;

	if (!size) {
		dev_err(adap->pdev_dev, "FW image has no data\n");
		return -EINVAL;
	}
	if (size & 511) {
		dev_err(adap->pdev_dev,
			"FW image size not multiple of 512 bytes\n");
		return -EINVAL;
	}
	if (ntohs(hdr->len512) * 512 != size) {
		dev_err(adap->pdev_dev,
			"FW image size differs from size in FW header\n");
		return -EINVAL;
	}
	if (size > FW_MAX_SIZE) {
		dev_err(adap->pdev_dev, "FW image too large, max is %u bytes\n",
			FW_MAX_SIZE);
		return -EFBIG;
	}

	for (csum = 0, i = 0; i < size / sizeof(csum); i++)
		csum += ntohl(p[i]);

	if (csum != 0xffffffff) {
		dev_err(adap->pdev_dev,
			"corrupted firmware image, checksum %#x\n", csum);
		return -EINVAL;
	}

	i = DIV_ROUND_UP(size, sf_sec_size);        /* # of sectors spanned */
	ret = t4_flash_erase_sectors(adap, fw_start_sec, fw_start_sec + i - 1);
	if (ret)
		goto out;

	/*
	 * We write the correct version at the end so the driver can see a bad
	 * version if the FW write fails. Start by writing a copy of the
	 * first page with a bad version.
	 */
	memcpy(first_page, fw_data, SF_PAGE_SIZE);
	((struct fw_hdr *)first_page)->fw_ver = htonl(0xffffffff);
	ret = t4_write_flash(adap, fw_img_start, SF_PAGE_SIZE, first_page);
	if (ret)
		goto out;

	addr = fw_img_start;
	for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
		addr += SF_PAGE_SIZE;
		fw_data += SF_PAGE_SIZE;
		ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, fw_data);
		if (ret)
			goto out;
	}

	ret = t4_write_flash(adap,
			     fw_img_start + offsetof(struct fw_hdr, fw_ver),
			     sizeof(hdr->fw_ver), (const u8 *)&hdr->fw_ver);
out:
	if (ret)
		dev_err(adap->pdev_dev, "firmware download failed, error %d\n",
			ret);
	return ret;
}
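
/* Usage sketch (illustrative; fw_name is a hypothetical image name chosen
 * by the caller): a flash path can obtain an image with request_firmware()
 * and hand its payload straight to this routine:
 *
 *	const struct firmware *fw;
 *
 *	ret = request_firmware(&fw, fw_name, adap->pdev_dev);
 *	if (ret < 0)
 *		return ret;
 *	ret = t4_load_fw(adap, fw->data, fw->size);
 *	release_firmware(fw);
 *
 * The checks above (512-byte multiple, header length, checksum) are what
 * reject a mangled image before any flash sector is erased.
 */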

#define ADVERT_MASK (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G |\
		     FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_SPEED_40G | \
		     FW_PORT_CAP_ANEG)

/**
 * t4_link_start - apply link configuration to MAC/PHY
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @port: the port id
 * @lc: the requested link configuration
 *
 * Set up a port's MAC and PHY according to a desired link configuration.
 * - If the PHY can auto-negotiate first decide what to advertise, then
 *   enable/disable auto-negotiation as desired, and reset.
 * - If the PHY does not auto-negotiate just reset it.
 * - If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
 *   otherwise do it later based on the outcome of auto-negotiation.
 */
int t4_link_start(struct adapter *adap, unsigned int mbox, unsigned int port,
		  struct link_config *lc)
{
	struct fw_port_cmd c;
	unsigned int fc = 0, mdi = FW_PORT_MDI(FW_PORT_MDI_AUTO);

	lc->link_ok = 0;
	if (lc->requested_fc & PAUSE_RX)
		fc |= FW_PORT_CAP_FC_RX;
	if (lc->requested_fc & PAUSE_TX)
		fc |= FW_PORT_CAP_FC_TX;

	memset(&c, 0, sizeof(c));
	c.op_to_portid = htonl(FW_CMD_OP_V(FW_PORT_CMD) | FW_CMD_REQUEST_F |
			       FW_CMD_EXEC_F | FW_PORT_CMD_PORTID(port));
	c.action_to_len16 = htonl(FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
				  FW_LEN16(c));

	if (!(lc->supported & FW_PORT_CAP_ANEG)) {
		c.u.l1cfg.rcap = htonl((lc->supported & ADVERT_MASK) | fc);
		lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
	} else if (lc->autoneg == AUTONEG_DISABLE) {
		c.u.l1cfg.rcap = htonl(lc->requested_speed | fc | mdi);
		lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
	} else
		c.u.l1cfg.rcap = htonl(lc->advertising | fc | mdi);

	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}

/**
 * t4_restart_aneg - restart autonegotiation
 * @adap: the adapter
 * @mbox: mbox to use for the FW command
 * @port: the port id
 *
 * Restarts autonegotiation for the selected port.
 */
int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port)
{
	struct fw_port_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_portid = htonl(FW_CMD_OP_V(FW_PORT_CMD) | FW_CMD_REQUEST_F |
			       FW_CMD_EXEC_F | FW_PORT_CMD_PORTID(port));
	c.action_to_len16 = htonl(FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
				  FW_LEN16(c));
	c.u.l1cfg.rcap = htonl(FW_PORT_CAP_ANEG);
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}

typedef void (*int_handler_t)(struct adapter *adap);

struct intr_info {
	unsigned int mask;         /* bits to check in interrupt status */
	const char *msg;           /* message to print or NULL */
	short stat_idx;            /* stat counter to increment or -1 */
	unsigned short fatal;      /* whether the condition reported is fatal */
	int_handler_t int_handler; /* platform-specific int handler */
};

/**
 * t4_handle_intr_status - table driven interrupt handler
 * @adapter: the adapter that generated the interrupt
 * @reg: the interrupt status register to process
 * @acts: table of interrupt actions
 *
 * A table driven interrupt handler that applies a set of masks to an
 * interrupt status word and performs the corresponding actions if the
 * interrupts described by the mask have occurred. The actions include
 * optionally emitting a warning or alert message. The table is terminated
 * by an entry specifying mask 0. Returns the number of fatal interrupt
 * conditions.
 */
static int t4_handle_intr_status(struct adapter *adapter, unsigned int reg,
				 const struct intr_info *acts)
{
	int fatal = 0;
	unsigned int mask = 0;
	unsigned int status = t4_read_reg(adapter, reg);

	for ( ; acts->mask; ++acts) {
		if (!(status & acts->mask))
			continue;
		if (acts->fatal) {
			fatal++;
			dev_alert(adapter->pdev_dev, "%s (0x%x)\n", acts->msg,
				  status & acts->mask);
		} else if (acts->msg && printk_ratelimit())
			dev_warn(adapter->pdev_dev, "%s (0x%x)\n", acts->msg,
				 status & acts->mask);
		if (acts->int_handler)
			acts->int_handler(adapter);
		mask |= acts->mask;
	}
	status &= mask;
	if (status)                     /* clear processed interrupts */
		t4_write_reg(adapter, reg, status);
	return fatal;
}

/*
 * Interrupt handler for the PCIE module.
 */
static void pcie_intr_handler(struct adapter *adapter)
{
	static const struct intr_info sysbus_intr_info[] = {
		{ RNPP, "RXNP array parity error", -1, 1 },
		{ RPCP, "RXPC array parity error", -1, 1 },
		{ RCIP, "RXCIF array parity error", -1, 1 },
		{ RCCP, "Rx completions control array parity error", -1, 1 },
		{ RFTP, "RXFT array parity error", -1, 1 },
		{ 0 }
	};
	static const struct intr_info pcie_port_intr_info[] = {
		{ TPCP, "TXPC array parity error", -1, 1 },
		{ TNPP, "TXNP array parity error", -1, 1 },
		{ TFTP, "TXFT array parity error", -1, 1 },
		{ TCAP, "TXCA array parity error", -1, 1 },
		{ TCIP, "TXCIF array parity error", -1, 1 },
		{ RCAP, "RXCA array parity error", -1, 1 },
		{ OTDD, "outbound request TLP discarded", -1, 1 },
		{ RDPE, "Rx data parity error", -1, 1 },
		{ TDUE, "Tx uncorrectable data error", -1, 1 },
		{ 0 }
	};
	static const struct intr_info pcie_intr_info[] = {
		{ MSIADDRLPERR, "MSI AddrL parity error", -1, 1 },
		{ MSIADDRHPERR, "MSI AddrH parity error", -1, 1 },
		{ MSIDATAPERR, "MSI data parity error", -1, 1 },
		{ MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 },
		{ MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 },
		{ MSIXDATAPERR, "MSI-X data parity error", -1, 1 },
		{ MSIXDIPERR, "MSI-X DI parity error", -1, 1 },
		{ PIOCPLPERR, "PCI PIO completion FIFO parity error", -1, 1 },
		{ PIOREQPERR, "PCI PIO request FIFO parity error", -1, 1 },
		{ TARTAGPERR, "PCI PCI target tag FIFO parity error", -1, 1 },
		{ CCNTPERR, "PCI CMD channel count parity error", -1, 1 },
		{ CREQPERR, "PCI CMD channel request parity error", -1, 1 },
		{ CRSPPERR, "PCI CMD channel response parity error", -1, 1 },
		{ DCNTPERR, "PCI DMA channel count parity error", -1, 1 },
		{ DREQPERR, "PCI DMA channel request parity error", -1, 1 },
		{ DRSPPERR, "PCI DMA channel response parity error", -1, 1 },
		{ HCNTPERR, "PCI HMA channel count parity error", -1, 1 },
		{ HREQPERR, "PCI HMA channel request parity error", -1, 1 },
		{ HRSPPERR, "PCI HMA channel response parity error", -1, 1 },
		{ CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 },
		{ FIDPERR, "PCI FID parity error", -1, 1 },
		{ INTXCLRPERR, "PCI INTx clear parity error", -1, 1 },
		{ MATAGPERR, "PCI MA tag parity error", -1, 1 },
		{ PIOTAGPERR, "PCI PIO tag parity error", -1, 1 },
		{ RXCPLPERR, "PCI Rx completion parity error", -1, 1 },
		{ RXWRPERR, "PCI Rx write parity error", -1, 1 },
		{ RPLPERR, "PCI replay buffer parity error", -1, 1 },
		{ PCIESINT, "PCI core secondary fault", -1, 1 },
		{ PCIEPINT, "PCI core primary fault", -1, 1 },
		{ UNXSPLCPLERR, "PCI unexpected split completion error",
		  -1, 0 },
		{ 0 }
	};

	static const struct intr_info t5_pcie_intr_info[] = {
		{ MSTGRPPERR, "Master Response Read Queue parity error",
		  -1, 1 },
		{ MSTTIMEOUTPERR, "Master Timeout FIFO parity error", -1, 1 },
		{ MSIXSTIPERR, "MSI-X STI SRAM parity error", -1, 1 },
		{ MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 },
		{ MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 },
		{ MSIXDATAPERR, "MSI-X data parity error", -1, 1 },
		{ MSIXDIPERR, "MSI-X DI parity error", -1, 1 },
		{ PIOCPLGRPPERR, "PCI PIO completion Group FIFO parity error",
		  -1, 1 },
		{ PIOREQGRPPERR, "PCI PIO request Group FIFO parity error",
		  -1, 1 },
		{ TARTAGPERR, "PCI PCI target tag FIFO parity error", -1, 1 },
		{ MSTTAGQPERR, "PCI master tag queue parity error", -1, 1 },
		{ CREQPERR, "PCI CMD channel request parity error", -1, 1 },
		{ CRSPPERR, "PCI CMD channel response parity error", -1, 1 },
		{ DREQWRPERR, "PCI DMA channel write request parity error",
		  -1, 1 },
		{ DREQPERR, "PCI DMA channel request parity error", -1, 1 },
		{ DRSPPERR, "PCI DMA channel response parity error", -1, 1 },
		{ HREQWRPERR, "PCI HMA channel write request parity error",
		  -1, 1 },
		{ HREQPERR, "PCI HMA channel request parity error", -1, 1 },
		{ HRSPPERR, "PCI HMA channel response parity error", -1, 1 },
		{ CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 },
		{ FIDPERR, "PCI FID parity error", -1, 1 },
		{ VFIDPERR, "PCI VFID parity error", -1, 1 },
		{ MAGRPPERR, "PCI MA group FIFO parity error", -1, 1 },
		{ PIOTAGPERR, "PCI PIO tag parity error", -1, 1 },
		{ IPRXHDRGRPPERR, "PCI IP Rx header group parity error",
		  -1, 1 },
		{ IPRXDATAGRPPERR, "PCI IP Rx data group parity error",
		  -1, 1 },
		{ RPLPERR, "PCI IP replay buffer parity error", -1, 1 },
		{ IPSOTPERR, "PCI IP SOT buffer parity error", -1, 1 },
		{ TRGT1GRPPERR, "PCI TRGT1 group FIFOs parity error", -1, 1 },
		{ READRSPERR, "Outbound read error", -1, 0 },
		{ 0 }
	};

	int fat;

	if (is_t4(adapter->params.chip))
		fat = t4_handle_intr_status(adapter,
					    PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS,
					    sysbus_intr_info) +
		      t4_handle_intr_status(adapter,
					    PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
					    pcie_port_intr_info) +
		      t4_handle_intr_status(adapter, PCIE_INT_CAUSE,
					    pcie_intr_info);
	else
		fat = t4_handle_intr_status(adapter, PCIE_INT_CAUSE,
					    t5_pcie_intr_info);

	if (fat)
		t4_fatal_err(adapter);
}

/*
 * TP interrupt handler.
 */
static void tp_intr_handler(struct adapter *adapter)
{
	static const struct intr_info tp_intr_info[] = {
		{ 0x3fffffff, "TP parity error", -1, 1 },
		{ FLMTXFLSTEMPTY, "TP out of Tx pages", -1, 1 },
		{ 0 }
	};

	if (t4_handle_intr_status(adapter, TP_INT_CAUSE, tp_intr_info))
		t4_fatal_err(adapter);
}

/*
 * SGE interrupt handler.
 */
static void sge_intr_handler(struct adapter *adapter)
{
	u64 v;

	static const struct intr_info sge_intr_info[] = {
		{ ERR_CPL_EXCEED_IQE_SIZE,
		  "SGE received CPL exceeding IQE size", -1, 1 },
		{ ERR_INVALID_CIDX_INC,
		  "SGE GTS CIDX increment too large", -1, 0 },
		{ ERR_CPL_OPCODE_0, "SGE received 0-length CPL", -1, 0 },
		{ DBFIFO_LP_INT, NULL, -1, 0, t4_db_full },
		{ DBFIFO_HP_INT, NULL, -1, 0, t4_db_full },
		{ ERR_DROPPED_DB, NULL, -1, 0, t4_db_dropped },
		{ ERR_DATA_CPL_ON_HIGH_QID1 | ERR_DATA_CPL_ON_HIGH_QID0,
		  "SGE IQID > 1023 received CPL for FL", -1, 0 },
		{ ERR_BAD_DB_PIDX3, "SGE DBP 3 pidx increment too large", -1,
		  0 },
		{ ERR_BAD_DB_PIDX2, "SGE DBP 2 pidx increment too large", -1,
		  0 },
		{ ERR_BAD_DB_PIDX1, "SGE DBP 1 pidx increment too large", -1,
		  0 },
		{ ERR_BAD_DB_PIDX0, "SGE DBP 0 pidx increment too large", -1,
		  0 },
		{ ERR_ING_CTXT_PRIO,
		  "SGE too many priority ingress contexts", -1, 0 },
		{ ERR_EGR_CTXT_PRIO,
		  "SGE too many priority egress contexts", -1, 0 },
		{ INGRESS_SIZE_ERR, "SGE illegal ingress QID", -1, 0 },
		{ EGRESS_SIZE_ERR, "SGE illegal egress QID", -1, 0 },
		{ 0 }
	};

	v = (u64)t4_read_reg(adapter, SGE_INT_CAUSE1) |
		((u64)t4_read_reg(adapter, SGE_INT_CAUSE2) << 32);
	if (v) {
		dev_alert(adapter->pdev_dev, "SGE parity error (%#llx)\n",
			  (unsigned long long)v);
		t4_write_reg(adapter, SGE_INT_CAUSE1, v);
		t4_write_reg(adapter, SGE_INT_CAUSE2, v >> 32);
	}

	if (t4_handle_intr_status(adapter, SGE_INT_CAUSE3, sge_intr_info) ||
	    v != 0)
		t4_fatal_err(adapter);
}
1517
1518/*
1519 * CIM interrupt handler.
1520 */
1521static void cim_intr_handler(struct adapter *adapter)
1522{
005b5717 1523 static const struct intr_info cim_intr_info[] = {
56d36be4
DM
1524 { PREFDROPINT, "CIM control register prefetch drop", -1, 1 },
1525 { OBQPARERR, "CIM OBQ parity error", -1, 1 },
1526 { IBQPARERR, "CIM IBQ parity error", -1, 1 },
1527 { MBUPPARERR, "CIM mailbox uP parity error", -1, 1 },
1528 { MBHOSTPARERR, "CIM mailbox host parity error", -1, 1 },
1529 { TIEQINPARERRINT, "CIM TIEQ outgoing parity error", -1, 1 },
1530 { TIEQOUTPARERRINT, "CIM TIEQ incoming parity error", -1, 1 },
1531 { 0 }
1532 };
005b5717 1533 static const struct intr_info cim_upintr_info[] = {
56d36be4
DM
1534 { RSVDSPACEINT, "CIM reserved space access", -1, 1 },
1535 { ILLTRANSINT, "CIM illegal transaction", -1, 1 },
1536 { ILLWRINT, "CIM illegal write", -1, 1 },
1537 { ILLRDINT, "CIM illegal read", -1, 1 },
1538 { ILLRDBEINT, "CIM illegal read BE", -1, 1 },
1539 { ILLWRBEINT, "CIM illegal write BE", -1, 1 },
1540 { SGLRDBOOTINT, "CIM single read from boot space", -1, 1 },
1541 { SGLWRBOOTINT, "CIM single write to boot space", -1, 1 },
1542 { BLKWRBOOTINT, "CIM block write to boot space", -1, 1 },
1543 { SGLRDFLASHINT, "CIM single read from flash space", -1, 1 },
1544 { SGLWRFLASHINT, "CIM single write to flash space", -1, 1 },
1545 { BLKWRFLASHINT, "CIM block write to flash space", -1, 1 },
1546 { SGLRDEEPROMINT, "CIM single EEPROM read", -1, 1 },
1547 { SGLWREEPROMINT, "CIM single EEPROM write", -1, 1 },
1548 { BLKRDEEPROMINT, "CIM block EEPROM read", -1, 1 },
1549 { BLKWREEPROMINT, "CIM block EEPROM write", -1, 1 },
1550 { SGLRDCTLINT , "CIM single read from CTL space", -1, 1 },
1551 { SGLWRCTLINT , "CIM single write to CTL space", -1, 1 },
1552 { BLKRDCTLINT , "CIM block read from CTL space", -1, 1 },
1553 { BLKWRCTLINT , "CIM block write to CTL space", -1, 1 },
1554 { SGLRDPLINT , "CIM single read from PL space", -1, 1 },
1555 { SGLWRPLINT , "CIM single write to PL space", -1, 1 },
1556 { BLKRDPLINT , "CIM block read from PL space", -1, 1 },
1557 { BLKWRPLINT , "CIM block write to PL space", -1, 1 },
1558 { REQOVRLOOKUPINT , "CIM request FIFO overwrite", -1, 1 },
1559 { RSPOVRLOOKUPINT , "CIM response FIFO overwrite", -1, 1 },
1560 { TIMEOUTINT , "CIM PIF timeout", -1, 1 },
1561 { TIMEOUTMAINT , "CIM PIF MA timeout", -1, 1 },
1562 { 0 }
1563 };
1564
1565 int fat;
1566
1567 if (t4_read_reg(adapter, MA_PCIE_FW) & FW_PCIE_FW_ERR)
1568 t4_report_fw_error(adapter);
1569
1570 fat = t4_handle_intr_status(adapter, CIM_HOST_INT_CAUSE,
1571 cim_intr_info) +
1572 t4_handle_intr_status(adapter, CIM_HOST_UPACC_INT_CAUSE,
1573 cim_upintr_info);
1574 if (fat)
1575 t4_fatal_err(adapter);
1576}
1577
1578/*
1579 * ULP RX interrupt handler.
1580 */
1581static void ulprx_intr_handler(struct adapter *adapter)
1582{
1583 static const struct intr_info ulprx_intr_info[] = {
1584 { 0x1800000, "ULPRX context error", -1, 1 },
1585 { 0x7fffff, "ULPRX parity error", -1, 1 },
1586 { 0 }
1587 };
1588
1589 if (t4_handle_intr_status(adapter, ULP_RX_INT_CAUSE, ulprx_intr_info))
1590 t4_fatal_err(adapter);
1591}
1592
1593/*
1594 * ULP TX interrupt handler.
1595 */
1596static void ulptx_intr_handler(struct adapter *adapter)
1597{
1598 static const struct intr_info ulptx_intr_info[] = {
1599 { PBL_BOUND_ERR_CH3, "ULPTX channel 3 PBL out of bounds", -1,
1600 0 },
1601 { PBL_BOUND_ERR_CH2, "ULPTX channel 2 PBL out of bounds", -1,
1602 0 },
1603 { PBL_BOUND_ERR_CH1, "ULPTX channel 1 PBL out of bounds", -1,
1604 0 },
1605 { PBL_BOUND_ERR_CH0, "ULPTX channel 0 PBL out of bounds", -1,
1606 0 },
1607 { 0xfffffff, "ULPTX parity error", -1, 1 },
1608 { 0 }
1609 };
1610
1611 if (t4_handle_intr_status(adapter, ULP_TX_INT_CAUSE, ulptx_intr_info))
1612 t4_fatal_err(adapter);
1613}
1614
1615/*
1616 * PM TX interrupt handler.
1617 */
1618static void pmtx_intr_handler(struct adapter *adapter)
1619{
1620 static const struct intr_info pmtx_intr_info[] = {
1621 { PCMD_LEN_OVFL0, "PMTX channel 0 pcmd too large", -1, 1 },
1622 { PCMD_LEN_OVFL1, "PMTX channel 1 pcmd too large", -1, 1 },
1623 { PCMD_LEN_OVFL2, "PMTX channel 2 pcmd too large", -1, 1 },
1624 { ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1 },
1625 { PMTX_FRAMING_ERROR, "PMTX framing error", -1, 1 },
1626 { OESPI_PAR_ERROR, "PMTX oespi parity error", -1, 1 },
1627 { DB_OPTIONS_PAR_ERROR, "PMTX db_options parity error", -1, 1 },
1628 { ICSPI_PAR_ERROR, "PMTX icspi parity error", -1, 1 },
1629 { C_PCMD_PAR_ERROR, "PMTX c_pcmd parity error", -1, 1},
1630 { 0 }
1631 };
1632
1633 if (t4_handle_intr_status(adapter, PM_TX_INT_CAUSE, pmtx_intr_info))
1634 t4_fatal_err(adapter);
1635}
1636
1637/*
1638 * PM RX interrupt handler.
1639 */
1640static void pmrx_intr_handler(struct adapter *adapter)
1641{
1642 static const struct intr_info pmrx_intr_info[] = {
1643 { ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1 },
1644 { PMRX_FRAMING_ERROR, "PMRX framing error", -1, 1 },
1645 { OCSPI_PAR_ERROR, "PMRX ocspi parity error", -1, 1 },
1646 { DB_OPTIONS_PAR_ERROR, "PMRX db_options parity error", -1, 1 },
1647 { IESPI_PAR_ERROR, "PMRX iespi parity error", -1, 1 },
1648 { E_PCMD_PAR_ERROR, "PMRX e_pcmd parity error", -1, 1},
1649 { 0 }
1650 };
1651
1652 if (t4_handle_intr_status(adapter, PM_RX_INT_CAUSE, pmrx_intr_info))
1653 t4_fatal_err(adapter);
1654}
1655
1656/*
1657 * CPL switch interrupt handler.
1658 */
1659static void cplsw_intr_handler(struct adapter *adapter)
1660{
1661 static const struct intr_info cplsw_intr_info[] = {
1662 { CIM_OP_MAP_PERR, "CPLSW CIM op_map parity error", -1, 1 },
1663 { CIM_OVFL_ERROR, "CPLSW CIM overflow", -1, 1 },
1664 { TP_FRAMING_ERROR, "CPLSW TP framing error", -1, 1 },
1665 { SGE_FRAMING_ERROR, "CPLSW SGE framing error", -1, 1 },
1666 { CIM_FRAMING_ERROR, "CPLSW CIM framing error", -1, 1 },
1667 { ZERO_SWITCH_ERROR, "CPLSW no-switch error", -1, 1 },
1668 { 0 }
1669 };
1670
1671 if (t4_handle_intr_status(adapter, CPL_INTR_CAUSE, cplsw_intr_info))
1672 t4_fatal_err(adapter);
1673}
1674
1675/*
1676 * LE interrupt handler.
1677 */
1678static void le_intr_handler(struct adapter *adap)
1679{
1680 static const struct intr_info le_intr_info[] = {
1681 { LIPMISS, "LE LIP miss", -1, 0 },
1682 { LIP0, "LE 0 LIP error", -1, 0 },
1683 { PARITYERR, "LE parity error", -1, 1 },
1684 { UNKNOWNCMD, "LE unknown command", -1, 1 },
1685 { REQQPARERR, "LE request queue parity error", -1, 1 },
1686 { 0 }
1687 };
1688
1689 if (t4_handle_intr_status(adap, LE_DB_INT_CAUSE, le_intr_info))
1690 t4_fatal_err(adap);
1691}
1692
1693/*
1694 * MPS interrupt handler.
1695 */
1696static void mps_intr_handler(struct adapter *adapter)
1697{
1698 static const struct intr_info mps_rx_intr_info[] = {
1699 { 0xffffff, "MPS Rx parity error", -1, 1 },
1700 { 0 }
1701 };
1702 static const struct intr_info mps_tx_intr_info[] = {
1703 { TPFIFO, "MPS Tx TP FIFO parity error", -1, 1 },
1704 { NCSIFIFO, "MPS Tx NC-SI FIFO parity error", -1, 1 },
1705 { TXDATAFIFO, "MPS Tx data FIFO parity error", -1, 1 },
1706 { TXDESCFIFO, "MPS Tx desc FIFO parity error", -1, 1 },
1707 { BUBBLE, "MPS Tx underflow", -1, 1 },
1708 { SECNTERR, "MPS Tx SOP/EOP error", -1, 1 },
1709 { FRMERR, "MPS Tx framing error", -1, 1 },
1710 { 0 }
1711 };
1712 static const struct intr_info mps_trc_intr_info[] = {
1713 { FILTMEM, "MPS TRC filter parity error", -1, 1 },
1714 { PKTFIFO, "MPS TRC packet FIFO parity error", -1, 1 },
1715 { MISCPERR, "MPS TRC misc parity error", -1, 1 },
1716 { 0 }
1717 };
1718 static const struct intr_info mps_stat_sram_intr_info[] = {
1719 { 0x1fffff, "MPS statistics SRAM parity error", -1, 1 },
1720 { 0 }
1721 };
1722 static const struct intr_info mps_stat_tx_intr_info[] = {
1723 { 0xfffff, "MPS statistics Tx FIFO parity error", -1, 1 },
1724 { 0 }
1725 };
1726 static const struct intr_info mps_stat_rx_intr_info[] = {
1727 { 0xffffff, "MPS statistics Rx FIFO parity error", -1, 1 },
1728 { 0 }
1729 };
1730 static const struct intr_info mps_cls_intr_info[] = {
1731 { MATCHSRAM, "MPS match SRAM parity error", -1, 1 },
1732 { MATCHTCAM, "MPS match TCAM parity error", -1, 1 },
1733 { HASHSRAM, "MPS hash SRAM parity error", -1, 1 },
1734 { 0 }
1735 };
1736
1737 int fat;
1738
1739 fat = t4_handle_intr_status(adapter, MPS_RX_PERR_INT_CAUSE,
1740 mps_rx_intr_info) +
1741 t4_handle_intr_status(adapter, MPS_TX_INT_CAUSE,
1742 mps_tx_intr_info) +
1743 t4_handle_intr_status(adapter, MPS_TRC_INT_CAUSE,
1744 mps_trc_intr_info) +
1745 t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_SRAM,
1746 mps_stat_sram_intr_info) +
1747 t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_TX_FIFO,
1748 mps_stat_tx_intr_info) +
1749 t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_RX_FIFO,
1750 mps_stat_rx_intr_info) +
1751 t4_handle_intr_status(adapter, MPS_CLS_INT_CAUSE,
1752 mps_cls_intr_info);
1753
1754 t4_write_reg(adapter, MPS_INT_CAUSE, CLSINT | TRCINT |
1755 RXINT | TXINT | STATINT);
1756 t4_read_reg(adapter, MPS_INT_CAUSE); /* flush */
1757 if (fat)
1758 t4_fatal_err(adapter);
1759}
1760
1761#define MEM_INT_MASK (PERR_INT_CAUSE | ECC_CE_INT_CAUSE | ECC_UE_INT_CAUSE)
1762
1763/*
1764 * EDC/MC interrupt handler.
1765 */
1766static void mem_intr_handler(struct adapter *adapter, int idx)
1767{
1768 static const char name[4][7] = { "EDC0", "EDC1", "MC/MC0", "MC1" };
1769
1770 unsigned int addr, cnt_addr, v;
1771
1772 if (idx <= MEM_EDC1) {
1773 addr = EDC_REG(EDC_INT_CAUSE, idx);
1774 cnt_addr = EDC_REG(EDC_ECC_STATUS, idx);
1775 } else if (idx == MEM_MC) {
1776 if (is_t4(adapter->params.chip)) {
1777 addr = MC_INT_CAUSE;
1778 cnt_addr = MC_ECC_STATUS;
1779 } else {
1780 addr = MC_P_INT_CAUSE;
1781 cnt_addr = MC_P_ECC_STATUS;
1782 }
1783 } else {
1784 addr = MC_REG(MC_P_INT_CAUSE, 1);
1785 cnt_addr = MC_REG(MC_P_ECC_STATUS, 1);
1786 }
1787
1788 v = t4_read_reg(adapter, addr) & MEM_INT_MASK;
1789 if (v & PERR_INT_CAUSE)
1790 dev_alert(adapter->pdev_dev, "%s FIFO parity error\n",
1791 name[idx]);
1792 if (v & ECC_CE_INT_CAUSE) {
1793 u32 cnt = ECC_CECNT_GET(t4_read_reg(adapter, cnt_addr));
1794
1795 t4_write_reg(adapter, cnt_addr, ECC_CECNT_MASK);
1796 if (printk_ratelimit())
1797 dev_warn(adapter->pdev_dev,
1798 "%u %s correctable ECC data error%s\n",
1799 cnt, name[idx], cnt > 1 ? "s" : "");
1800 }
1801 if (v & ECC_UE_INT_CAUSE)
1802 dev_alert(adapter->pdev_dev,
1803 "%s uncorrectable ECC data error\n", name[idx]);
1804
1805 t4_write_reg(adapter, addr, v);
1806 if (v & (PERR_INT_CAUSE | ECC_UE_INT_CAUSE))
1807 t4_fatal_err(adapter);
1808}
1809
1810/*
1811 * MA interrupt handler.
1812 */
1813static void ma_intr_handler(struct adapter *adap)
1814{
1815 u32 v, status = t4_read_reg(adap, MA_INT_CAUSE);
1816
1817 if (status & MEM_PERR_INT_CAUSE) {
1818 dev_alert(adap->pdev_dev,
1819 "MA parity error, parity status %#x\n",
1820 t4_read_reg(adap, MA_PARITY_ERROR_STATUS));
1821 if (is_t5(adap->params.chip))
1822 dev_alert(adap->pdev_dev,
1823 "MA parity error, parity status %#x\n",
1824 t4_read_reg(adap,
1825 MA_PARITY_ERROR_STATUS2));
1826 }
1827 if (status & MEM_WRAP_INT_CAUSE) {
1828 v = t4_read_reg(adap, MA_INT_WRAP_STATUS);
1829 dev_alert(adap->pdev_dev, "MA address wrap-around error by "
1830 "client %u to address %#x\n",
1831 MEM_WRAP_CLIENT_NUM_GET(v),
1832 MEM_WRAP_ADDRESS_GET(v) << 4);
1833 }
1834 t4_write_reg(adap, MA_INT_CAUSE, status);
1835 t4_fatal_err(adap);
1836}
1837
1838/*
1839 * SMB interrupt handler.
1840 */
1841static void smb_intr_handler(struct adapter *adap)
1842{
1843 static const struct intr_info smb_intr_info[] = {
1844 { MSTTXFIFOPARINT, "SMB master Tx FIFO parity error", -1, 1 },
1845 { MSTRXFIFOPARINT, "SMB master Rx FIFO parity error", -1, 1 },
1846 { SLVFIFOPARINT, "SMB slave FIFO parity error", -1, 1 },
1847 { 0 }
1848 };
1849
1850 if (t4_handle_intr_status(adap, SMB_INT_CAUSE, smb_intr_info))
1851 t4_fatal_err(adap);
1852}
1853
1854/*
1855 * NC-SI interrupt handler.
1856 */
1857static void ncsi_intr_handler(struct adapter *adap)
1858{
1859 static const struct intr_info ncsi_intr_info[] = {
1860 { CIM_DM_PRTY_ERR, "NC-SI CIM parity error", -1, 1 },
1861 { MPS_DM_PRTY_ERR, "NC-SI MPS parity error", -1, 1 },
1862 { TXFIFO_PRTY_ERR, "NC-SI Tx FIFO parity error", -1, 1 },
1863 { RXFIFO_PRTY_ERR, "NC-SI Rx FIFO parity error", -1, 1 },
1864 { 0 }
1865 };
1866
1867 if (t4_handle_intr_status(adap, NCSI_INT_CAUSE, ncsi_intr_info))
1868 t4_fatal_err(adap);
1869}
1870
1871/*
1872 * XGMAC interrupt handler.
1873 */
1874static void xgmac_intr_handler(struct adapter *adap, int port)
1875{
1876 u32 v, int_cause_reg;
1877
1878 if (is_t4(adap->params.chip))
1879 int_cause_reg = PORT_REG(port, XGMAC_PORT_INT_CAUSE);
1880 else
1881 int_cause_reg = T5_PORT_REG(port, MAC_PORT_INT_CAUSE);
1882
1883 v = t4_read_reg(adap, int_cause_reg);
1884
1885 v &= TXFIFO_PRTY_ERR | RXFIFO_PRTY_ERR;
1886 if (!v)
1887 return;
1888
1889 if (v & TXFIFO_PRTY_ERR)
1890 dev_alert(adap->pdev_dev, "XGMAC %d Tx FIFO parity error\n",
1891 port);
1892 if (v & RXFIFO_PRTY_ERR)
1893 dev_alert(adap->pdev_dev, "XGMAC %d Rx FIFO parity error\n",
1894 port);
1895 t4_write_reg(adap, int_cause_reg, v);
1896 t4_fatal_err(adap);
1897}
1898
1899/*
1900 * PL interrupt handler.
1901 */
1902static void pl_intr_handler(struct adapter *adap)
1903{
1904 static const struct intr_info pl_intr_info[] = {
1905 { FATALPERR, "T4 fatal parity error", -1, 1 },
1906 { PERRVFID, "PL VFID_MAP parity error", -1, 1 },
1907 { 0 }
1908 };
1909
1910 if (t4_handle_intr_status(adap, PL_PL_INT_CAUSE, pl_intr_info))
1911 t4_fatal_err(adap);
1912}
1913
1914 #define PF_INTR_MASK (PFSW)
1915#define GLBL_INTR_MASK (CIM | MPS | PL | PCIE | MC | EDC0 | \
1916 EDC1 | LE | TP | MA | PM_TX | PM_RX | ULP_RX | \
1917 CPL_SWITCH | SGE | ULP_TX)
1918
1919/**
1920 * t4_slow_intr_handler - control path interrupt handler
1921 * @adapter: the adapter
1922 *
1923 * T4 interrupt handler for non-data global interrupt events, e.g., errors.
1924 * The designation 'slow' is because it involves register reads, while
1925 * data interrupts typically don't involve any MMIOs.
1926 */
1927int t4_slow_intr_handler(struct adapter *adapter)
1928{
1929 u32 cause = t4_read_reg(adapter, PL_INT_CAUSE);
1930
1931 if (!(cause & GLBL_INTR_MASK))
1932 return 0;
1933 if (cause & CIM)
1934 cim_intr_handler(adapter);
1935 if (cause & MPS)
1936 mps_intr_handler(adapter);
1937 if (cause & NCSI)
1938 ncsi_intr_handler(adapter);
1939 if (cause & PL)
1940 pl_intr_handler(adapter);
1941 if (cause & SMB)
1942 smb_intr_handler(adapter);
1943 if (cause & XGMAC0)
1944 xgmac_intr_handler(adapter, 0);
1945 if (cause & XGMAC1)
1946 xgmac_intr_handler(adapter, 1);
1947 if (cause & XGMAC_KR0)
1948 xgmac_intr_handler(adapter, 2);
1949 if (cause & XGMAC_KR1)
1950 xgmac_intr_handler(adapter, 3);
1951 if (cause & PCIE)
1952 pcie_intr_handler(adapter);
1953 if (cause & MC)
1954 mem_intr_handler(adapter, MEM_MC);
1955 if (!is_t4(adapter->params.chip) && (cause & MC1))
1956 mem_intr_handler(adapter, MEM_MC1);
1957 if (cause & EDC0)
1958 mem_intr_handler(adapter, MEM_EDC0);
1959 if (cause & EDC1)
1960 mem_intr_handler(adapter, MEM_EDC1);
1961 if (cause & LE)
1962 le_intr_handler(adapter);
1963 if (cause & TP)
1964 tp_intr_handler(adapter);
1965 if (cause & MA)
1966 ma_intr_handler(adapter);
1967 if (cause & PM_TX)
1968 pmtx_intr_handler(adapter);
1969 if (cause & PM_RX)
1970 pmrx_intr_handler(adapter);
1971 if (cause & ULP_RX)
1972 ulprx_intr_handler(adapter);
1973 if (cause & CPL_SWITCH)
1974 cplsw_intr_handler(adapter);
1975 if (cause & SGE)
1976 sge_intr_handler(adapter);
1977 if (cause & ULP_TX)
1978 ulptx_intr_handler(adapter);
1979
1980 /* Clear the interrupts just processed for which we are the master. */
1981 t4_write_reg(adapter, PL_INT_CAUSE, cause & GLBL_INTR_MASK);
1982 (void) t4_read_reg(adapter, PL_INT_CAUSE); /* flush */
1983 return 1;
1984}
1985
1986/**
1987 * t4_intr_enable - enable interrupts
1988 * @adapter: the adapter whose interrupts should be enabled
1989 *
1990 * Enable PF-specific interrupts for the calling function and the top-level
1991 * interrupt concentrator for global interrupts. Interrupts are already
1992 * enabled at each module; here we just enable the roots of the interrupt
1993 * hierarchies.
1994 *
1995 * Note: this function should be called only when the driver manages
1996 * non PF-specific interrupts from the various HW modules. Only one PCI
1997 * function at a time should be doing this.
1998 */
1999void t4_intr_enable(struct adapter *adapter)
2000{
2001 u32 pf = SOURCEPF_GET(t4_read_reg(adapter, PL_WHOAMI));
2002
2003 t4_write_reg(adapter, SGE_INT_ENABLE3, ERR_CPL_EXCEED_IQE_SIZE |
2004 ERR_INVALID_CIDX_INC | ERR_CPL_OPCODE_0 |
2005 ERR_DROPPED_DB | ERR_DATA_CPL_ON_HIGH_QID1 |
2006 ERR_DATA_CPL_ON_HIGH_QID0 | ERR_BAD_DB_PIDX3 |
2007 ERR_BAD_DB_PIDX2 | ERR_BAD_DB_PIDX1 |
2008 ERR_BAD_DB_PIDX0 | ERR_ING_CTXT_PRIO |
2009 ERR_EGR_CTXT_PRIO | INGRESS_SIZE_ERR |
2010 DBFIFO_HP_INT | DBFIFO_LP_INT |
2011 EGRESS_SIZE_ERR);
2012 t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE), PF_INTR_MASK);
2013 t4_set_reg_field(adapter, PL_INT_MAP0, 0, 1 << pf);
2014}
2015
2016/**
2017 * t4_intr_disable - disable interrupts
2018 * @adapter: the adapter whose interrupts should be disabled
2019 *
2020 * Disable interrupts. We only disable the top-level interrupt
2021 * concentrators. The caller must be a PCI function managing global
2022 * interrupts.
2023 */
2024void t4_intr_disable(struct adapter *adapter)
2025{
2026 u32 pf = SOURCEPF_GET(t4_read_reg(adapter, PL_WHOAMI));
2027
2028 t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE), 0);
2029 t4_set_reg_field(adapter, PL_INT_MAP0, 1 << pf, 0);
2030}
2031
2032/**
2033 * hash_mac_addr - return the hash value of a MAC address
2034 * @addr: the 48-bit Ethernet MAC address
2035 *
2036 * Hashes a MAC address according to the hash function used by HW inexact
2037 * (hash) address matching.
2038 */
2039static int hash_mac_addr(const u8 *addr)
2040{
2041 u32 a = ((u32)addr[0] << 16) | ((u32)addr[1] << 8) | addr[2];
2042 u32 b = ((u32)addr[3] << 16) | ((u32)addr[4] << 8) | addr[5];
2043 a ^= b;
2044 a ^= (a >> 12);
2045 a ^= (a >> 6);
2046 return a & 0x3f;
2047}
2048
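The fold above is easy to check in isolation; a minimal userspace replica of hash_mac_addr() (the sample MAC address below is arbitrary) prints which of the 64 inexact-match hash buckets an address selects:

#include <stdio.h>
#include <stdint.h>

/* Userspace replica of hash_mac_addr(), for experimentation only. */
static int hash_mac_addr(const uint8_t *addr)
{
	uint32_t a = ((uint32_t)addr[0] << 16) | ((uint32_t)addr[1] << 8) | addr[2];
	uint32_t b = ((uint32_t)addr[3] << 16) | ((uint32_t)addr[4] << 8) | addr[5];

	a ^= b;			/* fold the two 24-bit halves together */
	a ^= (a >> 12);
	a ^= (a >> 6);
	return a & 0x3f;	/* 64 hash buckets */
}

int main(void)
{
	const uint8_t mac[6] = { 0x00, 0x07, 0x43, 0x12, 0x34, 0x56 };

	printf("hash bucket: %d\n", hash_mac_addr(mac));
	return 0;
}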
2049/**
2050 * t4_config_rss_range - configure a portion of the RSS mapping table
2051 * @adapter: the adapter
2052 * @mbox: mbox to use for the FW command
2053 * @viid: virtual interface whose RSS subtable is to be written
2054 * @start: start entry in the table to write
2055 * @n: how many table entries to write
2056 * @rspq: values for the response queue lookup table
2057 * @nrspq: number of values in @rspq
2058 *
2059 * Programs the selected part of the VI's RSS mapping table with the
2060 * provided values. If @nrspq < @n the supplied values are used repeatedly
2061 * until the full table range is populated.
2062 *
2063 * The caller must ensure the values in @rspq are in the range allowed for
2064 * @viid.
2065 */
2066int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
2067 int start, int n, const u16 *rspq, unsigned int nrspq)
2068{
2069 int ret;
2070 const u16 *rsp = rspq;
2071 const u16 *rsp_end = rspq + nrspq;
2072 struct fw_rss_ind_tbl_cmd cmd;
2073
2074 memset(&cmd, 0, sizeof(cmd));
2075 cmd.op_to_viid = htonl(FW_CMD_OP_V(FW_RSS_IND_TBL_CMD) |
2076 FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
2077 FW_RSS_IND_TBL_CMD_VIID(viid));
2078 cmd.retval_len16 = htonl(FW_LEN16(cmd));
2079
2080 /* each fw_rss_ind_tbl_cmd takes up to 32 entries */
2081 while (n > 0) {
2082 int nq = min(n, 32);
2083 __be32 *qp = &cmd.iq0_to_iq2;
2084
2085 cmd.niqid = htons(nq);
2086 cmd.startidx = htons(start);
2087
2088 start += nq;
2089 n -= nq;
2090
2091 while (nq > 0) {
2092 unsigned int v;
2093
2094 v = FW_RSS_IND_TBL_CMD_IQ0(*rsp);
2095 if (++rsp >= rsp_end)
2096 rsp = rspq;
2097 v |= FW_RSS_IND_TBL_CMD_IQ1(*rsp);
2098 if (++rsp >= rsp_end)
2099 rsp = rspq;
2100 v |= FW_RSS_IND_TBL_CMD_IQ2(*rsp);
2101 if (++rsp >= rsp_end)
2102 rsp = rspq;
2103
2104 *qp++ = htonl(v);
2105 nq -= 3;
2106 }
2107
2108 ret = t4_wr_mbox(adapter, mbox, &cmd, sizeof(cmd), NULL);
2109 if (ret)
2110 return ret;
2111 }
2112 return 0;
2113}
2114
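As a usage illustration, a hedged sketch of a caller (driver context assumed; the VI identifier and ingress queue IDs are made-up values): with nrspq smaller than n, the queue IDs below are repeated round-robin until all 64 slots are written, 32 table entries per mailbox command.

/* Sketch only: assumes a live adapter with a valid mailbox and VIID. */
static int example_setup_rss(struct adapter *adap, unsigned int viid)
{
	/* Hypothetical ingress queue IDs for four Rx queues. */
	static const u16 rspq[] = { 64, 65, 66, 67 };

	/* Program a 64-entry slice of the VI's indirection table. */
	return t4_config_rss_range(adap, adap->mbox, viid, 0, 64,
				   rspq, ARRAY_SIZE(rspq));
}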
2115/**
2116 * t4_config_glbl_rss - configure the global RSS mode
2117 * @adapter: the adapter
2118 * @mbox: mbox to use for the FW command
2119 * @mode: global RSS mode
2120 * @flags: mode-specific flags
2121 *
2122 * Sets the global RSS mode.
2123 */
2124int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode,
2125 unsigned int flags)
2126{
2127 struct fw_rss_glb_config_cmd c;
2128
2129 memset(&c, 0, sizeof(c));
2130 c.op_to_write = htonl(FW_CMD_OP_V(FW_RSS_GLB_CONFIG_CMD) |
2131 FW_CMD_REQUEST_F | FW_CMD_WRITE_F);
2132 c.retval_len16 = htonl(FW_LEN16(c));
2133 if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_MANUAL) {
2134 c.u.manual.mode_pkd = htonl(FW_RSS_GLB_CONFIG_CMD_MODE(mode));
2135 } else if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL) {
2136 c.u.basicvirtual.mode_pkd =
2137 htonl(FW_RSS_GLB_CONFIG_CMD_MODE(mode));
2138 c.u.basicvirtual.synmapen_to_hashtoeplitz = htonl(flags);
2139 } else
2140 return -EINVAL;
2141 return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
2142}
2143
2144/**
2145 * t4_tp_get_tcp_stats - read TP's TCP MIB counters
2146 * @adap: the adapter
2147 * @v4: holds the TCP/IP counter values
2148 * @v6: holds the TCP/IPv6 counter values
2149 *
2150 * Returns the values of TP's TCP/IP and TCP/IPv6 MIB counters.
2151 * Either @v4 or @v6 may be %NULL to skip the corresponding stats.
2152 */
2153void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4,
2154 struct tp_tcp_stats *v6)
2155{
2156 u32 val[TP_MIB_TCP_RXT_SEG_LO - TP_MIB_TCP_OUT_RST + 1];
2157
2158#define STAT_IDX(x) ((TP_MIB_TCP_##x) - TP_MIB_TCP_OUT_RST)
2159#define STAT(x) val[STAT_IDX(x)]
2160#define STAT64(x) (((u64)STAT(x##_HI) << 32) | STAT(x##_LO))
2161
2162 if (v4) {
2163 t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, val,
2164 ARRAY_SIZE(val), TP_MIB_TCP_OUT_RST);
2165 v4->tcpOutRsts = STAT(OUT_RST);
2166 v4->tcpInSegs = STAT64(IN_SEG);
2167 v4->tcpOutSegs = STAT64(OUT_SEG);
2168 v4->tcpRetransSegs = STAT64(RXT_SEG);
2169 }
2170 if (v6) {
2171 t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, val,
2172 ARRAY_SIZE(val), TP_MIB_TCP_V6OUT_RST);
2173 v6->tcpOutRsts = STAT(OUT_RST);
2174 v6->tcpInSegs = STAT64(IN_SEG);
2175 v6->tcpOutSegs = STAT64(OUT_SEG);
2176 v6->tcpRetransSegs = STAT64(RXT_SEG);
2177 }
2178#undef STAT64
2179#undef STAT
2180#undef STAT_IDX
2181}
2182
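TP exports each 64-bit MIB counter as separate 32-bit high/low words, which STAT64() stitches back together; a standalone sketch of the reconstruction, with sample values standing in for the register reads:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Sample values standing in for the _HI and _LO word reads. */
	uint32_t hi = 0x00000002, lo = 0x80000000;

	/* Same reconstruction as STAT64(): high word shifted up 32 bits. */
	uint64_t in_segs = ((uint64_t)hi << 32) | lo;

	printf("tcpInSegs = %llu\n", (unsigned long long)in_segs);
	return 0;
}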
2183/**
2184 * t4_read_mtu_tbl - returns the values in the HW path MTU table
2185 * @adap: the adapter
2186 * @mtus: where to store the MTU values
2187 * @mtu_log: where to store the MTU base-2 log (may be %NULL)
2188 *
2189 * Reads the HW path MTU table.
2190 */
2191void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log)
2192{
2193 u32 v;
2194 int i;
2195
2196 for (i = 0; i < NMTUS; ++i) {
2197 t4_write_reg(adap, TP_MTU_TABLE,
2198 MTUINDEX(0xff) | MTUVALUE(i));
2199 v = t4_read_reg(adap, TP_MTU_TABLE);
2200 mtus[i] = MTUVALUE_GET(v);
2201 if (mtu_log)
2202 mtu_log[i] = MTUWIDTH_GET(v);
2203 }
2204}
2205
2206/**
2207 * t4_tp_wr_bits_indirect - set/clear bits in an indirect TP register
2208 * @adap: the adapter
2209 * @addr: the indirect TP register address
2210 * @mask: specifies the field within the register to modify
2211 * @val: new value for the field
2212 *
2213 * Sets a field of an indirect TP register to the given value.
2214 */
2215void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr,
2216 unsigned int mask, unsigned int val)
2217{
2218 t4_write_reg(adap, TP_PIO_ADDR, addr);
2219 val |= t4_read_reg(adap, TP_PIO_DATA) & ~mask;
2220 t4_write_reg(adap, TP_PIO_DATA, val);
2221}
2222
2223/**
2224 * init_cong_ctrl - initialize congestion control parameters
2225 * @a: the alpha values for congestion control
2226 * @b: the beta values for congestion control
2227 *
2228 * Initialize the congestion control parameters.
2229 */
2230 static void init_cong_ctrl(unsigned short *a, unsigned short *b)
2231{
2232 a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1;
2233 a[9] = 2;
2234 a[10] = 3;
2235 a[11] = 4;
2236 a[12] = 5;
2237 a[13] = 6;
2238 a[14] = 7;
2239 a[15] = 8;
2240 a[16] = 9;
2241 a[17] = 10;
2242 a[18] = 14;
2243 a[19] = 17;
2244 a[20] = 21;
2245 a[21] = 25;
2246 a[22] = 30;
2247 a[23] = 35;
2248 a[24] = 45;
2249 a[25] = 60;
2250 a[26] = 80;
2251 a[27] = 100;
2252 a[28] = 200;
2253 a[29] = 300;
2254 a[30] = 400;
2255 a[31] = 500;
2256
2257 b[0] = b[1] = b[2] = b[3] = b[4] = b[5] = b[6] = b[7] = b[8] = 0;
2258 b[9] = b[10] = 1;
2259 b[11] = b[12] = 2;
2260 b[13] = b[14] = b[15] = b[16] = 3;
2261 b[17] = b[18] = b[19] = b[20] = b[21] = 4;
2262 b[22] = b[23] = b[24] = b[25] = b[26] = b[27] = 5;
2263 b[28] = b[29] = 6;
2264 b[30] = b[31] = 7;
2265}
2266
2267/* The minimum additive increment value for the congestion control table */
2268#define CC_MIN_INCR 2U
2269
2270/**
2271 * t4_load_mtus - write the MTU and congestion control HW tables
2272 * @adap: the adapter
2273 * @mtus: the values for the MTU table
2274 * @alpha: the values for the congestion control alpha parameter
2275 * @beta: the values for the congestion control beta parameter
2276 *
2277 * Write the HW MTU table with the supplied MTUs and the high-speed
2278 * congestion control table with the supplied alpha, beta, and MTUs.
2279 * We write the two tables together because the additive increments
2280 * depend on the MTUs.
2281 */
2282void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
2283 const unsigned short *alpha, const unsigned short *beta)
2284{
2285 static const unsigned int avg_pkts[NCCTRL_WIN] = {
2286 2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
2287 896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
2288 28672, 40960, 57344, 81920, 114688, 163840, 229376
2289 };
2290
2291 unsigned int i, w;
2292
2293 for (i = 0; i < NMTUS; ++i) {
2294 unsigned int mtu = mtus[i];
2295 unsigned int log2 = fls(mtu);
2296
2297 if (!(mtu & ((1 << log2) >> 2))) /* round */
2298 log2--;
2299 t4_write_reg(adap, TP_MTU_TABLE, MTUINDEX(i) |
2300 MTUWIDTH(log2) | MTUVALUE(mtu));
2301
2302 for (w = 0; w < NCCTRL_WIN; ++w) {
2303 unsigned int inc;
2304
2305 inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
2306 CC_MIN_INCR);
2307
2308 t4_write_reg(adap, TP_CCTRL_TABLE, (i << 21) |
2309 (w << 16) | (beta[w] << 13) | inc);
2310 }
2311 }
2312}
2313
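The per-window additive increment is easy to reproduce by hand: for mtu = 1500, alpha = 1 and the first congestion window (avg_pkts = 2) it comes to (1500 - 40) * 1 / 2 = 730, with CC_MIN_INCR = 2 as the floor. A standalone sketch of that computation:

#include <stdio.h>

#define CC_MIN_INCR 2U

int main(void)
{
	unsigned int mtu = 1500, alpha = 1, avg_pkts = 2;

	/* Same computation and clamp as the inner loop of t4_load_mtus(). */
	unsigned int inc = ((mtu - 40) * alpha) / avg_pkts;

	if (inc < CC_MIN_INCR)
		inc = CC_MIN_INCR;

	printf("additive increment = %u\n", inc);	/* prints 730 */
	return 0;
}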
2314/**
2315 * get_mps_bg_map - return the buffer groups associated with a port
2316 * @adap: the adapter
2317 * @idx: the port index
2318 *
2319 * Returns a bitmap indicating which MPS buffer groups are associated
2320 * with the given port. Bit i is set if buffer group i is used by the
2321 * port.
2322 */
2323static unsigned int get_mps_bg_map(struct adapter *adap, int idx)
2324{
2325 u32 n = NUMPORTS_GET(t4_read_reg(adap, MPS_CMN_CTL));
2326
2327 if (n == 0)
2328 return idx == 0 ? 0xf : 0;
2329 if (n == 1)
2330 return idx < 2 ? (3 << (2 * idx)) : 0;
2331 return 1 << idx;
2332}
2333
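The three cases above can be tabulated with a standalone replica of the logic (n is the NUMPORTS field read from MPS_CMN_CTL): one port owns all four buffer groups, two ports get two groups each, and four ports get one group apiece.

#include <stdio.h>

/* Userspace replica of get_mps_bg_map(). */
static unsigned int bg_map(unsigned int n, int idx)
{
	if (n == 0)			/* all four groups to port 0 */
		return idx == 0 ? 0xf : 0;
	if (n == 1)			/* two groups per port, ports 0-1 */
		return idx < 2 ? (3 << (2 * idx)) : 0;
	return 1 << idx;		/* one group per port */
}

int main(void)
{
	for (unsigned int n = 0; n < 3; n++)
		for (int idx = 0; idx < 4; idx++)
			printf("n=%u port=%d -> bgmap %#x\n",
			       n, idx, bg_map(n, idx));
	return 0;
}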
2334/**
2335 * t4_get_port_type_description - return Port Type string description
2336 * @port_type: firmware Port Type enumeration
2337 */
2338const char *t4_get_port_type_description(enum fw_port_type port_type)
2339{
2340 static const char *const port_type_description[] = {
2341 "R XFI",
2342 "R XAUI",
2343 "T SGMII",
2344 "T XFI",
2345 "T XAUI",
2346 "KX4",
2347 "CX4",
2348 "KX",
2349 "KR",
2350 "R SFP+",
2351 "KR/KX",
2352 "KR/KX/KX4",
2353 "R QSFP_10G",
2354 "",
2355 "R QSFP",
2356 "R BP40_BA",
2357 };
2358
2359 if (port_type < ARRAY_SIZE(port_type_description))
2360 return port_type_description[port_type];
2361 return "UNKNOWN";
2362}
2363
2364/**
2365 * t4_get_port_stats - collect port statistics
2366 * @adap: the adapter
2367 * @idx: the port index
2368 * @p: the stats structure to fill
2369 *
2370 * Collect statistics related to the given port from HW.
2371 */
2372void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p)
2373{
2374 u32 bgmap = get_mps_bg_map(adap, idx);
2375
2376#define GET_STAT(name) \
2377 t4_read_reg64(adap, \
2378 (is_t4(adap->params.chip) ? PORT_REG(idx, MPS_PORT_STAT_##name##_L) : \
2379 T5_PORT_REG(idx, MPS_PORT_STAT_##name##_L)))
2380#define GET_STAT_COM(name) t4_read_reg64(adap, MPS_STAT_##name##_L)
2381
2382 p->tx_octets = GET_STAT(TX_PORT_BYTES);
2383 p->tx_frames = GET_STAT(TX_PORT_FRAMES);
2384 p->tx_bcast_frames = GET_STAT(TX_PORT_BCAST);
2385 p->tx_mcast_frames = GET_STAT(TX_PORT_MCAST);
2386 p->tx_ucast_frames = GET_STAT(TX_PORT_UCAST);
2387 p->tx_error_frames = GET_STAT(TX_PORT_ERROR);
2388 p->tx_frames_64 = GET_STAT(TX_PORT_64B);
2389 p->tx_frames_65_127 = GET_STAT(TX_PORT_65B_127B);
2390 p->tx_frames_128_255 = GET_STAT(TX_PORT_128B_255B);
2391 p->tx_frames_256_511 = GET_STAT(TX_PORT_256B_511B);
2392 p->tx_frames_512_1023 = GET_STAT(TX_PORT_512B_1023B);
2393 p->tx_frames_1024_1518 = GET_STAT(TX_PORT_1024B_1518B);
2394 p->tx_frames_1519_max = GET_STAT(TX_PORT_1519B_MAX);
2395 p->tx_drop = GET_STAT(TX_PORT_DROP);
2396 p->tx_pause = GET_STAT(TX_PORT_PAUSE);
2397 p->tx_ppp0 = GET_STAT(TX_PORT_PPP0);
2398 p->tx_ppp1 = GET_STAT(TX_PORT_PPP1);
2399 p->tx_ppp2 = GET_STAT(TX_PORT_PPP2);
2400 p->tx_ppp3 = GET_STAT(TX_PORT_PPP3);
2401 p->tx_ppp4 = GET_STAT(TX_PORT_PPP4);
2402 p->tx_ppp5 = GET_STAT(TX_PORT_PPP5);
2403 p->tx_ppp6 = GET_STAT(TX_PORT_PPP6);
2404 p->tx_ppp7 = GET_STAT(TX_PORT_PPP7);
2405
2406 p->rx_octets = GET_STAT(RX_PORT_BYTES);
2407 p->rx_frames = GET_STAT(RX_PORT_FRAMES);
2408 p->rx_bcast_frames = GET_STAT(RX_PORT_BCAST);
2409 p->rx_mcast_frames = GET_STAT(RX_PORT_MCAST);
2410 p->rx_ucast_frames = GET_STAT(RX_PORT_UCAST);
2411 p->rx_too_long = GET_STAT(RX_PORT_MTU_ERROR);
2412 p->rx_jabber = GET_STAT(RX_PORT_MTU_CRC_ERROR);
2413 p->rx_fcs_err = GET_STAT(RX_PORT_CRC_ERROR);
2414 p->rx_len_err = GET_STAT(RX_PORT_LEN_ERROR);
2415 p->rx_symbol_err = GET_STAT(RX_PORT_SYM_ERROR);
2416 p->rx_runt = GET_STAT(RX_PORT_LESS_64B);
2417 p->rx_frames_64 = GET_STAT(RX_PORT_64B);
2418 p->rx_frames_65_127 = GET_STAT(RX_PORT_65B_127B);
2419 p->rx_frames_128_255 = GET_STAT(RX_PORT_128B_255B);
2420 p->rx_frames_256_511 = GET_STAT(RX_PORT_256B_511B);
2421 p->rx_frames_512_1023 = GET_STAT(RX_PORT_512B_1023B);
2422 p->rx_frames_1024_1518 = GET_STAT(RX_PORT_1024B_1518B);
2423 p->rx_frames_1519_max = GET_STAT(RX_PORT_1519B_MAX);
2424 p->rx_pause = GET_STAT(RX_PORT_PAUSE);
2425 p->rx_ppp0 = GET_STAT(RX_PORT_PPP0);
2426 p->rx_ppp1 = GET_STAT(RX_PORT_PPP1);
2427 p->rx_ppp2 = GET_STAT(RX_PORT_PPP2);
2428 p->rx_ppp3 = GET_STAT(RX_PORT_PPP3);
2429 p->rx_ppp4 = GET_STAT(RX_PORT_PPP4);
2430 p->rx_ppp5 = GET_STAT(RX_PORT_PPP5);
2431 p->rx_ppp6 = GET_STAT(RX_PORT_PPP6);
2432 p->rx_ppp7 = GET_STAT(RX_PORT_PPP7);
2433
2434 p->rx_ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_DROP_FRAME) : 0;
2435 p->rx_ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_DROP_FRAME) : 0;
2436 p->rx_ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_DROP_FRAME) : 0;
2437 p->rx_ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_DROP_FRAME) : 0;
2438 p->rx_trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_TRUNC_FRAME) : 0;
2439 p->rx_trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_TRUNC_FRAME) : 0;
2440 p->rx_trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_TRUNC_FRAME) : 0;
2441 p->rx_trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_TRUNC_FRAME) : 0;
2442
2443#undef GET_STAT
2444#undef GET_STAT_COM
2445}
2446
2447/**
2448 * t4_wol_magic_enable - enable/disable magic packet WoL
2449 * @adap: the adapter
2450 * @port: the physical port index
2451 * @addr: MAC address expected in magic packets, %NULL to disable
2452 *
2453 * Enables/disables magic packet wake-on-LAN for the selected port.
2454 */
2455void t4_wol_magic_enable(struct adapter *adap, unsigned int port,
2456 const u8 *addr)
2457{
2458 u32 mag_id_reg_l, mag_id_reg_h, port_cfg_reg;
2459
2460 if (is_t4(adap->params.chip)) {
2461 mag_id_reg_l = PORT_REG(port, XGMAC_PORT_MAGIC_MACID_LO);
2462 mag_id_reg_h = PORT_REG(port, XGMAC_PORT_MAGIC_MACID_HI);
2463 port_cfg_reg = PORT_REG(port, XGMAC_PORT_CFG2);
2464 } else {
2465 mag_id_reg_l = T5_PORT_REG(port, MAC_PORT_MAGIC_MACID_LO);
2466 mag_id_reg_h = T5_PORT_REG(port, MAC_PORT_MAGIC_MACID_HI);
2467 port_cfg_reg = T5_PORT_REG(port, MAC_PORT_CFG2);
2468 }
2469
2470 if (addr) {
2471 t4_write_reg(adap, mag_id_reg_l,
2472 (addr[2] << 24) | (addr[3] << 16) |
2473 (addr[4] << 8) | addr[5]);
2474 t4_write_reg(adap, mag_id_reg_h,
2475 (addr[0] << 8) | addr[1]);
2476 }
2477 t4_set_reg_field(adap, port_cfg_reg, MAGICEN,
2478 addr ? MAGICEN : 0);
2479}
2480
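The MAC address is split across the two MAGIC_MACID registers, low four bytes in the LO register and top two bytes in the HI register; a standalone check of the packing (the sample address is arbitrary):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	const uint8_t addr[6] = { 0x00, 0x07, 0x43, 0xaa, 0xbb, 0xcc };

	/* Same packing as t4_wol_magic_enable(). */
	uint32_t lo = ((uint32_t)addr[2] << 24) | ((uint32_t)addr[3] << 16) |
		      ((uint32_t)addr[4] << 8) | addr[5];
	uint32_t hi = ((uint32_t)addr[0] << 8) | addr[1];

	printf("MAGIC_MACID_LO = %#010x\n", lo);	/* 0x43aabbcc */
	printf("MAGIC_MACID_HI = %#010x\n", hi);	/* 0x00000007 */
	return 0;
}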
2481/**
2482 * t4_wol_pat_enable - enable/disable pattern-based WoL
2483 * @adap: the adapter
2484 * @port: the physical port index
2485 * @map: bitmap of which HW pattern filters to set
2486 * @mask0: byte mask for bytes 0-63 of a packet
2487 * @mask1: byte mask for bytes 64-127 of a packet
2488 * @crc: Ethernet CRC for selected bytes
2489 * @enable: enable/disable switch
2490 *
2491 * Sets the pattern filters indicated in @map to mask out the bytes
2492 * specified in @mask0/@mask1 in received packets and compare the CRC of
2493 * the resulting packet against @crc. If @enable is %true pattern-based
2494 * WoL is enabled, otherwise disabled.
2495 */
2496int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map,
2497 u64 mask0, u64 mask1, unsigned int crc, bool enable)
2498{
2499 int i;
2500 u32 port_cfg_reg;
2501
2502 if (is_t4(adap->params.chip))
2503 port_cfg_reg = PORT_REG(port, XGMAC_PORT_CFG2);
2504 else
2505 port_cfg_reg = T5_PORT_REG(port, MAC_PORT_CFG2);
2506
2507 if (!enable) {
2508 t4_set_reg_field(adap, port_cfg_reg, PATEN, 0);
2509 return 0;
2510 }
2511 if (map > 0xff)
2512 return -EINVAL;
2513
2514 #define EPIO_REG(name) \
2515 (is_t4(adap->params.chip) ? PORT_REG(port, XGMAC_PORT_EPIO_##name) : \
2516 T5_PORT_REG(port, MAC_PORT_EPIO_##name))
2517
2518 t4_write_reg(adap, EPIO_REG(DATA1), mask0 >> 32);
2519 t4_write_reg(adap, EPIO_REG(DATA2), mask1);
2520 t4_write_reg(adap, EPIO_REG(DATA3), mask1 >> 32);
2521
2522 for (i = 0; i < NWOL_PAT; i++, map >>= 1) {
2523 if (!(map & 1))
2524 continue;
2525
2526 /* write byte masks */
2527 t4_write_reg(adap, EPIO_REG(DATA0), mask0);
2528 t4_write_reg(adap, EPIO_REG(OP), ADDRESS(i) | EPIOWR);
2529 t4_read_reg(adap, EPIO_REG(OP)); /* flush */
2530 if (t4_read_reg(adap, EPIO_REG(OP)) & SF_BUSY)
2531 return -ETIMEDOUT;
2532
2533 /* write CRC */
2534 t4_write_reg(adap, EPIO_REG(DATA0), crc);
2535 t4_write_reg(adap, EPIO_REG(OP), ADDRESS(i + 32) | EPIOWR);
2536 t4_read_reg(adap, EPIO_REG(OP)); /* flush */
2537 if (t4_read_reg(adap, EPIO_REG(OP)) & SF_BUSY)
2538 return -ETIMEDOUT;
2539 }
2540#undef EPIO_REG
2541
2542 t4_set_reg_field(adap, port_cfg_reg, 0, PATEN);
2543 return 0;
2544}
2545
2546/* t4_mk_filtdelwr - create a delete filter WR
2547 * @ftid: the filter ID
2548 * @wr: the filter work request to populate
2549 * @qid: ingress queue to receive the delete notification
2550 *
2551 * Creates a filter work request to delete the supplied filter. If @qid is
2552 * negative the delete notification is suppressed.
2553 */
2554void t4_mk_filtdelwr(unsigned int ftid, struct fw_filter_wr *wr, int qid)
2555{
2556 memset(wr, 0, sizeof(*wr));
2557 wr->op_pkd = htonl(FW_WR_OP_V(FW_FILTER_WR));
2558 wr->len16_pkd = htonl(FW_WR_LEN16_V(sizeof(*wr) / 16));
2559 wr->tid_to_iq = htonl(FW_FILTER_WR_TID_V(ftid) |
2560 FW_FILTER_WR_NOREPLY_V(qid < 0));
2561 wr->del_filter_to_l2tix = htonl(FW_FILTER_WR_DEL_FILTER_F);
2562 if (qid >= 0)
2563 wr->rx_chan_rx_rpl_iq = htons(FW_FILTER_WR_RX_RPL_IQ_V(qid));
2564}
2565
2566 #define INIT_CMD(var, cmd, rd_wr) do { \
2567 (var).op_to_write = htonl(FW_CMD_OP_V(FW_##cmd##_CMD) | \
2568 FW_CMD_REQUEST_F | FW_CMD_##rd_wr##_F); \
2569 (var).retval_len16 = htonl(FW_LEN16(var)); \
2570} while (0)
2571
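For reference, INIT_CMD(c, BYE, WRITE) pastes its tokens into roughly the following, which is why the simple FW commands below need only a memset plus this macro before being posted:

/* Approximate expansion of INIT_CMD(c, BYE, WRITE): */
do {
	c.op_to_write = htonl(FW_CMD_OP_V(FW_BYE_CMD) |
			      FW_CMD_REQUEST_F | FW_CMD_WRITE_F);
	c.retval_len16 = htonl(FW_LEN16(c));
} while (0);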
2572int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox,
2573 u32 addr, u32 val)
2574{
2575 struct fw_ldst_cmd c;
2576
2577 memset(&c, 0, sizeof(c));
2578 c.op_to_addrspace = htonl(FW_CMD_OP_V(FW_LDST_CMD) | FW_CMD_REQUEST_F |
2579 FW_CMD_WRITE_F |
2580 FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_FIRMWARE));
2581 c.cycles_to_len16 = htonl(FW_LEN16(c));
2582 c.u.addrval.addr = htonl(addr);
2583 c.u.addrval.val = htonl(val);
2584
2585 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2586}
2587
2588/**
2589 * t4_mdio_rd - read a PHY register through MDIO
2590 * @adap: the adapter
2591 * @mbox: mailbox to use for the FW command
2592 * @phy_addr: the PHY address
2593 * @mmd: the PHY MMD to access (0 for clause 22 PHYs)
2594 * @reg: the register to read
2595 * @valp: where to store the value
2596 *
2597 * Issues a FW command through the given mailbox to read a PHY register.
2598 */
2599int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
2600 unsigned int mmd, unsigned int reg, u16 *valp)
2601{
2602 int ret;
2603 struct fw_ldst_cmd c;
2604
2605 memset(&c, 0, sizeof(c));
2606 c.op_to_addrspace = htonl(FW_CMD_OP_V(FW_LDST_CMD) | FW_CMD_REQUEST_F |
2607 FW_CMD_READ_F | FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO));
2608 c.cycles_to_len16 = htonl(FW_LEN16(c));
2609 c.u.mdio.paddr_mmd = htons(FW_LDST_CMD_PADDR(phy_addr) |
2610 FW_LDST_CMD_MMD(mmd));
2611 c.u.mdio.raddr = htons(reg);
2612
2613 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
2614 if (ret == 0)
2615 *valp = ntohs(c.u.mdio.rval);
2616 return ret;
2617}
2618
2619/**
2620 * t4_mdio_wr - write a PHY register through MDIO
2621 * @adap: the adapter
2622 * @mbox: mailbox to use for the FW command
2623 * @phy_addr: the PHY address
2624 * @mmd: the PHY MMD to access (0 for clause 22 PHYs)
2625 * @reg: the register to write
2626 * @val: value to write
2627 *
2628 * Issues a FW command through the given mailbox to write a PHY register.
2629 */
2630int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
2631 unsigned int mmd, unsigned int reg, u16 val)
2632{
2633 struct fw_ldst_cmd c;
2634
2635 memset(&c, 0, sizeof(c));
2636 c.op_to_addrspace = htonl(FW_CMD_OP_V(FW_LDST_CMD) | FW_CMD_REQUEST_F |
2637 FW_CMD_WRITE_F | FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO));
2638 c.cycles_to_len16 = htonl(FW_LEN16(c));
2639 c.u.mdio.paddr_mmd = htons(FW_LDST_CMD_PADDR(phy_addr) |
2640 FW_LDST_CMD_MMD(mmd));
2641 c.u.mdio.raddr = htons(reg);
2642 c.u.mdio.rval = htons(val);
2643
2644 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2645}
2646
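A hedged read-modify-write sketch using the two helpers (driver context assumed; the PHY address, MMD and register number are placeholders, not real values):

/* Sketch only: assumes a live adapter and valid PHY addressing. */
static int example_mdio_rmw(struct adapter *adap)
{
	unsigned int phy_addr = 0, mmd = 1, reg = 0;	/* placeholders */
	u16 val;
	int ret;

	ret = t4_mdio_rd(adap, adap->mbox, phy_addr, mmd, reg, &val);
	if (ret)
		return ret;

	val |= 0x8000;	/* example: set one bit before writing back */
	return t4_mdio_wr(adap, adap->mbox, phy_addr, mmd, reg, val);
}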
2647/**
2648 * t4_sge_decode_idma_state - decode the idma state
2649 * @adapter: the adapter
2650 * @state: the state idma is stuck in
2651 */
2652void t4_sge_decode_idma_state(struct adapter *adapter, int state)
2653{
2654 static const char * const t4_decode[] = {
2655 "IDMA_IDLE",
2656 "IDMA_PUSH_MORE_CPL_FIFO",
2657 "IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO",
2658 "Not used",
2659 "IDMA_PHYSADDR_SEND_PCIEHDR",
2660 "IDMA_PHYSADDR_SEND_PAYLOAD_FIRST",
2661 "IDMA_PHYSADDR_SEND_PAYLOAD",
2662 "IDMA_SEND_FIFO_TO_IMSG",
2663 "IDMA_FL_REQ_DATA_FL_PREP",
2664 "IDMA_FL_REQ_DATA_FL",
2665 "IDMA_FL_DROP",
2666 "IDMA_FL_H_REQ_HEADER_FL",
2667 "IDMA_FL_H_SEND_PCIEHDR",
2668 "IDMA_FL_H_PUSH_CPL_FIFO",
2669 "IDMA_FL_H_SEND_CPL",
2670 "IDMA_FL_H_SEND_IP_HDR_FIRST",
2671 "IDMA_FL_H_SEND_IP_HDR",
2672 "IDMA_FL_H_REQ_NEXT_HEADER_FL",
2673 "IDMA_FL_H_SEND_NEXT_PCIEHDR",
2674 "IDMA_FL_H_SEND_IP_HDR_PADDING",
2675 "IDMA_FL_D_SEND_PCIEHDR",
2676 "IDMA_FL_D_SEND_CPL_AND_IP_HDR",
2677 "IDMA_FL_D_REQ_NEXT_DATA_FL",
2678 "IDMA_FL_SEND_PCIEHDR",
2679 "IDMA_FL_PUSH_CPL_FIFO",
2680 "IDMA_FL_SEND_CPL",
2681 "IDMA_FL_SEND_PAYLOAD_FIRST",
2682 "IDMA_FL_SEND_PAYLOAD",
2683 "IDMA_FL_REQ_NEXT_DATA_FL",
2684 "IDMA_FL_SEND_NEXT_PCIEHDR",
2685 "IDMA_FL_SEND_PADDING",
2686 "IDMA_FL_SEND_COMPLETION_TO_IMSG",
2687 "IDMA_FL_SEND_FIFO_TO_IMSG",
2688 "IDMA_FL_REQ_DATAFL_DONE",
2689 "IDMA_FL_REQ_HEADERFL_DONE",
2690 };
2691 static const char * const t5_decode[] = {
2692 "IDMA_IDLE",
2693 "IDMA_ALMOST_IDLE",
2694 "IDMA_PUSH_MORE_CPL_FIFO",
2695 "IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO",
2696 "IDMA_SGEFLRFLUSH_SEND_PCIEHDR",
2697 "IDMA_PHYSADDR_SEND_PCIEHDR",
2698 "IDMA_PHYSADDR_SEND_PAYLOAD_FIRST",
2699 "IDMA_PHYSADDR_SEND_PAYLOAD",
2700 "IDMA_SEND_FIFO_TO_IMSG",
2701 "IDMA_FL_REQ_DATA_FL",
2702 "IDMA_FL_DROP",
2703 "IDMA_FL_DROP_SEND_INC",
2704 "IDMA_FL_H_REQ_HEADER_FL",
2705 "IDMA_FL_H_SEND_PCIEHDR",
2706 "IDMA_FL_H_PUSH_CPL_FIFO",
2707 "IDMA_FL_H_SEND_CPL",
2708 "IDMA_FL_H_SEND_IP_HDR_FIRST",
2709 "IDMA_FL_H_SEND_IP_HDR",
2710 "IDMA_FL_H_REQ_NEXT_HEADER_FL",
2711 "IDMA_FL_H_SEND_NEXT_PCIEHDR",
2712 "IDMA_FL_H_SEND_IP_HDR_PADDING",
2713 "IDMA_FL_D_SEND_PCIEHDR",
2714 "IDMA_FL_D_SEND_CPL_AND_IP_HDR",
2715 "IDMA_FL_D_REQ_NEXT_DATA_FL",
2716 "IDMA_FL_SEND_PCIEHDR",
2717 "IDMA_FL_PUSH_CPL_FIFO",
2718 "IDMA_FL_SEND_CPL",
2719 "IDMA_FL_SEND_PAYLOAD_FIRST",
2720 "IDMA_FL_SEND_PAYLOAD",
2721 "IDMA_FL_REQ_NEXT_DATA_FL",
2722 "IDMA_FL_SEND_NEXT_PCIEHDR",
2723 "IDMA_FL_SEND_PADDING",
2724 "IDMA_FL_SEND_COMPLETION_TO_IMSG",
2725 };
2726 static const u32 sge_regs[] = {
2727 SGE_DEBUG_DATA_LOW_INDEX_2,
2728 SGE_DEBUG_DATA_LOW_INDEX_3,
2729 SGE_DEBUG_DATA_HIGH_INDEX_10,
2730 };
2731 const char **sge_idma_decode;
2732 int sge_idma_decode_nstates;
2733 int i;
2734
2735 if (is_t4(adapter->params.chip)) {
2736 sge_idma_decode = (const char **)t4_decode;
2737 sge_idma_decode_nstates = ARRAY_SIZE(t4_decode);
2738 } else {
2739 sge_idma_decode = (const char **)t5_decode;
2740 sge_idma_decode_nstates = ARRAY_SIZE(t5_decode);
2741 }
2742
2743 if (state < sge_idma_decode_nstates)
2744 CH_WARN(adapter, "idma state %s\n", sge_idma_decode[state]);
2745 else
2746 CH_WARN(adapter, "idma state %d unknown\n", state);
2747
2748 for (i = 0; i < ARRAY_SIZE(sge_regs); i++)
2749 CH_WARN(adapter, "SGE register %#x value %#x\n",
2750 sge_regs[i], t4_read_reg(adapter, sge_regs[i]));
2751}
2752
2753 /**
2754 * t4_fw_hello - establish communication with FW
2755 * @adap: the adapter
2756 * @mbox: mailbox to use for the FW command
2757 * @evt_mbox: mailbox to receive async FW events
2758 * @master: specifies the caller's willingness to be the device master
2759 * @state: returns the current device state (if non-NULL)
2760 *
2761 * Issues a command to establish communication with FW. Returns either
2762 * an error (negative integer) or the mailbox of the Master PF.
2763 */
2764int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox,
2765 enum dev_master master, enum dev_state *state)
2766{
2767 int ret;
2768 struct fw_hello_cmd c;
2769 u32 v;
2770 unsigned int master_mbox;
2771 int retries = FW_CMD_HELLO_RETRIES;
2772
2773retry:
2774 memset(&c, 0, sizeof(c));
2775 INIT_CMD(c, HELLO, WRITE);
2776 c.err_to_clearinit = htonl(
2777 FW_HELLO_CMD_MASTERDIS(master == MASTER_CANT) |
2778 FW_HELLO_CMD_MASTERFORCE(master == MASTER_MUST) |
2779 FW_HELLO_CMD_MBMASTER(master == MASTER_MUST ? mbox :
2780 FW_HELLO_CMD_MBMASTER_MASK) |
2781 FW_HELLO_CMD_MBASYNCNOT(evt_mbox) |
2782 FW_HELLO_CMD_STAGE(fw_hello_cmd_stage_os) |
2783 FW_HELLO_CMD_CLEARINIT);
2784
2785 /*
2786 * Issue the HELLO command to the firmware. If it's not successful
2787 * but indicates that we got a "busy" or "timeout" condition, retry
2788 * the HELLO until we exhaust our retry limit. If we do exceed our
2789 * retry limit, check to see if the firmware left us any error
2790 * information and report that if so.
2791 */
2792 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
2793 if (ret < 0) {
2794 if ((ret == -EBUSY || ret == -ETIMEDOUT) && retries-- > 0)
2795 goto retry;
2796 if (t4_read_reg(adap, MA_PCIE_FW) & FW_PCIE_FW_ERR)
2797 t4_report_fw_error(adap);
2798 return ret;
2799 }
2800
2801 v = ntohl(c.err_to_clearinit);
2802 master_mbox = FW_HELLO_CMD_MBMASTER_GET(v);
2803 if (state) {
2804 if (v & FW_HELLO_CMD_ERR)
2805 *state = DEV_STATE_ERR;
2806 else if (v & FW_HELLO_CMD_INIT)
2807 *state = DEV_STATE_INIT;
2808 else
2809 *state = DEV_STATE_UNINIT;
2810 }
2811
2812 /*
2813 * If we're not the Master PF then we need to wait around for the
2814 * Master PF Driver to finish setting up the adapter.
2815 *
2816 * Note that we also do this wait if we're a non-Master-capable PF and
2817 * there is no current Master PF; a Master PF may show up momentarily
2818 * and we wouldn't want to fail pointlessly. (This can happen when an
2819 * OS loads lots of different drivers rapidly at the same time). In
2820 * this case, the Master PF returned by the firmware will be
2821 * FW_PCIE_FW_MASTER_MASK so the test below will work ...
2822 */
2823 if ((v & (FW_HELLO_CMD_ERR|FW_HELLO_CMD_INIT)) == 0 &&
2824 master_mbox != mbox) {
2825 int waiting = FW_CMD_HELLO_TIMEOUT;
2826
2827 /*
2828 * Wait for the firmware to either indicate an error or
2829 * initialized state. If we see either of these we bail out
2830 * and report the issue to the caller. If we exhaust the
2831 * "hello timeout" and we haven't exhausted our retries, try
2832 * again. Otherwise bail with a timeout error.
2833 */
2834 for (;;) {
2835 u32 pcie_fw;
2836
2837 msleep(50);
2838 waiting -= 50;
2839
2840 /*
2841 * If neither Error nor Initialized is indicated
2842 * by the firmware, keep waiting till we exhaust our
2843 * timeout ... and then retry if we haven't exhausted
2844 * our retries ...
2845 */
2846 pcie_fw = t4_read_reg(adap, MA_PCIE_FW);
2847 if (!(pcie_fw & (FW_PCIE_FW_ERR|FW_PCIE_FW_INIT))) {
2848 if (waiting <= 0) {
2849 if (retries-- > 0)
2850 goto retry;
2851
2852 return -ETIMEDOUT;
2853 }
2854 continue;
2855 }
2856
2857 /*
2858 * We either have an Error or Initialized condition
2859 * report errors preferentially.
2860 */
2861 if (state) {
2862 if (pcie_fw & FW_PCIE_FW_ERR)
2863 *state = DEV_STATE_ERR;
2864 else if (pcie_fw & FW_PCIE_FW_INIT)
2865 *state = DEV_STATE_INIT;
2866 }
2867
2868 /*
2869 * If we arrived before a Master PF was selected and
2870 * there's now a valid Master PF, grab its identity
2871 * for our caller.
2872 */
2873 if (master_mbox == FW_PCIE_FW_MASTER_MASK &&
2874 (pcie_fw & FW_PCIE_FW_MASTER_VLD))
2875 master_mbox = FW_PCIE_FW_MASTER_GET(pcie_fw);
2876 break;
2877 }
2878 }
2879
2880 return master_mbox;
2881}
2882
2883/**
2884 * t4_fw_bye - end communication with FW
2885 * @adap: the adapter
2886 * @mbox: mailbox to use for the FW command
2887 *
2888 * Issues a command to terminate communication with FW.
2889 */
2890int t4_fw_bye(struct adapter *adap, unsigned int mbox)
2891{
2892 struct fw_bye_cmd c;
2893
2894 memset(&c, 0, sizeof(c));
2895 INIT_CMD(c, BYE, WRITE);
2896 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2897}
2898
2899/**
2900 * t4_early_init - ask FW to initialize the device
2901 * @adap: the adapter
2902 * @mbox: mailbox to use for the FW command
2903 *
2904 * Issues a command to FW to partially initialize the device. This
2905 * performs initialization that generally doesn't depend on user input.
2906 */
2907int t4_early_init(struct adapter *adap, unsigned int mbox)
2908{
2909 struct fw_initialize_cmd c;
2910
2911 memset(&c, 0, sizeof(c));
2912 INIT_CMD(c, INITIALIZE, WRITE);
2913 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2914}
2915
2916/**
2917 * t4_fw_reset - issue a reset to FW
2918 * @adap: the adapter
2919 * @mbox: mailbox to use for the FW command
2920 * @reset: specifies the type of reset to perform
2921 *
2922 * Issues a reset command of the specified type to FW.
2923 */
2924int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset)
2925{
2926 struct fw_reset_cmd c;
2927
2928 memset(&c, 0, sizeof(c));
2929 INIT_CMD(c, RESET, WRITE);
2930 c.val = htonl(reset);
2931 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2932}
2933
2934/**
2935 * t4_fw_halt - issue a reset/halt to FW and put uP into RESET
2936 * @adap: the adapter
2937 * @mbox: mailbox to use for the FW RESET command (if desired)
2938 * @force: force uP into RESET even if FW RESET command fails
2939 *
2940 * Issues a RESET command to firmware (if desired) with a HALT indication
2941 * and then puts the microprocessor into RESET state. The RESET command
2942 * will only be issued if a legitimate mailbox is provided (mbox <=
2943 * FW_PCIE_FW_MASTER_MASK).
2944 *
2945 * This is generally used in order for the host to safely manipulate the
2946 * adapter without fear of conflicting with whatever the firmware might
2947 * be doing. The only way out of this state is to RESTART the firmware
2948 * ...
2949 */
2950 static int t4_fw_halt(struct adapter *adap, unsigned int mbox, int force)
2951{
2952 int ret = 0;
2953
2954 /*
2955 * If a legitimate mailbox is provided, issue a RESET command
2956 * with a HALT indication.
2957 */
2958 if (mbox <= FW_PCIE_FW_MASTER_MASK) {
2959 struct fw_reset_cmd c;
2960
2961 memset(&c, 0, sizeof(c));
2962 INIT_CMD(c, RESET, WRITE);
2963 c.val = htonl(PIORST | PIORSTMODE);
2964 c.halt_pkd = htonl(FW_RESET_CMD_HALT(1U));
2965 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2966 }
2967
2968 /*
2969 * Normally we won't complete the operation if the firmware RESET
2970 * command fails but if our caller insists we'll go ahead and put the
2971 * uP into RESET. This can be useful if the firmware is hung or even
2972 * missing ... We'll have to take the risk of putting the uP into
2973 * RESET without the cooperation of firmware in that case.
2974 *
2975 * We also force the firmware's HALT flag to be on in case we bypassed
2976 * the firmware RESET command above or we're dealing with old firmware
2977 * which doesn't have the HALT capability. This will serve as a flag
2978 * for the incoming firmware to know that it's coming out of a HALT
2979 * rather than a RESET ... if it's new enough to understand that ...
2980 */
2981 if (ret == 0 || force) {
2982 t4_set_reg_field(adap, CIM_BOOT_CFG, UPCRST, UPCRST);
2983 t4_set_reg_field(adap, PCIE_FW, FW_PCIE_FW_HALT,
2984 FW_PCIE_FW_HALT);
2985 }
2986
2987 /*
2988 * And we always return the result of the firmware RESET command
2989 * even when we force the uP into RESET ...
2990 */
2991 return ret;
2992}
2993
2994/**
2995 * t4_fw_restart - restart the firmware by taking the uP out of RESET
2996 * @adap: the adapter
2997 * @reset: if we want to do a RESET to restart things
2998 *
2999 * Restart firmware previously halted by t4_fw_halt(). On successful
3000 * return the previous PF Master remains as the new PF Master and there
3001 * is no need to issue a new HELLO command, etc.
3002 *
3003 * We do this in two ways:
3004 *
3005 * 1. If we're dealing with newer firmware we'll simply want to take
3006 * the chip's microprocessor out of RESET. This will cause the
3007 * firmware to start up from its start vector. And then we'll loop
3008 * until the firmware indicates it's started again (PCIE_FW.HALT
3009 * reset to 0) or we timeout.
3010 *
3011 * 2. If we're dealing with older firmware then we'll need to RESET
3012 * the chip since older firmware won't recognize the PCIE_FW.HALT
3013 * flag and automatically RESET itself on startup.
3014 */
3015 static int t4_fw_restart(struct adapter *adap, unsigned int mbox, int reset)
3016{
3017 if (reset) {
3018 /*
3019 * Since we're directing the RESET instead of the firmware
3020 * doing it automatically, we need to clear the PCIE_FW.HALT
3021 * bit.
3022 */
3023 t4_set_reg_field(adap, PCIE_FW, FW_PCIE_FW_HALT, 0);
3024
3025 /*
3026 * If we've been given a valid mailbox, first try to get the
3027 * firmware to do the RESET. If that works, great and we can
3028 * return success. Otherwise, if we haven't been given a
3029 * valid mailbox or the RESET command failed, fall back to
3030 * hitting the chip with a hammer.
3031 */
3032 if (mbox <= FW_PCIE_FW_MASTER_MASK) {
3033 t4_set_reg_field(adap, CIM_BOOT_CFG, UPCRST, 0);
3034 msleep(100);
3035 if (t4_fw_reset(adap, mbox,
3036 PIORST | PIORSTMODE) == 0)
3037 return 0;
3038 }
3039
3040 t4_write_reg(adap, PL_RST, PIORST | PIORSTMODE);
3041 msleep(2000);
3042 } else {
3043 int ms;
3044
3045 t4_set_reg_field(adap, CIM_BOOT_CFG, UPCRST, 0);
3046 for (ms = 0; ms < FW_CMD_MAX_TIMEOUT; ) {
3047 if (!(t4_read_reg(adap, PCIE_FW) & FW_PCIE_FW_HALT))
3048 return 0;
3049 msleep(100);
3050 ms += 100;
3051 }
3052 return -ETIMEDOUT;
3053 }
3054 return 0;
3055}
3056
3057/**
3058 * t4_fw_upgrade - perform all of the steps necessary to upgrade FW
3059 * @adap: the adapter
3060 * @mbox: mailbox to use for the FW RESET command (if desired)
3061 * @fw_data: the firmware image to write
3062 * @size: image size
3063 * @force: force upgrade even if firmware doesn't cooperate
3064 *
3065 * Perform all of the steps necessary for upgrading an adapter's
3066 * firmware image. Normally this requires the cooperation of the
3067 * existing firmware in order to halt all existing activities
3068 * but if an invalid mailbox token is passed in we skip that step
3069 * (though we'll still put the adapter microprocessor into RESET in
3070 * that case).
3071 *
3072 * On successful return the new firmware will have been loaded and
3073 * the adapter will have been fully RESET losing all previous setup
3074 * state. On unsuccessful return the adapter may be completely hosed ...
3075 * positive errno indicates that the adapter is ~probably~ intact; a
3076 * negative errno indicates that things are looking bad ...
3077 */
3078int t4_fw_upgrade(struct adapter *adap, unsigned int mbox,
3079 const u8 *fw_data, unsigned int size, int force)
3080{
3081 const struct fw_hdr *fw_hdr = (const struct fw_hdr *)fw_data;
3082 int reset, ret;
3083
3084 ret = t4_fw_halt(adap, mbox, force);
3085 if (ret < 0 && !force)
3086 return ret;
3087
3088 ret = t4_load_fw(adap, fw_data, size);
3089 if (ret < 0)
3090 return ret;
3091
3092 /*
3093 * Older versions of the firmware don't understand the new
3094 * PCIE_FW.HALT flag and so won't know to perform a RESET when they
3095 * restart. So for newly loaded older firmware we'll have to do the
3096 * RESET for it so it starts up on a clean slate. We can tell if
3097 * the newly loaded firmware will handle this right by checking
3098 * its header flags to see if it advertises the capability.
3099 */
3100 reset = ((ntohl(fw_hdr->flags) & FW_HDR_FLAGS_RESET_HALT) == 0);
3101 return t4_fw_restart(adap, mbox, reset);
3102}
3103
3104/**
3105 * t4_fixup_host_params - fix up host-dependent parameters
3106 * @adap: the adapter
3107 * @page_size: the host's Base Page Size
3108 * @cache_line_size: the host's Cache Line Size
3109 *
3110 * Various registers in T4 contain values which are dependent on the
3111 * host's Base Page and Cache Line Sizes. This function will fix all of
3112 * those registers with the appropriate values as passed in ...
3113 */
3114int t4_fixup_host_params(struct adapter *adap, unsigned int page_size,
3115 unsigned int cache_line_size)
3116{
3117 unsigned int page_shift = fls(page_size) - 1;
3118 unsigned int sge_hps = page_shift - 10;
3119 unsigned int stat_len = cache_line_size > 64 ? 128 : 64;
3120 unsigned int fl_align = cache_line_size < 32 ? 32 : cache_line_size;
3121 unsigned int fl_align_log = fls(fl_align) - 1;
3122
3123 t4_write_reg(adap, SGE_HOST_PAGE_SIZE,
3124 HOSTPAGESIZEPF0(sge_hps) |
3125 HOSTPAGESIZEPF1(sge_hps) |
3126 HOSTPAGESIZEPF2(sge_hps) |
3127 HOSTPAGESIZEPF3(sge_hps) |
3128 HOSTPAGESIZEPF4(sge_hps) |
3129 HOSTPAGESIZEPF5(sge_hps) |
3130 HOSTPAGESIZEPF6(sge_hps) |
3131 HOSTPAGESIZEPF7(sge_hps));
3132
3133 if (is_t4(adap->params.chip)) {
3134 t4_set_reg_field(adap, SGE_CONTROL,
3135 INGPADBOUNDARY_MASK |
3136 EGRSTATUSPAGESIZE_MASK,
3137 INGPADBOUNDARY(fl_align_log - 5) |
3138 EGRSTATUSPAGESIZE(stat_len != 64));
3139 } else {
3140 /* T5 introduced the separation of the Free List Padding and
3141 * Packing Boundaries. Thus, we can select a smaller Padding
3142 * Boundary to avoid uselessly chewing up PCIe Link and Memory
3143 * Bandwidth, and use a Packing Boundary which is large enough
3144 * to avoid false sharing between CPUs, etc.
3145 *
3146 * For the PCI Link, the smaller the Padding Boundary the
3147 * better. For the Memory Controller, a smaller Padding
3148 * Boundary is better until we cross under the Memory Line
3149 * Size (the minimum unit of transfer to/from Memory). If we
3150 * have a Padding Boundary which is smaller than the Memory
3151 * Line Size, that'll involve a Read-Modify-Write cycle on the
3152 * Memory Controller which is never good. For T5 the smallest
3153 * Padding Boundary which we can select is 32 bytes which is
3154 * larger than any known Memory Controller Line Size so we'll
3155 * use that.
3156 *
3157 * T5 has a different interpretation of the "0" value for the
3158 * Packing Boundary. This corresponds to 16 bytes instead of
3159 * the expected 32 bytes. We never have a Packing Boundary
3160 * less than 32 bytes so we can't use that special value but
3161 * on the other hand, if we wanted 32 bytes, the best we can
3162 * really do is 64 bytes.
3163 */
3164 if (fl_align <= 32) {
3165 fl_align = 64;
3166 fl_align_log = 6;
3167 }
3168 t4_set_reg_field(adap, SGE_CONTROL,
3169 INGPADBOUNDARY_MASK |
3170 EGRSTATUSPAGESIZE_MASK,
3171 INGPADBOUNDARY(INGPCIEBOUNDARY_32B_X) |
3172 EGRSTATUSPAGESIZE(stat_len != 64));
3173 t4_set_reg_field(adap, SGE_CONTROL2_A,
3174 INGPACKBOUNDARY_V(INGPACKBOUNDARY_M),
3175 INGPACKBOUNDARY_V(fl_align_log -
3176 INGPACKBOUNDARY_SHIFT_X));
3177 }
3178 /*
3179 * Adjust various SGE Free List Host Buffer Sizes.
3180 *
3181 * This is something of a crock since we're using fixed indices into
3182 * the array which are also known by the sge.c code and the T4
3183 * Firmware Configuration File. We need to come up with a much better
3184 * approach to managing this array. For now, the first four entries
3185 * are:
3186 *
3187 * 0: Host Page Size
3188 * 1: 64KB
3189 * 2: Buffer size corresponding to 1500 byte MTU (unpacked mode)
3190 * 3: Buffer size corresponding to 9000 byte MTU (unpacked mode)
3191 *
3192 * For the single-MTU buffers in unpacked mode we need to include
3193 * space for the SGE Control Packet Shift, 14 byte Ethernet header,
3194 * possible 4 byte VLAN tag, all rounded up to the next Ingress Packet
3195 * Padding boundry. All of these are accommodated in the Factory
3196 * Default Firmware Configuration File but we need to adjust it for
3197 * this host's cache line size.
3198 */
3199 t4_write_reg(adap, SGE_FL_BUFFER_SIZE0, page_size);
3200 t4_write_reg(adap, SGE_FL_BUFFER_SIZE2,
3201 (t4_read_reg(adap, SGE_FL_BUFFER_SIZE2) + fl_align-1)
3202 & ~(fl_align-1));
3203 t4_write_reg(adap, SGE_FL_BUFFER_SIZE3,
3204 (t4_read_reg(adap, SGE_FL_BUFFER_SIZE3) + fl_align-1)
3205 & ~(fl_align-1));
3206
3207 t4_write_reg(adap, ULP_RX_TDDP_PSZ, HPZ0(page_shift - 12));
3208
3209 return 0;
3210}
3211
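/* Illustrative usage sketch (not part of the driver proper): a caller
 * would typically pass the kernel's own page and cache line sizes, e.g.
 *
 *	ret = t4_fixup_host_params(adap, PAGE_SIZE, L1_CACHE_BYTES);
 *	if (ret < 0)
 *		return ret;
 */
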
/**
 * t4_fw_initialize - ask FW to initialize the device
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 *
 * Issues a command to FW to partially initialize the device.  This
 * performs initialization that generally doesn't depend on user input.
 */
int t4_fw_initialize(struct adapter *adap, unsigned int mbox)
{
	struct fw_initialize_cmd c;

	memset(&c, 0, sizeof(c));
	INIT_CMD(c, INITIALIZE, WRITE);
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}

/**
 * t4_query_params - query FW or device parameters
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @pf: the PF
 * @vf: the VF
 * @nparams: the number of parameters
 * @params: the parameter names
 * @val: the parameter values
 *
 * Reads the value of FW or device parameters.  Up to 7 parameters can be
 * queried at once.
 */
int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
		    unsigned int vf, unsigned int nparams, const u32 *params,
		    u32 *val)
{
	int i, ret;
	struct fw_params_cmd c;
	__be32 *p = &c.param[0].mnem;

	if (nparams > 7)
		return -EINVAL;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn = htonl(FW_CMD_OP_V(FW_PARAMS_CMD) | FW_CMD_REQUEST_F |
			    FW_CMD_READ_F | FW_PARAMS_CMD_PFN(pf) |
			    FW_PARAMS_CMD_VFN(vf));
	c.retval_len16 = htonl(FW_LEN16(c));
	for (i = 0; i < nparams; i++, p += 2)
		*p = htonl(*params++);

	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
	if (ret == 0)
		for (i = 0, p = &c.param[0].val; i < nparams; i++, p += 2)
			*val++ = ntohl(*p);
	return ret;
}

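/* Illustrative sketch (not part of the driver proper): reading one
 * firmware "device" parameter.  The mnemonic/index encoding macros
 * shown here (FW_PARAMS_MNEM, FW_PARAMS_PARAM_X and friends from
 * t4fw_api.h) are the usual way callers build the parameter word,
 * though the exact macro names vary across driver generations.
 * t4_set_params() below takes the same encoding for writes.
 *
 *	u32 param, val;
 *	int ret;
 *
 *	param = FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
 *		FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_PORTVEC);
 *	ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 1,
 *			      &param, &val);
 *	if (ret == 0)
 *		adap->params.portvec = val;
 */
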
/**
 * t4_set_params_nosleep - sets FW or device parameters
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @pf: the PF
 * @vf: the VF
 * @nparams: the number of parameters
 * @params: the parameter names
 * @val: the parameter values
 *
 * Sets the value of FW or device parameters.  Up to 7 parameters can be
 * specified at once.  Unlike t4_set_params(), this routine never sleeps.
 */
int t4_set_params_nosleep(struct adapter *adap, unsigned int mbox,
			  unsigned int pf, unsigned int vf,
			  unsigned int nparams, const u32 *params,
			  const u32 *val)
{
	struct fw_params_cmd c;
	__be32 *p = &c.param[0].mnem;

	if (nparams > 7)
		return -EINVAL;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_PARAMS_CMD) |
				  FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
				  FW_PARAMS_CMD_PFN(pf) |
				  FW_PARAMS_CMD_VFN(vf));
	c.retval_len16 = cpu_to_be32(FW_LEN16(c));

	while (nparams--) {
		*p++ = cpu_to_be32(*params++);
		*p++ = cpu_to_be32(*val++);
	}

	return t4_wr_mbox_ns(adap, mbox, &c, sizeof(c), NULL);
}

/**
 * t4_set_params - sets FW or device parameters
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @pf: the PF
 * @vf: the VF
 * @nparams: the number of parameters
 * @params: the parameter names
 * @val: the parameter values
 *
 * Sets the value of FW or device parameters.  Up to 7 parameters can be
 * specified at once.
 */
int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
		  unsigned int vf, unsigned int nparams, const u32 *params,
		  const u32 *val)
{
	struct fw_params_cmd c;
	__be32 *p = &c.param[0].mnem;

	if (nparams > 7)
		return -EINVAL;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn = htonl(FW_CMD_OP_V(FW_PARAMS_CMD) | FW_CMD_REQUEST_F |
			    FW_CMD_WRITE_F | FW_PARAMS_CMD_PFN(pf) |
			    FW_PARAMS_CMD_VFN(vf));
	c.retval_len16 = htonl(FW_LEN16(c));
	while (nparams--) {
		*p++ = htonl(*params++);
		*p++ = htonl(*val++);
	}

	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}

/**
 * t4_cfg_pfvf - configure PF/VF resource limits
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @pf: the PF being configured
 * @vf: the VF being configured
 * @txq: the max number of egress queues
 * @txq_eth_ctrl: the max number of egress Ethernet or control queues
 * @rxqi: the max number of interrupt-capable ingress queues
 * @rxq: the max number of interruptless ingress queues
 * @tc: the PCI traffic class
 * @vi: the max number of virtual interfaces
 * @cmask: the channel access rights mask for the PF/VF
 * @pmask: the port access rights mask for the PF/VF
 * @nexact: the maximum number of exact MPS filters
 * @rcaps: read capabilities
 * @wxcaps: write/execute capabilities
 *
 * Configures resource limits and capabilities for a physical or virtual
 * function.
 */
int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf,
		unsigned int vf, unsigned int txq, unsigned int txq_eth_ctrl,
		unsigned int rxqi, unsigned int rxq, unsigned int tc,
		unsigned int vi, unsigned int cmask, unsigned int pmask,
		unsigned int nexact, unsigned int rcaps, unsigned int wxcaps)
{
	struct fw_pfvf_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn = htonl(FW_CMD_OP_V(FW_PFVF_CMD) | FW_CMD_REQUEST_F |
			    FW_CMD_WRITE_F | FW_PFVF_CMD_PFN(pf) |
			    FW_PFVF_CMD_VFN(vf));
	c.retval_len16 = htonl(FW_LEN16(c));
	c.niqflint_niq = htonl(FW_PFVF_CMD_NIQFLINT(rxqi) |
			       FW_PFVF_CMD_NIQ(rxq));
	c.type_to_neq = htonl(FW_PFVF_CMD_CMASK(cmask) |
			      FW_PFVF_CMD_PMASK(pmask) |
			      FW_PFVF_CMD_NEQ(txq));
	c.tc_to_nexactf = htonl(FW_PFVF_CMD_TC(tc) | FW_PFVF_CMD_NVI(vi) |
				FW_PFVF_CMD_NEXACTF(nexact));
	c.r_caps_to_nethctrl = htonl(FW_PFVF_CMD_R_CAPS(rcaps) |
				     FW_PFVF_CMD_WX_CAPS(wxcaps) |
				     FW_PFVF_CMD_NETHCTRL(txq_eth_ctrl));
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}

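/* Hypothetical sketch (all resource values invented for illustration):
 * granting a PF a modest pool of queues and interfaces.  Real numbers
 * come from the driver's provisioning policy or the Firmware
 * Configuration File; pmask, rcaps and wxcaps stand in for values a
 * real caller would compute.
 *
 *	ret = t4_cfg_pfvf(adap, adap->mbox, pf, 0,
 *			  64,	 // txq
 *			  32,	 // txq_eth_ctrl
 *			  64,	 // rxqi
 *			  0,	 // rxq
 *			  0,	 // tc
 *			  4,	 // vi
 *			  FW_PFVF_CMD_CMASK_MASK,
 *			  pmask, 16, rcaps, wxcaps);
 */
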
/**
 * t4_alloc_vi - allocate a virtual interface
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @port: physical port associated with the VI
 * @pf: the PF owning the VI
 * @vf: the VF owning the VI
 * @nmac: number of MAC addresses needed (1 to 5)
 * @mac: the MAC addresses of the VI
 * @rss_size: size of RSS table slice associated with this VI
 *
 * Allocates a virtual interface for the given physical port.  If @mac is
 * not %NULL it contains the MAC addresses of the VI as assigned by FW.
 * @mac should be large enough to hold @nmac Ethernet addresses, they are
 * stored consecutively so the space needed is @nmac * 6 bytes.
 * Returns a negative error number or the non-negative VI id.
 */
int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
		unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac,
		unsigned int *rss_size)
{
	int ret;
	struct fw_vi_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn = htonl(FW_CMD_OP_V(FW_VI_CMD) | FW_CMD_REQUEST_F |
			    FW_CMD_WRITE_F | FW_CMD_EXEC_F |
			    FW_VI_CMD_PFN(pf) | FW_VI_CMD_VFN(vf));
	c.alloc_to_len16 = htonl(FW_VI_CMD_ALLOC | FW_LEN16(c));
	c.portid_pkd = FW_VI_CMD_PORTID(port);
	c.nmac = nmac - 1;

	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
	if (ret)
		return ret;

	if (mac) {
		memcpy(mac, c.mac, sizeof(c.mac));
		/* each case deliberately falls through to copy the
		 * remaining addresses as well
		 */
		switch (nmac) {
		case 5:
			memcpy(mac + 24, c.nmac3, sizeof(c.nmac3));
			/* fall through */
		case 4:
			memcpy(mac + 18, c.nmac2, sizeof(c.nmac2));
			/* fall through */
		case 3:
			memcpy(mac + 12, c.nmac1, sizeof(c.nmac1));
			/* fall through */
		case 2:
			memcpy(mac + 6, c.nmac0, sizeof(c.nmac0));
		}
	}
	if (rss_size)
		*rss_size = FW_VI_CMD_RSSSIZE_GET(ntohs(c.rsssize_pkd));
	return FW_VI_CMD_VIID_GET(ntohs(c.type_viid));
}

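/* Illustrative sketch (not part of the driver proper): allocating a VI
 * with a single MAC address on physical port 0, much as t4_port_init()
 * at the bottom of this file does for each port.
 *
 *	u8 addr[ETH_ALEN];
 *	unsigned int rss_size;
 *	int viid;
 *
 *	viid = t4_alloc_vi(adap, mbox, 0, pf, vf, 1, addr, &rss_size);
 *	if (viid < 0)
 *		return viid;	// negative errno on failure
 */
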
/**
 * t4_set_rxmode - set Rx properties of a virtual interface
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @viid: the VI id
 * @mtu: the new MTU or -1
 * @promisc: 1 to enable promiscuous mode, 0 to disable it, -1 no change
 * @all_multi: 1 to enable all-multi mode, 0 to disable it, -1 no change
 * @bcast: 1 to enable broadcast Rx, 0 to disable it, -1 no change
 * @vlanex: 1 to enable HW VLAN extraction, 0 to disable it, -1 no change
 * @sleep_ok: if true we may sleep while awaiting command completion
 *
 * Sets Rx properties of a virtual interface.
 */
int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
		  int mtu, int promisc, int all_multi, int bcast, int vlanex,
		  bool sleep_ok)
{
	struct fw_vi_rxmode_cmd c;

	/* convert to FW values */
	if (mtu < 0)
		mtu = FW_RXMODE_MTU_NO_CHG;
	if (promisc < 0)
		promisc = FW_VI_RXMODE_CMD_PROMISCEN_MASK;
	if (all_multi < 0)
		all_multi = FW_VI_RXMODE_CMD_ALLMULTIEN_MASK;
	if (bcast < 0)
		bcast = FW_VI_RXMODE_CMD_BROADCASTEN_MASK;
	if (vlanex < 0)
		vlanex = FW_VI_RXMODE_CMD_VLANEXEN_MASK;

	memset(&c, 0, sizeof(c));
	c.op_to_viid = htonl(FW_CMD_OP_V(FW_VI_RXMODE_CMD) | FW_CMD_REQUEST_F |
			     FW_CMD_WRITE_F | FW_VI_RXMODE_CMD_VIID(viid));
	c.retval_len16 = htonl(FW_LEN16(c));
	c.mtu_to_vlanexen = htonl(FW_VI_RXMODE_CMD_MTU(mtu) |
				  FW_VI_RXMODE_CMD_PROMISCEN(promisc) |
				  FW_VI_RXMODE_CMD_ALLMULTIEN(all_multi) |
				  FW_VI_RXMODE_CMD_BROADCASTEN(bcast) |
				  FW_VI_RXMODE_CMD_VLANEXEN(vlanex));
	return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
}

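/* Illustrative sketch: the -1 "no change" convention lets a caller flip
 * a single Rx property while leaving the rest untouched, e.g. enabling
 * promiscuous mode only:
 *
 *	ret = t4_set_rxmode(adap, mbox, viid, -1, 1, -1, -1, -1, true);
 */
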
/**
 * t4_alloc_mac_filt - allocates exact-match filters for MAC addresses
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @viid: the VI id
 * @free: if true any existing filters for this VI id are first removed
 * @naddr: the number of MAC addresses to allocate filters for (up to 7)
 * @addr: the MAC address(es)
 * @idx: where to store the index of each allocated filter
 * @hash: pointer to hash address filter bitmap
 * @sleep_ok: call is allowed to sleep
 *
 * Allocates an exact-match filter for each of the supplied addresses and
 * sets it to the corresponding address.  If @idx is not %NULL it should
 * have at least @naddr entries, each of which will be set to the index of
 * the filter allocated for the corresponding MAC address.  If a filter
 * could not be allocated for an address its index is set to 0xffff.
 * If @hash is not %NULL addresses that fail to allocate an exact filter
 * are hashed and update the hash filter bitmap pointed at by @hash.
 *
 * Returns a negative error number or the number of filters allocated.
 */
int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox,
		      unsigned int viid, bool free, unsigned int naddr,
		      const u8 **addr, u16 *idx, u64 *hash, bool sleep_ok)
{
	int i, ret;
	struct fw_vi_mac_cmd c;
	struct fw_vi_mac_exact *p;
	unsigned int max_naddr = is_t4(adap->params.chip) ?
				       NUM_MPS_CLS_SRAM_L_INSTANCES :
				       NUM_MPS_T5_CLS_SRAM_L_INSTANCES;

	if (naddr > 7)
		return -EINVAL;

	memset(&c, 0, sizeof(c));
	c.op_to_viid = htonl(FW_CMD_OP_V(FW_VI_MAC_CMD) | FW_CMD_REQUEST_F |
			     FW_CMD_WRITE_F | (free ? FW_CMD_EXEC_F : 0) |
			     FW_VI_MAC_CMD_VIID(viid));
	c.freemacs_to_len16 = htonl(FW_VI_MAC_CMD_FREEMACS(free) |
				    FW_CMD_LEN16_V((naddr + 2) / 2));

	for (i = 0, p = c.u.exact; i < naddr; i++, p++) {
		p->valid_to_idx = htons(FW_VI_MAC_CMD_VALID |
					FW_VI_MAC_CMD_IDX(FW_VI_MAC_ADD_MAC));
		memcpy(p->macaddr, addr[i], sizeof(p->macaddr));
	}

	ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok);
	if (ret)
		return ret;

	for (i = 0, p = c.u.exact; i < naddr; i++, p++) {
		u16 index = FW_VI_MAC_CMD_IDX_GET(ntohs(p->valid_to_idx));

		if (idx)
			idx[i] = index >= max_naddr ? 0xffff : index;
		if (index < max_naddr)
			ret++;
		else if (hash)
			*hash |= (1ULL << hash_mac_addr(addr[i]));
	}
	return ret;
}

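/* Illustrative sketch (mac0/mac1 stand in for real station addresses):
 * programming two unicast addresses and falling back to the inexact
 * hash for any that don't get an exact filter (idx[i] comes back as
 * 0xffff in that case).
 *
 *	const u8 *maclist[2] = { mac0, mac1 };
 *	u16 idx[2];
 *	u64 hash = 0;
 *	int nfilters;
 *
 *	nfilters = t4_alloc_mac_filt(adap, mbox, viid, false, 2,
 *				     maclist, idx, &hash, true);
 *	if (nfilters >= 0 && hash)
 *		t4_set_addr_hash(adap, mbox, viid, true, hash, true);
 */
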
/**
 * t4_change_mac - modifies the exact-match filter for a MAC address
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @viid: the VI id
 * @idx: index of existing filter for old value of MAC address, or -1
 * @addr: the new MAC address value
 * @persist: whether a new MAC allocation should be persistent
 * @add_smt: if true also add the address to the HW SMT
 *
 * Modifies an exact-match filter and sets it to the new MAC address.
 * Note that in general it is not possible to modify the value of a given
 * filter so the generic way to modify an address filter is to free the one
 * being used by the old address value and allocate a new filter for the
 * new address value.  @idx can be -1 if the address is a new addition.
 *
 * Returns a negative error number or the index of the filter with the new
 * MAC value.
 */
int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
		  int idx, const u8 *addr, bool persist, bool add_smt)
{
	int ret, mode;
	struct fw_vi_mac_cmd c;
	struct fw_vi_mac_exact *p = c.u.exact;
	unsigned int max_mac_addr = is_t4(adap->params.chip) ?
				    NUM_MPS_CLS_SRAM_L_INSTANCES :
				    NUM_MPS_T5_CLS_SRAM_L_INSTANCES;

	if (idx < 0)		/* new allocation */
		idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC;
	mode = add_smt ? FW_VI_MAC_SMT_AND_MPSTCAM : FW_VI_MAC_MPS_TCAM_ENTRY;

	memset(&c, 0, sizeof(c));
	c.op_to_viid = htonl(FW_CMD_OP_V(FW_VI_MAC_CMD) | FW_CMD_REQUEST_F |
			     FW_CMD_WRITE_F | FW_VI_MAC_CMD_VIID(viid));
	c.freemacs_to_len16 = htonl(FW_CMD_LEN16_V(1));
	p->valid_to_idx = htons(FW_VI_MAC_CMD_VALID |
				FW_VI_MAC_CMD_SMAC_RESULT(mode) |
				FW_VI_MAC_CMD_IDX(idx));
	memcpy(p->macaddr, addr, sizeof(p->macaddr));

	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
	if (ret == 0) {
		ret = FW_VI_MAC_CMD_IDX_GET(ntohs(p->valid_to_idx));
		if (ret >= max_mac_addr)
			ret = -ENOMEM;
	}
	return ret;
}

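/* Illustrative sketch: installing a port's primary MAC address for the
 * first time (idx == -1) and caching the returned filter index so the
 * same entry can be updated later; cxgb4 keeps this in
 * pi->xact_addr_filt.
 *
 *	ret = t4_change_mac(adap, mbox, pi->viid, -1,
 *			    dev->dev_addr, true, true);
 *	if (ret >= 0)
 *		pi->xact_addr_filt = ret;
 */
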
/**
 * t4_set_addr_hash - program the MAC inexact-match hash filter
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @viid: the VI id
 * @ucast: whether the hash filter should also match unicast addresses
 * @vec: the value to be written to the hash filter
 * @sleep_ok: call is allowed to sleep
 *
 * Sets the 64-bit inexact-match hash filter for a virtual interface.
 */
int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid,
		     bool ucast, u64 vec, bool sleep_ok)
{
	struct fw_vi_mac_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_viid = htonl(FW_CMD_OP_V(FW_VI_MAC_CMD) | FW_CMD_REQUEST_F |
			     FW_CMD_WRITE_F | FW_VI_ENABLE_CMD_VIID(viid));
	c.freemacs_to_len16 = htonl(FW_VI_MAC_CMD_HASHVECEN |
				    FW_VI_MAC_CMD_HASHUNIEN(ucast) |
				    FW_CMD_LEN16_V(1));
	c.u.hash.hashvec = cpu_to_be64(vec);
	return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
}

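/* Illustrative sketch: building the 64-bit vector one address at a time
 * with hash_mac_addr() (the same helper t4_alloc_mac_filt() uses for
 * its overflow case) before programming it:
 *
 *	u64 vec = 0;
 *
 *	vec |= 1ULL << hash_mac_addr(addr);
 *	ret = t4_set_addr_hash(adap, mbox, viid, false, vec, true);
 */
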
/**
 * t4_enable_vi_params - enable/disable a virtual interface
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @viid: the VI id
 * @rx_en: 1=enable Rx, 0=disable Rx
 * @tx_en: 1=enable Tx, 0=disable Tx
 * @dcb_en: 1=enable delivery of Data Center Bridging messages.
 *
 * Enables/disables a virtual interface.  Note that setting DCB Enable
 * only makes sense when enabling a Virtual Interface ...
 */
int t4_enable_vi_params(struct adapter *adap, unsigned int mbox,
			unsigned int viid, bool rx_en, bool tx_en, bool dcb_en)
{
	struct fw_vi_enable_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_viid = htonl(FW_CMD_OP_V(FW_VI_ENABLE_CMD) | FW_CMD_REQUEST_F |
			     FW_CMD_EXEC_F | FW_VI_ENABLE_CMD_VIID(viid));

	c.ien_to_len16 = htonl(FW_VI_ENABLE_CMD_IEN(rx_en) |
			       FW_VI_ENABLE_CMD_EEN(tx_en) | FW_LEN16(c) |
			       FW_VI_ENABLE_CMD_DCB_INFO(dcb_en));
	return t4_wr_mbox_ns(adap, mbox, &c, sizeof(c), NULL);
}

/**
 * t4_enable_vi - enable/disable a virtual interface
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @viid: the VI id
 * @rx_en: 1=enable Rx, 0=disable Rx
 * @tx_en: 1=enable Tx, 0=disable Tx
 *
 * Enables/disables a virtual interface.
 */
int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid,
		 bool rx_en, bool tx_en)
{
	return t4_enable_vi_params(adap, mbox, viid, rx_en, tx_en, 0);
}

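/* Illustrative sketch: bringing a port's VI up in both directions, as a
 * driver would from its ndo_open handler:
 *
 *	ret = t4_enable_vi(adap, mbox, pi->viid, true, true);
 */
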
/**
 * t4_identify_port - identify a VI's port by blinking its LED
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @viid: the VI id
 * @nblinks: how many times to blink LED at 2.5 Hz
 *
 * Identifies a VI's port by blinking its LED.
 */
int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid,
		     unsigned int nblinks)
{
	struct fw_vi_enable_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_viid = htonl(FW_CMD_OP_V(FW_VI_ENABLE_CMD) | FW_CMD_REQUEST_F |
			     FW_CMD_EXEC_F | FW_VI_ENABLE_CMD_VIID(viid));
	c.ien_to_len16 = htonl(FW_VI_ENABLE_CMD_LED | FW_LEN16(c));
	c.blinkdur = htons(nblinks);
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}

/**
 * t4_iq_free - free an ingress queue and its FLs
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @pf: the PF owning the queues
 * @vf: the VF owning the queues
 * @iqtype: the ingress queue type
 * @iqid: ingress queue id
 * @fl0id: FL0 queue id or 0xffff if no attached FL0
 * @fl1id: FL1 queue id or 0xffff if no attached FL1
 *
 * Frees an ingress queue and its associated FLs, if any.
 */
int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
	       unsigned int vf, unsigned int iqtype, unsigned int iqid,
	       unsigned int fl0id, unsigned int fl1id)
{
	struct fw_iq_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn = htonl(FW_CMD_OP_V(FW_IQ_CMD) | FW_CMD_REQUEST_F |
			    FW_CMD_EXEC_F | FW_IQ_CMD_PFN(pf) |
			    FW_IQ_CMD_VFN(vf));
	c.alloc_to_len16 = htonl(FW_IQ_CMD_FREE | FW_LEN16(c));
	c.type_to_iqandstindex = htonl(FW_IQ_CMD_TYPE(iqtype));
	c.iqid = htons(iqid);
	c.fl0id = htons(fl0id);
	c.fl1id = htons(fl1id);
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}

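/* Illustrative sketch: freeing an interrupt-capable ingress queue and
 * its optional free list, with 0xffff marking an absent FL.
 * FW_IQ_TYPE_FL_INT_CAP is the usual type for NIC Rx queues; iq/fl
 * stand in for the caller's queue state.
 *
 *	ret = t4_iq_free(adap, mbox, pf, vf, FW_IQ_TYPE_FL_INT_CAP,
 *			 iq->cntxt_id, fl ? fl->cntxt_id : 0xffff, 0xffff);
 */
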
/**
 * t4_eth_eq_free - free an Ethernet egress queue
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @pf: the PF owning the queue
 * @vf: the VF owning the queue
 * @eqid: egress queue id
 *
 * Frees an Ethernet egress queue.
 */
int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
		   unsigned int vf, unsigned int eqid)
{
	struct fw_eq_eth_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn = htonl(FW_CMD_OP_V(FW_EQ_ETH_CMD) | FW_CMD_REQUEST_F |
			    FW_CMD_EXEC_F | FW_EQ_ETH_CMD_PFN(pf) |
			    FW_EQ_ETH_CMD_VFN(vf));
	c.alloc_to_len16 = htonl(FW_EQ_ETH_CMD_FREE | FW_LEN16(c));
	c.eqid_pkd = htonl(FW_EQ_ETH_CMD_EQID(eqid));
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}

/**
 * t4_ctrl_eq_free - free a control egress queue
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @pf: the PF owning the queue
 * @vf: the VF owning the queue
 * @eqid: egress queue id
 *
 * Frees a control egress queue.
 */
int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
		    unsigned int vf, unsigned int eqid)
{
	struct fw_eq_ctrl_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn = htonl(FW_CMD_OP_V(FW_EQ_CTRL_CMD) | FW_CMD_REQUEST_F |
			    FW_CMD_EXEC_F | FW_EQ_CTRL_CMD_PFN(pf) |
			    FW_EQ_CTRL_CMD_VFN(vf));
	c.alloc_to_len16 = htonl(FW_EQ_CTRL_CMD_FREE | FW_LEN16(c));
	c.cmpliqid_eqid = htonl(FW_EQ_CTRL_CMD_EQID(eqid));
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}

/**
 * t4_ofld_eq_free - free an offload egress queue
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @pf: the PF owning the queue
 * @vf: the VF owning the queue
 * @eqid: egress queue id
 *
 * Frees an offload egress queue.
 */
int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
		    unsigned int vf, unsigned int eqid)
{
	struct fw_eq_ofld_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn = htonl(FW_CMD_OP_V(FW_EQ_OFLD_CMD) | FW_CMD_REQUEST_F |
			    FW_CMD_EXEC_F | FW_EQ_OFLD_CMD_PFN(pf) |
			    FW_EQ_OFLD_CMD_VFN(vf));
	c.alloc_to_len16 = htonl(FW_EQ_OFLD_CMD_FREE | FW_LEN16(c));
	c.eqid_pkd = htonl(FW_EQ_OFLD_CMD_EQID(eqid));
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}

/**
 * t4_handle_fw_rpl - process a FW reply message
 * @adap: the adapter
 * @rpl: start of the FW message
 *
 * Processes a FW message, such as link state change messages.
 */
int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl)
{
	u8 opcode = *(const u8 *)rpl;

	if (opcode == FW_PORT_CMD) {    /* link/module state change message */
		int speed = 0, fc = 0;
		const struct fw_port_cmd *p = (void *)rpl;
		int chan = FW_PORT_CMD_PORTID_GET(ntohl(p->op_to_portid));
		int port = adap->chan_map[chan];
		struct port_info *pi = adap2pinfo(adap, port);
		struct link_config *lc = &pi->link_cfg;
		u32 stat = ntohl(p->u.info.lstatus_to_modtype);
		int link_ok = (stat & FW_PORT_CMD_LSTATUS) != 0;
		u32 mod = FW_PORT_CMD_MODTYPE_GET(stat);

		if (stat & FW_PORT_CMD_RXPAUSE)
			fc |= PAUSE_RX;
		if (stat & FW_PORT_CMD_TXPAUSE)
			fc |= PAUSE_TX;
		if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100M))
			speed = 100;
		else if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_1G))
			speed = 1000;
		else if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_10G))
			speed = 10000;
		else if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_40G))
			speed = 40000;

		if (link_ok != lc->link_ok || speed != lc->speed ||
		    fc != lc->fc) {                    /* something changed */
			lc->link_ok = link_ok;
			lc->speed = speed;
			lc->fc = fc;
			lc->supported = be16_to_cpu(p->u.info.pcap);
			t4_os_link_changed(adap, port, link_ok);
		}
		if (mod != pi->mod_type) {
			pi->mod_type = mod;
			t4_os_portmod_changed(adap, port);
		}
	}
	return 0;
}

/* Determine the PCIe link speed and width from the Link Status register. */
static void get_pci_mode(struct adapter *adapter, struct pci_params *p)
{
	u16 val;

	if (pci_is_pcie(adapter->pdev)) {
		pcie_capability_read_word(adapter->pdev, PCI_EXP_LNKSTA, &val);
		p->speed = val & PCI_EXP_LNKSTA_CLS;
		p->width = (val & PCI_EXP_LNKSTA_NLW) >> 4;
	}
}

/**
 * init_link_config - initialize a link's SW state
 * @lc: structure holding the link state
 * @caps: link capabilities
 *
 * Initializes the SW state maintained for each link, including the link's
 * capabilities and default speed/flow-control/autonegotiation settings.
 */
static void init_link_config(struct link_config *lc, unsigned int caps)
{
	lc->supported = caps;
	lc->requested_speed = 0;
	lc->speed = 0;
	lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
	if (lc->supported & FW_PORT_CAP_ANEG) {
		lc->advertising = lc->supported & ADVERT_MASK;
		lc->autoneg = AUTONEG_ENABLE;
		lc->requested_fc |= PAUSE_AUTONEG;
	} else {
		lc->advertising = 0;
		lc->autoneg = AUTONEG_DISABLE;
	}
}

#define CIM_PF_NOACCESS 0xeeeeeeee

int t4_wait_dev_ready(void __iomem *regs)
{
	u32 whoami;

	whoami = readl(regs + PL_WHOAMI);
	if (whoami != 0xffffffff && whoami != CIM_PF_NOACCESS)
		return 0;

	msleep(500);
	whoami = readl(regs + PL_WHOAMI);
	return (whoami != 0xffffffff && whoami != CIM_PF_NOACCESS ? 0 : -EIO);
}

struct flash_desc {
	u32 vendor_and_model_id;
	u32 size_mb;
};

static int get_flash_params(struct adapter *adap)
{
	/* Table for non-Numonix supported flash parts.  Numonix parts are
	 * left to the preexisting code.  All flash parts have 64KB sectors.
	 */
	static struct flash_desc supported_flash[] = {
		{ 0x150201, 4 << 20 },       /* Spansion 4MB S25FL032P */
	};

	int ret;
	u32 info;

	ret = sf1_write(adap, 1, 1, 0, SF_RD_ID);
	if (!ret)
		ret = sf1_read(adap, 3, 0, 1, &info);
	t4_write_reg(adap, SF_OP, 0);                    /* unlock SF */
	if (ret)
		return ret;

	for (ret = 0; ret < ARRAY_SIZE(supported_flash); ++ret)
		if (supported_flash[ret].vendor_and_model_id == info) {
			adap->params.sf_size = supported_flash[ret].size_mb;
			adap->params.sf_nsec =
				adap->params.sf_size / SF_SEC_SIZE;
			return 0;
		}

	if ((info & 0xff) != 0x20)             /* not a Numonix flash */
		return -EINVAL;
	info >>= 16;                           /* log2 of size */
	if (info >= 0x14 && info < 0x18)
		adap->params.sf_nsec = 1 << (info - 16);
	else if (info == 0x18)
		adap->params.sf_nsec = 64;
	else
		return -EINVAL;
	adap->params.sf_size = 1 << info;
	adap->params.sf_fw_start =
		t4_read_reg(adap, CIM_BOOT_CFG) & BOOTADDR_MASK;

	if (adap->params.sf_size < FLASH_MIN_SIZE)
		dev_warn(adap->pdev_dev, "WARNING!!! FLASH size %#x < %#x!!!\n",
			 adap->params.sf_size, FLASH_MIN_SIZE);
	return 0;
}

/**
 * t4_prep_adapter - prepare SW and HW for operation
 * @adapter: the adapter
 *
 * Initialize adapter SW state for the various HW modules, set initial
 * values for some adapter tunables, take PHYs out of reset, and
 * initialize the MDIO interface.
 */
int t4_prep_adapter(struct adapter *adapter)
{
	int ret, ver;
	uint16_t device_id;
	u32 pl_rev;

	get_pci_mode(adapter, &adapter->params.pci);
	pl_rev = G_REV(t4_read_reg(adapter, PL_REV));

	ret = get_flash_params(adapter);
	if (ret < 0) {
		dev_err(adapter->pdev_dev, "error %d identifying flash\n", ret);
		return ret;
	}

	/* Retrieve adapter's device ID
	 */
	pci_read_config_word(adapter->pdev, PCI_DEVICE_ID, &device_id);
	ver = device_id >> 12;
	adapter->params.chip = 0;
	switch (ver) {
	case CHELSIO_T4:
		adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T4, pl_rev);
		break;
	case CHELSIO_T5:
		adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T5, pl_rev);
		break;
	default:
		dev_err(adapter->pdev_dev, "Device %d is not supported\n",
			device_id);
		return -EINVAL;
	}

	init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);

	/*
	 * Default port for debugging in case we can't reach FW.
	 */
	adapter->params.nports = 1;
	adapter->params.portvec = 1;
	adapter->params.vpd.cclk = 50000;
	return 0;
}

/**
 * t4_init_tp_params - initialize adap->params.tp
 * @adap: the adapter
 *
 * Initialize various fields of the adapter's TP Parameters structure.
 */
int t4_init_tp_params(struct adapter *adap)
{
	int chan;
	u32 v;

	v = t4_read_reg(adap, TP_TIMER_RESOLUTION);
	adap->params.tp.tre = TIMERRESOLUTION_GET(v);
	adap->params.tp.dack_re = DELAYEDACKRESOLUTION_GET(v);

	/* MODQ_REQ_MAP defaults to setting queues 0-3 to chan 0-3 */
	for (chan = 0; chan < NCHAN; chan++)
		adap->params.tp.tx_modq[chan] = chan;

	/* Cache the adapter's Compressed Filter Mode and global Ingress
	 * Configuration.
	 */
	t4_read_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
			 &adap->params.tp.vlan_pri_map, 1,
			 TP_VLAN_PRI_MAP);
	t4_read_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
			 &adap->params.tp.ingress_config, 1,
			 TP_INGRESS_CONFIG);

	/* Now that we have TP_VLAN_PRI_MAP cached, we can calculate the field
	 * shift positions of several elements of the Compressed Filter Tuple
	 * for this adapter which we need frequently ...
	 */
	adap->params.tp.vlan_shift = t4_filter_field_shift(adap, F_VLAN);
	adap->params.tp.vnic_shift = t4_filter_field_shift(adap, F_VNIC_ID);
	adap->params.tp.port_shift = t4_filter_field_shift(adap, F_PORT);
	adap->params.tp.protocol_shift = t4_filter_field_shift(adap,
							       F_PROTOCOL);

	/* If TP_INGRESS_CONFIG.VNID == 0, then TP_VLAN_PRI_MAP.VNIC_ID
	 * represents the presence of an Outer VLAN instead of a VNIC ID.
	 */
	if ((adap->params.tp.ingress_config & F_VNIC) == 0)
		adap->params.tp.vnic_shift = -1;

	return 0;
}

/**
 * t4_filter_field_shift - calculate filter field shift
 * @adap: the adapter
 * @filter_sel: the desired field (from TP_VLAN_PRI_MAP bits)
 *
 * Return the shift position of a filter field within the Compressed
 * Filter Tuple.  The filter field is specified via its selection bit
 * within TP_VLAN_PRI_MAP (filter mode).  E.g. F_VLAN.
 */
int t4_filter_field_shift(const struct adapter *adap, int filter_sel)
{
	unsigned int filter_mode = adap->params.tp.vlan_pri_map;
	unsigned int sel;
	int field_shift;

	if ((filter_mode & filter_sel) == 0)
		return -1;

	for (sel = 1, field_shift = 0; sel < filter_sel; sel <<= 1) {
		switch (filter_mode & sel) {
		case F_FCOE:
			field_shift += W_FT_FCOE;
			break;
		case F_PORT:
			field_shift += W_FT_PORT;
			break;
		case F_VNIC_ID:
			field_shift += W_FT_VNIC_ID;
			break;
		case F_VLAN:
			field_shift += W_FT_VLAN;
			break;
		case F_TOS:
			field_shift += W_FT_TOS;
			break;
		case F_PROTOCOL:
			field_shift += W_FT_PROTOCOL;
			break;
		case F_ETHERTYPE:
			field_shift += W_FT_ETHERTYPE;
			break;
		case F_MACMATCH:
			field_shift += W_FT_MACMATCH;
			break;
		case F_MPSHITTYPE:
			field_shift += W_FT_MPSHITTYPE;
			break;
		case F_FRAGMENTATION:
			field_shift += W_FT_FRAGMENTATION;
			break;
		}
	}
	return field_shift;
}

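/* Worked example: a field's shift is the sum of the widths (the W_FT_*
 * constants from the register definitions) of all enabled fields below
 * it in the filter mode.  If the mode enables only F_PORT and F_VLAN,
 * then t4_filter_field_shift(adap, F_PORT) returns 0 and
 * t4_filter_field_shift(adap, F_VLAN) returns W_FT_PORT, since the VLAN
 * field sits immediately above the Port field in the Compressed Filter
 * Tuple.
 */
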
/**
 * t4_port_init - initialize the adapter's port information
 * @adap: the adapter
 * @mbox: mailbox to use for the FW commands
 * @pf: the PF owning the ports
 * @vf: the VF owning the ports
 *
 * Queries firmware for each physical port, allocates a VI with one MAC
 * address per port, and records each port's link and RSS configuration.
 */
int t4_port_init(struct adapter *adap, int mbox, int pf, int vf)
{
	u8 addr[6];
	int ret, i, j = 0;
	struct fw_port_cmd c;
	struct fw_rss_vi_config_cmd rvc;

	memset(&c, 0, sizeof(c));
	memset(&rvc, 0, sizeof(rvc));

	for_each_port(adap, i) {
		unsigned int rss_size;
		struct port_info *p = adap2pinfo(adap, i);

		while ((adap->params.portvec & (1 << j)) == 0)
			j++;

		c.op_to_portid = htonl(FW_CMD_OP_V(FW_PORT_CMD) |
				       FW_CMD_REQUEST_F | FW_CMD_READ_F |
				       FW_PORT_CMD_PORTID(j));
		c.action_to_len16 = htonl(
			FW_PORT_CMD_ACTION(FW_PORT_ACTION_GET_PORT_INFO) |
			FW_LEN16(c));
		ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
		if (ret)
			return ret;

		ret = t4_alloc_vi(adap, mbox, j, pf, vf, 1, addr, &rss_size);
		if (ret < 0)
			return ret;

		p->viid = ret;
		p->tx_chan = j;
		p->lport = j;
		p->rss_size = rss_size;
		memcpy(adap->port[i]->dev_addr, addr, ETH_ALEN);
		adap->port[i]->dev_port = j;

		ret = ntohl(c.u.info.lstatus_to_modtype);
		p->mdio_addr = (ret & FW_PORT_CMD_MDIOCAP) ?
			FW_PORT_CMD_MDIOADDR_GET(ret) : -1;
		p->port_type = FW_PORT_CMD_PTYPE_GET(ret);
		p->mod_type = FW_PORT_MOD_TYPE_NA;

		rvc.op_to_viid = htonl(FW_CMD_OP_V(FW_RSS_VI_CONFIG_CMD) |
				       FW_CMD_REQUEST_F | FW_CMD_READ_F |
				       FW_RSS_VI_CONFIG_CMD_VIID(p->viid));
		rvc.retval_len16 = htonl(FW_LEN16(rvc));
		ret = t4_wr_mbox(adap, mbox, &rvc, sizeof(rvc), &rvc);
		if (ret)
			return ret;
		p->rss_mode = ntohl(rvc.u.basicvirtual.defaultq_to_udpen);

		init_link_config(&p->link_cfg, ntohs(c.u.info.pcap));
		j++;
	}
	return 0;
}