cxgb4: propagate link initialization errors to .ndo_open's callers
[linux-2.6-block.git] / drivers / net / cxgb4 / t4_hw.c
CommitLineData
56d36be4
DM
1/*
2 * This file is part of the Chelsio T4 Ethernet driver for Linux.
3 *
4 * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved.
5 *
6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenIB.org BSD license below:
11 *
12 * Redistribution and use in source and binary forms, with or
13 * without modification, are permitted provided that the following
14 * conditions are met:
15 *
16 * - Redistributions of source code must retain the above
17 * copyright notice, this list of conditions and the following
18 * disclaimer.
19 *
20 * - Redistributions in binary form must reproduce the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer in the documentation and/or other materials
23 * provided with the distribution.
24 *
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 * SOFTWARE.
33 */
34
35#include <linux/init.h>
36#include <linux/delay.h>
37#include "cxgb4.h"
38#include "t4_regs.h"
39#include "t4fw_api.h"
40
41/**
42 * t4_wait_op_done_val - wait until an operation is completed
43 * @adapter: the adapter performing the operation
44 * @reg: the register to check for completion
45 * @mask: a single-bit field within @reg that indicates completion
46 * @polarity: the value of the field when the operation is completed
47 * @attempts: number of check iterations
48 * @delay: delay in usecs between iterations
49 * @valp: where to store the value of the register at completion time
50 *
51 * Wait until an operation is completed by checking a bit in a register
52 * up to @attempts times. If @valp is not NULL the value of the register
53 * at the time it indicated completion is stored there. Returns 0 if the
54 * operation completes and -EAGAIN otherwise.
55 */
de498c89
RD
56static int t4_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
57 int polarity, int attempts, int delay, u32 *valp)
56d36be4
DM
58{
59 while (1) {
60 u32 val = t4_read_reg(adapter, reg);
61
62 if (!!(val & mask) == polarity) {
63 if (valp)
64 *valp = val;
65 return 0;
66 }
67 if (--attempts == 0)
68 return -EAGAIN;
69 if (delay)
70 udelay(delay);
71 }
72}
73
74static inline int t4_wait_op_done(struct adapter *adapter, int reg, u32 mask,
75 int polarity, int attempts, int delay)
76{
77 return t4_wait_op_done_val(adapter, reg, mask, polarity, attempts,
78 delay, NULL);
79}
80
81/**
82 * t4_set_reg_field - set a register field to a value
83 * @adapter: the adapter to program
84 * @addr: the register address
85 * @mask: specifies the portion of the register to modify
86 * @val: the new value for the register field
87 *
88 * Sets a register field specified by the supplied mask to the
89 * given value.
90 */
91void t4_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask,
92 u32 val)
93{
94 u32 v = t4_read_reg(adapter, addr) & ~mask;
95
96 t4_write_reg(adapter, addr, v | val);
97 (void) t4_read_reg(adapter, addr); /* flush */
98}
99
100/**
101 * t4_read_indirect - read indirectly addressed registers
102 * @adap: the adapter
103 * @addr_reg: register holding the indirect address
104 * @data_reg: register holding the value of the indirect register
105 * @vals: where the read register values are stored
106 * @nregs: how many indirect registers to read
107 * @start_idx: index of first indirect register to read
108 *
109 * Reads registers that are accessed indirectly through an address/data
110 * register pair.
111 */
de498c89
RD
112static void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
113 unsigned int data_reg, u32 *vals,
114 unsigned int nregs, unsigned int start_idx)
56d36be4
DM
115{
116 while (nregs--) {
117 t4_write_reg(adap, addr_reg, start_idx);
118 *vals++ = t4_read_reg(adap, data_reg);
119 start_idx++;
120 }
121}
122
de498c89 123#if 0
56d36be4
DM
124/**
125 * t4_write_indirect - write indirectly addressed registers
126 * @adap: the adapter
127 * @addr_reg: register holding the indirect addresses
128 * @data_reg: register holding the value for the indirect registers
129 * @vals: values to write
130 * @nregs: how many indirect registers to write
131 * @start_idx: address of first indirect register to write
132 *
133 * Writes a sequential block of registers that are accessed indirectly
134 * through an address/data register pair.
135 */
de498c89
RD
136static void t4_write_indirect(struct adapter *adap, unsigned int addr_reg,
137 unsigned int data_reg, const u32 *vals,
138 unsigned int nregs, unsigned int start_idx)
56d36be4
DM
139{
140 while (nregs--) {
141 t4_write_reg(adap, addr_reg, start_idx++);
142 t4_write_reg(adap, data_reg, *vals++);
143 }
144}
de498c89 145#endif
56d36be4
DM
146
147/*
148 * Get the reply to a mailbox command and store it in @rpl in big-endian order.
149 */
150static void get_mbox_rpl(struct adapter *adap, __be64 *rpl, int nflit,
151 u32 mbox_addr)
152{
153 for ( ; nflit; nflit--, mbox_addr += 8)
154 *rpl++ = cpu_to_be64(t4_read_reg64(adap, mbox_addr));
155}
156
157/*
158 * Handle a FW assertion reported in a mailbox.
159 */
static void fw_asrt(struct adapter *adap, u32 mbox_addr)
{
	struct fw_debug_cmd asrt;

	/* Pull the full debug command out of the mailbox so the assertion's
	 * file name, line and arguments (big-endian on the wire) can be
	 * decoded and logged. */
	get_mbox_rpl(adap, (__be64 *)&asrt, sizeof(asrt) / 8, mbox_addr);
	dev_alert(adap->pdev_dev,
		  "FW assertion at %.16s:%u, val0 %#x, val1 %#x\n",
		  asrt.u.assert.filename_0_7, ntohl(asrt.u.assert.line),
		  ntohl(asrt.u.assert.x), ntohl(asrt.u.assert.y));
}
170
/* Log the full 64-byte contents of a mailbox (eight 64-bit reads) to aid
 * debugging of failed or timed-out FW commands. */
static void dump_mbox(struct adapter *adap, int mbox, u32 data_reg)
{
	dev_err(adap->pdev_dev,
		"mbox %d: %llx %llx %llx %llx %llx %llx %llx %llx\n", mbox,
		(unsigned long long)t4_read_reg64(adap, data_reg),
		(unsigned long long)t4_read_reg64(adap, data_reg + 8),
		(unsigned long long)t4_read_reg64(adap, data_reg + 16),
		(unsigned long long)t4_read_reg64(adap, data_reg + 24),
		(unsigned long long)t4_read_reg64(adap, data_reg + 32),
		(unsigned long long)t4_read_reg64(adap, data_reg + 40),
		(unsigned long long)t4_read_reg64(adap, data_reg + 48),
		(unsigned long long)t4_read_reg64(adap, data_reg + 56));
}
184
185/**
186 * t4_wr_mbox_meat - send a command to FW through the given mailbox
187 * @adap: the adapter
188 * @mbox: index of the mailbox to use
189 * @cmd: the command to write
190 * @size: command length in bytes
191 * @rpl: where to optionally store the reply
192 * @sleep_ok: if true we may sleep while awaiting command completion
193 *
194 * Sends the given command to FW through the selected mailbox and waits
195 * for the FW to execute the command. If @rpl is not %NULL it is used to
196 * store the FW's reply to the command. The command and its optional
197 * reply are of the same length. FW can take up to %FW_CMD_MAX_TIMEOUT ms
198 * to respond. @sleep_ok determines whether we may sleep while awaiting
199 * the response. If sleeping is allowed we use progressive backoff
200 * otherwise we spin.
201 *
202 * The return value is 0 on success or a negative errno on failure. A
203 * failure can happen either because we are not able to execute the
204 * command or FW executes it but signals an error. In the latter case
205 * the return value is the error code indicated by FW (negated).
206 */
207int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
208 void *rpl, bool sleep_ok)
209{
210 static int delay[] = {
211 1, 1, 3, 5, 10, 10, 20, 50, 100, 200
212 };
213
214 u32 v;
215 u64 res;
216 int i, ms, delay_idx;
217 const __be64 *p = cmd;
218 u32 data_reg = PF_REG(mbox, CIM_PF_MAILBOX_DATA);
219 u32 ctl_reg = PF_REG(mbox, CIM_PF_MAILBOX_CTRL);
220
221 if ((size & 15) || size > MBOX_LEN)
222 return -EINVAL;
223
204dc3c0
DM
224 /*
225 * If the device is off-line, as in EEH, commands will time out.
226 * Fail them early so we don't waste time waiting.
227 */
228 if (adap->pdev->error_state != pci_channel_io_normal)
229 return -EIO;
230
56d36be4
DM
231 v = MBOWNER_GET(t4_read_reg(adap, ctl_reg));
232 for (i = 0; v == MBOX_OWNER_NONE && i < 3; i++)
233 v = MBOWNER_GET(t4_read_reg(adap, ctl_reg));
234
235 if (v != MBOX_OWNER_DRV)
236 return v ? -EBUSY : -ETIMEDOUT;
237
238 for (i = 0; i < size; i += 8)
239 t4_write_reg64(adap, data_reg + i, be64_to_cpu(*p++));
240
241 t4_write_reg(adap, ctl_reg, MBMSGVALID | MBOWNER(MBOX_OWNER_FW));
242 t4_read_reg(adap, ctl_reg); /* flush write */
243
244 delay_idx = 0;
245 ms = delay[0];
246
247 for (i = 0; i < FW_CMD_MAX_TIMEOUT; i += ms) {
248 if (sleep_ok) {
249 ms = delay[delay_idx]; /* last element may repeat */
250 if (delay_idx < ARRAY_SIZE(delay) - 1)
251 delay_idx++;
252 msleep(ms);
253 } else
254 mdelay(ms);
255
256 v = t4_read_reg(adap, ctl_reg);
257 if (MBOWNER_GET(v) == MBOX_OWNER_DRV) {
258 if (!(v & MBMSGVALID)) {
259 t4_write_reg(adap, ctl_reg, 0);
260 continue;
261 }
262
263 res = t4_read_reg64(adap, data_reg);
264 if (FW_CMD_OP_GET(res >> 32) == FW_DEBUG_CMD) {
265 fw_asrt(adap, data_reg);
266 res = FW_CMD_RETVAL(EIO);
267 } else if (rpl)
268 get_mbox_rpl(adap, rpl, size / 8, data_reg);
269
270 if (FW_CMD_RETVAL_GET((int)res))
271 dump_mbox(adap, mbox, data_reg);
272 t4_write_reg(adap, ctl_reg, 0);
273 return -FW_CMD_RETVAL_GET((int)res);
274 }
275 }
276
277 dump_mbox(adap, mbox, data_reg);
278 dev_err(adap->pdev_dev, "command %#x in mailbox %d timed out\n",
279 *(const u8 *)cmd, mbox);
280 return -ETIMEDOUT;
281}
282
/**
 *	t4_mc_read - read from MC through backdoor accesses
 *	@adap: the adapter
 *	@addr: address of first byte requested
 *	@data: 64 bytes of data containing the requested address
 *	@ecc: where to store the corresponding 64-bit ECC word
 *
 *	Read 64 bytes of data from MC starting at a 64-byte-aligned address
 *	that covers the requested address @addr.  If @ecc is not %NULL it
 *	is assigned the 64-bit ECC word for the read data.
 */
int t4_mc_read(struct adapter *adap, u32 addr, __be32 *data, u64 *ecc)
{
	int i;

	/* Fail if a backdoor BIST access is already in flight. */
	if (t4_read_reg(adap, MC_BIST_CMD) & START_BIST)
		return -EBUSY;
	t4_write_reg(adap, MC_BIST_CMD_ADDR, addr & ~0x3fU); /* 64B align */
	t4_write_reg(adap, MC_BIST_CMD_LEN, 64);
	t4_write_reg(adap, MC_BIST_DATA_PATTERN, 0xc);
	t4_write_reg(adap, MC_BIST_CMD, BIST_OPCODE(1) | START_BIST |
		     BIST_CMD_GAP(1));
	i = t4_wait_op_done(adap, MC_BIST_CMD, START_BIST, 0, 10, 1);
	if (i)
		return i;

#define MC_DATA(i) MC_BIST_STATUS_REG(MC_BIST_STATUS_RDATA, i)

	/* Copy out the 16 status words, highest index first. */
	for (i = 15; i >= 0; i--)
		*data++ = htonl(t4_read_reg(adap, MC_DATA(i)));
	if (ecc)
		*ecc = t4_read_reg64(adap, MC_DATA(16));
#undef MC_DATA
	return 0;
}
318
/**
 *	t4_edc_read - read from EDC through backdoor accesses
 *	@adap: the adapter
 *	@idx: which EDC to access
 *	@addr: address of first byte requested
 *	@data: 64 bytes of data containing the requested address
 *	@ecc: where to store the corresponding 64-bit ECC word
 *
 *	Read 64 bytes of data from EDC starting at a 64-byte-aligned address
 *	that covers the requested address @addr.  If @ecc is not %NULL it
 *	is assigned the 64-bit ECC word for the read data.
 */
int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
{
	int i;

	/* Each EDC has its own register bank, EDC_STRIDE apart. */
	idx *= EDC_STRIDE;
	if (t4_read_reg(adap, EDC_BIST_CMD + idx) & START_BIST)
		return -EBUSY;
	t4_write_reg(adap, EDC_BIST_CMD_ADDR + idx, addr & ~0x3fU);
	t4_write_reg(adap, EDC_BIST_CMD_LEN + idx, 64);
	t4_write_reg(adap, EDC_BIST_DATA_PATTERN + idx, 0xc);
	t4_write_reg(adap, EDC_BIST_CMD + idx,
		     BIST_OPCODE(1) | BIST_CMD_GAP(1) | START_BIST);
	i = t4_wait_op_done(adap, EDC_BIST_CMD + idx, START_BIST, 0, 10, 1);
	if (i)
		return i;

#define EDC_DATA(i) (EDC_BIST_STATUS_REG(EDC_BIST_STATUS_RDATA, i) + idx)

	/* Copy out the 16 status words, highest index first. */
	for (i = 15; i >= 0; i--)
		*data++ = htonl(t4_read_reg(adap, EDC_DATA(i)));
	if (ecc)
		*ecc = t4_read_reg64(adap, EDC_DATA(16));
#undef EDC_DATA
	return 0;
}
356
56d36be4
DM
/*
 * Partial EEPROM Vital Product Data structure.  Includes only the ID and
 * VPD-R header.
 */
struct t4_vpd_hdr {
	u8 id_tag;		/* ID string resource tag */
	u8 id_len[2];		/* ID string length */
	u8 id_data[ID_LEN];	/* product ID string */
	u8 vpdr_tag;		/* VPD-R large-resource tag (see
				 * pci_vpd_lrdt_size() in get_vpd_params()) */
	u8 vpdr_len[2];		/* VPD-R section length */
};

#define EEPROM_STAT_ADDR 0x7bfc	/* VPD offset written by t4_seeprom_wp() */
#define VPD_BASE         0	/* VPD starts at the beginning of the EEPROM */
#define VPD_LEN          512	/* bytes of VPD read by get_vpd_params() */
56d36be4
DM
372
373/**
374 * t4_seeprom_wp - enable/disable EEPROM write protection
375 * @adapter: the adapter
376 * @enable: whether to enable or disable write protection
377 *
378 * Enables or disables write protection on the serial EEPROM.
379 */
380int t4_seeprom_wp(struct adapter *adapter, bool enable)
381{
382 unsigned int v = enable ? 0xc : 0;
383 int ret = pci_write_vpd(adapter->pdev, EEPROM_STAT_ADDR, 4, &v);
384 return ret < 0 ? ret : 0;
385}
386
387/**
388 * get_vpd_params - read VPD parameters from VPD EEPROM
389 * @adapter: adapter to read
390 * @p: where to store the parameters
391 *
392 * Reads card parameters stored in VPD EEPROM.
393 */
static int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
{
	int i, ret;
	int ec, sn, v2;		/* offsets of the EC/SN/V2 keyword values */
	u8 vpd[VPD_LEN], csum;
	unsigned int vpdr_len;
	const struct t4_vpd_hdr *v;

	ret = pci_read_vpd(adapter->pdev, VPD_BASE, sizeof(vpd), vpd);
	if (ret < 0)
		return ret;

	v = (const struct t4_vpd_hdr *)vpd;
	vpdr_len = pci_vpd_lrdt_size(&v->vpdr_tag);
	/* The whole VPD-R section must fit in the buffer we just read. */
	if (vpdr_len + sizeof(struct t4_vpd_hdr) > VPD_LEN) {
		dev_err(adapter->pdev_dev, "bad VPD-R length %u\n", vpdr_len);
		return -EINVAL;
	}

	/*
	 * Locate keyword @name within the VPD-R section and leave the
	 * offset of its value in @var; note the macro returns -EINVAL
	 * from get_vpd_params() itself if the keyword is missing.
	 */
#define FIND_VPD_KW(var, name) do { \
	var = pci_vpd_find_info_keyword(&v->id_tag, sizeof(struct t4_vpd_hdr), \
					vpdr_len, name); \
	if (var < 0) { \
		dev_err(adapter->pdev_dev, "missing VPD keyword " name "\n"); \
		return -EINVAL; \
	} \
	var += PCI_VPD_INFO_FLD_HDR_SIZE; \
} while (0)

	/* Verify the checksum: bytes 0..RV must sum to zero. */
	FIND_VPD_KW(i, "RV");
	for (csum = 0; i >= 0; i--)
		csum += vpd[i];

	if (csum) {
		dev_err(adapter->pdev_dev,
			"corrupted VPD EEPROM, actual csum %u\n", csum);
		return -EINVAL;
	}

	FIND_VPD_KW(ec, "EC");
	FIND_VPD_KW(sn, "SN");
	FIND_VPD_KW(v2, "V2");
#undef FIND_VPD_KW

	/* V2 holds the cclk value as an ASCII decimal string. */
	p->cclk = simple_strtoul(vpd + v2, NULL, 10);
	memcpy(p->id, v->id_data, ID_LEN);
	strim(p->id);
	memcpy(p->ec, vpd + ec, EC_LEN);
	strim(p->ec);
	/* SN is variable-length; copy at most SERNUM_LEN bytes. */
	i = pci_vpd_info_field_size(vpd + sn - PCI_VPD_INFO_FLD_HDR_SIZE);
	memcpy(p->sn, vpd + sn, min(i, SERNUM_LEN));
	strim(p->sn);
	return 0;
}
448
/* serial flash and firmware constants */
enum {
	SF_ATTEMPTS = 10,             /* max retries for SF operations */

	/* flash command opcodes */
	SF_PROG_PAGE    = 2,          /* program page */
	SF_WR_DISABLE   = 4,          /* disable writes */
	SF_RD_STATUS    = 5,          /* read status register */
	SF_WR_ENABLE    = 6,          /* enable writes */
	SF_RD_DATA_FAST = 0xb,        /* read flash */
	SF_RD_ID        = 0x9f,       /* read ID */
	SF_ERASE_SECTOR = 0xd8,       /* erase sector */

	FW_MAX_SIZE = 512 * 1024,     /* max FW image size t4_load_fw accepts */
};
464
465/**
466 * sf1_read - read data from the serial flash
467 * @adapter: the adapter
468 * @byte_cnt: number of bytes to read
469 * @cont: whether another operation will be chained
470 * @lock: whether to lock SF for PL access only
471 * @valp: where to store the read data
472 *
473 * Reads up to 4 bytes of data from the serial flash. The location of
474 * the read needs to be specified prior to calling this by issuing the
475 * appropriate commands to the serial flash.
476 */
477static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
478 int lock, u32 *valp)
479{
480 int ret;
481
482 if (!byte_cnt || byte_cnt > 4)
483 return -EINVAL;
484 if (t4_read_reg(adapter, SF_OP) & BUSY)
485 return -EBUSY;
486 cont = cont ? SF_CONT : 0;
487 lock = lock ? SF_LOCK : 0;
488 t4_write_reg(adapter, SF_OP, lock | cont | BYTECNT(byte_cnt - 1));
489 ret = t4_wait_op_done(adapter, SF_OP, BUSY, 0, SF_ATTEMPTS, 5);
490 if (!ret)
491 *valp = t4_read_reg(adapter, SF_DATA);
492 return ret;
493}
494
495/**
496 * sf1_write - write data to the serial flash
497 * @adapter: the adapter
498 * @byte_cnt: number of bytes to write
499 * @cont: whether another operation will be chained
500 * @lock: whether to lock SF for PL access only
501 * @val: value to write
502 *
503 * Writes up to 4 bytes of data to the serial flash. The location of
504 * the write needs to be specified prior to calling this by issuing the
505 * appropriate commands to the serial flash.
506 */
507static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont,
508 int lock, u32 val)
509{
510 if (!byte_cnt || byte_cnt > 4)
511 return -EINVAL;
512 if (t4_read_reg(adapter, SF_OP) & BUSY)
513 return -EBUSY;
514 cont = cont ? SF_CONT : 0;
515 lock = lock ? SF_LOCK : 0;
516 t4_write_reg(adapter, SF_DATA, val);
517 t4_write_reg(adapter, SF_OP, lock |
518 cont | BYTECNT(byte_cnt - 1) | OP_WR);
519 return t4_wait_op_done(adapter, SF_OP, BUSY, 0, SF_ATTEMPTS, 5);
520}
521
522/**
523 * flash_wait_op - wait for a flash operation to complete
524 * @adapter: the adapter
525 * @attempts: max number of polls of the status register
526 * @delay: delay between polls in ms
527 *
528 * Wait for a flash operation to complete by polling the status register.
529 */
530static int flash_wait_op(struct adapter *adapter, int attempts, int delay)
531{
532 int ret;
533 u32 status;
534
535 while (1) {
536 if ((ret = sf1_write(adapter, 1, 1, 1, SF_RD_STATUS)) != 0 ||
537 (ret = sf1_read(adapter, 1, 0, 1, &status)) != 0)
538 return ret;
539 if (!(status & 1))
540 return 0;
541 if (--attempts == 0)
542 return -EAGAIN;
543 if (delay)
544 msleep(delay);
545 }
546}
547
548/**
549 * t4_read_flash - read words from serial flash
550 * @adapter: the adapter
551 * @addr: the start address for the read
552 * @nwords: how many 32-bit words to read
553 * @data: where to store the read data
554 * @byte_oriented: whether to store data as bytes or as words
555 *
556 * Read the specified number of 32-bit words from the serial flash.
557 * If @byte_oriented is set the read data is stored as a byte array
558 * (i.e., big-endian), otherwise as 32-bit words in the platform's
559 * natural endianess.
560 */
de498c89
RD
static int t4_read_flash(struct adapter *adapter, unsigned int addr,
			 unsigned int nwords, u32 *data, int byte_oriented)
{
	int ret;

	/* Reads must lie entirely within the flash and be 32-bit aligned. */
	if (addr + nwords * sizeof(u32) > adapter->params.sf_size || (addr & 3))
		return -EINVAL;

	/* Byte-swap so the fast-read opcode goes out in the first byte. */
	addr = swab32(addr) | SF_RD_DATA_FAST;

	if ((ret = sf1_write(adapter, 4, 1, 0, addr)) != 0 ||
	    (ret = sf1_read(adapter, 1, 1, 0, data)) != 0)
		return ret;

	for ( ; nwords; nwords--, data++) {
		/* Continue the chained read for all but the last word,
		 * and lock SF on the final access. */
		ret = sf1_read(adapter, 4, nwords > 1, nwords == 1, data);
		if (nwords == 1)
			t4_write_reg(adapter, SF_OP, 0);	/* unlock SF */
		if (ret)
			return ret;
		if (byte_oriented)
			*data = htonl(*data);
	}
	return 0;
}
586
587/**
588 * t4_write_flash - write up to a page of data to the serial flash
589 * @adapter: the adapter
590 * @addr: the start address to write
591 * @n: length of data to write in bytes
592 * @data: the data to write
593 *
594 * Writes up to a page of data (256 bytes) to the serial flash starting
595 * at the given address. All the data must be written to the same page.
596 */
static int t4_write_flash(struct adapter *adapter, unsigned int addr,
			  unsigned int n, const u8 *data)
{
	int ret;
	u32 buf[64];
	unsigned int i, c, left, val, offset = addr & 0xff;

	/* The write must lie within the flash and within one 256-byte page. */
	if (addr >= adapter->params.sf_size || offset + n > SF_PAGE_SIZE)
		return -EINVAL;

	/* Byte-swap so the program-page opcode goes out in the first byte. */
	val = swab32(addr) | SF_PROG_PAGE;

	if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
	    (ret = sf1_write(adapter, 4, 1, 1, val)) != 0)
		goto unlock;

	/* Stream the payload up to 4 bytes at a time, MSB first. */
	for (left = n; left; left -= c) {
		c = min(left, 4U);
		for (val = 0, i = 0; i < c; ++i)
			val = (val << 8) + *data++;

		ret = sf1_write(adapter, c, c != left, 1, val);
		if (ret)
			goto unlock;
	}
	ret = flash_wait_op(adapter, 8, 1);
	if (ret)
		goto unlock;

	t4_write_reg(adapter, SF_OP, 0);	/* unlock SF */

	/* Read the page to verify the write succeeded */
	ret = t4_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf, 1);
	if (ret)
		return ret;

	/* @data was advanced past the payload above; back up to compare. */
	if (memcmp(data - n, (u8 *)buf + offset, n)) {
		dev_err(adapter->pdev_dev,
			"failed to correctly write the flash page at %#x\n",
			addr);
		return -EIO;
	}
	return 0;

unlock:
	t4_write_reg(adapter, SF_OP, 0);	/* unlock SF */
	return ret;
}
645
646/**
647 * get_fw_version - read the firmware version
648 * @adapter: the adapter
649 * @vers: where to place the version
650 *
651 * Reads the FW version from flash.
652 */
static int get_fw_version(struct adapter *adapter, u32 *vers)
{
	/* One 32-bit word at the fw_ver field of the FW header in flash,
	 * returned in native endianness (byte_oriented == 0). */
	return t4_read_flash(adapter, adapter->params.sf_fw_start +
			     offsetof(struct fw_hdr, fw_ver), 1, vers, 0);
}
658
659/**
660 * get_tp_version - read the TP microcode version
661 * @adapter: the adapter
662 * @vers: where to place the version
663 *
664 * Reads the TP microcode version from flash.
665 */
static int get_tp_version(struct adapter *adapter, u32 *vers)
{
	/* One 32-bit word at the tp_microcode_ver field of the FW header
	 * in flash, returned in native endianness (byte_oriented == 0). */
	return t4_read_flash(adapter, adapter->params.sf_fw_start +
			     offsetof(struct fw_hdr, tp_microcode_ver),
			     1, vers, 0);
}
672
673/**
674 * t4_check_fw_version - check if the FW is compatible with this driver
675 * @adapter: the adapter
676 *
677 * Checks if an adapter's FW is compatible with the driver. Returns 0
678 * if there's exact match, a negative error if the version could not be
679 * read or there's a major version mismatch, and a positive value if the
680 * expected major version is found but there's a minor version mismatch.
681 */
682int t4_check_fw_version(struct adapter *adapter)
683{
684 u32 api_vers[2];
685 int ret, major, minor, micro;
686
687 ret = get_fw_version(adapter, &adapter->params.fw_vers);
688 if (!ret)
689 ret = get_tp_version(adapter, &adapter->params.tp_vers);
690 if (!ret)
900a6596
DM
691 ret = t4_read_flash(adapter, adapter->params.sf_fw_start +
692 offsetof(struct fw_hdr, intfver_nic),
693 2, api_vers, 1);
56d36be4
DM
694 if (ret)
695 return ret;
696
697 major = FW_HDR_FW_VER_MAJOR_GET(adapter->params.fw_vers);
698 minor = FW_HDR_FW_VER_MINOR_GET(adapter->params.fw_vers);
699 micro = FW_HDR_FW_VER_MICRO_GET(adapter->params.fw_vers);
700 memcpy(adapter->params.api_vers, api_vers,
701 sizeof(adapter->params.api_vers));
702
703 if (major != FW_VERSION_MAJOR) { /* major mismatch - fail */
704 dev_err(adapter->pdev_dev,
705 "card FW has major version %u, driver wants %u\n",
706 major, FW_VERSION_MAJOR);
707 return -EINVAL;
708 }
709
710 if (minor == FW_VERSION_MINOR && micro == FW_VERSION_MICRO)
711 return 0; /* perfect match */
712
713 /* Minor/micro version mismatch. Report it but often it's OK. */
714 return 1;
715}
716
717/**
718 * t4_flash_erase_sectors - erase a range of flash sectors
719 * @adapter: the adapter
720 * @start: the first sector to erase
721 * @end: the last sector to erase
722 *
723 * Erases the sectors in the given inclusive range.
724 */
725static int t4_flash_erase_sectors(struct adapter *adapter, int start, int end)
726{
727 int ret = 0;
728
729 while (start <= end) {
730 if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
731 (ret = sf1_write(adapter, 4, 0, 1,
732 SF_ERASE_SECTOR | (start << 8))) != 0 ||
900a6596 733 (ret = flash_wait_op(adapter, 14, 500)) != 0) {
56d36be4
DM
734 dev_err(adapter->pdev_dev,
735 "erase of flash sector %d failed, error %d\n",
736 start, ret);
737 break;
738 }
739 start++;
740 }
741 t4_write_reg(adapter, SF_OP, 0); /* unlock SF */
742 return ret;
743}
744
745/**
746 * t4_load_fw - download firmware
747 * @adap: the adapter
748 * @fw_data: the firmware image to write
749 * @size: image size
750 *
751 * Write the supplied firmware image to the card's serial flash.
752 */
int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
{
	u32 csum;
	int ret, addr;
	unsigned int i;
	u8 first_page[SF_PAGE_SIZE];
	const u32 *p = (const u32 *)fw_data;
	const struct fw_hdr *hdr = (const struct fw_hdr *)fw_data;
	unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
	unsigned int fw_img_start = adap->params.sf_fw_start;
	unsigned int fw_start_sec = fw_img_start / sf_sec_size;

	/* Sanity-check the image: non-empty, 512-byte granular, internally
	 * consistent with its header, and within the flash budget. */
	if (!size) {
		dev_err(adap->pdev_dev, "FW image has no data\n");
		return -EINVAL;
	}
	if (size & 511) {
		dev_err(adap->pdev_dev,
			"FW image size not multiple of 512 bytes\n");
		return -EINVAL;
	}
	if (ntohs(hdr->len512) * 512 != size) {
		dev_err(adap->pdev_dev,
			"FW image size differs from size in FW header\n");
		return -EINVAL;
	}
	if (size > FW_MAX_SIZE) {
		dev_err(adap->pdev_dev, "FW image too large, max is %u bytes\n",
			FW_MAX_SIZE);
		return -EFBIG;
	}

	/* The 32-bit words of a valid image sum to 0xffffffff. */
	for (csum = 0, i = 0; i < size / sizeof(csum); i++)
		csum += ntohl(p[i]);

	if (csum != 0xffffffff) {
		dev_err(adap->pdev_dev,
			"corrupted firmware image, checksum %#x\n", csum);
		return -EINVAL;
	}

	i = DIV_ROUND_UP(size, sf_sec_size);	/* # of sectors spanned */
	ret = t4_flash_erase_sectors(adap, fw_start_sec, fw_start_sec + i - 1);
	if (ret)
		goto out;

	/*
	 * We write the correct version at the end so the driver can see a bad
	 * version if the FW write fails.  Start by writing a copy of the
	 * first page with a bad version.
	 */
	memcpy(first_page, fw_data, SF_PAGE_SIZE);
	((struct fw_hdr *)first_page)->fw_ver = htonl(0xffffffff);
	ret = t4_write_flash(adap, fw_img_start, SF_PAGE_SIZE, first_page);
	if (ret)
		goto out;

	/* Write the remaining pages of the image. */
	addr = fw_img_start;
	for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
		addr += SF_PAGE_SIZE;
		fw_data += SF_PAGE_SIZE;
		ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, fw_data);
		if (ret)
			goto out;
	}

	/* Finally patch in the real version, marking the image valid. */
	ret = t4_write_flash(adap,
			     fw_img_start + offsetof(struct fw_hdr, fw_ver),
			     sizeof(hdr->fw_ver), (const u8 *)&hdr->fw_ver);
out:
	if (ret)
		dev_err(adap->pdev_dev, "firmware download failed, error %d\n",
			ret);
	return ret;
}
828
829#define ADVERT_MASK (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G |\
830 FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_ANEG)
831
/**
 *	t4_link_start - apply link configuration to MAC/PHY
 *	@adap: the adapter
 *	@mbox: mbox to use for the FW command
 *	@port: the port id
 *	@lc: the requested link configuration
 *
 *	Set up a port's MAC and PHY according to a desired link configuration.
 *	- If the PHY can auto-negotiate first decide what to advertise, then
 *	  enable/disable auto-negotiation as desired, and reset.
 *	- If the PHY does not auto-negotiate just reset it.
 *	- If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
 *	  otherwise do it later based on the outcome of auto-negotiation.
 */
int t4_link_start(struct adapter *adap, unsigned int mbox, unsigned int port,
		  struct link_config *lc)
{
	struct fw_port_cmd c;
	unsigned int fc = 0, mdi = FW_PORT_MDI(FW_PORT_MDI_AUTO);

	lc->link_ok = 0;
	if (lc->requested_fc & PAUSE_RX)
		fc |= FW_PORT_CAP_FC_RX;
	if (lc->requested_fc & PAUSE_TX)
		fc |= FW_PORT_CAP_FC_TX;

	memset(&c, 0, sizeof(c));
	c.op_to_portid = htonl(FW_CMD_OP(FW_PORT_CMD) | FW_CMD_REQUEST |
			       FW_CMD_EXEC | FW_PORT_CMD_PORTID(port));
	c.action_to_len16 = htonl(FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
				  FW_LEN16(c));

	if (!(lc->supported & FW_PORT_CAP_ANEG)) {
		/* No autonegotiation support: advertise everything the port
		 * supports and settle flow control now. */
		c.u.l1cfg.rcap = htonl((lc->supported & ADVERT_MASK) | fc);
		lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
	} else if (lc->autoneg == AUTONEG_DISABLE) {
		/* Forced speed: program the requested speed directly. */
		c.u.l1cfg.rcap = htonl(lc->requested_speed | fc | mdi);
		lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
	} else
		/* Autonegotiating: just set what we advertise. */
		c.u.l1cfg.rcap = htonl(lc->advertising | fc | mdi);

	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}
874
875/**
876 * t4_restart_aneg - restart autonegotiation
877 * @adap: the adapter
878 * @mbox: mbox to use for the FW command
879 * @port: the port id
880 *
881 * Restarts autonegotiation for the selected port.
882 */
883int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port)
884{
885 struct fw_port_cmd c;
886
887 memset(&c, 0, sizeof(c));
888 c.op_to_portid = htonl(FW_CMD_OP(FW_PORT_CMD) | FW_CMD_REQUEST |
889 FW_CMD_EXEC | FW_PORT_CMD_PORTID(port));
890 c.action_to_len16 = htonl(FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
891 FW_LEN16(c));
892 c.u.l1cfg.rcap = htonl(FW_PORT_CAP_ANEG);
893 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
894}
895
56d36be4
DM
/*
 * One entry of an interrupt-decoding table for t4_handle_intr_status().
 * A table is terminated by an entry with mask == 0.
 */
struct intr_info {
	unsigned int mask;       /* bits to check in interrupt status */
	const char *msg;         /* message to print or NULL */
	short stat_idx;          /* stat counter to increment or -1 */
	unsigned short fatal;    /* whether the condition reported is fatal */
};
902
903/**
904 * t4_handle_intr_status - table driven interrupt handler
905 * @adapter: the adapter that generated the interrupt
906 * @reg: the interrupt status register to process
907 * @acts: table of interrupt actions
908 *
909 * A table driven interrupt handler that applies a set of masks to an
910 * interrupt status word and performs the corresponding actions if the
911 * interrupts described by the mask have occured. The actions include
912 * optionally emitting a warning or alert message. The table is terminated
913 * by an entry specifying mask 0. Returns the number of fatal interrupt
914 * conditions.
915 */
916static int t4_handle_intr_status(struct adapter *adapter, unsigned int reg,
917 const struct intr_info *acts)
918{
919 int fatal = 0;
920 unsigned int mask = 0;
921 unsigned int status = t4_read_reg(adapter, reg);
922
923 for ( ; acts->mask; ++acts) {
924 if (!(status & acts->mask))
925 continue;
926 if (acts->fatal) {
927 fatal++;
928 dev_alert(adapter->pdev_dev, "%s (0x%x)\n", acts->msg,
929 status & acts->mask);
930 } else if (acts->msg && printk_ratelimit())
931 dev_warn(adapter->pdev_dev, "%s (0x%x)\n", acts->msg,
932 status & acts->mask);
933 mask |= acts->mask;
934 }
935 status &= mask;
936 if (status) /* clear processed interrupts */
937 t4_write_reg(adapter, reg, status);
938 return fatal;
939}
940
941/*
942 * Interrupt handler for the PCIE module.
943 */
944static void pcie_intr_handler(struct adapter *adapter)
945{
946 static struct intr_info sysbus_intr_info[] = {
947 { RNPP, "RXNP array parity error", -1, 1 },
948 { RPCP, "RXPC array parity error", -1, 1 },
949 { RCIP, "RXCIF array parity error", -1, 1 },
950 { RCCP, "Rx completions control array parity error", -1, 1 },
951 { RFTP, "RXFT array parity error", -1, 1 },
952 { 0 }
953 };
954 static struct intr_info pcie_port_intr_info[] = {
955 { TPCP, "TXPC array parity error", -1, 1 },
956 { TNPP, "TXNP array parity error", -1, 1 },
957 { TFTP, "TXFT array parity error", -1, 1 },
958 { TCAP, "TXCA array parity error", -1, 1 },
959 { TCIP, "TXCIF array parity error", -1, 1 },
960 { RCAP, "RXCA array parity error", -1, 1 },
961 { OTDD, "outbound request TLP discarded", -1, 1 },
962 { RDPE, "Rx data parity error", -1, 1 },
963 { TDUE, "Tx uncorrectable data error", -1, 1 },
964 { 0 }
965 };
966 static struct intr_info pcie_intr_info[] = {
967 { MSIADDRLPERR, "MSI AddrL parity error", -1, 1 },
968 { MSIADDRHPERR, "MSI AddrH parity error", -1, 1 },
969 { MSIDATAPERR, "MSI data parity error", -1, 1 },
970 { MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 },
971 { MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 },
972 { MSIXDATAPERR, "MSI-X data parity error", -1, 1 },
973 { MSIXDIPERR, "MSI-X DI parity error", -1, 1 },
974 { PIOCPLPERR, "PCI PIO completion FIFO parity error", -1, 1 },
975 { PIOREQPERR, "PCI PIO request FIFO parity error", -1, 1 },
976 { TARTAGPERR, "PCI PCI target tag FIFO parity error", -1, 1 },
977 { CCNTPERR, "PCI CMD channel count parity error", -1, 1 },
978 { CREQPERR, "PCI CMD channel request parity error", -1, 1 },
979 { CRSPPERR, "PCI CMD channel response parity error", -1, 1 },
980 { DCNTPERR, "PCI DMA channel count parity error", -1, 1 },
981 { DREQPERR, "PCI DMA channel request parity error", -1, 1 },
982 { DRSPPERR, "PCI DMA channel response parity error", -1, 1 },
983 { HCNTPERR, "PCI HMA channel count parity error", -1, 1 },
984 { HREQPERR, "PCI HMA channel request parity error", -1, 1 },
985 { HRSPPERR, "PCI HMA channel response parity error", -1, 1 },
986 { CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 },
987 { FIDPERR, "PCI FID parity error", -1, 1 },
988 { INTXCLRPERR, "PCI INTx clear parity error", -1, 1 },
989 { MATAGPERR, "PCI MA tag parity error", -1, 1 },
990 { PIOTAGPERR, "PCI PIO tag parity error", -1, 1 },
991 { RXCPLPERR, "PCI Rx completion parity error", -1, 1 },
992 { RXWRPERR, "PCI Rx write parity error", -1, 1 },
993 { RPLPERR, "PCI replay buffer parity error", -1, 1 },
994 { PCIESINT, "PCI core secondary fault", -1, 1 },
995 { PCIEPINT, "PCI core primary fault", -1, 1 },
996 { UNXSPLCPLERR, "PCI unexpected split completion error", -1, 0 },
997 { 0 }
998 };
999
1000 int fat;
1001
1002 fat = t4_handle_intr_status(adapter,
1003 PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS,
1004 sysbus_intr_info) +
1005 t4_handle_intr_status(adapter,
1006 PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
1007 pcie_port_intr_info) +
1008 t4_handle_intr_status(adapter, PCIE_INT_CAUSE, pcie_intr_info);
1009 if (fat)
1010 t4_fatal_err(adapter);
1011}
1012
1013/*
1014 * TP interrupt handler.
1015 */
1016static void tp_intr_handler(struct adapter *adapter)
1017{
1018 static struct intr_info tp_intr_info[] = {
1019 { 0x3fffffff, "TP parity error", -1, 1 },
1020 { FLMTXFLSTEMPTY, "TP out of Tx pages", -1, 1 },
1021 { 0 }
1022 };
1023
1024 if (t4_handle_intr_status(adapter, TP_INT_CAUSE, tp_intr_info))
1025 t4_fatal_err(adapter);
1026}
1027
1028/*
1029 * SGE interrupt handler.
1030 */
1031static void sge_intr_handler(struct adapter *adapter)
1032{
1033 u64 v;
1034
1035 static struct intr_info sge_intr_info[] = {
1036 { ERR_CPL_EXCEED_IQE_SIZE,
1037 "SGE received CPL exceeding IQE size", -1, 1 },
1038 { ERR_INVALID_CIDX_INC,
1039 "SGE GTS CIDX increment too large", -1, 0 },
1040 { ERR_CPL_OPCODE_0, "SGE received 0-length CPL", -1, 0 },
1041 { ERR_DROPPED_DB, "SGE doorbell dropped", -1, 0 },
1042 { ERR_DATA_CPL_ON_HIGH_QID1 | ERR_DATA_CPL_ON_HIGH_QID0,
1043 "SGE IQID > 1023 received CPL for FL", -1, 0 },
1044 { ERR_BAD_DB_PIDX3, "SGE DBP 3 pidx increment too large", -1,
1045 0 },
1046 { ERR_BAD_DB_PIDX2, "SGE DBP 2 pidx increment too large", -1,
1047 0 },
1048 { ERR_BAD_DB_PIDX1, "SGE DBP 1 pidx increment too large", -1,
1049 0 },
1050 { ERR_BAD_DB_PIDX0, "SGE DBP 0 pidx increment too large", -1,
1051 0 },
1052 { ERR_ING_CTXT_PRIO,
1053 "SGE too many priority ingress contexts", -1, 0 },
1054 { ERR_EGR_CTXT_PRIO,
1055 "SGE too many priority egress contexts", -1, 0 },
1056 { INGRESS_SIZE_ERR, "SGE illegal ingress QID", -1, 0 },
1057 { EGRESS_SIZE_ERR, "SGE illegal egress QID", -1, 0 },
1058 { 0 }
1059 };
1060
1061 v = (u64)t4_read_reg(adapter, SGE_INT_CAUSE1) |
1062 ((u64)t4_read_reg(adapter, SGE_INT_CAUSE2) << 32);
1063 if (v) {
1064 dev_alert(adapter->pdev_dev, "SGE parity error (%#llx)\n",
1065 (unsigned long long)v);
1066 t4_write_reg(adapter, SGE_INT_CAUSE1, v);
1067 t4_write_reg(adapter, SGE_INT_CAUSE2, v >> 32);
1068 }
1069
1070 if (t4_handle_intr_status(adapter, SGE_INT_CAUSE3, sge_intr_info) ||
1071 v != 0)
1072 t4_fatal_err(adapter);
1073}
1074
1075/*
1076 * CIM interrupt handler.
1077 */
1078static void cim_intr_handler(struct adapter *adapter)
1079{
1080 static struct intr_info cim_intr_info[] = {
1081 { PREFDROPINT, "CIM control register prefetch drop", -1, 1 },
1082 { OBQPARERR, "CIM OBQ parity error", -1, 1 },
1083 { IBQPARERR, "CIM IBQ parity error", -1, 1 },
1084 { MBUPPARERR, "CIM mailbox uP parity error", -1, 1 },
1085 { MBHOSTPARERR, "CIM mailbox host parity error", -1, 1 },
1086 { TIEQINPARERRINT, "CIM TIEQ outgoing parity error", -1, 1 },
1087 { TIEQOUTPARERRINT, "CIM TIEQ incoming parity error", -1, 1 },
1088 { 0 }
1089 };
1090 static struct intr_info cim_upintr_info[] = {
1091 { RSVDSPACEINT, "CIM reserved space access", -1, 1 },
1092 { ILLTRANSINT, "CIM illegal transaction", -1, 1 },
1093 { ILLWRINT, "CIM illegal write", -1, 1 },
1094 { ILLRDINT, "CIM illegal read", -1, 1 },
1095 { ILLRDBEINT, "CIM illegal read BE", -1, 1 },
1096 { ILLWRBEINT, "CIM illegal write BE", -1, 1 },
1097 { SGLRDBOOTINT, "CIM single read from boot space", -1, 1 },
1098 { SGLWRBOOTINT, "CIM single write to boot space", -1, 1 },
1099 { BLKWRBOOTINT, "CIM block write to boot space", -1, 1 },
1100 { SGLRDFLASHINT, "CIM single read from flash space", -1, 1 },
1101 { SGLWRFLASHINT, "CIM single write to flash space", -1, 1 },
1102 { BLKWRFLASHINT, "CIM block write to flash space", -1, 1 },
1103 { SGLRDEEPROMINT, "CIM single EEPROM read", -1, 1 },
1104 { SGLWREEPROMINT, "CIM single EEPROM write", -1, 1 },
1105 { BLKRDEEPROMINT, "CIM block EEPROM read", -1, 1 },
1106 { BLKWREEPROMINT, "CIM block EEPROM write", -1, 1 },
1107 { SGLRDCTLINT , "CIM single read from CTL space", -1, 1 },
1108 { SGLWRCTLINT , "CIM single write to CTL space", -1, 1 },
1109 { BLKRDCTLINT , "CIM block read from CTL space", -1, 1 },
1110 { BLKWRCTLINT , "CIM block write to CTL space", -1, 1 },
1111 { SGLRDPLINT , "CIM single read from PL space", -1, 1 },
1112 { SGLWRPLINT , "CIM single write to PL space", -1, 1 },
1113 { BLKRDPLINT , "CIM block read from PL space", -1, 1 },
1114 { BLKWRPLINT , "CIM block write to PL space", -1, 1 },
1115 { REQOVRLOOKUPINT , "CIM request FIFO overwrite", -1, 1 },
1116 { RSPOVRLOOKUPINT , "CIM response FIFO overwrite", -1, 1 },
1117 { TIMEOUTINT , "CIM PIF timeout", -1, 1 },
1118 { TIMEOUTMAINT , "CIM PIF MA timeout", -1, 1 },
1119 { 0 }
1120 };
1121
1122 int fat;
1123
1124 fat = t4_handle_intr_status(adapter, CIM_HOST_INT_CAUSE,
1125 cim_intr_info) +
1126 t4_handle_intr_status(adapter, CIM_HOST_UPACC_INT_CAUSE,
1127 cim_upintr_info);
1128 if (fat)
1129 t4_fatal_err(adapter);
1130}
1131
1132/*
1133 * ULP RX interrupt handler.
1134 */
1135static void ulprx_intr_handler(struct adapter *adapter)
1136{
1137 static struct intr_info ulprx_intr_info[] = {
1138 { 0x7fffff, "ULPRX parity error", -1, 1 },
1139 { 0 }
1140 };
1141
1142 if (t4_handle_intr_status(adapter, ULP_RX_INT_CAUSE, ulprx_intr_info))
1143 t4_fatal_err(adapter);
1144}
1145
1146/*
1147 * ULP TX interrupt handler.
1148 */
1149static void ulptx_intr_handler(struct adapter *adapter)
1150{
1151 static struct intr_info ulptx_intr_info[] = {
1152 { PBL_BOUND_ERR_CH3, "ULPTX channel 3 PBL out of bounds", -1,
1153 0 },
1154 { PBL_BOUND_ERR_CH2, "ULPTX channel 2 PBL out of bounds", -1,
1155 0 },
1156 { PBL_BOUND_ERR_CH1, "ULPTX channel 1 PBL out of bounds", -1,
1157 0 },
1158 { PBL_BOUND_ERR_CH0, "ULPTX channel 0 PBL out of bounds", -1,
1159 0 },
1160 { 0xfffffff, "ULPTX parity error", -1, 1 },
1161 { 0 }
1162 };
1163
1164 if (t4_handle_intr_status(adapter, ULP_TX_INT_CAUSE, ulptx_intr_info))
1165 t4_fatal_err(adapter);
1166}
1167
1168/*
1169 * PM TX interrupt handler.
1170 */
1171static void pmtx_intr_handler(struct adapter *adapter)
1172{
1173 static struct intr_info pmtx_intr_info[] = {
1174 { PCMD_LEN_OVFL0, "PMTX channel 0 pcmd too large", -1, 1 },
1175 { PCMD_LEN_OVFL1, "PMTX channel 1 pcmd too large", -1, 1 },
1176 { PCMD_LEN_OVFL2, "PMTX channel 2 pcmd too large", -1, 1 },
1177 { ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1 },
1178 { PMTX_FRAMING_ERROR, "PMTX framing error", -1, 1 },
1179 { OESPI_PAR_ERROR, "PMTX oespi parity error", -1, 1 },
1180 { DB_OPTIONS_PAR_ERROR, "PMTX db_options parity error", -1, 1 },
1181 { ICSPI_PAR_ERROR, "PMTX icspi parity error", -1, 1 },
1182 { C_PCMD_PAR_ERROR, "PMTX c_pcmd parity error", -1, 1},
1183 { 0 }
1184 };
1185
1186 if (t4_handle_intr_status(adapter, PM_TX_INT_CAUSE, pmtx_intr_info))
1187 t4_fatal_err(adapter);
1188}
1189
1190/*
1191 * PM RX interrupt handler.
1192 */
1193static void pmrx_intr_handler(struct adapter *adapter)
1194{
1195 static struct intr_info pmrx_intr_info[] = {
1196 { ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1 },
1197 { PMRX_FRAMING_ERROR, "PMRX framing error", -1, 1 },
1198 { OCSPI_PAR_ERROR, "PMRX ocspi parity error", -1, 1 },
1199 { DB_OPTIONS_PAR_ERROR, "PMRX db_options parity error", -1, 1 },
1200 { IESPI_PAR_ERROR, "PMRX iespi parity error", -1, 1 },
1201 { E_PCMD_PAR_ERROR, "PMRX e_pcmd parity error", -1, 1},
1202 { 0 }
1203 };
1204
1205 if (t4_handle_intr_status(adapter, PM_RX_INT_CAUSE, pmrx_intr_info))
1206 t4_fatal_err(adapter);
1207}
1208
1209/*
1210 * CPL switch interrupt handler.
1211 */
1212static void cplsw_intr_handler(struct adapter *adapter)
1213{
1214 static struct intr_info cplsw_intr_info[] = {
1215 { CIM_OP_MAP_PERR, "CPLSW CIM op_map parity error", -1, 1 },
1216 { CIM_OVFL_ERROR, "CPLSW CIM overflow", -1, 1 },
1217 { TP_FRAMING_ERROR, "CPLSW TP framing error", -1, 1 },
1218 { SGE_FRAMING_ERROR, "CPLSW SGE framing error", -1, 1 },
1219 { CIM_FRAMING_ERROR, "CPLSW CIM framing error", -1, 1 },
1220 { ZERO_SWITCH_ERROR, "CPLSW no-switch error", -1, 1 },
1221 { 0 }
1222 };
1223
1224 if (t4_handle_intr_status(adapter, CPL_INTR_CAUSE, cplsw_intr_info))
1225 t4_fatal_err(adapter);
1226}
1227
1228/*
1229 * LE interrupt handler.
1230 */
1231static void le_intr_handler(struct adapter *adap)
1232{
1233 static struct intr_info le_intr_info[] = {
1234 { LIPMISS, "LE LIP miss", -1, 0 },
1235 { LIP0, "LE 0 LIP error", -1, 0 },
1236 { PARITYERR, "LE parity error", -1, 1 },
1237 { UNKNOWNCMD, "LE unknown command", -1, 1 },
1238 { REQQPARERR, "LE request queue parity error", -1, 1 },
1239 { 0 }
1240 };
1241
1242 if (t4_handle_intr_status(adap, LE_DB_INT_CAUSE, le_intr_info))
1243 t4_fatal_err(adap);
1244}
1245
1246/*
1247 * MPS interrupt handler.
1248 */
1249static void mps_intr_handler(struct adapter *adapter)
1250{
1251 static struct intr_info mps_rx_intr_info[] = {
1252 { 0xffffff, "MPS Rx parity error", -1, 1 },
1253 { 0 }
1254 };
1255 static struct intr_info mps_tx_intr_info[] = {
1256 { TPFIFO, "MPS Tx TP FIFO parity error", -1, 1 },
1257 { NCSIFIFO, "MPS Tx NC-SI FIFO parity error", -1, 1 },
1258 { TXDATAFIFO, "MPS Tx data FIFO parity error", -1, 1 },
1259 { TXDESCFIFO, "MPS Tx desc FIFO parity error", -1, 1 },
1260 { BUBBLE, "MPS Tx underflow", -1, 1 },
1261 { SECNTERR, "MPS Tx SOP/EOP error", -1, 1 },
1262 { FRMERR, "MPS Tx framing error", -1, 1 },
1263 { 0 }
1264 };
1265 static struct intr_info mps_trc_intr_info[] = {
1266 { FILTMEM, "MPS TRC filter parity error", -1, 1 },
1267 { PKTFIFO, "MPS TRC packet FIFO parity error", -1, 1 },
1268 { MISCPERR, "MPS TRC misc parity error", -1, 1 },
1269 { 0 }
1270 };
1271 static struct intr_info mps_stat_sram_intr_info[] = {
1272 { 0x1fffff, "MPS statistics SRAM parity error", -1, 1 },
1273 { 0 }
1274 };
1275 static struct intr_info mps_stat_tx_intr_info[] = {
1276 { 0xfffff, "MPS statistics Tx FIFO parity error", -1, 1 },
1277 { 0 }
1278 };
1279 static struct intr_info mps_stat_rx_intr_info[] = {
1280 { 0xffffff, "MPS statistics Rx FIFO parity error", -1, 1 },
1281 { 0 }
1282 };
1283 static struct intr_info mps_cls_intr_info[] = {
1284 { MATCHSRAM, "MPS match SRAM parity error", -1, 1 },
1285 { MATCHTCAM, "MPS match TCAM parity error", -1, 1 },
1286 { HASHSRAM, "MPS hash SRAM parity error", -1, 1 },
1287 { 0 }
1288 };
1289
1290 int fat;
1291
1292 fat = t4_handle_intr_status(adapter, MPS_RX_PERR_INT_CAUSE,
1293 mps_rx_intr_info) +
1294 t4_handle_intr_status(adapter, MPS_TX_INT_CAUSE,
1295 mps_tx_intr_info) +
1296 t4_handle_intr_status(adapter, MPS_TRC_INT_CAUSE,
1297 mps_trc_intr_info) +
1298 t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_SRAM,
1299 mps_stat_sram_intr_info) +
1300 t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_TX_FIFO,
1301 mps_stat_tx_intr_info) +
1302 t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_RX_FIFO,
1303 mps_stat_rx_intr_info) +
1304 t4_handle_intr_status(adapter, MPS_CLS_INT_CAUSE,
1305 mps_cls_intr_info);
1306
1307 t4_write_reg(adapter, MPS_INT_CAUSE, CLSINT | TRCINT |
1308 RXINT | TXINT | STATINT);
1309 t4_read_reg(adapter, MPS_INT_CAUSE); /* flush */
1310 if (fat)
1311 t4_fatal_err(adapter);
1312}
1313
#define MEM_INT_MASK (PERR_INT_CAUSE | ECC_CE_INT_CAUSE | ECC_UE_INT_CAUSE)

/*
 * EDC/MC interrupt handler.  @idx selects the memory controller
 * (MEM_EDC0, MEM_EDC1, or MEM_MC) whose cause register is examined.
 * FIFO parity and uncorrectable ECC errors are fatal; correctable ECC
 * errors are counted and rate-limit-logged only.
 */
static void mem_intr_handler(struct adapter *adapter, int idx)
{
	/* indexed by MEM_EDC0/MEM_EDC1/MEM_MC */
	static const char name[3][5] = { "EDC0", "EDC1", "MC" };

	unsigned int addr, cnt_addr, v;

	/* pick the cause/ECC-status registers for the selected controller */
	if (idx <= MEM_EDC1) {
		addr = EDC_REG(EDC_INT_CAUSE, idx);
		cnt_addr = EDC_REG(EDC_ECC_STATUS, idx);
	} else {
		addr = MC_INT_CAUSE;
		cnt_addr = MC_ECC_STATUS;
	}

	v = t4_read_reg(adapter, addr) & MEM_INT_MASK;
	if (v & PERR_INT_CAUSE)
		dev_alert(adapter->pdev_dev, "%s FIFO parity error\n",
			  name[idx]);
	if (v & ECC_CE_INT_CAUSE) {
		u32 cnt = ECC_CECNT_GET(t4_read_reg(adapter, cnt_addr));

		/* reset the correctable-error counter after reading it */
		t4_write_reg(adapter, cnt_addr, ECC_CECNT_MASK);
		if (printk_ratelimit())
			dev_warn(adapter->pdev_dev,
				 "%u %s correctable ECC data error%s\n",
				 cnt, name[idx], cnt > 1 ? "s" : "");
	}
	if (v & ECC_UE_INT_CAUSE)
		dev_alert(adapter->pdev_dev,
			  "%s uncorrectable ECC data error\n", name[idx]);

	/* clear only the bits we examined; parity/UE take the card down */
	t4_write_reg(adapter, addr, v);
	if (v & (PERR_INT_CAUSE | ECC_UE_INT_CAUSE))
		t4_fatal_err(adapter);
}
1354
/*
 * MA interrupt handler.  Every MA interrupt is treated as fatal:
 * t4_fatal_err() is called unconditionally after logging.
 */
static void ma_intr_handler(struct adapter *adap)
{
	u32 v, status = t4_read_reg(adap, MA_INT_CAUSE);

	if (status & MEM_PERR_INT_CAUSE)
		dev_alert(adap->pdev_dev,
			  "MA parity error, parity status %#x\n",
			  t4_read_reg(adap, MA_PARITY_ERROR_STATUS));
	if (status & MEM_WRAP_INT_CAUSE) {
		/* wrap status encodes the offending client and address */
		v = t4_read_reg(adap, MA_INT_WRAP_STATUS);
		dev_alert(adap->pdev_dev, "MA address wrap-around error by "
			  "client %u to address %#x\n",
			  MEM_WRAP_CLIENT_NUM_GET(v),
			  MEM_WRAP_ADDRESS_GET(v) << 4);
	}
	/* clear whatever was latched before declaring the fatal error */
	t4_write_reg(adap, MA_INT_CAUSE, status);
	t4_fatal_err(adap);
}
1376
1377/*
1378 * SMB interrupt handler.
1379 */
1380static void smb_intr_handler(struct adapter *adap)
1381{
1382 static struct intr_info smb_intr_info[] = {
1383 { MSTTXFIFOPARINT, "SMB master Tx FIFO parity error", -1, 1 },
1384 { MSTRXFIFOPARINT, "SMB master Rx FIFO parity error", -1, 1 },
1385 { SLVFIFOPARINT, "SMB slave FIFO parity error", -1, 1 },
1386 { 0 }
1387 };
1388
1389 if (t4_handle_intr_status(adap, SMB_INT_CAUSE, smb_intr_info))
1390 t4_fatal_err(adap);
1391}
1392
1393/*
1394 * NC-SI interrupt handler.
1395 */
1396static void ncsi_intr_handler(struct adapter *adap)
1397{
1398 static struct intr_info ncsi_intr_info[] = {
1399 { CIM_DM_PRTY_ERR, "NC-SI CIM parity error", -1, 1 },
1400 { MPS_DM_PRTY_ERR, "NC-SI MPS parity error", -1, 1 },
1401 { TXFIFO_PRTY_ERR, "NC-SI Tx FIFO parity error", -1, 1 },
1402 { RXFIFO_PRTY_ERR, "NC-SI Rx FIFO parity error", -1, 1 },
1403 { 0 }
1404 };
1405
1406 if (t4_handle_intr_status(adap, NCSI_INT_CAUSE, ncsi_intr_info))
1407 t4_fatal_err(adap);
1408}
1409
1410/*
1411 * XGMAC interrupt handler.
1412 */
1413static void xgmac_intr_handler(struct adapter *adap, int port)
1414{
1415 u32 v = t4_read_reg(adap, PORT_REG(port, XGMAC_PORT_INT_CAUSE));
1416
1417 v &= TXFIFO_PRTY_ERR | RXFIFO_PRTY_ERR;
1418 if (!v)
1419 return;
1420
1421 if (v & TXFIFO_PRTY_ERR)
1422 dev_alert(adap->pdev_dev, "XGMAC %d Tx FIFO parity error\n",
1423 port);
1424 if (v & RXFIFO_PRTY_ERR)
1425 dev_alert(adap->pdev_dev, "XGMAC %d Rx FIFO parity error\n",
1426 port);
1427 t4_write_reg(adap, PORT_REG(port, XGMAC_PORT_INT_CAUSE), v);
1428 t4_fatal_err(adap);
1429}
1430
1431/*
1432 * PL interrupt handler.
1433 */
1434static void pl_intr_handler(struct adapter *adap)
1435{
1436 static struct intr_info pl_intr_info[] = {
1437 { FATALPERR, "T4 fatal parity error", -1, 1 },
1438 { PERRVFID, "PL VFID_MAP parity error", -1, 1 },
1439 { 0 }
1440 };
1441
1442 if (t4_handle_intr_status(adap, PL_PL_INT_CAUSE, pl_intr_info))
1443 t4_fatal_err(adap);
1444}
1445
#define PF_INTR_MASK (PFSW | PFCIM)
#define GLBL_INTR_MASK (CIM | MPS | PL | PCIE | MC | EDC0 | \
		EDC1 | LE | TP | MA | PM_TX | PM_RX | ULP_RX | \
		CPL_SWITCH | SGE | ULP_TX)

/**
 * t4_slow_intr_handler - control path interrupt handler
 * @adapter: the adapter
 *
 * T4 interrupt handler for non-data global interrupt events, e.g., errors.
 * The designation 'slow' is because it involves register reads, while
 * data interrupts typically don't involve any MMIOs.
 *
 * Returns 0 if no global interrupt was pending, 1 otherwise.
 */
int t4_slow_intr_handler(struct adapter *adapter)
{
	u32 cause = t4_read_reg(adapter, PL_INT_CAUSE);

	if (!(cause & GLBL_INTR_MASK))
		return 0;
	/* Dispatch each pending cause bit to its module handler.
	 * NOTE(review): NCSI, SMB and the XGMAC* bits are tested below but
	 * are not part of GLBL_INTR_MASK, so those handlers only run when
	 * some other global cause is also pending -- confirm intended.
	 */
	if (cause & CIM)
		cim_intr_handler(adapter);
	if (cause & MPS)
		mps_intr_handler(adapter);
	if (cause & NCSI)
		ncsi_intr_handler(adapter);
	if (cause & PL)
		pl_intr_handler(adapter);
	if (cause & SMB)
		smb_intr_handler(adapter);
	if (cause & XGMAC0)
		xgmac_intr_handler(adapter, 0);
	if (cause & XGMAC1)
		xgmac_intr_handler(adapter, 1);
	if (cause & XGMAC_KR0)
		xgmac_intr_handler(adapter, 2);
	if (cause & XGMAC_KR1)
		xgmac_intr_handler(adapter, 3);
	if (cause & PCIE)
		pcie_intr_handler(adapter);
	if (cause & MC)
		mem_intr_handler(adapter, MEM_MC);
	if (cause & EDC0)
		mem_intr_handler(adapter, MEM_EDC0);
	if (cause & EDC1)
		mem_intr_handler(adapter, MEM_EDC1);
	if (cause & LE)
		le_intr_handler(adapter);
	if (cause & TP)
		tp_intr_handler(adapter);
	if (cause & MA)
		ma_intr_handler(adapter);
	if (cause & PM_TX)
		pmtx_intr_handler(adapter);
	if (cause & PM_RX)
		pmrx_intr_handler(adapter);
	if (cause & ULP_RX)
		ulprx_intr_handler(adapter);
	if (cause & CPL_SWITCH)
		cplsw_intr_handler(adapter);
	if (cause & SGE)
		sge_intr_handler(adapter);
	if (cause & ULP_TX)
		ulptx_intr_handler(adapter);

	/* Clear the interrupts just processed for which we are the master. */
	t4_write_reg(adapter, PL_INT_CAUSE, cause & GLBL_INTR_MASK);
	(void) t4_read_reg(adapter, PL_INT_CAUSE); /* flush */
	return 1;
}
1515
/**
 * t4_intr_enable - enable interrupts
 * @adapter: the adapter whose interrupts should be enabled
 *
 * Enable PF-specific interrupts for the calling function and the top-level
 * interrupt concentrator for global interrupts. Interrupts are already
 * enabled at each module, here we just enable the roots of the interrupt
 * hierarchies.
 *
 * Note: this function should be called only when the driver manages
 * non PF-specific interrupts from the various HW modules. Only one PCI
 * function at a time should be doing this.
 */
void t4_intr_enable(struct adapter *adapter)
{
	/* which PF we are, read from the hardware's WHOAMI register */
	u32 pf = SOURCEPF_GET(t4_read_reg(adapter, PL_WHOAMI));

	/* enable all SGE error interrupt sources */
	t4_write_reg(adapter, SGE_INT_ENABLE3, ERR_CPL_EXCEED_IQE_SIZE |
		     ERR_INVALID_CIDX_INC | ERR_CPL_OPCODE_0 |
		     ERR_DROPPED_DB | ERR_DATA_CPL_ON_HIGH_QID1 |
		     ERR_DATA_CPL_ON_HIGH_QID0 | ERR_BAD_DB_PIDX3 |
		     ERR_BAD_DB_PIDX2 | ERR_BAD_DB_PIDX1 |
		     ERR_BAD_DB_PIDX0 | ERR_ING_CTXT_PRIO |
		     ERR_EGR_CTXT_PRIO | INGRESS_SIZE_ERR |
		     EGRESS_SIZE_ERR);
	/* enable this PF's root interrupts */
	t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE), PF_INTR_MASK);
	/* route global interrupt causes to this PF */
	t4_set_reg_field(adapter, PL_INT_MAP0, 0, 1 << pf);
}
1544
/**
 * t4_intr_disable - disable interrupts
 * @adapter: the adapter whose interrupts should be disabled
 *
 * Disable interrupts. We only disable the top-level interrupt
 * concentrators. The caller must be a PCI function managing global
 * interrupts.
 */
void t4_intr_disable(struct adapter *adapter)
{
	/* which PF we are, read from the hardware's WHOAMI register */
	u32 pf = SOURCEPF_GET(t4_read_reg(adapter, PL_WHOAMI));

	/* mask this PF's root interrupts and unroute global causes from it */
	t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE), 0);
	t4_set_reg_field(adapter, PL_INT_MAP0, 1 << pf, 0);
}
1560
/**
 * t4_intr_clear - clear all interrupts
 * @adapter: the adapter whose interrupts should be cleared
 *
 * Clears all interrupts. The caller must be a PCI function managing
 * global interrupts.
 */
void t4_intr_clear(struct adapter *adapter)
{
	/* every per-module cause register; writing all-ones clears them */
	static const unsigned int cause_reg[] = {
		SGE_INT_CAUSE1, SGE_INT_CAUSE2, SGE_INT_CAUSE3,
		PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS,
		PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
		PCIE_NONFAT_ERR, PCIE_INT_CAUSE,
		MC_INT_CAUSE,
		MA_INT_WRAP_STATUS, MA_PARITY_ERROR_STATUS, MA_INT_CAUSE,
		/* EDC0 cause is at the base address, EDC1 via EDC_REG */
		EDC_INT_CAUSE, EDC_REG(EDC_INT_CAUSE, 1),
		CIM_HOST_INT_CAUSE, CIM_HOST_UPACC_INT_CAUSE,
		MYPF_REG(CIM_PF_HOST_INT_CAUSE),
		TP_INT_CAUSE,
		ULP_RX_INT_CAUSE, ULP_TX_INT_CAUSE,
		PM_RX_INT_CAUSE, PM_TX_INT_CAUSE,
		MPS_RX_PERR_INT_CAUSE,
		CPL_INTR_CAUSE,
		MYPF_REG(PL_PF_INT_CAUSE),
		PL_PL_INT_CAUSE,
		LE_DB_INT_CAUSE,
	};

	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(cause_reg); ++i)
		t4_write_reg(adapter, cause_reg[i], 0xffffffff);

	/* finally clear the top-level concentrator and flush the write */
	t4_write_reg(adapter, PL_INT_CAUSE, GLBL_INTR_MASK);
	(void) t4_read_reg(adapter, PL_INT_CAUSE); /* flush */
}
1598
1599/**
1600 * hash_mac_addr - return the hash value of a MAC address
1601 * @addr: the 48-bit Ethernet MAC address
1602 *
1603 * Hashes a MAC address according to the hash function used by HW inexact
1604 * (hash) address matching.
1605 */
1606static int hash_mac_addr(const u8 *addr)
1607{
1608 u32 a = ((u32)addr[0] << 16) | ((u32)addr[1] << 8) | addr[2];
1609 u32 b = ((u32)addr[3] << 16) | ((u32)addr[4] << 8) | addr[5];
1610 a ^= b;
1611 a ^= (a >> 12);
1612 a ^= (a >> 6);
1613 return a & 0x3f;
1614}
1615
/**
 * t4_config_rss_range - configure a portion of the RSS mapping table
 * @adapter: the adapter
 * @mbox: mbox to use for the FW command
 * @viid: virtual interface whose RSS subtable is to be written
 * @start: start entry in the table to write
 * @n: how many table entries to write
 * @rspq: values for the response queue lookup table
 * @nrspq: number of values in @rspq
 *
 * Programs the selected part of the VI's RSS mapping table with the
 * provided values. If @nrspq < @n the supplied values are used repeatedly
 * until the full table range is populated.
 *
 * The caller must ensure the values in @rspq are in the range allowed for
 * @viid.
 *
 * Returns 0 on success or a negative errno from the mailbox exchange.
 */
int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
			int start, int n, const u16 *rspq, unsigned int nrspq)
{
	int ret;
	/* rsp cycles through rspq[], wrapping back to the start when it
	 * runs off the end, so short rspq arrays are applied repeatedly */
	const u16 *rsp = rspq;
	const u16 *rsp_end = rspq + nrspq;
	struct fw_rss_ind_tbl_cmd cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.op_to_viid = htonl(FW_CMD_OP(FW_RSS_IND_TBL_CMD) |
			       FW_CMD_REQUEST | FW_CMD_WRITE |
			       FW_RSS_IND_TBL_CMD_VIID(viid));
	cmd.retval_len16 = htonl(FW_LEN16(cmd));

	/* each fw_rss_ind_tbl_cmd takes up to 32 entries */
	while (n > 0) {
		int nq = min(n, 32);
		__be32 *qp = &cmd.iq0_to_iq2;

		cmd.niqid = htons(nq);
		cmd.startidx = htons(start);

		start += nq;
		n -= nq;

		/* pack three queue ids per 32-bit word; nq may go
		 * negative here since entries come in groups of 3 */
		while (nq > 0) {
			unsigned int v;

			v = FW_RSS_IND_TBL_CMD_IQ0(*rsp);
			if (++rsp >= rsp_end)
				rsp = rspq;
			v |= FW_RSS_IND_TBL_CMD_IQ1(*rsp);
			if (++rsp >= rsp_end)
				rsp = rspq;
			v |= FW_RSS_IND_TBL_CMD_IQ2(*rsp);
			if (++rsp >= rsp_end)
				rsp = rspq;

			*qp++ = htonl(v);
			nq -= 3;
		}

		ret = t4_wr_mbox(adapter, mbox, &cmd, sizeof(cmd), NULL);
		if (ret)
			return ret;
	}
	return 0;
}
1681
1682/**
1683 * t4_config_glbl_rss - configure the global RSS mode
1684 * @adapter: the adapter
1685 * @mbox: mbox to use for the FW command
1686 * @mode: global RSS mode
1687 * @flags: mode-specific flags
1688 *
1689 * Sets the global RSS mode.
1690 */
1691int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode,
1692 unsigned int flags)
1693{
1694 struct fw_rss_glb_config_cmd c;
1695
1696 memset(&c, 0, sizeof(c));
1697 c.op_to_write = htonl(FW_CMD_OP(FW_RSS_GLB_CONFIG_CMD) |
1698 FW_CMD_REQUEST | FW_CMD_WRITE);
1699 c.retval_len16 = htonl(FW_LEN16(c));
1700 if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_MANUAL) {
1701 c.u.manual.mode_pkd = htonl(FW_RSS_GLB_CONFIG_CMD_MODE(mode));
1702 } else if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL) {
1703 c.u.basicvirtual.mode_pkd =
1704 htonl(FW_RSS_GLB_CONFIG_CMD_MODE(mode));
1705 c.u.basicvirtual.synmapen_to_hashtoeplitz = htonl(flags);
1706 } else
1707 return -EINVAL;
1708 return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
1709}
1710
/* Read an RSS table row: write the row index (with the upper command bits
 * set) and wait for LKPTBLROWVLD to signal the row data is available,
 * returning the register contents in *val.
 */
static int rd_rss_row(struct adapter *adap, int row, u32 *val)
{
	t4_write_reg(adap, TP_RSS_LKP_TABLE, 0xfff00000 | row);
	return t4_wait_op_done_val(adap, TP_RSS_LKP_TABLE, LKPTBLROWVLD, 1,
				   5, 0, val);
}
1718
1719/**
1720 * t4_read_rss - read the contents of the RSS mapping table
1721 * @adapter: the adapter
1722 * @map: holds the contents of the RSS mapping table
1723 *
1724 * Reads the contents of the RSS hash->queue mapping table.
1725 */
1726int t4_read_rss(struct adapter *adapter, u16 *map)
1727{
1728 u32 val;
1729 int i, ret;
1730
1731 for (i = 0; i < RSS_NENTRIES / 2; ++i) {
1732 ret = rd_rss_row(adapter, i, &val);
1733 if (ret)
1734 return ret;
1735 *map++ = LKPTBLQUEUE0_GET(val);
1736 *map++ = LKPTBLQUEUE1_GET(val);
1737 }
1738 return 0;
1739}
1740
/**
 * t4_tp_get_tcp_stats - read TP's TCP MIB counters
 * @adap: the adapter
 * @v4: holds the TCP/IP counter values
 * @v6: holds the TCP/IPv6 counter values
 *
 * Returns the values of TP's TCP/IP and TCP/IPv6 MIB counters.
 * Either @v4 or @v6 may be %NULL to skip the corresponding stats.
 */
void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4,
			 struct tp_tcp_stats *v6)
{
	/* buffer spanning the contiguous MIB range OUT_RST..RXT_SEG_LO */
	u32 val[TP_MIB_TCP_RXT_SEG_LO - TP_MIB_TCP_OUT_RST + 1];

	/* STAT_IDX maps a MIB register name to its offset in val[];
	 * STAT64 recombines the HI/LO halves of a 64-bit counter */
#define STAT_IDX(x) ((TP_MIB_TCP_##x) - TP_MIB_TCP_OUT_RST)
#define STAT(x) val[STAT_IDX(x)]
#define STAT64(x) (((u64)STAT(x##_HI) << 32) | STAT(x##_LO))

	if (v4) {
		t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, val,
				 ARRAY_SIZE(val), TP_MIB_TCP_OUT_RST);
		v4->tcpOutRsts = STAT(OUT_RST);
		v4->tcpInSegs  = STAT64(IN_SEG);
		v4->tcpOutSegs = STAT64(OUT_SEG);
		v4->tcpRetransSegs = STAT64(RXT_SEG);
	}
	if (v6) {
		/* the v6 counter block has the same layout, so the same
		 * offsets apply relative to TP_MIB_TCP_V6OUT_RST */
		t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, val,
				 ARRAY_SIZE(val), TP_MIB_TCP_V6OUT_RST);
		v6->tcpOutRsts = STAT(OUT_RST);
		v6->tcpInSegs  = STAT64(IN_SEG);
		v6->tcpOutSegs = STAT64(OUT_SEG);
		v6->tcpRetransSegs = STAT64(RXT_SEG);
	}
#undef STAT64
#undef STAT
#undef STAT_IDX
}
1779
/**
 * t4_tp_get_err_stats - read TP's error MIB counters
 * @adap: the adapter
 * @st: holds the counter values
 *
 * Returns the values of TP's error counters.  Each indirect read pulls a
 * contiguous run of MIB registers into the corresponding array field of
 * @st (the counts passed match the field array sizes -- the struct fields
 * after each named one are filled in order).
 */
void t4_tp_get_err_stats(struct adapter *adap, struct tp_err_stats *st)
{
	t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, st->macInErrs,
			 12, TP_MIB_MAC_IN_ERR_0);
	t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, st->tnlCongDrops,
			 8, TP_MIB_TNL_CNG_DROP_0);
	t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, st->tnlTxDrops,
			 4, TP_MIB_TNL_DROP_0);
	t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, st->ofldVlanDrops,
			 4, TP_MIB_OFD_VLN_DROP_0);
	t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, st->tcp6InErrs,
			 4, TP_MIB_TCP_V6IN_ERR_0);
	t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, &st->ofldNoNeigh,
			 2, TP_MIB_OFD_ARP_DROP);
}
1802
1803/**
1804 * t4_read_mtu_tbl - returns the values in the HW path MTU table
1805 * @adap: the adapter
1806 * @mtus: where to store the MTU values
1807 * @mtu_log: where to store the MTU base-2 log (may be %NULL)
1808 *
1809 * Reads the HW path MTU table.
1810 */
1811void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log)
1812{
1813 u32 v;
1814 int i;
1815
1816 for (i = 0; i < NMTUS; ++i) {
1817 t4_write_reg(adap, TP_MTU_TABLE,
1818 MTUINDEX(0xff) | MTUVALUE(i));
1819 v = t4_read_reg(adap, TP_MTU_TABLE);
1820 mtus[i] = MTUVALUE_GET(v);
1821 if (mtu_log)
1822 mtu_log[i] = MTUWIDTH_GET(v);
1823 }
1824}
1825
1826/**
1827 * init_cong_ctrl - initialize congestion control parameters
1828 * @a: the alpha values for congestion control
1829 * @b: the beta values for congestion control
1830 *
1831 * Initialize the congestion control parameters.
1832 */
1833static void __devinit init_cong_ctrl(unsigned short *a, unsigned short *b)
1834{
1835 a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1;
1836 a[9] = 2;
1837 a[10] = 3;
1838 a[11] = 4;
1839 a[12] = 5;
1840 a[13] = 6;
1841 a[14] = 7;
1842 a[15] = 8;
1843 a[16] = 9;
1844 a[17] = 10;
1845 a[18] = 14;
1846 a[19] = 17;
1847 a[20] = 21;
1848 a[21] = 25;
1849 a[22] = 30;
1850 a[23] = 35;
1851 a[24] = 45;
1852 a[25] = 60;
1853 a[26] = 80;
1854 a[27] = 100;
1855 a[28] = 200;
1856 a[29] = 300;
1857 a[30] = 400;
1858 a[31] = 500;
1859
1860 b[0] = b[1] = b[2] = b[3] = b[4] = b[5] = b[6] = b[7] = b[8] = 0;
1861 b[9] = b[10] = 1;
1862 b[11] = b[12] = 2;
1863 b[13] = b[14] = b[15] = b[16] = 3;
1864 b[17] = b[18] = b[19] = b[20] = b[21] = 4;
1865 b[22] = b[23] = b[24] = b[25] = b[26] = b[27] = 5;
1866 b[28] = b[29] = 6;
1867 b[30] = b[31] = 7;
1868}
1869
/* The minimum additive increment value for the congestion control table */
#define CC_MIN_INCR 2U

/**
 * t4_load_mtus - write the MTU and congestion control HW tables
 * @adap: the adapter
 * @mtus: the values for the MTU table
 * @alpha: the values for the congestion control alpha parameter
 * @beta: the values for the congestion control beta parameter
 *
 * Write the HW MTU table with the supplied MTUs and the high-speed
 * congestion control table with the supplied alpha, beta, and MTUs.
 * We write the two tables together because the additive increments
 * depend on the MTUs.
 */
void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
		  const unsigned short *alpha, const unsigned short *beta)
{
	/* average packet counts per congestion-control window */
	static const unsigned int avg_pkts[NCCTRL_WIN] = {
		2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
		896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
		28672, 40960, 57344, 81920, 114688, 163840, 229376
	};

	unsigned int i, w;

	for (i = 0; i < NMTUS; ++i) {
		unsigned int mtu = mtus[i];
		unsigned int log2 = fls(mtu);

		/* round down to the nearest power of two if the next lower
		 * quarter-step bit is clear */
		if (!(mtu & ((1 << log2) >> 2)))     /* round */
			log2--;
		t4_write_reg(adap, TP_MTU_TABLE, MTUINDEX(i) |
			     MTUWIDTH(log2) | MTUVALUE(mtu));

		/* program one congestion-control entry per window for
		 * this MTU; the increment is floored at CC_MIN_INCR */
		for (w = 0; w < NCCTRL_WIN; ++w) {
			unsigned int inc;

			inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
				  CC_MIN_INCR);

			t4_write_reg(adap, TP_CCTRL_TABLE, (i << 21) |
				     (w << 16) | (beta[w] << 13) | inc);
		}
	}
}
1916
1917/**
1918 * t4_set_trace_filter - configure one of the tracing filters
1919 * @adap: the adapter
1920 * @tp: the desired trace filter parameters
1921 * @idx: which filter to configure
1922 * @enable: whether to enable or disable the filter
1923 *
1924 * Configures one of the tracing filters available in HW. If @enable is
1925 * %0 @tp is not examined and may be %NULL.
1926 */
1927int t4_set_trace_filter(struct adapter *adap, const struct trace_params *tp,
1928 int idx, int enable)
1929{
1930 int i, ofst = idx * 4;
1931 u32 data_reg, mask_reg, cfg;
1932 u32 multitrc = TRCMULTIFILTER;
1933
1934 if (!enable) {
1935 t4_write_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A + ofst, 0);
1936 goto out;
1937 }
1938
1939 if (tp->port > 11 || tp->invert > 1 || tp->skip_len > 0x1f ||
1940 tp->skip_ofst > 0x1f || tp->min_len > 0x1ff ||
1941 tp->snap_len > 9600 || (idx && tp->snap_len > 256))
1942 return -EINVAL;
1943
1944 if (tp->snap_len > 256) { /* must be tracer 0 */
1945 if ((t4_read_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A + 4) |
1946 t4_read_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A + 8) |
1947 t4_read_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A + 12)) & TFEN)
1948 return -EINVAL; /* other tracers are enabled */
1949 multitrc = 0;
1950 } else if (idx) {
1951 i = t4_read_reg(adap, MPS_TRC_FILTER_MATCH_CTL_B);
1952 if (TFCAPTUREMAX_GET(i) > 256 &&
1953 (t4_read_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A) & TFEN))
1954 return -EINVAL;
1955 }
1956
1957 /* stop the tracer we'll be changing */
1958 t4_write_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A + ofst, 0);
1959
1960 /* disable tracing globally if running in the wrong single/multi mode */
1961 cfg = t4_read_reg(adap, MPS_TRC_CFG);
1962 if ((cfg & TRCEN) && multitrc != (cfg & TRCMULTIFILTER)) {
1963 t4_write_reg(adap, MPS_TRC_CFG, cfg ^ TRCEN);
1964 t4_read_reg(adap, MPS_TRC_CFG); /* flush */
1965 msleep(1);
1966 if (!(t4_read_reg(adap, MPS_TRC_CFG) & TRCFIFOEMPTY))
1967 return -ETIMEDOUT;
1968 }
1969 /*
1970 * At this point either the tracing is enabled and in the right mode or
1971 * disabled.
1972 */
1973
1974 idx *= (MPS_TRC_FILTER1_MATCH - MPS_TRC_FILTER0_MATCH);
1975 data_reg = MPS_TRC_FILTER0_MATCH + idx;
1976 mask_reg = MPS_TRC_FILTER0_DONT_CARE + idx;
1977
1978 for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) {
1979 t4_write_reg(adap, data_reg, tp->data[i]);
1980 t4_write_reg(adap, mask_reg, ~tp->mask[i]);
1981 }
1982 t4_write_reg(adap, MPS_TRC_FILTER_MATCH_CTL_B + ofst,
1983 TFCAPTUREMAX(tp->snap_len) |
1984 TFMINPKTSIZE(tp->min_len));
1985 t4_write_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A + ofst,
1986 TFOFFSET(tp->skip_ofst) | TFLENGTH(tp->skip_len) |
1987 TFPORT(tp->port) | TFEN |
1988 (tp->invert ? TFINVERTMATCH : 0));
1989
1990 cfg &= ~TRCMULTIFILTER;
1991 t4_write_reg(adap, MPS_TRC_CFG, cfg | TRCEN | multitrc);
1992out: t4_read_reg(adap, MPS_TRC_CFG); /* flush */
1993 return 0;
1994}
1995
1996/**
1997 * t4_get_trace_filter - query one of the tracing filters
1998 * @adap: the adapter
1999 * @tp: the current trace filter parameters
2000 * @idx: which trace filter to query
2001 * @enabled: non-zero if the filter is enabled
2002 *
2003 * Returns the current settings of one of the HW tracing filters.
2004 */
2005void t4_get_trace_filter(struct adapter *adap, struct trace_params *tp, int idx,
2006 int *enabled)
2007{
2008 u32 ctla, ctlb;
2009 int i, ofst = idx * 4;
2010 u32 data_reg, mask_reg;
2011
2012 ctla = t4_read_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A + ofst);
2013 ctlb = t4_read_reg(adap, MPS_TRC_FILTER_MATCH_CTL_B + ofst);
2014
2015 *enabled = !!(ctla & TFEN);
2016 tp->snap_len = TFCAPTUREMAX_GET(ctlb);
2017 tp->min_len = TFMINPKTSIZE_GET(ctlb);
2018 tp->skip_ofst = TFOFFSET_GET(ctla);
2019 tp->skip_len = TFLENGTH_GET(ctla);
2020 tp->invert = !!(ctla & TFINVERTMATCH);
2021 tp->port = TFPORT_GET(ctla);
2022
2023 ofst = (MPS_TRC_FILTER1_MATCH - MPS_TRC_FILTER0_MATCH) * idx;
2024 data_reg = MPS_TRC_FILTER0_MATCH + ofst;
2025 mask_reg = MPS_TRC_FILTER0_DONT_CARE + ofst;
2026
2027 for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) {
2028 tp->mask[i] = ~t4_read_reg(adap, mask_reg);
2029 tp->data[i] = t4_read_reg(adap, data_reg) & tp->mask[i];
2030 }
2031}
2032
2033/**
2034 * get_mps_bg_map - return the buffer groups associated with a port
2035 * @adap: the adapter
2036 * @idx: the port index
2037 *
2038 * Returns a bitmap indicating which MPS buffer groups are associated
2039 * with the given port. Bit i is set if buffer group i is used by the
2040 * port.
2041 */
2042static unsigned int get_mps_bg_map(struct adapter *adap, int idx)
2043{
2044 u32 n = NUMPORTS_GET(t4_read_reg(adap, MPS_CMN_CTL));
2045
2046 if (n == 0)
2047 return idx == 0 ? 0xf : 0;
2048 if (n == 1)
2049 return idx < 2 ? (3 << (2 * idx)) : 0;
2050 return 1 << idx;
2051}
2052
2053/**
2054 * t4_get_port_stats - collect port statistics
2055 * @adap: the adapter
2056 * @idx: the port index
2057 * @p: the stats structure to fill
2058 *
2059 * Collect statistics related to the given port from HW.
2060 */
2061void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p)
2062{
2063 u32 bgmap = get_mps_bg_map(adap, idx);
2064
2065#define GET_STAT(name) \
2066 t4_read_reg64(adap, PORT_REG(idx, MPS_PORT_STAT_##name##_L))
2067#define GET_STAT_COM(name) t4_read_reg64(adap, MPS_STAT_##name##_L)
2068
2069 p->tx_octets = GET_STAT(TX_PORT_BYTES);
2070 p->tx_frames = GET_STAT(TX_PORT_FRAMES);
2071 p->tx_bcast_frames = GET_STAT(TX_PORT_BCAST);
2072 p->tx_mcast_frames = GET_STAT(TX_PORT_MCAST);
2073 p->tx_ucast_frames = GET_STAT(TX_PORT_UCAST);
2074 p->tx_error_frames = GET_STAT(TX_PORT_ERROR);
2075 p->tx_frames_64 = GET_STAT(TX_PORT_64B);
2076 p->tx_frames_65_127 = GET_STAT(TX_PORT_65B_127B);
2077 p->tx_frames_128_255 = GET_STAT(TX_PORT_128B_255B);
2078 p->tx_frames_256_511 = GET_STAT(TX_PORT_256B_511B);
2079 p->tx_frames_512_1023 = GET_STAT(TX_PORT_512B_1023B);
2080 p->tx_frames_1024_1518 = GET_STAT(TX_PORT_1024B_1518B);
2081 p->tx_frames_1519_max = GET_STAT(TX_PORT_1519B_MAX);
2082 p->tx_drop = GET_STAT(TX_PORT_DROP);
2083 p->tx_pause = GET_STAT(TX_PORT_PAUSE);
2084 p->tx_ppp0 = GET_STAT(TX_PORT_PPP0);
2085 p->tx_ppp1 = GET_STAT(TX_PORT_PPP1);
2086 p->tx_ppp2 = GET_STAT(TX_PORT_PPP2);
2087 p->tx_ppp3 = GET_STAT(TX_PORT_PPP3);
2088 p->tx_ppp4 = GET_STAT(TX_PORT_PPP4);
2089 p->tx_ppp5 = GET_STAT(TX_PORT_PPP5);
2090 p->tx_ppp6 = GET_STAT(TX_PORT_PPP6);
2091 p->tx_ppp7 = GET_STAT(TX_PORT_PPP7);
2092
2093 p->rx_octets = GET_STAT(RX_PORT_BYTES);
2094 p->rx_frames = GET_STAT(RX_PORT_FRAMES);
2095 p->rx_bcast_frames = GET_STAT(RX_PORT_BCAST);
2096 p->rx_mcast_frames = GET_STAT(RX_PORT_MCAST);
2097 p->rx_ucast_frames = GET_STAT(RX_PORT_UCAST);
2098 p->rx_too_long = GET_STAT(RX_PORT_MTU_ERROR);
2099 p->rx_jabber = GET_STAT(RX_PORT_MTU_CRC_ERROR);
2100 p->rx_fcs_err = GET_STAT(RX_PORT_CRC_ERROR);
2101 p->rx_len_err = GET_STAT(RX_PORT_LEN_ERROR);
2102 p->rx_symbol_err = GET_STAT(RX_PORT_SYM_ERROR);
2103 p->rx_runt = GET_STAT(RX_PORT_LESS_64B);
2104 p->rx_frames_64 = GET_STAT(RX_PORT_64B);
2105 p->rx_frames_65_127 = GET_STAT(RX_PORT_65B_127B);
2106 p->rx_frames_128_255 = GET_STAT(RX_PORT_128B_255B);
2107 p->rx_frames_256_511 = GET_STAT(RX_PORT_256B_511B);
2108 p->rx_frames_512_1023 = GET_STAT(RX_PORT_512B_1023B);
2109 p->rx_frames_1024_1518 = GET_STAT(RX_PORT_1024B_1518B);
2110 p->rx_frames_1519_max = GET_STAT(RX_PORT_1519B_MAX);
2111 p->rx_pause = GET_STAT(RX_PORT_PAUSE);
2112 p->rx_ppp0 = GET_STAT(RX_PORT_PPP0);
2113 p->rx_ppp1 = GET_STAT(RX_PORT_PPP1);
2114 p->rx_ppp2 = GET_STAT(RX_PORT_PPP2);
2115 p->rx_ppp3 = GET_STAT(RX_PORT_PPP3);
2116 p->rx_ppp4 = GET_STAT(RX_PORT_PPP4);
2117 p->rx_ppp5 = GET_STAT(RX_PORT_PPP5);
2118 p->rx_ppp6 = GET_STAT(RX_PORT_PPP6);
2119 p->rx_ppp7 = GET_STAT(RX_PORT_PPP7);
2120
2121 p->rx_ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_DROP_FRAME) : 0;
2122 p->rx_ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_DROP_FRAME) : 0;
2123 p->rx_ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_DROP_FRAME) : 0;
2124 p->rx_ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_DROP_FRAME) : 0;
2125 p->rx_trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_TRUNC_FRAME) : 0;
2126 p->rx_trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_TRUNC_FRAME) : 0;
2127 p->rx_trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_TRUNC_FRAME) : 0;
2128 p->rx_trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_TRUNC_FRAME) : 0;
2129
2130#undef GET_STAT
2131#undef GET_STAT_COM
2132}
2133
2134/**
2135 * t4_get_lb_stats - collect loopback port statistics
2136 * @adap: the adapter
2137 * @idx: the loopback port index
2138 * @p: the stats structure to fill
2139 *
2140 * Return HW statistics for the given loopback port.
2141 */
2142void t4_get_lb_stats(struct adapter *adap, int idx, struct lb_port_stats *p)
2143{
2144 u32 bgmap = get_mps_bg_map(adap, idx);
2145
2146#define GET_STAT(name) \
2147 t4_read_reg64(adap, PORT_REG(idx, MPS_PORT_STAT_LB_PORT_##name##_L))
2148#define GET_STAT_COM(name) t4_read_reg64(adap, MPS_STAT_##name##_L)
2149
2150 p->octets = GET_STAT(BYTES);
2151 p->frames = GET_STAT(FRAMES);
2152 p->bcast_frames = GET_STAT(BCAST);
2153 p->mcast_frames = GET_STAT(MCAST);
2154 p->ucast_frames = GET_STAT(UCAST);
2155 p->error_frames = GET_STAT(ERROR);
2156
2157 p->frames_64 = GET_STAT(64B);
2158 p->frames_65_127 = GET_STAT(65B_127B);
2159 p->frames_128_255 = GET_STAT(128B_255B);
2160 p->frames_256_511 = GET_STAT(256B_511B);
2161 p->frames_512_1023 = GET_STAT(512B_1023B);
2162 p->frames_1024_1518 = GET_STAT(1024B_1518B);
2163 p->frames_1519_max = GET_STAT(1519B_MAX);
2164 p->drop = t4_read_reg(adap, PORT_REG(idx,
2165 MPS_PORT_STAT_LB_PORT_DROP_FRAMES));
2166
2167 p->ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_DROP_FRAME) : 0;
2168 p->ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_DROP_FRAME) : 0;
2169 p->ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_DROP_FRAME) : 0;
2170 p->ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_LB_DROP_FRAME) : 0;
2171 p->trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_TRUNC_FRAME) : 0;
2172 p->trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_TRUNC_FRAME) : 0;
2173 p->trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_TRUNC_FRAME) : 0;
2174 p->trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_LB_TRUNC_FRAME) : 0;
2175
2176#undef GET_STAT
2177#undef GET_STAT_COM
2178}
2179
2180/**
2181 * t4_wol_magic_enable - enable/disable magic packet WoL
2182 * @adap: the adapter
2183 * @port: the physical port index
2184 * @addr: MAC address expected in magic packets, %NULL to disable
2185 *
2186 * Enables/disables magic packet wake-on-LAN for the selected port.
2187 */
2188void t4_wol_magic_enable(struct adapter *adap, unsigned int port,
2189 const u8 *addr)
2190{
2191 if (addr) {
2192 t4_write_reg(adap, PORT_REG(port, XGMAC_PORT_MAGIC_MACID_LO),
2193 (addr[2] << 24) | (addr[3] << 16) |
2194 (addr[4] << 8) | addr[5]);
2195 t4_write_reg(adap, PORT_REG(port, XGMAC_PORT_MAGIC_MACID_HI),
2196 (addr[0] << 8) | addr[1]);
2197 }
2198 t4_set_reg_field(adap, PORT_REG(port, XGMAC_PORT_CFG2), MAGICEN,
2199 addr ? MAGICEN : 0);
2200}
2201
2202/**
2203 * t4_wol_pat_enable - enable/disable pattern-based WoL
2204 * @adap: the adapter
2205 * @port: the physical port index
2206 * @map: bitmap of which HW pattern filters to set
2207 * @mask0: byte mask for bytes 0-63 of a packet
2208 * @mask1: byte mask for bytes 64-127 of a packet
2209 * @crc: Ethernet CRC for selected bytes
2210 * @enable: enable/disable switch
2211 *
2212 * Sets the pattern filters indicated in @map to mask out the bytes
2213 * specified in @mask0/@mask1 in received packets and compare the CRC of
2214 * the resulting packet against @crc. If @enable is %true pattern-based
2215 * WoL is enabled, otherwise disabled.
2216 */
2217int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map,
2218 u64 mask0, u64 mask1, unsigned int crc, bool enable)
2219{
2220 int i;
2221
2222 if (!enable) {
2223 t4_set_reg_field(adap, PORT_REG(port, XGMAC_PORT_CFG2),
2224 PATEN, 0);
2225 return 0;
2226 }
2227 if (map > 0xff)
2228 return -EINVAL;
2229
2230#define EPIO_REG(name) PORT_REG(port, XGMAC_PORT_EPIO_##name)
2231
2232 t4_write_reg(adap, EPIO_REG(DATA1), mask0 >> 32);
2233 t4_write_reg(adap, EPIO_REG(DATA2), mask1);
2234 t4_write_reg(adap, EPIO_REG(DATA3), mask1 >> 32);
2235
2236 for (i = 0; i < NWOL_PAT; i++, map >>= 1) {
2237 if (!(map & 1))
2238 continue;
2239
2240 /* write byte masks */
2241 t4_write_reg(adap, EPIO_REG(DATA0), mask0);
2242 t4_write_reg(adap, EPIO_REG(OP), ADDRESS(i) | EPIOWR);
2243 t4_read_reg(adap, EPIO_REG(OP)); /* flush */
2244 if (t4_read_reg(adap, EPIO_REG(OP)) & BUSY)
2245 return -ETIMEDOUT;
2246
2247 /* write CRC */
2248 t4_write_reg(adap, EPIO_REG(DATA0), crc);
2249 t4_write_reg(adap, EPIO_REG(OP), ADDRESS(i + 32) | EPIOWR);
2250 t4_read_reg(adap, EPIO_REG(OP)); /* flush */
2251 if (t4_read_reg(adap, EPIO_REG(OP)) & BUSY)
2252 return -ETIMEDOUT;
2253 }
2254#undef EPIO_REG
2255
2256 t4_set_reg_field(adap, PORT_REG(port, XGMAC_PORT_CFG2), 0, PATEN);
2257 return 0;
2258}
2259
/* Initialize the header of a firmware mailbox command: sets the opcode
 * (FW_<cmd>_CMD), the request flag, the read/write direction and the
 * command length.
 */
#define INIT_CMD(var, cmd, rd_wr) do { \
	(var).op_to_write = htonl(FW_CMD_OP(FW_##cmd##_CMD) | \
				  FW_CMD_REQUEST | FW_CMD_##rd_wr); \
	(var).retval_len16 = htonl(FW_LEN16(var)); \
} while (0)
2265
2266/**
2267 * t4_mdio_rd - read a PHY register through MDIO
2268 * @adap: the adapter
2269 * @mbox: mailbox to use for the FW command
2270 * @phy_addr: the PHY address
2271 * @mmd: the PHY MMD to access (0 for clause 22 PHYs)
2272 * @reg: the register to read
2273 * @valp: where to store the value
2274 *
2275 * Issues a FW command through the given mailbox to read a PHY register.
2276 */
2277int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
2278 unsigned int mmd, unsigned int reg, u16 *valp)
2279{
2280 int ret;
2281 struct fw_ldst_cmd c;
2282
2283 memset(&c, 0, sizeof(c));
2284 c.op_to_addrspace = htonl(FW_CMD_OP(FW_LDST_CMD) | FW_CMD_REQUEST |
2285 FW_CMD_READ | FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO));
2286 c.cycles_to_len16 = htonl(FW_LEN16(c));
2287 c.u.mdio.paddr_mmd = htons(FW_LDST_CMD_PADDR(phy_addr) |
2288 FW_LDST_CMD_MMD(mmd));
2289 c.u.mdio.raddr = htons(reg);
2290
2291 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
2292 if (ret == 0)
2293 *valp = ntohs(c.u.mdio.rval);
2294 return ret;
2295}
2296
2297/**
2298 * t4_mdio_wr - write a PHY register through MDIO
2299 * @adap: the adapter
2300 * @mbox: mailbox to use for the FW command
2301 * @phy_addr: the PHY address
2302 * @mmd: the PHY MMD to access (0 for clause 22 PHYs)
2303 * @reg: the register to write
2304 * @valp: value to write
2305 *
2306 * Issues a FW command through the given mailbox to write a PHY register.
2307 */
2308int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
2309 unsigned int mmd, unsigned int reg, u16 val)
2310{
2311 struct fw_ldst_cmd c;
2312
2313 memset(&c, 0, sizeof(c));
2314 c.op_to_addrspace = htonl(FW_CMD_OP(FW_LDST_CMD) | FW_CMD_REQUEST |
2315 FW_CMD_WRITE | FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO));
2316 c.cycles_to_len16 = htonl(FW_LEN16(c));
2317 c.u.mdio.paddr_mmd = htons(FW_LDST_CMD_PADDR(phy_addr) |
2318 FW_LDST_CMD_MMD(mmd));
2319 c.u.mdio.raddr = htons(reg);
2320 c.u.mdio.rval = htons(val);
2321
2322 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2323}
2324
2325/**
2326 * t4_fw_hello - establish communication with FW
2327 * @adap: the adapter
2328 * @mbox: mailbox to use for the FW command
2329 * @evt_mbox: mailbox to receive async FW events
2330 * @master: specifies the caller's willingness to be the device master
2331 * @state: returns the current device state
2332 *
2333 * Issues a command to establish communication with FW.
2334 */
2335int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox,
2336 enum dev_master master, enum dev_state *state)
2337{
2338 int ret;
2339 struct fw_hello_cmd c;
2340
2341 INIT_CMD(c, HELLO, WRITE);
2342 c.err_to_mbasyncnot = htonl(
2343 FW_HELLO_CMD_MASTERDIS(master == MASTER_CANT) |
2344 FW_HELLO_CMD_MASTERFORCE(master == MASTER_MUST) |
2345 FW_HELLO_CMD_MBMASTER(master == MASTER_MUST ? mbox : 0xff) |
2346 FW_HELLO_CMD_MBASYNCNOT(evt_mbox));
2347
2348 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
2349 if (ret == 0 && state) {
2350 u32 v = ntohl(c.err_to_mbasyncnot);
2351 if (v & FW_HELLO_CMD_INIT)
2352 *state = DEV_STATE_INIT;
2353 else if (v & FW_HELLO_CMD_ERR)
2354 *state = DEV_STATE_ERR;
2355 else
2356 *state = DEV_STATE_UNINIT;
2357 }
2358 return ret;
2359}
2360
2361/**
2362 * t4_fw_bye - end communication with FW
2363 * @adap: the adapter
2364 * @mbox: mailbox to use for the FW command
2365 *
2366 * Issues a command to terminate communication with FW.
2367 */
2368int t4_fw_bye(struct adapter *adap, unsigned int mbox)
2369{
2370 struct fw_bye_cmd c;
2371
2372 INIT_CMD(c, BYE, WRITE);
2373 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2374}
2375
2376/**
2377 * t4_init_cmd - ask FW to initialize the device
2378 * @adap: the adapter
2379 * @mbox: mailbox to use for the FW command
2380 *
2381 * Issues a command to FW to partially initialize the device. This
2382 * performs initialization that generally doesn't depend on user input.
2383 */
2384int t4_early_init(struct adapter *adap, unsigned int mbox)
2385{
2386 struct fw_initialize_cmd c;
2387
2388 INIT_CMD(c, INITIALIZE, WRITE);
2389 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2390}
2391
2392/**
2393 * t4_fw_reset - issue a reset to FW
2394 * @adap: the adapter
2395 * @mbox: mailbox to use for the FW command
2396 * @reset: specifies the type of reset to perform
2397 *
2398 * Issues a reset command of the specified type to FW.
2399 */
2400int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset)
2401{
2402 struct fw_reset_cmd c;
2403
2404 INIT_CMD(c, RESET, WRITE);
2405 c.val = htonl(reset);
2406 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2407}
2408
2409/**
2410 * t4_query_params - query FW or device parameters
2411 * @adap: the adapter
2412 * @mbox: mailbox to use for the FW command
2413 * @pf: the PF
2414 * @vf: the VF
2415 * @nparams: the number of parameters
2416 * @params: the parameter names
2417 * @val: the parameter values
2418 *
2419 * Reads the value of FW or device parameters. Up to 7 parameters can be
2420 * queried at once.
2421 */
2422int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
2423 unsigned int vf, unsigned int nparams, const u32 *params,
2424 u32 *val)
2425{
2426 int i, ret;
2427 struct fw_params_cmd c;
2428 __be32 *p = &c.param[0].mnem;
2429
2430 if (nparams > 7)
2431 return -EINVAL;
2432
2433 memset(&c, 0, sizeof(c));
2434 c.op_to_vfn = htonl(FW_CMD_OP(FW_PARAMS_CMD) | FW_CMD_REQUEST |
2435 FW_CMD_READ | FW_PARAMS_CMD_PFN(pf) |
2436 FW_PARAMS_CMD_VFN(vf));
2437 c.retval_len16 = htonl(FW_LEN16(c));
2438 for (i = 0; i < nparams; i++, p += 2)
2439 *p = htonl(*params++);
2440
2441 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
2442 if (ret == 0)
2443 for (i = 0, p = &c.param[0].val; i < nparams; i++, p += 2)
2444 *val++ = ntohl(*p);
2445 return ret;
2446}
2447
2448/**
2449 * t4_set_params - sets FW or device parameters
2450 * @adap: the adapter
2451 * @mbox: mailbox to use for the FW command
2452 * @pf: the PF
2453 * @vf: the VF
2454 * @nparams: the number of parameters
2455 * @params: the parameter names
2456 * @val: the parameter values
2457 *
2458 * Sets the value of FW or device parameters. Up to 7 parameters can be
2459 * specified at once.
2460 */
2461int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
2462 unsigned int vf, unsigned int nparams, const u32 *params,
2463 const u32 *val)
2464{
2465 struct fw_params_cmd c;
2466 __be32 *p = &c.param[0].mnem;
2467
2468 if (nparams > 7)
2469 return -EINVAL;
2470
2471 memset(&c, 0, sizeof(c));
2472 c.op_to_vfn = htonl(FW_CMD_OP(FW_PARAMS_CMD) | FW_CMD_REQUEST |
2473 FW_CMD_WRITE | FW_PARAMS_CMD_PFN(pf) |
2474 FW_PARAMS_CMD_VFN(vf));
2475 c.retval_len16 = htonl(FW_LEN16(c));
2476 while (nparams--) {
2477 *p++ = htonl(*params++);
2478 *p++ = htonl(*val++);
2479 }
2480
2481 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2482}
2483
2484/**
2485 * t4_cfg_pfvf - configure PF/VF resource limits
2486 * @adap: the adapter
2487 * @mbox: mailbox to use for the FW command
2488 * @pf: the PF being configured
2489 * @vf: the VF being configured
2490 * @txq: the max number of egress queues
2491 * @txq_eth_ctrl: the max number of egress Ethernet or control queues
2492 * @rxqi: the max number of interrupt-capable ingress queues
2493 * @rxq: the max number of interruptless ingress queues
2494 * @tc: the PCI traffic class
2495 * @vi: the max number of virtual interfaces
2496 * @cmask: the channel access rights mask for the PF/VF
2497 * @pmask: the port access rights mask for the PF/VF
2498 * @nexact: the maximum number of exact MPS filters
2499 * @rcaps: read capabilities
2500 * @wxcaps: write/execute capabilities
2501 *
2502 * Configures resource limits and capabilities for a physical or virtual
2503 * function.
2504 */
2505int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf,
2506 unsigned int vf, unsigned int txq, unsigned int txq_eth_ctrl,
2507 unsigned int rxqi, unsigned int rxq, unsigned int tc,
2508 unsigned int vi, unsigned int cmask, unsigned int pmask,
2509 unsigned int nexact, unsigned int rcaps, unsigned int wxcaps)
2510{
2511 struct fw_pfvf_cmd c;
2512
2513 memset(&c, 0, sizeof(c));
2514 c.op_to_vfn = htonl(FW_CMD_OP(FW_PFVF_CMD) | FW_CMD_REQUEST |
2515 FW_CMD_WRITE | FW_PFVF_CMD_PFN(pf) |
2516 FW_PFVF_CMD_VFN(vf));
2517 c.retval_len16 = htonl(FW_LEN16(c));
2518 c.niqflint_niq = htonl(FW_PFVF_CMD_NIQFLINT(rxqi) |
2519 FW_PFVF_CMD_NIQ(rxq));
2520 c.cmask_to_neq = htonl(FW_PFVF_CMD_CMASK(cmask) |
2521 FW_PFVF_CMD_PMASK(pmask) |
2522 FW_PFVF_CMD_NEQ(txq));
2523 c.tc_to_nexactf = htonl(FW_PFVF_CMD_TC(tc) | FW_PFVF_CMD_NVI(vi) |
2524 FW_PFVF_CMD_NEXACTF(nexact));
2525 c.r_caps_to_nethctrl = htonl(FW_PFVF_CMD_R_CAPS(rcaps) |
2526 FW_PFVF_CMD_WX_CAPS(wxcaps) |
2527 FW_PFVF_CMD_NETHCTRL(txq_eth_ctrl));
2528 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2529}
2530
2531/**
2532 * t4_alloc_vi - allocate a virtual interface
2533 * @adap: the adapter
2534 * @mbox: mailbox to use for the FW command
2535 * @port: physical port associated with the VI
2536 * @pf: the PF owning the VI
2537 * @vf: the VF owning the VI
2538 * @nmac: number of MAC addresses needed (1 to 5)
2539 * @mac: the MAC addresses of the VI
2540 * @rss_size: size of RSS table slice associated with this VI
2541 *
2542 * Allocates a virtual interface for the given physical port. If @mac is
2543 * not %NULL it contains the MAC addresses of the VI as assigned by FW.
2544 * @mac should be large enough to hold @nmac Ethernet addresses, they are
2545 * stored consecutively so the space needed is @nmac * 6 bytes.
2546 * Returns a negative error number or the non-negative VI id.
2547 */
2548int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
2549 unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac,
2550 unsigned int *rss_size)
2551{
2552 int ret;
2553 struct fw_vi_cmd c;
2554
2555 memset(&c, 0, sizeof(c));
2556 c.op_to_vfn = htonl(FW_CMD_OP(FW_VI_CMD) | FW_CMD_REQUEST |
2557 FW_CMD_WRITE | FW_CMD_EXEC |
2558 FW_VI_CMD_PFN(pf) | FW_VI_CMD_VFN(vf));
2559 c.alloc_to_len16 = htonl(FW_VI_CMD_ALLOC | FW_LEN16(c));
2560 c.portid_pkd = FW_VI_CMD_PORTID(port);
2561 c.nmac = nmac - 1;
2562
2563 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
2564 if (ret)
2565 return ret;
2566
2567 if (mac) {
2568 memcpy(mac, c.mac, sizeof(c.mac));
2569 switch (nmac) {
2570 case 5:
2571 memcpy(mac + 24, c.nmac3, sizeof(c.nmac3));
2572 case 4:
2573 memcpy(mac + 18, c.nmac2, sizeof(c.nmac2));
2574 case 3:
2575 memcpy(mac + 12, c.nmac1, sizeof(c.nmac1));
2576 case 2:
2577 memcpy(mac + 6, c.nmac0, sizeof(c.nmac0));
2578 }
2579 }
2580 if (rss_size)
2581 *rss_size = FW_VI_CMD_RSSSIZE_GET(ntohs(c.rsssize_pkd));
2582 return ntohs(c.viid_pkd);
2583}
2584
2585/**
2586 * t4_free_vi - free a virtual interface
2587 * @adap: the adapter
2588 * @mbox: mailbox to use for the FW command
2589 * @pf: the PF owning the VI
2590 * @vf: the VF owning the VI
2591 * @viid: virtual interface identifiler
2592 *
2593 * Free a previously allocated virtual interface.
2594 */
2595int t4_free_vi(struct adapter *adap, unsigned int mbox, unsigned int pf,
2596 unsigned int vf, unsigned int viid)
2597{
2598 struct fw_vi_cmd c;
2599
2600 memset(&c, 0, sizeof(c));
2601 c.op_to_vfn = htonl(FW_CMD_OP(FW_VI_CMD) | FW_CMD_REQUEST |
2602 FW_CMD_EXEC | FW_VI_CMD_PFN(pf) |
2603 FW_VI_CMD_VFN(vf));
2604 c.alloc_to_len16 = htonl(FW_VI_CMD_FREE | FW_LEN16(c));
2605 c.viid_pkd = htons(FW_VI_CMD_VIID(viid));
2606 return t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
2607}
2608
2609/**
2610 * t4_set_rxmode - set Rx properties of a virtual interface
2611 * @adap: the adapter
2612 * @mbox: mailbox to use for the FW command
2613 * @viid: the VI id
2614 * @mtu: the new MTU or -1
2615 * @promisc: 1 to enable promiscuous mode, 0 to disable it, -1 no change
2616 * @all_multi: 1 to enable all-multi mode, 0 to disable it, -1 no change
2617 * @bcast: 1 to enable broadcast Rx, 0 to disable it, -1 no change
f8f5aafa 2618 * @vlanex: 1 to enable HW VLAN extraction, 0 to disable it, -1 no change
56d36be4
DM
2619 * @sleep_ok: if true we may sleep while awaiting command completion
2620 *
2621 * Sets Rx properties of a virtual interface.
2622 */
2623int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
f8f5aafa
DM
2624 int mtu, int promisc, int all_multi, int bcast, int vlanex,
2625 bool sleep_ok)
56d36be4
DM
2626{
2627 struct fw_vi_rxmode_cmd c;
2628
2629 /* convert to FW values */
2630 if (mtu < 0)
2631 mtu = FW_RXMODE_MTU_NO_CHG;
2632 if (promisc < 0)
2633 promisc = FW_VI_RXMODE_CMD_PROMISCEN_MASK;
2634 if (all_multi < 0)
2635 all_multi = FW_VI_RXMODE_CMD_ALLMULTIEN_MASK;
2636 if (bcast < 0)
2637 bcast = FW_VI_RXMODE_CMD_BROADCASTEN_MASK;
f8f5aafa
DM
2638 if (vlanex < 0)
2639 vlanex = FW_VI_RXMODE_CMD_VLANEXEN_MASK;
56d36be4
DM
2640
2641 memset(&c, 0, sizeof(c));
2642 c.op_to_viid = htonl(FW_CMD_OP(FW_VI_RXMODE_CMD) | FW_CMD_REQUEST |
2643 FW_CMD_WRITE | FW_VI_RXMODE_CMD_VIID(viid));
2644 c.retval_len16 = htonl(FW_LEN16(c));
f8f5aafa
DM
2645 c.mtu_to_vlanexen = htonl(FW_VI_RXMODE_CMD_MTU(mtu) |
2646 FW_VI_RXMODE_CMD_PROMISCEN(promisc) |
2647 FW_VI_RXMODE_CMD_ALLMULTIEN(all_multi) |
2648 FW_VI_RXMODE_CMD_BROADCASTEN(bcast) |
2649 FW_VI_RXMODE_CMD_VLANEXEN(vlanex));
56d36be4
DM
2650 return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
2651}
2652
2653/**
2654 * t4_alloc_mac_filt - allocates exact-match filters for MAC addresses
2655 * @adap: the adapter
2656 * @mbox: mailbox to use for the FW command
2657 * @viid: the VI id
2658 * @free: if true any existing filters for this VI id are first removed
2659 * @naddr: the number of MAC addresses to allocate filters for (up to 7)
2660 * @addr: the MAC address(es)
2661 * @idx: where to store the index of each allocated filter
2662 * @hash: pointer to hash address filter bitmap
2663 * @sleep_ok: call is allowed to sleep
2664 *
2665 * Allocates an exact-match filter for each of the supplied addresses and
2666 * sets it to the corresponding address. If @idx is not %NULL it should
2667 * have at least @naddr entries, each of which will be set to the index of
2668 * the filter allocated for the corresponding MAC address. If a filter
2669 * could not be allocated for an address its index is set to 0xffff.
2670 * If @hash is not %NULL addresses that fail to allocate an exact filter
2671 * are hashed and update the hash filter bitmap pointed at by @hash.
2672 *
2673 * Returns a negative error number or the number of filters allocated.
2674 */
2675int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox,
2676 unsigned int viid, bool free, unsigned int naddr,
2677 const u8 **addr, u16 *idx, u64 *hash, bool sleep_ok)
2678{
2679 int i, ret;
2680 struct fw_vi_mac_cmd c;
2681 struct fw_vi_mac_exact *p;
2682
2683 if (naddr > 7)
2684 return -EINVAL;
2685
2686 memset(&c, 0, sizeof(c));
2687 c.op_to_viid = htonl(FW_CMD_OP(FW_VI_MAC_CMD) | FW_CMD_REQUEST |
2688 FW_CMD_WRITE | (free ? FW_CMD_EXEC : 0) |
2689 FW_VI_MAC_CMD_VIID(viid));
2690 c.freemacs_to_len16 = htonl(FW_VI_MAC_CMD_FREEMACS(free) |
2691 FW_CMD_LEN16((naddr + 2) / 2));
2692
2693 for (i = 0, p = c.u.exact; i < naddr; i++, p++) {
2694 p->valid_to_idx = htons(FW_VI_MAC_CMD_VALID |
2695 FW_VI_MAC_CMD_IDX(FW_VI_MAC_ADD_MAC));
2696 memcpy(p->macaddr, addr[i], sizeof(p->macaddr));
2697 }
2698
2699 ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok);
2700 if (ret)
2701 return ret;
2702
2703 for (i = 0, p = c.u.exact; i < naddr; i++, p++) {
2704 u16 index = FW_VI_MAC_CMD_IDX_GET(ntohs(p->valid_to_idx));
2705
2706 if (idx)
2707 idx[i] = index >= NEXACT_MAC ? 0xffff : index;
2708 if (index < NEXACT_MAC)
2709 ret++;
2710 else if (hash)
2711 *hash |= (1 << hash_mac_addr(addr[i]));
2712 }
2713 return ret;
2714}
2715
2716/**
2717 * t4_change_mac - modifies the exact-match filter for a MAC address
2718 * @adap: the adapter
2719 * @mbox: mailbox to use for the FW command
2720 * @viid: the VI id
2721 * @idx: index of existing filter for old value of MAC address, or -1
2722 * @addr: the new MAC address value
2723 * @persist: whether a new MAC allocation should be persistent
2724 * @add_smt: if true also add the address to the HW SMT
2725 *
2726 * Modifies an exact-match filter and sets it to the new MAC address.
2727 * Note that in general it is not possible to modify the value of a given
2728 * filter so the generic way to modify an address filter is to free the one
2729 * being used by the old address value and allocate a new filter for the
2730 * new address value. @idx can be -1 if the address is a new addition.
2731 *
2732 * Returns a negative error number or the index of the filter with the new
2733 * MAC value.
2734 */
2735int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
2736 int idx, const u8 *addr, bool persist, bool add_smt)
2737{
2738 int ret, mode;
2739 struct fw_vi_mac_cmd c;
2740 struct fw_vi_mac_exact *p = c.u.exact;
2741
2742 if (idx < 0) /* new allocation */
2743 idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC;
2744 mode = add_smt ? FW_VI_MAC_SMT_AND_MPSTCAM : FW_VI_MAC_MPS_TCAM_ENTRY;
2745
2746 memset(&c, 0, sizeof(c));
2747 c.op_to_viid = htonl(FW_CMD_OP(FW_VI_MAC_CMD) | FW_CMD_REQUEST |
2748 FW_CMD_WRITE | FW_VI_MAC_CMD_VIID(viid));
2749 c.freemacs_to_len16 = htonl(FW_CMD_LEN16(1));
2750 p->valid_to_idx = htons(FW_VI_MAC_CMD_VALID |
2751 FW_VI_MAC_CMD_SMAC_RESULT(mode) |
2752 FW_VI_MAC_CMD_IDX(idx));
2753 memcpy(p->macaddr, addr, sizeof(p->macaddr));
2754
2755 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
2756 if (ret == 0) {
2757 ret = FW_VI_MAC_CMD_IDX_GET(ntohs(p->valid_to_idx));
2758 if (ret >= NEXACT_MAC)
2759 ret = -ENOMEM;
2760 }
2761 return ret;
2762}
2763
2764/**
2765 * t4_set_addr_hash - program the MAC inexact-match hash filter
2766 * @adap: the adapter
2767 * @mbox: mailbox to use for the FW command
2768 * @viid: the VI id
2769 * @ucast: whether the hash filter should also match unicast addresses
2770 * @vec: the value to be written to the hash filter
2771 * @sleep_ok: call is allowed to sleep
2772 *
2773 * Sets the 64-bit inexact-match hash filter for a virtual interface.
2774 */
2775int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid,
2776 bool ucast, u64 vec, bool sleep_ok)
2777{
2778 struct fw_vi_mac_cmd c;
2779
2780 memset(&c, 0, sizeof(c));
2781 c.op_to_viid = htonl(FW_CMD_OP(FW_VI_MAC_CMD) | FW_CMD_REQUEST |
2782 FW_CMD_WRITE | FW_VI_ENABLE_CMD_VIID(viid));
2783 c.freemacs_to_len16 = htonl(FW_VI_MAC_CMD_HASHVECEN |
2784 FW_VI_MAC_CMD_HASHUNIEN(ucast) |
2785 FW_CMD_LEN16(1));
2786 c.u.hash.hashvec = cpu_to_be64(vec);
2787 return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
2788}
2789
2790/**
2791 * t4_enable_vi - enable/disable a virtual interface
2792 * @adap: the adapter
2793 * @mbox: mailbox to use for the FW command
2794 * @viid: the VI id
2795 * @rx_en: 1=enable Rx, 0=disable Rx
2796 * @tx_en: 1=enable Tx, 0=disable Tx
2797 *
2798 * Enables/disables a virtual interface.
2799 */
2800int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid,
2801 bool rx_en, bool tx_en)
2802{
2803 struct fw_vi_enable_cmd c;
2804
2805 memset(&c, 0, sizeof(c));
2806 c.op_to_viid = htonl(FW_CMD_OP(FW_VI_ENABLE_CMD) | FW_CMD_REQUEST |
2807 FW_CMD_EXEC | FW_VI_ENABLE_CMD_VIID(viid));
2808 c.ien_to_len16 = htonl(FW_VI_ENABLE_CMD_IEN(rx_en) |
2809 FW_VI_ENABLE_CMD_EEN(tx_en) | FW_LEN16(c));
2810 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2811}
2812
2813/**
2814 * t4_identify_port - identify a VI's port by blinking its LED
2815 * @adap: the adapter
2816 * @mbox: mailbox to use for the FW command
2817 * @viid: the VI id
2818 * @nblinks: how many times to blink LED at 2.5 Hz
2819 *
2820 * Identifies a VI's port by blinking its LED.
2821 */
2822int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid,
2823 unsigned int nblinks)
2824{
2825 struct fw_vi_enable_cmd c;
2826
2827 c.op_to_viid = htonl(FW_CMD_OP(FW_VI_ENABLE_CMD) | FW_CMD_REQUEST |
2828 FW_CMD_EXEC | FW_VI_ENABLE_CMD_VIID(viid));
2829 c.ien_to_len16 = htonl(FW_VI_ENABLE_CMD_LED | FW_LEN16(c));
2830 c.blinkdur = htons(nblinks);
2831 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2832}
2833
2834/**
2835 * t4_iq_start_stop - enable/disable an ingress queue and its FLs
2836 * @adap: the adapter
2837 * @mbox: mailbox to use for the FW command
2838 * @start: %true to enable the queues, %false to disable them
2839 * @pf: the PF owning the queues
2840 * @vf: the VF owning the queues
2841 * @iqid: ingress queue id
2842 * @fl0id: FL0 queue id or 0xffff if no attached FL0
2843 * @fl1id: FL1 queue id or 0xffff if no attached FL1
2844 *
2845 * Starts or stops an ingress queue and its associated FLs, if any.
2846 */
2847int t4_iq_start_stop(struct adapter *adap, unsigned int mbox, bool start,
2848 unsigned int pf, unsigned int vf, unsigned int iqid,
2849 unsigned int fl0id, unsigned int fl1id)
2850{
2851 struct fw_iq_cmd c;
2852
2853 memset(&c, 0, sizeof(c));
2854 c.op_to_vfn = htonl(FW_CMD_OP(FW_IQ_CMD) | FW_CMD_REQUEST |
2855 FW_CMD_EXEC | FW_IQ_CMD_PFN(pf) |
2856 FW_IQ_CMD_VFN(vf));
2857 c.alloc_to_len16 = htonl(FW_IQ_CMD_IQSTART(start) |
2858 FW_IQ_CMD_IQSTOP(!start) | FW_LEN16(c));
2859 c.iqid = htons(iqid);
2860 c.fl0id = htons(fl0id);
2861 c.fl1id = htons(fl1id);
2862 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2863}
2864
2865/**
2866 * t4_iq_free - free an ingress queue and its FLs
2867 * @adap: the adapter
2868 * @mbox: mailbox to use for the FW command
2869 * @pf: the PF owning the queues
2870 * @vf: the VF owning the queues
2871 * @iqtype: the ingress queue type
2872 * @iqid: ingress queue id
2873 * @fl0id: FL0 queue id or 0xffff if no attached FL0
2874 * @fl1id: FL1 queue id or 0xffff if no attached FL1
2875 *
2876 * Frees an ingress queue and its associated FLs, if any.
2877 */
2878int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
2879 unsigned int vf, unsigned int iqtype, unsigned int iqid,
2880 unsigned int fl0id, unsigned int fl1id)
2881{
2882 struct fw_iq_cmd c;
2883
2884 memset(&c, 0, sizeof(c));
2885 c.op_to_vfn = htonl(FW_CMD_OP(FW_IQ_CMD) | FW_CMD_REQUEST |
2886 FW_CMD_EXEC | FW_IQ_CMD_PFN(pf) |
2887 FW_IQ_CMD_VFN(vf));
2888 c.alloc_to_len16 = htonl(FW_IQ_CMD_FREE | FW_LEN16(c));
2889 c.type_to_iqandstindex = htonl(FW_IQ_CMD_TYPE(iqtype));
2890 c.iqid = htons(iqid);
2891 c.fl0id = htons(fl0id);
2892 c.fl1id = htons(fl1id);
2893 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2894}
2895
2896/**
2897 * t4_eth_eq_free - free an Ethernet egress queue
2898 * @adap: the adapter
2899 * @mbox: mailbox to use for the FW command
2900 * @pf: the PF owning the queue
2901 * @vf: the VF owning the queue
2902 * @eqid: egress queue id
2903 *
2904 * Frees an Ethernet egress queue.
2905 */
2906int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
2907 unsigned int vf, unsigned int eqid)
2908{
2909 struct fw_eq_eth_cmd c;
2910
2911 memset(&c, 0, sizeof(c));
2912 c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_ETH_CMD) | FW_CMD_REQUEST |
2913 FW_CMD_EXEC | FW_EQ_ETH_CMD_PFN(pf) |
2914 FW_EQ_ETH_CMD_VFN(vf));
2915 c.alloc_to_len16 = htonl(FW_EQ_ETH_CMD_FREE | FW_LEN16(c));
2916 c.eqid_pkd = htonl(FW_EQ_ETH_CMD_EQID(eqid));
2917 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2918}
2919
2920/**
2921 * t4_ctrl_eq_free - free a control egress queue
2922 * @adap: the adapter
2923 * @mbox: mailbox to use for the FW command
2924 * @pf: the PF owning the queue
2925 * @vf: the VF owning the queue
2926 * @eqid: egress queue id
2927 *
2928 * Frees a control egress queue.
2929 */
2930int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
2931 unsigned int vf, unsigned int eqid)
2932{
2933 struct fw_eq_ctrl_cmd c;
2934
2935 memset(&c, 0, sizeof(c));
2936 c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_CTRL_CMD) | FW_CMD_REQUEST |
2937 FW_CMD_EXEC | FW_EQ_CTRL_CMD_PFN(pf) |
2938 FW_EQ_CTRL_CMD_VFN(vf));
2939 c.alloc_to_len16 = htonl(FW_EQ_CTRL_CMD_FREE | FW_LEN16(c));
2940 c.cmpliqid_eqid = htonl(FW_EQ_CTRL_CMD_EQID(eqid));
2941 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2942}
2943
2944/**
2945 * t4_ofld_eq_free - free an offload egress queue
2946 * @adap: the adapter
2947 * @mbox: mailbox to use for the FW command
2948 * @pf: the PF owning the queue
2949 * @vf: the VF owning the queue
2950 * @eqid: egress queue id
2951 *
 * Frees an offload egress queue.
2953 */
2954int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
2955 unsigned int vf, unsigned int eqid)
2956{
2957 struct fw_eq_ofld_cmd c;
2958
2959 memset(&c, 0, sizeof(c));
2960 c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_OFLD_CMD) | FW_CMD_REQUEST |
2961 FW_CMD_EXEC | FW_EQ_OFLD_CMD_PFN(pf) |
2962 FW_EQ_OFLD_CMD_VFN(vf));
2963 c.alloc_to_len16 = htonl(FW_EQ_OFLD_CMD_FREE | FW_LEN16(c));
2964 c.eqid_pkd = htonl(FW_EQ_OFLD_CMD_EQID(eqid));
2965 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2966}
2967
2968/**
2969 * t4_handle_fw_rpl - process a FW reply message
2970 * @adap: the adapter
2971 * @rpl: start of the FW message
2972 *
2973 * Processes a FW message, such as link state change messages.
2974 */
2975int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl)
2976{
2977 u8 opcode = *(const u8 *)rpl;
2978
2979 if (opcode == FW_PORT_CMD) { /* link/module state change message */
2980 int speed = 0, fc = 0;
2981 const struct fw_port_cmd *p = (void *)rpl;
2982 int chan = FW_PORT_CMD_PORTID_GET(ntohl(p->op_to_portid));
2983 int port = adap->chan_map[chan];
2984 struct port_info *pi = adap2pinfo(adap, port);
2985 struct link_config *lc = &pi->link_cfg;
2986 u32 stat = ntohl(p->u.info.lstatus_to_modtype);
2987 int link_ok = (stat & FW_PORT_CMD_LSTATUS) != 0;
2988 u32 mod = FW_PORT_CMD_MODTYPE_GET(stat);
2989
2990 if (stat & FW_PORT_CMD_RXPAUSE)
2991 fc |= PAUSE_RX;
2992 if (stat & FW_PORT_CMD_TXPAUSE)
2993 fc |= PAUSE_TX;
2994 if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100M))
2995 speed = SPEED_100;
2996 else if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_1G))
2997 speed = SPEED_1000;
2998 else if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_10G))
2999 speed = SPEED_10000;
3000
3001 if (link_ok != lc->link_ok || speed != lc->speed ||
3002 fc != lc->fc) { /* something changed */
3003 lc->link_ok = link_ok;
3004 lc->speed = speed;
3005 lc->fc = fc;
3006 t4_os_link_changed(adap, port, link_ok);
3007 }
3008 if (mod != pi->mod_type) {
3009 pi->mod_type = mod;
3010 t4_os_portmod_changed(adap, port);
3011 }
3012 }
3013 return 0;
3014}
3015
3016static void __devinit get_pci_mode(struct adapter *adapter,
3017 struct pci_params *p)
3018{
3019 u16 val;
3020 u32 pcie_cap = pci_pcie_cap(adapter->pdev);
3021
3022 if (pcie_cap) {
3023 pci_read_config_word(adapter->pdev, pcie_cap + PCI_EXP_LNKSTA,
3024 &val);
3025 p->speed = val & PCI_EXP_LNKSTA_CLS;
3026 p->width = (val & PCI_EXP_LNKSTA_NLW) >> 4;
3027 }
3028}
3029
3030/**
3031 * init_link_config - initialize a link's SW state
3032 * @lc: structure holding the link state
3033 * @caps: link capabilities
3034 *
3035 * Initializes the SW state maintained for each link, including the link's
3036 * capabilities and default speed/flow-control/autonegotiation settings.
3037 */
3038static void __devinit init_link_config(struct link_config *lc,
3039 unsigned int caps)
3040{
3041 lc->supported = caps;
3042 lc->requested_speed = 0;
3043 lc->speed = 0;
3044 lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
3045 if (lc->supported & FW_PORT_CAP_ANEG) {
3046 lc->advertising = lc->supported & ADVERT_MASK;
3047 lc->autoneg = AUTONEG_ENABLE;
3048 lc->requested_fc |= PAUSE_AUTONEG;
3049 } else {
3050 lc->advertising = 0;
3051 lc->autoneg = AUTONEG_DISABLE;
3052 }
3053}
3054
204dc3c0 3055int t4_wait_dev_ready(struct adapter *adap)
56d36be4
DM
3056{
3057 if (t4_read_reg(adap, PL_WHOAMI) != 0xffffffff)
3058 return 0;
3059 msleep(500);
3060 return t4_read_reg(adap, PL_WHOAMI) != 0xffffffff ? 0 : -EIO;
3061}
3062
900a6596
DM
3063static int __devinit get_flash_params(struct adapter *adap)
3064{
3065 int ret;
3066 u32 info;
3067
3068 ret = sf1_write(adap, 1, 1, 0, SF_RD_ID);
3069 if (!ret)
3070 ret = sf1_read(adap, 3, 0, 1, &info);
3071 t4_write_reg(adap, SF_OP, 0); /* unlock SF */
3072 if (ret)
3073 return ret;
3074
3075 if ((info & 0xff) != 0x20) /* not a Numonix flash */
3076 return -EINVAL;
3077 info >>= 16; /* log2 of size */
3078 if (info >= 0x14 && info < 0x18)
3079 adap->params.sf_nsec = 1 << (info - 16);
3080 else if (info == 0x18)
3081 adap->params.sf_nsec = 64;
3082 else
3083 return -EINVAL;
3084 adap->params.sf_size = 1 << info;
3085 adap->params.sf_fw_start =
3086 t4_read_reg(adap, CIM_BOOT_CFG) & BOOTADDR_MASK;
3087 return 0;
3088}
3089
56d36be4
DM
3090/**
3091 * t4_prep_adapter - prepare SW and HW for operation
3092 * @adapter: the adapter
3093 * @reset: if true perform a HW reset
3094 *
3095 * Initialize adapter SW state for the various HW modules, set initial
3096 * values for some adapter tunables, take PHYs out of reset, and
3097 * initialize the MDIO interface.
3098 */
3099int __devinit t4_prep_adapter(struct adapter *adapter)
3100{
3101 int ret;
3102
204dc3c0 3103 ret = t4_wait_dev_ready(adapter);
56d36be4
DM
3104 if (ret < 0)
3105 return ret;
3106
3107 get_pci_mode(adapter, &adapter->params.pci);
3108 adapter->params.rev = t4_read_reg(adapter, PL_REV);
3109
900a6596
DM
3110 ret = get_flash_params(adapter);
3111 if (ret < 0) {
3112 dev_err(adapter->pdev_dev, "error %d identifying flash\n", ret);
3113 return ret;
3114 }
3115
56d36be4
DM
3116 ret = get_vpd_params(adapter, &adapter->params.vpd);
3117 if (ret < 0)
3118 return ret;
3119
3120 init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);
3121
3122 /*
3123 * Default port for debugging in case we can't reach FW.
3124 */
3125 adapter->params.nports = 1;
3126 adapter->params.portvec = 1;
3127 return 0;
3128}
3129
3130int __devinit t4_port_init(struct adapter *adap, int mbox, int pf, int vf)
3131{
3132 u8 addr[6];
3133 int ret, i, j = 0;
3134 struct fw_port_cmd c;
3135
3136 memset(&c, 0, sizeof(c));
3137
3138 for_each_port(adap, i) {
3139 unsigned int rss_size;
3140 struct port_info *p = adap2pinfo(adap, i);
3141
3142 while ((adap->params.portvec & (1 << j)) == 0)
3143 j++;
3144
3145 c.op_to_portid = htonl(FW_CMD_OP(FW_PORT_CMD) |
3146 FW_CMD_REQUEST | FW_CMD_READ |
3147 FW_PORT_CMD_PORTID(j));
3148 c.action_to_len16 = htonl(
3149 FW_PORT_CMD_ACTION(FW_PORT_ACTION_GET_PORT_INFO) |
3150 FW_LEN16(c));
3151 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
3152 if (ret)
3153 return ret;
3154
3155 ret = t4_alloc_vi(adap, mbox, j, pf, vf, 1, addr, &rss_size);
3156 if (ret < 0)
3157 return ret;
3158
3159 p->viid = ret;
3160 p->tx_chan = j;
3161 p->lport = j;
3162 p->rss_size = rss_size;
3163 memcpy(adap->port[i]->dev_addr, addr, ETH_ALEN);
3164 memcpy(adap->port[i]->perm_addr, addr, ETH_ALEN);
f21ce1c3 3165 adap->port[i]->dev_id = j;
56d36be4
DM
3166
3167 ret = ntohl(c.u.info.lstatus_to_modtype);
3168 p->mdio_addr = (ret & FW_PORT_CMD_MDIOCAP) ?
3169 FW_PORT_CMD_MDIOADDR_GET(ret) : -1;
3170 p->port_type = FW_PORT_CMD_PTYPE_GET(ret);
3171 p->mod_type = FW_PORT_CMD_MODTYPE_GET(ret);
3172
3173 init_link_config(&p->link_cfg, ntohs(c.u.info.pcap));
3174 j++;
3175 }
3176 return 0;
3177}