cxgb4: rearrange initialization code in preparation for EEH
[linux-2.6-block.git] / drivers / net / cxgb4 / t4_hw.c
CommitLineData
56d36be4
DM
1/*
2 * This file is part of the Chelsio T4 Ethernet driver for Linux.
3 *
4 * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved.
5 *
6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenIB.org BSD license below:
11 *
12 * Redistribution and use in source and binary forms, with or
13 * without modification, are permitted provided that the following
14 * conditions are met:
15 *
16 * - Redistributions of source code must retain the above
17 * copyright notice, this list of conditions and the following
18 * disclaimer.
19 *
20 * - Redistributions in binary form must reproduce the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer in the documentation and/or other materials
23 * provided with the distribution.
24 *
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 * SOFTWARE.
33 */
34
35#include <linux/init.h>
36#include <linux/delay.h>
37#include "cxgb4.h"
38#include "t4_regs.h"
39#include "t4fw_api.h"
40
41/**
42 * t4_wait_op_done_val - wait until an operation is completed
43 * @adapter: the adapter performing the operation
44 * @reg: the register to check for completion
45 * @mask: a single-bit field within @reg that indicates completion
46 * @polarity: the value of the field when the operation is completed
47 * @attempts: number of check iterations
48 * @delay: delay in usecs between iterations
49 * @valp: where to store the value of the register at completion time
50 *
51 * Wait until an operation is completed by checking a bit in a register
52 * up to @attempts times. If @valp is not NULL the value of the register
53 * at the time it indicated completion is stored there. Returns 0 if the
54 * operation completes and -EAGAIN otherwise.
55 */
de498c89
RD
56static int t4_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
57 int polarity, int attempts, int delay, u32 *valp)
56d36be4
DM
58{
59 while (1) {
60 u32 val = t4_read_reg(adapter, reg);
61
62 if (!!(val & mask) == polarity) {
63 if (valp)
64 *valp = val;
65 return 0;
66 }
67 if (--attempts == 0)
68 return -EAGAIN;
69 if (delay)
70 udelay(delay);
71 }
72}
73
/*
 * t4_wait_op_done - wait for an operation, discarding the completion value.
 * Thin wrapper around t4_wait_op_done_val() for callers that do not need
 * the register value observed at completion time.
 */
static inline int t4_wait_op_done(struct adapter *adapter, int reg, u32 mask,
				  int polarity, int attempts, int delay)
{
	return t4_wait_op_done_val(adapter, reg, mask, polarity, attempts,
				   delay, NULL);
}
80
81/**
82 * t4_set_reg_field - set a register field to a value
83 * @adapter: the adapter to program
84 * @addr: the register address
85 * @mask: specifies the portion of the register to modify
86 * @val: the new value for the register field
87 *
88 * Sets a register field specified by the supplied mask to the
89 * given value.
90 */
91void t4_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask,
92 u32 val)
93{
94 u32 v = t4_read_reg(adapter, addr) & ~mask;
95
96 t4_write_reg(adapter, addr, v | val);
97 (void) t4_read_reg(adapter, addr); /* flush */
98}
99
100/**
101 * t4_read_indirect - read indirectly addressed registers
102 * @adap: the adapter
103 * @addr_reg: register holding the indirect address
104 * @data_reg: register holding the value of the indirect register
105 * @vals: where the read register values are stored
106 * @nregs: how many indirect registers to read
107 * @start_idx: index of first indirect register to read
108 *
109 * Reads registers that are accessed indirectly through an address/data
110 * register pair.
111 */
de498c89
RD
112static void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
113 unsigned int data_reg, u32 *vals,
114 unsigned int nregs, unsigned int start_idx)
56d36be4
DM
115{
116 while (nregs--) {
117 t4_write_reg(adap, addr_reg, start_idx);
118 *vals++ = t4_read_reg(adap, data_reg);
119 start_idx++;
120 }
121}
122
#if 0	/* currently unused; compiled out but kept for reference */
/**
 *	t4_write_indirect - write indirectly addressed registers
 *	@adap: the adapter
 *	@addr_reg: register holding the indirect addresses
 *	@data_reg: register holding the value for the indirect registers
 *	@vals: values to write
 *	@nregs: how many indirect registers to write
 *	@start_idx: address of first indirect register to write
 *
 *	Writes a sequential block of registers that are accessed indirectly
 *	through an address/data register pair.
 */
static void t4_write_indirect(struct adapter *adap, unsigned int addr_reg,
			      unsigned int data_reg, const u32 *vals,
			      unsigned int nregs, unsigned int start_idx)
{
	while (nregs--) {
		t4_write_reg(adap, addr_reg, start_idx++);
		t4_write_reg(adap, data_reg, *vals++);
	}
}
#endif
56d36be4
DM
146
147/*
148 * Get the reply to a mailbox command and store it in @rpl in big-endian order.
149 */
150static void get_mbox_rpl(struct adapter *adap, __be64 *rpl, int nflit,
151 u32 mbox_addr)
152{
153 for ( ; nflit; nflit--, mbox_addr += 8)
154 *rpl++ = cpu_to_be64(t4_read_reg64(adap, mbox_addr));
155}
156
/*
 * Handle a FW assertion reported in a mailbox: pull the fw_debug_cmd
 * payload out of the mailbox registers and log the assertion location
 * (filename/line) and its two diagnostic values.
 */
static void fw_asrt(struct adapter *adap, u32 mbox_addr)
{
	struct fw_debug_cmd asrt;

	/* reply length in flits = sizeof(asrt) / 8 */
	get_mbox_rpl(adap, (__be64 *)&asrt, sizeof(asrt) / 8, mbox_addr);
	dev_alert(adap->pdev_dev,
		  "FW assertion at %.16s:%u, val0 %#x, val1 %#x\n",
		  asrt.u.assert.filename_0_7, ntohl(asrt.u.assert.line),
		  ntohl(asrt.u.assert.x), ntohl(asrt.u.assert.y));
}
170
171static void dump_mbox(struct adapter *adap, int mbox, u32 data_reg)
172{
173 dev_err(adap->pdev_dev,
174 "mbox %d: %llx %llx %llx %llx %llx %llx %llx %llx\n", mbox,
175 (unsigned long long)t4_read_reg64(adap, data_reg),
176 (unsigned long long)t4_read_reg64(adap, data_reg + 8),
177 (unsigned long long)t4_read_reg64(adap, data_reg + 16),
178 (unsigned long long)t4_read_reg64(adap, data_reg + 24),
179 (unsigned long long)t4_read_reg64(adap, data_reg + 32),
180 (unsigned long long)t4_read_reg64(adap, data_reg + 40),
181 (unsigned long long)t4_read_reg64(adap, data_reg + 48),
182 (unsigned long long)t4_read_reg64(adap, data_reg + 56));
183}
184
/**
 *	t4_wr_mbox_meat - send a command to FW through the given mailbox
 *	@adap: the adapter
 *	@mbox: index of the mailbox to use
 *	@cmd: the command to write
 *	@size: command length in bytes
 *	@rpl: where to optionally store the reply
 *	@sleep_ok: if true we may sleep while awaiting command completion
 *
 *	Sends the given command to FW through the selected mailbox and waits
 *	for the FW to execute the command.  If @rpl is not %NULL it is used to
 *	store the FW's reply to the command.  The command and its optional
 *	reply are of the same length.  FW can take up to %FW_CMD_MAX_TIMEOUT ms
 *	to respond.  @sleep_ok determines whether we may sleep while awaiting
 *	the response.  If sleeping is allowed we use progressive backoff
 *	otherwise we spin.
 *
 *	The return value is 0 on success or a negative errno on failure.  A
 *	failure can happen either because we are not able to execute the
 *	command or FW executes it but signals an error.  In the latter case
 *	the return value is the error code indicated by FW (negated).
 */
int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
		    void *rpl, bool sleep_ok)
{
	/* progressive backoff schedule (ms) used when sleeping is allowed */
	static int delay[] = {
		1, 1, 3, 5, 10, 10, 20, 50, 100, 200
	};

	u32 v;
	u64 res;
	int i, ms, delay_idx;
	const __be64 *p = cmd;
	u32 data_reg = PF_REG(mbox, CIM_PF_MAILBOX_DATA);
	u32 ctl_reg = PF_REG(mbox, CIM_PF_MAILBOX_CTRL);

	/* commands must be a multiple of 16 bytes and fit the mailbox */
	if ((size & 15) || size > MBOX_LEN)
		return -EINVAL;

	/* try a few times to grab mailbox ownership for the driver */
	v = MBOWNER_GET(t4_read_reg(adap, ctl_reg));
	for (i = 0; v == MBOX_OWNER_NONE && i < 3; i++)
		v = MBOWNER_GET(t4_read_reg(adap, ctl_reg));

	if (v != MBOX_OWNER_DRV)
		return v ? -EBUSY : -ETIMEDOUT;

	/* copy the command into the mailbox data registers, 8 bytes at a time */
	for (i = 0; i < size; i += 8)
		t4_write_reg64(adap, data_reg + i, be64_to_cpu(*p++));

	/* hand the mailbox to FW and mark the message valid */
	t4_write_reg(adap, ctl_reg, MBMSGVALID | MBOWNER(MBOX_OWNER_FW));
	t4_read_reg(adap, ctl_reg);          /* flush write */

	delay_idx = 0;
	ms = delay[0];

	for (i = 0; i < FW_CMD_MAX_TIMEOUT; i += ms) {
		if (sleep_ok) {
			ms = delay[delay_idx];  /* last element may repeat */
			if (delay_idx < ARRAY_SIZE(delay) - 1)
				delay_idx++;
			msleep(ms);
		} else
			mdelay(ms);

		v = t4_read_reg(adap, ctl_reg);
		if (MBOWNER_GET(v) == MBOX_OWNER_DRV) {
			/* ownership back without a valid message: spurious,
			 * clear and keep waiting */
			if (!(v & MBMSGVALID)) {
				t4_write_reg(adap, ctl_reg, 0);
				continue;
			}

			res = t4_read_reg64(adap, data_reg);
			if (FW_CMD_OP_GET(res >> 32) == FW_DEBUG_CMD) {
				/* FW reported an assertion instead of a reply */
				fw_asrt(adap, data_reg);
				res = FW_CMD_RETVAL(EIO);
			} else if (rpl)
				get_mbox_rpl(adap, rpl, size / 8, data_reg);

			if (FW_CMD_RETVAL_GET((int)res))
				dump_mbox(adap, mbox, data_reg);
			t4_write_reg(adap, ctl_reg, 0);
			return -FW_CMD_RETVAL_GET((int)res);
		}
	}

	dump_mbox(adap, mbox, data_reg);
	dev_err(adap->pdev_dev, "command %#x in mailbox %d timed out\n",
		*(const u8 *)cmd, mbox);
	return -ETIMEDOUT;
}
275
/**
 *	t4_mc_read - read from MC through backdoor accesses
 *	@adap: the adapter
 *	@addr: address of first byte requested
 *	@data: 64 bytes of data containing the requested address
 *	@ecc: where to store the corresponding 64-bit ECC word
 *
 *	Read 64 bytes of data from MC starting at a 64-byte-aligned address
 *	that covers the requested address @addr.  If @ecc is not %NULL it
 *	is assigned the 64-bit ECC word for the read data.
 */
int t4_mc_read(struct adapter *adap, u32 addr, __be32 *data, u64 *ecc)
{
	int i;

	/* bail out if a previous BIST operation is still in flight */
	if (t4_read_reg(adap, MC_BIST_CMD) & START_BIST)
		return -EBUSY;
	t4_write_reg(adap, MC_BIST_CMD_ADDR, addr & ~0x3fU);	/* 64B align */
	t4_write_reg(adap, MC_BIST_CMD_LEN, 64);
	t4_write_reg(adap, MC_BIST_DATA_PATTERN, 0xc);
	t4_write_reg(adap, MC_BIST_CMD, BIST_OPCODE(1) | START_BIST |
		     BIST_CMD_GAP(1));
	i = t4_wait_op_done(adap, MC_BIST_CMD, START_BIST, 0, 10, 1);
	if (i)
		return i;

#define MC_DATA(i) MC_BIST_STATUS_REG(MC_BIST_STATUS_RDATA, i)

	/* status registers are read highest index first */
	for (i = 15; i >= 0; i--)
		*data++ = htonl(t4_read_reg(adap, MC_DATA(i)));
	if (ecc)
		*ecc = t4_read_reg64(adap, MC_DATA(16));
#undef MC_DATA
	return 0;
}
311
/**
 *	t4_edc_read - read from EDC through backdoor accesses
 *	@adap: the adapter
 *	@idx: which EDC to access
 *	@addr: address of first byte requested
 *	@data: 64 bytes of data containing the requested address
 *	@ecc: where to store the corresponding 64-bit ECC word
 *
 *	Read 64 bytes of data from EDC starting at a 64-byte-aligned address
 *	that covers the requested address @addr.  If @ecc is not %NULL it
 *	is assigned the 64-bit ECC word for the read data.
 */
int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
{
	int i;

	idx *= EDC_STRIDE;	/* register offset of the selected EDC */
	if (t4_read_reg(adap, EDC_BIST_CMD + idx) & START_BIST)
		return -EBUSY;
	t4_write_reg(adap, EDC_BIST_CMD_ADDR + idx, addr & ~0x3fU);
	t4_write_reg(adap, EDC_BIST_CMD_LEN + idx, 64);
	t4_write_reg(adap, EDC_BIST_DATA_PATTERN + idx, 0xc);
	t4_write_reg(adap, EDC_BIST_CMD + idx,
		     BIST_OPCODE(1) | BIST_CMD_GAP(1) | START_BIST);
	i = t4_wait_op_done(adap, EDC_BIST_CMD + idx, START_BIST, 0, 10, 1);
	if (i)
		return i;

#define EDC_DATA(i) (EDC_BIST_STATUS_REG(EDC_BIST_STATUS_RDATA, i) + idx)

	/* status registers are read highest index first */
	for (i = 15; i >= 0; i--)
		*data++ = htonl(t4_read_reg(adap, EDC_DATA(i)));
	if (ecc)
		*ecc = t4_read_reg64(adap, EDC_DATA(16));
#undef EDC_DATA
	return 0;
}
349
56d36be4
DM
/*
 * Partial EEPROM Vital Product Data structure.  Includes only the ID and
 * VPD-R header, i.e. just enough to locate the VPD-R payload that
 * get_vpd_params() parses.
 */
struct t4_vpd_hdr {
	u8  id_tag;		/* ID string resource tag */
	u8  id_len[2];		/* ID string length */
	u8  id_data[ID_LEN];	/* product identifier string */
	u8  vpdr_tag;		/* VPD-R (read-only data) resource tag */
	u8  vpdr_len[2];	/* VPD-R section length */
};

#define EEPROM_STAT_ADDR   0x7bfc	/* written by t4_seeprom_wp() to
					 * control EEPROM write protection */
#define VPD_BASE           0		/* EEPROM offset of the VPD block */
#define VPD_LEN            512		/* number of VPD bytes read/parsed */
56d36be4
DM
365
366/**
367 * t4_seeprom_wp - enable/disable EEPROM write protection
368 * @adapter: the adapter
369 * @enable: whether to enable or disable write protection
370 *
371 * Enables or disables write protection on the serial EEPROM.
372 */
373int t4_seeprom_wp(struct adapter *adapter, bool enable)
374{
375 unsigned int v = enable ? 0xc : 0;
376 int ret = pci_write_vpd(adapter->pdev, EEPROM_STAT_ADDR, 4, &v);
377 return ret < 0 ? ret : 0;
378}
379
/**
 *	get_vpd_params - read VPD parameters from VPD EEPROM
 *	@adapter: adapter to read
 *	@p: where to store the parameters
 *
 *	Reads card parameters (clock, id, EC level, serial number) stored in
 *	the VPD EEPROM and fills in @p.  Returns 0 on success, a negative
 *	errno on read failure, bad VPD-R length, missing keyword or bad
 *	checksum.
 */
static int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
{
	int i, ret;
	int ec, sn, v2;		/* offsets of the EC/SN/V2 keyword data */
	u8 vpd[VPD_LEN], csum;
	unsigned int vpdr_len;
	const struct t4_vpd_hdr *v;

	ret = pci_read_vpd(adapter->pdev, VPD_BASE, sizeof(vpd), vpd);
	if (ret < 0)
		return ret;

	v = (const struct t4_vpd_hdr *)vpd;
	vpdr_len = pci_vpd_lrdt_size(&v->vpdr_tag);
	/* reject a VPD-R section that would run past our buffer */
	if (vpdr_len + sizeof(struct t4_vpd_hdr) > VPD_LEN) {
		dev_err(adapter->pdev_dev, "bad VPD-R length %u\n", vpdr_len);
		return -EINVAL;
	}

/* Locate keyword @name in the VPD-R section and leave the offset of its
 * data (past the info-field header) in @var; returns -EINVAL from the
 * enclosing function if the keyword is absent. */
#define FIND_VPD_KW(var, name) do { \
	var = pci_vpd_find_info_keyword(&v->id_tag, sizeof(struct t4_vpd_hdr), \
					vpdr_len, name); \
	if (var < 0) { \
		dev_err(adapter->pdev_dev, "missing VPD keyword " name "\n"); \
		return -EINVAL; \
	} \
	var += PCI_VPD_INFO_FLD_HDR_SIZE; \
} while (0)

	/* sum all bytes up to and including the RV byte; must be zero */
	FIND_VPD_KW(i, "RV");
	for (csum = 0; i >= 0; i--)
		csum += vpd[i];

	if (csum) {
		dev_err(adapter->pdev_dev,
			"corrupted VPD EEPROM, actual csum %u\n", csum);
		return -EINVAL;
	}

	FIND_VPD_KW(ec, "EC");
	FIND_VPD_KW(sn, "SN");
	FIND_VPD_KW(v2, "V2");
#undef FIND_VPD_KW

	/* V2 holds the core clock as a decimal string */
	p->cclk = simple_strtoul(vpd + v2, NULL, 10);
	memcpy(p->id, v->id_data, ID_LEN);
	strim(p->id);
	memcpy(p->ec, vpd + ec, EC_LEN);
	strim(p->ec);
	/* SN is variable length; copy at most SERNUM_LEN bytes */
	i = pci_vpd_info_field_size(vpd + sn - PCI_VPD_INFO_FLD_HDR_SIZE);
	memcpy(p->sn, vpd + sn, min(i, SERNUM_LEN));
	strim(p->sn);
	return 0;
}
441
/* serial flash and firmware constants */
enum {
	SF_ATTEMPTS = 10,             /* max retries for SF operations */

	/* flash command opcodes (sent as the first byte of an SF1 write) */
	SF_PROG_PAGE    = 2,          /* program page */
	SF_WR_DISABLE   = 4,          /* disable writes */
	SF_RD_STATUS    = 5,          /* read status register */
	SF_WR_ENABLE    = 6,          /* enable writes */
	SF_RD_DATA_FAST = 0xb,        /* read flash */
	SF_RD_ID        = 0x9f,       /* read ID */
	SF_ERASE_SECTOR = 0xd8,       /* erase sector */

	FW_MAX_SIZE = 512 * 1024,     /* upper bound enforced by t4_load_fw() */
};
457
458/**
459 * sf1_read - read data from the serial flash
460 * @adapter: the adapter
461 * @byte_cnt: number of bytes to read
462 * @cont: whether another operation will be chained
463 * @lock: whether to lock SF for PL access only
464 * @valp: where to store the read data
465 *
466 * Reads up to 4 bytes of data from the serial flash. The location of
467 * the read needs to be specified prior to calling this by issuing the
468 * appropriate commands to the serial flash.
469 */
470static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
471 int lock, u32 *valp)
472{
473 int ret;
474
475 if (!byte_cnt || byte_cnt > 4)
476 return -EINVAL;
477 if (t4_read_reg(adapter, SF_OP) & BUSY)
478 return -EBUSY;
479 cont = cont ? SF_CONT : 0;
480 lock = lock ? SF_LOCK : 0;
481 t4_write_reg(adapter, SF_OP, lock | cont | BYTECNT(byte_cnt - 1));
482 ret = t4_wait_op_done(adapter, SF_OP, BUSY, 0, SF_ATTEMPTS, 5);
483 if (!ret)
484 *valp = t4_read_reg(adapter, SF_DATA);
485 return ret;
486}
487
488/**
489 * sf1_write - write data to the serial flash
490 * @adapter: the adapter
491 * @byte_cnt: number of bytes to write
492 * @cont: whether another operation will be chained
493 * @lock: whether to lock SF for PL access only
494 * @val: value to write
495 *
496 * Writes up to 4 bytes of data to the serial flash. The location of
497 * the write needs to be specified prior to calling this by issuing the
498 * appropriate commands to the serial flash.
499 */
500static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont,
501 int lock, u32 val)
502{
503 if (!byte_cnt || byte_cnt > 4)
504 return -EINVAL;
505 if (t4_read_reg(adapter, SF_OP) & BUSY)
506 return -EBUSY;
507 cont = cont ? SF_CONT : 0;
508 lock = lock ? SF_LOCK : 0;
509 t4_write_reg(adapter, SF_DATA, val);
510 t4_write_reg(adapter, SF_OP, lock |
511 cont | BYTECNT(byte_cnt - 1) | OP_WR);
512 return t4_wait_op_done(adapter, SF_OP, BUSY, 0, SF_ATTEMPTS, 5);
513}
514
515/**
516 * flash_wait_op - wait for a flash operation to complete
517 * @adapter: the adapter
518 * @attempts: max number of polls of the status register
519 * @delay: delay between polls in ms
520 *
521 * Wait for a flash operation to complete by polling the status register.
522 */
523static int flash_wait_op(struct adapter *adapter, int attempts, int delay)
524{
525 int ret;
526 u32 status;
527
528 while (1) {
529 if ((ret = sf1_write(adapter, 1, 1, 1, SF_RD_STATUS)) != 0 ||
530 (ret = sf1_read(adapter, 1, 0, 1, &status)) != 0)
531 return ret;
532 if (!(status & 1))
533 return 0;
534 if (--attempts == 0)
535 return -EAGAIN;
536 if (delay)
537 msleep(delay);
538 }
539}
540
/**
 *	t4_read_flash - read words from serial flash
 *	@adapter: the adapter
 *	@addr: the start address for the read
 *	@nwords: how many 32-bit words to read
 *	@data: where to store the read data
 *	@byte_oriented: whether to store data as bytes or as words
 *
 *	Read the specified number of 32-bit words from the serial flash.
 *	If @byte_oriented is set the read data is stored as a byte array
 *	(i.e., big-endian), otherwise as 32-bit words in the platform's
 *	natural endianess.
 */
static int t4_read_flash(struct adapter *adapter, unsigned int addr,
			 unsigned int nwords, u32 *data, int byte_oriented)
{
	int ret;

	/* the read must lie within the flash and be word aligned */
	if (addr + nwords * sizeof(u32) > adapter->params.sf_size || (addr & 3))
		return -EINVAL;

	/* command byte + 24-bit address, byte-swapped for the SF interface */
	addr = swab32(addr) | SF_RD_DATA_FAST;

	if ((ret = sf1_write(adapter, 4, 1, 0, addr)) != 0 ||
	    (ret = sf1_read(adapter, 1, 1, 0, data)) != 0)
		return ret;

	for ( ; nwords; nwords--, data++) {
		/* chain reads until the last word, which also unlocks SF */
		ret = sf1_read(adapter, 4, nwords > 1, nwords == 1, data);
		if (nwords == 1)
			t4_write_reg(adapter, SF_OP, 0);    /* unlock SF */
		if (ret)
			return ret;
		if (byte_oriented)
			*data = htonl(*data);
	}
	return 0;
}
579
/**
 *	t4_write_flash - write up to a page of data to the serial flash
 *	@adapter: the adapter
 *	@addr: the start address to write
 *	@n: length of data to write in bytes
 *	@data: the data to write
 *
 *	Writes up to a page of data (256 bytes) to the serial flash starting
 *	at the given address.  All the data must be written to the same page.
 *	The page is read back afterwards to verify the write.
 */
static int t4_write_flash(struct adapter *adapter, unsigned int addr,
			  unsigned int n, const u8 *data)
{
	int ret;
	u32 buf[64];
	unsigned int i, c, left, val, offset = addr & 0xff;

	/* the write must fit within the flash and within a single page */
	if (addr >= adapter->params.sf_size || offset + n > SF_PAGE_SIZE)
		return -EINVAL;

	val = swab32(addr) | SF_PROG_PAGE;

	if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
	    (ret = sf1_write(adapter, 4, 1, 1, val)) != 0)
		goto unlock;

	/* stream the payload 4 bytes at a time, big-endian packed */
	for (left = n; left; left -= c) {
		c = min(left, 4U);
		for (val = 0, i = 0; i < c; ++i)
			val = (val << 8) + *data++;

		ret = sf1_write(adapter, c, c != left, 1, val);
		if (ret)
			goto unlock;
	}
	ret = flash_wait_op(adapter, 8, 1);
	if (ret)
		goto unlock;

	t4_write_reg(adapter, SF_OP, 0);    /* unlock SF */

	/* Read the page to verify the write succeeded */
	ret = t4_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf, 1);
	if (ret)
		return ret;

	/* @data has advanced by n, so data - n is the original buffer */
	if (memcmp(data - n, (u8 *)buf + offset, n)) {
		dev_err(adapter->pdev_dev,
			"failed to correctly write the flash page at %#x\n",
			addr);
		return -EIO;
	}
	return 0;

unlock:
	t4_write_reg(adapter, SF_OP, 0);    /* unlock SF */
	return ret;
}
638
/**
 *	get_fw_version - read the firmware version
 *	@adapter: the adapter
 *	@vers: where to place the version
 *
 *	Reads the FW version word from the firmware header in flash.
 *	Returns whatever t4_read_flash() returns.
 */
static int get_fw_version(struct adapter *adapter, u32 *vers)
{
	return t4_read_flash(adapter, adapter->params.sf_fw_start +
			     offsetof(struct fw_hdr, fw_ver), 1, vers, 0);
}
651
/**
 *	get_tp_version - read the TP microcode version
 *	@adapter: the adapter
 *	@vers: where to place the version
 *
 *	Reads the TP microcode version word from the firmware header in
 *	flash.  Returns whatever t4_read_flash() returns.
 */
static int get_tp_version(struct adapter *adapter, u32 *vers)
{
	return t4_read_flash(adapter, adapter->params.sf_fw_start +
			     offsetof(struct fw_hdr, tp_microcode_ver),
			     1, vers, 0);
}
665
/**
 *	t4_check_fw_version - check if the FW is compatible with this driver
 *	@adapter: the adapter
 *
 *	Checks if an adapter's FW is compatible with the driver.  Returns 0
 *	if there's exact match, a negative error if the version could not be
 *	read or there's a major version mismatch, and a positive value if the
 *	expected major version is found but there's a minor version mismatch.
 */
int t4_check_fw_version(struct adapter *adapter)
{
	u32 api_vers[2];
	int ret, major, minor, micro;

	/* cache FW, TP and API versions in adapter->params as a side effect */
	ret = get_fw_version(adapter, &adapter->params.fw_vers);
	if (!ret)
		ret = get_tp_version(adapter, &adapter->params.tp_vers);
	if (!ret)
		ret = t4_read_flash(adapter, adapter->params.sf_fw_start +
				    offsetof(struct fw_hdr, intfver_nic),
				    2, api_vers, 1);
	if (ret)
		return ret;

	major = FW_HDR_FW_VER_MAJOR_GET(adapter->params.fw_vers);
	minor = FW_HDR_FW_VER_MINOR_GET(adapter->params.fw_vers);
	micro = FW_HDR_FW_VER_MICRO_GET(adapter->params.fw_vers);
	memcpy(adapter->params.api_vers, api_vers,
	       sizeof(adapter->params.api_vers));

	if (major != FW_VERSION_MAJOR) {            /* major mismatch - fail */
		dev_err(adapter->pdev_dev,
			"card FW has major version %u, driver wants %u\n",
			major, FW_VERSION_MAJOR);
		return -EINVAL;
	}

	if (minor == FW_VERSION_MINOR && micro == FW_VERSION_MICRO)
		return 0;                   /* perfect match */

	/* Minor/micro version mismatch.  Report it but often it's OK. */
	return 1;
}
709
710/**
711 * t4_flash_erase_sectors - erase a range of flash sectors
712 * @adapter: the adapter
713 * @start: the first sector to erase
714 * @end: the last sector to erase
715 *
716 * Erases the sectors in the given inclusive range.
717 */
718static int t4_flash_erase_sectors(struct adapter *adapter, int start, int end)
719{
720 int ret = 0;
721
722 while (start <= end) {
723 if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
724 (ret = sf1_write(adapter, 4, 0, 1,
725 SF_ERASE_SECTOR | (start << 8))) != 0 ||
900a6596 726 (ret = flash_wait_op(adapter, 14, 500)) != 0) {
56d36be4
DM
727 dev_err(adapter->pdev_dev,
728 "erase of flash sector %d failed, error %d\n",
729 start, ret);
730 break;
731 }
732 start++;
733 }
734 t4_write_reg(adapter, SF_OP, 0); /* unlock SF */
735 return ret;
736}
737
/**
 *	t4_load_fw - download firmware
 *	@adap: the adapter
 *	@fw_data: the firmware image to write
 *	@size: image size
 *
 *	Write the supplied firmware image to the card's serial flash.
 *	The image is validated (size, header, checksum), the FW flash
 *	region is erased, and the image is written page by page.  The
 *	real version word is written last so an interrupted download
 *	leaves a visibly bad version in flash.
 */
int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
{
	u32 csum;
	int ret, addr;
	unsigned int i;
	u8 first_page[SF_PAGE_SIZE];
	const u32 *p = (const u32 *)fw_data;
	const struct fw_hdr *hdr = (const struct fw_hdr *)fw_data;
	unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
	unsigned int fw_img_start = adap->params.sf_fw_start;
	unsigned int fw_start_sec = fw_img_start / sf_sec_size;

	if (!size) {
		dev_err(adap->pdev_dev, "FW image has no data\n");
		return -EINVAL;
	}
	if (size & 511) {
		dev_err(adap->pdev_dev,
			"FW image size not multiple of 512 bytes\n");
		return -EINVAL;
	}
	if (ntohs(hdr->len512) * 512 != size) {
		dev_err(adap->pdev_dev,
			"FW image size differs from size in FW header\n");
		return -EINVAL;
	}
	if (size > FW_MAX_SIZE) {
		dev_err(adap->pdev_dev, "FW image too large, max is %u bytes\n",
			FW_MAX_SIZE);
		return -EFBIG;
	}

	/* sum of all 32-bit words in the image must be 0xffffffff */
	for (csum = 0, i = 0; i < size / sizeof(csum); i++)
		csum += ntohl(p[i]);

	if (csum != 0xffffffff) {
		dev_err(adap->pdev_dev,
			"corrupted firmware image, checksum %#x\n", csum);
		return -EINVAL;
	}

	i = DIV_ROUND_UP(size, sf_sec_size);        /* # of sectors spanned */
	ret = t4_flash_erase_sectors(adap, fw_start_sec, fw_start_sec + i - 1);
	if (ret)
		goto out;

	/*
	 * We write the correct version at the end so the driver can see a bad
	 * version if the FW write fails.  Start by writing a copy of the
	 * first page with a bad version.
	 */
	memcpy(first_page, fw_data, SF_PAGE_SIZE);
	((struct fw_hdr *)first_page)->fw_ver = htonl(0xffffffff);
	ret = t4_write_flash(adap, fw_img_start, SF_PAGE_SIZE, first_page);
	if (ret)
		goto out;

	/* write the remaining pages; size is a multiple of 512 so this
	 * terminates exactly at the image end */
	addr = fw_img_start;
	for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
		addr += SF_PAGE_SIZE;
		fw_data += SF_PAGE_SIZE;
		ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, fw_data);
		if (ret)
			goto out;
	}

	/* finally, patch in the real version word from the header */
	ret = t4_write_flash(adap,
			     fw_img_start + offsetof(struct fw_hdr, fw_ver),
			     sizeof(hdr->fw_ver), (const u8 *)&hdr->fw_ver);
out:
	if (ret)
		dev_err(adap->pdev_dev, "firmware download failed, error %d\n",
			ret);
	return ret;
}
821
/* port capabilities we are allowed to advertise: speeds plus autoneg */
#define ADVERT_MASK (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G |\
		     FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_ANEG)

/**
 *	t4_link_start - apply link configuration to MAC/PHY
 *	@adap: the adapter
 *	@mbox: mbox to use for the FW command
 *	@port: the port id
 *	@lc: the requested link configuration
 *
 *	Set up a port's MAC and PHY according to a desired link configuration.
 *	- If the PHY can auto-negotiate first decide what to advertise, then
 *	  enable/disable auto-negotiation as desired, and reset.
 *	- If the PHY does not auto-negotiate just reset it.
 *	- If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
 *	  otherwise do it later based on the outcome of auto-negotiation.
 */
int t4_link_start(struct adapter *adap, unsigned int mbox, unsigned int port,
		  struct link_config *lc)
{
	struct fw_port_cmd c;
	unsigned int fc = 0, mdi = FW_PORT_MDI(FW_PORT_MDI_AUTO);

	lc->link_ok = 0;
	if (lc->requested_fc & PAUSE_RX)
		fc |= FW_PORT_CAP_FC_RX;
	if (lc->requested_fc & PAUSE_TX)
		fc |= FW_PORT_CAP_FC_TX;

	memset(&c, 0, sizeof(c));
	c.op_to_portid = htonl(FW_CMD_OP(FW_PORT_CMD) | FW_CMD_REQUEST |
			       FW_CMD_EXEC | FW_PORT_CMD_PORTID(port));
	c.action_to_len16 = htonl(FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
				  FW_LEN16(c));

	if (!(lc->supported & FW_PORT_CAP_ANEG)) {
		/* PHY can't autonegotiate: advertise what it supports */
		c.u.l1cfg.rcap = htonl((lc->supported & ADVERT_MASK) | fc);
		lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
	} else if (lc->autoneg == AUTONEG_DISABLE) {
		/* autoneg forced off: request the exact speed */
		c.u.l1cfg.rcap = htonl(lc->requested_speed | fc | mdi);
		lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
	} else
		c.u.l1cfg.rcap = htonl(lc->advertising | fc | mdi);

	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}
867
/**
 *	t4_restart_aneg - restart autonegotiation
 *	@adap: the adapter
 *	@mbox: mbox to use for the FW command
 *	@port: the port id
 *
 *	Restarts autonegotiation for the selected port by issuing an L1
 *	configure command advertising only %FW_PORT_CAP_ANEG.
 */
int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port)
{
	struct fw_port_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_portid = htonl(FW_CMD_OP(FW_PORT_CMD) | FW_CMD_REQUEST |
			       FW_CMD_EXEC | FW_PORT_CMD_PORTID(port));
	c.action_to_len16 = htonl(FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
				  FW_LEN16(c));
	c.u.l1cfg.rcap = htonl(FW_PORT_CAP_ANEG);
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}
888
56d36be4
DM
/*
 * One entry of an interrupt-cause table consumed by
 * t4_handle_intr_status(); a table is terminated by an entry with mask 0.
 */
struct intr_info {
	unsigned int mask;       /* bits to check in interrupt status */
	const char *msg;         /* message to print or NULL */
	short stat_idx;          /* stat counter to increment or -1 */
	unsigned short fatal;    /* whether the condition reported is fatal */
};
895
/**
 *	t4_handle_intr_status - table driven interrupt handler
 *	@adapter: the adapter that generated the interrupt
 *	@reg: the interrupt status register to process
 *	@acts: table of interrupt actions
 *
 *	A table driven interrupt handler that applies a set of masks to an
 *	interrupt status word and performs the corresponding actions if the
 *	interrupts described by the mask have occurred.  The actions include
 *	optionally emitting a warning or alert message.  The table is
 *	terminated by an entry specifying mask 0.  Returns the number of
 *	fatal interrupt conditions.
 */
static int t4_handle_intr_status(struct adapter *adapter, unsigned int reg,
				 const struct intr_info *acts)
{
	int fatal = 0;
	unsigned int mask = 0;
	unsigned int status = t4_read_reg(adapter, reg);

	for ( ; acts->mask; ++acts) {
		if (!(status & acts->mask))
			continue;
		if (acts->fatal) {
			fatal++;
			dev_alert(adapter->pdev_dev, "%s (0x%x)\n", acts->msg,
				  status & acts->mask);
		} else if (acts->msg && printk_ratelimit())
			/* non-fatal: warn, rate-limited */
			dev_warn(adapter->pdev_dev, "%s (0x%x)\n", acts->msg,
				 status & acts->mask);
		mask |= acts->mask;
	}
	status &= mask;
	if (status)                           /* clear processed interrupts */
		t4_write_reg(adapter, reg, status);
	return fatal;
}
933
934/*
935 * Interrupt handler for the PCIE module.
936 */
937static void pcie_intr_handler(struct adapter *adapter)
938{
939 static struct intr_info sysbus_intr_info[] = {
940 { RNPP, "RXNP array parity error", -1, 1 },
941 { RPCP, "RXPC array parity error", -1, 1 },
942 { RCIP, "RXCIF array parity error", -1, 1 },
943 { RCCP, "Rx completions control array parity error", -1, 1 },
944 { RFTP, "RXFT array parity error", -1, 1 },
945 { 0 }
946 };
947 static struct intr_info pcie_port_intr_info[] = {
948 { TPCP, "TXPC array parity error", -1, 1 },
949 { TNPP, "TXNP array parity error", -1, 1 },
950 { TFTP, "TXFT array parity error", -1, 1 },
951 { TCAP, "TXCA array parity error", -1, 1 },
952 { TCIP, "TXCIF array parity error", -1, 1 },
953 { RCAP, "RXCA array parity error", -1, 1 },
954 { OTDD, "outbound request TLP discarded", -1, 1 },
955 { RDPE, "Rx data parity error", -1, 1 },
956 { TDUE, "Tx uncorrectable data error", -1, 1 },
957 { 0 }
958 };
959 static struct intr_info pcie_intr_info[] = {
960 { MSIADDRLPERR, "MSI AddrL parity error", -1, 1 },
961 { MSIADDRHPERR, "MSI AddrH parity error", -1, 1 },
962 { MSIDATAPERR, "MSI data parity error", -1, 1 },
963 { MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 },
964 { MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 },
965 { MSIXDATAPERR, "MSI-X data parity error", -1, 1 },
966 { MSIXDIPERR, "MSI-X DI parity error", -1, 1 },
967 { PIOCPLPERR, "PCI PIO completion FIFO parity error", -1, 1 },
968 { PIOREQPERR, "PCI PIO request FIFO parity error", -1, 1 },
969 { TARTAGPERR, "PCI PCI target tag FIFO parity error", -1, 1 },
970 { CCNTPERR, "PCI CMD channel count parity error", -1, 1 },
971 { CREQPERR, "PCI CMD channel request parity error", -1, 1 },
972 { CRSPPERR, "PCI CMD channel response parity error", -1, 1 },
973 { DCNTPERR, "PCI DMA channel count parity error", -1, 1 },
974 { DREQPERR, "PCI DMA channel request parity error", -1, 1 },
975 { DRSPPERR, "PCI DMA channel response parity error", -1, 1 },
976 { HCNTPERR, "PCI HMA channel count parity error", -1, 1 },
977 { HREQPERR, "PCI HMA channel request parity error", -1, 1 },
978 { HRSPPERR, "PCI HMA channel response parity error", -1, 1 },
979 { CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 },
980 { FIDPERR, "PCI FID parity error", -1, 1 },
981 { INTXCLRPERR, "PCI INTx clear parity error", -1, 1 },
982 { MATAGPERR, "PCI MA tag parity error", -1, 1 },
983 { PIOTAGPERR, "PCI PIO tag parity error", -1, 1 },
984 { RXCPLPERR, "PCI Rx completion parity error", -1, 1 },
985 { RXWRPERR, "PCI Rx write parity error", -1, 1 },
986 { RPLPERR, "PCI replay buffer parity error", -1, 1 },
987 { PCIESINT, "PCI core secondary fault", -1, 1 },
988 { PCIEPINT, "PCI core primary fault", -1, 1 },
989 { UNXSPLCPLERR, "PCI unexpected split completion error", -1, 0 },
990 { 0 }
991 };
992
993 int fat;
994
995 fat = t4_handle_intr_status(adapter,
996 PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS,
997 sysbus_intr_info) +
998 t4_handle_intr_status(adapter,
999 PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
1000 pcie_port_intr_info) +
1001 t4_handle_intr_status(adapter, PCIE_INT_CAUSE, pcie_intr_info);
1002 if (fat)
1003 t4_fatal_err(adapter);
1004}
1005
1006/*
1007 * TP interrupt handler.
1008 */
1009static void tp_intr_handler(struct adapter *adapter)
1010{
1011 static struct intr_info tp_intr_info[] = {
1012 { 0x3fffffff, "TP parity error", -1, 1 },
1013 { FLMTXFLSTEMPTY, "TP out of Tx pages", -1, 1 },
1014 { 0 }
1015 };
1016
1017 if (t4_handle_intr_status(adapter, TP_INT_CAUSE, tp_intr_info))
1018 t4_fatal_err(adapter);
1019}
1020
1021/*
1022 * SGE interrupt handler.
1023 */
1024static void sge_intr_handler(struct adapter *adapter)
1025{
1026 u64 v;
1027
1028 static struct intr_info sge_intr_info[] = {
1029 { ERR_CPL_EXCEED_IQE_SIZE,
1030 "SGE received CPL exceeding IQE size", -1, 1 },
1031 { ERR_INVALID_CIDX_INC,
1032 "SGE GTS CIDX increment too large", -1, 0 },
1033 { ERR_CPL_OPCODE_0, "SGE received 0-length CPL", -1, 0 },
1034 { ERR_DROPPED_DB, "SGE doorbell dropped", -1, 0 },
1035 { ERR_DATA_CPL_ON_HIGH_QID1 | ERR_DATA_CPL_ON_HIGH_QID0,
1036 "SGE IQID > 1023 received CPL for FL", -1, 0 },
1037 { ERR_BAD_DB_PIDX3, "SGE DBP 3 pidx increment too large", -1,
1038 0 },
1039 { ERR_BAD_DB_PIDX2, "SGE DBP 2 pidx increment too large", -1,
1040 0 },
1041 { ERR_BAD_DB_PIDX1, "SGE DBP 1 pidx increment too large", -1,
1042 0 },
1043 { ERR_BAD_DB_PIDX0, "SGE DBP 0 pidx increment too large", -1,
1044 0 },
1045 { ERR_ING_CTXT_PRIO,
1046 "SGE too many priority ingress contexts", -1, 0 },
1047 { ERR_EGR_CTXT_PRIO,
1048 "SGE too many priority egress contexts", -1, 0 },
1049 { INGRESS_SIZE_ERR, "SGE illegal ingress QID", -1, 0 },
1050 { EGRESS_SIZE_ERR, "SGE illegal egress QID", -1, 0 },
1051 { 0 }
1052 };
1053
1054 v = (u64)t4_read_reg(adapter, SGE_INT_CAUSE1) |
1055 ((u64)t4_read_reg(adapter, SGE_INT_CAUSE2) << 32);
1056 if (v) {
1057 dev_alert(adapter->pdev_dev, "SGE parity error (%#llx)\n",
1058 (unsigned long long)v);
1059 t4_write_reg(adapter, SGE_INT_CAUSE1, v);
1060 t4_write_reg(adapter, SGE_INT_CAUSE2, v >> 32);
1061 }
1062
1063 if (t4_handle_intr_status(adapter, SGE_INT_CAUSE3, sge_intr_info) ||
1064 v != 0)
1065 t4_fatal_err(adapter);
1066}
1067
1068/*
1069 * CIM interrupt handler.
1070 */
1071static void cim_intr_handler(struct adapter *adapter)
1072{
1073 static struct intr_info cim_intr_info[] = {
1074 { PREFDROPINT, "CIM control register prefetch drop", -1, 1 },
1075 { OBQPARERR, "CIM OBQ parity error", -1, 1 },
1076 { IBQPARERR, "CIM IBQ parity error", -1, 1 },
1077 { MBUPPARERR, "CIM mailbox uP parity error", -1, 1 },
1078 { MBHOSTPARERR, "CIM mailbox host parity error", -1, 1 },
1079 { TIEQINPARERRINT, "CIM TIEQ outgoing parity error", -1, 1 },
1080 { TIEQOUTPARERRINT, "CIM TIEQ incoming parity error", -1, 1 },
1081 { 0 }
1082 };
1083 static struct intr_info cim_upintr_info[] = {
1084 { RSVDSPACEINT, "CIM reserved space access", -1, 1 },
1085 { ILLTRANSINT, "CIM illegal transaction", -1, 1 },
1086 { ILLWRINT, "CIM illegal write", -1, 1 },
1087 { ILLRDINT, "CIM illegal read", -1, 1 },
1088 { ILLRDBEINT, "CIM illegal read BE", -1, 1 },
1089 { ILLWRBEINT, "CIM illegal write BE", -1, 1 },
1090 { SGLRDBOOTINT, "CIM single read from boot space", -1, 1 },
1091 { SGLWRBOOTINT, "CIM single write to boot space", -1, 1 },
1092 { BLKWRBOOTINT, "CIM block write to boot space", -1, 1 },
1093 { SGLRDFLASHINT, "CIM single read from flash space", -1, 1 },
1094 { SGLWRFLASHINT, "CIM single write to flash space", -1, 1 },
1095 { BLKWRFLASHINT, "CIM block write to flash space", -1, 1 },
1096 { SGLRDEEPROMINT, "CIM single EEPROM read", -1, 1 },
1097 { SGLWREEPROMINT, "CIM single EEPROM write", -1, 1 },
1098 { BLKRDEEPROMINT, "CIM block EEPROM read", -1, 1 },
1099 { BLKWREEPROMINT, "CIM block EEPROM write", -1, 1 },
1100 { SGLRDCTLINT , "CIM single read from CTL space", -1, 1 },
1101 { SGLWRCTLINT , "CIM single write to CTL space", -1, 1 },
1102 { BLKRDCTLINT , "CIM block read from CTL space", -1, 1 },
1103 { BLKWRCTLINT , "CIM block write to CTL space", -1, 1 },
1104 { SGLRDPLINT , "CIM single read from PL space", -1, 1 },
1105 { SGLWRPLINT , "CIM single write to PL space", -1, 1 },
1106 { BLKRDPLINT , "CIM block read from PL space", -1, 1 },
1107 { BLKWRPLINT , "CIM block write to PL space", -1, 1 },
1108 { REQOVRLOOKUPINT , "CIM request FIFO overwrite", -1, 1 },
1109 { RSPOVRLOOKUPINT , "CIM response FIFO overwrite", -1, 1 },
1110 { TIMEOUTINT , "CIM PIF timeout", -1, 1 },
1111 { TIMEOUTMAINT , "CIM PIF MA timeout", -1, 1 },
1112 { 0 }
1113 };
1114
1115 int fat;
1116
1117 fat = t4_handle_intr_status(adapter, CIM_HOST_INT_CAUSE,
1118 cim_intr_info) +
1119 t4_handle_intr_status(adapter, CIM_HOST_UPACC_INT_CAUSE,
1120 cim_upintr_info);
1121 if (fat)
1122 t4_fatal_err(adapter);
1123}
1124
1125/*
1126 * ULP RX interrupt handler.
1127 */
1128static void ulprx_intr_handler(struct adapter *adapter)
1129{
1130 static struct intr_info ulprx_intr_info[] = {
1131 { 0x7fffff, "ULPRX parity error", -1, 1 },
1132 { 0 }
1133 };
1134
1135 if (t4_handle_intr_status(adapter, ULP_RX_INT_CAUSE, ulprx_intr_info))
1136 t4_fatal_err(adapter);
1137}
1138
1139/*
1140 * ULP TX interrupt handler.
1141 */
1142static void ulptx_intr_handler(struct adapter *adapter)
1143{
1144 static struct intr_info ulptx_intr_info[] = {
1145 { PBL_BOUND_ERR_CH3, "ULPTX channel 3 PBL out of bounds", -1,
1146 0 },
1147 { PBL_BOUND_ERR_CH2, "ULPTX channel 2 PBL out of bounds", -1,
1148 0 },
1149 { PBL_BOUND_ERR_CH1, "ULPTX channel 1 PBL out of bounds", -1,
1150 0 },
1151 { PBL_BOUND_ERR_CH0, "ULPTX channel 0 PBL out of bounds", -1,
1152 0 },
1153 { 0xfffffff, "ULPTX parity error", -1, 1 },
1154 { 0 }
1155 };
1156
1157 if (t4_handle_intr_status(adapter, ULP_TX_INT_CAUSE, ulptx_intr_info))
1158 t4_fatal_err(adapter);
1159}
1160
1161/*
1162 * PM TX interrupt handler.
1163 */
1164static void pmtx_intr_handler(struct adapter *adapter)
1165{
1166 static struct intr_info pmtx_intr_info[] = {
1167 { PCMD_LEN_OVFL0, "PMTX channel 0 pcmd too large", -1, 1 },
1168 { PCMD_LEN_OVFL1, "PMTX channel 1 pcmd too large", -1, 1 },
1169 { PCMD_LEN_OVFL2, "PMTX channel 2 pcmd too large", -1, 1 },
1170 { ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1 },
1171 { PMTX_FRAMING_ERROR, "PMTX framing error", -1, 1 },
1172 { OESPI_PAR_ERROR, "PMTX oespi parity error", -1, 1 },
1173 { DB_OPTIONS_PAR_ERROR, "PMTX db_options parity error", -1, 1 },
1174 { ICSPI_PAR_ERROR, "PMTX icspi parity error", -1, 1 },
1175 { C_PCMD_PAR_ERROR, "PMTX c_pcmd parity error", -1, 1},
1176 { 0 }
1177 };
1178
1179 if (t4_handle_intr_status(adapter, PM_TX_INT_CAUSE, pmtx_intr_info))
1180 t4_fatal_err(adapter);
1181}
1182
1183/*
1184 * PM RX interrupt handler.
1185 */
1186static void pmrx_intr_handler(struct adapter *adapter)
1187{
1188 static struct intr_info pmrx_intr_info[] = {
1189 { ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1 },
1190 { PMRX_FRAMING_ERROR, "PMRX framing error", -1, 1 },
1191 { OCSPI_PAR_ERROR, "PMRX ocspi parity error", -1, 1 },
1192 { DB_OPTIONS_PAR_ERROR, "PMRX db_options parity error", -1, 1 },
1193 { IESPI_PAR_ERROR, "PMRX iespi parity error", -1, 1 },
1194 { E_PCMD_PAR_ERROR, "PMRX e_pcmd parity error", -1, 1},
1195 { 0 }
1196 };
1197
1198 if (t4_handle_intr_status(adapter, PM_RX_INT_CAUSE, pmrx_intr_info))
1199 t4_fatal_err(adapter);
1200}
1201
1202/*
1203 * CPL switch interrupt handler.
1204 */
1205static void cplsw_intr_handler(struct adapter *adapter)
1206{
1207 static struct intr_info cplsw_intr_info[] = {
1208 { CIM_OP_MAP_PERR, "CPLSW CIM op_map parity error", -1, 1 },
1209 { CIM_OVFL_ERROR, "CPLSW CIM overflow", -1, 1 },
1210 { TP_FRAMING_ERROR, "CPLSW TP framing error", -1, 1 },
1211 { SGE_FRAMING_ERROR, "CPLSW SGE framing error", -1, 1 },
1212 { CIM_FRAMING_ERROR, "CPLSW CIM framing error", -1, 1 },
1213 { ZERO_SWITCH_ERROR, "CPLSW no-switch error", -1, 1 },
1214 { 0 }
1215 };
1216
1217 if (t4_handle_intr_status(adapter, CPL_INTR_CAUSE, cplsw_intr_info))
1218 t4_fatal_err(adapter);
1219}
1220
1221/*
1222 * LE interrupt handler.
1223 */
1224static void le_intr_handler(struct adapter *adap)
1225{
1226 static struct intr_info le_intr_info[] = {
1227 { LIPMISS, "LE LIP miss", -1, 0 },
1228 { LIP0, "LE 0 LIP error", -1, 0 },
1229 { PARITYERR, "LE parity error", -1, 1 },
1230 { UNKNOWNCMD, "LE unknown command", -1, 1 },
1231 { REQQPARERR, "LE request queue parity error", -1, 1 },
1232 { 0 }
1233 };
1234
1235 if (t4_handle_intr_status(adap, LE_DB_INT_CAUSE, le_intr_info))
1236 t4_fatal_err(adap);
1237}
1238
1239/*
1240 * MPS interrupt handler.
1241 */
1242static void mps_intr_handler(struct adapter *adapter)
1243{
1244 static struct intr_info mps_rx_intr_info[] = {
1245 { 0xffffff, "MPS Rx parity error", -1, 1 },
1246 { 0 }
1247 };
1248 static struct intr_info mps_tx_intr_info[] = {
1249 { TPFIFO, "MPS Tx TP FIFO parity error", -1, 1 },
1250 { NCSIFIFO, "MPS Tx NC-SI FIFO parity error", -1, 1 },
1251 { TXDATAFIFO, "MPS Tx data FIFO parity error", -1, 1 },
1252 { TXDESCFIFO, "MPS Tx desc FIFO parity error", -1, 1 },
1253 { BUBBLE, "MPS Tx underflow", -1, 1 },
1254 { SECNTERR, "MPS Tx SOP/EOP error", -1, 1 },
1255 { FRMERR, "MPS Tx framing error", -1, 1 },
1256 { 0 }
1257 };
1258 static struct intr_info mps_trc_intr_info[] = {
1259 { FILTMEM, "MPS TRC filter parity error", -1, 1 },
1260 { PKTFIFO, "MPS TRC packet FIFO parity error", -1, 1 },
1261 { MISCPERR, "MPS TRC misc parity error", -1, 1 },
1262 { 0 }
1263 };
1264 static struct intr_info mps_stat_sram_intr_info[] = {
1265 { 0x1fffff, "MPS statistics SRAM parity error", -1, 1 },
1266 { 0 }
1267 };
1268 static struct intr_info mps_stat_tx_intr_info[] = {
1269 { 0xfffff, "MPS statistics Tx FIFO parity error", -1, 1 },
1270 { 0 }
1271 };
1272 static struct intr_info mps_stat_rx_intr_info[] = {
1273 { 0xffffff, "MPS statistics Rx FIFO parity error", -1, 1 },
1274 { 0 }
1275 };
1276 static struct intr_info mps_cls_intr_info[] = {
1277 { MATCHSRAM, "MPS match SRAM parity error", -1, 1 },
1278 { MATCHTCAM, "MPS match TCAM parity error", -1, 1 },
1279 { HASHSRAM, "MPS hash SRAM parity error", -1, 1 },
1280 { 0 }
1281 };
1282
1283 int fat;
1284
1285 fat = t4_handle_intr_status(adapter, MPS_RX_PERR_INT_CAUSE,
1286 mps_rx_intr_info) +
1287 t4_handle_intr_status(adapter, MPS_TX_INT_CAUSE,
1288 mps_tx_intr_info) +
1289 t4_handle_intr_status(adapter, MPS_TRC_INT_CAUSE,
1290 mps_trc_intr_info) +
1291 t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_SRAM,
1292 mps_stat_sram_intr_info) +
1293 t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_TX_FIFO,
1294 mps_stat_tx_intr_info) +
1295 t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_RX_FIFO,
1296 mps_stat_rx_intr_info) +
1297 t4_handle_intr_status(adapter, MPS_CLS_INT_CAUSE,
1298 mps_cls_intr_info);
1299
1300 t4_write_reg(adapter, MPS_INT_CAUSE, CLSINT | TRCINT |
1301 RXINT | TXINT | STATINT);
1302 t4_read_reg(adapter, MPS_INT_CAUSE); /* flush */
1303 if (fat)
1304 t4_fatal_err(adapter);
1305}
1306
1307#define MEM_INT_MASK (PERR_INT_CAUSE | ECC_CE_INT_CAUSE | ECC_UE_INT_CAUSE)
1308
1309/*
1310 * EDC/MC interrupt handler.
1311 */
static void mem_intr_handler(struct adapter *adapter, int idx)
{
	/* idx selects the memory controller; used to index this name table */
	static const char name[3][5] = { "EDC0", "EDC1", "MC" };

	unsigned int addr, cnt_addr, v;

	/* pick the cause and ECC-status registers for the selected memory */
	if (idx <= MEM_EDC1) {
		addr = EDC_REG(EDC_INT_CAUSE, idx);
		cnt_addr = EDC_REG(EDC_ECC_STATUS, idx);
	} else {
		addr = MC_INT_CAUSE;
		cnt_addr = MC_ECC_STATUS;
	}

	v = t4_read_reg(adapter, addr) & MEM_INT_MASK;
	if (v & PERR_INT_CAUSE)
		dev_alert(adapter->pdev_dev, "%s FIFO parity error\n",
			  name[idx]);
	if (v & ECC_CE_INT_CAUSE) {
		u32 cnt = ECC_CECNT_GET(t4_read_reg(adapter, cnt_addr));

		/* writing the mask resets the correctable-error counter */
		t4_write_reg(adapter, cnt_addr, ECC_CECNT_MASK);
		if (printk_ratelimit())
			dev_warn(adapter->pdev_dev,
				 "%u %s correctable ECC data error%s\n",
				 cnt, name[idx], cnt > 1 ? "s" : "");
	}
	if (v & ECC_UE_INT_CAUSE)
		dev_alert(adapter->pdev_dev,
			  "%s uncorrectable ECC data error\n", name[idx]);

	/* clear the handled cause bits; parity/uncorrectable ECC are fatal */
	t4_write_reg(adapter, addr, v);
	if (v & (PERR_INT_CAUSE | ECC_UE_INT_CAUSE))
		t4_fatal_err(adapter);
}
1347
1348/*
1349 * MA interrupt handler.
1350 */
static void ma_intr_handler(struct adapter *adap)
{
	u32 v, status = t4_read_reg(adap, MA_INT_CAUSE);

	if (status & MEM_PERR_INT_CAUSE)
		dev_alert(adap->pdev_dev,
			  "MA parity error, parity status %#x\n",
			  t4_read_reg(adap, MA_PARITY_ERROR_STATUS));
	if (status & MEM_WRAP_INT_CAUSE) {
		v = t4_read_reg(adap, MA_INT_WRAP_STATUS);
		dev_alert(adap->pdev_dev, "MA address wrap-around error by "
			  "client %u to address %#x\n",
			  MEM_WRAP_CLIENT_NUM_GET(v),
			  MEM_WRAP_ADDRESS_GET(v) << 4);
	}
	/* clear the cause bits; any MA interrupt is treated as fatal */
	t4_write_reg(adap, MA_INT_CAUSE, status);
	t4_fatal_err(adap);
}
1369
1370/*
1371 * SMB interrupt handler.
1372 */
1373static void smb_intr_handler(struct adapter *adap)
1374{
1375 static struct intr_info smb_intr_info[] = {
1376 { MSTTXFIFOPARINT, "SMB master Tx FIFO parity error", -1, 1 },
1377 { MSTRXFIFOPARINT, "SMB master Rx FIFO parity error", -1, 1 },
1378 { SLVFIFOPARINT, "SMB slave FIFO parity error", -1, 1 },
1379 { 0 }
1380 };
1381
1382 if (t4_handle_intr_status(adap, SMB_INT_CAUSE, smb_intr_info))
1383 t4_fatal_err(adap);
1384}
1385
1386/*
1387 * NC-SI interrupt handler.
1388 */
1389static void ncsi_intr_handler(struct adapter *adap)
1390{
1391 static struct intr_info ncsi_intr_info[] = {
1392 { CIM_DM_PRTY_ERR, "NC-SI CIM parity error", -1, 1 },
1393 { MPS_DM_PRTY_ERR, "NC-SI MPS parity error", -1, 1 },
1394 { TXFIFO_PRTY_ERR, "NC-SI Tx FIFO parity error", -1, 1 },
1395 { RXFIFO_PRTY_ERR, "NC-SI Rx FIFO parity error", -1, 1 },
1396 { 0 }
1397 };
1398
1399 if (t4_handle_intr_status(adap, NCSI_INT_CAUSE, ncsi_intr_info))
1400 t4_fatal_err(adap);
1401}
1402
1403/*
1404 * XGMAC interrupt handler.
1405 */
1406static void xgmac_intr_handler(struct adapter *adap, int port)
1407{
1408 u32 v = t4_read_reg(adap, PORT_REG(port, XGMAC_PORT_INT_CAUSE));
1409
1410 v &= TXFIFO_PRTY_ERR | RXFIFO_PRTY_ERR;
1411 if (!v)
1412 return;
1413
1414 if (v & TXFIFO_PRTY_ERR)
1415 dev_alert(adap->pdev_dev, "XGMAC %d Tx FIFO parity error\n",
1416 port);
1417 if (v & RXFIFO_PRTY_ERR)
1418 dev_alert(adap->pdev_dev, "XGMAC %d Rx FIFO parity error\n",
1419 port);
1420 t4_write_reg(adap, PORT_REG(port, XGMAC_PORT_INT_CAUSE), v);
1421 t4_fatal_err(adap);
1422}
1423
1424/*
1425 * PL interrupt handler.
1426 */
1427static void pl_intr_handler(struct adapter *adap)
1428{
1429 static struct intr_info pl_intr_info[] = {
1430 { FATALPERR, "T4 fatal parity error", -1, 1 },
1431 { PERRVFID, "PL VFID_MAP parity error", -1, 1 },
1432 { 0 }
1433 };
1434
1435 if (t4_handle_intr_status(adap, PL_PL_INT_CAUSE, pl_intr_info))
1436 t4_fatal_err(adap);
1437}
1438
1439#define PF_INTR_MASK (PFSW | PFCIM)
1440#define GLBL_INTR_MASK (CIM | MPS | PL | PCIE | MC | EDC0 | \
1441 EDC1 | LE | TP | MA | PM_TX | PM_RX | ULP_RX | \
1442 CPL_SWITCH | SGE | ULP_TX)
1443
1444/**
1445 * t4_slow_intr_handler - control path interrupt handler
1446 * @adapter: the adapter
1447 *
1448 * T4 interrupt handler for non-data global interrupt events, e.g., errors.
1449 * The designation 'slow' is because it involves register reads, while
1450 * data interrupts typically don't involve any MMIOs.
1451 */
int t4_slow_intr_handler(struct adapter *adapter)
{
	u32 cause = t4_read_reg(adapter, PL_INT_CAUSE);

	/* nothing we own is pending */
	if (!(cause & GLBL_INTR_MASK))
		return 0;
	/* demultiplex to the per-module handlers for each asserted bit */
	if (cause & CIM)
		cim_intr_handler(adapter);
	if (cause & MPS)
		mps_intr_handler(adapter);
	if (cause & NCSI)
		ncsi_intr_handler(adapter);
	if (cause & PL)
		pl_intr_handler(adapter);
	if (cause & SMB)
		smb_intr_handler(adapter);
	if (cause & XGMAC0)
		xgmac_intr_handler(adapter, 0);
	if (cause & XGMAC1)
		xgmac_intr_handler(adapter, 1);
	if (cause & XGMAC_KR0)
		xgmac_intr_handler(adapter, 2);
	if (cause & XGMAC_KR1)
		xgmac_intr_handler(adapter, 3);
	if (cause & PCIE)
		pcie_intr_handler(adapter);
	if (cause & MC)
		mem_intr_handler(adapter, MEM_MC);
	if (cause & EDC0)
		mem_intr_handler(adapter, MEM_EDC0);
	if (cause & EDC1)
		mem_intr_handler(adapter, MEM_EDC1);
	if (cause & LE)
		le_intr_handler(adapter);
	if (cause & TP)
		tp_intr_handler(adapter);
	if (cause & MA)
		ma_intr_handler(adapter);
	if (cause & PM_TX)
		pmtx_intr_handler(adapter);
	if (cause & PM_RX)
		pmrx_intr_handler(adapter);
	if (cause & ULP_RX)
		ulprx_intr_handler(adapter);
	if (cause & CPL_SWITCH)
		cplsw_intr_handler(adapter);
	if (cause & SGE)
		sge_intr_handler(adapter);
	if (cause & ULP_TX)
		ulptx_intr_handler(adapter);

	/* Clear the interrupts just processed for which we are the master. */
	t4_write_reg(adapter, PL_INT_CAUSE, cause & GLBL_INTR_MASK);
	(void) t4_read_reg(adapter, PL_INT_CAUSE); /* flush */
	return 1;
}
1508
1509/**
1510 * t4_intr_enable - enable interrupts
1511 * @adapter: the adapter whose interrupts should be enabled
1512 *
1513 * Enable PF-specific interrupts for the calling function and the top-level
1514 * interrupt concentrator for global interrupts. Interrupts are already
1515 * enabled at each module, here we just enable the roots of the interrupt
1516 * hierarchies.
1517 *
1518 * Note: this function should be called only when the driver manages
1519 * non PF-specific interrupts from the various HW modules. Only one PCI
1520 * function at a time should be doing this.
1521 */
void t4_intr_enable(struct adapter *adapter)
{
	u32 pf = SOURCEPF_GET(t4_read_reg(adapter, PL_WHOAMI));

	/* enable all the SGE error interrupts we handle */
	t4_write_reg(adapter, SGE_INT_ENABLE3, ERR_CPL_EXCEED_IQE_SIZE |
		     ERR_INVALID_CIDX_INC | ERR_CPL_OPCODE_0 |
		     ERR_DROPPED_DB | ERR_DATA_CPL_ON_HIGH_QID1 |
		     ERR_DATA_CPL_ON_HIGH_QID0 | ERR_BAD_DB_PIDX3 |
		     ERR_BAD_DB_PIDX2 | ERR_BAD_DB_PIDX1 |
		     ERR_BAD_DB_PIDX0 | ERR_ING_CTXT_PRIO |
		     ERR_EGR_CTXT_PRIO | INGRESS_SIZE_ERR |
		     EGRESS_SIZE_ERR);
	/* PF-specific root enable, then route global interrupts to this PF */
	t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE), PF_INTR_MASK);
	t4_set_reg_field(adapter, PL_INT_MAP0, 0, 1 << pf);
}
1537
1538/**
1539 * t4_intr_disable - disable interrupts
1540 * @adapter: the adapter whose interrupts should be disabled
1541 *
1542 * Disable interrupts. We only disable the top-level interrupt
1543 * concentrators. The caller must be a PCI function managing global
1544 * interrupts.
1545 */
void t4_intr_disable(struct adapter *adapter)
{
	u32 pf = SOURCEPF_GET(t4_read_reg(adapter, PL_WHOAMI));

	/* mirror of t4_intr_enable: drop the PF root enable and unroute
	 * global interrupts from this PF
	 */
	t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE), 0);
	t4_set_reg_field(adapter, PL_INT_MAP0, 1 << pf, 0);
}
1553
1554/**
1555 * t4_intr_clear - clear all interrupts
1556 * @adapter: the adapter whose interrupts should be cleared
1557 *
1558 * Clears all interrupts. The caller must be a PCI function managing
1559 * global interrupts.
1560 */
void t4_intr_clear(struct adapter *adapter)
{
	/* every per-module interrupt cause register the driver handles */
	static const unsigned int cause_reg[] = {
		SGE_INT_CAUSE1, SGE_INT_CAUSE2, SGE_INT_CAUSE3,
		PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS,
		PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
		PCIE_NONFAT_ERR, PCIE_INT_CAUSE,
		MC_INT_CAUSE,
		MA_INT_WRAP_STATUS, MA_PARITY_ERROR_STATUS, MA_INT_CAUSE,
		EDC_INT_CAUSE, EDC_REG(EDC_INT_CAUSE, 1),
		CIM_HOST_INT_CAUSE, CIM_HOST_UPACC_INT_CAUSE,
		MYPF_REG(CIM_PF_HOST_INT_CAUSE),
		TP_INT_CAUSE,
		ULP_RX_INT_CAUSE, ULP_TX_INT_CAUSE,
		PM_RX_INT_CAUSE, PM_TX_INT_CAUSE,
		MPS_RX_PERR_INT_CAUSE,
		CPL_INTR_CAUSE,
		MYPF_REG(PL_PF_INT_CAUSE),
		PL_PL_INT_CAUSE,
		LE_DB_INT_CAUSE,
	};

	unsigned int i;

	/* cause registers are write-1-to-clear */
	for (i = 0; i < ARRAY_SIZE(cause_reg); ++i)
		t4_write_reg(adapter, cause_reg[i], 0xffffffff);

	/* finally clear the top-level summary and flush the write */
	t4_write_reg(adapter, PL_INT_CAUSE, GLBL_INTR_MASK);
	(void) t4_read_reg(adapter, PL_INT_CAUSE); /* flush */
}
1591
1592/**
1593 * hash_mac_addr - return the hash value of a MAC address
1594 * @addr: the 48-bit Ethernet MAC address
1595 *
1596 * Hashes a MAC address according to the hash function used by HW inexact
1597 * (hash) address matching.
1598 */
1599static int hash_mac_addr(const u8 *addr)
1600{
1601 u32 a = ((u32)addr[0] << 16) | ((u32)addr[1] << 8) | addr[2];
1602 u32 b = ((u32)addr[3] << 16) | ((u32)addr[4] << 8) | addr[5];
1603 a ^= b;
1604 a ^= (a >> 12);
1605 a ^= (a >> 6);
1606 return a & 0x3f;
1607}
1608
1609/**
1610 * t4_config_rss_range - configure a portion of the RSS mapping table
1611 * @adapter: the adapter
1612 * @mbox: mbox to use for the FW command
1613 * @viid: virtual interface whose RSS subtable is to be written
1614 * @start: start entry in the table to write
1615 * @n: how many table entries to write
1616 * @rspq: values for the response queue lookup table
1617 * @nrspq: number of values in @rspq
1618 *
1619 * Programs the selected part of the VI's RSS mapping table with the
1620 * provided values. If @nrspq < @n the supplied values are used repeatedly
1621 * until the full table range is populated.
1622 *
1623 * The caller must ensure the values in @rspq are in the range allowed for
1624 * @viid.
1625 */
int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
			int start, int n, const u16 *rspq, unsigned int nrspq)
{
	int ret;
	/* rsp walks @rspq and wraps to the start when it hits rsp_end, so
	 * the supplied values repeat until @n entries have been written
	 */
	const u16 *rsp = rspq;
	const u16 *rsp_end = rspq + nrspq;
	struct fw_rss_ind_tbl_cmd cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.op_to_viid = htonl(FW_CMD_OP(FW_RSS_IND_TBL_CMD) |
			       FW_CMD_REQUEST | FW_CMD_WRITE |
			       FW_RSS_IND_TBL_CMD_VIID(viid));
	cmd.retval_len16 = htonl(FW_LEN16(cmd));

	/* each fw_rss_ind_tbl_cmd takes up to 32 entries */
	while (n > 0) {
		int nq = min(n, 32);
		__be32 *qp = &cmd.iq0_to_iq2;

		cmd.niqid = htons(nq);
		cmd.startidx = htons(start);

		start += nq;
		n -= nq;

		/* pack three 10-bit queue ids per 32-bit command word */
		while (nq > 0) {
			unsigned int v;

			v = FW_RSS_IND_TBL_CMD_IQ0(*rsp);
			if (++rsp >= rsp_end)
				rsp = rspq;
			v |= FW_RSS_IND_TBL_CMD_IQ1(*rsp);
			if (++rsp >= rsp_end)
				rsp = rspq;
			v |= FW_RSS_IND_TBL_CMD_IQ2(*rsp);
			if (++rsp >= rsp_end)
				rsp = rspq;

			*qp++ = htonl(v);
			nq -= 3;
		}

		ret = t4_wr_mbox(adapter, mbox, &cmd, sizeof(cmd), NULL);
		if (ret)
			return ret;
	}
	return 0;
}
1674
1675/**
1676 * t4_config_glbl_rss - configure the global RSS mode
1677 * @adapter: the adapter
1678 * @mbox: mbox to use for the FW command
1679 * @mode: global RSS mode
1680 * @flags: mode-specific flags
1681 *
1682 * Sets the global RSS mode.
1683 */
int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode,
		       unsigned int flags)
{
	struct fw_rss_glb_config_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_write = htonl(FW_CMD_OP(FW_RSS_GLB_CONFIG_CMD) |
			      FW_CMD_REQUEST | FW_CMD_WRITE);
	c.retval_len16 = htonl(FW_LEN16(c));
	/* only the manual and basic-virtual modes are supported; @flags is
	 * meaningful only for basic-virtual mode
	 */
	if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_MANUAL) {
		c.u.manual.mode_pkd = htonl(FW_RSS_GLB_CONFIG_CMD_MODE(mode));
	} else if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL) {
		c.u.basicvirtual.mode_pkd =
			htonl(FW_RSS_GLB_CONFIG_CMD_MODE(mode));
		c.u.basicvirtual.synmapen_to_hashtoeplitz = htonl(flags);
	} else
		return -EINVAL;
	return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
}
1703
1704/* Read an RSS table row */
/* Read an RSS table row */
static int rd_rss_row(struct adapter *adap, int row, u32 *val)
{
	/* request the row, then poll LKPTBLROWVLD until the data is valid */
	t4_write_reg(adap, TP_RSS_LKP_TABLE, 0xfff00000 | row);
	return t4_wait_op_done_val(adap, TP_RSS_LKP_TABLE, LKPTBLROWVLD, 1,
				   5, 0, val);
}
1711
1712/**
1713 * t4_read_rss - read the contents of the RSS mapping table
1714 * @adapter: the adapter
1715 * @map: holds the contents of the RSS mapping table
1716 *
1717 * Reads the contents of the RSS hash->queue mapping table.
1718 */
1719int t4_read_rss(struct adapter *adapter, u16 *map)
1720{
1721 u32 val;
1722 int i, ret;
1723
1724 for (i = 0; i < RSS_NENTRIES / 2; ++i) {
1725 ret = rd_rss_row(adapter, i, &val);
1726 if (ret)
1727 return ret;
1728 *map++ = LKPTBLQUEUE0_GET(val);
1729 *map++ = LKPTBLQUEUE1_GET(val);
1730 }
1731 return 0;
1732}
1733
1734/**
1735 * t4_tp_get_tcp_stats - read TP's TCP MIB counters
1736 * @adap: the adapter
1737 * @v4: holds the TCP/IP counter values
1738 * @v6: holds the TCP/IPv6 counter values
1739 *
1740 * Returns the values of TP's TCP/IP and TCP/IPv6 MIB counters.
1741 * Either @v4 or @v6 may be %NULL to skip the corresponding stats.
1742 */
void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4,
			 struct tp_tcp_stats *v6)
{
	/* buffer sized to hold the contiguous OUT_RST..RXT_SEG_LO MIB range */
	u32 val[TP_MIB_TCP_RXT_SEG_LO - TP_MIB_TCP_OUT_RST + 1];

#define STAT_IDX(x) ((TP_MIB_TCP_##x) - TP_MIB_TCP_OUT_RST)
#define STAT(x)     val[STAT_IDX(x)]
#define STAT64(x)   (((u64)STAT(x##_HI) << 32) | STAT(x##_LO))

	if (v4) {
		t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, val,
				 ARRAY_SIZE(val), TP_MIB_TCP_OUT_RST);
		v4->tcpOutRsts = STAT(OUT_RST);
		v4->tcpInSegs  = STAT64(IN_SEG);
		v4->tcpOutSegs = STAT64(OUT_SEG);
		v4->tcpRetransSegs = STAT64(RXT_SEG);
	}
	if (v6) {
		/* the v6 counters have the same layout at a different base */
		t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, val,
				 ARRAY_SIZE(val), TP_MIB_TCP_V6OUT_RST);
		v6->tcpOutRsts = STAT(OUT_RST);
		v6->tcpInSegs  = STAT64(IN_SEG);
		v6->tcpOutSegs = STAT64(OUT_SEG);
		v6->tcpRetransSegs = STAT64(RXT_SEG);
	}
#undef STAT64
#undef STAT
#undef STAT_IDX
}
1772
1773/**
1774 * t4_tp_get_err_stats - read TP's error MIB counters
1775 * @adap: the adapter
1776 * @st: holds the counter values
1777 *
1778 * Returns the values of TP's error counters.
1779 */
void t4_tp_get_err_stats(struct adapter *adap, struct tp_err_stats *st)
{
	/* each read fills a contiguous run of counters starting at the
	 * given MIB index; counts below match the struct field array sizes
	 */
	t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, st->macInErrs,
			 12, TP_MIB_MAC_IN_ERR_0);
	t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, st->tnlCongDrops,
			 8, TP_MIB_TNL_CNG_DROP_0);
	t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, st->tnlTxDrops,
			 4, TP_MIB_TNL_DROP_0);
	t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, st->ofldVlanDrops,
			 4, TP_MIB_OFD_VLN_DROP_0);
	t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, st->tcp6InErrs,
			 4, TP_MIB_TCP_V6IN_ERR_0);
	t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, &st->ofldNoNeigh,
			 2, TP_MIB_OFD_ARP_DROP);
}
1795
1796/**
1797 * t4_read_mtu_tbl - returns the values in the HW path MTU table
1798 * @adap: the adapter
1799 * @mtus: where to store the MTU values
1800 * @mtu_log: where to store the MTU base-2 log (may be %NULL)
1801 *
1802 * Reads the HW path MTU table.
1803 */
void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log)
{
	u32 v;
	int i;

	for (i = 0; i < NMTUS; ++i) {
		/* writing index 0xff selects entry i for read-back */
		t4_write_reg(adap, TP_MTU_TABLE,
			     MTUINDEX(0xff) | MTUVALUE(i));
		v = t4_read_reg(adap, TP_MTU_TABLE);
		mtus[i] = MTUVALUE_GET(v);
		if (mtu_log)
			mtu_log[i] = MTUWIDTH_GET(v);
	}
}
1818
1819/**
1820 * init_cong_ctrl - initialize congestion control parameters
1821 * @a: the alpha values for congestion control
1822 * @b: the beta values for congestion control
1823 *
1824 * Initialize the congestion control parameters.
1825 */
1826static void __devinit init_cong_ctrl(unsigned short *a, unsigned short *b)
1827{
1828 a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1;
1829 a[9] = 2;
1830 a[10] = 3;
1831 a[11] = 4;
1832 a[12] = 5;
1833 a[13] = 6;
1834 a[14] = 7;
1835 a[15] = 8;
1836 a[16] = 9;
1837 a[17] = 10;
1838 a[18] = 14;
1839 a[19] = 17;
1840 a[20] = 21;
1841 a[21] = 25;
1842 a[22] = 30;
1843 a[23] = 35;
1844 a[24] = 45;
1845 a[25] = 60;
1846 a[26] = 80;
1847 a[27] = 100;
1848 a[28] = 200;
1849 a[29] = 300;
1850 a[30] = 400;
1851 a[31] = 500;
1852
1853 b[0] = b[1] = b[2] = b[3] = b[4] = b[5] = b[6] = b[7] = b[8] = 0;
1854 b[9] = b[10] = 1;
1855 b[11] = b[12] = 2;
1856 b[13] = b[14] = b[15] = b[16] = 3;
1857 b[17] = b[18] = b[19] = b[20] = b[21] = 4;
1858 b[22] = b[23] = b[24] = b[25] = b[26] = b[27] = 5;
1859 b[28] = b[29] = 6;
1860 b[30] = b[31] = 7;
1861}
1862
1863/* The minimum additive increment value for the congestion control table */
1864#define CC_MIN_INCR 2U
1865
1866/**
1867 * t4_load_mtus - write the MTU and congestion control HW tables
1868 * @adap: the adapter
1869 * @mtus: the values for the MTU table
1870 * @alpha: the values for the congestion control alpha parameter
1871 * @beta: the values for the congestion control beta parameter
1872 *
1873 * Write the HW MTU table with the supplied MTUs and the high-speed
1874 * congestion control table with the supplied alpha, beta, and MTUs.
1875 * We write the two tables together because the additive increments
1876 * depend on the MTUs.
1877 */
void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
		  const unsigned short *alpha, const unsigned short *beta)
{
	/* nominal packets-per-window used to scale the additive increments */
	static const unsigned int avg_pkts[NCCTRL_WIN] = {
		2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
		896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
		28672, 40960, 57344, 81920, 114688, 163840, 229376
	};

	unsigned int i, w;

	for (i = 0; i < NMTUS; ++i) {
		unsigned int mtu = mtus[i];
		unsigned int log2 = fls(mtu);

		if (!(mtu & ((1 << log2) >> 2)))     /* round */
			log2--;
		t4_write_reg(adap, TP_MTU_TABLE, MTUINDEX(i) |
			     MTUWIDTH(log2) | MTUVALUE(mtu));

		/* program the congestion control entry for every window at
		 * this MTU; increments depend on the MTU, hence the nesting
		 */
		for (w = 0; w < NCCTRL_WIN; ++w) {
			unsigned int inc;

			inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
				  CC_MIN_INCR);

			t4_write_reg(adap, TP_CCTRL_TABLE, (i << 21) |
				     (w << 16) | (beta[w] << 13) | inc);
		}
	}
}
1909
1910/**
1911 * t4_set_trace_filter - configure one of the tracing filters
1912 * @adap: the adapter
1913 * @tp: the desired trace filter parameters
1914 * @idx: which filter to configure
1915 * @enable: whether to enable or disable the filter
1916 *
1917 * Configures one of the tracing filters available in HW. If @enable is
1918 * %0 @tp is not examined and may be %NULL.
1919 */
1920int t4_set_trace_filter(struct adapter *adap, const struct trace_params *tp,
1921 int idx, int enable)
1922{
1923 int i, ofst = idx * 4;
1924 u32 data_reg, mask_reg, cfg;
1925 u32 multitrc = TRCMULTIFILTER;
1926
1927 if (!enable) {
1928 t4_write_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A + ofst, 0);
1929 goto out;
1930 }
1931
1932 if (tp->port > 11 || tp->invert > 1 || tp->skip_len > 0x1f ||
1933 tp->skip_ofst > 0x1f || tp->min_len > 0x1ff ||
1934 tp->snap_len > 9600 || (idx && tp->snap_len > 256))
1935 return -EINVAL;
1936
1937 if (tp->snap_len > 256) { /* must be tracer 0 */
1938 if ((t4_read_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A + 4) |
1939 t4_read_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A + 8) |
1940 t4_read_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A + 12)) & TFEN)
1941 return -EINVAL; /* other tracers are enabled */
1942 multitrc = 0;
1943 } else if (idx) {
1944 i = t4_read_reg(adap, MPS_TRC_FILTER_MATCH_CTL_B);
1945 if (TFCAPTUREMAX_GET(i) > 256 &&
1946 (t4_read_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A) & TFEN))
1947 return -EINVAL;
1948 }
1949
1950 /* stop the tracer we'll be changing */
1951 t4_write_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A + ofst, 0);
1952
1953 /* disable tracing globally if running in the wrong single/multi mode */
1954 cfg = t4_read_reg(adap, MPS_TRC_CFG);
1955 if ((cfg & TRCEN) && multitrc != (cfg & TRCMULTIFILTER)) {
1956 t4_write_reg(adap, MPS_TRC_CFG, cfg ^ TRCEN);
1957 t4_read_reg(adap, MPS_TRC_CFG); /* flush */
1958 msleep(1);
1959 if (!(t4_read_reg(adap, MPS_TRC_CFG) & TRCFIFOEMPTY))
1960 return -ETIMEDOUT;
1961 }
1962 /*
1963 * At this point either the tracing is enabled and in the right mode or
1964 * disabled.
1965 */
1966
1967 idx *= (MPS_TRC_FILTER1_MATCH - MPS_TRC_FILTER0_MATCH);
1968 data_reg = MPS_TRC_FILTER0_MATCH + idx;
1969 mask_reg = MPS_TRC_FILTER0_DONT_CARE + idx;
1970
1971 for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) {
1972 t4_write_reg(adap, data_reg, tp->data[i]);
1973 t4_write_reg(adap, mask_reg, ~tp->mask[i]);
1974 }
1975 t4_write_reg(adap, MPS_TRC_FILTER_MATCH_CTL_B + ofst,
1976 TFCAPTUREMAX(tp->snap_len) |
1977 TFMINPKTSIZE(tp->min_len));
1978 t4_write_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A + ofst,
1979 TFOFFSET(tp->skip_ofst) | TFLENGTH(tp->skip_len) |
1980 TFPORT(tp->port) | TFEN |
1981 (tp->invert ? TFINVERTMATCH : 0));
1982
1983 cfg &= ~TRCMULTIFILTER;
1984 t4_write_reg(adap, MPS_TRC_CFG, cfg | TRCEN | multitrc);
1985out: t4_read_reg(adap, MPS_TRC_CFG); /* flush */
1986 return 0;
1987}
1988
1989/**
1990 * t4_get_trace_filter - query one of the tracing filters
1991 * @adap: the adapter
1992 * @tp: the current trace filter parameters
1993 * @idx: which trace filter to query
1994 * @enabled: non-zero if the filter is enabled
1995 *
1996 * Returns the current settings of one of the HW tracing filters.
1997 */
1998void t4_get_trace_filter(struct adapter *adap, struct trace_params *tp, int idx,
1999 int *enabled)
2000{
2001 u32 ctla, ctlb;
2002 int i, ofst = idx * 4;
2003 u32 data_reg, mask_reg;
2004
2005 ctla = t4_read_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A + ofst);
2006 ctlb = t4_read_reg(adap, MPS_TRC_FILTER_MATCH_CTL_B + ofst);
2007
2008 *enabled = !!(ctla & TFEN);
2009 tp->snap_len = TFCAPTUREMAX_GET(ctlb);
2010 tp->min_len = TFMINPKTSIZE_GET(ctlb);
2011 tp->skip_ofst = TFOFFSET_GET(ctla);
2012 tp->skip_len = TFLENGTH_GET(ctla);
2013 tp->invert = !!(ctla & TFINVERTMATCH);
2014 tp->port = TFPORT_GET(ctla);
2015
2016 ofst = (MPS_TRC_FILTER1_MATCH - MPS_TRC_FILTER0_MATCH) * idx;
2017 data_reg = MPS_TRC_FILTER0_MATCH + ofst;
2018 mask_reg = MPS_TRC_FILTER0_DONT_CARE + ofst;
2019
2020 for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) {
2021 tp->mask[i] = ~t4_read_reg(adap, mask_reg);
2022 tp->data[i] = t4_read_reg(adap, data_reg) & tp->mask[i];
2023 }
2024}
2025
2026/**
2027 * get_mps_bg_map - return the buffer groups associated with a port
2028 * @adap: the adapter
2029 * @idx: the port index
2030 *
2031 * Returns a bitmap indicating which MPS buffer groups are associated
2032 * with the given port. Bit i is set if buffer group i is used by the
2033 * port.
2034 */
2035static unsigned int get_mps_bg_map(struct adapter *adap, int idx)
2036{
2037 u32 n = NUMPORTS_GET(t4_read_reg(adap, MPS_CMN_CTL));
2038
2039 if (n == 0)
2040 return idx == 0 ? 0xf : 0;
2041 if (n == 1)
2042 return idx < 2 ? (3 << (2 * idx)) : 0;
2043 return 1 << idx;
2044}
2045
2046/**
2047 * t4_get_port_stats - collect port statistics
2048 * @adap: the adapter
2049 * @idx: the port index
2050 * @p: the stats structure to fill
2051 *
2052 * Collect statistics related to the given port from HW.
2053 */
2054void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p)
2055{
2056 u32 bgmap = get_mps_bg_map(adap, idx);
2057
2058#define GET_STAT(name) \
2059 t4_read_reg64(adap, PORT_REG(idx, MPS_PORT_STAT_##name##_L))
2060#define GET_STAT_COM(name) t4_read_reg64(adap, MPS_STAT_##name##_L)
2061
2062 p->tx_octets = GET_STAT(TX_PORT_BYTES);
2063 p->tx_frames = GET_STAT(TX_PORT_FRAMES);
2064 p->tx_bcast_frames = GET_STAT(TX_PORT_BCAST);
2065 p->tx_mcast_frames = GET_STAT(TX_PORT_MCAST);
2066 p->tx_ucast_frames = GET_STAT(TX_PORT_UCAST);
2067 p->tx_error_frames = GET_STAT(TX_PORT_ERROR);
2068 p->tx_frames_64 = GET_STAT(TX_PORT_64B);
2069 p->tx_frames_65_127 = GET_STAT(TX_PORT_65B_127B);
2070 p->tx_frames_128_255 = GET_STAT(TX_PORT_128B_255B);
2071 p->tx_frames_256_511 = GET_STAT(TX_PORT_256B_511B);
2072 p->tx_frames_512_1023 = GET_STAT(TX_PORT_512B_1023B);
2073 p->tx_frames_1024_1518 = GET_STAT(TX_PORT_1024B_1518B);
2074 p->tx_frames_1519_max = GET_STAT(TX_PORT_1519B_MAX);
2075 p->tx_drop = GET_STAT(TX_PORT_DROP);
2076 p->tx_pause = GET_STAT(TX_PORT_PAUSE);
2077 p->tx_ppp0 = GET_STAT(TX_PORT_PPP0);
2078 p->tx_ppp1 = GET_STAT(TX_PORT_PPP1);
2079 p->tx_ppp2 = GET_STAT(TX_PORT_PPP2);
2080 p->tx_ppp3 = GET_STAT(TX_PORT_PPP3);
2081 p->tx_ppp4 = GET_STAT(TX_PORT_PPP4);
2082 p->tx_ppp5 = GET_STAT(TX_PORT_PPP5);
2083 p->tx_ppp6 = GET_STAT(TX_PORT_PPP6);
2084 p->tx_ppp7 = GET_STAT(TX_PORT_PPP7);
2085
2086 p->rx_octets = GET_STAT(RX_PORT_BYTES);
2087 p->rx_frames = GET_STAT(RX_PORT_FRAMES);
2088 p->rx_bcast_frames = GET_STAT(RX_PORT_BCAST);
2089 p->rx_mcast_frames = GET_STAT(RX_PORT_MCAST);
2090 p->rx_ucast_frames = GET_STAT(RX_PORT_UCAST);
2091 p->rx_too_long = GET_STAT(RX_PORT_MTU_ERROR);
2092 p->rx_jabber = GET_STAT(RX_PORT_MTU_CRC_ERROR);
2093 p->rx_fcs_err = GET_STAT(RX_PORT_CRC_ERROR);
2094 p->rx_len_err = GET_STAT(RX_PORT_LEN_ERROR);
2095 p->rx_symbol_err = GET_STAT(RX_PORT_SYM_ERROR);
2096 p->rx_runt = GET_STAT(RX_PORT_LESS_64B);
2097 p->rx_frames_64 = GET_STAT(RX_PORT_64B);
2098 p->rx_frames_65_127 = GET_STAT(RX_PORT_65B_127B);
2099 p->rx_frames_128_255 = GET_STAT(RX_PORT_128B_255B);
2100 p->rx_frames_256_511 = GET_STAT(RX_PORT_256B_511B);
2101 p->rx_frames_512_1023 = GET_STAT(RX_PORT_512B_1023B);
2102 p->rx_frames_1024_1518 = GET_STAT(RX_PORT_1024B_1518B);
2103 p->rx_frames_1519_max = GET_STAT(RX_PORT_1519B_MAX);
2104 p->rx_pause = GET_STAT(RX_PORT_PAUSE);
2105 p->rx_ppp0 = GET_STAT(RX_PORT_PPP0);
2106 p->rx_ppp1 = GET_STAT(RX_PORT_PPP1);
2107 p->rx_ppp2 = GET_STAT(RX_PORT_PPP2);
2108 p->rx_ppp3 = GET_STAT(RX_PORT_PPP3);
2109 p->rx_ppp4 = GET_STAT(RX_PORT_PPP4);
2110 p->rx_ppp5 = GET_STAT(RX_PORT_PPP5);
2111 p->rx_ppp6 = GET_STAT(RX_PORT_PPP6);
2112 p->rx_ppp7 = GET_STAT(RX_PORT_PPP7);
2113
2114 p->rx_ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_DROP_FRAME) : 0;
2115 p->rx_ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_DROP_FRAME) : 0;
2116 p->rx_ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_DROP_FRAME) : 0;
2117 p->rx_ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_DROP_FRAME) : 0;
2118 p->rx_trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_TRUNC_FRAME) : 0;
2119 p->rx_trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_TRUNC_FRAME) : 0;
2120 p->rx_trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_TRUNC_FRAME) : 0;
2121 p->rx_trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_TRUNC_FRAME) : 0;
2122
2123#undef GET_STAT
2124#undef GET_STAT_COM
2125}
2126
2127/**
2128 * t4_get_lb_stats - collect loopback port statistics
2129 * @adap: the adapter
2130 * @idx: the loopback port index
2131 * @p: the stats structure to fill
2132 *
2133 * Return HW statistics for the given loopback port.
2134 */
2135void t4_get_lb_stats(struct adapter *adap, int idx, struct lb_port_stats *p)
2136{
2137 u32 bgmap = get_mps_bg_map(adap, idx);
2138
2139#define GET_STAT(name) \
2140 t4_read_reg64(adap, PORT_REG(idx, MPS_PORT_STAT_LB_PORT_##name##_L))
2141#define GET_STAT_COM(name) t4_read_reg64(adap, MPS_STAT_##name##_L)
2142
2143 p->octets = GET_STAT(BYTES);
2144 p->frames = GET_STAT(FRAMES);
2145 p->bcast_frames = GET_STAT(BCAST);
2146 p->mcast_frames = GET_STAT(MCAST);
2147 p->ucast_frames = GET_STAT(UCAST);
2148 p->error_frames = GET_STAT(ERROR);
2149
2150 p->frames_64 = GET_STAT(64B);
2151 p->frames_65_127 = GET_STAT(65B_127B);
2152 p->frames_128_255 = GET_STAT(128B_255B);
2153 p->frames_256_511 = GET_STAT(256B_511B);
2154 p->frames_512_1023 = GET_STAT(512B_1023B);
2155 p->frames_1024_1518 = GET_STAT(1024B_1518B);
2156 p->frames_1519_max = GET_STAT(1519B_MAX);
2157 p->drop = t4_read_reg(adap, PORT_REG(idx,
2158 MPS_PORT_STAT_LB_PORT_DROP_FRAMES));
2159
2160 p->ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_DROP_FRAME) : 0;
2161 p->ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_DROP_FRAME) : 0;
2162 p->ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_DROP_FRAME) : 0;
2163 p->ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_LB_DROP_FRAME) : 0;
2164 p->trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_TRUNC_FRAME) : 0;
2165 p->trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_TRUNC_FRAME) : 0;
2166 p->trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_TRUNC_FRAME) : 0;
2167 p->trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_LB_TRUNC_FRAME) : 0;
2168
2169#undef GET_STAT
2170#undef GET_STAT_COM
2171}
2172
2173/**
2174 * t4_wol_magic_enable - enable/disable magic packet WoL
2175 * @adap: the adapter
2176 * @port: the physical port index
2177 * @addr: MAC address expected in magic packets, %NULL to disable
2178 *
2179 * Enables/disables magic packet wake-on-LAN for the selected port.
2180 */
2181void t4_wol_magic_enable(struct adapter *adap, unsigned int port,
2182 const u8 *addr)
2183{
2184 if (addr) {
2185 t4_write_reg(adap, PORT_REG(port, XGMAC_PORT_MAGIC_MACID_LO),
2186 (addr[2] << 24) | (addr[3] << 16) |
2187 (addr[4] << 8) | addr[5]);
2188 t4_write_reg(adap, PORT_REG(port, XGMAC_PORT_MAGIC_MACID_HI),
2189 (addr[0] << 8) | addr[1]);
2190 }
2191 t4_set_reg_field(adap, PORT_REG(port, XGMAC_PORT_CFG2), MAGICEN,
2192 addr ? MAGICEN : 0);
2193}
2194
2195/**
2196 * t4_wol_pat_enable - enable/disable pattern-based WoL
2197 * @adap: the adapter
2198 * @port: the physical port index
2199 * @map: bitmap of which HW pattern filters to set
2200 * @mask0: byte mask for bytes 0-63 of a packet
2201 * @mask1: byte mask for bytes 64-127 of a packet
2202 * @crc: Ethernet CRC for selected bytes
2203 * @enable: enable/disable switch
2204 *
2205 * Sets the pattern filters indicated in @map to mask out the bytes
2206 * specified in @mask0/@mask1 in received packets and compare the CRC of
2207 * the resulting packet against @crc. If @enable is %true pattern-based
2208 * WoL is enabled, otherwise disabled.
2209 */
2210int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map,
2211 u64 mask0, u64 mask1, unsigned int crc, bool enable)
2212{
2213 int i;
2214
2215 if (!enable) {
2216 t4_set_reg_field(adap, PORT_REG(port, XGMAC_PORT_CFG2),
2217 PATEN, 0);
2218 return 0;
2219 }
2220 if (map > 0xff)
2221 return -EINVAL;
2222
2223#define EPIO_REG(name) PORT_REG(port, XGMAC_PORT_EPIO_##name)
2224
2225 t4_write_reg(adap, EPIO_REG(DATA1), mask0 >> 32);
2226 t4_write_reg(adap, EPIO_REG(DATA2), mask1);
2227 t4_write_reg(adap, EPIO_REG(DATA3), mask1 >> 32);
2228
2229 for (i = 0; i < NWOL_PAT; i++, map >>= 1) {
2230 if (!(map & 1))
2231 continue;
2232
2233 /* write byte masks */
2234 t4_write_reg(adap, EPIO_REG(DATA0), mask0);
2235 t4_write_reg(adap, EPIO_REG(OP), ADDRESS(i) | EPIOWR);
2236 t4_read_reg(adap, EPIO_REG(OP)); /* flush */
2237 if (t4_read_reg(adap, EPIO_REG(OP)) & BUSY)
2238 return -ETIMEDOUT;
2239
2240 /* write CRC */
2241 t4_write_reg(adap, EPIO_REG(DATA0), crc);
2242 t4_write_reg(adap, EPIO_REG(OP), ADDRESS(i + 32) | EPIOWR);
2243 t4_read_reg(adap, EPIO_REG(OP)); /* flush */
2244 if (t4_read_reg(adap, EPIO_REG(OP)) & BUSY)
2245 return -ETIMEDOUT;
2246 }
2247#undef EPIO_REG
2248
2249 t4_set_reg_field(adap, PORT_REG(port, XGMAC_PORT_CFG2), 0, PATEN);
2250 return 0;
2251}
2252
/*
 * Fill in the opcode/length header of a firmware command: the command
 * opcode, the REQUEST flag, the READ/WRITE direction, and the command
 * length in 16-byte units.  Note this initializes ONLY the two header
 * words -- callers are responsible for clearing/filling the rest of the
 * command structure.
 */
#define INIT_CMD(var, cmd, rd_wr) do { \
	(var).op_to_write = htonl(FW_CMD_OP(FW_##cmd##_CMD) | \
				  FW_CMD_REQUEST | FW_CMD_##rd_wr); \
	(var).retval_len16 = htonl(FW_LEN16(var)); \
} while (0)
2258
2259/**
2260 * t4_mdio_rd - read a PHY register through MDIO
2261 * @adap: the adapter
2262 * @mbox: mailbox to use for the FW command
2263 * @phy_addr: the PHY address
2264 * @mmd: the PHY MMD to access (0 for clause 22 PHYs)
2265 * @reg: the register to read
2266 * @valp: where to store the value
2267 *
2268 * Issues a FW command through the given mailbox to read a PHY register.
2269 */
2270int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
2271 unsigned int mmd, unsigned int reg, u16 *valp)
2272{
2273 int ret;
2274 struct fw_ldst_cmd c;
2275
2276 memset(&c, 0, sizeof(c));
2277 c.op_to_addrspace = htonl(FW_CMD_OP(FW_LDST_CMD) | FW_CMD_REQUEST |
2278 FW_CMD_READ | FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO));
2279 c.cycles_to_len16 = htonl(FW_LEN16(c));
2280 c.u.mdio.paddr_mmd = htons(FW_LDST_CMD_PADDR(phy_addr) |
2281 FW_LDST_CMD_MMD(mmd));
2282 c.u.mdio.raddr = htons(reg);
2283
2284 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
2285 if (ret == 0)
2286 *valp = ntohs(c.u.mdio.rval);
2287 return ret;
2288}
2289
2290/**
2291 * t4_mdio_wr - write a PHY register through MDIO
2292 * @adap: the adapter
2293 * @mbox: mailbox to use for the FW command
2294 * @phy_addr: the PHY address
2295 * @mmd: the PHY MMD to access (0 for clause 22 PHYs)
2296 * @reg: the register to write
2297 * @valp: value to write
2298 *
2299 * Issues a FW command through the given mailbox to write a PHY register.
2300 */
2301int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
2302 unsigned int mmd, unsigned int reg, u16 val)
2303{
2304 struct fw_ldst_cmd c;
2305
2306 memset(&c, 0, sizeof(c));
2307 c.op_to_addrspace = htonl(FW_CMD_OP(FW_LDST_CMD) | FW_CMD_REQUEST |
2308 FW_CMD_WRITE | FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO));
2309 c.cycles_to_len16 = htonl(FW_LEN16(c));
2310 c.u.mdio.paddr_mmd = htons(FW_LDST_CMD_PADDR(phy_addr) |
2311 FW_LDST_CMD_MMD(mmd));
2312 c.u.mdio.raddr = htons(reg);
2313 c.u.mdio.rval = htons(val);
2314
2315 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2316}
2317
2318/**
2319 * t4_fw_hello - establish communication with FW
2320 * @adap: the adapter
2321 * @mbox: mailbox to use for the FW command
2322 * @evt_mbox: mailbox to receive async FW events
2323 * @master: specifies the caller's willingness to be the device master
2324 * @state: returns the current device state
2325 *
2326 * Issues a command to establish communication with FW.
2327 */
2328int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox,
2329 enum dev_master master, enum dev_state *state)
2330{
2331 int ret;
2332 struct fw_hello_cmd c;
2333
2334 INIT_CMD(c, HELLO, WRITE);
2335 c.err_to_mbasyncnot = htonl(
2336 FW_HELLO_CMD_MASTERDIS(master == MASTER_CANT) |
2337 FW_HELLO_CMD_MASTERFORCE(master == MASTER_MUST) |
2338 FW_HELLO_CMD_MBMASTER(master == MASTER_MUST ? mbox : 0xff) |
2339 FW_HELLO_CMD_MBASYNCNOT(evt_mbox));
2340
2341 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
2342 if (ret == 0 && state) {
2343 u32 v = ntohl(c.err_to_mbasyncnot);
2344 if (v & FW_HELLO_CMD_INIT)
2345 *state = DEV_STATE_INIT;
2346 else if (v & FW_HELLO_CMD_ERR)
2347 *state = DEV_STATE_ERR;
2348 else
2349 *state = DEV_STATE_UNINIT;
2350 }
2351 return ret;
2352}
2353
2354/**
2355 * t4_fw_bye - end communication with FW
2356 * @adap: the adapter
2357 * @mbox: mailbox to use for the FW command
2358 *
2359 * Issues a command to terminate communication with FW.
2360 */
2361int t4_fw_bye(struct adapter *adap, unsigned int mbox)
2362{
2363 struct fw_bye_cmd c;
2364
2365 INIT_CMD(c, BYE, WRITE);
2366 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2367}
2368
2369/**
2370 * t4_init_cmd - ask FW to initialize the device
2371 * @adap: the adapter
2372 * @mbox: mailbox to use for the FW command
2373 *
2374 * Issues a command to FW to partially initialize the device. This
2375 * performs initialization that generally doesn't depend on user input.
2376 */
2377int t4_early_init(struct adapter *adap, unsigned int mbox)
2378{
2379 struct fw_initialize_cmd c;
2380
2381 INIT_CMD(c, INITIALIZE, WRITE);
2382 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2383}
2384
2385/**
2386 * t4_fw_reset - issue a reset to FW
2387 * @adap: the adapter
2388 * @mbox: mailbox to use for the FW command
2389 * @reset: specifies the type of reset to perform
2390 *
2391 * Issues a reset command of the specified type to FW.
2392 */
2393int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset)
2394{
2395 struct fw_reset_cmd c;
2396
2397 INIT_CMD(c, RESET, WRITE);
2398 c.val = htonl(reset);
2399 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2400}
2401
2402/**
2403 * t4_query_params - query FW or device parameters
2404 * @adap: the adapter
2405 * @mbox: mailbox to use for the FW command
2406 * @pf: the PF
2407 * @vf: the VF
2408 * @nparams: the number of parameters
2409 * @params: the parameter names
2410 * @val: the parameter values
2411 *
2412 * Reads the value of FW or device parameters. Up to 7 parameters can be
2413 * queried at once.
2414 */
2415int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
2416 unsigned int vf, unsigned int nparams, const u32 *params,
2417 u32 *val)
2418{
2419 int i, ret;
2420 struct fw_params_cmd c;
2421 __be32 *p = &c.param[0].mnem;
2422
2423 if (nparams > 7)
2424 return -EINVAL;
2425
2426 memset(&c, 0, sizeof(c));
2427 c.op_to_vfn = htonl(FW_CMD_OP(FW_PARAMS_CMD) | FW_CMD_REQUEST |
2428 FW_CMD_READ | FW_PARAMS_CMD_PFN(pf) |
2429 FW_PARAMS_CMD_VFN(vf));
2430 c.retval_len16 = htonl(FW_LEN16(c));
2431 for (i = 0; i < nparams; i++, p += 2)
2432 *p = htonl(*params++);
2433
2434 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
2435 if (ret == 0)
2436 for (i = 0, p = &c.param[0].val; i < nparams; i++, p += 2)
2437 *val++ = ntohl(*p);
2438 return ret;
2439}
2440
2441/**
2442 * t4_set_params - sets FW or device parameters
2443 * @adap: the adapter
2444 * @mbox: mailbox to use for the FW command
2445 * @pf: the PF
2446 * @vf: the VF
2447 * @nparams: the number of parameters
2448 * @params: the parameter names
2449 * @val: the parameter values
2450 *
2451 * Sets the value of FW or device parameters. Up to 7 parameters can be
2452 * specified at once.
2453 */
2454int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
2455 unsigned int vf, unsigned int nparams, const u32 *params,
2456 const u32 *val)
2457{
2458 struct fw_params_cmd c;
2459 __be32 *p = &c.param[0].mnem;
2460
2461 if (nparams > 7)
2462 return -EINVAL;
2463
2464 memset(&c, 0, sizeof(c));
2465 c.op_to_vfn = htonl(FW_CMD_OP(FW_PARAMS_CMD) | FW_CMD_REQUEST |
2466 FW_CMD_WRITE | FW_PARAMS_CMD_PFN(pf) |
2467 FW_PARAMS_CMD_VFN(vf));
2468 c.retval_len16 = htonl(FW_LEN16(c));
2469 while (nparams--) {
2470 *p++ = htonl(*params++);
2471 *p++ = htonl(*val++);
2472 }
2473
2474 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2475}
2476
2477/**
2478 * t4_cfg_pfvf - configure PF/VF resource limits
2479 * @adap: the adapter
2480 * @mbox: mailbox to use for the FW command
2481 * @pf: the PF being configured
2482 * @vf: the VF being configured
2483 * @txq: the max number of egress queues
2484 * @txq_eth_ctrl: the max number of egress Ethernet or control queues
2485 * @rxqi: the max number of interrupt-capable ingress queues
2486 * @rxq: the max number of interruptless ingress queues
2487 * @tc: the PCI traffic class
2488 * @vi: the max number of virtual interfaces
2489 * @cmask: the channel access rights mask for the PF/VF
2490 * @pmask: the port access rights mask for the PF/VF
2491 * @nexact: the maximum number of exact MPS filters
2492 * @rcaps: read capabilities
2493 * @wxcaps: write/execute capabilities
2494 *
2495 * Configures resource limits and capabilities for a physical or virtual
2496 * function.
2497 */
2498int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf,
2499 unsigned int vf, unsigned int txq, unsigned int txq_eth_ctrl,
2500 unsigned int rxqi, unsigned int rxq, unsigned int tc,
2501 unsigned int vi, unsigned int cmask, unsigned int pmask,
2502 unsigned int nexact, unsigned int rcaps, unsigned int wxcaps)
2503{
2504 struct fw_pfvf_cmd c;
2505
2506 memset(&c, 0, sizeof(c));
2507 c.op_to_vfn = htonl(FW_CMD_OP(FW_PFVF_CMD) | FW_CMD_REQUEST |
2508 FW_CMD_WRITE | FW_PFVF_CMD_PFN(pf) |
2509 FW_PFVF_CMD_VFN(vf));
2510 c.retval_len16 = htonl(FW_LEN16(c));
2511 c.niqflint_niq = htonl(FW_PFVF_CMD_NIQFLINT(rxqi) |
2512 FW_PFVF_CMD_NIQ(rxq));
2513 c.cmask_to_neq = htonl(FW_PFVF_CMD_CMASK(cmask) |
2514 FW_PFVF_CMD_PMASK(pmask) |
2515 FW_PFVF_CMD_NEQ(txq));
2516 c.tc_to_nexactf = htonl(FW_PFVF_CMD_TC(tc) | FW_PFVF_CMD_NVI(vi) |
2517 FW_PFVF_CMD_NEXACTF(nexact));
2518 c.r_caps_to_nethctrl = htonl(FW_PFVF_CMD_R_CAPS(rcaps) |
2519 FW_PFVF_CMD_WX_CAPS(wxcaps) |
2520 FW_PFVF_CMD_NETHCTRL(txq_eth_ctrl));
2521 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2522}
2523
2524/**
2525 * t4_alloc_vi - allocate a virtual interface
2526 * @adap: the adapter
2527 * @mbox: mailbox to use for the FW command
2528 * @port: physical port associated with the VI
2529 * @pf: the PF owning the VI
2530 * @vf: the VF owning the VI
2531 * @nmac: number of MAC addresses needed (1 to 5)
2532 * @mac: the MAC addresses of the VI
2533 * @rss_size: size of RSS table slice associated with this VI
2534 *
2535 * Allocates a virtual interface for the given physical port. If @mac is
2536 * not %NULL it contains the MAC addresses of the VI as assigned by FW.
2537 * @mac should be large enough to hold @nmac Ethernet addresses, they are
2538 * stored consecutively so the space needed is @nmac * 6 bytes.
2539 * Returns a negative error number or the non-negative VI id.
2540 */
2541int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
2542 unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac,
2543 unsigned int *rss_size)
2544{
2545 int ret;
2546 struct fw_vi_cmd c;
2547
2548 memset(&c, 0, sizeof(c));
2549 c.op_to_vfn = htonl(FW_CMD_OP(FW_VI_CMD) | FW_CMD_REQUEST |
2550 FW_CMD_WRITE | FW_CMD_EXEC |
2551 FW_VI_CMD_PFN(pf) | FW_VI_CMD_VFN(vf));
2552 c.alloc_to_len16 = htonl(FW_VI_CMD_ALLOC | FW_LEN16(c));
2553 c.portid_pkd = FW_VI_CMD_PORTID(port);
2554 c.nmac = nmac - 1;
2555
2556 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
2557 if (ret)
2558 return ret;
2559
2560 if (mac) {
2561 memcpy(mac, c.mac, sizeof(c.mac));
2562 switch (nmac) {
2563 case 5:
2564 memcpy(mac + 24, c.nmac3, sizeof(c.nmac3));
2565 case 4:
2566 memcpy(mac + 18, c.nmac2, sizeof(c.nmac2));
2567 case 3:
2568 memcpy(mac + 12, c.nmac1, sizeof(c.nmac1));
2569 case 2:
2570 memcpy(mac + 6, c.nmac0, sizeof(c.nmac0));
2571 }
2572 }
2573 if (rss_size)
2574 *rss_size = FW_VI_CMD_RSSSIZE_GET(ntohs(c.rsssize_pkd));
2575 return ntohs(c.viid_pkd);
2576}
2577
2578/**
2579 * t4_free_vi - free a virtual interface
2580 * @adap: the adapter
2581 * @mbox: mailbox to use for the FW command
2582 * @pf: the PF owning the VI
2583 * @vf: the VF owning the VI
2584 * @viid: virtual interface identifiler
2585 *
2586 * Free a previously allocated virtual interface.
2587 */
2588int t4_free_vi(struct adapter *adap, unsigned int mbox, unsigned int pf,
2589 unsigned int vf, unsigned int viid)
2590{
2591 struct fw_vi_cmd c;
2592
2593 memset(&c, 0, sizeof(c));
2594 c.op_to_vfn = htonl(FW_CMD_OP(FW_VI_CMD) | FW_CMD_REQUEST |
2595 FW_CMD_EXEC | FW_VI_CMD_PFN(pf) |
2596 FW_VI_CMD_VFN(vf));
2597 c.alloc_to_len16 = htonl(FW_VI_CMD_FREE | FW_LEN16(c));
2598 c.viid_pkd = htons(FW_VI_CMD_VIID(viid));
2599 return t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
2600}
2601
2602/**
2603 * t4_set_rxmode - set Rx properties of a virtual interface
2604 * @adap: the adapter
2605 * @mbox: mailbox to use for the FW command
2606 * @viid: the VI id
2607 * @mtu: the new MTU or -1
2608 * @promisc: 1 to enable promiscuous mode, 0 to disable it, -1 no change
2609 * @all_multi: 1 to enable all-multi mode, 0 to disable it, -1 no change
2610 * @bcast: 1 to enable broadcast Rx, 0 to disable it, -1 no change
f8f5aafa 2611 * @vlanex: 1 to enable HW VLAN extraction, 0 to disable it, -1 no change
56d36be4
DM
2612 * @sleep_ok: if true we may sleep while awaiting command completion
2613 *
2614 * Sets Rx properties of a virtual interface.
2615 */
2616int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
f8f5aafa
DM
2617 int mtu, int promisc, int all_multi, int bcast, int vlanex,
2618 bool sleep_ok)
56d36be4
DM
2619{
2620 struct fw_vi_rxmode_cmd c;
2621
2622 /* convert to FW values */
2623 if (mtu < 0)
2624 mtu = FW_RXMODE_MTU_NO_CHG;
2625 if (promisc < 0)
2626 promisc = FW_VI_RXMODE_CMD_PROMISCEN_MASK;
2627 if (all_multi < 0)
2628 all_multi = FW_VI_RXMODE_CMD_ALLMULTIEN_MASK;
2629 if (bcast < 0)
2630 bcast = FW_VI_RXMODE_CMD_BROADCASTEN_MASK;
f8f5aafa
DM
2631 if (vlanex < 0)
2632 vlanex = FW_VI_RXMODE_CMD_VLANEXEN_MASK;
56d36be4
DM
2633
2634 memset(&c, 0, sizeof(c));
2635 c.op_to_viid = htonl(FW_CMD_OP(FW_VI_RXMODE_CMD) | FW_CMD_REQUEST |
2636 FW_CMD_WRITE | FW_VI_RXMODE_CMD_VIID(viid));
2637 c.retval_len16 = htonl(FW_LEN16(c));
f8f5aafa
DM
2638 c.mtu_to_vlanexen = htonl(FW_VI_RXMODE_CMD_MTU(mtu) |
2639 FW_VI_RXMODE_CMD_PROMISCEN(promisc) |
2640 FW_VI_RXMODE_CMD_ALLMULTIEN(all_multi) |
2641 FW_VI_RXMODE_CMD_BROADCASTEN(bcast) |
2642 FW_VI_RXMODE_CMD_VLANEXEN(vlanex));
56d36be4
DM
2643 return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
2644}
2645
2646/**
2647 * t4_alloc_mac_filt - allocates exact-match filters for MAC addresses
2648 * @adap: the adapter
2649 * @mbox: mailbox to use for the FW command
2650 * @viid: the VI id
2651 * @free: if true any existing filters for this VI id are first removed
2652 * @naddr: the number of MAC addresses to allocate filters for (up to 7)
2653 * @addr: the MAC address(es)
2654 * @idx: where to store the index of each allocated filter
2655 * @hash: pointer to hash address filter bitmap
2656 * @sleep_ok: call is allowed to sleep
2657 *
2658 * Allocates an exact-match filter for each of the supplied addresses and
2659 * sets it to the corresponding address. If @idx is not %NULL it should
2660 * have at least @naddr entries, each of which will be set to the index of
2661 * the filter allocated for the corresponding MAC address. If a filter
2662 * could not be allocated for an address its index is set to 0xffff.
2663 * If @hash is not %NULL addresses that fail to allocate an exact filter
2664 * are hashed and update the hash filter bitmap pointed at by @hash.
2665 *
2666 * Returns a negative error number or the number of filters allocated.
2667 */
2668int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox,
2669 unsigned int viid, bool free, unsigned int naddr,
2670 const u8 **addr, u16 *idx, u64 *hash, bool sleep_ok)
2671{
2672 int i, ret;
2673 struct fw_vi_mac_cmd c;
2674 struct fw_vi_mac_exact *p;
2675
2676 if (naddr > 7)
2677 return -EINVAL;
2678
2679 memset(&c, 0, sizeof(c));
2680 c.op_to_viid = htonl(FW_CMD_OP(FW_VI_MAC_CMD) | FW_CMD_REQUEST |
2681 FW_CMD_WRITE | (free ? FW_CMD_EXEC : 0) |
2682 FW_VI_MAC_CMD_VIID(viid));
2683 c.freemacs_to_len16 = htonl(FW_VI_MAC_CMD_FREEMACS(free) |
2684 FW_CMD_LEN16((naddr + 2) / 2));
2685
2686 for (i = 0, p = c.u.exact; i < naddr; i++, p++) {
2687 p->valid_to_idx = htons(FW_VI_MAC_CMD_VALID |
2688 FW_VI_MAC_CMD_IDX(FW_VI_MAC_ADD_MAC));
2689 memcpy(p->macaddr, addr[i], sizeof(p->macaddr));
2690 }
2691
2692 ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok);
2693 if (ret)
2694 return ret;
2695
2696 for (i = 0, p = c.u.exact; i < naddr; i++, p++) {
2697 u16 index = FW_VI_MAC_CMD_IDX_GET(ntohs(p->valid_to_idx));
2698
2699 if (idx)
2700 idx[i] = index >= NEXACT_MAC ? 0xffff : index;
2701 if (index < NEXACT_MAC)
2702 ret++;
2703 else if (hash)
2704 *hash |= (1 << hash_mac_addr(addr[i]));
2705 }
2706 return ret;
2707}
2708
2709/**
2710 * t4_change_mac - modifies the exact-match filter for a MAC address
2711 * @adap: the adapter
2712 * @mbox: mailbox to use for the FW command
2713 * @viid: the VI id
2714 * @idx: index of existing filter for old value of MAC address, or -1
2715 * @addr: the new MAC address value
2716 * @persist: whether a new MAC allocation should be persistent
2717 * @add_smt: if true also add the address to the HW SMT
2718 *
2719 * Modifies an exact-match filter and sets it to the new MAC address.
2720 * Note that in general it is not possible to modify the value of a given
2721 * filter so the generic way to modify an address filter is to free the one
2722 * being used by the old address value and allocate a new filter for the
2723 * new address value. @idx can be -1 if the address is a new addition.
2724 *
2725 * Returns a negative error number or the index of the filter with the new
2726 * MAC value.
2727 */
2728int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
2729 int idx, const u8 *addr, bool persist, bool add_smt)
2730{
2731 int ret, mode;
2732 struct fw_vi_mac_cmd c;
2733 struct fw_vi_mac_exact *p = c.u.exact;
2734
2735 if (idx < 0) /* new allocation */
2736 idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC;
2737 mode = add_smt ? FW_VI_MAC_SMT_AND_MPSTCAM : FW_VI_MAC_MPS_TCAM_ENTRY;
2738
2739 memset(&c, 0, sizeof(c));
2740 c.op_to_viid = htonl(FW_CMD_OP(FW_VI_MAC_CMD) | FW_CMD_REQUEST |
2741 FW_CMD_WRITE | FW_VI_MAC_CMD_VIID(viid));
2742 c.freemacs_to_len16 = htonl(FW_CMD_LEN16(1));
2743 p->valid_to_idx = htons(FW_VI_MAC_CMD_VALID |
2744 FW_VI_MAC_CMD_SMAC_RESULT(mode) |
2745 FW_VI_MAC_CMD_IDX(idx));
2746 memcpy(p->macaddr, addr, sizeof(p->macaddr));
2747
2748 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
2749 if (ret == 0) {
2750 ret = FW_VI_MAC_CMD_IDX_GET(ntohs(p->valid_to_idx));
2751 if (ret >= NEXACT_MAC)
2752 ret = -ENOMEM;
2753 }
2754 return ret;
2755}
2756
2757/**
2758 * t4_set_addr_hash - program the MAC inexact-match hash filter
2759 * @adap: the adapter
2760 * @mbox: mailbox to use for the FW command
2761 * @viid: the VI id
2762 * @ucast: whether the hash filter should also match unicast addresses
2763 * @vec: the value to be written to the hash filter
2764 * @sleep_ok: call is allowed to sleep
2765 *
2766 * Sets the 64-bit inexact-match hash filter for a virtual interface.
2767 */
2768int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid,
2769 bool ucast, u64 vec, bool sleep_ok)
2770{
2771 struct fw_vi_mac_cmd c;
2772
2773 memset(&c, 0, sizeof(c));
2774 c.op_to_viid = htonl(FW_CMD_OP(FW_VI_MAC_CMD) | FW_CMD_REQUEST |
2775 FW_CMD_WRITE | FW_VI_ENABLE_CMD_VIID(viid));
2776 c.freemacs_to_len16 = htonl(FW_VI_MAC_CMD_HASHVECEN |
2777 FW_VI_MAC_CMD_HASHUNIEN(ucast) |
2778 FW_CMD_LEN16(1));
2779 c.u.hash.hashvec = cpu_to_be64(vec);
2780 return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
2781}
2782
2783/**
2784 * t4_enable_vi - enable/disable a virtual interface
2785 * @adap: the adapter
2786 * @mbox: mailbox to use for the FW command
2787 * @viid: the VI id
2788 * @rx_en: 1=enable Rx, 0=disable Rx
2789 * @tx_en: 1=enable Tx, 0=disable Tx
2790 *
2791 * Enables/disables a virtual interface.
2792 */
2793int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid,
2794 bool rx_en, bool tx_en)
2795{
2796 struct fw_vi_enable_cmd c;
2797
2798 memset(&c, 0, sizeof(c));
2799 c.op_to_viid = htonl(FW_CMD_OP(FW_VI_ENABLE_CMD) | FW_CMD_REQUEST |
2800 FW_CMD_EXEC | FW_VI_ENABLE_CMD_VIID(viid));
2801 c.ien_to_len16 = htonl(FW_VI_ENABLE_CMD_IEN(rx_en) |
2802 FW_VI_ENABLE_CMD_EEN(tx_en) | FW_LEN16(c));
2803 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2804}
2805
2806/**
2807 * t4_identify_port - identify a VI's port by blinking its LED
2808 * @adap: the adapter
2809 * @mbox: mailbox to use for the FW command
2810 * @viid: the VI id
2811 * @nblinks: how many times to blink LED at 2.5 Hz
2812 *
2813 * Identifies a VI's port by blinking its LED.
2814 */
2815int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid,
2816 unsigned int nblinks)
2817{
2818 struct fw_vi_enable_cmd c;
2819
2820 c.op_to_viid = htonl(FW_CMD_OP(FW_VI_ENABLE_CMD) | FW_CMD_REQUEST |
2821 FW_CMD_EXEC | FW_VI_ENABLE_CMD_VIID(viid));
2822 c.ien_to_len16 = htonl(FW_VI_ENABLE_CMD_LED | FW_LEN16(c));
2823 c.blinkdur = htons(nblinks);
2824 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2825}
2826
2827/**
2828 * t4_iq_start_stop - enable/disable an ingress queue and its FLs
2829 * @adap: the adapter
2830 * @mbox: mailbox to use for the FW command
2831 * @start: %true to enable the queues, %false to disable them
2832 * @pf: the PF owning the queues
2833 * @vf: the VF owning the queues
2834 * @iqid: ingress queue id
2835 * @fl0id: FL0 queue id or 0xffff if no attached FL0
2836 * @fl1id: FL1 queue id or 0xffff if no attached FL1
2837 *
2838 * Starts or stops an ingress queue and its associated FLs, if any.
2839 */
2840int t4_iq_start_stop(struct adapter *adap, unsigned int mbox, bool start,
2841 unsigned int pf, unsigned int vf, unsigned int iqid,
2842 unsigned int fl0id, unsigned int fl1id)
2843{
2844 struct fw_iq_cmd c;
2845
2846 memset(&c, 0, sizeof(c));
2847 c.op_to_vfn = htonl(FW_CMD_OP(FW_IQ_CMD) | FW_CMD_REQUEST |
2848 FW_CMD_EXEC | FW_IQ_CMD_PFN(pf) |
2849 FW_IQ_CMD_VFN(vf));
2850 c.alloc_to_len16 = htonl(FW_IQ_CMD_IQSTART(start) |
2851 FW_IQ_CMD_IQSTOP(!start) | FW_LEN16(c));
2852 c.iqid = htons(iqid);
2853 c.fl0id = htons(fl0id);
2854 c.fl1id = htons(fl1id);
2855 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2856}
2857
2858/**
2859 * t4_iq_free - free an ingress queue and its FLs
2860 * @adap: the adapter
2861 * @mbox: mailbox to use for the FW command
2862 * @pf: the PF owning the queues
2863 * @vf: the VF owning the queues
2864 * @iqtype: the ingress queue type
2865 * @iqid: ingress queue id
2866 * @fl0id: FL0 queue id or 0xffff if no attached FL0
2867 * @fl1id: FL1 queue id or 0xffff if no attached FL1
2868 *
2869 * Frees an ingress queue and its associated FLs, if any.
2870 */
2871int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
2872 unsigned int vf, unsigned int iqtype, unsigned int iqid,
2873 unsigned int fl0id, unsigned int fl1id)
2874{
2875 struct fw_iq_cmd c;
2876
2877 memset(&c, 0, sizeof(c));
2878 c.op_to_vfn = htonl(FW_CMD_OP(FW_IQ_CMD) | FW_CMD_REQUEST |
2879 FW_CMD_EXEC | FW_IQ_CMD_PFN(pf) |
2880 FW_IQ_CMD_VFN(vf));
2881 c.alloc_to_len16 = htonl(FW_IQ_CMD_FREE | FW_LEN16(c));
2882 c.type_to_iqandstindex = htonl(FW_IQ_CMD_TYPE(iqtype));
2883 c.iqid = htons(iqid);
2884 c.fl0id = htons(fl0id);
2885 c.fl1id = htons(fl1id);
2886 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2887}
2888
2889/**
2890 * t4_eth_eq_free - free an Ethernet egress queue
2891 * @adap: the adapter
2892 * @mbox: mailbox to use for the FW command
2893 * @pf: the PF owning the queue
2894 * @vf: the VF owning the queue
2895 * @eqid: egress queue id
2896 *
2897 * Frees an Ethernet egress queue.
2898 */
2899int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
2900 unsigned int vf, unsigned int eqid)
2901{
2902 struct fw_eq_eth_cmd c;
2903
2904 memset(&c, 0, sizeof(c));
2905 c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_ETH_CMD) | FW_CMD_REQUEST |
2906 FW_CMD_EXEC | FW_EQ_ETH_CMD_PFN(pf) |
2907 FW_EQ_ETH_CMD_VFN(vf));
2908 c.alloc_to_len16 = htonl(FW_EQ_ETH_CMD_FREE | FW_LEN16(c));
2909 c.eqid_pkd = htonl(FW_EQ_ETH_CMD_EQID(eqid));
2910 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2911}
2912
2913/**
2914 * t4_ctrl_eq_free - free a control egress queue
2915 * @adap: the adapter
2916 * @mbox: mailbox to use for the FW command
2917 * @pf: the PF owning the queue
2918 * @vf: the VF owning the queue
2919 * @eqid: egress queue id
2920 *
2921 * Frees a control egress queue.
2922 */
2923int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
2924 unsigned int vf, unsigned int eqid)
2925{
2926 struct fw_eq_ctrl_cmd c;
2927
2928 memset(&c, 0, sizeof(c));
2929 c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_CTRL_CMD) | FW_CMD_REQUEST |
2930 FW_CMD_EXEC | FW_EQ_CTRL_CMD_PFN(pf) |
2931 FW_EQ_CTRL_CMD_VFN(vf));
2932 c.alloc_to_len16 = htonl(FW_EQ_CTRL_CMD_FREE | FW_LEN16(c));
2933 c.cmpliqid_eqid = htonl(FW_EQ_CTRL_CMD_EQID(eqid));
2934 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2935}
2936
/**
 *	t4_ofld_eq_free - free an offload egress queue
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@pf: the PF owning the queue
 *	@vf: the VF owning the queue
 *	@eqid: egress queue id
 *
 *	Frees an offload egress queue.
 */
int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
		    unsigned int vf, unsigned int eqid)
{
	struct fw_eq_ofld_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_OFLD_CMD) | FW_CMD_REQUEST |
			    FW_CMD_EXEC | FW_EQ_OFLD_CMD_PFN(pf) |
			    FW_EQ_OFLD_CMD_VFN(vf));
	c.alloc_to_len16 = htonl(FW_EQ_OFLD_CMD_FREE | FW_LEN16(c));
	c.eqid_pkd = htonl(FW_EQ_OFLD_CMD_EQID(eqid));
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}
2960
2961/**
2962 * t4_handle_fw_rpl - process a FW reply message
2963 * @adap: the adapter
2964 * @rpl: start of the FW message
2965 *
2966 * Processes a FW message, such as link state change messages.
2967 */
2968int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl)
2969{
2970 u8 opcode = *(const u8 *)rpl;
2971
2972 if (opcode == FW_PORT_CMD) { /* link/module state change message */
2973 int speed = 0, fc = 0;
2974 const struct fw_port_cmd *p = (void *)rpl;
2975 int chan = FW_PORT_CMD_PORTID_GET(ntohl(p->op_to_portid));
2976 int port = adap->chan_map[chan];
2977 struct port_info *pi = adap2pinfo(adap, port);
2978 struct link_config *lc = &pi->link_cfg;
2979 u32 stat = ntohl(p->u.info.lstatus_to_modtype);
2980 int link_ok = (stat & FW_PORT_CMD_LSTATUS) != 0;
2981 u32 mod = FW_PORT_CMD_MODTYPE_GET(stat);
2982
2983 if (stat & FW_PORT_CMD_RXPAUSE)
2984 fc |= PAUSE_RX;
2985 if (stat & FW_PORT_CMD_TXPAUSE)
2986 fc |= PAUSE_TX;
2987 if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100M))
2988 speed = SPEED_100;
2989 else if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_1G))
2990 speed = SPEED_1000;
2991 else if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_10G))
2992 speed = SPEED_10000;
2993
2994 if (link_ok != lc->link_ok || speed != lc->speed ||
2995 fc != lc->fc) { /* something changed */
2996 lc->link_ok = link_ok;
2997 lc->speed = speed;
2998 lc->fc = fc;
2999 t4_os_link_changed(adap, port, link_ok);
3000 }
3001 if (mod != pi->mod_type) {
3002 pi->mod_type = mod;
3003 t4_os_portmod_changed(adap, port);
3004 }
3005 }
3006 return 0;
3007}
3008
3009static void __devinit get_pci_mode(struct adapter *adapter,
3010 struct pci_params *p)
3011{
3012 u16 val;
3013 u32 pcie_cap = pci_pcie_cap(adapter->pdev);
3014
3015 if (pcie_cap) {
3016 pci_read_config_word(adapter->pdev, pcie_cap + PCI_EXP_LNKSTA,
3017 &val);
3018 p->speed = val & PCI_EXP_LNKSTA_CLS;
3019 p->width = (val & PCI_EXP_LNKSTA_NLW) >> 4;
3020 }
3021}
3022
3023/**
3024 * init_link_config - initialize a link's SW state
3025 * @lc: structure holding the link state
3026 * @caps: link capabilities
3027 *
3028 * Initializes the SW state maintained for each link, including the link's
3029 * capabilities and default speed/flow-control/autonegotiation settings.
3030 */
3031static void __devinit init_link_config(struct link_config *lc,
3032 unsigned int caps)
3033{
3034 lc->supported = caps;
3035 lc->requested_speed = 0;
3036 lc->speed = 0;
3037 lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
3038 if (lc->supported & FW_PORT_CAP_ANEG) {
3039 lc->advertising = lc->supported & ADVERT_MASK;
3040 lc->autoneg = AUTONEG_ENABLE;
3041 lc->requested_fc |= PAUSE_AUTONEG;
3042 } else {
3043 lc->advertising = 0;
3044 lc->autoneg = AUTONEG_DISABLE;
3045 }
3046}
3047
3048static int __devinit wait_dev_ready(struct adapter *adap)
3049{
3050 if (t4_read_reg(adap, PL_WHOAMI) != 0xffffffff)
3051 return 0;
3052 msleep(500);
3053 return t4_read_reg(adap, PL_WHOAMI) != 0xffffffff ? 0 : -EIO;
3054}
3055
900a6596
DM
3056static int __devinit get_flash_params(struct adapter *adap)
3057{
3058 int ret;
3059 u32 info;
3060
3061 ret = sf1_write(adap, 1, 1, 0, SF_RD_ID);
3062 if (!ret)
3063 ret = sf1_read(adap, 3, 0, 1, &info);
3064 t4_write_reg(adap, SF_OP, 0); /* unlock SF */
3065 if (ret)
3066 return ret;
3067
3068 if ((info & 0xff) != 0x20) /* not a Numonix flash */
3069 return -EINVAL;
3070 info >>= 16; /* log2 of size */
3071 if (info >= 0x14 && info < 0x18)
3072 adap->params.sf_nsec = 1 << (info - 16);
3073 else if (info == 0x18)
3074 adap->params.sf_nsec = 64;
3075 else
3076 return -EINVAL;
3077 adap->params.sf_size = 1 << info;
3078 adap->params.sf_fw_start =
3079 t4_read_reg(adap, CIM_BOOT_CFG) & BOOTADDR_MASK;
3080 return 0;
3081}
3082
56d36be4
DM
3083/**
3084 * t4_prep_adapter - prepare SW and HW for operation
3085 * @adapter: the adapter
3086 * @reset: if true perform a HW reset
3087 *
3088 * Initialize adapter SW state for the various HW modules, set initial
3089 * values for some adapter tunables, take PHYs out of reset, and
3090 * initialize the MDIO interface.
3091 */
3092int __devinit t4_prep_adapter(struct adapter *adapter)
3093{
3094 int ret;
3095
3096 ret = wait_dev_ready(adapter);
3097 if (ret < 0)
3098 return ret;
3099
3100 get_pci_mode(adapter, &adapter->params.pci);
3101 adapter->params.rev = t4_read_reg(adapter, PL_REV);
3102
900a6596
DM
3103 ret = get_flash_params(adapter);
3104 if (ret < 0) {
3105 dev_err(adapter->pdev_dev, "error %d identifying flash\n", ret);
3106 return ret;
3107 }
3108
56d36be4
DM
3109 ret = get_vpd_params(adapter, &adapter->params.vpd);
3110 if (ret < 0)
3111 return ret;
3112
3113 init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);
3114
3115 /*
3116 * Default port for debugging in case we can't reach FW.
3117 */
3118 adapter->params.nports = 1;
3119 adapter->params.portvec = 1;
3120 return 0;
3121}
3122
3123int __devinit t4_port_init(struct adapter *adap, int mbox, int pf, int vf)
3124{
3125 u8 addr[6];
3126 int ret, i, j = 0;
3127 struct fw_port_cmd c;
3128
3129 memset(&c, 0, sizeof(c));
3130
3131 for_each_port(adap, i) {
3132 unsigned int rss_size;
3133 struct port_info *p = adap2pinfo(adap, i);
3134
3135 while ((adap->params.portvec & (1 << j)) == 0)
3136 j++;
3137
3138 c.op_to_portid = htonl(FW_CMD_OP(FW_PORT_CMD) |
3139 FW_CMD_REQUEST | FW_CMD_READ |
3140 FW_PORT_CMD_PORTID(j));
3141 c.action_to_len16 = htonl(
3142 FW_PORT_CMD_ACTION(FW_PORT_ACTION_GET_PORT_INFO) |
3143 FW_LEN16(c));
3144 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
3145 if (ret)
3146 return ret;
3147
3148 ret = t4_alloc_vi(adap, mbox, j, pf, vf, 1, addr, &rss_size);
3149 if (ret < 0)
3150 return ret;
3151
3152 p->viid = ret;
3153 p->tx_chan = j;
3154 p->lport = j;
3155 p->rss_size = rss_size;
3156 memcpy(adap->port[i]->dev_addr, addr, ETH_ALEN);
3157 memcpy(adap->port[i]->perm_addr, addr, ETH_ALEN);
3158
3159 ret = ntohl(c.u.info.lstatus_to_modtype);
3160 p->mdio_addr = (ret & FW_PORT_CMD_MDIOCAP) ?
3161 FW_PORT_CMD_MDIOADDR_GET(ret) : -1;
3162 p->port_type = FW_PORT_CMD_PTYPE_GET(ret);
3163 p->mod_type = FW_PORT_CMD_MODTYPE_GET(ret);
3164
3165 init_link_config(&p->link_cfg, ntohs(c.u.info.pcap));
3166 j++;
3167 }
3168 return 0;
3169}