/*
 * This file is part of the Chelsio T4 Ethernet driver for Linux.
 *
 * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/init.h>
#include <linux/delay.h>
#include "cxgb4.h"
#include "t4_regs.h"
#include "t4fw_api.h"

/**
 * t4_wait_op_done_val - wait until an operation is completed
 * @adapter: the adapter performing the operation
 * @reg: the register to check for completion
 * @mask: a single-bit field within @reg that indicates completion
 * @polarity: the value of the field when the operation is completed
 * @attempts: number of check iterations
 * @delay: delay in usecs between iterations
 * @valp: where to store the value of the register at completion time
 *
 * Wait until an operation is completed by checking a bit in a register
 * up to @attempts times.  If @valp is not NULL the value of the register
 * at the time it indicated completion is stored there.  Returns 0 if the
 * operation completes and -EAGAIN otherwise.
 */
static int t4_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
			       int polarity, int attempts, int delay, u32 *valp)
{
	while (1) {
		u32 val = t4_read_reg(adapter, reg);

		if (!!(val & mask) == polarity) {
			if (valp)
				*valp = val;
			return 0;
		}
		if (--attempts == 0)
			return -EAGAIN;
		if (delay)
			udelay(delay);
	}
}

static inline int t4_wait_op_done(struct adapter *adapter, int reg, u32 mask,
				  int polarity, int attempts, int delay)
{
	return t4_wait_op_done_val(adapter, reg, mask, polarity, attempts,
				   delay, NULL);
}
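
/*
 * Example (illustrative sketch, not part of the driver): waiting for a
 * single-bit "busy" flag to clear and capturing the final register value.
 * SF_OP/BUSY/SF_ATTEMPTS are real names used later in this file; the
 * surrounding caller is hypothetical.
 *
 *	u32 val;
 *	int ret;
 *
 *	ret = t4_wait_op_done_val(adap, SF_OP, BUSY, 0, SF_ATTEMPTS, 5, &val);
 *	if (ret)
 *		return ret;	// still busy after SF_ATTEMPTS polls: -EAGAIN
 *	// val holds the register contents observed at completion time
 */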

/**
 * t4_set_reg_field - set a register field to a value
 * @adapter: the adapter to program
 * @addr: the register address
 * @mask: specifies the portion of the register to modify
 * @val: the new value for the register field
 *
 * Sets a register field specified by the supplied mask to the
 * given value.
 */
void t4_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask,
		      u32 val)
{
	u32 v = t4_read_reg(adapter, addr) & ~mask;

	t4_write_reg(adapter, addr, v | val);
	(void) t4_read_reg(adapter, addr);                    /* flush */
}
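
/*
 * Example (illustrative sketch): read-modify-write of a multi-bit field.
 * Only the bits set in the mask are changed, everything else in the
 * register is preserved, and the trailing read flushes the posted write.
 * The register and field below are hypothetical.
 *
 *	// set a 2-bit MODE field (bits 5:4) to 2 without touching other bits
 *	t4_set_reg_field(adap, EXAMPLE_CTRL_REG, 0x3 << 4, 2 << 4);
 */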

/**
 * t4_read_indirect - read indirectly addressed registers
 * @adap: the adapter
 * @addr_reg: register holding the indirect address
 * @data_reg: register holding the value of the indirect register
 * @vals: where the read register values are stored
 * @nregs: how many indirect registers to read
 * @start_idx: index of first indirect register to read
 *
 * Reads registers that are accessed indirectly through an address/data
 * register pair.
 */
static void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
			     unsigned int data_reg, u32 *vals,
			     unsigned int nregs, unsigned int start_idx)
{
	while (nregs--) {
		t4_write_reg(adap, addr_reg, start_idx);
		*vals++ = t4_read_reg(adap, data_reg);
		start_idx++;
	}
}
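
/*
 * Example (illustrative sketch): the address/data pair pattern is used
 * near the end of this file to read TP MIB counters, e.g.:
 *
 *	u32 vals[4];
 *
 *	// read 4 consecutive MIB registers starting at TP_MIB_TCP_OUT_RST
 *	t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, vals,
 *			 ARRAY_SIZE(vals), TP_MIB_TCP_OUT_RST);
 */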

#if 0
/**
 * t4_write_indirect - write indirectly addressed registers
 * @adap: the adapter
 * @addr_reg: register holding the indirect addresses
 * @data_reg: register holding the value for the indirect registers
 * @vals: values to write
 * @nregs: how many indirect registers to write
 * @start_idx: address of first indirect register to write
 *
 * Writes a sequential block of registers that are accessed indirectly
 * through an address/data register pair.
 */
static void t4_write_indirect(struct adapter *adap, unsigned int addr_reg,
			      unsigned int data_reg, const u32 *vals,
			      unsigned int nregs, unsigned int start_idx)
{
	while (nregs--) {
		t4_write_reg(adap, addr_reg, start_idx++);
		t4_write_reg(adap, data_reg, *vals++);
	}
}
#endif

/*
 * Get the reply to a mailbox command and store it in @rpl in big-endian order.
 */
static void get_mbox_rpl(struct adapter *adap, __be64 *rpl, int nflit,
			 u32 mbox_addr)
{
	for ( ; nflit; nflit--, mbox_addr += 8)
		*rpl++ = cpu_to_be64(t4_read_reg64(adap, mbox_addr));
}

/*
 * Handle a FW assertion reported in a mailbox.
 */
static void fw_asrt(struct adapter *adap, u32 mbox_addr)
{
	struct fw_debug_cmd asrt;

	get_mbox_rpl(adap, (__be64 *)&asrt, sizeof(asrt) / 8, mbox_addr);
	dev_alert(adap->pdev_dev,
		  "FW assertion at %.16s:%u, val0 %#x, val1 %#x\n",
		  asrt.u.assert.filename_0_7, ntohl(asrt.u.assert.line),
		  ntohl(asrt.u.assert.x), ntohl(asrt.u.assert.y));
}

static void dump_mbox(struct adapter *adap, int mbox, u32 data_reg)
{
	dev_err(adap->pdev_dev,
		"mbox %d: %llx %llx %llx %llx %llx %llx %llx %llx\n", mbox,
		(unsigned long long)t4_read_reg64(adap, data_reg),
		(unsigned long long)t4_read_reg64(adap, data_reg + 8),
		(unsigned long long)t4_read_reg64(adap, data_reg + 16),
		(unsigned long long)t4_read_reg64(adap, data_reg + 24),
		(unsigned long long)t4_read_reg64(adap, data_reg + 32),
		(unsigned long long)t4_read_reg64(adap, data_reg + 40),
		(unsigned long long)t4_read_reg64(adap, data_reg + 48),
		(unsigned long long)t4_read_reg64(adap, data_reg + 56));
}

/**
 * t4_wr_mbox_meat - send a command to FW through the given mailbox
 * @adap: the adapter
 * @mbox: index of the mailbox to use
 * @cmd: the command to write
 * @size: command length in bytes
 * @rpl: where to optionally store the reply
 * @sleep_ok: if true we may sleep while awaiting command completion
 *
 * Sends the given command to FW through the selected mailbox and waits
 * for the FW to execute the command.  If @rpl is not %NULL it is used to
 * store the FW's reply to the command.  The command and its optional
 * reply are of the same length.  FW can take up to %FW_CMD_MAX_TIMEOUT ms
 * to respond.  @sleep_ok determines whether we may sleep while awaiting
 * the response.  If sleeping is allowed we use progressive backoff
 * otherwise we spin.
 *
 * The return value is 0 on success or a negative errno on failure.  A
 * failure can happen either because we are not able to execute the
 * command or FW executes it but signals an error.  In the latter case
 * the return value is the error code indicated by FW (negated).
 */
int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
		    void *rpl, bool sleep_ok)
{
	static int delay[] = {
		1, 1, 3, 5, 10, 10, 20, 50, 100, 200
	};

	u32 v;
	u64 res;
	int i, ms, delay_idx;
	const __be64 *p = cmd;
	u32 data_reg = PF_REG(mbox, CIM_PF_MAILBOX_DATA);
	u32 ctl_reg = PF_REG(mbox, CIM_PF_MAILBOX_CTRL);

	if ((size & 15) || size > MBOX_LEN)
		return -EINVAL;

	v = MBOWNER_GET(t4_read_reg(adap, ctl_reg));
	for (i = 0; v == MBOX_OWNER_NONE && i < 3; i++)
		v = MBOWNER_GET(t4_read_reg(adap, ctl_reg));

	if (v != MBOX_OWNER_DRV)
		return v ? -EBUSY : -ETIMEDOUT;

	for (i = 0; i < size; i += 8)
		t4_write_reg64(adap, data_reg + i, be64_to_cpu(*p++));

	t4_write_reg(adap, ctl_reg, MBMSGVALID | MBOWNER(MBOX_OWNER_FW));
	t4_read_reg(adap, ctl_reg);          /* flush write */

	delay_idx = 0;
	ms = delay[0];

	for (i = 0; i < FW_CMD_MAX_TIMEOUT; i += ms) {
		if (sleep_ok) {
			ms = delay[delay_idx];  /* last element may repeat */
			if (delay_idx < ARRAY_SIZE(delay) - 1)
				delay_idx++;
			msleep(ms);
		} else
			mdelay(ms);

		v = t4_read_reg(adap, ctl_reg);
		if (MBOWNER_GET(v) == MBOX_OWNER_DRV) {
			if (!(v & MBMSGVALID)) {
				t4_write_reg(adap, ctl_reg, 0);
				continue;
			}

			res = t4_read_reg64(adap, data_reg);
			if (FW_CMD_OP_GET(res >> 32) == FW_DEBUG_CMD) {
				fw_asrt(adap, data_reg);
				res = FW_CMD_RETVAL(EIO);
			} else if (rpl)
				get_mbox_rpl(adap, rpl, size / 8, data_reg);

			if (FW_CMD_RETVAL_GET((int)res))
				dump_mbox(adap, mbox, data_reg);
			t4_write_reg(adap, ctl_reg, 0);
			return -FW_CMD_RETVAL_GET((int)res);
		}
	}

	dump_mbox(adap, mbox, data_reg);
	dev_err(adap->pdev_dev, "command %#x in mailbox %d timed out\n",
		*(const u8 *)cmd, mbox);
	return -ETIMEDOUT;
}
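
/*
 * Example (illustrative sketch): issuing a FW command through a mailbox.
 * The command construction below mirrors this file's own t4_restart_aneg;
 * here t4_wr_mbox_meat is called directly with sleeping allowed.
 *
 *	struct fw_port_cmd c;
 *
 *	memset(&c, 0, sizeof(c));
 *	c.op_to_portid = htonl(FW_CMD_OP(FW_PORT_CMD) | FW_CMD_REQUEST |
 *			       FW_CMD_EXEC | FW_PORT_CMD_PORTID(port));
 *	c.action_to_len16 = htonl(FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
 *				  FW_LEN16(c));
 *	c.u.l1cfg.rcap = htonl(FW_PORT_CAP_ANEG);
 *	ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, true);
 *	// ret is 0 on success, a negative errno or negated FW error otherwise
 */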

/**
 * t4_mc_read - read from MC through backdoor accesses
 * @adap: the adapter
 * @addr: address of first byte requested
 * @data: 64 bytes of data containing the requested address
 * @ecc: where to store the corresponding 64-bit ECC word
 *
 * Read 64 bytes of data from MC starting at a 64-byte-aligned address
 * that covers the requested address @addr.  If @ecc is not %NULL it
 * is assigned the 64-bit ECC word for the read data.
 */
int t4_mc_read(struct adapter *adap, u32 addr, __be32 *data, u64 *ecc)
{
	int i;

	if (t4_read_reg(adap, MC_BIST_CMD) & START_BIST)
		return -EBUSY;
	t4_write_reg(adap, MC_BIST_CMD_ADDR, addr & ~0x3fU);
	t4_write_reg(adap, MC_BIST_CMD_LEN, 64);
	t4_write_reg(adap, MC_BIST_DATA_PATTERN, 0xc);
	t4_write_reg(adap, MC_BIST_CMD, BIST_OPCODE(1) | START_BIST |
		     BIST_CMD_GAP(1));
	i = t4_wait_op_done(adap, MC_BIST_CMD, START_BIST, 0, 10, 1);
	if (i)
		return i;

#define MC_DATA(i) MC_BIST_STATUS_REG(MC_BIST_STATUS_RDATA, i)

	for (i = 15; i >= 0; i--)
		*data++ = htonl(t4_read_reg(adap, MC_DATA(i)));
	if (ecc)
		*ecc = t4_read_reg64(adap, MC_DATA(16));
#undef MC_DATA
	return 0;
}
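
/*
 * Example (illustrative sketch): fetching the 64-byte MC line that covers
 * an arbitrary byte address.  The hardware reads the aligned line
 * containing the address passed in.
 *
 *	__be32 line[16];	// 64 bytes of data
 *	u64 ecc;
 *
 *	ret = t4_mc_read(adap, addr, line, &ecc);
 *	if (ret)		// -EBUSY if a BIST is already running
 *		return ret;
 *	// the requested byte sits at offset (addr & 0x3f) within line[]
 */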

/**
 * t4_edc_read - read from EDC through backdoor accesses
 * @adap: the adapter
 * @idx: which EDC to access
 * @addr: address of first byte requested
 * @data: 64 bytes of data containing the requested address
 * @ecc: where to store the corresponding 64-bit ECC word
 *
 * Read 64 bytes of data from EDC starting at a 64-byte-aligned address
 * that covers the requested address @addr.  If @ecc is not %NULL it
 * is assigned the 64-bit ECC word for the read data.
 */
int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
{
	int i;

	idx *= EDC_STRIDE;
	if (t4_read_reg(adap, EDC_BIST_CMD + idx) & START_BIST)
		return -EBUSY;
	t4_write_reg(adap, EDC_BIST_CMD_ADDR + idx, addr & ~0x3fU);
	t4_write_reg(adap, EDC_BIST_CMD_LEN + idx, 64);
	t4_write_reg(adap, EDC_BIST_DATA_PATTERN + idx, 0xc);
	t4_write_reg(adap, EDC_BIST_CMD + idx,
		     BIST_OPCODE(1) | BIST_CMD_GAP(1) | START_BIST);
	i = t4_wait_op_done(adap, EDC_BIST_CMD + idx, START_BIST, 0, 10, 1);
	if (i)
		return i;

#define EDC_DATA(i) (EDC_BIST_STATUS_REG(EDC_BIST_STATUS_RDATA, i) + idx)

	for (i = 15; i >= 0; i--)
		*data++ = htonl(t4_read_reg(adap, EDC_DATA(i)));
	if (ecc)
		*ecc = t4_read_reg64(adap, EDC_DATA(16));
#undef EDC_DATA
	return 0;
}

/*
 * Partial EEPROM Vital Product Data structure.  Includes only the ID and
 * VPD-R header.
 */
struct t4_vpd_hdr {
	u8  id_tag;
	u8  id_len[2];
	u8  id_data[ID_LEN];
	u8  vpdr_tag;
	u8  vpdr_len[2];
};

#define EEPROM_STAT_ADDR   0x7bfc
#define VPD_BASE           0
#define VPD_LEN            512

/**
 * t4_seeprom_wp - enable/disable EEPROM write protection
 * @adapter: the adapter
 * @enable: whether to enable or disable write protection
 *
 * Enables or disables write protection on the serial EEPROM.
 */
int t4_seeprom_wp(struct adapter *adapter, bool enable)
{
	unsigned int v = enable ? 0xc : 0;
	int ret = pci_write_vpd(adapter->pdev, EEPROM_STAT_ADDR, 4, &v);
	return ret < 0 ? ret : 0;
}

/**
 * get_vpd_params - read VPD parameters from VPD EEPROM
 * @adapter: adapter to read
 * @p: where to store the parameters
 *
 * Reads card parameters stored in VPD EEPROM.
 */
static int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
{
	int i, ret;
	int ec, sn, v2;
	u8 vpd[VPD_LEN], csum;
	unsigned int vpdr_len;
	const struct t4_vpd_hdr *v;

	ret = pci_read_vpd(adapter->pdev, VPD_BASE, sizeof(vpd), vpd);
	if (ret < 0)
		return ret;

	v = (const struct t4_vpd_hdr *)vpd;
	vpdr_len = pci_vpd_lrdt_size(&v->vpdr_tag);
	if (vpdr_len + sizeof(struct t4_vpd_hdr) > VPD_LEN) {
		dev_err(adapter->pdev_dev, "bad VPD-R length %u\n", vpdr_len);
		return -EINVAL;
	}

#define FIND_VPD_KW(var, name) do { \
	var = pci_vpd_find_info_keyword(&v->id_tag, sizeof(struct t4_vpd_hdr), \
					vpdr_len, name); \
	if (var < 0) { \
		dev_err(adapter->pdev_dev, "missing VPD keyword " name "\n"); \
		return -EINVAL; \
	} \
	var += PCI_VPD_INFO_FLD_HDR_SIZE; \
} while (0)

	FIND_VPD_KW(i, "RV");
	for (csum = 0; i >= 0; i--)
		csum += vpd[i];

	if (csum) {
		dev_err(adapter->pdev_dev,
			"corrupted VPD EEPROM, actual csum %u\n", csum);
		return -EINVAL;
	}

	FIND_VPD_KW(ec, "EC");
	FIND_VPD_KW(sn, "SN");
	FIND_VPD_KW(v2, "V2");
#undef FIND_VPD_KW

	p->cclk = simple_strtoul(vpd + v2, NULL, 10);
	memcpy(p->id, v->id_data, ID_LEN);
	strim(p->id);
	memcpy(p->ec, vpd + ec, EC_LEN);
	strim(p->ec);
	i = pci_vpd_info_field_size(vpd + sn - PCI_VPD_INFO_FLD_HDR_SIZE);
	memcpy(p->sn, vpd + sn, min(i, SERNUM_LEN));
	strim(p->sn);
	return 0;
}

/* serial flash and firmware constants */
enum {
	SF_ATTEMPTS = 10,             /* max retries for SF operations */

	/* flash command opcodes */
	SF_PROG_PAGE    = 2,          /* program page */
	SF_WR_DISABLE   = 4,          /* disable writes */
	SF_RD_STATUS    = 5,          /* read status register */
	SF_WR_ENABLE    = 6,          /* enable writes */
	SF_RD_DATA_FAST = 0xb,        /* read flash */
	SF_ERASE_SECTOR = 0xd8,       /* erase sector */

	FW_START_SEC = 8,             /* first flash sector for FW */
	FW_END_SEC = 15,              /* last flash sector for FW */
	FW_IMG_START = FW_START_SEC * SF_SEC_SIZE,
	FW_MAX_SIZE = (FW_END_SEC - FW_START_SEC + 1) * SF_SEC_SIZE,
};

/**
 * sf1_read - read data from the serial flash
 * @adapter: the adapter
 * @byte_cnt: number of bytes to read
 * @cont: whether another operation will be chained
 * @lock: whether to lock SF for PL access only
 * @valp: where to store the read data
 *
 * Reads up to 4 bytes of data from the serial flash.  The location of
 * the read needs to be specified prior to calling this by issuing the
 * appropriate commands to the serial flash.
 */
static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
		    int lock, u32 *valp)
{
	int ret;

	if (!byte_cnt || byte_cnt > 4)
		return -EINVAL;
	if (t4_read_reg(adapter, SF_OP) & BUSY)
		return -EBUSY;
	cont = cont ? SF_CONT : 0;
	lock = lock ? SF_LOCK : 0;
	t4_write_reg(adapter, SF_OP, lock | cont | BYTECNT(byte_cnt - 1));
	ret = t4_wait_op_done(adapter, SF_OP, BUSY, 0, SF_ATTEMPTS, 5);
	if (!ret)
		*valp = t4_read_reg(adapter, SF_DATA);
	return ret;
}

/**
 * sf1_write - write data to the serial flash
 * @adapter: the adapter
 * @byte_cnt: number of bytes to write
 * @cont: whether another operation will be chained
 * @lock: whether to lock SF for PL access only
 * @val: value to write
 *
 * Writes up to 4 bytes of data to the serial flash.  The location of
 * the write needs to be specified prior to calling this by issuing the
 * appropriate commands to the serial flash.
 */
static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont,
		     int lock, u32 val)
{
	if (!byte_cnt || byte_cnt > 4)
		return -EINVAL;
	if (t4_read_reg(adapter, SF_OP) & BUSY)
		return -EBUSY;
	cont = cont ? SF_CONT : 0;
	lock = lock ? SF_LOCK : 0;
	t4_write_reg(adapter, SF_DATA, val);
	t4_write_reg(adapter, SF_OP, lock |
		     cont | BYTECNT(byte_cnt - 1) | OP_WR);
	return t4_wait_op_done(adapter, SF_OP, BUSY, 0, SF_ATTEMPTS, 5);
}

/**
 * flash_wait_op - wait for a flash operation to complete
 * @adapter: the adapter
 * @attempts: max number of polls of the status register
 * @delay: delay between polls in ms
 *
 * Wait for a flash operation to complete by polling the status register.
 */
static int flash_wait_op(struct adapter *adapter, int attempts, int delay)
{
	int ret;
	u32 status;

	while (1) {
		if ((ret = sf1_write(adapter, 1, 1, 1, SF_RD_STATUS)) != 0 ||
		    (ret = sf1_read(adapter, 1, 0, 1, &status)) != 0)
			return ret;
		if (!(status & 1))
			return 0;
		if (--attempts == 0)
			return -EAGAIN;
		if (delay)
			msleep(delay);
	}
}

/**
 * t4_read_flash - read words from serial flash
 * @adapter: the adapter
 * @addr: the start address for the read
 * @nwords: how many 32-bit words to read
 * @data: where to store the read data
 * @byte_oriented: whether to store data as bytes or as words
 *
 * Read the specified number of 32-bit words from the serial flash.
 * If @byte_oriented is set the read data is stored as a byte array
 * (i.e., big-endian), otherwise as 32-bit words in the platform's
 * natural endianness.
 */
static int t4_read_flash(struct adapter *adapter, unsigned int addr,
			 unsigned int nwords, u32 *data, int byte_oriented)
{
	int ret;

	if (addr + nwords * sizeof(u32) > SF_SIZE || (addr & 3))
		return -EINVAL;

	addr = swab32(addr) | SF_RD_DATA_FAST;

	if ((ret = sf1_write(adapter, 4, 1, 0, addr)) != 0 ||
	    (ret = sf1_read(adapter, 1, 1, 0, data)) != 0)
		return ret;

	for ( ; nwords; nwords--, data++) {
		ret = sf1_read(adapter, 4, nwords > 1, nwords == 1, data);
		if (nwords == 1)
			t4_write_reg(adapter, SF_OP, 0);    /* unlock SF */
		if (ret)
			return ret;
		if (byte_oriented)
			*data = htonl(*data);
	}
	return 0;
}
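
/*
 * Example (illustrative sketch): reading a single 32-bit word out of
 * flash.  This is exactly how get_fw_version below pulls the version
 * field out of the firmware header stored at FW_IMG_START.
 *
 *	u32 vers;
 *
 *	ret = t4_read_flash(adap,
 *			    FW_IMG_START + offsetof(struct fw_hdr, fw_ver),
 *			    1, &vers, 0);
 *	// with byte_oriented == 0 the word arrives in host endianness
 */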

/**
 * t4_write_flash - write up to a page of data to the serial flash
 * @adapter: the adapter
 * @addr: the start address to write
 * @n: length of data to write in bytes
 * @data: the data to write
 *
 * Writes up to a page of data (256 bytes) to the serial flash starting
 * at the given address.  All the data must be written to the same page.
 */
static int t4_write_flash(struct adapter *adapter, unsigned int addr,
			  unsigned int n, const u8 *data)
{
	int ret;
	u32 buf[64];
	unsigned int i, c, left, val, offset = addr & 0xff;

	if (addr >= SF_SIZE || offset + n > SF_PAGE_SIZE)
		return -EINVAL;

	val = swab32(addr) | SF_PROG_PAGE;

	if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
	    (ret = sf1_write(adapter, 4, 1, 1, val)) != 0)
		goto unlock;

	for (left = n; left; left -= c) {
		c = min(left, 4U);
		for (val = 0, i = 0; i < c; ++i)
			val = (val << 8) + *data++;

		ret = sf1_write(adapter, c, c != left, 1, val);
		if (ret)
			goto unlock;
	}
	ret = flash_wait_op(adapter, 5, 1);
	if (ret)
		goto unlock;

	t4_write_reg(adapter, SF_OP, 0);    /* unlock SF */

	/* Read the page to verify the write succeeded */
	ret = t4_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf, 1);
	if (ret)
		return ret;

	if (memcmp(data - n, (u8 *)buf + offset, n)) {
		dev_err(adapter->pdev_dev,
			"failed to correctly write the flash page at %#x\n",
			addr);
		return -EIO;
	}
	return 0;

unlock:
	t4_write_reg(adapter, SF_OP, 0);    /* unlock SF */
	return ret;
}
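
/*
 * Example (illustrative sketch): the page-boundary rule in practice.
 * A write must stay within one 256-byte page, so a caller crossing a
 * boundary has to split the buffer itself; the split logic below is
 * hypothetical, not taken from this driver.
 *
 *	unsigned int room = SF_PAGE_SIZE - (addr & 0xff);
 *	unsigned int chunk = min(n, room);
 *
 *	ret = t4_write_flash(adap, addr, chunk, data);
 *	if (!ret && n > chunk)	// remainder starts on the next page
 *		ret = t4_write_flash(adap, addr + chunk, n - chunk,
 *				     data + chunk);
 */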

/**
 * get_fw_version - read the firmware version
 * @adapter: the adapter
 * @vers: where to place the version
 *
 * Reads the FW version from flash.
 */
static int get_fw_version(struct adapter *adapter, u32 *vers)
{
	return t4_read_flash(adapter,
			     FW_IMG_START + offsetof(struct fw_hdr, fw_ver), 1,
			     vers, 0);
}

/**
 * get_tp_version - read the TP microcode version
 * @adapter: the adapter
 * @vers: where to place the version
 *
 * Reads the TP microcode version from flash.
 */
static int get_tp_version(struct adapter *adapter, u32 *vers)
{
	return t4_read_flash(adapter, FW_IMG_START + offsetof(struct fw_hdr,
							      tp_microcode_ver),
			     1, vers, 0);
}

/**
 * t4_check_fw_version - check if the FW is compatible with this driver
 * @adapter: the adapter
 *
 * Checks if an adapter's FW is compatible with the driver.  Returns 0
 * if there's an exact match, a negative error if the version could not
 * be read or there's a major version mismatch, and a positive value if
 * the expected major version is found but there's a minor version
 * mismatch.
 */
int t4_check_fw_version(struct adapter *adapter)
{
	u32 api_vers[2];
	int ret, major, minor, micro;

	ret = get_fw_version(adapter, &adapter->params.fw_vers);
	if (!ret)
		ret = get_tp_version(adapter, &adapter->params.tp_vers);
	if (!ret)
		ret = t4_read_flash(adapter,
			FW_IMG_START + offsetof(struct fw_hdr, intfver_nic),
			2, api_vers, 1);
	if (ret)
		return ret;

	major = FW_HDR_FW_VER_MAJOR_GET(adapter->params.fw_vers);
	minor = FW_HDR_FW_VER_MINOR_GET(adapter->params.fw_vers);
	micro = FW_HDR_FW_VER_MICRO_GET(adapter->params.fw_vers);
	memcpy(adapter->params.api_vers, api_vers,
	       sizeof(adapter->params.api_vers));

	if (major != FW_VERSION_MAJOR) {            /* major mismatch - fail */
		dev_err(adapter->pdev_dev,
			"card FW has major version %u, driver wants %u\n",
			major, FW_VERSION_MAJOR);
		return -EINVAL;
	}

	if (minor == FW_VERSION_MINOR && micro == FW_VERSION_MICRO)
		return 0;                                   /* perfect match */

	/* Minor/micro version mismatch.  Report it but often it's OK. */
	return 1;
}
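
/*
 * Example (illustrative sketch): interpreting the tri-state return value.
 * A hypothetical caller treats a positive return as a warning and keeps
 * going, but fails hard on negative values.
 *
 *	ret = t4_check_fw_version(adap);
 *	if (ret < 0)		// unreadable or major version mismatch
 *		return ret;
 *	if (ret > 0)		// minor/micro mismatch: warn and continue
 *		dev_warn(adap->pdev_dev, "FW minor version mismatch\n");
 */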

/**
 * t4_flash_erase_sectors - erase a range of flash sectors
 * @adapter: the adapter
 * @start: the first sector to erase
 * @end: the last sector to erase
 *
 * Erases the sectors in the given inclusive range.
 */
static int t4_flash_erase_sectors(struct adapter *adapter, int start, int end)
{
	int ret = 0;

	while (start <= end) {
		if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
		    (ret = sf1_write(adapter, 4, 0, 1,
				     SF_ERASE_SECTOR | (start << 8))) != 0 ||
		    (ret = flash_wait_op(adapter, 5, 500)) != 0) {
			dev_err(adapter->pdev_dev,
				"erase of flash sector %d failed, error %d\n",
				start, ret);
			break;
		}
		start++;
	}
	t4_write_reg(adapter, SF_OP, 0);    /* unlock SF */
	return ret;
}

/**
 * t4_load_fw - download firmware
 * @adap: the adapter
 * @fw_data: the firmware image to write
 * @size: image size
 *
 * Write the supplied firmware image to the card's serial flash.
 */
int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
{
	u32 csum;
	int ret, addr;
	unsigned int i;
	u8 first_page[SF_PAGE_SIZE];
	const u32 *p = (const u32 *)fw_data;
	const struct fw_hdr *hdr = (const struct fw_hdr *)fw_data;

	if (!size) {
		dev_err(adap->pdev_dev, "FW image has no data\n");
		return -EINVAL;
	}
	if (size & 511) {
		dev_err(adap->pdev_dev,
			"FW image size not multiple of 512 bytes\n");
		return -EINVAL;
	}
	if (ntohs(hdr->len512) * 512 != size) {
		dev_err(adap->pdev_dev,
			"FW image size differs from size in FW header\n");
		return -EINVAL;
	}
	if (size > FW_MAX_SIZE) {
		dev_err(adap->pdev_dev, "FW image too large, max is %u bytes\n",
			FW_MAX_SIZE);
		return -EFBIG;
	}

	for (csum = 0, i = 0; i < size / sizeof(csum); i++)
		csum += ntohl(p[i]);

	if (csum != 0xffffffff) {
		dev_err(adap->pdev_dev,
			"corrupted firmware image, checksum %#x\n", csum);
		return -EINVAL;
	}

	i = DIV_ROUND_UP(size, SF_SEC_SIZE);        /* # of sectors spanned */
	ret = t4_flash_erase_sectors(adap, FW_START_SEC, FW_START_SEC + i - 1);
	if (ret)
		goto out;

	/*
	 * We write the correct version at the end so the driver can see a bad
	 * version if the FW write fails.  Start by writing a copy of the
	 * first page with a bad version.
	 */
	memcpy(first_page, fw_data, SF_PAGE_SIZE);
	((struct fw_hdr *)first_page)->fw_ver = htonl(0xffffffff);
	ret = t4_write_flash(adap, FW_IMG_START, SF_PAGE_SIZE, first_page);
	if (ret)
		goto out;

	addr = FW_IMG_START;
	for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
		addr += SF_PAGE_SIZE;
		fw_data += SF_PAGE_SIZE;
		ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, fw_data);
		if (ret)
			goto out;
	}

	ret = t4_write_flash(adap,
			     FW_IMG_START + offsetof(struct fw_hdr, fw_ver),
			     sizeof(hdr->fw_ver), (const u8 *)&hdr->fw_ver);
out:
	if (ret)
		dev_err(adap->pdev_dev, "firmware download failed, error %d\n",
			ret);
	return ret;
}
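
/*
 * Example (illustrative sketch): loading an image obtained through the
 * kernel's request_firmware() interface.  The firmware file name below
 * is hypothetical; only the call sequence is the point.
 *
 *	const struct firmware *fw;
 *
 *	ret = request_firmware(&fw, "cxgb4/t4fw.bin", adap->pdev_dev);
 *	if (ret)
 *		return ret;
 *	ret = t4_load_fw(adap, fw->data, fw->size);
 *	release_firmware(fw);
 */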

#define ADVERT_MASK (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G |\
		     FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_ANEG)

/**
 * t4_link_start - apply link configuration to MAC/PHY
 * @adap: the adapter
 * @mbox: mbox to use for the FW command
 * @port: the port id
 * @lc: the requested link configuration
 *
 * Set up a port's MAC and PHY according to a desired link configuration.
 * - If the PHY can auto-negotiate first decide what to advertise, then
 *   enable/disable auto-negotiation as desired, and reset.
 * - If the PHY does not auto-negotiate just reset it.
 * - If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
 *   otherwise do it later based on the outcome of auto-negotiation.
 */
int t4_link_start(struct adapter *adap, unsigned int mbox, unsigned int port,
		  struct link_config *lc)
{
	struct fw_port_cmd c;
	unsigned int fc = 0, mdi = FW_PORT_MDI(FW_PORT_MDI_AUTO);

	lc->link_ok = 0;
	if (lc->requested_fc & PAUSE_RX)
		fc |= FW_PORT_CAP_FC_RX;
	if (lc->requested_fc & PAUSE_TX)
		fc |= FW_PORT_CAP_FC_TX;

	memset(&c, 0, sizeof(c));
	c.op_to_portid = htonl(FW_CMD_OP(FW_PORT_CMD) | FW_CMD_REQUEST |
			       FW_CMD_EXEC | FW_PORT_CMD_PORTID(port));
	c.action_to_len16 = htonl(FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
				  FW_LEN16(c));

	if (!(lc->supported & FW_PORT_CAP_ANEG)) {
		c.u.l1cfg.rcap = htonl((lc->supported & ADVERT_MASK) | fc);
		lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
	} else if (lc->autoneg == AUTONEG_DISABLE) {
		c.u.l1cfg.rcap = htonl(lc->requested_speed | fc | mdi);
		lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
	} else
		c.u.l1cfg.rcap = htonl(lc->advertising | fc | mdi);

	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}

/**
 * t4_restart_aneg - restart autonegotiation
 * @adap: the adapter
 * @mbox: mbox to use for the FW command
 * @port: the port id
 *
 * Restarts autonegotiation for the selected port.
 */
int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port)
{
	struct fw_port_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_portid = htonl(FW_CMD_OP(FW_PORT_CMD) | FW_CMD_REQUEST |
			       FW_CMD_EXEC | FW_PORT_CMD_PORTID(port));
	c.action_to_len16 = htonl(FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
				  FW_LEN16(c));
	c.u.l1cfg.rcap = htonl(FW_PORT_CAP_ANEG);
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}

/**
 * t4_set_vlan_accel - configure HW VLAN extraction
 * @adap: the adapter
 * @ports: bitmap of adapter ports to operate on
 * @on: enable (1) or disable (0) HW VLAN extraction
 *
 * Enables or disables HW extraction of VLAN tags for the ports specified
 * by @ports.  @ports is a bitmap with the ith bit designating the port
 * associated with the ith adapter channel.
 */
void t4_set_vlan_accel(struct adapter *adap, unsigned int ports, int on)
{
	ports <<= VLANEXTENABLE_SHIFT;
	t4_set_reg_field(adap, TP_OUT_CONFIG, ports, on ? ports : 0);
}

struct intr_info {
	unsigned int mask;       /* bits to check in interrupt status */
	const char *msg;         /* message to print or NULL */
	short stat_idx;          /* stat counter to increment or -1 */
	unsigned short fatal;    /* whether the condition reported is fatal */
};

/**
 * t4_handle_intr_status - table driven interrupt handler
 * @adapter: the adapter that generated the interrupt
 * @reg: the interrupt status register to process
 * @acts: table of interrupt actions
 *
 * A table driven interrupt handler that applies a set of masks to an
 * interrupt status word and performs the corresponding actions if the
 * interrupts described by the mask have occurred.  The actions include
 * optionally emitting a warning or alert message.  The table is terminated
 * by an entry specifying mask 0.  Returns the number of fatal interrupt
 * conditions.
 */
static int t4_handle_intr_status(struct adapter *adapter, unsigned int reg,
				 const struct intr_info *acts)
{
	int fatal = 0;
	unsigned int mask = 0;
	unsigned int status = t4_read_reg(adapter, reg);

	for ( ; acts->mask; ++acts) {
		if (!(status & acts->mask))
			continue;
		if (acts->fatal) {
			fatal++;
			dev_alert(adapter->pdev_dev, "%s (0x%x)\n", acts->msg,
				  status & acts->mask);
		} else if (acts->msg && printk_ratelimit())
			dev_warn(adapter->pdev_dev, "%s (0x%x)\n", acts->msg,
				 status & acts->mask);
		mask |= acts->mask;
	}
	status &= mask;
	if (status)                           /* clear processed interrupts */
		t4_write_reg(adapter, reg, status);
	return fatal;
}

/*
 * Interrupt handler for the PCIE module.
 */
static void pcie_intr_handler(struct adapter *adapter)
{
	static struct intr_info sysbus_intr_info[] = {
		{ RNPP, "RXNP array parity error", -1, 1 },
		{ RPCP, "RXPC array parity error", -1, 1 },
		{ RCIP, "RXCIF array parity error", -1, 1 },
		{ RCCP, "Rx completions control array parity error", -1, 1 },
		{ RFTP, "RXFT array parity error", -1, 1 },
		{ 0 }
	};
	static struct intr_info pcie_port_intr_info[] = {
		{ TPCP, "TXPC array parity error", -1, 1 },
		{ TNPP, "TXNP array parity error", -1, 1 },
		{ TFTP, "TXFT array parity error", -1, 1 },
		{ TCAP, "TXCA array parity error", -1, 1 },
		{ TCIP, "TXCIF array parity error", -1, 1 },
		{ RCAP, "RXCA array parity error", -1, 1 },
		{ OTDD, "outbound request TLP discarded", -1, 1 },
		{ RDPE, "Rx data parity error", -1, 1 },
		{ TDUE, "Tx uncorrectable data error", -1, 1 },
		{ 0 }
	};
	static struct intr_info pcie_intr_info[] = {
		{ MSIADDRLPERR, "MSI AddrL parity error", -1, 1 },
		{ MSIADDRHPERR, "MSI AddrH parity error", -1, 1 },
		{ MSIDATAPERR, "MSI data parity error", -1, 1 },
		{ MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 },
		{ MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 },
		{ MSIXDATAPERR, "MSI-X data parity error", -1, 1 },
		{ MSIXDIPERR, "MSI-X DI parity error", -1, 1 },
		{ PIOCPLPERR, "PCI PIO completion FIFO parity error", -1, 1 },
		{ PIOREQPERR, "PCI PIO request FIFO parity error", -1, 1 },
		{ TARTAGPERR, "PCI PCI target tag FIFO parity error", -1, 1 },
		{ CCNTPERR, "PCI CMD channel count parity error", -1, 1 },
		{ CREQPERR, "PCI CMD channel request parity error", -1, 1 },
		{ CRSPPERR, "PCI CMD channel response parity error", -1, 1 },
		{ DCNTPERR, "PCI DMA channel count parity error", -1, 1 },
		{ DREQPERR, "PCI DMA channel request parity error", -1, 1 },
		{ DRSPPERR, "PCI DMA channel response parity error", -1, 1 },
		{ HCNTPERR, "PCI HMA channel count parity error", -1, 1 },
		{ HREQPERR, "PCI HMA channel request parity error", -1, 1 },
		{ HRSPPERR, "PCI HMA channel response parity error", -1, 1 },
		{ CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 },
		{ FIDPERR, "PCI FID parity error", -1, 1 },
		{ INTXCLRPERR, "PCI INTx clear parity error", -1, 1 },
		{ MATAGPERR, "PCI MA tag parity error", -1, 1 },
		{ PIOTAGPERR, "PCI PIO tag parity error", -1, 1 },
		{ RXCPLPERR, "PCI Rx completion parity error", -1, 1 },
		{ RXWRPERR, "PCI Rx write parity error", -1, 1 },
		{ RPLPERR, "PCI replay buffer parity error", -1, 1 },
		{ PCIESINT, "PCI core secondary fault", -1, 1 },
		{ PCIEPINT, "PCI core primary fault", -1, 1 },
		{ UNXSPLCPLERR, "PCI unexpected split completion error", -1, 0 },
		{ 0 }
	};

	int fat;

	fat = t4_handle_intr_status(adapter,
				    PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS,
				    sysbus_intr_info) +
	      t4_handle_intr_status(adapter,
				    PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
				    pcie_port_intr_info) +
	      t4_handle_intr_status(adapter, PCIE_INT_CAUSE, pcie_intr_info);
	if (fat)
		t4_fatal_err(adapter);
}

/*
 * TP interrupt handler.
 */
static void tp_intr_handler(struct adapter *adapter)
{
	static struct intr_info tp_intr_info[] = {
		{ 0x3fffffff, "TP parity error", -1, 1 },
		{ FLMTXFLSTEMPTY, "TP out of Tx pages", -1, 1 },
		{ 0 }
	};

	if (t4_handle_intr_status(adapter, TP_INT_CAUSE, tp_intr_info))
		t4_fatal_err(adapter);
}

/*
 * SGE interrupt handler.
 */
static void sge_intr_handler(struct adapter *adapter)
{
	u64 v;

	static struct intr_info sge_intr_info[] = {
		{ ERR_CPL_EXCEED_IQE_SIZE,
		  "SGE received CPL exceeding IQE size", -1, 1 },
		{ ERR_INVALID_CIDX_INC,
		  "SGE GTS CIDX increment too large", -1, 0 },
		{ ERR_CPL_OPCODE_0, "SGE received 0-length CPL", -1, 0 },
		{ ERR_DROPPED_DB, "SGE doorbell dropped", -1, 0 },
		{ ERR_DATA_CPL_ON_HIGH_QID1 | ERR_DATA_CPL_ON_HIGH_QID0,
		  "SGE IQID > 1023 received CPL for FL", -1, 0 },
		{ ERR_BAD_DB_PIDX3, "SGE DBP 3 pidx increment too large", -1,
		  0 },
		{ ERR_BAD_DB_PIDX2, "SGE DBP 2 pidx increment too large", -1,
		  0 },
		{ ERR_BAD_DB_PIDX1, "SGE DBP 1 pidx increment too large", -1,
		  0 },
		{ ERR_BAD_DB_PIDX0, "SGE DBP 0 pidx increment too large", -1,
		  0 },
		{ ERR_ING_CTXT_PRIO,
		  "SGE too many priority ingress contexts", -1, 0 },
		{ ERR_EGR_CTXT_PRIO,
		  "SGE too many priority egress contexts", -1, 0 },
		{ INGRESS_SIZE_ERR, "SGE illegal ingress QID", -1, 0 },
		{ EGRESS_SIZE_ERR, "SGE illegal egress QID", -1, 0 },
		{ 0 }
	};

	v = (u64)t4_read_reg(adapter, SGE_INT_CAUSE1) |
	    ((u64)t4_read_reg(adapter, SGE_INT_CAUSE2) << 32);
	if (v) {
		dev_alert(adapter->pdev_dev, "SGE parity error (%#llx)\n",
			  (unsigned long long)v);
		t4_write_reg(adapter, SGE_INT_CAUSE1, v);
		t4_write_reg(adapter, SGE_INT_CAUSE2, v >> 32);
	}

	if (t4_handle_intr_status(adapter, SGE_INT_CAUSE3, sge_intr_info) ||
	    v != 0)
		t4_fatal_err(adapter);
}

/*
 * CIM interrupt handler.
 */
static void cim_intr_handler(struct adapter *adapter)
{
	static struct intr_info cim_intr_info[] = {
		{ PREFDROPINT, "CIM control register prefetch drop", -1, 1 },
		{ OBQPARERR, "CIM OBQ parity error", -1, 1 },
		{ IBQPARERR, "CIM IBQ parity error", -1, 1 },
		{ MBUPPARERR, "CIM mailbox uP parity error", -1, 1 },
		{ MBHOSTPARERR, "CIM mailbox host parity error", -1, 1 },
		{ TIEQINPARERRINT, "CIM TIEQ outgoing parity error", -1, 1 },
		{ TIEQOUTPARERRINT, "CIM TIEQ incoming parity error", -1, 1 },
		{ 0 }
	};
	static struct intr_info cim_upintr_info[] = {
		{ RSVDSPACEINT, "CIM reserved space access", -1, 1 },
		{ ILLTRANSINT, "CIM illegal transaction", -1, 1 },
		{ ILLWRINT, "CIM illegal write", -1, 1 },
		{ ILLRDINT, "CIM illegal read", -1, 1 },
		{ ILLRDBEINT, "CIM illegal read BE", -1, 1 },
		{ ILLWRBEINT, "CIM illegal write BE", -1, 1 },
		{ SGLRDBOOTINT, "CIM single read from boot space", -1, 1 },
		{ SGLWRBOOTINT, "CIM single write to boot space", -1, 1 },
		{ BLKWRBOOTINT, "CIM block write to boot space", -1, 1 },
		{ SGLRDFLASHINT, "CIM single read from flash space", -1, 1 },
		{ SGLWRFLASHINT, "CIM single write to flash space", -1, 1 },
		{ BLKWRFLASHINT, "CIM block write to flash space", -1, 1 },
		{ SGLRDEEPROMINT, "CIM single EEPROM read", -1, 1 },
		{ SGLWREEPROMINT, "CIM single EEPROM write", -1, 1 },
		{ BLKRDEEPROMINT, "CIM block EEPROM read", -1, 1 },
		{ BLKWREEPROMINT, "CIM block EEPROM write", -1, 1 },
		{ SGLRDCTLINT, "CIM single read from CTL space", -1, 1 },
		{ SGLWRCTLINT, "CIM single write to CTL space", -1, 1 },
		{ BLKRDCTLINT, "CIM block read from CTL space", -1, 1 },
		{ BLKWRCTLINT, "CIM block write to CTL space", -1, 1 },
		{ SGLRDPLINT, "CIM single read from PL space", -1, 1 },
		{ SGLWRPLINT, "CIM single write to PL space", -1, 1 },
		{ BLKRDPLINT, "CIM block read from PL space", -1, 1 },
		{ BLKWRPLINT, "CIM block write to PL space", -1, 1 },
		{ REQOVRLOOKUPINT, "CIM request FIFO overwrite", -1, 1 },
		{ RSPOVRLOOKUPINT, "CIM response FIFO overwrite", -1, 1 },
		{ TIMEOUTINT, "CIM PIF timeout", -1, 1 },
		{ TIMEOUTMAINT, "CIM PIF MA timeout", -1, 1 },
		{ 0 }
	};

	int fat;

	fat = t4_handle_intr_status(adapter, CIM_HOST_INT_CAUSE,
				    cim_intr_info) +
	      t4_handle_intr_status(adapter, CIM_HOST_UPACC_INT_CAUSE,
				    cim_upintr_info);
	if (fat)
		t4_fatal_err(adapter);
}

/*
 * ULP RX interrupt handler.
 */
static void ulprx_intr_handler(struct adapter *adapter)
{
	static struct intr_info ulprx_intr_info[] = {
		{ 0x7fffff, "ULPRX parity error", -1, 1 },
		{ 0 }
	};

	if (t4_handle_intr_status(adapter, ULP_RX_INT_CAUSE, ulprx_intr_info))
		t4_fatal_err(adapter);
}

/*
 * ULP TX interrupt handler.
 */
static void ulptx_intr_handler(struct adapter *adapter)
{
	static struct intr_info ulptx_intr_info[] = {
		{ PBL_BOUND_ERR_CH3, "ULPTX channel 3 PBL out of bounds", -1,
		  0 },
		{ PBL_BOUND_ERR_CH2, "ULPTX channel 2 PBL out of bounds", -1,
		  0 },
		{ PBL_BOUND_ERR_CH1, "ULPTX channel 1 PBL out of bounds", -1,
		  0 },
		{ PBL_BOUND_ERR_CH0, "ULPTX channel 0 PBL out of bounds", -1,
		  0 },
		{ 0xfffffff, "ULPTX parity error", -1, 1 },
		{ 0 }
	};

	if (t4_handle_intr_status(adapter, ULP_TX_INT_CAUSE, ulptx_intr_info))
		t4_fatal_err(adapter);
}

/*
 * PM TX interrupt handler.
 */
static void pmtx_intr_handler(struct adapter *adapter)
{
	static struct intr_info pmtx_intr_info[] = {
		{ PCMD_LEN_OVFL0, "PMTX channel 0 pcmd too large", -1, 1 },
		{ PCMD_LEN_OVFL1, "PMTX channel 1 pcmd too large", -1, 1 },
		{ PCMD_LEN_OVFL2, "PMTX channel 2 pcmd too large", -1, 1 },
		{ ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1 },
		{ PMTX_FRAMING_ERROR, "PMTX framing error", -1, 1 },
		{ OESPI_PAR_ERROR, "PMTX oespi parity error", -1, 1 },
		{ DB_OPTIONS_PAR_ERROR, "PMTX db_options parity error", -1, 1 },
		{ ICSPI_PAR_ERROR, "PMTX icspi parity error", -1, 1 },
		{ C_PCMD_PAR_ERROR, "PMTX c_pcmd parity error", -1, 1 },
		{ 0 }
	};

	if (t4_handle_intr_status(adapter, PM_TX_INT_CAUSE, pmtx_intr_info))
		t4_fatal_err(adapter);
}

/*
 * PM RX interrupt handler.
 */
static void pmrx_intr_handler(struct adapter *adapter)
{
	static struct intr_info pmrx_intr_info[] = {
		{ ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1 },
		{ PMRX_FRAMING_ERROR, "PMRX framing error", -1, 1 },
		{ OCSPI_PAR_ERROR, "PMRX ocspi parity error", -1, 1 },
		{ DB_OPTIONS_PAR_ERROR, "PMRX db_options parity error", -1, 1 },
		{ IESPI_PAR_ERROR, "PMRX iespi parity error", -1, 1 },
		{ E_PCMD_PAR_ERROR, "PMRX e_pcmd parity error", -1, 1 },
		{ 0 }
	};

	if (t4_handle_intr_status(adapter, PM_RX_INT_CAUSE, pmrx_intr_info))
		t4_fatal_err(adapter);
}

/*
 * CPL switch interrupt handler.
 */
static void cplsw_intr_handler(struct adapter *adapter)
{
	static struct intr_info cplsw_intr_info[] = {
		{ CIM_OP_MAP_PERR, "CPLSW CIM op_map parity error", -1, 1 },
		{ CIM_OVFL_ERROR, "CPLSW CIM overflow", -1, 1 },
		{ TP_FRAMING_ERROR, "CPLSW TP framing error", -1, 1 },
		{ SGE_FRAMING_ERROR, "CPLSW SGE framing error", -1, 1 },
		{ CIM_FRAMING_ERROR, "CPLSW CIM framing error", -1, 1 },
		{ ZERO_SWITCH_ERROR, "CPLSW no-switch error", -1, 1 },
		{ 0 }
	};

	if (t4_handle_intr_status(adapter, CPL_INTR_CAUSE, cplsw_intr_info))
		t4_fatal_err(adapter);
}

/*
 * LE interrupt handler.
 */
static void le_intr_handler(struct adapter *adap)
{
	static struct intr_info le_intr_info[] = {
		{ LIPMISS, "LE LIP miss", -1, 0 },
		{ LIP0, "LE 0 LIP error", -1, 0 },
		{ PARITYERR, "LE parity error", -1, 1 },
		{ UNKNOWNCMD, "LE unknown command", -1, 1 },
		{ REQQPARERR, "LE request queue parity error", -1, 1 },
		{ 0 }
	};

	if (t4_handle_intr_status(adap, LE_DB_INT_CAUSE, le_intr_info))
		t4_fatal_err(adap);
}

/*
 * MPS interrupt handler.
 */
static void mps_intr_handler(struct adapter *adapter)
{
	static struct intr_info mps_rx_intr_info[] = {
		{ 0xffffff, "MPS Rx parity error", -1, 1 },
		{ 0 }
	};
	static struct intr_info mps_tx_intr_info[] = {
		{ TPFIFO, "MPS Tx TP FIFO parity error", -1, 1 },
		{ NCSIFIFO, "MPS Tx NC-SI FIFO parity error", -1, 1 },
		{ TXDATAFIFO, "MPS Tx data FIFO parity error", -1, 1 },
		{ TXDESCFIFO, "MPS Tx desc FIFO parity error", -1, 1 },
		{ BUBBLE, "MPS Tx underflow", -1, 1 },
		{ SECNTERR, "MPS Tx SOP/EOP error", -1, 1 },
		{ FRMERR, "MPS Tx framing error", -1, 1 },
		{ 0 }
	};
	static struct intr_info mps_trc_intr_info[] = {
		{ FILTMEM, "MPS TRC filter parity error", -1, 1 },
		{ PKTFIFO, "MPS TRC packet FIFO parity error", -1, 1 },
		{ MISCPERR, "MPS TRC misc parity error", -1, 1 },
		{ 0 }
	};
	static struct intr_info mps_stat_sram_intr_info[] = {
		{ 0x1fffff, "MPS statistics SRAM parity error", -1, 1 },
		{ 0 }
	};
	static struct intr_info mps_stat_tx_intr_info[] = {
		{ 0xfffff, "MPS statistics Tx FIFO parity error", -1, 1 },
		{ 0 }
	};
	static struct intr_info mps_stat_rx_intr_info[] = {
		{ 0xffffff, "MPS statistics Rx FIFO parity error", -1, 1 },
		{ 0 }
	};
	static struct intr_info mps_cls_intr_info[] = {
		{ MATCHSRAM, "MPS match SRAM parity error", -1, 1 },
		{ MATCHTCAM, "MPS match TCAM parity error", -1, 1 },
		{ HASHSRAM, "MPS hash SRAM parity error", -1, 1 },
		{ 0 }
	};

	int fat;

	fat = t4_handle_intr_status(adapter, MPS_RX_PERR_INT_CAUSE,
				    mps_rx_intr_info) +
	      t4_handle_intr_status(adapter, MPS_TX_INT_CAUSE,
				    mps_tx_intr_info) +
	      t4_handle_intr_status(adapter, MPS_TRC_INT_CAUSE,
				    mps_trc_intr_info) +
	      t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_SRAM,
				    mps_stat_sram_intr_info) +
	      t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_TX_FIFO,
				    mps_stat_tx_intr_info) +
	      t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_RX_FIFO,
				    mps_stat_rx_intr_info) +
	      t4_handle_intr_status(adapter, MPS_CLS_INT_CAUSE,
				    mps_cls_intr_info);

	t4_write_reg(adapter, MPS_INT_CAUSE, CLSINT | TRCINT |
		     RXINT | TXINT | STATINT);
	t4_read_reg(adapter, MPS_INT_CAUSE);                    /* flush */
	if (fat)
		t4_fatal_err(adapter);
}

#define MEM_INT_MASK (PERR_INT_CAUSE | ECC_CE_INT_CAUSE | ECC_UE_INT_CAUSE)

/*
 * EDC/MC interrupt handler.
 */
static void mem_intr_handler(struct adapter *adapter, int idx)
{
	static const char name[3][5] = { "EDC0", "EDC1", "MC" };

	unsigned int addr, cnt_addr, v;

	if (idx <= MEM_EDC1) {
		addr = EDC_REG(EDC_INT_CAUSE, idx);
		cnt_addr = EDC_REG(EDC_ECC_STATUS, idx);
	} else {
		addr = MC_INT_CAUSE;
		cnt_addr = MC_ECC_STATUS;
	}

	v = t4_read_reg(adapter, addr) & MEM_INT_MASK;
	if (v & PERR_INT_CAUSE)
		dev_alert(adapter->pdev_dev, "%s FIFO parity error\n",
			  name[idx]);
	if (v & ECC_CE_INT_CAUSE) {
		u32 cnt = ECC_CECNT_GET(t4_read_reg(adapter, cnt_addr));

		t4_write_reg(adapter, cnt_addr, ECC_CECNT_MASK);
		if (printk_ratelimit())
			dev_warn(adapter->pdev_dev,
				 "%u %s correctable ECC data error%s\n",
				 cnt, name[idx], cnt > 1 ? "s" : "");
	}
	if (v & ECC_UE_INT_CAUSE)
		dev_alert(adapter->pdev_dev,
			  "%s uncorrectable ECC data error\n", name[idx]);

	t4_write_reg(adapter, addr, v);
	if (v & (PERR_INT_CAUSE | ECC_UE_INT_CAUSE))
		t4_fatal_err(adapter);
}

/*
 * MA interrupt handler.
 */
static void ma_intr_handler(struct adapter *adap)
{
	u32 v, status = t4_read_reg(adap, MA_INT_CAUSE);

	if (status & MEM_PERR_INT_CAUSE)
		dev_alert(adap->pdev_dev,
			  "MA parity error, parity status %#x\n",
			  t4_read_reg(adap, MA_PARITY_ERROR_STATUS));
	if (status & MEM_WRAP_INT_CAUSE) {
		v = t4_read_reg(adap, MA_INT_WRAP_STATUS);
		dev_alert(adap->pdev_dev, "MA address wrap-around error by "
			  "client %u to address %#x\n",
			  MEM_WRAP_CLIENT_NUM_GET(v),
			  MEM_WRAP_ADDRESS_GET(v) << 4);
	}
	t4_write_reg(adap, MA_INT_CAUSE, status);
	t4_fatal_err(adap);
}

/*
 * SMB interrupt handler.
 */
static void smb_intr_handler(struct adapter *adap)
{
	static struct intr_info smb_intr_info[] = {
		{ MSTTXFIFOPARINT, "SMB master Tx FIFO parity error", -1, 1 },
		{ MSTRXFIFOPARINT, "SMB master Rx FIFO parity error", -1, 1 },
		{ SLVFIFOPARINT, "SMB slave FIFO parity error", -1, 1 },
		{ 0 }
	};

	if (t4_handle_intr_status(adap, SMB_INT_CAUSE, smb_intr_info))
		t4_fatal_err(adap);
}

/*
 * NC-SI interrupt handler.
 */
static void ncsi_intr_handler(struct adapter *adap)
{
	static struct intr_info ncsi_intr_info[] = {
		{ CIM_DM_PRTY_ERR, "NC-SI CIM parity error", -1, 1 },
		{ MPS_DM_PRTY_ERR, "NC-SI MPS parity error", -1, 1 },
		{ TXFIFO_PRTY_ERR, "NC-SI Tx FIFO parity error", -1, 1 },
		{ RXFIFO_PRTY_ERR, "NC-SI Rx FIFO parity error", -1, 1 },
		{ 0 }
	};

	if (t4_handle_intr_status(adap, NCSI_INT_CAUSE, ncsi_intr_info))
		t4_fatal_err(adap);
}

/*
 * XGMAC interrupt handler.
 */
static void xgmac_intr_handler(struct adapter *adap, int port)
{
	u32 v = t4_read_reg(adap, PORT_REG(port, XGMAC_PORT_INT_CAUSE));

	v &= TXFIFO_PRTY_ERR | RXFIFO_PRTY_ERR;
	if (!v)
		return;

	if (v & TXFIFO_PRTY_ERR)
		dev_alert(adap->pdev_dev, "XGMAC %d Tx FIFO parity error\n",
			  port);
	if (v & RXFIFO_PRTY_ERR)
		dev_alert(adap->pdev_dev, "XGMAC %d Rx FIFO parity error\n",
			  port);
	t4_write_reg(adap, PORT_REG(port, XGMAC_PORT_INT_CAUSE), v);
	t4_fatal_err(adap);
}

/*
 * PL interrupt handler.
 */
static void pl_intr_handler(struct adapter *adap)
{
	static struct intr_info pl_intr_info[] = {
		{ FATALPERR, "T4 fatal parity error", -1, 1 },
		{ PERRVFID, "PL VFID_MAP parity error", -1, 1 },
		{ 0 }
	};

	if (t4_handle_intr_status(adap, PL_PL_INT_CAUSE, pl_intr_info))
		t4_fatal_err(adap);
}

#define PF_INTR_MASK (PFSW | PFCIM)
#define GLBL_INTR_MASK (CIM | MPS | PL | PCIE | MC | EDC0 | \
		EDC1 | LE | TP | MA | PM_TX | PM_RX | ULP_RX | \
		CPL_SWITCH | SGE | ULP_TX)

/**
 * t4_slow_intr_handler - control path interrupt handler
 * @adapter: the adapter
 *
 * T4 interrupt handler for non-data global interrupt events, e.g., errors.
 * The designation 'slow' is because it involves register reads, while
 * data interrupts typically don't involve any MMIOs.
 */
int t4_slow_intr_handler(struct adapter *adapter)
{
	u32 cause = t4_read_reg(adapter, PL_INT_CAUSE);

	if (!(cause & GLBL_INTR_MASK))
		return 0;
	if (cause & CIM)
		cim_intr_handler(adapter);
	if (cause & MPS)
		mps_intr_handler(adapter);
	if (cause & NCSI)
		ncsi_intr_handler(adapter);
	if (cause & PL)
		pl_intr_handler(adapter);
	if (cause & SMB)
		smb_intr_handler(adapter);
	if (cause & XGMAC0)
		xgmac_intr_handler(adapter, 0);
	if (cause & XGMAC1)
		xgmac_intr_handler(adapter, 1);
	if (cause & XGMAC_KR0)
		xgmac_intr_handler(adapter, 2);
	if (cause & XGMAC_KR1)
		xgmac_intr_handler(adapter, 3);
	if (cause & PCIE)
		pcie_intr_handler(adapter);
	if (cause & MC)
		mem_intr_handler(adapter, MEM_MC);
	if (cause & EDC0)
		mem_intr_handler(adapter, MEM_EDC0);
	if (cause & EDC1)
		mem_intr_handler(adapter, MEM_EDC1);
	if (cause & LE)
		le_intr_handler(adapter);
	if (cause & TP)
		tp_intr_handler(adapter);
	if (cause & MA)
		ma_intr_handler(adapter);
	if (cause & PM_TX)
		pmtx_intr_handler(adapter);
	if (cause & PM_RX)
		pmrx_intr_handler(adapter);
	if (cause & ULP_RX)
		ulprx_intr_handler(adapter);
	if (cause & CPL_SWITCH)
		cplsw_intr_handler(adapter);
	if (cause & SGE)
		sge_intr_handler(adapter);
	if (cause & ULP_TX)
		ulptx_intr_handler(adapter);

	/* Clear the interrupts just processed for which we are the master. */
	t4_write_reg(adapter, PL_INT_CAUSE, cause & GLBL_INTR_MASK);
	(void) t4_read_reg(adapter, PL_INT_CAUSE); /* flush */
	return 1;
}

/**
 * t4_intr_enable - enable interrupts
 * @adapter: the adapter whose interrupts should be enabled
 *
 * Enable PF-specific interrupts for the calling function and the top-level
 * interrupt concentrator for global interrupts.  Interrupts are already
 * enabled at each module, here we just enable the roots of the interrupt
 * hierarchies.
 *
 * Note: this function should be called only when the driver manages
 * non PF-specific interrupts from the various HW modules.  Only one PCI
 * function at a time should be doing this.
 */
void t4_intr_enable(struct adapter *adapter)
{
	u32 pf = SOURCEPF_GET(t4_read_reg(adapter, PL_WHOAMI));

	t4_write_reg(adapter, SGE_INT_ENABLE3, ERR_CPL_EXCEED_IQE_SIZE |
		     ERR_INVALID_CIDX_INC | ERR_CPL_OPCODE_0 |
		     ERR_DROPPED_DB | ERR_DATA_CPL_ON_HIGH_QID1 |
		     ERR_DATA_CPL_ON_HIGH_QID0 | ERR_BAD_DB_PIDX3 |
		     ERR_BAD_DB_PIDX2 | ERR_BAD_DB_PIDX1 |
		     ERR_BAD_DB_PIDX0 | ERR_ING_CTXT_PRIO |
		     ERR_EGR_CTXT_PRIO | INGRESS_SIZE_ERR |
		     EGRESS_SIZE_ERR);
	t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE), PF_INTR_MASK);
	t4_set_reg_field(adapter, PL_INT_MAP0, 0, 1 << pf);
}

/**
 * t4_intr_disable - disable interrupts
 * @adapter: the adapter whose interrupts should be disabled
 *
 * Disable interrupts.  We only disable the top-level interrupt
 * concentrators.  The caller must be a PCI function managing global
 * interrupts.
 */
void t4_intr_disable(struct adapter *adapter)
{
	u32 pf = SOURCEPF_GET(t4_read_reg(adapter, PL_WHOAMI));

	t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE), 0);
	t4_set_reg_field(adapter, PL_INT_MAP0, 1 << pf, 0);
}

/**
 * t4_intr_clear - clear all interrupts
 * @adapter: the adapter whose interrupts should be cleared
 *
 * Clears all interrupts.  The caller must be a PCI function managing
 * global interrupts.
 */
void t4_intr_clear(struct adapter *adapter)
{
	static const unsigned int cause_reg[] = {
		SGE_INT_CAUSE1, SGE_INT_CAUSE2, SGE_INT_CAUSE3,
		PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS,
		PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
		PCIE_NONFAT_ERR, PCIE_INT_CAUSE,
		MC_INT_CAUSE,
		MA_INT_WRAP_STATUS, MA_PARITY_ERROR_STATUS, MA_INT_CAUSE,
		EDC_INT_CAUSE, EDC_REG(EDC_INT_CAUSE, 1),
		CIM_HOST_INT_CAUSE, CIM_HOST_UPACC_INT_CAUSE,
		MYPF_REG(CIM_PF_HOST_INT_CAUSE),
		TP_INT_CAUSE,
		ULP_RX_INT_CAUSE, ULP_TX_INT_CAUSE,
		PM_RX_INT_CAUSE, PM_TX_INT_CAUSE,
		MPS_RX_PERR_INT_CAUSE,
		CPL_INTR_CAUSE,
		MYPF_REG(PL_PF_INT_CAUSE),
		PL_PL_INT_CAUSE,
		LE_DB_INT_CAUSE,
	};

	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(cause_reg); ++i)
		t4_write_reg(adapter, cause_reg[i], 0xffffffff);

	t4_write_reg(adapter, PL_INT_CAUSE, GLBL_INTR_MASK);
	(void) t4_read_reg(adapter, PL_INT_CAUSE); /* flush */
}

/**
 * hash_mac_addr - return the hash value of a MAC address
 * @addr: the 48-bit Ethernet MAC address
 *
 * Hashes a MAC address according to the hash function used by HW inexact
 * (hash) address matching.
 */
static int hash_mac_addr(const u8 *addr)
{
	u32 a = ((u32)addr[0] << 16) | ((u32)addr[1] << 8) | addr[2];
	u32 b = ((u32)addr[3] << 16) | ((u32)addr[4] << 8) | addr[5];
	a ^= b;
	a ^= (a >> 12);
	a ^= (a >> 6);
	return a & 0x3f;
}
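
/*
 * Example (illustrative sketch): the function returns a 6-bit index
 * (0-63), which is typically used to set a bit in a 64-bit inexact-match
 * hash filter vector; the multicast address below is made up.
 *
 *	static const u8 mac[] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
 *	u64 hash_vec = 1ULL << hash_mac_addr(mac);
 */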

/**
 * t4_config_rss_range - configure a portion of the RSS mapping table
 * @adapter: the adapter
 * @mbox: mbox to use for the FW command
 * @viid: virtual interface whose RSS subtable is to be written
 * @start: start entry in the table to write
 * @n: how many table entries to write
 * @rspq: values for the response queue lookup table
 * @nrspq: number of values in @rspq
 *
 * Programs the selected part of the VI's RSS mapping table with the
 * provided values.  If @nrspq < @n the supplied values are used repeatedly
 * until the full table range is populated.
 *
 * The caller must ensure the values in @rspq are in the range allowed for
 * @viid.
 */
int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
			int start, int n, const u16 *rspq, unsigned int nrspq)
{
	int ret;
	const u16 *rsp = rspq;
	const u16 *rsp_end = rspq + nrspq;
	struct fw_rss_ind_tbl_cmd cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.op_to_viid = htonl(FW_CMD_OP(FW_RSS_IND_TBL_CMD) |
			       FW_CMD_REQUEST | FW_CMD_WRITE |
			       FW_RSS_IND_TBL_CMD_VIID(viid));
	cmd.retval_len16 = htonl(FW_LEN16(cmd));

	/* each fw_rss_ind_tbl_cmd takes up to 32 entries */
	while (n > 0) {
		int nq = min(n, 32);
		__be32 *qp = &cmd.iq0_to_iq2;

		cmd.niqid = htons(nq);
		cmd.startidx = htons(start);

		start += nq;
		n -= nq;

		while (nq > 0) {
			unsigned int v;

			v = FW_RSS_IND_TBL_CMD_IQ0(*rsp);
			if (++rsp >= rsp_end)
				rsp = rspq;
			v |= FW_RSS_IND_TBL_CMD_IQ1(*rsp);
			if (++rsp >= rsp_end)
				rsp = rspq;
			v |= FW_RSS_IND_TBL_CMD_IQ2(*rsp);
			if (++rsp >= rsp_end)
				rsp = rspq;

			*qp++ = htonl(v);
			nq -= 3;
		}

		ret = t4_wr_mbox(adapter, mbox, &cmd, sizeof(cmd), NULL);
		if (ret)
			return ret;
	}
	return 0;
}
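
/*
 * Example (illustrative sketch): spreading a VI's RSS subtable across
 * four response queues.  Because nrspq < n, the four queue ids repeat
 * until every entry is written; the queue ids and use of RSS_NENTRIES
 * as the range are made up for illustration.
 *
 *	static const u16 rspq[] = { 16, 17, 18, 19 };
 *
 *	ret = t4_config_rss_range(adap, mbox, viid, 0, RSS_NENTRIES,
 *				  rspq, ARRAY_SIZE(rspq));
 */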

/**
 * t4_config_glbl_rss - configure the global RSS mode
 * @adapter: the adapter
 * @mbox: mbox to use for the FW command
 * @mode: global RSS mode
 * @flags: mode-specific flags
 *
 * Sets the global RSS mode.
 */
int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode,
		       unsigned int flags)
{
	struct fw_rss_glb_config_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_write = htonl(FW_CMD_OP(FW_RSS_GLB_CONFIG_CMD) |
			      FW_CMD_REQUEST | FW_CMD_WRITE);
	c.retval_len16 = htonl(FW_LEN16(c));
	if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_MANUAL) {
		c.u.manual.mode_pkd = htonl(FW_RSS_GLB_CONFIG_CMD_MODE(mode));
	} else if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL) {
		c.u.basicvirtual.mode_pkd =
			htonl(FW_RSS_GLB_CONFIG_CMD_MODE(mode));
		c.u.basicvirtual.synmapen_to_hashtoeplitz = htonl(flags);
	} else
		return -EINVAL;
	return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
}

/* Read an RSS table row */
static int rd_rss_row(struct adapter *adap, int row, u32 *val)
{
	t4_write_reg(adap, TP_RSS_LKP_TABLE, 0xfff00000 | row);
	return t4_wait_op_done_val(adap, TP_RSS_LKP_TABLE, LKPTBLROWVLD, 1,
				   5, 0, val);
}

/**
 * t4_read_rss - read the contents of the RSS mapping table
 * @adapter: the adapter
 * @map: holds the contents of the RSS mapping table
 *
 * Reads the contents of the RSS hash->queue mapping table.
 */
int t4_read_rss(struct adapter *adapter, u16 *map)
{
	u32 val;
	int i, ret;

	for (i = 0; i < RSS_NENTRIES / 2; ++i) {
		ret = rd_rss_row(adapter, i, &val);
		if (ret)
			return ret;
		*map++ = LKPTBLQUEUE0_GET(val);
		*map++ = LKPTBLQUEUE1_GET(val);
	}
	return 0;
}
1749
1750/**
1751 * t4_tp_get_tcp_stats - read TP's TCP MIB counters
1752 * @adap: the adapter
1753 * @v4: holds the TCP/IP counter values
1754 * @v6: holds the TCP/IPv6 counter values
1755 *
1756 * Returns the values of TP's TCP/IP and TCP/IPv6 MIB counters.
1757 * Either @v4 or @v6 may be %NULL to skip the corresponding stats.
1758 */
1759void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4,
1760 struct tp_tcp_stats *v6)
1761{
1762 u32 val[TP_MIB_TCP_RXT_SEG_LO - TP_MIB_TCP_OUT_RST + 1];
1763
1764#define STAT_IDX(x) ((TP_MIB_TCP_##x) - TP_MIB_TCP_OUT_RST)
1765#define STAT(x) val[STAT_IDX(x)]
1766#define STAT64(x) (((u64)STAT(x##_HI) << 32) | STAT(x##_LO))
1767
1768 if (v4) {
1769 t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, val,
1770 ARRAY_SIZE(val), TP_MIB_TCP_OUT_RST);
1771 v4->tcpOutRsts = STAT(OUT_RST);
1772 v4->tcpInSegs = STAT64(IN_SEG);
1773 v4->tcpOutSegs = STAT64(OUT_SEG);
1774 v4->tcpRetransSegs = STAT64(RXT_SEG);
1775 }
1776 if (v6) {
1777 t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, val,
1778 ARRAY_SIZE(val), TP_MIB_TCP_V6OUT_RST);
1779 v6->tcpOutRsts = STAT(OUT_RST);
1780 v6->tcpInSegs = STAT64(IN_SEG);
1781 v6->tcpOutSegs = STAT64(OUT_SEG);
1782 v6->tcpRetransSegs = STAT64(RXT_SEG);
1783 }
1784#undef STAT64
1785#undef STAT
1786#undef STAT_IDX
1787}
1788
1789/**
1790 * t4_tp_get_err_stats - read TP's error MIB counters
1791 * @adap: the adapter
1792 * @st: holds the counter values
1793 *
1794 * Returns the values of TP's error counters.
1795 */
1796void t4_tp_get_err_stats(struct adapter *adap, struct tp_err_stats *st)
1797{
1798 t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, st->macInErrs,
1799 12, TP_MIB_MAC_IN_ERR_0);
1800 t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, st->tnlCongDrops,
1801 8, TP_MIB_TNL_CNG_DROP_0);
1802 t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, st->tnlTxDrops,
1803 4, TP_MIB_TNL_DROP_0);
1804 t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, st->ofldVlanDrops,
1805 4, TP_MIB_OFD_VLN_DROP_0);
1806 t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, st->tcp6InErrs,
1807 4, TP_MIB_TCP_V6IN_ERR_0);
1808 t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, &st->ofldNoNeigh,
1809 2, TP_MIB_OFD_ARP_DROP);
1810}
1811
1812/**
1813 * t4_read_mtu_tbl - returns the values in the HW path MTU table
1814 * @adap: the adapter
1815 * @mtus: where to store the MTU values
1816 * @mtu_log: where to store the MTU base-2 log (may be %NULL)
1817 *
1818 * Reads the HW path MTU table.
1819 */
1820void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log)
1821{
1822 u32 v;
1823 int i;
1824
1825 for (i = 0; i < NMTUS; ++i) {
1826 t4_write_reg(adap, TP_MTU_TABLE,
1827 MTUINDEX(0xff) | MTUVALUE(i));
1828 v = t4_read_reg(adap, TP_MTU_TABLE);
1829 mtus[i] = MTUVALUE_GET(v);
1830 if (mtu_log)
1831 mtu_log[i] = MTUWIDTH_GET(v);
1832 }
1833}
1834
1835/**
1836 * init_cong_ctrl - initialize congestion control parameters
1837 * @a: the alpha values for congestion control
1838 * @b: the beta values for congestion control
1839 *
1840 * Initialize the congestion control parameters.
1841 */
1842static void __devinit init_cong_ctrl(unsigned short *a, unsigned short *b)
1843{
1844 a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1;
1845 a[9] = 2;
1846 a[10] = 3;
1847 a[11] = 4;
1848 a[12] = 5;
1849 a[13] = 6;
1850 a[14] = 7;
1851 a[15] = 8;
1852 a[16] = 9;
1853 a[17] = 10;
1854 a[18] = 14;
1855 a[19] = 17;
1856 a[20] = 21;
1857 a[21] = 25;
1858 a[22] = 30;
1859 a[23] = 35;
1860 a[24] = 45;
1861 a[25] = 60;
1862 a[26] = 80;
1863 a[27] = 100;
1864 a[28] = 200;
1865 a[29] = 300;
1866 a[30] = 400;
1867 a[31] = 500;
1868
1869 b[0] = b[1] = b[2] = b[3] = b[4] = b[5] = b[6] = b[7] = b[8] = 0;
1870 b[9] = b[10] = 1;
1871 b[11] = b[12] = 2;
1872 b[13] = b[14] = b[15] = b[16] = 3;
1873 b[17] = b[18] = b[19] = b[20] = b[21] = 4;
1874 b[22] = b[23] = b[24] = b[25] = b[26] = b[27] = 5;
1875 b[28] = b[29] = 6;
1876 b[30] = b[31] = 7;
1877}
1878
1879/* The minimum additive increment value for the congestion control table */
1880#define CC_MIN_INCR 2U
1881
1882/**
1883 * t4_load_mtus - write the MTU and congestion control HW tables
1884 * @adap: the adapter
1885 * @mtus: the values for the MTU table
1886 * @alpha: the values for the congestion control alpha parameter
1887 * @beta: the values for the congestion control beta parameter
1888 *
1889 * Write the HW MTU table with the supplied MTUs and the high-speed
1890 * congestion control table with the supplied alpha, beta, and MTUs.
1891 * We write the two tables together because the additive increments
1892 * depend on the MTUs.
1893 */
1894void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
1895 const unsigned short *alpha, const unsigned short *beta)
1896{
1897 static const unsigned int avg_pkts[NCCTRL_WIN] = {
1898 2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
1899 896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
1900 28672, 40960, 57344, 81920, 114688, 163840, 229376
1901 };
1902
1903 unsigned int i, w;
1904
1905 for (i = 0; i < NMTUS; ++i) {
1906 unsigned int mtu = mtus[i];
1907 unsigned int log2 = fls(mtu);
1908
1909 if (!(mtu & ((1 << log2) >> 2))) /* round */
1910 log2--;
1911 t4_write_reg(adap, TP_MTU_TABLE, MTUINDEX(i) |
1912 MTUWIDTH(log2) | MTUVALUE(mtu));
1913
1914 for (w = 0; w < NCCTRL_WIN; ++w) {
1915 unsigned int inc;
1916
1917 inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
1918 CC_MIN_INCR);
1919
1920 t4_write_reg(adap, TP_CCTRL_TABLE, (i << 21) |
1921 (w << 16) | (beta[w] << 13) | inc);
1922 }
1923 }
1924}
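
/*
 * Worked example of the increment computed above, with hypothetical
 * inputs: for mtu = 1500, alpha[w] = 2, and avg_pkts[w] = 896,
 * inc = max((1500 - 40) * 2 / 896, CC_MIN_INCR) = max(3, 2) = 3.
 */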
1925
1926/**
1927 * t4_set_trace_filter - configure one of the tracing filters
1928 * @adap: the adapter
1929 * @tp: the desired trace filter parameters
1930 * @idx: which filter to configure
1931 * @enable: whether to enable or disable the filter
1932 *
1933 * Configures one of the tracing filters available in HW. If @enable is
1934 * %0, @tp is not examined and may be %NULL.
1935 */
1936int t4_set_trace_filter(struct adapter *adap, const struct trace_params *tp,
1937 int idx, int enable)
1938{
1939 int i, ofst = idx * 4;
1940 u32 data_reg, mask_reg, cfg;
1941 u32 multitrc = TRCMULTIFILTER;
1942
1943 if (!enable) {
1944 t4_write_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A + ofst, 0);
1945 goto out;
1946 }
1947
1948 if (tp->port > 11 || tp->invert > 1 || tp->skip_len > 0x1f ||
1949 tp->skip_ofst > 0x1f || tp->min_len > 0x1ff ||
1950 tp->snap_len > 9600 || (idx && tp->snap_len > 256))
1951 return -EINVAL;
1952
1953 if (tp->snap_len > 256) { /* must be tracer 0 */
1954 if ((t4_read_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A + 4) |
1955 t4_read_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A + 8) |
1956 t4_read_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A + 12)) & TFEN)
1957 return -EINVAL; /* other tracers are enabled */
1958 multitrc = 0;
1959 } else if (idx) {
1960 i = t4_read_reg(adap, MPS_TRC_FILTER_MATCH_CTL_B);
1961 if (TFCAPTUREMAX_GET(i) > 256 &&
1962 (t4_read_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A) & TFEN))
1963 return -EINVAL;
1964 }
1965
1966 /* stop the tracer we'll be changing */
1967 t4_write_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A + ofst, 0);
1968
1969 /* disable tracing globally if running in the wrong single/multi mode */
1970 cfg = t4_read_reg(adap, MPS_TRC_CFG);
1971 if ((cfg & TRCEN) && multitrc != (cfg & TRCMULTIFILTER)) {
1972 t4_write_reg(adap, MPS_TRC_CFG, cfg ^ TRCEN);
1973 t4_read_reg(adap, MPS_TRC_CFG); /* flush */
1974 msleep(1);
1975 if (!(t4_read_reg(adap, MPS_TRC_CFG) & TRCFIFOEMPTY))
1976 return -ETIMEDOUT;
1977 }
1978 /*
1979 * At this point tracing is either disabled or enabled in the
1980 * correct single/multi-filter mode.
1981 */
1982
1983 idx *= (MPS_TRC_FILTER1_MATCH - MPS_TRC_FILTER0_MATCH);
1984 data_reg = MPS_TRC_FILTER0_MATCH + idx;
1985 mask_reg = MPS_TRC_FILTER0_DONT_CARE + idx;
1986
1987 for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) {
1988 t4_write_reg(adap, data_reg, tp->data[i]);
1989 t4_write_reg(adap, mask_reg, ~tp->mask[i]);
1990 }
1991 t4_write_reg(adap, MPS_TRC_FILTER_MATCH_CTL_B + ofst,
1992 TFCAPTUREMAX(tp->snap_len) |
1993 TFMINPKTSIZE(tp->min_len));
1994 t4_write_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A + ofst,
1995 TFOFFSET(tp->skip_ofst) | TFLENGTH(tp->skip_len) |
1996 TFPORT(tp->port) | TFEN |
1997 (tp->invert ? TFINVERTMATCH : 0));
1998
1999 cfg &= ~TRCMULTIFILTER;
2000 t4_write_reg(adap, MPS_TRC_CFG, cfg | TRCEN | multitrc);
2001out: t4_read_reg(adap, MPS_TRC_CFG); /* flush */
2002 return 0;
2003}
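
/*
 * Illustrative sketch: enabling tracer 0 to capture whole frames from
 * port 0. The values are hypothetical; a zeroed mask[] makes every
 * byte a don't-care, and only tracer 0 may snap more than 256 bytes:
 *
 *	struct trace_params tp = { 0 };
 *	tp.snap_len = 9600;
 *	tp.port = 0;
 *	ret = t4_set_trace_filter(adap, &tp, 0, 1);
 */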
2004
2005/**
2006 * t4_get_trace_filter - query one of the tracing filters
2007 * @adap: the adapter
2008 * @tp: the current trace filter parameters
2009 * @idx: which trace filter to query
2010 * @enabled: non-zero if the filter is enabled
2011 *
2012 * Returns the current settings of one of the HW tracing filters.
2013 */
2014void t4_get_trace_filter(struct adapter *adap, struct trace_params *tp, int idx,
2015 int *enabled)
2016{
2017 u32 ctla, ctlb;
2018 int i, ofst = idx * 4;
2019 u32 data_reg, mask_reg;
2020
2021 ctla = t4_read_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A + ofst);
2022 ctlb = t4_read_reg(adap, MPS_TRC_FILTER_MATCH_CTL_B + ofst);
2023
2024 *enabled = !!(ctla & TFEN);
2025 tp->snap_len = TFCAPTUREMAX_GET(ctlb);
2026 tp->min_len = TFMINPKTSIZE_GET(ctlb);
2027 tp->skip_ofst = TFOFFSET_GET(ctla);
2028 tp->skip_len = TFLENGTH_GET(ctla);
2029 tp->invert = !!(ctla & TFINVERTMATCH);
2030 tp->port = TFPORT_GET(ctla);
2031
2032 ofst = (MPS_TRC_FILTER1_MATCH - MPS_TRC_FILTER0_MATCH) * idx;
2033 data_reg = MPS_TRC_FILTER0_MATCH + ofst;
2034 mask_reg = MPS_TRC_FILTER0_DONT_CARE + ofst;
2035
2036 for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) {
2037 tp->mask[i] = ~t4_read_reg(adap, mask_reg);
2038 tp->data[i] = t4_read_reg(adap, data_reg) & tp->mask[i];
2039 }
2040}
2041
2042/**
2043 * get_mps_bg_map - return the buffer groups associated with a port
2044 * @adap: the adapter
2045 * @idx: the port index
2046 *
2047 * Returns a bitmap indicating which MPS buffer groups are associated
2048 * with the given port. Bit i is set if buffer group i is used by the
2049 * port.
2050 */
2051static unsigned int get_mps_bg_map(struct adapter *adap, int idx)
2052{
2053 u32 n = NUMPORTS_GET(t4_read_reg(adap, MPS_CMN_CTL));
2054
2055 if (n == 0)
2056 return idx == 0 ? 0xf : 0;
2057 if (n == 1)
2058 return idx < 2 ? (3 << (2 * idx)) : 0;
2059 return 1 << idx;
2060}
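
/*
 * Example of the mapping above: with one port (n == 0) port 0 owns all
 * four buffer groups (0xf); with two ports (n == 1) port 0 gets groups
 * {0,1} (0x3) and port 1 gets {2,3} (0xc); with four ports each port i
 * gets group i alone (1 << i).
 */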
2061
2062/**
2063 * t4_get_port_stats - collect port statistics
2064 * @adap: the adapter
2065 * @idx: the port index
2066 * @p: the stats structure to fill
2067 *
2068 * Collect statistics related to the given port from HW.
2069 */
2070void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p)
2071{
2072 u32 bgmap = get_mps_bg_map(adap, idx);
2073
2074#define GET_STAT(name) \
2075 t4_read_reg64(adap, PORT_REG(idx, MPS_PORT_STAT_##name##_L))
2076#define GET_STAT_COM(name) t4_read_reg64(adap, MPS_STAT_##name##_L)
2077
2078 p->tx_octets = GET_STAT(TX_PORT_BYTES);
2079 p->tx_frames = GET_STAT(TX_PORT_FRAMES);
2080 p->tx_bcast_frames = GET_STAT(TX_PORT_BCAST);
2081 p->tx_mcast_frames = GET_STAT(TX_PORT_MCAST);
2082 p->tx_ucast_frames = GET_STAT(TX_PORT_UCAST);
2083 p->tx_error_frames = GET_STAT(TX_PORT_ERROR);
2084 p->tx_frames_64 = GET_STAT(TX_PORT_64B);
2085 p->tx_frames_65_127 = GET_STAT(TX_PORT_65B_127B);
2086 p->tx_frames_128_255 = GET_STAT(TX_PORT_128B_255B);
2087 p->tx_frames_256_511 = GET_STAT(TX_PORT_256B_511B);
2088 p->tx_frames_512_1023 = GET_STAT(TX_PORT_512B_1023B);
2089 p->tx_frames_1024_1518 = GET_STAT(TX_PORT_1024B_1518B);
2090 p->tx_frames_1519_max = GET_STAT(TX_PORT_1519B_MAX);
2091 p->tx_drop = GET_STAT(TX_PORT_DROP);
2092 p->tx_pause = GET_STAT(TX_PORT_PAUSE);
2093 p->tx_ppp0 = GET_STAT(TX_PORT_PPP0);
2094 p->tx_ppp1 = GET_STAT(TX_PORT_PPP1);
2095 p->tx_ppp2 = GET_STAT(TX_PORT_PPP2);
2096 p->tx_ppp3 = GET_STAT(TX_PORT_PPP3);
2097 p->tx_ppp4 = GET_STAT(TX_PORT_PPP4);
2098 p->tx_ppp5 = GET_STAT(TX_PORT_PPP5);
2099 p->tx_ppp6 = GET_STAT(TX_PORT_PPP6);
2100 p->tx_ppp7 = GET_STAT(TX_PORT_PPP7);
2101
2102 p->rx_octets = GET_STAT(RX_PORT_BYTES);
2103 p->rx_frames = GET_STAT(RX_PORT_FRAMES);
2104 p->rx_bcast_frames = GET_STAT(RX_PORT_BCAST);
2105 p->rx_mcast_frames = GET_STAT(RX_PORT_MCAST);
2106 p->rx_ucast_frames = GET_STAT(RX_PORT_UCAST);
2107 p->rx_too_long = GET_STAT(RX_PORT_MTU_ERROR);
2108 p->rx_jabber = GET_STAT(RX_PORT_MTU_CRC_ERROR);
2109 p->rx_fcs_err = GET_STAT(RX_PORT_CRC_ERROR);
2110 p->rx_len_err = GET_STAT(RX_PORT_LEN_ERROR);
2111 p->rx_symbol_err = GET_STAT(RX_PORT_SYM_ERROR);
2112 p->rx_runt = GET_STAT(RX_PORT_LESS_64B);
2113 p->rx_frames_64 = GET_STAT(RX_PORT_64B);
2114 p->rx_frames_65_127 = GET_STAT(RX_PORT_65B_127B);
2115 p->rx_frames_128_255 = GET_STAT(RX_PORT_128B_255B);
2116 p->rx_frames_256_511 = GET_STAT(RX_PORT_256B_511B);
2117 p->rx_frames_512_1023 = GET_STAT(RX_PORT_512B_1023B);
2118 p->rx_frames_1024_1518 = GET_STAT(RX_PORT_1024B_1518B);
2119 p->rx_frames_1519_max = GET_STAT(RX_PORT_1519B_MAX);
2120 p->rx_pause = GET_STAT(RX_PORT_PAUSE);
2121 p->rx_ppp0 = GET_STAT(RX_PORT_PPP0);
2122 p->rx_ppp1 = GET_STAT(RX_PORT_PPP1);
2123 p->rx_ppp2 = GET_STAT(RX_PORT_PPP2);
2124 p->rx_ppp3 = GET_STAT(RX_PORT_PPP3);
2125 p->rx_ppp4 = GET_STAT(RX_PORT_PPP4);
2126 p->rx_ppp5 = GET_STAT(RX_PORT_PPP5);
2127 p->rx_ppp6 = GET_STAT(RX_PORT_PPP6);
2128 p->rx_ppp7 = GET_STAT(RX_PORT_PPP7);
2129
2130 p->rx_ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_DROP_FRAME) : 0;
2131 p->rx_ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_DROP_FRAME) : 0;
2132 p->rx_ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_DROP_FRAME) : 0;
2133 p->rx_ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_DROP_FRAME) : 0;
2134 p->rx_trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_TRUNC_FRAME) : 0;
2135 p->rx_trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_TRUNC_FRAME) : 0;
2136 p->rx_trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_TRUNC_FRAME) : 0;
2137 p->rx_trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_TRUNC_FRAME) : 0;
2138
2139#undef GET_STAT
2140#undef GET_STAT_COM
2141}
2142
2143/**
2144 * t4_get_lb_stats - collect loopback port statistics
2145 * @adap: the adapter
2146 * @idx: the loopback port index
2147 * @p: the stats structure to fill
2148 *
2149 * Return HW statistics for the given loopback port.
2150 */
2151void t4_get_lb_stats(struct adapter *adap, int idx, struct lb_port_stats *p)
2152{
2153 u32 bgmap = get_mps_bg_map(adap, idx);
2154
2155#define GET_STAT(name) \
2156 t4_read_reg64(adap, PORT_REG(idx, MPS_PORT_STAT_LB_PORT_##name##_L))
2157#define GET_STAT_COM(name) t4_read_reg64(adap, MPS_STAT_##name##_L)
2158
2159 p->octets = GET_STAT(BYTES);
2160 p->frames = GET_STAT(FRAMES);
2161 p->bcast_frames = GET_STAT(BCAST);
2162 p->mcast_frames = GET_STAT(MCAST);
2163 p->ucast_frames = GET_STAT(UCAST);
2164 p->error_frames = GET_STAT(ERROR);
2165
2166 p->frames_64 = GET_STAT(64B);
2167 p->frames_65_127 = GET_STAT(65B_127B);
2168 p->frames_128_255 = GET_STAT(128B_255B);
2169 p->frames_256_511 = GET_STAT(256B_511B);
2170 p->frames_512_1023 = GET_STAT(512B_1023B);
2171 p->frames_1024_1518 = GET_STAT(1024B_1518B);
2172 p->frames_1519_max = GET_STAT(1519B_MAX);
2173 p->drop = t4_read_reg(adap, PORT_REG(idx,
2174 MPS_PORT_STAT_LB_PORT_DROP_FRAMES));
2175
2176 p->ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_DROP_FRAME) : 0;
2177 p->ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_DROP_FRAME) : 0;
2178 p->ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_DROP_FRAME) : 0;
2179 p->ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_LB_DROP_FRAME) : 0;
2180 p->trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_TRUNC_FRAME) : 0;
2181 p->trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_TRUNC_FRAME) : 0;
2182 p->trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_TRUNC_FRAME) : 0;
2183 p->trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_LB_TRUNC_FRAME) : 0;
2184
2185#undef GET_STAT
2186#undef GET_STAT_COM
2187}
2188
2189/**
2190 * t4_wol_magic_enable - enable/disable magic packet WoL
2191 * @adap: the adapter
2192 * @port: the physical port index
2193 * @addr: MAC address expected in magic packets, %NULL to disable
2194 *
2195 * Enables/disables magic packet wake-on-LAN for the selected port.
2196 */
2197void t4_wol_magic_enable(struct adapter *adap, unsigned int port,
2198 const u8 *addr)
2199{
2200 if (addr) {
2201 t4_write_reg(adap, PORT_REG(port, XGMAC_PORT_MAGIC_MACID_LO),
2202 (addr[2] << 24) | (addr[3] << 16) |
2203 (addr[4] << 8) | addr[5]);
2204 t4_write_reg(adap, PORT_REG(port, XGMAC_PORT_MAGIC_MACID_HI),
2205 (addr[0] << 8) | addr[1]);
2206 }
2207 t4_set_reg_field(adap, PORT_REG(port, XGMAC_PORT_CFG2), MAGICEN,
2208 addr ? MAGICEN : 0);
2209}
2210
2211/**
2212 * t4_wol_pat_enable - enable/disable pattern-based WoL
2213 * @adap: the adapter
2214 * @port: the physical port index
2215 * @map: bitmap of which HW pattern filters to set
2216 * @mask0: byte mask for bytes 0-63 of a packet
2217 * @mask1: byte mask for bytes 64-127 of a packet
2218 * @crc: Ethernet CRC for selected bytes
2219 * @enable: enable/disable switch
2220 *
2221 * Sets the pattern filters indicated in @map to mask out the bytes
2222 * specified in @mask0/@mask1 in received packets and to compare the CRC
2223 * of the resulting packet against @crc. If @enable is %true, pattern-based
2224 * WoL is enabled, otherwise disabled.
2225 */
2226int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map,
2227 u64 mask0, u64 mask1, unsigned int crc, bool enable)
2228{
2229 int i;
2230
2231 if (!enable) {
2232 t4_set_reg_field(adap, PORT_REG(port, XGMAC_PORT_CFG2),
2233 PATEN, 0);
2234 return 0;
2235 }
2236 if (map > 0xff)
2237 return -EINVAL;
2238
2239#define EPIO_REG(name) PORT_REG(port, XGMAC_PORT_EPIO_##name)
2240
2241 t4_write_reg(adap, EPIO_REG(DATA1), mask0 >> 32);
2242 t4_write_reg(adap, EPIO_REG(DATA2), mask1);
2243 t4_write_reg(adap, EPIO_REG(DATA3), mask1 >> 32);
2244
2245 for (i = 0; i < NWOL_PAT; i++, map >>= 1) {
2246 if (!(map & 1))
2247 continue;
2248
2249 /* write byte masks */
2250 t4_write_reg(adap, EPIO_REG(DATA0), mask0);
2251 t4_write_reg(adap, EPIO_REG(OP), ADDRESS(i) | EPIOWR);
2252 t4_read_reg(adap, EPIO_REG(OP)); /* flush */
2253 if (t4_read_reg(adap, EPIO_REG(OP)) & BUSY)
2254 return -ETIMEDOUT;
2255
2256 /* write CRC */
2257 t4_write_reg(adap, EPIO_REG(DATA0), crc);
2258 t4_write_reg(adap, EPIO_REG(OP), ADDRESS(i + 32) | EPIOWR);
2259 t4_read_reg(adap, EPIO_REG(OP)); /* flush */
2260 if (t4_read_reg(adap, EPIO_REG(OP)) & BUSY)
2261 return -ETIMEDOUT;
2262 }
2263#undef EPIO_REG
2264
2265 t4_set_reg_field(adap, PORT_REG(port, XGMAC_PORT_CFG2), 0, PATEN);
2266 return 0;
2267}
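
/*
 * Illustrative sketch: arming pattern filter 0 (bit 0 of @map) to CRC
 * the first 64 bytes of each packet. The all-ones mask and the "crc"
 * value here are hypothetical placeholders:
 *
 *	ret = t4_wol_pat_enable(adap, port, 1, ~0ULL, 0, crc, true);
 */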
2268
2269#define INIT_CMD(var, cmd, rd_wr) do { \
2270 (var).op_to_write = htonl(FW_CMD_OP(FW_##cmd##_CMD) | \
2271 FW_CMD_REQUEST | FW_CMD_##rd_wr); \
2272 (var).retval_len16 = htonl(FW_LEN16(var)); \
2273} while (0)
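
/*
 * For reference, INIT_CMD(c, RESET, WRITE) expands to:
 *
 *	c.op_to_write = htonl(FW_CMD_OP(FW_RESET_CMD) |
 *			      FW_CMD_REQUEST | FW_CMD_WRITE);
 *	c.retval_len16 = htonl(FW_LEN16(c));
 */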
2274
2275/**
2276 * t4_mdio_rd - read a PHY register through MDIO
2277 * @adap: the adapter
2278 * @mbox: mailbox to use for the FW command
2279 * @phy_addr: the PHY address
2280 * @mmd: the PHY MMD to access (0 for clause 22 PHYs)
2281 * @reg: the register to read
2282 * @valp: where to store the value
2283 *
2284 * Issues a FW command through the given mailbox to read a PHY register.
2285 */
2286int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
2287 unsigned int mmd, unsigned int reg, u16 *valp)
2288{
2289 int ret;
2290 struct fw_ldst_cmd c;
2291
2292 memset(&c, 0, sizeof(c));
2293 c.op_to_addrspace = htonl(FW_CMD_OP(FW_LDST_CMD) | FW_CMD_REQUEST |
2294 FW_CMD_READ | FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO));
2295 c.cycles_to_len16 = htonl(FW_LEN16(c));
2296 c.u.mdio.paddr_mmd = htons(FW_LDST_CMD_PADDR(phy_addr) |
2297 FW_LDST_CMD_MMD(mmd));
2298 c.u.mdio.raddr = htons(reg);
2299
2300 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
2301 if (ret == 0)
2302 *valp = ntohs(c.u.mdio.rval);
2303 return ret;
2304}
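
/*
 * Illustrative sketch: reading a clause-45 register, here the
 * hypothetical case of register 1 of MMD 1 (PMA/PMD) on the PHY at
 * address 0:
 *
 *	u16 val;
 *	ret = t4_mdio_rd(adap, mbox, 0, 1, 1, &val);
 */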
2305
2306/**
2307 * t4_mdio_wr - write a PHY register through MDIO
2308 * @adap: the adapter
2309 * @mbox: mailbox to use for the FW command
2310 * @phy_addr: the PHY address
2311 * @mmd: the PHY MMD to access (0 for clause 22 PHYs)
2312 * @reg: the register to write
2313 * @valp: value to write
2314 *
2315 * Issues a FW command through the given mailbox to write a PHY register.
2316 */
2317int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
2318 unsigned int mmd, unsigned int reg, u16 val)
2319{
2320 struct fw_ldst_cmd c;
2321
2322 memset(&c, 0, sizeof(c));
2323 c.op_to_addrspace = htonl(FW_CMD_OP(FW_LDST_CMD) | FW_CMD_REQUEST |
2324 FW_CMD_WRITE | FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO));
2325 c.cycles_to_len16 = htonl(FW_LEN16(c));
2326 c.u.mdio.paddr_mmd = htons(FW_LDST_CMD_PADDR(phy_addr) |
2327 FW_LDST_CMD_MMD(mmd));
2328 c.u.mdio.raddr = htons(reg);
2329 c.u.mdio.rval = htons(val);
2330
2331 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2332}
2333
2334/**
2335 * t4_fw_hello - establish communication with FW
2336 * @adap: the adapter
2337 * @mbox: mailbox to use for the FW command
2338 * @evt_mbox: mailbox to receive async FW events
2339 * @master: specifies the caller's willingness to be the device master
2340 * @state: returns the current device state
2341 *
2342 * Issues a command to establish communication with FW.
2343 */
2344int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox,
2345 enum dev_master master, enum dev_state *state)
2346{
2347 int ret;
2348 struct fw_hello_cmd c;
2349
2350 INIT_CMD(c, HELLO, WRITE);
2351 c.err_to_mbasyncnot = htonl(
2352 FW_HELLO_CMD_MASTERDIS(master == MASTER_CANT) |
2353 FW_HELLO_CMD_MASTERFORCE(master == MASTER_MUST) |
2354 FW_HELLO_CMD_MBMASTER(master == MASTER_MUST ? mbox : 0xff) |
2355 FW_HELLO_CMD_MBASYNCNOT(evt_mbox));
2356
2357 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
2358 if (ret == 0 && state) {
2359 u32 v = ntohl(c.err_to_mbasyncnot);
2360 if (v & FW_HELLO_CMD_INIT)
2361 *state = DEV_STATE_INIT;
2362 else if (v & FW_HELLO_CMD_ERR)
2363 *state = DEV_STATE_ERR;
2364 else
2365 *state = DEV_STATE_UNINIT;
2366 }
2367 return ret;
2368}
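
/*
 * Illustrative sketch: contacting FW while willing, but not insisting,
 * to be master, then acting on the reported state. MASTER_MAY is
 * assumed to be the "don't care" value of enum dev_master in cxgb4.h:
 *
 *	enum dev_state state;
 *	ret = t4_fw_hello(adap, mbox, mbox, MASTER_MAY, &state);
 *	if (!ret && state == DEV_STATE_ERR)
 *		return -EIO;
 */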
2369
2370/**
2371 * t4_fw_bye - end communication with FW
2372 * @adap: the adapter
2373 * @mbox: mailbox to use for the FW command
2374 *
2375 * Issues a command to terminate communication with FW.
2376 */
2377int t4_fw_bye(struct adapter *adap, unsigned int mbox)
2378{
2379 struct fw_bye_cmd c;
2380
2381 INIT_CMD(c, BYE, WRITE);
2382 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2383}
2384
2385/**
2386 * t4_early_init - ask FW to initialize the device
2387 * @adap: the adapter
2388 * @mbox: mailbox to use for the FW command
2389 *
2390 * Issues a command to FW to partially initialize the device. This
2391 * performs initialization that generally doesn't depend on user input.
2392 */
2393int t4_early_init(struct adapter *adap, unsigned int mbox)
2394{
2395 struct fw_initialize_cmd c;
2396
2397 INIT_CMD(c, INITIALIZE, WRITE);
2398 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2399}
2400
2401/**
2402 * t4_fw_reset - issue a reset to FW
2403 * @adap: the adapter
2404 * @mbox: mailbox to use for the FW command
2405 * @reset: specifies the type of reset to perform
2406 *
2407 * Issues a reset command of the specified type to FW.
2408 */
2409int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset)
2410{
2411 struct fw_reset_cmd c;
2412
2413 INIT_CMD(c, RESET, WRITE);
2414 c.val = htonl(reset);
2415 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2416}
2417
2418/**
2419 * t4_query_params - query FW or device parameters
2420 * @adap: the adapter
2421 * @mbox: mailbox to use for the FW command
2422 * @pf: the PF
2423 * @vf: the VF
2424 * @nparams: the number of parameters
2425 * @params: the parameter names
2426 * @val: the parameter values
2427 *
2428 * Reads the value of FW or device parameters. Up to 7 parameters can be
2429 * queried at once.
2430 */
2431int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
2432 unsigned int vf, unsigned int nparams, const u32 *params,
2433 u32 *val)
2434{
2435 int i, ret;
2436 struct fw_params_cmd c;
2437 __be32 *p = &c.param[0].mnem;
2438
2439 if (nparams > 7)
2440 return -EINVAL;
2441
2442 memset(&c, 0, sizeof(c));
2443 c.op_to_vfn = htonl(FW_CMD_OP(FW_PARAMS_CMD) | FW_CMD_REQUEST |
2444 FW_CMD_READ | FW_PARAMS_CMD_PFN(pf) |
2445 FW_PARAMS_CMD_VFN(vf));
2446 c.retval_len16 = htonl(FW_LEN16(c));
2447 for (i = 0; i < nparams; i++, p += 2)
2448 *p = htonl(*params++);
2449
2450 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
2451 if (ret == 0)
2452 for (i = 0, p = &c.param[0].val; i < nparams; i++, p += 2)
2453 *val++ = ntohl(*p);
2454 return ret;
2455}
2456
2457/**
2458 * t4_set_params - sets FW or device parameters
2459 * @adap: the adapter
2460 * @mbox: mailbox to use for the FW command
2461 * @pf: the PF
2462 * @vf: the VF
2463 * @nparams: the number of parameters
2464 * @params: the parameter names
2465 * @val: the parameter values
2466 *
2467 * Sets the value of FW or device parameters. Up to 7 parameters can be
2468 * specified at once.
2469 */
2470int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
2471 unsigned int vf, unsigned int nparams, const u32 *params,
2472 const u32 *val)
2473{
2474 struct fw_params_cmd c;
2475 __be32 *p = &c.param[0].mnem;
2476
2477 if (nparams > 7)
2478 return -EINVAL;
2479
2480 memset(&c, 0, sizeof(c));
2481 c.op_to_vfn = htonl(FW_CMD_OP(FW_PARAMS_CMD) | FW_CMD_REQUEST |
2482 FW_CMD_WRITE | FW_PARAMS_CMD_PFN(pf) |
2483 FW_PARAMS_CMD_VFN(vf));
2484 c.retval_len16 = htonl(FW_LEN16(c));
2485 while (nparams--) {
2486 *p++ = htonl(*params++);
2487 *p++ = htonl(*val++);
2488 }
2489
2490 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2491}
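
/*
 * Illustrative sketch: reading one FW parameter and writing it back,
 * assuming "param" has been filled in with a mnemonic built from the
 * FW_PARAMS_* macros in t4fw_api.h:
 *
 *	u32 param, val;
 *	ret = t4_query_params(adap, mbox, pf, 0, 1, &param, &val);
 *	if (!ret)
 *		ret = t4_set_params(adap, mbox, pf, 0, 1, &param, &val);
 */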
2492
2493/**
2494 * t4_cfg_pfvf - configure PF/VF resource limits
2495 * @adap: the adapter
2496 * @mbox: mailbox to use for the FW command
2497 * @pf: the PF being configured
2498 * @vf: the VF being configured
2499 * @txq: the max number of egress queues
2500 * @txq_eth_ctrl: the max number of egress Ethernet or control queues
2501 * @rxqi: the max number of interrupt-capable ingress queues
2502 * @rxq: the max number of interruptless ingress queues
2503 * @tc: the PCI traffic class
2504 * @vi: the max number of virtual interfaces
2505 * @cmask: the channel access rights mask for the PF/VF
2506 * @pmask: the port access rights mask for the PF/VF
2507 * @nexact: the maximum number of exact MPS filters
2508 * @rcaps: read capabilities
2509 * @wxcaps: write/execute capabilities
2510 *
2511 * Configures resource limits and capabilities for a physical or virtual
2512 * function.
2513 */
2514int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf,
2515 unsigned int vf, unsigned int txq, unsigned int txq_eth_ctrl,
2516 unsigned int rxqi, unsigned int rxq, unsigned int tc,
2517 unsigned int vi, unsigned int cmask, unsigned int pmask,
2518 unsigned int nexact, unsigned int rcaps, unsigned int wxcaps)
2519{
2520 struct fw_pfvf_cmd c;
2521
2522 memset(&c, 0, sizeof(c));
2523 c.op_to_vfn = htonl(FW_CMD_OP(FW_PFVF_CMD) | FW_CMD_REQUEST |
2524 FW_CMD_WRITE | FW_PFVF_CMD_PFN(pf) |
2525 FW_PFVF_CMD_VFN(vf));
2526 c.retval_len16 = htonl(FW_LEN16(c));
2527 c.niqflint_niq = htonl(FW_PFVF_CMD_NIQFLINT(rxqi) |
2528 FW_PFVF_CMD_NIQ(rxq));
2529 c.cmask_to_neq = htonl(FW_PFVF_CMD_CMASK(cmask) |
2530 FW_PFVF_CMD_PMASK(pmask) |
2531 FW_PFVF_CMD_NEQ(txq));
2532 c.tc_to_nexactf = htonl(FW_PFVF_CMD_TC(tc) | FW_PFVF_CMD_NVI(vi) |
2533 FW_PFVF_CMD_NEXACTF(nexact));
2534 c.r_caps_to_nethctrl = htonl(FW_PFVF_CMD_R_CAPS(rcaps) |
2535 FW_PFVF_CMD_WX_CAPS(wxcaps) |
2536 FW_PFVF_CMD_NETHCTRL(txq_eth_ctrl));
2537 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2538}
2539
2540/**
2541 * t4_alloc_vi - allocate a virtual interface
2542 * @adap: the adapter
2543 * @mbox: mailbox to use for the FW command
2544 * @port: physical port associated with the VI
2545 * @pf: the PF owning the VI
2546 * @vf: the VF owning the VI
2547 * @nmac: number of MAC addresses needed (1 to 5)
2548 * @mac: the MAC addresses of the VI
2549 * @rss_size: size of RSS table slice associated with this VI
2550 *
2551 * Allocates a virtual interface for the given physical port. If @mac is
2552 * not %NULL it contains the MAC addresses of the VI as assigned by FW.
2553 * @mac should be large enough to hold @nmac Ethernet addresses; they are
2554 * stored consecutively so the space needed is @nmac * 6 bytes.
2555 * Returns a negative error number or the non-negative VI id.
2556 */
2557int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
2558 unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac,
2559 unsigned int *rss_size)
2560{
2561 int ret;
2562 struct fw_vi_cmd c;
2563
2564 memset(&c, 0, sizeof(c));
2565 c.op_to_vfn = htonl(FW_CMD_OP(FW_VI_CMD) | FW_CMD_REQUEST |
2566 FW_CMD_WRITE | FW_CMD_EXEC |
2567 FW_VI_CMD_PFN(pf) | FW_VI_CMD_VFN(vf));
2568 c.alloc_to_len16 = htonl(FW_VI_CMD_ALLOC | FW_LEN16(c));
2569 c.portid_pkd = FW_VI_CMD_PORTID(port);
2570 c.nmac = nmac - 1;
2571
2572 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
2573 if (ret)
2574 return ret;
2575
2576 if (mac) {
2577 memcpy(mac, c.mac, sizeof(c.mac));
2578 switch (nmac) {
2579 case 5:
2580 memcpy(mac + 24, c.nmac3, sizeof(c.nmac3)); /* fall through */
2581 case 4:
2582 memcpy(mac + 18, c.nmac2, sizeof(c.nmac2)); /* fall through */
2583 case 3:
2584 memcpy(mac + 12, c.nmac1, sizeof(c.nmac1)); /* fall through */
2585 case 2:
2586 memcpy(mac + 6, c.nmac0, sizeof(c.nmac0));
2587 }
2588 }
2589 if (rss_size)
2590 *rss_size = FW_VI_CMD_RSSSIZE_GET(ntohs(c.rsssize_pkd));
2591 return ntohs(c.viid_pkd);
2592}
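
/*
 * Illustrative sketch: allocating a VI with a single MAC address on
 * port 0, much as t4_port_init() below does for each port. The PF/VF
 * and mailbox numbers are whatever the caller owns:
 *
 *	u8 mac[6];
 *	unsigned int rss_size;
 *	int viid = t4_alloc_vi(adap, mbox, 0, pf, 0, 1, mac, &rss_size);
 *	if (viid < 0)
 *		return viid;
 */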
2593
2594/**
2595 * t4_free_vi - free a virtual interface
2596 * @adap: the adapter
2597 * @mbox: mailbox to use for the FW command
2598 * @pf: the PF owning the VI
2599 * @vf: the VF owning the VI
2600 * @viid: virtual interface identifier
2601 *
2602 * Free a previously allocated virtual interface.
2603 */
2604int t4_free_vi(struct adapter *adap, unsigned int mbox, unsigned int pf,
2605 unsigned int vf, unsigned int viid)
2606{
2607 struct fw_vi_cmd c;
2608
2609 memset(&c, 0, sizeof(c));
2610 c.op_to_vfn = htonl(FW_CMD_OP(FW_VI_CMD) | FW_CMD_REQUEST |
2611 FW_CMD_EXEC | FW_VI_CMD_PFN(pf) |
2612 FW_VI_CMD_VFN(vf));
2613 c.alloc_to_len16 = htonl(FW_VI_CMD_FREE | FW_LEN16(c));
2614 c.viid_pkd = htons(FW_VI_CMD_VIID(viid));
2615 return t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
2616}
2617
2618/**
2619 * t4_set_rxmode - set Rx properties of a virtual interface
2620 * @adap: the adapter
2621 * @mbox: mailbox to use for the FW command
2622 * @viid: the VI id
2623 * @mtu: the new MTU or -1
2624 * @promisc: 1 to enable promiscuous mode, 0 to disable it, -1 no change
2625 * @all_multi: 1 to enable all-multi mode, 0 to disable it, -1 no change
2626 * @bcast: 1 to enable broadcast Rx, 0 to disable it, -1 no change
2627 * @sleep_ok: if true we may sleep while awaiting command completion
2628 *
2629 * Sets Rx properties of a virtual interface.
2630 */
2631int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
2632 int mtu, int promisc, int all_multi, int bcast, bool sleep_ok)
2633{
2634 struct fw_vi_rxmode_cmd c;
2635
2636 /* convert to FW values */
2637 if (mtu < 0)
2638 mtu = FW_RXMODE_MTU_NO_CHG;
2639 if (promisc < 0)
2640 promisc = FW_VI_RXMODE_CMD_PROMISCEN_MASK;
2641 if (all_multi < 0)
2642 all_multi = FW_VI_RXMODE_CMD_ALLMULTIEN_MASK;
2643 if (bcast < 0)
2644 bcast = FW_VI_RXMODE_CMD_BROADCASTEN_MASK;
2645
2646 memset(&c, 0, sizeof(c));
2647 c.op_to_viid = htonl(FW_CMD_OP(FW_VI_RXMODE_CMD) | FW_CMD_REQUEST |
2648 FW_CMD_WRITE | FW_VI_RXMODE_CMD_VIID(viid));
2649 c.retval_len16 = htonl(FW_LEN16(c));
2650 c.mtu_to_broadcasten = htonl(FW_VI_RXMODE_CMD_MTU(mtu) |
2651 FW_VI_RXMODE_CMD_PROMISCEN(promisc) |
2652 FW_VI_RXMODE_CMD_ALLMULTIEN(all_multi) |
2653 FW_VI_RXMODE_CMD_BROADCASTEN(bcast));
2654 return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
2655}
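
/*
 * Illustrative sketch: enabling promiscuous mode only, leaving MTU,
 * all-multi, and broadcast untouched via the -1 "no change" values:
 *
 *	ret = t4_set_rxmode(adap, mbox, viid, -1, 1, -1, -1, true);
 */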
2656
2657/**
2658 * t4_alloc_mac_filt - allocates exact-match filters for MAC addresses
2659 * @adap: the adapter
2660 * @mbox: mailbox to use for the FW command
2661 * @viid: the VI id
2662 * @free: if true any existing filters for this VI id are first removed
2663 * @naddr: the number of MAC addresses to allocate filters for (up to 7)
2664 * @addr: the MAC address(es)
2665 * @idx: where to store the index of each allocated filter
2666 * @hash: pointer to hash address filter bitmap
2667 * @sleep_ok: call is allowed to sleep
2668 *
2669 * Allocates an exact-match filter for each of the supplied addresses and
2670 * sets it to the corresponding address. If @idx is not %NULL it should
2671 * have at least @naddr entries, each of which will be set to the index of
2672 * the filter allocated for the corresponding MAC address. If a filter
2673 * could not be allocated for an address its index is set to 0xffff.
2674 * If @hash is not %NULL, addresses that fail to allocate an exact filter
2675 * are hashed and the corresponding bits are set in the bitmap at @hash.
2676 *
2677 * Returns a negative error number or the number of filters allocated.
2678 */
2679int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox,
2680 unsigned int viid, bool free, unsigned int naddr,
2681 const u8 **addr, u16 *idx, u64 *hash, bool sleep_ok)
2682{
2683 int i, ret;
2684 struct fw_vi_mac_cmd c;
2685 struct fw_vi_mac_exact *p;
2686
2687 if (naddr > 7)
2688 return -EINVAL;
2689
2690 memset(&c, 0, sizeof(c));
2691 c.op_to_viid = htonl(FW_CMD_OP(FW_VI_MAC_CMD) | FW_CMD_REQUEST |
2692 FW_CMD_WRITE | (free ? FW_CMD_EXEC : 0) |
2693 FW_VI_MAC_CMD_VIID(viid));
2694 c.freemacs_to_len16 = htonl(FW_VI_MAC_CMD_FREEMACS(free) |
2695 FW_CMD_LEN16((naddr + 2) / 2));
2696
2697 for (i = 0, p = c.u.exact; i < naddr; i++, p++) {
2698 p->valid_to_idx = htons(FW_VI_MAC_CMD_VALID |
2699 FW_VI_MAC_CMD_IDX(FW_VI_MAC_ADD_MAC));
2700 memcpy(p->macaddr, addr[i], sizeof(p->macaddr));
2701 }
2702
2703 ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok);
2704 if (ret)
2705 return ret;
2706
2707 for (i = 0, p = c.u.exact; i < naddr; i++, p++) {
2708 u16 index = FW_VI_MAC_CMD_IDX_GET(ntohs(p->valid_to_idx));
2709
2710 if (idx)
2711 idx[i] = index >= NEXACT_MAC ? 0xffff : index;
2712 if (index < NEXACT_MAC)
2713 ret++;
2714 else if (hash)
2715 *hash |= (1 << hash_mac_addr(addr[i]));
2716 }
2717 return ret;
2718}
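
/*
 * Illustrative sketch: installing two unicast addresses and falling
 * back to the inexact hash for any that didn't get an exact filter.
 * "addrs" is a hypothetical array of two MAC address pointers:
 *
 *	u64 hash = 0;
 *	int n = t4_alloc_mac_filt(adap, mbox, viid, false, 2, addrs,
 *				  NULL, &hash, true);
 *	if (n >= 0 && hash)
 *		ret = t4_set_addr_hash(adap, mbox, viid, true, hash, true);
 */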
2719
2720/**
2721 * t4_change_mac - modifies the exact-match filter for a MAC address
2722 * @adap: the adapter
2723 * @mbox: mailbox to use for the FW command
2724 * @viid: the VI id
2725 * @idx: index of existing filter for old value of MAC address, or -1
2726 * @addr: the new MAC address value
2727 * @persist: whether a new MAC allocation should be persistent
2728 * @add_smt: if true also add the address to the HW SMT
2729 *
2730 * Modifies an exact-match filter and sets it to the new MAC address.
2731 * Note that in general it is not possible to modify the value of a given
2732 * filter so the generic way to modify an address filter is to free the one
2733 * being used by the old address value and allocate a new filter for the
2734 * new address value. @idx can be -1 if the address is a new addition.
2735 *
2736 * Returns a negative error number or the index of the filter with the new
2737 * MAC value.
2738 */
2739int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
2740 int idx, const u8 *addr, bool persist, bool add_smt)
2741{
2742 int ret, mode;
2743 struct fw_vi_mac_cmd c;
2744 struct fw_vi_mac_exact *p = c.u.exact;
2745
2746 if (idx < 0) /* new allocation */
2747 idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC;
2748 mode = add_smt ? FW_VI_MAC_SMT_AND_MPSTCAM : FW_VI_MAC_MPS_TCAM_ENTRY;
2749
2750 memset(&c, 0, sizeof(c));
2751 c.op_to_viid = htonl(FW_CMD_OP(FW_VI_MAC_CMD) | FW_CMD_REQUEST |
2752 FW_CMD_WRITE | FW_VI_MAC_CMD_VIID(viid));
2753 c.freemacs_to_len16 = htonl(FW_CMD_LEN16(1));
2754 p->valid_to_idx = htons(FW_VI_MAC_CMD_VALID |
2755 FW_VI_MAC_CMD_SMAC_RESULT(mode) |
2756 FW_VI_MAC_CMD_IDX(idx));
2757 memcpy(p->macaddr, addr, sizeof(p->macaddr));
2758
2759 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
2760 if (ret == 0) {
2761 ret = FW_VI_MAC_CMD_IDX_GET(ntohs(p->valid_to_idx));
2762 if (ret >= NEXACT_MAC)
2763 ret = -ENOMEM;
2764 }
2765 return ret;
2766}
2767
2768/**
2769 * t4_set_addr_hash - program the MAC inexact-match hash filter
2770 * @adap: the adapter
2771 * @mbox: mailbox to use for the FW command
2772 * @viid: the VI id
2773 * @ucast: whether the hash filter should also match unicast addresses
2774 * @vec: the value to be written to the hash filter
2775 * @sleep_ok: call is allowed to sleep
2776 *
2777 * Sets the 64-bit inexact-match hash filter for a virtual interface.
2778 */
2779int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid,
2780 bool ucast, u64 vec, bool sleep_ok)
2781{
2782 struct fw_vi_mac_cmd c;
2783
2784 memset(&c, 0, sizeof(c));
2785 c.op_to_viid = htonl(FW_CMD_OP(FW_VI_MAC_CMD) | FW_CMD_REQUEST |
2786 FW_CMD_WRITE | FW_VI_MAC_CMD_VIID(viid));
2787 c.freemacs_to_len16 = htonl(FW_VI_MAC_CMD_HASHVECEN |
2788 FW_VI_MAC_CMD_HASHUNIEN(ucast) |
2789 FW_CMD_LEN16(1));
2790 c.u.hash.hashvec = cpu_to_be64(vec);
2791 return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
2792}
2793
2794/**
2795 * t4_enable_vi - enable/disable a virtual interface
2796 * @adap: the adapter
2797 * @mbox: mailbox to use for the FW command
2798 * @viid: the VI id
2799 * @rx_en: 1=enable Rx, 0=disable Rx
2800 * @tx_en: 1=enable Tx, 0=disable Tx
2801 *
2802 * Enables/disables a virtual interface.
2803 */
2804int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid,
2805 bool rx_en, bool tx_en)
2806{
2807 struct fw_vi_enable_cmd c;
2808
2809 memset(&c, 0, sizeof(c));
2810 c.op_to_viid = htonl(FW_CMD_OP(FW_VI_ENABLE_CMD) | FW_CMD_REQUEST |
2811 FW_CMD_EXEC | FW_VI_ENABLE_CMD_VIID(viid));
2812 c.ien_to_len16 = htonl(FW_VI_ENABLE_CMD_IEN(rx_en) |
2813 FW_VI_ENABLE_CMD_EEN(tx_en) | FW_LEN16(c));
2814 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2815}
2816
2817/**
2818 * t4_identify_port - identify a VI's port by blinking its LED
2819 * @adap: the adapter
2820 * @mbox: mailbox to use for the FW command
2821 * @viid: the VI id
2822 * @nblinks: how many times to blink LED at 2.5 Hz
2823 *
2824 * Identifies a VI's port by blinking its LED.
2825 */
2826int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid,
2827 unsigned int nblinks)
2828{
2829 struct fw_vi_enable_cmd c;
2830
 memset(&c, 0, sizeof(c));	/* zero reserved/padding fields */
2831 c.op_to_viid = htonl(FW_CMD_OP(FW_VI_ENABLE_CMD) | FW_CMD_REQUEST |
2832 FW_CMD_EXEC | FW_VI_ENABLE_CMD_VIID(viid));
2833 c.ien_to_len16 = htonl(FW_VI_ENABLE_CMD_LED | FW_LEN16(c));
2834 c.blinkdur = htons(nblinks);
2835 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2836}
2837
2838/**
2839 * t4_iq_start_stop - enable/disable an ingress queue and its FLs
2840 * @adap: the adapter
2841 * @mbox: mailbox to use for the FW command
2842 * @start: %true to enable the queues, %false to disable them
2843 * @pf: the PF owning the queues
2844 * @vf: the VF owning the queues
2845 * @iqid: ingress queue id
2846 * @fl0id: FL0 queue id or 0xffff if no attached FL0
2847 * @fl1id: FL1 queue id or 0xffff if no attached FL1
2848 *
2849 * Starts or stops an ingress queue and its associated FLs, if any.
2850 */
2851int t4_iq_start_stop(struct adapter *adap, unsigned int mbox, bool start,
2852 unsigned int pf, unsigned int vf, unsigned int iqid,
2853 unsigned int fl0id, unsigned int fl1id)
2854{
2855 struct fw_iq_cmd c;
2856
2857 memset(&c, 0, sizeof(c));
2858 c.op_to_vfn = htonl(FW_CMD_OP(FW_IQ_CMD) | FW_CMD_REQUEST |
2859 FW_CMD_EXEC | FW_IQ_CMD_PFN(pf) |
2860 FW_IQ_CMD_VFN(vf));
2861 c.alloc_to_len16 = htonl(FW_IQ_CMD_IQSTART(start) |
2862 FW_IQ_CMD_IQSTOP(!start) | FW_LEN16(c));
2863 c.iqid = htons(iqid);
2864 c.fl0id = htons(fl0id);
2865 c.fl1id = htons(fl1id);
2866 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2867}
2868
2869/**
2870 * t4_iq_free - free an ingress queue and its FLs
2871 * @adap: the adapter
2872 * @mbox: mailbox to use for the FW command
2873 * @pf: the PF owning the queues
2874 * @vf: the VF owning the queues
2875 * @iqtype: the ingress queue type
2876 * @iqid: ingress queue id
2877 * @fl0id: FL0 queue id or 0xffff if no attached FL0
2878 * @fl1id: FL1 queue id or 0xffff if no attached FL1
2879 *
2880 * Frees an ingress queue and its associated FLs, if any.
2881 */
2882int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
2883 unsigned int vf, unsigned int iqtype, unsigned int iqid,
2884 unsigned int fl0id, unsigned int fl1id)
2885{
2886 struct fw_iq_cmd c;
2887
2888 memset(&c, 0, sizeof(c));
2889 c.op_to_vfn = htonl(FW_CMD_OP(FW_IQ_CMD) | FW_CMD_REQUEST |
2890 FW_CMD_EXEC | FW_IQ_CMD_PFN(pf) |
2891 FW_IQ_CMD_VFN(vf));
2892 c.alloc_to_len16 = htonl(FW_IQ_CMD_FREE | FW_LEN16(c));
2893 c.type_to_iqandstindex = htonl(FW_IQ_CMD_TYPE(iqtype));
2894 c.iqid = htons(iqid);
2895 c.fl0id = htons(fl0id);
2896 c.fl1id = htons(fl1id);
2897 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2898}
2899
2900/**
2901 * t4_eth_eq_free - free an Ethernet egress queue
2902 * @adap: the adapter
2903 * @mbox: mailbox to use for the FW command
2904 * @pf: the PF owning the queue
2905 * @vf: the VF owning the queue
2906 * @eqid: egress queue id
2907 *
2908 * Frees an Ethernet egress queue.
2909 */
2910int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
2911 unsigned int vf, unsigned int eqid)
2912{
2913 struct fw_eq_eth_cmd c;
2914
2915 memset(&c, 0, sizeof(c));
2916 c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_ETH_CMD) | FW_CMD_REQUEST |
2917 FW_CMD_EXEC | FW_EQ_ETH_CMD_PFN(pf) |
2918 FW_EQ_ETH_CMD_VFN(vf));
2919 c.alloc_to_len16 = htonl(FW_EQ_ETH_CMD_FREE | FW_LEN16(c));
2920 c.eqid_pkd = htonl(FW_EQ_ETH_CMD_EQID(eqid));
2921 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2922}
2923
2924/**
2925 * t4_ctrl_eq_free - free a control egress queue
2926 * @adap: the adapter
2927 * @mbox: mailbox to use for the FW command
2928 * @pf: the PF owning the queue
2929 * @vf: the VF owning the queue
2930 * @eqid: egress queue id
2931 *
2932 * Frees a control egress queue.
2933 */
2934int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
2935 unsigned int vf, unsigned int eqid)
2936{
2937 struct fw_eq_ctrl_cmd c;
2938
2939 memset(&c, 0, sizeof(c));
2940 c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_CTRL_CMD) | FW_CMD_REQUEST |
2941 FW_CMD_EXEC | FW_EQ_CTRL_CMD_PFN(pf) |
2942 FW_EQ_CTRL_CMD_VFN(vf));
2943 c.alloc_to_len16 = htonl(FW_EQ_CTRL_CMD_FREE | FW_LEN16(c));
2944 c.cmpliqid_eqid = htonl(FW_EQ_CTRL_CMD_EQID(eqid));
2945 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2946}
2947
2948/**
2949 * t4_ofld_eq_free - free an offload egress queue
2950 * @adap: the adapter
2951 * @mbox: mailbox to use for the FW command
2952 * @pf: the PF owning the queue
2953 * @vf: the VF owning the queue
2954 * @eqid: egress queue id
2955 *
2956 * Frees an offload egress queue.
2957 */
2958int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
2959 unsigned int vf, unsigned int eqid)
2960{
2961 struct fw_eq_ofld_cmd c;
2962
2963 memset(&c, 0, sizeof(c));
2964 c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_OFLD_CMD) | FW_CMD_REQUEST |
2965 FW_CMD_EXEC | FW_EQ_OFLD_CMD_PFN(pf) |
2966 FW_EQ_OFLD_CMD_VFN(vf));
2967 c.alloc_to_len16 = htonl(FW_EQ_OFLD_CMD_FREE | FW_LEN16(c));
2968 c.eqid_pkd = htonl(FW_EQ_OFLD_CMD_EQID(eqid));
2969 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2970}
2971
2972/**
2973 * t4_handle_fw_rpl - process a FW reply message
2974 * @adap: the adapter
2975 * @rpl: start of the FW message
2976 *
2977 * Processes a FW message, such as link state change messages.
2978 */
2979int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl)
2980{
2981 u8 opcode = *(const u8 *)rpl;
2982
2983 if (opcode == FW_PORT_CMD) { /* link/module state change message */
2984 int speed = 0, fc = 0;
2985 const struct fw_port_cmd *p = (void *)rpl;
2986 int chan = FW_PORT_CMD_PORTID_GET(ntohl(p->op_to_portid));
2987 int port = adap->chan_map[chan];
2988 struct port_info *pi = adap2pinfo(adap, port);
2989 struct link_config *lc = &pi->link_cfg;
2990 u32 stat = ntohl(p->u.info.lstatus_to_modtype);
2991 int link_ok = (stat & FW_PORT_CMD_LSTATUS) != 0;
2992 u32 mod = FW_PORT_CMD_MODTYPE_GET(stat);
2993
2994 if (stat & FW_PORT_CMD_RXPAUSE)
2995 fc |= PAUSE_RX;
2996 if (stat & FW_PORT_CMD_TXPAUSE)
2997 fc |= PAUSE_TX;
2998 if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100M))
2999 speed = SPEED_100;
3000 else if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_1G))
3001 speed = SPEED_1000;
3002 else if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_10G))
3003 speed = SPEED_10000;
3004
3005 if (link_ok != lc->link_ok || speed != lc->speed ||
3006 fc != lc->fc) { /* something changed */
3007 lc->link_ok = link_ok;
3008 lc->speed = speed;
3009 lc->fc = fc;
3010 t4_os_link_changed(adap, port, link_ok);
3011 }
3012 if (mod != pi->mod_type) {
3013 pi->mod_type = mod;
3014 t4_os_portmod_changed(adap, port);
3015 }
3016 }
3017 return 0;
3018}
3019
3020static void __devinit get_pci_mode(struct adapter *adapter,
3021 struct pci_params *p)
3022{
3023 u16 val;
3024 u32 pcie_cap = pci_pcie_cap(adapter->pdev);
3025
3026 if (pcie_cap) {
3027 pci_read_config_word(adapter->pdev, pcie_cap + PCI_EXP_LNKSTA,
3028 &val);
3029 p->speed = val & PCI_EXP_LNKSTA_CLS;
3030 p->width = (val & PCI_EXP_LNKSTA_NLW) >> 4;
3031 }
3032}
3033
3034/**
3035 * init_link_config - initialize a link's SW state
3036 * @lc: structure holding the link state
3037 * @caps: link capabilities
3038 *
3039 * Initializes the SW state maintained for each link, including the link's
3040 * capabilities and default speed/flow-control/autonegotiation settings.
3041 */
3042static void __devinit init_link_config(struct link_config *lc,
3043 unsigned int caps)
3044{
3045 lc->supported = caps;
3046 lc->requested_speed = 0;
3047 lc->speed = 0;
3048 lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
3049 if (lc->supported & FW_PORT_CAP_ANEG) {
3050 lc->advertising = lc->supported & ADVERT_MASK;
3051 lc->autoneg = AUTONEG_ENABLE;
3052 lc->requested_fc |= PAUSE_AUTONEG;
3053 } else {
3054 lc->advertising = 0;
3055 lc->autoneg = AUTONEG_DISABLE;
3056 }
3057}
3058
3059static int __devinit wait_dev_ready(struct adapter *adap)
3060{
3061 if (t4_read_reg(adap, PL_WHOAMI) != 0xffffffff)
3062 return 0;
3063 msleep(500);
3064 return t4_read_reg(adap, PL_WHOAMI) != 0xffffffff ? 0 : -EIO;
3065}
3066
3067/**
3068 * t4_prep_adapter - prepare SW and HW for operation
3069 * @adapter: the adapter
3071 *
3072 * Initialize adapter SW state for the various HW modules, set initial
3073 * values for some adapter tunables, take PHYs out of reset, and
3074 * initialize the MDIO interface.
3075 */
3076int __devinit t4_prep_adapter(struct adapter *adapter)
3077{
3078 int ret;
3079
3080 ret = wait_dev_ready(adapter);
3081 if (ret < 0)
3082 return ret;
3083
3084 get_pci_mode(adapter, &adapter->params.pci);
3085 adapter->params.rev = t4_read_reg(adapter, PL_REV);
3086
3087 ret = get_vpd_params(adapter, &adapter->params.vpd);
3088 if (ret < 0)
3089 return ret;
3090
3091 init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);
3092
3093 /*
3094 * Default port for debugging in case we can't reach FW.
3095 */
3096 adapter->params.nports = 1;
3097 adapter->params.portvec = 1;
3098 return 0;
3099}
3100
3101int __devinit t4_port_init(struct adapter *adap, int mbox, int pf, int vf)
3102{
3103 u8 addr[6];
3104 int ret, i, j = 0;
3105 struct fw_port_cmd c;
3106
3107 memset(&c, 0, sizeof(c));
3108
3109 for_each_port(adap, i) {
3110 unsigned int rss_size;
3111 struct port_info *p = adap2pinfo(adap, i);
3112
3113 while ((adap->params.portvec & (1 << j)) == 0)
3114 j++;
3115
3116 c.op_to_portid = htonl(FW_CMD_OP(FW_PORT_CMD) |
3117 FW_CMD_REQUEST | FW_CMD_READ |
3118 FW_PORT_CMD_PORTID(j));
3119 c.action_to_len16 = htonl(
3120 FW_PORT_CMD_ACTION(FW_PORT_ACTION_GET_PORT_INFO) |
3121 FW_LEN16(c));
3122 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
3123 if (ret)
3124 return ret;
3125
3126 ret = t4_alloc_vi(adap, mbox, j, pf, vf, 1, addr, &rss_size);
3127 if (ret < 0)
3128 return ret;
3129
3130 p->viid = ret;
3131 p->tx_chan = j;
3132 p->lport = j;
3133 p->rss_size = rss_size;
3134 memcpy(adap->port[i]->dev_addr, addr, ETH_ALEN);
3135 memcpy(adap->port[i]->perm_addr, addr, ETH_ALEN);
3136
3137 ret = ntohl(c.u.info.lstatus_to_modtype);
3138 p->mdio_addr = (ret & FW_PORT_CMD_MDIOCAP) ?
3139 FW_PORT_CMD_MDIOADDR_GET(ret) : -1;
3140 p->port_type = FW_PORT_CMD_PTYPE_GET(ret);
3141 p->mod_type = FW_PORT_CMD_MODTYPE_GET(ret);
3142
3143 init_link_config(&p->link_cfg, ntohs(c.u.info.pcap));
3144 j++;
3145 }
3146 return 0;
3147}