/* drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c */
/*
 * This file is part of the Chelsio T4 Ethernet driver for Linux.
 *
 * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/bitmap.h>
#include <linux/crc32.h>
#include <linux/ctype.h>
#include <linux/debugfs.h>
#include <linux/err.h>
#include <linux/etherdevice.h>
#include <linux/firmware.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/init.h>
#include <linux/log2.h>
#include <linux/mdio.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mutex.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/aer.h>
#include <linux/rtnetlink.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/sockios.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>
#include <net/neighbour.h>
#include <net/netevent.h>
#include <asm/uaccess.h>

#include "cxgb4.h"
#include "t4_regs.h"
#include "t4_msg.h"
#include "t4fw_api.h"
#include "l2t.h"

#define DRV_VERSION "2.0.0-ko"
#define DRV_DESC "Chelsio T4/T5 Network Driver"

/*
 * Max interrupt hold-off timer value in us.  Queues fall back to this value
 * under extreme memory pressure so it's largish to give the system time to
 * recover.
 */
#define MAX_SGE_TIMERVAL 200U

enum {
	/*
	 * Physical Function provisioning constants.
	 */
	PFRES_NVI = 4,			/* # of Virtual Interfaces */
	PFRES_NETHCTRL = 128,		/* # of EQs used for ETH or CTRL Qs */
	PFRES_NIQFLINT = 128,		/* # of ingress Qs/w Free List(s)/intr */
	PFRES_NEQ = 256,		/* # of egress queues */
	PFRES_NIQ = 0,			/* # of ingress queues */
	PFRES_TC = 0,			/* PCI-E traffic class */
	PFRES_NEXACTF = 128,		/* # of exact MPS filters */

	PFRES_R_CAPS = FW_CMD_CAP_PF,
	PFRES_WX_CAPS = FW_CMD_CAP_PF,

#ifdef CONFIG_PCI_IOV
	/*
	 * Virtual Function provisioning constants.  We need two extra Ingress
	 * Queues with Interrupt capability to serve as the VF's Firmware
	 * Event Queue and Forwarded Interrupt Queue (when using MSI mode) --
	 * neither will have Free Lists associated with them.  For each
	 * Ethernet/Control Egress Queue and for each Free List, we need an
	 * Egress Context.
	 */
	VFRES_NPORTS = 1,		/* # of "ports" per VF */
	VFRES_NQSETS = 2,		/* # of "Queue Sets" per VF */

	VFRES_NVI = VFRES_NPORTS,	/* # of Virtual Interfaces */
	VFRES_NETHCTRL = VFRES_NQSETS,	/* # of EQs used for ETH or CTRL Qs */
	VFRES_NIQFLINT = VFRES_NQSETS+2,/* # of ingress Qs/w Free List(s)/intr */
	VFRES_NEQ = VFRES_NQSETS*2,	/* # of egress queues */
	VFRES_NIQ = 0,			/* # of non-fl/int ingress queues */
	VFRES_TC = 0,			/* PCI-E traffic class */
	VFRES_NEXACTF = 16,		/* # of exact MPS filters */

	VFRES_R_CAPS = FW_CMD_CAP_DMAQ|FW_CMD_CAP_VF|FW_CMD_CAP_PORT,
	VFRES_WX_CAPS = FW_CMD_CAP_DMAQ|FW_CMD_CAP_VF,
#endif
};

/*
 * Provide a Port Access Rights Mask for the specified PF/VF.  This is very
 * static and likely not to be useful in the long run.  We really need to
 * implement some form of persistent configuration which the firmware
 * controls.
 */
static unsigned int pfvfres_pmask(struct adapter *adapter,
				  unsigned int pf, unsigned int vf)
{
	unsigned int portn, portvec;

	/*
	 * Give PF's access to all of the ports.
	 */
	if (vf == 0)
		return FW_PFVF_CMD_PMASK_MASK;

	/*
	 * For VFs, we'll assign them access to the ports based purely on the
	 * PF.  We assign active ports in order, wrapping around if there are
	 * fewer active ports than PFs: e.g. active port[pf % nports].
	 * Unfortunately the adapter's port_info structs haven't been
	 * initialized yet so we have to compute this.
	 */
	if (adapter->params.nports == 0)
		return 0;

	portn = pf % adapter->params.nports;
	portvec = adapter->params.portvec;
	for (;;) {
		/*
		 * Isolate the lowest set bit in the port vector.  If we're at
		 * the port number that we want, return that as the pmask.
		 * Otherwise mask that bit out of the port vector and
		 * decrement our port number ...
		 */
		unsigned int pmask = portvec ^ (portvec & (portvec-1));
		if (portn == 0)
			return pmask;
		portn--;
		portvec &= ~pmask;
	}
	/*NOTREACHED*/
}

enum {
	MAX_TXQ_ENTRIES      = 16384,
	MAX_CTRL_TXQ_ENTRIES = 1024,
	MAX_RSPQ_ENTRIES     = 16384,
	MAX_RX_BUFFERS       = 16384,
	MIN_TXQ_ENTRIES      = 32,
	MIN_CTRL_TXQ_ENTRIES = 32,
	MIN_RSPQ_ENTRIES     = 128,
	MIN_FL_ENTRIES       = 16
};

/* Host shadow copy of ingress filter entry.  This is in host native format
 * and doesn't match the ordering or bit order, etc. of the hardware or the
 * firmware command.  The use of bit-field structure elements is purely to
 * remind ourselves of the field size limitations and save memory in the case
 * where the filter table is large.
 */
struct filter_entry {
	/* Administrative fields for filter.
	 */
	u32 valid:1;		/* filter allocated and valid */
	u32 locked:1;		/* filter is administratively locked */

	u32 pending:1;		/* filter action is pending firmware reply */
	u32 smtidx:8;		/* Source MAC Table index for smac */
	struct l2t_entry *l2t;	/* Layer Two Table entry for dmac */

	/* The filter itself.  Most of this is a straight copy of information
	 * provided by the extended ioctl().  Some fields are translated to
	 * internal forms -- for instance the Ingress Queue ID passed in from
	 * the ioctl() is translated into the Absolute Ingress Queue ID.
	 */
	struct ch_filter_specification fs;
};

#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
			 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
			 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)

#define CH_DEVICE(devid, data) { PCI_VDEVICE(CHELSIO, devid), (data) }

static DEFINE_PCI_DEVICE_TABLE(cxgb4_pci_tbl) = {
	CH_DEVICE(0xa000, 0),	/* PE10K */
	CH_DEVICE(0x4001, -1),
	CH_DEVICE(0x4002, -1),
	CH_DEVICE(0x4003, -1),
	CH_DEVICE(0x4004, -1),
	CH_DEVICE(0x4005, -1),
	CH_DEVICE(0x4006, -1),
	CH_DEVICE(0x4007, -1),
	CH_DEVICE(0x4008, -1),
	CH_DEVICE(0x4009, -1),
	CH_DEVICE(0x400a, -1),
	CH_DEVICE(0x4401, 4),
	CH_DEVICE(0x4402, 4),
	CH_DEVICE(0x4403, 4),
	CH_DEVICE(0x4404, 4),
	CH_DEVICE(0x4405, 4),
	CH_DEVICE(0x4406, 4),
	CH_DEVICE(0x4407, 4),
	CH_DEVICE(0x4408, 4),
	CH_DEVICE(0x4409, 4),
	CH_DEVICE(0x440a, 4),
	CH_DEVICE(0x440d, 4),
	CH_DEVICE(0x440e, 4),
	CH_DEVICE(0x5001, 5),
	CH_DEVICE(0x5002, 5),
	CH_DEVICE(0x5003, 5),
	CH_DEVICE(0x5004, 5),
	CH_DEVICE(0x5005, 5),
	CH_DEVICE(0x5006, 5),
	CH_DEVICE(0x5007, 5),
	CH_DEVICE(0x5008, 5),
	CH_DEVICE(0x5009, 5),
	CH_DEVICE(0x500A, 5),
	CH_DEVICE(0x500B, 5),
	CH_DEVICE(0x500C, 5),
	CH_DEVICE(0x500D, 5),
	CH_DEVICE(0x500E, 5),
	CH_DEVICE(0x500F, 5),
	CH_DEVICE(0x5010, 5),
	CH_DEVICE(0x5011, 5),
	CH_DEVICE(0x5012, 5),
	CH_DEVICE(0x5013, 5),
	CH_DEVICE(0x5401, 5),
	CH_DEVICE(0x5402, 5),
	CH_DEVICE(0x5403, 5),
	CH_DEVICE(0x5404, 5),
	CH_DEVICE(0x5405, 5),
	CH_DEVICE(0x5406, 5),
	CH_DEVICE(0x5407, 5),
	CH_DEVICE(0x5408, 5),
	CH_DEVICE(0x5409, 5),
	CH_DEVICE(0x540A, 5),
	CH_DEVICE(0x540B, 5),
	CH_DEVICE(0x540C, 5),
	CH_DEVICE(0x540D, 5),
	CH_DEVICE(0x540E, 5),
	CH_DEVICE(0x540F, 5),
	CH_DEVICE(0x5410, 5),
	CH_DEVICE(0x5411, 5),
	CH_DEVICE(0x5412, 5),
	CH_DEVICE(0x5413, 5),
	{ 0, }
};

#define FW_FNAME "cxgb4/t4fw.bin"
#define FW5_FNAME "cxgb4/t5fw.bin"
#define FW_CFNAME "cxgb4/t4-config.txt"
#define FW5_CFNAME "cxgb4/t5-config.txt"

MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR("Chelsio Communications");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, cxgb4_pci_tbl);
MODULE_FIRMWARE(FW_FNAME);
MODULE_FIRMWARE(FW5_FNAME);

/*
 * Normally we're willing to become the firmware's Master PF but will be happy
 * if another PF has already become the Master and initialized the adapter.
 * Setting "force_init" will cause this driver to forcibly establish itself as
 * the Master PF and initialize the adapter.
 */
static uint force_init;

module_param(force_init, uint, 0644);
MODULE_PARM_DESC(force_init, "Forcibly become Master PF and initialize adapter");

/*
 * Normally if the firmware we connect to has Configuration File support, we
 * use that and only fall back to the old Driver-based initialization if the
 * Configuration File fails for some reason.  If force_old_init is set, then
 * we'll always use the old Driver-based initialization sequence.
 */
static uint force_old_init;

module_param(force_old_init, uint, 0644);
MODULE_PARM_DESC(force_old_init, "Force old initialization sequence");

static int dflt_msg_enable = DFLT_MSG_ENABLE;

module_param(dflt_msg_enable, int, 0644);
MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T4 default message enable bitmap");

/*
 * The driver uses the best interrupt scheme available on a platform in the
 * order MSI-X, MSI, legacy INTx interrupts.  This parameter determines which
 * of these schemes the driver may consider as follows:
 *
 * msi = 2: choose from among all three options
 * msi = 1: only consider MSI and INTx interrupts
 * msi = 0: force INTx interrupts
 */
static int msi = 2;

module_param(msi, int, 0644);
MODULE_PARM_DESC(msi, "whether to use INTx (0), MSI (1) or MSI-X (2)");

/*
 * Queue interrupt hold-off timer values.  Queues default to the first of these
 * upon creation.
 */
static unsigned int intr_holdoff[SGE_NTIMERS - 1] = { 5, 10, 20, 50, 100 };

module_param_array(intr_holdoff, uint, NULL, 0644);
MODULE_PARM_DESC(intr_holdoff, "values for queue interrupt hold-off timers "
		 "0..4 in microseconds");

static unsigned int intr_cnt[SGE_NCOUNTERS - 1] = { 4, 8, 16 };

module_param_array(intr_cnt, uint, NULL, 0644);
MODULE_PARM_DESC(intr_cnt,
		 "thresholds 1..3 for queue interrupt packet counters");

/*
 * Normally we tell the chip to deliver Ingress Packets into our DMA buffers
 * offset by 2 bytes in order to have the IP headers line up on 4-byte
 * boundaries.  This is a requirement for many architectures which will throw
 * a machine check fault if an attempt is made to access one of the 4-byte IP
 * header fields on a non-4-byte boundary.  And it's a major performance issue
 * even on some architectures which allow it like some implementations of the
 * x86 ISA.  However, some architectures don't mind this and for some very
 * edge-case performance sensitive applications (like forwarding large volumes
 * of small packets), setting this DMA offset to 0 will decrease the number of
 * PCI-E Bus transfers enough to measurably affect performance.
 */
static int rx_dma_offset = 2;

static bool vf_acls;

#ifdef CONFIG_PCI_IOV
module_param(vf_acls, bool, 0644);
MODULE_PARM_DESC(vf_acls, "if set enable virtualization L2 ACL enforcement");

/* Since T5 has more PFs, we use the NUM_OF_PF_WITH_SRIOV_T5 macro to size
 * the num_vf array.
 */
static unsigned int num_vf[NUM_OF_PF_WITH_SRIOV_T5];

module_param_array(num_vf, uint, NULL, 0644);
MODULE_PARM_DESC(num_vf,
		 "number of VFs for each of PFs 0-3 for T4 and PFs 0-7 for T5");
#endif

/*
 * The filter TCAM has a fixed portion and a variable portion.  The fixed
 * portion can match on source/destination IP IPv4/IPv6 addresses and TCP/UDP
 * ports.  The variable portion is 36 bits which can include things like Exact
 * Match MAC Index (9 bits), Ether Type (16 bits), IP Protocol (8 bits),
 * [Inner] VLAN Tag (17 bits), etc. which, if all were somehow selected, would
 * far exceed the 36-bit budget for this "compressed" header portion of the
 * filter.  Thus, we have a scarce resource which must be carefully managed.
 *
 * By default we set this up to mostly match the set of filter matching
 * capabilities of T3 but with accommodations for some of T4's more
 * interesting features:
 *
 * { IP Fragment (1), MPS Match Type (3), IP Protocol (8),
 *   [Inner] VLAN (17), Port (3), FCoE (1) }
 */
enum {
	TP_VLAN_PRI_MAP_DEFAULT = HW_TPL_FR_MT_PR_IV_P_FC,
	TP_VLAN_PRI_MAP_FIRST = FCOE_SHIFT,
	TP_VLAN_PRI_MAP_LAST = FRAGMENTATION_SHIFT,
};

static unsigned int tp_vlan_pri_map = TP_VLAN_PRI_MAP_DEFAULT;

module_param(tp_vlan_pri_map, uint, 0644);
MODULE_PARM_DESC(tp_vlan_pri_map, "global compressed filter configuration");

static struct dentry *cxgb4_debugfs_root;

static LIST_HEAD(adapter_list);
static DEFINE_MUTEX(uld_mutex);
static struct cxgb4_uld_info ulds[CXGB4_ULD_MAX];
static const char *uld_str[] = { "RDMA", "iSCSI" };

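/*
 * Report a port's link state: the speed, duplex and PAUSE configuration when
 * the link is up, or just "link down" otherwise.
 */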
static void link_report(struct net_device *dev)
{
	if (!netif_carrier_ok(dev))
		netdev_info(dev, "link down\n");
	else {
		static const char *fc[] = { "no", "Rx", "Tx", "Tx/Rx" };

		const char *s = "10Mbps";
		const struct port_info *p = netdev_priv(dev);

		switch (p->link_cfg.speed) {
		case SPEED_10000:
			s = "10Gbps";
			break;
		case SPEED_1000:
			s = "1000Mbps";
			break;
		case SPEED_100:
			s = "100Mbps";
			break;
		}

		netdev_info(dev, "link up, %s, full-duplex, %s PAUSE\n", s,
			    fc[p->link_cfg.fc]);
	}
}

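/*
 * OS callback invoked when a port's link state changes; it synchronizes the
 * netdev carrier state with the new link state and logs the change.
 */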
void t4_os_link_changed(struct adapter *adapter, int port_id, int link_stat)
{
	struct net_device *dev = adapter->port[port_id];

	/* Skip changes from disabled ports. */
	if (netif_running(dev) && link_stat != netif_carrier_ok(dev)) {
		if (link_stat)
			netif_carrier_on(dev);
		else
			netif_carrier_off(dev);

		link_report(dev);
	}
}

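/*
 * OS callback invoked when a port's transceiver module is inserted or
 * removed; it logs the new module type.
 */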
void t4_os_portmod_changed(const struct adapter *adap, int port_id)
{
	static const char *mod_str[] = {
		NULL, "LR", "SR", "ER", "passive DA", "active DA", "LRM"
	};

	const struct net_device *dev = adap->port[port_id];
	const struct port_info *pi = netdev_priv(dev);

	if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
		netdev_info(dev, "port module unplugged\n");
	else if (pi->mod_type < ARRAY_SIZE(mod_str))
		netdev_info(dev, "%s module inserted\n", mod_str[pi->mod_type]);
}

/*
 * Configure the exact and hash address filters to handle a port's multicast
 * and secondary unicast MAC addresses.
 */
static int set_addr_filters(const struct net_device *dev, bool sleep)
{
	u64 mhash = 0;
	u64 uhash = 0;
	bool free = true;
	u16 filt_idx[7];
	const u8 *addr[7];
	int ret, naddr = 0;
	const struct netdev_hw_addr *ha;
	int uc_cnt = netdev_uc_count(dev);
	int mc_cnt = netdev_mc_count(dev);
	const struct port_info *pi = netdev_priv(dev);
	unsigned int mb = pi->adapter->fn;

	/* first do the secondary unicast addresses */
	netdev_for_each_uc_addr(ha, dev) {
		addr[naddr++] = ha->addr;
		if (--uc_cnt == 0 || naddr >= ARRAY_SIZE(addr)) {
			ret = t4_alloc_mac_filt(pi->adapter, mb, pi->viid, free,
					naddr, addr, filt_idx, &uhash, sleep);
			if (ret < 0)
				return ret;

			free = false;
			naddr = 0;
		}
	}

	/* next set up the multicast addresses */
	netdev_for_each_mc_addr(ha, dev) {
		addr[naddr++] = ha->addr;
		if (--mc_cnt == 0 || naddr >= ARRAY_SIZE(addr)) {
			ret = t4_alloc_mac_filt(pi->adapter, mb, pi->viid, free,
					naddr, addr, filt_idx, &mhash, sleep);
			if (ret < 0)
				return ret;

			free = false;
			naddr = 0;
		}
	}

	return t4_set_addr_hash(pi->adapter, mb, pi->viid, uhash != 0,
				uhash | mhash, sleep);
}

int dbfifo_int_thresh = 10; /* 10 == 640 entry threshold */
module_param(dbfifo_int_thresh, int, 0644);
MODULE_PARM_DESC(dbfifo_int_thresh, "doorbell fifo interrupt threshold");

/*
 * usecs to sleep while draining the dbfifo
 */
static int dbfifo_drain_delay = 1000;
module_param(dbfifo_drain_delay, int, 0644);
MODULE_PARM_DESC(dbfifo_drain_delay,
		 "usecs to sleep while draining the dbfifo");

/*
 * Set Rx properties of a port, such as promiscuity, address filters, and MTU.
 * If @mtu is -1 it is left unchanged.
 */
static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok)
{
	int ret;
	struct port_info *pi = netdev_priv(dev);

	ret = set_addr_filters(dev, sleep_ok);
	if (ret == 0)
		ret = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, mtu,
				    (dev->flags & IFF_PROMISC) ? 1 : 0,
				    (dev->flags & IFF_ALLMULTI) ? 1 : 0, 1, -1,
				    sleep_ok);
	return ret;
}

static struct workqueue_struct *workq;

/**
 * link_start - enable a port
 * @dev: the port to enable
 *
 * Performs the MAC and PHY actions needed to enable a port.
 */
static int link_start(struct net_device *dev)
{
	int ret;
	struct port_info *pi = netdev_priv(dev);
	unsigned int mb = pi->adapter->fn;

	/*
	 * We do not set address filters and promiscuity here, the stack does
	 * that step explicitly.
	 */
	ret = t4_set_rxmode(pi->adapter, mb, pi->viid, dev->mtu, -1, -1, -1,
			    !!(dev->features & NETIF_F_HW_VLAN_RX), true);
	if (ret == 0) {
		ret = t4_change_mac(pi->adapter, mb, pi->viid,
				    pi->xact_addr_filt, dev->dev_addr, true,
				    true);
		if (ret >= 0) {
			pi->xact_addr_filt = ret;
			ret = 0;
		}
	}
	if (ret == 0)
		ret = t4_link_start(pi->adapter, mb, pi->tx_chan,
				    &pi->link_cfg);
	if (ret == 0)
		ret = t4_enable_vi(pi->adapter, mb, pi->viid, true, true);
	return ret;
}

/* Clear a filter and release any of its resources that we own.  This also
 * clears the filter's "pending" status.
 */
static void clear_filter(struct adapter *adap, struct filter_entry *f)
{
	/* If the new or old filter have loopback rewriting rules then we'll
	 * need to free any existing Layer Two Table (L2T) entries of the old
	 * filter rule.  The firmware will handle freeing up any Source MAC
	 * Table (SMT) entries used for rewriting Source MAC Addresses in
	 * loopback rules.
	 */
	if (f->l2t)
		cxgb4_l2t_release(f->l2t);

	/* The zeroing of the filter rule below clears the filter valid,
	 * pending, locked flags, l2t pointer, etc. so it's all we need for
	 * this operation.
	 */
	memset(f, 0, sizeof(*f));
}

/* Handle a filter write/deletion reply.
 */
static void filter_rpl(struct adapter *adap, const struct cpl_set_tcb_rpl *rpl)
{
	unsigned int idx = GET_TID(rpl);
	unsigned int nidx = idx - adap->tids.ftid_base;
	unsigned int ret;
	struct filter_entry *f;

	if (idx >= adap->tids.ftid_base && nidx <
	    (adap->tids.nftids + adap->tids.nsftids)) {
		idx = nidx;
		ret = GET_TCB_COOKIE(rpl->cookie);
		f = &adap->tids.ftid_tab[idx];

		if (ret == FW_FILTER_WR_FLT_DELETED) {
			/* Clear the filter when we get confirmation from the
			 * hardware that the filter has been deleted.
			 */
			clear_filter(adap, f);
		} else if (ret == FW_FILTER_WR_SMT_TBL_FULL) {
			dev_err(adap->pdev_dev, "filter %u setup failed due to full SMT\n",
				idx);
			clear_filter(adap, f);
		} else if (ret == FW_FILTER_WR_FLT_ADDED) {
			f->smtidx = (be64_to_cpu(rpl->oldval) >> 24) & 0xff;
			f->pending = 0;  /* asynchronous setup completed */
			f->valid = 1;
		} else {
			/* Something went wrong.  Issue a warning about the
			 * problem and clear everything out.
			 */
			dev_err(adap->pdev_dev, "filter %u setup failed with error %u\n",
				idx, ret);
			clear_filter(adap, f);
		}
	}
}

/* Response queue handler for the FW event queue.
 */
static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp,
			  const struct pkt_gl *gl)
{
	u8 opcode = ((const struct rss_header *)rsp)->opcode;

	rsp++;                                          /* skip RSS header */
	if (likely(opcode == CPL_SGE_EGR_UPDATE)) {
		const struct cpl_sge_egr_update *p = (void *)rsp;
		unsigned int qid = EGR_QID(ntohl(p->opcode_qid));
		struct sge_txq *txq;

		txq = q->adap->sge.egr_map[qid - q->adap->sge.egr_start];
		txq->restarts++;
		if ((u8 *)txq < (u8 *)q->adap->sge.ofldtxq) {
			struct sge_eth_txq *eq;

			eq = container_of(txq, struct sge_eth_txq, q);
			netif_tx_wake_queue(eq->txq);
		} else {
			struct sge_ofld_txq *oq;

			oq = container_of(txq, struct sge_ofld_txq, q);
			tasklet_schedule(&oq->qresume_tsk);
		}
	} else if (opcode == CPL_FW6_MSG || opcode == CPL_FW4_MSG) {
		const struct cpl_fw6_msg *p = (void *)rsp;

		if (p->type == 0)
			t4_handle_fw_rpl(q->adap, p->data);
	} else if (opcode == CPL_L2T_WRITE_RPL) {
		const struct cpl_l2t_write_rpl *p = (void *)rsp;

		do_l2t_write_rpl(q->adap, p);
	} else if (opcode == CPL_SET_TCB_RPL) {
		const struct cpl_set_tcb_rpl *p = (void *)rsp;

		filter_rpl(q->adap, p);
	} else
		dev_err(q->adap->pdev_dev,
			"unexpected CPL %#x on FW event queue\n", opcode);
	return 0;
}

/**
 * uldrx_handler - response queue handler for ULD queues
 * @q: the response queue that received the packet
 * @rsp: the response queue descriptor holding the offload message
 * @gl: the gather list of packet fragments
 *
 * Deliver an ingress offload packet to a ULD.  All processing is done by
 * the ULD, we just maintain statistics.
 */
static int uldrx_handler(struct sge_rspq *q, const __be64 *rsp,
			 const struct pkt_gl *gl)
{
	struct sge_ofld_rxq *rxq = container_of(q, struct sge_ofld_rxq, rspq);

	if (ulds[q->uld].rx_handler(q->adap->uld_handle[q->uld], rsp, gl)) {
		rxq->stats.nomem++;
		return -1;
	}
	if (gl == NULL)
		rxq->stats.imm++;
	else if (gl == CXGB4_MSG_AN)
		rxq->stats.an++;
	else
		rxq->stats.pkts++;
	return 0;
}

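/*
 * Turn off MSI-X or MSI, returning the adapter to line-based (INTx)
 * interrupts.
 */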
static void disable_msi(struct adapter *adapter)
{
	if (adapter->flags & USING_MSIX) {
		pci_disable_msix(adapter->pdev);
		adapter->flags &= ~USING_MSIX;
	} else if (adapter->flags & USING_MSI) {
		pci_disable_msi(adapter->pdev);
		adapter->flags &= ~USING_MSI;
	}
}

/*
 * Interrupt handler for non-data events used with MSI-X.
 */
static irqreturn_t t4_nondata_intr(int irq, void *cookie)
{
	struct adapter *adap = cookie;

	u32 v = t4_read_reg(adap, MYPF_REG(PL_PF_INT_CAUSE));
	if (v & PFSW) {
		adap->swintr = 1;
		t4_write_reg(adap, MYPF_REG(PL_PF_INT_CAUSE), v);
	}
	t4_slow_intr_handler(adap);
	return IRQ_HANDLED;
}

/*
 * Name the MSI-X interrupts.
 */
static void name_msix_vecs(struct adapter *adap)
{
	int i, j, msi_idx = 2, n = sizeof(adap->msix_info[0].desc);

	/* non-data interrupts */
	snprintf(adap->msix_info[0].desc, n, "%s", adap->port[0]->name);

	/* FW events */
	snprintf(adap->msix_info[1].desc, n, "%s-FWeventq",
		 adap->port[0]->name);

	/* Ethernet queues */
	for_each_port(adap, j) {
		struct net_device *d = adap->port[j];
		const struct port_info *pi = netdev_priv(d);

		for (i = 0; i < pi->nqsets; i++, msi_idx++)
			snprintf(adap->msix_info[msi_idx].desc, n, "%s-Rx%d",
				 d->name, i);
	}

	/* offload queues */
	for_each_ofldrxq(&adap->sge, i)
		snprintf(adap->msix_info[msi_idx++].desc, n, "%s-ofld%d",
			 adap->port[0]->name, i);

	for_each_rdmarxq(&adap->sge, i)
		snprintf(adap->msix_info[msi_idx++].desc, n, "%s-rdma%d",
			 adap->port[0]->name, i);
}

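/*
 * Request the MSI-X vectors for the firmware event queue and for every
 * Ethernet, offload and RDMA Rx queue, unwinding any partially requested
 * vectors on failure.
 */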
static int request_msix_queue_irqs(struct adapter *adap)
{
	struct sge *s = &adap->sge;
	int err, ethqidx, ofldqidx = 0, rdmaqidx = 0, msi_index = 2;

	err = request_irq(adap->msix_info[1].vec, t4_sge_intr_msix, 0,
			  adap->msix_info[1].desc, &s->fw_evtq);
	if (err)
		return err;

	for_each_ethrxq(s, ethqidx) {
		err = request_irq(adap->msix_info[msi_index].vec,
				  t4_sge_intr_msix, 0,
				  adap->msix_info[msi_index].desc,
				  &s->ethrxq[ethqidx].rspq);
		if (err)
			goto unwind;
		msi_index++;
	}
	for_each_ofldrxq(s, ofldqidx) {
		err = request_irq(adap->msix_info[msi_index].vec,
				  t4_sge_intr_msix, 0,
				  adap->msix_info[msi_index].desc,
				  &s->ofldrxq[ofldqidx].rspq);
		if (err)
			goto unwind;
		msi_index++;
	}
	for_each_rdmarxq(s, rdmaqidx) {
		err = request_irq(adap->msix_info[msi_index].vec,
				  t4_sge_intr_msix, 0,
				  adap->msix_info[msi_index].desc,
				  &s->rdmarxq[rdmaqidx].rspq);
		if (err)
			goto unwind;
		msi_index++;
	}
	return 0;

unwind:
	while (--rdmaqidx >= 0)
		free_irq(adap->msix_info[--msi_index].vec,
			 &s->rdmarxq[rdmaqidx].rspq);
	while (--ofldqidx >= 0)
		free_irq(adap->msix_info[--msi_index].vec,
			 &s->ofldrxq[ofldqidx].rspq);
	while (--ethqidx >= 0)
		free_irq(adap->msix_info[--msi_index].vec,
			 &s->ethrxq[ethqidx].rspq);
	free_irq(adap->msix_info[1].vec, &s->fw_evtq);
	return err;
}

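/*
 * Release the MSI-X vectors acquired by request_msix_queue_irqs().
 */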
static void free_msix_queue_irqs(struct adapter *adap)
{
	int i, msi_index = 2;
	struct sge *s = &adap->sge;

	free_irq(adap->msix_info[1].vec, &s->fw_evtq);
	for_each_ethrxq(s, i)
		free_irq(adap->msix_info[msi_index++].vec, &s->ethrxq[i].rspq);
	for_each_ofldrxq(s, i)
		free_irq(adap->msix_info[msi_index++].vec, &s->ofldrxq[i].rspq);
	for_each_rdmarxq(s, i)
		free_irq(adap->msix_info[msi_index++].vec, &s->rdmarxq[i].rspq);
}

/**
 * write_rss - write the RSS table for a given port
 * @pi: the port
 * @queues: array of queue indices for RSS
 *
 * Sets up the portion of the HW RSS table for the port's VI to distribute
 * packets to the Rx queues in @queues.
 */
static int write_rss(const struct port_info *pi, const u16 *queues)
{
	u16 *rss;
	int i, err;
	const struct sge_eth_rxq *q = &pi->adapter->sge.ethrxq[pi->first_qset];

	rss = kmalloc(pi->rss_size * sizeof(u16), GFP_KERNEL);
	if (!rss)
		return -ENOMEM;

	/* map the queue indices to queue ids */
	for (i = 0; i < pi->rss_size; i++, queues++)
		rss[i] = q[*queues].rspq.abs_id;

	err = t4_config_rss_range(pi->adapter, pi->adapter->fn, pi->viid, 0,
				  pi->rss_size, rss, pi->rss_size);
	kfree(rss);
	return err;
}

/**
 * setup_rss - configure RSS
 * @adap: the adapter
 *
 * Sets up RSS for each port.
 */
static int setup_rss(struct adapter *adap)
{
	int i, err;

	for_each_port(adap, i) {
		const struct port_info *pi = adap2pinfo(adap, i);

		err = write_rss(pi, pi->rss);
		if (err)
			return err;
	}
	return 0;
}

/*
 * Return the channel of the ingress queue with the given qid.
 */
static unsigned int rxq_to_chan(const struct sge *p, unsigned int qid)
{
	qid -= p->ingr_start;
	return netdev2pinfo(p->ingr_map[qid]->netdev)->tx_chan;
}

/*
 * Wait until all NAPI handlers are descheduled.
 */
static void quiesce_rx(struct adapter *adap)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(adap->sge.ingr_map); i++) {
		struct sge_rspq *q = adap->sge.ingr_map[i];

		if (q && q->handler)
			napi_disable(&q->napi);
	}
}

/*
 * Enable NAPI scheduling and interrupt generation for all Rx queues.
 */
static void enable_rx(struct adapter *adap)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(adap->sge.ingr_map); i++) {
		struct sge_rspq *q = adap->sge.ingr_map[i];

		if (!q)
			continue;
		if (q->handler)
			napi_enable(&q->napi);
		/* 0-increment GTS to start the timer and enable interrupts */
		t4_write_reg(adap, MYPF_REG(SGE_PF_GTS),
			     SEINTARM(q->intr_params) |
			     INGRESSQID(q->cntxt_id));
	}
}

/**
 * setup_sge_queues - configure SGE Tx/Rx/response queues
 * @adap: the adapter
 *
 * Determines how many sets of SGE queues to use and initializes them.
 * We support multiple queue sets per port if we have MSI-X, otherwise
 * just one queue set per port.
 */
static int setup_sge_queues(struct adapter *adap)
{
	int err, msi_idx, i, j;
	struct sge *s = &adap->sge;

	bitmap_zero(s->starving_fl, MAX_EGRQ);
	bitmap_zero(s->txq_maperr, MAX_EGRQ);

	if (adap->flags & USING_MSIX)
		msi_idx = 1;         /* vector 0 is for non-queue interrupts */
	else {
		err = t4_sge_alloc_rxq(adap, &s->intrq, false, adap->port[0], 0,
				       NULL, NULL);
		if (err)
			return err;
		msi_idx = -((int)s->intrq.abs_id + 1);
	}

	err = t4_sge_alloc_rxq(adap, &s->fw_evtq, true, adap->port[0],
			       msi_idx, NULL, fwevtq_handler);
	if (err) {
freeout:	t4_free_sge_resources(adap);
		return err;
	}

	for_each_port(adap, i) {
		struct net_device *dev = adap->port[i];
		struct port_info *pi = netdev_priv(dev);
		struct sge_eth_rxq *q = &s->ethrxq[pi->first_qset];
		struct sge_eth_txq *t = &s->ethtxq[pi->first_qset];

		for (j = 0; j < pi->nqsets; j++, q++) {
			if (msi_idx > 0)
				msi_idx++;
			err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev,
					       msi_idx, &q->fl,
					       t4_ethrx_handler);
			if (err)
				goto freeout;
			q->rspq.idx = j;
			memset(&q->stats, 0, sizeof(q->stats));
		}
		for (j = 0; j < pi->nqsets; j++, t++) {
			err = t4_sge_alloc_eth_txq(adap, t, dev,
					netdev_get_tx_queue(dev, j),
					s->fw_evtq.cntxt_id);
			if (err)
				goto freeout;
		}
	}

	j = s->ofldqsets / adap->params.nports; /* ofld queues per channel */
	for_each_ofldrxq(s, i) {
		struct sge_ofld_rxq *q = &s->ofldrxq[i];
		struct net_device *dev = adap->port[i / j];

		if (msi_idx > 0)
			msi_idx++;
		err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev, msi_idx,
				       &q->fl, uldrx_handler);
		if (err)
			goto freeout;
		memset(&q->stats, 0, sizeof(q->stats));
		s->ofld_rxq[i] = q->rspq.abs_id;
		err = t4_sge_alloc_ofld_txq(adap, &s->ofldtxq[i], dev,
					    s->fw_evtq.cntxt_id);
		if (err)
			goto freeout;
	}

	for_each_rdmarxq(s, i) {
		struct sge_ofld_rxq *q = &s->rdmarxq[i];

		if (msi_idx > 0)
			msi_idx++;
		err = t4_sge_alloc_rxq(adap, &q->rspq, false, adap->port[i],
				       msi_idx, &q->fl, uldrx_handler);
		if (err)
			goto freeout;
		memset(&q->stats, 0, sizeof(q->stats));
		s->rdma_rxq[i] = q->rspq.abs_id;
	}

	for_each_port(adap, i) {
		/*
		 * Note that ->rdmarxq[i].rspq.cntxt_id below is 0 if we don't
		 * have RDMA queues, and that's the right value.
		 */
		err = t4_sge_alloc_ctrl_txq(adap, &s->ctrlq[i], adap->port[i],
					    s->fw_evtq.cntxt_id,
					    s->rdmarxq[i].rspq.cntxt_id);
		if (err)
			goto freeout;
	}

	t4_write_reg(adap, MPS_TRC_RSS_CONTROL,
		     RSSCONTROL(netdev2pinfo(adap->port[0])->tx_chan) |
		     QUEUENUMBER(s->ethrxq[0].rspq.abs_id));
	return 0;
}

/*
 * Returns 0 if new FW was successfully loaded, a positive errno if a load was
 * started but failed, and a negative errno if flash load couldn't start.
 */
static int upgrade_fw(struct adapter *adap)
{
	int ret;
	u32 vers, exp_major;
	const struct fw_hdr *hdr;
	const struct firmware *fw;
	struct device *dev = adap->pdev_dev;
	char *fw_file_name;

	switch (CHELSIO_CHIP_VERSION(adap->chip)) {
	case CHELSIO_T4:
		fw_file_name = FW_FNAME;
		exp_major = FW_VERSION_MAJOR;
		break;
	case CHELSIO_T5:
		fw_file_name = FW5_FNAME;
		exp_major = FW_VERSION_MAJOR_T5;
		break;
	default:
		dev_err(dev, "Unsupported chip type, %x\n", adap->chip);
		return -EINVAL;
	}

	ret = request_firmware(&fw, fw_file_name, dev);
	if (ret < 0) {
		dev_err(dev, "unable to load firmware image %s, error %d\n",
			fw_file_name, ret);
		return ret;
	}

	hdr = (const struct fw_hdr *)fw->data;
	vers = ntohl(hdr->fw_ver);
	if (FW_HDR_FW_VER_MAJOR_GET(vers) != exp_major) {
		ret = -EINVAL;	/* wrong major version, won't do */
		goto out;
	}

	/*
	 * If the flash FW is unusable or we found something newer, load it.
	 */
	if (FW_HDR_FW_VER_MAJOR_GET(adap->params.fw_vers) != exp_major ||
	    vers > adap->params.fw_vers) {
		dev_info(dev, "upgrading firmware ...\n");
		ret = t4_fw_upgrade(adap, adap->mbox, fw->data, fw->size,
				    /*force=*/false);
		if (!ret)
			dev_info(dev,
				 "firmware upgraded to version %pI4 from %s\n",
				 &hdr->fw_ver, fw_file_name);
		else
			dev_err(dev, "firmware upgrade failed! err=%d\n", -ret);
	} else {
		/*
		 * Tell our caller that we didn't upgrade the firmware.
		 */
		ret = -EINVAL;
	}

out:	release_firmware(fw);
	return ret;
}

/*
 * Allocate a chunk of memory using kmalloc or, if that fails, vmalloc.
 * The allocated memory is cleared.
 */
void *t4_alloc_mem(size_t size)
{
	void *p = kzalloc(size, GFP_KERNEL);

	if (!p)
		p = vzalloc(size);
	return p;
}

/*
 * Free memory allocated through t4_alloc_mem().
 */
static void t4_free_mem(void *addr)
{
	if (is_vmalloc_addr(addr))
		vfree(addr);
	else
		kfree(addr);
}

/* Send a Work Request to write the filter at a specified index.  We construct
 * a Firmware Filter Work Request to have the work done and put the indicated
 * filter into "pending" mode which will prevent any further actions against
 * it till we get a reply from the firmware on the completion status of the
 * request.
 */
static int set_filter_wr(struct adapter *adapter, int fidx)
{
	struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
	struct sk_buff *skb;
	struct fw_filter_wr *fwr;
	unsigned int ftid;

	/* If the new filter requires loopback Destination MAC and/or VLAN
	 * rewriting then we need to allocate a Layer 2 Table (L2T) entry for
	 * the filter.
	 */
	if (f->fs.newdmac || f->fs.newvlan) {
		/* allocate L2T entry for new filter */
		f->l2t = t4_l2t_alloc_switching(adapter->l2t);
		if (f->l2t == NULL)
			return -EAGAIN;
		if (t4_l2t_set_switching(adapter, f->l2t, f->fs.vlan,
					f->fs.eport, f->fs.dmac)) {
			cxgb4_l2t_release(f->l2t);
			f->l2t = NULL;
			return -ENOMEM;
		}
	}

	ftid = adapter->tids.ftid_base + fidx;

	skb = alloc_skb(sizeof(*fwr), GFP_KERNEL | __GFP_NOFAIL);
	fwr = (struct fw_filter_wr *)__skb_put(skb, sizeof(*fwr));
	memset(fwr, 0, sizeof(*fwr));

	/* It would be nice to put most of the following in t4_hw.c but most
	 * of the work is translating the cxgbtool ch_filter_specification
	 * into the Work Request and the definition of that structure is
	 * currently in cxgbtool.h which isn't appropriate to pull into the
	 * common code.  We may eventually try to come up with a more neutral
	 * filter specification structure but for now it's easiest to simply
	 * put this fairly direct code in line ...
	 */
	fwr->op_pkd = htonl(FW_WR_OP(FW_FILTER_WR));
	fwr->len16_pkd = htonl(FW_WR_LEN16(sizeof(*fwr)/16));
	fwr->tid_to_iq =
		htonl(V_FW_FILTER_WR_TID(ftid) |
		      V_FW_FILTER_WR_RQTYPE(f->fs.type) |
		      V_FW_FILTER_WR_NOREPLY(0) |
		      V_FW_FILTER_WR_IQ(f->fs.iq));
	fwr->del_filter_to_l2tix =
		htonl(V_FW_FILTER_WR_RPTTID(f->fs.rpttid) |
		      V_FW_FILTER_WR_DROP(f->fs.action == FILTER_DROP) |
		      V_FW_FILTER_WR_DIRSTEER(f->fs.dirsteer) |
		      V_FW_FILTER_WR_MASKHASH(f->fs.maskhash) |
		      V_FW_FILTER_WR_DIRSTEERHASH(f->fs.dirsteerhash) |
		      V_FW_FILTER_WR_LPBK(f->fs.action == FILTER_SWITCH) |
		      V_FW_FILTER_WR_DMAC(f->fs.newdmac) |
		      V_FW_FILTER_WR_SMAC(f->fs.newsmac) |
		      V_FW_FILTER_WR_INSVLAN(f->fs.newvlan == VLAN_INSERT ||
					     f->fs.newvlan == VLAN_REWRITE) |
		      V_FW_FILTER_WR_RMVLAN(f->fs.newvlan == VLAN_REMOVE ||
					    f->fs.newvlan == VLAN_REWRITE) |
		      V_FW_FILTER_WR_HITCNTS(f->fs.hitcnts) |
		      V_FW_FILTER_WR_TXCHAN(f->fs.eport) |
		      V_FW_FILTER_WR_PRIO(f->fs.prio) |
		      V_FW_FILTER_WR_L2TIX(f->l2t ? f->l2t->idx : 0));
	fwr->ethtype = htons(f->fs.val.ethtype);
	fwr->ethtypem = htons(f->fs.mask.ethtype);
	fwr->frag_to_ovlan_vldm =
		(V_FW_FILTER_WR_FRAG(f->fs.val.frag) |
		 V_FW_FILTER_WR_FRAGM(f->fs.mask.frag) |
		 V_FW_FILTER_WR_IVLAN_VLD(f->fs.val.ivlan_vld) |
		 V_FW_FILTER_WR_OVLAN_VLD(f->fs.val.ovlan_vld) |
		 V_FW_FILTER_WR_IVLAN_VLDM(f->fs.mask.ivlan_vld) |
		 V_FW_FILTER_WR_OVLAN_VLDM(f->fs.mask.ovlan_vld));
	fwr->smac_sel = 0;
	fwr->rx_chan_rx_rpl_iq =
		htons(V_FW_FILTER_WR_RX_CHAN(0) |
		      V_FW_FILTER_WR_RX_RPL_IQ(adapter->sge.fw_evtq.abs_id));
	fwr->maci_to_matchtypem =
		htonl(V_FW_FILTER_WR_MACI(f->fs.val.macidx) |
		      V_FW_FILTER_WR_MACIM(f->fs.mask.macidx) |
		      V_FW_FILTER_WR_FCOE(f->fs.val.fcoe) |
		      V_FW_FILTER_WR_FCOEM(f->fs.mask.fcoe) |
		      V_FW_FILTER_WR_PORT(f->fs.val.iport) |
		      V_FW_FILTER_WR_PORTM(f->fs.mask.iport) |
		      V_FW_FILTER_WR_MATCHTYPE(f->fs.val.matchtype) |
		      V_FW_FILTER_WR_MATCHTYPEM(f->fs.mask.matchtype));
	fwr->ptcl = f->fs.val.proto;
	fwr->ptclm = f->fs.mask.proto;
	fwr->ttyp = f->fs.val.tos;
	fwr->ttypm = f->fs.mask.tos;
	fwr->ivlan = htons(f->fs.val.ivlan);
	fwr->ivlanm = htons(f->fs.mask.ivlan);
	fwr->ovlan = htons(f->fs.val.ovlan);
	fwr->ovlanm = htons(f->fs.mask.ovlan);
	memcpy(fwr->lip, f->fs.val.lip, sizeof(fwr->lip));
	memcpy(fwr->lipm, f->fs.mask.lip, sizeof(fwr->lipm));
	memcpy(fwr->fip, f->fs.val.fip, sizeof(fwr->fip));
	memcpy(fwr->fipm, f->fs.mask.fip, sizeof(fwr->fipm));
	fwr->lp = htons(f->fs.val.lport);
	fwr->lpm = htons(f->fs.mask.lport);
	fwr->fp = htons(f->fs.val.fport);
	fwr->fpm = htons(f->fs.mask.fport);
	if (f->fs.newsmac)
		memcpy(fwr->sma, f->fs.smac, sizeof(fwr->sma));

	/* Mark the filter as "pending" and ship off the Filter Work Request.
	 * When we get the Work Request Reply we'll clear the pending status.
	 */
	f->pending = 1;
	set_wr_txq(skb, CPL_PRIORITY_CONTROL, f->fs.val.iport & 0x3);
	t4_ofld_send(adapter, skb);
	return 0;
}

/* Delete the filter at a specified index.
 */
static int del_filter_wr(struct adapter *adapter, int fidx)
{
	struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
	struct sk_buff *skb;
	struct fw_filter_wr *fwr;
	unsigned int len, ftid;

	len = sizeof(*fwr);
	ftid = adapter->tids.ftid_base + fidx;

	skb = alloc_skb(len, GFP_KERNEL | __GFP_NOFAIL);
	fwr = (struct fw_filter_wr *)__skb_put(skb, len);
	t4_mk_filtdelwr(ftid, fwr, adapter->sge.fw_evtq.abs_id);

	/* Mark the filter as "pending" and ship off the Filter Work Request.
	 * When we get the Work Request Reply we'll clear the pending status.
	 */
	f->pending = 1;
	t4_mgmt_tx(adapter, skb);
	return 0;
}

static inline int is_offload(const struct adapter *adap)
{
	return adap->params.offload;
}

/*
 * Implementation of ethtool operations.
 */

static u32 get_msglevel(struct net_device *dev)
{
	return netdev2adap(dev)->msg_enable;
}

static void set_msglevel(struct net_device *dev, u32 val)
{
	netdev2adap(dev)->msg_enable = val;
}

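/*
 * Statistics identifiers reported by "ethtool -S", in the same order as the
 * values produced by get_stats() below.
 */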
static char stats_strings[][ETH_GSTRING_LEN] = {
	"TxOctetsOK ",
	"TxFramesOK ",
	"TxBroadcastFrames ",
	"TxMulticastFrames ",
	"TxUnicastFrames ",
	"TxErrorFrames ",

	"TxFrames64 ",
	"TxFrames65To127 ",
	"TxFrames128To255 ",
	"TxFrames256To511 ",
	"TxFrames512To1023 ",
	"TxFrames1024To1518 ",
	"TxFrames1519ToMax ",

	"TxFramesDropped ",
	"TxPauseFrames ",
	"TxPPP0Frames ",
	"TxPPP1Frames ",
	"TxPPP2Frames ",
	"TxPPP3Frames ",
	"TxPPP4Frames ",
	"TxPPP5Frames ",
	"TxPPP6Frames ",
	"TxPPP7Frames ",

	"RxOctetsOK ",
	"RxFramesOK ",
	"RxBroadcastFrames ",
	"RxMulticastFrames ",
	"RxUnicastFrames ",

	"RxFramesTooLong ",
	"RxJabberErrors ",
	"RxFCSErrors ",
	"RxLengthErrors ",
	"RxSymbolErrors ",
	"RxRuntFrames ",

	"RxFrames64 ",
	"RxFrames65To127 ",
	"RxFrames128To255 ",
	"RxFrames256To511 ",
	"RxFrames512To1023 ",
	"RxFrames1024To1518 ",
	"RxFrames1519ToMax ",

	"RxPauseFrames ",
	"RxPPP0Frames ",
	"RxPPP1Frames ",
	"RxPPP2Frames ",
	"RxPPP3Frames ",
	"RxPPP4Frames ",
	"RxPPP5Frames ",
	"RxPPP6Frames ",
	"RxPPP7Frames ",

	"RxBG0FramesDropped ",
	"RxBG1FramesDropped ",
	"RxBG2FramesDropped ",
	"RxBG3FramesDropped ",
	"RxBG0FramesTrunc ",
	"RxBG1FramesTrunc ",
	"RxBG2FramesTrunc ",
	"RxBG3FramesTrunc ",

	"TSO ",
	"TxCsumOffload ",
	"RxCsumGood ",
	"VLANextractions ",
	"VLANinsertions ",
	"GROpackets ",
	"GROmerged ",
	"WriteCoalSuccess ",
	"WriteCoalFail ",
};

static int get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(stats_strings);
	default:
		return -EOPNOTSUPP;
	}
}

#define T4_REGMAP_SIZE (160 * 1024)
#define T5_REGMAP_SIZE (332 * 1024)

static int get_regs_len(struct net_device *dev)
{
	struct adapter *adap = netdev2adap(dev);

	if (is_t4(adap->chip))
		return T4_REGMAP_SIZE;
	else
		return T5_REGMAP_SIZE;
}

static int get_eeprom_len(struct net_device *dev)
{
	return EEPROMSIZE;
}

static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct adapter *adapter = netdev2adap(dev);

	strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
	strlcpy(info->bus_info, pci_name(adapter->pdev),
		sizeof(info->bus_info));

	if (adapter->params.fw_vers)
		snprintf(info->fw_version, sizeof(info->fw_version),
			 "%u.%u.%u.%u, TP %u.%u.%u.%u",
			 FW_HDR_FW_VER_MAJOR_GET(adapter->params.fw_vers),
			 FW_HDR_FW_VER_MINOR_GET(adapter->params.fw_vers),
			 FW_HDR_FW_VER_MICRO_GET(adapter->params.fw_vers),
			 FW_HDR_FW_VER_BUILD_GET(adapter->params.fw_vers),
			 FW_HDR_FW_VER_MAJOR_GET(adapter->params.tp_vers),
			 FW_HDR_FW_VER_MINOR_GET(adapter->params.tp_vers),
			 FW_HDR_FW_VER_MICRO_GET(adapter->params.tp_vers),
			 FW_HDR_FW_VER_BUILD_GET(adapter->params.tp_vers));
}

static void get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	if (stringset == ETH_SS_STATS)
		memcpy(data, stats_strings, sizeof(stats_strings));
}

/*
 * port stats maintained per queue of the port.  They should be in the same
 * order as in stats_strings above.
 */
struct queue_port_stats {
	u64 tso;
	u64 tx_csum;
	u64 rx_csum;
	u64 vlan_ex;
	u64 vlan_ins;
	u64 gro_pkts;
	u64 gro_merged;
};

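/*
 * Accumulate the per-queue software statistics of all queue sets belonging
 * to a port.
 */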
static void collect_sge_port_stats(const struct adapter *adap,
		const struct port_info *p, struct queue_port_stats *s)
{
	int i;
	const struct sge_eth_txq *tx = &adap->sge.ethtxq[p->first_qset];
	const struct sge_eth_rxq *rx = &adap->sge.ethrxq[p->first_qset];

	memset(s, 0, sizeof(*s));
	for (i = 0; i < p->nqsets; i++, rx++, tx++) {
		s->tso += tx->tso;
		s->tx_csum += tx->tx_cso;
		s->rx_csum += rx->stats.rx_cso;
		s->vlan_ex += rx->stats.vlan_ex;
		s->vlan_ins += tx->vlan_ins;
		s->gro_pkts += rx->stats.lro_pkts;
		s->gro_merged += rx->stats.lro_merged;
	}
}

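/*
 * Gather a port's statistics for "ethtool -S": the hardware MAC counters
 * followed by the per-queue software counters, plus the two SGE
 * write-coalescing counters which only exist on T5 (zero-filled on T4).
 */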
static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
		      u64 *data)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	u32 val1, val2;

	t4_get_port_stats(adapter, pi->tx_chan, (struct port_stats *)data);

	data += sizeof(struct port_stats) / sizeof(u64);
	collect_sge_port_stats(adapter, pi, (struct queue_port_stats *)data);
	data += sizeof(struct queue_port_stats) / sizeof(u64);
	if (!is_t4(adapter->chip)) {
		t4_write_reg(adapter, SGE_STAT_CFG, STATSOURCE_T5(7));
		val1 = t4_read_reg(adapter, SGE_STAT_TOTAL);
		val2 = t4_read_reg(adapter, SGE_STAT_MATCH);
		*data = val1 - val2;
		data++;
		*data = val2;
		data++;
	} else {
		memset(data, 0, 2 * sizeof(u64));
		data += 2;	/* advance past the zeroed T5-only counters */
	}
}

/*
 * Return a version number to identify the type of adapter.  The scheme is:
 * - bits 0..9: chip version
 * - bits 10..15: chip revision
 * - bits 16..23: register dump version
 */
static inline unsigned int mk_adap_vers(const struct adapter *ap)
{
	return CHELSIO_CHIP_VERSION(ap->chip) |
		(CHELSIO_CHIP_RELEASE(ap->chip) << 10) | (1 << 16);
}

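/*
 * Read the contiguous register range [start, end] into the register dump
 * buffer at the offset corresponding to the register addresses.
 */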
static void reg_block_dump(struct adapter *ap, void *buf, unsigned int start,
			   unsigned int end)
{
	u32 *p = buf + start;

	for ( ; start <= end; start += sizeof(u32))
		*p++ = t4_read_reg(ap, start);
}

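/*
 * Produce the register dump for "ethtool -d".  The tables below list the
 * register ranges which are safe to read on each chip generation.
 */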
static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
		     void *buf)
{
	static const unsigned int t4_reg_ranges[] = {
		0x1008, 0x1108,
		0x1180, 0x11b4,
		0x11fc, 0x123c,
		0x1300, 0x173c,
		0x1800, 0x18fc,
		0x3000, 0x30d8,
		0x30e0, 0x5924,
		0x5960, 0x59d4,
		0x5a00, 0x5af8,
		0x6000, 0x6098,
		0x6100, 0x6150,
		0x6200, 0x6208,
		0x6240, 0x6248,
		0x6280, 0x6338,
		0x6370, 0x638c,
		0x6400, 0x643c,
		0x6500, 0x6524,
		0x6a00, 0x6a38,
		0x6a60, 0x6a78,
		0x6b00, 0x6b84,
		0x6bf0, 0x6c84,
		0x6cf0, 0x6d84,
		0x6df0, 0x6e84,
		0x6ef0, 0x6f84,
		0x6ff0, 0x7084,
		0x70f0, 0x7184,
		0x71f0, 0x7284,
		0x72f0, 0x7384,
		0x73f0, 0x7450,
		0x7500, 0x7530,
		0x7600, 0x761c,
		0x7680, 0x76cc,
		0x7700, 0x7798,
		0x77c0, 0x77fc,
		0x7900, 0x79fc,
		0x7b00, 0x7c38,
		0x7d00, 0x7efc,
		0x8dc0, 0x8e1c,
		0x8e30, 0x8e78,
		0x8ea0, 0x8f6c,
		0x8fc0, 0x9074,
		0x90fc, 0x90fc,
		0x9400, 0x9458,
		0x9600, 0x96bc,
		0x9800, 0x9808,
		0x9820, 0x983c,
		0x9850, 0x9864,
		0x9c00, 0x9c6c,
		0x9c80, 0x9cec,
		0x9d00, 0x9d6c,
		0x9d80, 0x9dec,
		0x9e00, 0x9e6c,
		0x9e80, 0x9eec,
		0x9f00, 0x9f6c,
		0x9f80, 0x9fec,
		0xd004, 0xd03c,
		0xdfc0, 0xdfe0,
		0xe000, 0xea7c,
		0xf000, 0x11190,
		0x19040, 0x1906c,
		0x19078, 0x19080,
		0x1908c, 0x19124,
		0x19150, 0x191b0,
		0x191d0, 0x191e8,
		0x19238, 0x1924c,
		0x193f8, 0x19474,
		0x19490, 0x194f8,
		0x19800, 0x19f30,
		0x1a000, 0x1a06c,
		0x1a0b0, 0x1a120,
		0x1a128, 0x1a138,
		0x1a190, 0x1a1c4,
		0x1a1fc, 0x1a1fc,
		0x1e040, 0x1e04c,
		0x1e284, 0x1e28c,
		0x1e2c0, 0x1e2c0,
		0x1e2e0, 0x1e2e0,
		0x1e300, 0x1e384,
		0x1e3c0, 0x1e3c8,
		0x1e440, 0x1e44c,
		0x1e684, 0x1e68c,
		0x1e6c0, 0x1e6c0,
		0x1e6e0, 0x1e6e0,
		0x1e700, 0x1e784,
		0x1e7c0, 0x1e7c8,
		0x1e840, 0x1e84c,
		0x1ea84, 0x1ea8c,
		0x1eac0, 0x1eac0,
		0x1eae0, 0x1eae0,
		0x1eb00, 0x1eb84,
		0x1ebc0, 0x1ebc8,
		0x1ec40, 0x1ec4c,
		0x1ee84, 0x1ee8c,
		0x1eec0, 0x1eec0,
		0x1eee0, 0x1eee0,
		0x1ef00, 0x1ef84,
		0x1efc0, 0x1efc8,
		0x1f040, 0x1f04c,
		0x1f284, 0x1f28c,
		0x1f2c0, 0x1f2c0,
		0x1f2e0, 0x1f2e0,
		0x1f300, 0x1f384,
		0x1f3c0, 0x1f3c8,
		0x1f440, 0x1f44c,
		0x1f684, 0x1f68c,
		0x1f6c0, 0x1f6c0,
		0x1f6e0, 0x1f6e0,
		0x1f700, 0x1f784,
		0x1f7c0, 0x1f7c8,
		0x1f840, 0x1f84c,
		0x1fa84, 0x1fa8c,
		0x1fac0, 0x1fac0,
		0x1fae0, 0x1fae0,
		0x1fb00, 0x1fb84,
		0x1fbc0, 0x1fbc8,
		0x1fc40, 0x1fc4c,
		0x1fe84, 0x1fe8c,
		0x1fec0, 0x1fec0,
		0x1fee0, 0x1fee0,
		0x1ff00, 0x1ff84,
		0x1ffc0, 0x1ffc8,
		0x20000, 0x2002c,
		0x20100, 0x2013c,
		0x20190, 0x201c8,
		0x20200, 0x20318,
		0x20400, 0x20528,
		0x20540, 0x20614,
		0x21000, 0x21040,
		0x2104c, 0x21060,
		0x210c0, 0x210ec,
		0x21200, 0x21268,
		0x21270, 0x21284,
		0x212fc, 0x21388,
		0x21400, 0x21404,
		0x21500, 0x21518,
		0x2152c, 0x2153c,
		0x21550, 0x21554,
		0x21600, 0x21600,
		0x21608, 0x21628,
		0x21630, 0x2163c,
		0x21700, 0x2171c,
		0x21780, 0x2178c,
		0x21800, 0x21c38,
		0x21c80, 0x21d7c,
		0x21e00, 0x21e04,
		0x22000, 0x2202c,
		0x22100, 0x2213c,
		0x22190, 0x221c8,
		0x22200, 0x22318,
		0x22400, 0x22528,
		0x22540, 0x22614,
		0x23000, 0x23040,
		0x2304c, 0x23060,
		0x230c0, 0x230ec,
		0x23200, 0x23268,
		0x23270, 0x23284,
		0x232fc, 0x23388,
		0x23400, 0x23404,
		0x23500, 0x23518,
		0x2352c, 0x2353c,
		0x23550, 0x23554,
		0x23600, 0x23600,
		0x23608, 0x23628,
		0x23630, 0x2363c,
		0x23700, 0x2371c,
		0x23780, 0x2378c,
		0x23800, 0x23c38,
		0x23c80, 0x23d7c,
		0x23e00, 0x23e04,
		0x24000, 0x2402c,
		0x24100, 0x2413c,
		0x24190, 0x241c8,
		0x24200, 0x24318,
		0x24400, 0x24528,
		0x24540, 0x24614,
		0x25000, 0x25040,
		0x2504c, 0x25060,
		0x250c0, 0x250ec,
		0x25200, 0x25268,
		0x25270, 0x25284,
		0x252fc, 0x25388,
		0x25400, 0x25404,
		0x25500, 0x25518,
		0x2552c, 0x2553c,
		0x25550, 0x25554,
		0x25600, 0x25600,
		0x25608, 0x25628,
		0x25630, 0x2563c,
		0x25700, 0x2571c,
		0x25780, 0x2578c,
		0x25800, 0x25c38,
		0x25c80, 0x25d7c,
		0x25e00, 0x25e04,
		0x26000, 0x2602c,
		0x26100, 0x2613c,
		0x26190, 0x261c8,
		0x26200, 0x26318,
		0x26400, 0x26528,
		0x26540, 0x26614,
		0x27000, 0x27040,
		0x2704c, 0x27060,
		0x270c0, 0x270ec,
		0x27200, 0x27268,
		0x27270, 0x27284,
		0x272fc, 0x27388,
		0x27400, 0x27404,
		0x27500, 0x27518,
		0x2752c, 0x2753c,
		0x27550, 0x27554,
		0x27600, 0x27600,
		0x27608, 0x27628,
		0x27630, 0x2763c,
		0x27700, 0x2771c,
		0x27780, 0x2778c,
		0x27800, 0x27c38,
		0x27c80, 0x27d7c,
		0x27e00, 0x27e04
	};

	static const unsigned int t5_reg_ranges[] = {
		0x1008, 0x1148,
		0x1180, 0x11b4,
		0x11fc, 0x123c,
		0x1280, 0x173c,
		0x1800, 0x18fc,
		0x3000, 0x3028,
		0x3060, 0x30d8,
		0x30e0, 0x30fc,
		0x3140, 0x357c,
		0x35a8, 0x35cc,
		0x35ec, 0x35ec,
		0x3600, 0x5624,
		0x56cc, 0x575c,
		0x580c, 0x5814,
		0x5890, 0x58bc,
		0x5940, 0x59dc,
		0x59fc, 0x5a18,
		0x5a60, 0x5a9c,
		0x5b9c, 0x5bfc,
		0x6000, 0x6040,
		0x6058, 0x614c,
		0x7700, 0x7798,
		0x77c0, 0x78fc,
		0x7b00, 0x7c54,
		0x7d00, 0x7efc,
		0x8dc0, 0x8de0,
		0x8df8, 0x8e84,
		0x8ea0, 0x8f84,
		0x8fc0, 0x90f8,
		0x9400, 0x9470,
		0x9600, 0x96f4,
		0x9800, 0x9808,
		0x9820, 0x983c,
		0x9850, 0x9864,
		0x9c00, 0x9c6c,
		0x9c80, 0x9cec,
		0x9d00, 0x9d6c,
		0x9d80, 0x9dec,
		0x9e00, 0x9e6c,
		0x9e80, 0x9eec,
		0x9f00, 0x9f6c,
		0x9f80, 0xa020,
		0xd004, 0xd03c,
		0xdfc0, 0xdfe0,
		0xe000, 0x11088,
		0x1109c, 0x1117c,
		0x11190, 0x11204,
		0x19040, 0x1906c,
		0x19078, 0x19080,
		0x1908c, 0x19124,
		0x19150, 0x191b0,
		0x191d0, 0x191e8,
		0x19238, 0x19290,
		0x193f8, 0x19474,
		0x19490, 0x194cc,
		0x194f0, 0x194f8,
		0x19c00, 0x19c60,
		0x19c94, 0x19e10,
		0x19e50, 0x19f34,
		0x19f40, 0x19f50,
		0x19f90, 0x19fe4,
		0x1a000, 0x1a06c,
		0x1a0b0, 0x1a120,
		0x1a128, 0x1a138,
		0x1a190, 0x1a1c4,
		0x1a1fc, 0x1a1fc,
		0x1e008, 0x1e00c,
		0x1e040, 0x1e04c,
		0x1e284, 0x1e290,
		0x1e2c0, 0x1e2c0,
		0x1e2e0, 0x1e2e0,
		0x1e300, 0x1e384,
		0x1e3c0, 0x1e3c8,
		0x1e408, 0x1e40c,
		0x1e440, 0x1e44c,
		0x1e684, 0x1e690,
		0x1e6c0, 0x1e6c0,
		0x1e6e0, 0x1e6e0,
		0x1e700, 0x1e784,
		0x1e7c0, 0x1e7c8,
		0x1e808, 0x1e80c,
		0x1e840, 0x1e84c,
		0x1ea84, 0x1ea90,
		0x1eac0, 0x1eac0,
		0x1eae0, 0x1eae0,
		0x1eb00, 0x1eb84,
		0x1ebc0, 0x1ebc8,
		0x1ec08, 0x1ec0c,
		0x1ec40, 0x1ec4c,
		0x1ee84, 0x1ee90,
		0x1eec0, 0x1eec0,
		0x1eee0, 0x1eee0,
		0x1ef00, 0x1ef84,
		0x1efc0, 0x1efc8,
		0x1f008, 0x1f00c,
		0x1f040, 0x1f04c,
		0x1f284, 0x1f290,
		0x1f2c0, 0x1f2c0,
		0x1f2e0, 0x1f2e0,
		0x1f300, 0x1f384,
		0x1f3c0, 0x1f3c8,
		0x1f408, 0x1f40c,
		0x1f440, 0x1f44c,
		0x1f684, 0x1f690,
		0x1f6c0, 0x1f6c0,
		0x1f6e0, 0x1f6e0,
		0x1f700, 0x1f784,
		0x1f7c0, 0x1f7c8,
		0x1f808, 0x1f80c,
		0x1f840, 0x1f84c,
		0x1fa84, 0x1fa90,
		0x1fac0, 0x1fac0,
		0x1fae0, 0x1fae0,
		0x1fb00, 0x1fb84,
		0x1fbc0, 0x1fbc8,
		0x1fc08, 0x1fc0c,
		0x1fc40, 0x1fc4c,
		0x1fe84, 0x1fe90,
		0x1fec0, 0x1fec0,
		0x1fee0, 0x1fee0,
		0x1ff00, 0x1ff84,
		0x1ffc0, 0x1ffc8,
		0x30000, 0x30030,
		0x30100, 0x30144,
		0x30190, 0x301d0,
		0x30200, 0x30318,
		0x30400, 0x3052c,
		0x30540, 0x3061c,
		0x30800, 0x30834,
		0x308c0, 0x30908,
		0x30910, 0x309ac,
		0x30a00, 0x30a04,
		0x30a0c, 0x30a2c,
		0x30a44, 0x30a50,
		0x30a74, 0x30c24,
		0x30d08, 0x30d14,
		0x30d1c, 0x30d20,
		0x30d3c, 0x30d50,
		0x31200, 0x3120c,
		0x31220, 0x31220,
		0x31240, 0x31240,
		0x31600, 0x31600,
		0x31608, 0x3160c,
		0x31a00, 0x31a1c,
		0x31e04, 0x31e20,
		0x31e38, 0x31e3c,
		0x31e80, 0x31e80,
		0x31e88, 0x31ea8,
		0x31eb0, 0x31eb4,
		0x31ec8, 0x31ed4,
		0x31fb8, 0x32004,
		0x32208, 0x3223c,
		0x32600, 0x32630,
		0x32a00, 0x32abc,
		0x32b00, 0x32b70,
		0x33000, 0x33048,
		0x33060, 0x3309c,
		0x330f0, 0x33148,
		0x33160, 0x3319c,
		0x331f0, 0x332e4,
		0x332f8, 0x333e4,
		0x333f8, 0x33448,
		0x33460, 0x3349c,
		0x334f0, 0x33548,
		0x33560, 0x3359c,
		0x335f0, 0x336e4,
		0x336f8, 0x337e4,
		0x337f8, 0x337fc,
		0x33814, 0x33814,
		0x3382c, 0x3382c,
		0x33880, 0x3388c,
		0x338e8, 0x338ec,
		0x33900, 0x33948,
		0x33960, 0x3399c,
		0x339f0, 0x33ae4,
		0x33af8, 0x33b10,
		0x33b28, 0x33b28,
		0x33b3c, 0x33b50,
		0x33bf0, 0x33c10,
		0x33c28, 0x33c28,
		0x33c3c, 0x33c50,
		0x33cf0, 0x33cfc,
		0x34000, 0x34030,
		0x34100, 0x34144,
		0x34190, 0x341d0,
		0x34200, 0x34318,
		0x34400, 0x3452c,
		0x34540, 0x3461c,
		0x34800, 0x34834,
		0x348c0, 0x34908,
		0x34910, 0x349ac,
		0x34a00, 0x34a04,
		0x34a0c, 0x34a2c,
		0x34a44, 0x34a50,
		0x34a74, 0x34c24,
		0x34d08, 0x34d14,
		0x34d1c, 0x34d20,
		0x34d3c, 0x34d50,
		0x35200, 0x3520c,
		0x35220, 0x35220,
		0x35240, 0x35240,
		0x35600, 0x35600,
		0x35608, 0x3560c,
		0x35a00, 0x35a1c,
		0x35e04, 0x35e20,
		0x35e38, 0x35e3c,
		0x35e80, 0x35e80,
		0x35e88, 0x35ea8,
		0x35eb0, 0x35eb4,
		0x35ec8, 0x35ed4,
		0x35fb8, 0x36004,
		0x36208, 0x3623c,
		0x36600, 0x36630,
		0x36a00, 0x36abc,
		0x36b00, 0x36b70,
		0x37000, 0x37048,
		0x37060, 0x3709c,
		0x370f0, 0x37148,
		0x37160, 0x3719c,
		0x371f0, 0x372e4,
		0x372f8, 0x373e4,
		0x373f8, 0x37448,
		0x37460, 0x3749c,
		0x374f0, 0x37548,
		0x37560, 0x3759c,
		0x375f0, 0x376e4,
		0x376f8, 0x377e4,
		0x377f8, 0x377fc,
		0x37814, 0x37814,
		0x3782c, 0x3782c,
		0x37880, 0x3788c,
		0x378e8, 0x378ec,
		0x37900, 0x37948,
		0x37960, 0x3799c,
		0x379f0, 0x37ae4,
		0x37af8, 0x37b10,
		0x37b28, 0x37b28,
		0x37b3c, 0x37b50,
		0x37bf0, 0x37c10,
		0x37c28, 0x37c28,
		0x37c3c, 0x37c50,
		0x37cf0, 0x37cfc,
		0x38000, 0x38030,
		0x38100, 0x38144,
		0x38190, 0x381d0,
		0x38200, 0x38318,
		0x38400, 0x3852c,
		0x38540, 0x3861c,
		0x38800, 0x38834,
		0x388c0, 0x38908,
		0x38910, 0x389ac,
		0x38a00, 0x38a04,
		0x38a0c, 0x38a2c,
		0x38a44, 0x38a50,
		0x38a74, 0x38c24,
		0x38d08, 0x38d14,
		0x38d1c, 0x38d20,
		0x38d3c, 0x38d50,
		0x39200, 0x3920c,
		0x39220, 0x39220,
		0x39240, 0x39240,
		0x39600, 0x39600,
		0x39608, 0x3960c,
		0x39a00, 0x39a1c,
		0x39e04, 0x39e20,
		0x39e38, 0x39e3c,
		0x39e80, 0x39e80,
		0x39e88, 0x39ea8,
1999 0x39eb0, 0x39eb4,
2000 0x39ec8, 0x39ed4,
2001 0x39fb8, 0x3a004,
2002 0x3a208, 0x3a23c,
2003 0x3a600, 0x3a630,
2004 0x3aa00, 0x3aabc,
2005 0x3ab00, 0x3ab70,
2006 0x3b000, 0x3b048,
2007 0x3b060, 0x3b09c,
2008 0x3b0f0, 0x3b148,
2009 0x3b160, 0x3b19c,
2010 0x3b1f0, 0x3b2e4,
2011 0x3b2f8, 0x3b3e4,
2012 0x3b3f8, 0x3b448,
2013 0x3b460, 0x3b49c,
2014 0x3b4f0, 0x3b548,
2015 0x3b560, 0x3b59c,
2016 0x3b5f0, 0x3b6e4,
2017 0x3b6f8, 0x3b7e4,
2018 0x3b7f8, 0x3b7fc,
2019 0x3b814, 0x3b814,
2020 0x3b82c, 0x3b82c,
2021 0x3b880, 0x3b88c,
2022 0x3b8e8, 0x3b8ec,
2023 0x3b900, 0x3b948,
2024 0x3b960, 0x3b99c,
2025 0x3b9f0, 0x3bae4,
2026 0x3baf8, 0x3bb10,
2027 0x3bb28, 0x3bb28,
2028 0x3bb3c, 0x3bb50,
2029 0x3bbf0, 0x3bc10,
2030 0x3bc28, 0x3bc28,
2031 0x3bc3c, 0x3bc50,
2032 0x3bcf0, 0x3bcfc,
2033 0x3c000, 0x3c030,
2034 0x3c100, 0x3c144,
2035 0x3c190, 0x3c1d0,
2036 0x3c200, 0x3c318,
2037 0x3c400, 0x3c52c,
2038 0x3c540, 0x3c61c,
2039 0x3c800, 0x3c834,
2040 0x3c8c0, 0x3c908,
2041 0x3c910, 0x3c9ac,
2042 0x3ca00, 0x3ca04,
2043 0x3ca0c, 0x3ca2c,
2044 0x3ca44, 0x3ca50,
2045 0x3ca74, 0x3cc24,
2046 0x3cd08, 0x3cd14,
2047 0x3cd1c, 0x3cd20,
2048 0x3cd3c, 0x3cd50,
2049 0x3d200, 0x3d20c,
2050 0x3d220, 0x3d220,
2051 0x3d240, 0x3d240,
2052 0x3d600, 0x3d600,
2053 0x3d608, 0x3d60c,
2054 0x3da00, 0x3da1c,
2055 0x3de04, 0x3de20,
2056 0x3de38, 0x3de3c,
2057 0x3de80, 0x3de80,
2058 0x3de88, 0x3dea8,
2059 0x3deb0, 0x3deb4,
2060 0x3dec8, 0x3ded4,
2061 0x3dfb8, 0x3e004,
2062 0x3e208, 0x3e23c,
2063 0x3e600, 0x3e630,
2064 0x3ea00, 0x3eabc,
2065 0x3eb00, 0x3eb70,
2066 0x3f000, 0x3f048,
2067 0x3f060, 0x3f09c,
2068 0x3f0f0, 0x3f148,
2069 0x3f160, 0x3f19c,
2070 0x3f1f0, 0x3f2e4,
2071 0x3f2f8, 0x3f3e4,
2072 0x3f3f8, 0x3f448,
2073 0x3f460, 0x3f49c,
2074 0x3f4f0, 0x3f548,
2075 0x3f560, 0x3f59c,
2076 0x3f5f0, 0x3f6e4,
2077 0x3f6f8, 0x3f7e4,
2078 0x3f7f8, 0x3f7fc,
2079 0x3f814, 0x3f814,
2080 0x3f82c, 0x3f82c,
2081 0x3f880, 0x3f88c,
2082 0x3f8e8, 0x3f8ec,
2083 0x3f900, 0x3f948,
2084 0x3f960, 0x3f99c,
2085 0x3f9f0, 0x3fae4,
2086 0x3faf8, 0x3fb10,
2087 0x3fb28, 0x3fb28,
2088 0x3fb3c, 0x3fb50,
2089 0x3fbf0, 0x3fc10,
2090 0x3fc28, 0x3fc28,
2091 0x3fc3c, 0x3fc50,
2092 0x3fcf0, 0x3fcfc,
2093 0x40000, 0x4000c,
2094 0x40040, 0x40068,
2095 0x40080, 0x40144,
2096 0x40180, 0x4018c,
2097 0x40200, 0x40298,
2098 0x402ac, 0x4033c,
2099 0x403f8, 0x403fc,
2100 0x41300, 0x413c4,
2101 0x41400, 0x4141c,
2102 0x41480, 0x414d0,
2103 0x44000, 0x44078,
2104 0x440c0, 0x44278,
2105 0x442c0, 0x44478,
2106 0x444c0, 0x44678,
2107 0x446c0, 0x44878,
2108 0x448c0, 0x449fc,
2109 0x45000, 0x45068,
2110 0x45080, 0x45084,
2111 0x450a0, 0x450b0,
2112 0x45200, 0x45268,
2113 0x45280, 0x45284,
2114 0x452a0, 0x452b0,
2115 0x460c0, 0x460e4,
2116 0x47000, 0x4708c,
2117 0x47200, 0x47250,
2118 0x47400, 0x47420,
2119 0x47600, 0x47618,
2120 0x47800, 0x47814,
2121 0x48000, 0x4800c,
2122 0x48040, 0x48068,
2123 0x48080, 0x48144,
2124 0x48180, 0x4818c,
2125 0x48200, 0x48298,
2126 0x482ac, 0x4833c,
2127 0x483f8, 0x483fc,
2128 0x49300, 0x493c4,
2129 0x49400, 0x4941c,
2130 0x49480, 0x494d0,
2131 0x4c000, 0x4c078,
2132 0x4c0c0, 0x4c278,
2133 0x4c2c0, 0x4c478,
2134 0x4c4c0, 0x4c678,
2135 0x4c6c0, 0x4c878,
2136 0x4c8c0, 0x4c9fc,
2137 0x4d000, 0x4d068,
2138 0x4d080, 0x4d084,
2139 0x4d0a0, 0x4d0b0,
2140 0x4d200, 0x4d268,
2141 0x4d280, 0x4d284,
2142 0x4d2a0, 0x4d2b0,
2143 0x4e0c0, 0x4e0e4,
2144 0x4f000, 0x4f08c,
2145 0x4f200, 0x4f250,
2146 0x4f400, 0x4f420,
2147 0x4f600, 0x4f618,
2148 0x4f800, 0x4f814,
2149 0x50000, 0x500cc,
2150 0x50400, 0x50400,
2151 0x50800, 0x508cc,
2152 0x50c00, 0x50c00,
2153 0x51000, 0x5101c,
2154 0x51300, 0x51308,
2155 };
2156
2157 int i;
2158 struct adapter *ap = netdev2adap(dev);
2159 static const unsigned int *reg_ranges;
2160 int arr_size = 0, buf_size = 0;
2161
2162 if (is_t4(ap->chip)) {
2163 reg_ranges = &t4_reg_ranges[0];
2164 arr_size = ARRAY_SIZE(t4_reg_ranges);
2165 buf_size = T4_REGMAP_SIZE;
2166 } else {
2167 reg_ranges = &t5_reg_ranges[0];
2168 arr_size = ARRAY_SIZE(t5_reg_ranges);
2169 buf_size = T5_REGMAP_SIZE;
2170 }
2171
2172 regs->version = mk_adap_vers(ap);
2173
2174 memset(buf, 0, buf_size);
2175 for (i = 0; i < arr_size; i += 2)
2176 reg_block_dump(ap, buf, reg_ranges[i], reg_ranges[i + 1]);
2177}
2178
2179static int restart_autoneg(struct net_device *dev)
2180{
2181 struct port_info *p = netdev_priv(dev);
2182
2183 if (!netif_running(dev))
2184 return -EAGAIN;
2185 if (p->link_cfg.autoneg != AUTONEG_ENABLE)
2186 return -EINVAL;
2187 t4_restart_aneg(p->adapter, p->adapter->fn, p->tx_chan);
2188 return 0;
2189}
2190
2191static int identify_port(struct net_device *dev,
2192 enum ethtool_phys_id_state state)
2193{
2194 unsigned int val;
2195 struct adapter *adap = netdev2adap(dev);
2196
2197 if (state == ETHTOOL_ID_ACTIVE)
2198 val = 0xffff;
2199 else if (state == ETHTOOL_ID_INACTIVE)
2200 val = 0;
2201 else
2202 return -EINVAL;
2203
2204 return t4_identify_port(adap, adap->fn, netdev2pinfo(dev)->viid, val);
2205}
2206
2207static unsigned int from_fw_linkcaps(unsigned int type, unsigned int caps)
2208{
2209 unsigned int v = 0;
2210
2211 if (type == FW_PORT_TYPE_BT_SGMII || type == FW_PORT_TYPE_BT_XFI ||
2212 type == FW_PORT_TYPE_BT_XAUI) {
2213 v |= SUPPORTED_TP;
2214 if (caps & FW_PORT_CAP_SPEED_100M)
2215 v |= SUPPORTED_100baseT_Full;
2216 if (caps & FW_PORT_CAP_SPEED_1G)
2217 v |= SUPPORTED_1000baseT_Full;
2218 if (caps & FW_PORT_CAP_SPEED_10G)
2219 v |= SUPPORTED_10000baseT_Full;
2220 } else if (type == FW_PORT_TYPE_KX4 || type == FW_PORT_TYPE_KX) {
2221 v |= SUPPORTED_Backplane;
2222 if (caps & FW_PORT_CAP_SPEED_1G)
2223 v |= SUPPORTED_1000baseKX_Full;
2224 if (caps & FW_PORT_CAP_SPEED_10G)
2225 v |= SUPPORTED_10000baseKX4_Full;
2226 } else if (type == FW_PORT_TYPE_KR)
2227 v |= SUPPORTED_Backplane | SUPPORTED_10000baseKR_Full;
2228 else if (type == FW_PORT_TYPE_BP_AP)
2229 v |= SUPPORTED_Backplane | SUPPORTED_10000baseR_FEC |
2230 SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full;
2231 else if (type == FW_PORT_TYPE_BP4_AP)
2232 v |= SUPPORTED_Backplane | SUPPORTED_10000baseR_FEC |
2233 SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full |
2234 SUPPORTED_10000baseKX4_Full;
2235 else if (type == FW_PORT_TYPE_FIBER_XFI ||
2236 type == FW_PORT_TYPE_FIBER_XAUI || type == FW_PORT_TYPE_SFP)
2237 v |= SUPPORTED_FIBRE;
2238
2239 if (caps & FW_PORT_CAP_ANEG)
2240 v |= SUPPORTED_Autoneg;
2241 return v;
2242}
2243
2244static unsigned int to_fw_linkcaps(unsigned int caps)
2245{
2246 unsigned int v = 0;
2247
2248 if (caps & ADVERTISED_100baseT_Full)
2249 v |= FW_PORT_CAP_SPEED_100M;
2250 if (caps & ADVERTISED_1000baseT_Full)
2251 v |= FW_PORT_CAP_SPEED_1G;
2252 if (caps & ADVERTISED_10000baseT_Full)
2253 v |= FW_PORT_CAP_SPEED_10G;
2254 return v;
2255}
2256
2257static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2258{
2259 const struct port_info *p = netdev_priv(dev);
2260
2261 if (p->port_type == FW_PORT_TYPE_BT_SGMII ||
2262 p->port_type == FW_PORT_TYPE_BT_XFI ||
2263 p->port_type == FW_PORT_TYPE_BT_XAUI)
2264 cmd->port = PORT_TP;
2265 else if (p->port_type == FW_PORT_TYPE_FIBER_XFI ||
2266 p->port_type == FW_PORT_TYPE_FIBER_XAUI)
2267 cmd->port = PORT_FIBRE;
2268 else if (p->port_type == FW_PORT_TYPE_SFP) {
2269 if (p->mod_type == FW_PORT_MOD_TYPE_TWINAX_PASSIVE ||
2270 p->mod_type == FW_PORT_MOD_TYPE_TWINAX_ACTIVE)
2271 cmd->port = PORT_DA;
2272 else
2273 cmd->port = PORT_FIBRE;
2274 } else
2275 cmd->port = PORT_OTHER;
2276
2277 if (p->mdio_addr >= 0) {
2278 cmd->phy_address = p->mdio_addr;
2279 cmd->transceiver = XCVR_EXTERNAL;
2280 cmd->mdio_support = p->port_type == FW_PORT_TYPE_BT_SGMII ?
2281 MDIO_SUPPORTS_C22 : MDIO_SUPPORTS_C45;
2282 } else {
2283 cmd->phy_address = 0; /* not really, but no better option */
2284 cmd->transceiver = XCVR_INTERNAL;
2285 cmd->mdio_support = 0;
2286 }
2287
2288 cmd->supported = from_fw_linkcaps(p->port_type, p->link_cfg.supported);
2289 cmd->advertising = from_fw_linkcaps(p->port_type,
2290 p->link_cfg.advertising);
2291 ethtool_cmd_speed_set(cmd,
2292 netif_carrier_ok(dev) ? p->link_cfg.speed : 0);
2293 cmd->duplex = DUPLEX_FULL;
2294 cmd->autoneg = p->link_cfg.autoneg;
2295 cmd->maxtxpkt = 0;
2296 cmd->maxrxpkt = 0;
2297 return 0;
2298}
2299
2300static unsigned int speed_to_caps(int speed)
2301{
2302 if (speed == SPEED_100)
2303 return FW_PORT_CAP_SPEED_100M;
2304 if (speed == SPEED_1000)
2305 return FW_PORT_CAP_SPEED_1G;
2306 if (speed == SPEED_10000)
2307 return FW_PORT_CAP_SPEED_10G;
2308 return 0;
2309}
2310
2311static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2312{
2313 unsigned int cap;
2314 struct port_info *p = netdev_priv(dev);
2315 struct link_config *lc = &p->link_cfg;
2316 u32 speed = ethtool_cmd_speed(cmd);
2317
2318 if (cmd->duplex != DUPLEX_FULL) /* only full-duplex supported */
2319 return -EINVAL;
2320
2321 if (!(lc->supported & FW_PORT_CAP_ANEG)) {
2322 /*
2323 * PHY offers a single speed. See if that's what's
2324 * being requested.
2325 */
2326 if (cmd->autoneg == AUTONEG_DISABLE &&
2327 (lc->supported & speed_to_caps(speed)))
2328 return 0;
2329 return -EINVAL;
2330 }
2331
2332 if (cmd->autoneg == AUTONEG_DISABLE) {
2333 cap = speed_to_caps(speed);
2334
2335 if (!(lc->supported & cap) || (speed == SPEED_1000) ||
2336 (speed == SPEED_10000))
2337 return -EINVAL;
2338 lc->requested_speed = cap;
2339 lc->advertising = 0;
2340 } else {
2341 cap = to_fw_linkcaps(cmd->advertising);
2342 if (!(lc->supported & cap))
2343 return -EINVAL;
2344 lc->requested_speed = 0;
2345 lc->advertising = cap | FW_PORT_CAP_ANEG;
2346 }
2347 lc->autoneg = cmd->autoneg;
2348
2349 if (netif_running(dev))
2350 return t4_link_start(p->adapter, p->adapter->fn, p->tx_chan,
2351 lc);
2352 return 0;
2353}
2354
2355static void get_pauseparam(struct net_device *dev,
2356 struct ethtool_pauseparam *epause)
2357{
2358 struct port_info *p = netdev_priv(dev);
2359
2360 epause->autoneg = (p->link_cfg.requested_fc & PAUSE_AUTONEG) != 0;
2361 epause->rx_pause = (p->link_cfg.fc & PAUSE_RX) != 0;
2362 epause->tx_pause = (p->link_cfg.fc & PAUSE_TX) != 0;
2363}
2364
2365static int set_pauseparam(struct net_device *dev,
2366 struct ethtool_pauseparam *epause)
2367{
2368 struct port_info *p = netdev_priv(dev);
2369 struct link_config *lc = &p->link_cfg;
2370
2371 if (epause->autoneg == AUTONEG_DISABLE)
2372 lc->requested_fc = 0;
2373 else if (lc->supported & FW_PORT_CAP_ANEG)
2374 lc->requested_fc = PAUSE_AUTONEG;
2375 else
2376 return -EINVAL;
2377
2378 if (epause->rx_pause)
2379 lc->requested_fc |= PAUSE_RX;
2380 if (epause->tx_pause)
2381 lc->requested_fc |= PAUSE_TX;
2382 if (netif_running(dev))
2383 return t4_link_start(p->adapter, p->adapter->fn, p->tx_chan,
2384 lc);
2385 return 0;
2386}
2387
2388static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
2389{
2390 const struct port_info *pi = netdev_priv(dev);
2391 const struct sge *s = &pi->adapter->sge;
2392
2393 e->rx_max_pending = MAX_RX_BUFFERS;
2394 e->rx_mini_max_pending = MAX_RSPQ_ENTRIES;
2395 e->rx_jumbo_max_pending = 0;
2396 e->tx_max_pending = MAX_TXQ_ENTRIES;
2397
2398 e->rx_pending = s->ethrxq[pi->first_qset].fl.size - 8;
2399 e->rx_mini_pending = s->ethrxq[pi->first_qset].rspq.size;
2400 e->rx_jumbo_pending = 0;
2401 e->tx_pending = s->ethtxq[pi->first_qset].q.size;
2402}
2403
2404static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
2405{
2406 int i;
2407 const struct port_info *pi = netdev_priv(dev);
2408 struct adapter *adapter = pi->adapter;
2409 struct sge *s = &adapter->sge;
2410
2411 if (e->rx_pending > MAX_RX_BUFFERS || e->rx_jumbo_pending ||
2412 e->tx_pending > MAX_TXQ_ENTRIES ||
2413 e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
2414 e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
2415 e->rx_pending < MIN_FL_ENTRIES || e->tx_pending < MIN_TXQ_ENTRIES)
2416 return -EINVAL;
2417
2418 if (adapter->flags & FULL_INIT_DONE)
2419 return -EBUSY;
2420
2421 for (i = 0; i < pi->nqsets; ++i) {
2422 s->ethtxq[pi->first_qset + i].q.size = e->tx_pending;
2423 s->ethrxq[pi->first_qset + i].fl.size = e->rx_pending + 8;
2424 s->ethrxq[pi->first_qset + i].rspq.size = e->rx_mini_pending;
2425 }
2426 return 0;
2427}
2428
2429static int closest_timer(const struct sge *s, int time)
2430{
2431 int i, delta, match = 0, min_delta = INT_MAX;
2432
2433 for (i = 0; i < ARRAY_SIZE(s->timer_val); i++) {
2434 delta = time - s->timer_val[i];
2435 if (delta < 0)
2436 delta = -delta;
2437 if (delta < min_delta) {
2438 min_delta = delta;
2439 match = i;
2440 }
2441 }
2442 return match;
2443}
2444
2445static int closest_thres(const struct sge *s, int thres)
2446{
2447 int i, delta, match = 0, min_delta = INT_MAX;
2448
2449 for (i = 0; i < ARRAY_SIZE(s->counter_val); i++) {
2450 delta = thres - s->counter_val[i];
2451 if (delta < 0)
2452 delta = -delta;
2453 if (delta < min_delta) {
2454 min_delta = delta;
2455 match = i;
2456 }
2457 }
2458 return match;
2459}
2460
2461/*
2462 * Return a queue's interrupt hold-off time in us. 0 means no timer.
2463 */
2464static unsigned int qtimer_val(const struct adapter *adap,
2465 const struct sge_rspq *q)
2466{
2467 unsigned int idx = q->intr_params >> 1;
2468
2469 return idx < SGE_NTIMERS ? adap->sge.timer_val[idx] : 0;
2470}
2471
2472/**
2473 * set_rxq_intr_params - set a queue's interrupt holdoff parameters
2474 * @adap: the adapter
2475 * @q: the Rx queue
2476 * @us: the hold-off time in us, or 0 to disable timer
2477 * @cnt: the hold-off packet count, or 0 to disable counter
2478 *
2479 * Sets an Rx queue's interrupt hold-off time and packet count. At least
2480 * one of the two needs to be enabled for the queue to generate interrupts.
2481 */
2482static int set_rxq_intr_params(struct adapter *adap, struct sge_rspq *q,
2483 unsigned int us, unsigned int cnt)
2484{
2485 if ((us | cnt) == 0)
2486 cnt = 1;
2487
2488 if (cnt) {
2489 int err;
2490 u32 v, new_idx;
2491
2492 new_idx = closest_thres(&adap->sge, cnt);
2493 if (q->desc && q->pktcnt_idx != new_idx) {
2494 /* the queue has already been created, update it */
2495 v = FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) |
2496 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH) |
2497 FW_PARAMS_PARAM_YZ(q->cntxt_id);
2498 err = t4_set_params(adap, adap->fn, adap->fn, 0, 1, &v,
2499 &new_idx);
2500 if (err)
2501 return err;
2502 }
2503 q->pktcnt_idx = new_idx;
2504 }
2505
2506 us = us == 0 ? 6 : closest_timer(&adap->sge, us);
2507 q->intr_params = QINTR_TIMER_IDX(us) | (cnt > 0 ? QINTR_CNT_EN : 0);
2508 return 0;
2509}
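/*
 * Worked example for set_rxq_intr_params() (illustrative only; assumes
 * the common default SGE timer table of {5, 10, 20, 50, 100, 200} us):
 * a request of us = 40, cnt = 0 leaves the packet counter disabled and
 * closest_timer() picks index 3, since 50 us is the nearest entry, so
 * q->intr_params becomes QINTR_TIMER_IDX(3).
 */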
2510
2511static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
2512{
2513 const struct port_info *pi = netdev_priv(dev);
2514 struct adapter *adap = pi->adapter;
2515 struct sge_rspq *q;
2516 int i;
2517 int r = 0;
2518
2519 for (i = pi->first_qset; i < pi->first_qset + pi->nqsets; i++) {
2520 q = &adap->sge.ethrxq[i].rspq;
2521 r = set_rxq_intr_params(adap, q, c->rx_coalesce_usecs,
2522 c->rx_max_coalesced_frames);
2523 if (r) {
2524 dev_err(&dev->dev, "failed to set coalesce %d\n", r);
2525 break;
2526 }
2527 }
2528 return r;
2529}
2530
2531static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
2532{
2533 const struct port_info *pi = netdev_priv(dev);
2534 const struct adapter *adap = pi->adapter;
2535 const struct sge_rspq *rq = &adap->sge.ethrxq[pi->first_qset].rspq;
2536
2537 c->rx_coalesce_usecs = qtimer_val(adap, rq);
2538 c->rx_max_coalesced_frames = (rq->intr_params & QINTR_CNT_EN) ?
2539 adap->sge.counter_val[rq->pktcnt_idx] : 0;
2540 return 0;
2541}
2542
2543/**
2544 * eeprom_ptov - translate a physical EEPROM address to virtual
2545 * @phys_addr: the physical EEPROM address
2546 * @fn: the PCI function number
2547 * @sz: size of function-specific area
2548 *
2549 * Translate a physical EEPROM address to virtual. The first 1K is
2550 * accessed through virtual addresses starting at 31K, the rest is
2551 * accessed through virtual addresses starting at 0.
2552 *
2553 * The mapping is as follows:
2554 * [0..1K) -> [31K..32K)
2555 * [1K..1K+A) -> [31K-A..31K)
2556 * [1K+A..ES) -> [0..ES-A-1K)
2557 *
2558 * where A = @fn * @sz, and ES = EEPROM size.
2559 */
2560static int eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz)
2561{
2562 fn *= sz;
2563 if (phys_addr < 1024)
2564 return phys_addr + (31 << 10);
2565 if (phys_addr < 1024 + fn)
2566 return 31744 - fn + phys_addr - 1024;
2567 if (phys_addr < EEPROMSIZE)
2568 return phys_addr - 1024 - fn;
2569 return -EINVAL;
2570}
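/*
 * Worked example of the mapping above (illustrative; assumes a 1K
 * function-specific area, i.e. sz = 1024, and fn = 2, so A = 2K):
 * physical 0x0 -> virtual 31K, physical 1.5K -> 31K - 2K + 0.5K =
 * 29.5K, and physical 4K -> 4K - 1K - 2K = 1K.
 */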
2571
2572/*
2573 * The next two routines implement eeprom read/write from physical addresses.
2574 */
2575static int eeprom_rd_phys(struct adapter *adap, unsigned int phys_addr, u32 *v)
2576{
2577 int vaddr = eeprom_ptov(phys_addr, adap->fn, EEPROMPFSIZE);
2578
2579 if (vaddr >= 0)
2580 vaddr = pci_read_vpd(adap->pdev, vaddr, sizeof(u32), v);
2581 return vaddr < 0 ? vaddr : 0;
2582}
2583
2584static int eeprom_wr_phys(struct adapter *adap, unsigned int phys_addr, u32 v)
2585{
2586 int vaddr = eeprom_ptov(phys_addr, adap->fn, EEPROMPFSIZE);
2587
2588 if (vaddr >= 0)
2589 vaddr = pci_write_vpd(adap->pdev, vaddr, sizeof(u32), &v);
2590 return vaddr < 0 ? vaddr : 0;
2591}
2592
2593#define EEPROM_MAGIC 0x38E2F10C
2594
2595static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
2596 u8 *data)
2597{
2598 int i, err = 0;
2599 struct adapter *adapter = netdev2adap(dev);
2600
2601 u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL);
2602 if (!buf)
2603 return -ENOMEM;
2604
2605 e->magic = EEPROM_MAGIC;
2606 for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
2607 err = eeprom_rd_phys(adapter, i, (u32 *)&buf[i]);
2608
2609 if (!err)
2610 memcpy(data, buf + e->offset, e->len);
2611 kfree(buf);
2612 return err;
2613}
2614
2615static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
2616 u8 *data)
2617{
2618 u8 *buf;
2619 int err = 0;
2620 u32 aligned_offset, aligned_len, *p;
2621 struct adapter *adapter = netdev2adap(dev);
2622
2623 if (eeprom->magic != EEPROM_MAGIC)
2624 return -EINVAL;
2625
2626 aligned_offset = eeprom->offset & ~3;
2627 aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;
2628
2629 if (adapter->fn > 0) {
2630 u32 start = 1024 + adapter->fn * EEPROMPFSIZE;
2631
2632 if (aligned_offset < start ||
2633 aligned_offset + aligned_len > start + EEPROMPFSIZE)
2634 return -EPERM;
2635 }
2636
2637 if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
2638 /*
2639 * RMW possibly needed for first or last words.
2640 */
2641 buf = kmalloc(aligned_len, GFP_KERNEL);
2642 if (!buf)
2643 return -ENOMEM;
2644 err = eeprom_rd_phys(adapter, aligned_offset, (u32 *)buf);
2645 if (!err && aligned_len > 4)
2646 err = eeprom_rd_phys(adapter,
2647 aligned_offset + aligned_len - 4,
2648 (u32 *)&buf[aligned_len - 4]);
2649 if (err)
2650 goto out;
2651 memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
2652 } else
2653 buf = data;
2654
2655 err = t4_seeprom_wp(adapter, false);
2656 if (err)
2657 goto out;
2658
2659 for (p = (u32 *)buf; !err && aligned_len; aligned_len -= 4, p++) {
2660 err = eeprom_wr_phys(adapter, aligned_offset, *p);
2661 aligned_offset += 4;
2662 }
2663
2664 if (!err)
2665 err = t4_seeprom_wp(adapter, true);
2666out:
2667 if (buf != data)
2668 kfree(buf);
2669 return err;
2670}
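/*
 * Example of the read-modify-write path above (illustrative): a 6-byte
 * write at offset 2 widens to the aligned window [0, 8); the first and
 * last 32-bit words are read back first so the bytes outside the
 * caller's range are rewritten unchanged.
 */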
2671
2672static int set_flash(struct net_device *netdev, struct ethtool_flash *ef)
2673{
2674 int ret;
2675 const struct firmware *fw;
2676 struct adapter *adap = netdev2adap(netdev);
2677
2678 ef->data[sizeof(ef->data) - 1] = '\0';
2679 ret = request_firmware(&fw, ef->data, adap->pdev_dev);
2680 if (ret < 0)
2681 return ret;
2682
2683 ret = t4_load_fw(adap, fw->data, fw->size);
2684 release_firmware(fw);
2685 if (!ret)
2686 dev_info(adap->pdev_dev, "loaded firmware %s\n", ef->data);
2687 return ret;
2688}
2689
2690#define WOL_SUPPORTED (WAKE_BCAST | WAKE_MAGIC)
2691#define BCAST_CRC 0xa0ccc1a6
2692
2693static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2694{
2695 wol->supported = WAKE_BCAST | WAKE_MAGIC;
2696 wol->wolopts = netdev2adap(dev)->wol;
2697 memset(&wol->sopass, 0, sizeof(wol->sopass));
2698}
2699
2700static int set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2701{
2702 int err = 0;
2703 struct port_info *pi = netdev_priv(dev);
2704
2705 if (wol->wolopts & ~WOL_SUPPORTED)
2706 return -EINVAL;
2707 t4_wol_magic_enable(pi->adapter, pi->tx_chan,
2708 (wol->wolopts & WAKE_MAGIC) ? dev->dev_addr : NULL);
2709 if (wol->wolopts & WAKE_BCAST) {
2710 err = t4_wol_pat_enable(pi->adapter, pi->tx_chan, 0xfe, ~0ULL,
2711 ~0ULL, 0, false);
2712 if (!err)
2713 err = t4_wol_pat_enable(pi->adapter, pi->tx_chan, 1,
2714 ~6ULL, ~0ULL, BCAST_CRC, true);
2715 } else
2716 t4_wol_pat_enable(pi->adapter, pi->tx_chan, 0, 0, 0, 0, false);
2717 return err;
2718}
2719
2720static int cxgb_set_features(struct net_device *dev, netdev_features_t features)
2721{
2722 const struct port_info *pi = netdev_priv(dev);
2723 netdev_features_t changed = dev->features ^ features;
2724 int err;
2725
2726 if (!(changed & NETIF_F_HW_VLAN_RX))
2727 return 0;
2728
2729 err = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, -1,
2730 -1, -1, -1,
2731 !!(features & NETIF_F_HW_VLAN_RX), true);
2732 if (unlikely(err))
2733 dev->features = features ^ NETIF_F_HW_VLAN_RX;
2734 return err;
2735}
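/*
 * Note: only the hardware VLAN RX-strip bit is acted on above; if the
 * firmware rejects the change, that bit is flipped back in
 * dev->features so the stack's view stays in sync with the hardware.
 */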
2736
2737static u32 get_rss_table_size(struct net_device *dev)
2738{
2739 const struct port_info *pi = netdev_priv(dev);
2740
2741 return pi->rss_size;
2742}
2743
2744static int get_rss_table(struct net_device *dev, u32 *p)
2745{
2746 const struct port_info *pi = netdev_priv(dev);
2747 unsigned int n = pi->rss_size;
2748
2749 while (n--)
2750 p[n] = pi->rss[n];
2751 return 0;
2752}
2753
2754static int set_rss_table(struct net_device *dev, const u32 *p)
2755{
2756 unsigned int i;
2757 struct port_info *pi = netdev_priv(dev);
2758
2759 for (i = 0; i < pi->rss_size; i++)
2760 pi->rss[i] = p[i];
2761 if (pi->adapter->flags & FULL_INIT_DONE)
2762 return write_rss(pi, pi->rss);
2763 return 0;
2764}
2765
2766static int get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
2767 u32 *rules)
2768{
2769 const struct port_info *pi = netdev_priv(dev);
2770
2771 switch (info->cmd) {
2772 case ETHTOOL_GRXFH: {
2773 unsigned int v = pi->rss_mode;
2774
2775 info->data = 0;
2776 switch (info->flow_type) {
2777 case TCP_V4_FLOW:
2778 if (v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN)
2779 info->data = RXH_IP_SRC | RXH_IP_DST |
2780 RXH_L4_B_0_1 | RXH_L4_B_2_3;
2781 else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
2782 info->data = RXH_IP_SRC | RXH_IP_DST;
2783 break;
2784 case UDP_V4_FLOW:
2785 if ((v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN) &&
2786 (v & FW_RSS_VI_CONFIG_CMD_UDPEN))
2787 info->data = RXH_IP_SRC | RXH_IP_DST |
2788 RXH_L4_B_0_1 | RXH_L4_B_2_3;
2789 else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
2790 info->data = RXH_IP_SRC | RXH_IP_DST;
2791 break;
2792 case SCTP_V4_FLOW:
2793 case AH_ESP_V4_FLOW:
2794 case IPV4_FLOW:
2795 if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
2796 info->data = RXH_IP_SRC | RXH_IP_DST;
2797 break;
2798 case TCP_V6_FLOW:
2799 if (v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN)
2800 info->data = RXH_IP_SRC | RXH_IP_DST |
2801 RXH_L4_B_0_1 | RXH_L4_B_2_3;
2802 else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
2803 info->data = RXH_IP_SRC | RXH_IP_DST;
2804 break;
2805 case UDP_V6_FLOW:
2806 if ((v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN) &&
2807 (v & FW_RSS_VI_CONFIG_CMD_UDPEN))
2808 info->data = RXH_IP_SRC | RXH_IP_DST |
2809 RXH_L4_B_0_1 | RXH_L4_B_2_3;
2810 else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
2811 info->data = RXH_IP_SRC | RXH_IP_DST;
2812 break;
2813 case SCTP_V6_FLOW:
2814 case AH_ESP_V6_FLOW:
2815 case IPV6_FLOW:
2816 if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
2817 info->data = RXH_IP_SRC | RXH_IP_DST;
2818 break;
2819 }
2820 return 0;
2821 }
2822 case ETHTOOL_GRXRINGS:
2823 info->data = pi->nqsets;
2824 return 0;
2825 }
2826 return -EOPNOTSUPP;
2827}
2828
2829static const struct ethtool_ops cxgb_ethtool_ops = {
2830 .get_settings = get_settings,
2831 .set_settings = set_settings,
2832 .get_drvinfo = get_drvinfo,
2833 .get_msglevel = get_msglevel,
2834 .set_msglevel = set_msglevel,
2835 .get_ringparam = get_sge_param,
2836 .set_ringparam = set_sge_param,
2837 .get_coalesce = get_coalesce,
2838 .set_coalesce = set_coalesce,
2839 .get_eeprom_len = get_eeprom_len,
2840 .get_eeprom = get_eeprom,
2841 .set_eeprom = set_eeprom,
2842 .get_pauseparam = get_pauseparam,
2843 .set_pauseparam = set_pauseparam,
2844 .get_link = ethtool_op_get_link,
2845 .get_strings = get_strings,
2846 .set_phys_id = identify_port,
2847 .nway_reset = restart_autoneg,
2848 .get_sset_count = get_sset_count,
2849 .get_ethtool_stats = get_stats,
2850 .get_regs_len = get_regs_len,
2851 .get_regs = get_regs,
2852 .get_wol = get_wol,
2853 .set_wol = set_wol,
2854 .get_rxnfc = get_rxnfc,
2855 .get_rxfh_indir_size = get_rss_table_size,
2856 .get_rxfh_indir = get_rss_table,
2857 .set_rxfh_indir = set_rss_table,
2858 .flash_device = set_flash,
2859};
2860
2861/*
2862 * debugfs support
2863 */
2864static ssize_t mem_read(struct file *file, char __user *buf, size_t count,
2865 loff_t *ppos)
2866{
2867 loff_t pos = *ppos;
2868 loff_t avail = file_inode(file)->i_size;
2869 unsigned int mem = (uintptr_t)file->private_data & 3;
2870 struct adapter *adap = file->private_data - mem;
2871
2872 if (pos < 0)
2873 return -EINVAL;
2874 if (pos >= avail)
2875 return 0;
2876 if (count > avail - pos)
2877 count = avail - pos;
2878
2879 while (count) {
2880 size_t len;
2881 int ret, ofst;
2882 __be32 data[16];
2883
2884 if ((mem == MEM_MC) || (mem == MEM_MC1))
2885 ret = t4_mc_read(adap, mem % MEM_MC, pos, data, NULL);
2886 else
2887 ret = t4_edc_read(adap, mem, pos, data, NULL);
2888 if (ret)
2889 return ret;
2890
2891 ofst = pos % sizeof(data);
2892 len = min(count, sizeof(data) - ofst);
2893 if (copy_to_user(buf, (u8 *)data + ofst, len))
2894 return -EFAULT;
2895
2896 buf += len;
2897 pos += len;
2898 count -= len;
2899 }
2900 count = pos - *ppos;
2901 *ppos = pos;
2902 return count;
2903}
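/*
 * Note on mem_read() above: each t4_mc_read()/t4_edc_read() call
 * returns the naturally aligned 64-byte chunk (16 x __be32) containing
 * pos; ofst/len then carve the user's window out of that chunk, so
 * reads at arbitrary offsets and lengths work.
 */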
2904
2905static const struct file_operations mem_debugfs_fops = {
2906 .owner = THIS_MODULE,
2907 .open = simple_open,
2908 .read = mem_read,
2909 .llseek = default_llseek,
2910};
2911
2912static void add_debugfs_mem(struct adapter *adap, const char *name,
2913 unsigned int idx, unsigned int size_mb)
2914{
2915 struct dentry *de;
2916
2917 de = debugfs_create_file(name, S_IRUSR, adap->debugfs_root,
2918 (void *)adap + idx, &mem_debugfs_fops);
2919 if (de && de->d_inode)
2920 de->d_inode->i_size = size_mb << 20;
2921}
2922
2923static int setup_debugfs(struct adapter *adap)
2924{
2925 int i;
2926 u32 size;
2927
2928 if (IS_ERR_OR_NULL(adap->debugfs_root))
2929 return -1;
2930
2931 i = t4_read_reg(adap, MA_TARGET_MEM_ENABLE);
2932 if (i & EDRAM0_ENABLE) {
2933 size = t4_read_reg(adap, MA_EDRAM0_BAR);
2934 add_debugfs_mem(adap, "edc0", MEM_EDC0, EDRAM_SIZE_GET(size));
2935 }
2936 if (i & EDRAM1_ENABLE) {
2937 size = t4_read_reg(adap, MA_EDRAM1_BAR);
2938 add_debugfs_mem(adap, "edc1", MEM_EDC1, EDRAM_SIZE_GET(size));
2939 }
2940 if (is_t4(adap->chip)) {
2941 size = t4_read_reg(adap, MA_EXT_MEMORY_BAR);
2942 if (i & EXT_MEM_ENABLE)
2943 add_debugfs_mem(adap, "mc", MEM_MC,
2944 EXT_MEM_SIZE_GET(size));
2945 } else {
2946 if (i & EXT_MEM_ENABLE) {
2947 size = t4_read_reg(adap, MA_EXT_MEMORY_BAR);
2948 add_debugfs_mem(adap, "mc0", MEM_MC0,
2949 EXT_MEM_SIZE_GET(size));
2950 }
2951 if (i & EXT_MEM1_ENABLE) {
2952 size = t4_read_reg(adap, MA_EXT_MEMORY1_BAR);
2953 add_debugfs_mem(adap, "mc1", MEM_MC1,
2954 EXT_MEM_SIZE_GET(size));
2955 }
2956 }
2957 if (adap->l2t)
2958 debugfs_create_file("l2t", S_IRUSR, adap->debugfs_root, adap,
2959 &t4_l2t_fops);
2960 return 0;
2961}
2962
2963/*
2964 * upper-layer driver support
2965 */
2966
2967/*
2968 * Allocate an active-open TID and set it to the supplied value.
2969 */
2970int cxgb4_alloc_atid(struct tid_info *t, void *data)
2971{
2972 int atid = -1;
2973
2974 spin_lock_bh(&t->atid_lock);
2975 if (t->afree) {
2976 union aopen_entry *p = t->afree;
2977
2978 atid = (p - t->atid_tab) + t->atid_base;
2979 t->afree = p->next;
2980 p->data = data;
2981 t->atids_in_use++;
2982 }
2983 spin_unlock_bh(&t->atid_lock);
2984 return atid;
2985}
2986EXPORT_SYMBOL(cxgb4_alloc_atid);
2987
2988/*
2989 * Release an active-open TID.
2990 */
2991void cxgb4_free_atid(struct tid_info *t, unsigned int atid)
2992{
2993 union aopen_entry *p = &t->atid_tab[atid - t->atid_base];
2994
2995 spin_lock_bh(&t->atid_lock);
2996 p->next = t->afree;
2997 t->afree = p;
2998 t->atids_in_use--;
2999 spin_unlock_bh(&t->atid_lock);
3000}
3001EXPORT_SYMBOL(cxgb4_free_atid);
3002
3003/*
3004 * Allocate a server TID and set it to the supplied value.
3005 */
3006int cxgb4_alloc_stid(struct tid_info *t, int family, void *data)
3007{
3008 int stid;
3009
3010 spin_lock_bh(&t->stid_lock);
3011 if (family == PF_INET) {
3012 stid = find_first_zero_bit(t->stid_bmap, t->nstids);
3013 if (stid < t->nstids)
3014 __set_bit(stid, t->stid_bmap);
3015 else
3016 stid = -1;
3017 } else {
3018 stid = bitmap_find_free_region(t->stid_bmap, t->nstids, 2);
3019 if (stid < 0)
3020 stid = -1;
3021 }
3022 if (stid >= 0) {
3023 t->stid_tab[stid].data = data;
3024 stid += t->stid_base;
3025 t->stids_in_use++;
3026 }
3027 spin_unlock_bh(&t->stid_lock);
3028 return stid;
3029}
3030EXPORT_SYMBOL(cxgb4_alloc_stid);
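/*
 * Note: a non-IPv4 server above consumes a naturally aligned block of
 * four stids (bitmap_find_free_region() with order 2), presumably
 * because an IPv6 server entry is four times the size of an IPv4 one.
 */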
3031
3032/* Allocate a server filter TID and set it to the supplied value.
3033 */
3034int cxgb4_alloc_sftid(struct tid_info *t, int family, void *data)
3035{
3036 int stid;
3037
3038 spin_lock_bh(&t->stid_lock);
3039 if (family == PF_INET) {
3040 stid = find_next_zero_bit(t->stid_bmap,
3041 t->nstids + t->nsftids, t->nstids);
3042 if (stid < (t->nstids + t->nsftids))
3043 __set_bit(stid, t->stid_bmap);
3044 else
3045 stid = -1;
3046 } else {
3047 stid = -1;
3048 }
3049 if (stid >= 0) {
3050 t->stid_tab[stid].data = data;
3051 stid += t->stid_base;
3052 t->stids_in_use++;
3053 }
3054 spin_unlock_bh(&t->stid_lock);
3055 return stid;
3056}
3057EXPORT_SYMBOL(cxgb4_alloc_sftid);
3058
3059/* Release a server TID.
3060 */
3061void cxgb4_free_stid(struct tid_info *t, unsigned int stid, int family)
3062{
3063 stid -= t->stid_base;
3064 spin_lock_bh(&t->stid_lock);
3065 if (family == PF_INET)
3066 __clear_bit(stid, t->stid_bmap);
3067 else
3068 bitmap_release_region(t->stid_bmap, stid, 2);
3069 t->stid_tab[stid].data = NULL;
3070 t->stids_in_use--;
3071 spin_unlock_bh(&t->stid_lock);
3072}
3073EXPORT_SYMBOL(cxgb4_free_stid);
3074
3075/*
3076 * Populate a TID_RELEASE WR. Caller must properly size the skb.
3077 */
3078static void mk_tid_release(struct sk_buff *skb, unsigned int chan,
3079 unsigned int tid)
3080{
3081 struct cpl_tid_release *req;
3082
3083 set_wr_txq(skb, CPL_PRIORITY_SETUP, chan);
3084 req = (struct cpl_tid_release *)__skb_put(skb, sizeof(*req));
3085 INIT_TP_WR(req, tid);
3086 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, tid));
3087}
3088
3089/*
3090 * Queue a TID release request and if necessary schedule a work queue to
3091 * process it.
3092 */
3093static void cxgb4_queue_tid_release(struct tid_info *t, unsigned int chan,
3094 unsigned int tid)
3095{
3096 void **p = &t->tid_tab[tid];
3097 struct adapter *adap = container_of(t, struct adapter, tids);
3098
3099 spin_lock_bh(&adap->tid_release_lock);
3100 *p = adap->tid_release_head;
3101 /* Low 2 bits encode the Tx channel number */
3102 adap->tid_release_head = (void **)((uintptr_t)p | chan);
3103 if (!adap->tid_release_task_busy) {
3104 adap->tid_release_task_busy = true;
3105 queue_work(workq, &adap->tid_release_task);
3106 }
3107 spin_unlock_bh(&adap->tid_release_lock);
3108}
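/*
 * Pointer-tagging note: tid_tab entries are pointer-aligned, so the low
 * 2 bits of each queued entry are free to carry the Tx channel;
 * process_tid_release_list() below recovers it with "(uintptr_t)p & 3".
 */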
3109
3110/*
3111 * Process the list of pending TID release requests.
3112 */
3113static void process_tid_release_list(struct work_struct *work)
3114{
3115 struct sk_buff *skb;
3116 struct adapter *adap;
3117
3118 adap = container_of(work, struct adapter, tid_release_task);
3119
3120 spin_lock_bh(&adap->tid_release_lock);
3121 while (adap->tid_release_head) {
3122 void **p = adap->tid_release_head;
3123 unsigned int chan = (uintptr_t)p & 3;
3124 p = (void *)p - chan;
3125
3126 adap->tid_release_head = *p;
3127 *p = NULL;
3128 spin_unlock_bh(&adap->tid_release_lock);
3129
3130 while (!(skb = alloc_skb(sizeof(struct cpl_tid_release),
3131 GFP_KERNEL)))
3132 schedule_timeout_uninterruptible(1);
3133
3134 mk_tid_release(skb, chan, p - adap->tids.tid_tab);
3135 t4_ofld_send(adap, skb);
3136 spin_lock_bh(&adap->tid_release_lock);
3137 }
3138 adap->tid_release_task_busy = false;
3139 spin_unlock_bh(&adap->tid_release_lock);
3140}
3141
3142/*
3143 * Release a TID and inform HW. If we are unable to allocate the release
3144 * message we defer to a work queue.
3145 */
3146void cxgb4_remove_tid(struct tid_info *t, unsigned int chan, unsigned int tid)
3147{
3148 void *old;
3149 struct sk_buff *skb;
3150 struct adapter *adap = container_of(t, struct adapter, tids);
3151
3152 old = t->tid_tab[tid];
3153 skb = alloc_skb(sizeof(struct cpl_tid_release), GFP_ATOMIC);
3154 if (likely(skb)) {
3155 t->tid_tab[tid] = NULL;
3156 mk_tid_release(skb, chan, tid);
3157 t4_ofld_send(adap, skb);
3158 } else
3159 cxgb4_queue_tid_release(t, chan, tid);
3160 if (old)
3161 atomic_dec(&t->tids_in_use);
3162}
3163EXPORT_SYMBOL(cxgb4_remove_tid);
3164
3165/*
3166 * Allocate and initialize the TID tables. Returns 0 on success.
3167 */
3168static int tid_init(struct tid_info *t)
3169{
3170 size_t size;
3171 unsigned int stid_bmap_size;
3172 unsigned int natids = t->natids;
3173
3174 stid_bmap_size = BITS_TO_LONGS(t->nstids + t->nsftids);
3175 size = t->ntids * sizeof(*t->tid_tab) +
3176 natids * sizeof(*t->atid_tab) +
3177 t->nstids * sizeof(*t->stid_tab) +
3178 t->nsftids * sizeof(*t->stid_tab) +
3179 stid_bmap_size * sizeof(long) +
3180 t->nftids * sizeof(*t->ftid_tab) +
3181 t->nsftids * sizeof(*t->ftid_tab);
3182
3183 t->tid_tab = t4_alloc_mem(size);
3184 if (!t->tid_tab)
3185 return -ENOMEM;
3186
3187 t->atid_tab = (union aopen_entry *)&t->tid_tab[t->ntids];
3188 t->stid_tab = (struct serv_entry *)&t->atid_tab[natids];
3189 t->stid_bmap = (unsigned long *)&t->stid_tab[t->nstids + t->nsftids];
3190 t->ftid_tab = (struct filter_entry *)&t->stid_bmap[stid_bmap_size];
3191 spin_lock_init(&t->stid_lock);
3192 spin_lock_init(&t->atid_lock);
3193
3194 t->stids_in_use = 0;
3195 t->afree = NULL;
3196 t->atids_in_use = 0;
3197 atomic_set(&t->tids_in_use, 0);
3198
3199 /* Setup the free list for atid_tab and clear the stid bitmap. */
3200 if (natids) {
3201 while (--natids)
3202 t->atid_tab[natids - 1].next = &t->atid_tab[natids];
3203 t->afree = t->atid_tab;
3204 }
3205 bitmap_zero(t->stid_bmap, t->nstids + t->nsftids);
3206 return 0;
3207}
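/*
 * Layout note: tid_init() carves the single t4_alloc_mem() block into,
 * in order, tid_tab, atid_tab, stid_tab (servers plus server filters),
 * stid_bmap and ftid_tab, so one allocation backs every lookup table
 * sized above.
 */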
3208
3209/**
3210 * cxgb4_create_server - create an IP server
3211 * @dev: the device
3212 * @stid: the server TID
3213 * @sip: local IP address to bind server to
3214 * @sport: the server's TCP port
3215 * @queue: queue to direct messages from this server to
3216 *
3217 * Create an IP server for the given port and address.
3218 * Returns <0 on error and one of the %NET_XMIT_* values on success.
3219 */
3220int cxgb4_create_server(const struct net_device *dev, unsigned int stid,
3221 __be32 sip, __be16 sport, __be16 vlan,
3222 unsigned int queue)
3223{
3224 unsigned int chan;
3225 struct sk_buff *skb;
3226 struct adapter *adap;
3227 struct cpl_pass_open_req *req;
3228
3229 skb = alloc_skb(sizeof(*req), GFP_KERNEL);
3230 if (!skb)
3231 return -ENOMEM;
3232
3233 adap = netdev2adap(dev);
3234 req = (struct cpl_pass_open_req *)__skb_put(skb, sizeof(*req));
3235 INIT_TP_WR(req, 0);
3236 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ, stid));
3237 req->local_port = sport;
3238 req->peer_port = htons(0);
3239 req->local_ip = sip;
3240 req->peer_ip = htonl(0);
3241 chan = rxq_to_chan(&adap->sge, queue);
3242 req->opt0 = cpu_to_be64(TX_CHAN(chan));
3243 req->opt1 = cpu_to_be64(CONN_POLICY_ASK |
3244 SYN_RSS_ENABLE | SYN_RSS_QUEUE(queue));
3245 return t4_mgmt_tx(adap, skb);
3246}
3247EXPORT_SYMBOL(cxgb4_create_server);
3248
3249/**
3250 * cxgb4_best_mtu - find the entry in the MTU table closest to an MTU
3251 * @mtus: the HW MTU table
3252 * @mtu: the target MTU
3253 * @idx: index of selected entry in the MTU table
3254 *
3255 * Returns the index and the value in the HW MTU table that is closest to
3256 * but does not exceed @mtu, unless @mtu is smaller than any value in the
3257 * table, in which case that smallest available value is selected.
3258 */
3259unsigned int cxgb4_best_mtu(const unsigned short *mtus, unsigned short mtu,
3260 unsigned int *idx)
3261{
3262 unsigned int i = 0;
3263
3264 while (i < NMTUS - 1 && mtus[i + 1] <= mtu)
3265 ++i;
3266 if (idx)
3267 *idx = i;
3268 return mtus[i];
3269}
3270EXPORT_SYMBOL(cxgb4_best_mtu);
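/*
 * Example (illustrative): with an MTU table whose largest entries are
 * {..., 1492, 1500, 9000}, a target MTU of 1600 selects 1500, the
 * largest entry not exceeding it; a target smaller than mtus[0] still
 * returns mtus[0].
 */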
3271
3272/**
3273 * cxgb4_port_chan - get the HW channel of a port
3274 * @dev: the net device for the port
3275 *
3276 * Return the HW Tx channel of the given port.
3277 */
3278unsigned int cxgb4_port_chan(const struct net_device *dev)
3279{
3280 return netdev2pinfo(dev)->tx_chan;
3281}
3282EXPORT_SYMBOL(cxgb4_port_chan);
3283
3284unsigned int cxgb4_dbfifo_count(const struct net_device *dev, int lpfifo)
3285{
3286 struct adapter *adap = netdev2adap(dev);
3287 u32 v1, v2, lp_count, hp_count;
3288
3289 v1 = t4_read_reg(adap, A_SGE_DBFIFO_STATUS);
3290 v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2);
3291 if (is_t4(adap->chip)) {
3292 lp_count = G_LP_COUNT(v1);
3293 hp_count = G_HP_COUNT(v1);
3294 } else {
3295 lp_count = G_LP_COUNT_T5(v1);
3296 hp_count = G_HP_COUNT_T5(v2);
3297 }
3298 return lpfifo ? lp_count : hp_count;
3299}
3300EXPORT_SYMBOL(cxgb4_dbfifo_count);
3301
3302/**
3303 * cxgb4_port_viid - get the VI id of a port
3304 * @dev: the net device for the port
3305 *
3306 * Return the VI id of the given port.
3307 */
3308unsigned int cxgb4_port_viid(const struct net_device *dev)
3309{
3310 return netdev2pinfo(dev)->viid;
3311}
3312EXPORT_SYMBOL(cxgb4_port_viid);
3313
3314/**
3315 * cxgb4_port_idx - get the index of a port
3316 * @dev: the net device for the port
3317 *
3318 * Return the index of the given port.
3319 */
3320unsigned int cxgb4_port_idx(const struct net_device *dev)
3321{
3322 return netdev2pinfo(dev)->port_id;
3323}
3324EXPORT_SYMBOL(cxgb4_port_idx);
3325
3326void cxgb4_get_tcp_stats(struct pci_dev *pdev, struct tp_tcp_stats *v4,
3327 struct tp_tcp_stats *v6)
3328{
3329 struct adapter *adap = pci_get_drvdata(pdev);
3330
3331 spin_lock(&adap->stats_lock);
3332 t4_tp_get_tcp_stats(adap, v4, v6);
3333 spin_unlock(&adap->stats_lock);
3334}
3335EXPORT_SYMBOL(cxgb4_get_tcp_stats);
3336
3337void cxgb4_iscsi_init(struct net_device *dev, unsigned int tag_mask,
3338 const unsigned int *pgsz_order)
3339{
3340 struct adapter *adap = netdev2adap(dev);
3341
3342 t4_write_reg(adap, ULP_RX_ISCSI_TAGMASK, tag_mask);
3343 t4_write_reg(adap, ULP_RX_ISCSI_PSZ, HPZ0(pgsz_order[0]) |
3344 HPZ1(pgsz_order[1]) | HPZ2(pgsz_order[2]) |
3345 HPZ3(pgsz_order[3]));
3346}
3347EXPORT_SYMBOL(cxgb4_iscsi_init);
3348
3349int cxgb4_flush_eq_cache(struct net_device *dev)
3350{
3351 struct adapter *adap = netdev2adap(dev);
3352 int ret;
3353
3354 ret = t4_fwaddrspace_write(adap, adap->mbox,
3355 0xe1000000 + A_SGE_CTXT_CMD, 0x20000000);
3356 return ret;
3357}
3358EXPORT_SYMBOL(cxgb4_flush_eq_cache);
3359
3360static int read_eq_indices(struct adapter *adap, u16 qid, u16 *pidx, u16 *cidx)
3361{
3362 u32 addr = t4_read_reg(adap, A_SGE_DBQ_CTXT_BADDR) + 24 * qid + 8;
3363 __be64 indices;
3364 int ret;
3365
3366 ret = t4_mem_win_read_len(adap, addr, (__be32 *)&indices, 8);
3367 if (!ret) {
3368 *cidx = (be64_to_cpu(indices) >> 25) & 0xffff;
3369 *pidx = (be64_to_cpu(indices) >> 9) & 0xffff;
3370 }
3371 return ret;
3372}
3373
3374int cxgb4_sync_txq_pidx(struct net_device *dev, u16 qid, u16 pidx,
3375 u16 size)
3376{
3377 struct adapter *adap = netdev2adap(dev);
3378 u16 hw_pidx, hw_cidx;
3379 int ret;
3380
3381 ret = read_eq_indices(adap, qid, &hw_pidx, &hw_cidx);
3382 if (ret)
3383 goto out;
3384
3385 if (pidx != hw_pidx) {
3386 u16 delta;
3387
3388 if (pidx >= hw_pidx)
3389 delta = pidx - hw_pidx;
3390 else
3391 delta = size - hw_pidx + pidx;
3392 wmb();
3393 t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL),
3394 QID(qid) | PIDX(delta));
3395 }
3396out:
3397 return ret;
3398}
3399EXPORT_SYMBOL(cxgb4_sync_txq_pidx);
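/*
 * Example for the wrap case above (illustrative): a 1024-entry queue
 * with hw_pidx = 1020 and a driver pidx of 4 has wrapped, so delta =
 * size - hw_pidx + pidx = 8 doorbell increments are replayed.
 */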
3400
3401static struct pci_driver cxgb4_driver;
3402
3403static void check_neigh_update(struct neighbour *neigh)
3404{
3405 const struct device *parent;
3406 const struct net_device *netdev = neigh->dev;
3407
3408 if (netdev->priv_flags & IFF_802_1Q_VLAN)
3409 netdev = vlan_dev_real_dev(netdev);
3410 parent = netdev->dev.parent;
3411 if (parent && parent->driver == &cxgb4_driver.driver)
3412 t4_l2t_update(dev_get_drvdata(parent), neigh);
3413}
3414
3415static int netevent_cb(struct notifier_block *nb, unsigned long event,
3416 void *data)
3417{
3418 switch (event) {
3419 case NETEVENT_NEIGH_UPDATE:
3420 check_neigh_update(data);
3421 break;
3422 case NETEVENT_REDIRECT:
3423 default:
3424 break;
3425 }
3426 return 0;
3427}
3428
3429static bool netevent_registered;
3430static struct notifier_block cxgb4_netevent_nb = {
3431 .notifier_call = netevent_cb
3432};
3433
3434static void drain_db_fifo(struct adapter *adap, int usecs)
3435{
3436 u32 v1, v2, lp_count, hp_count;
3437
3438 do {
3439 v1 = t4_read_reg(adap, A_SGE_DBFIFO_STATUS);
3440 v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2);
3441 if (is_t4(adap->chip)) {
3442 lp_count = G_LP_COUNT(v1);
3443 hp_count = G_HP_COUNT(v1);
3444 } else {
3445 lp_count = G_LP_COUNT_T5(v1);
3446 hp_count = G_HP_COUNT_T5(v2);
3447 }
3448
3449 if (lp_count == 0 && hp_count == 0)
3450 break;
3451 set_current_state(TASK_UNINTERRUPTIBLE);
3452 schedule_timeout(usecs_to_jiffies(usecs));
3453 } while (1);
3454}
3455
3456static void disable_txq_db(struct sge_txq *q)
3457{
3458 spin_lock_irq(&q->db_lock);
3459 q->db_disabled = 1;
3460 spin_unlock_irq(&q->db_lock);
3461}
3462
3463static void enable_txq_db(struct sge_txq *q)
3464{
3465 spin_lock_irq(&q->db_lock);
3466 q->db_disabled = 0;
3467 spin_unlock_irq(&q->db_lock);
3468}
3469
3470static void disable_dbs(struct adapter *adap)
3471{
3472 int i;
3473
3474 for_each_ethrxq(&adap->sge, i)
3475 disable_txq_db(&adap->sge.ethtxq[i].q);
3476 for_each_ofldrxq(&adap->sge, i)
3477 disable_txq_db(&adap->sge.ofldtxq[i].q);
3478 for_each_port(adap, i)
3479 disable_txq_db(&adap->sge.ctrlq[i].q);
3480}
3481
3482static void enable_dbs(struct adapter *adap)
3483{
3484 int i;
3485
3486 for_each_ethrxq(&adap->sge, i)
3487 enable_txq_db(&adap->sge.ethtxq[i].q);
3488 for_each_ofldrxq(&adap->sge, i)
3489 enable_txq_db(&adap->sge.ofldtxq[i].q);
3490 for_each_port(adap, i)
3491 enable_txq_db(&adap->sge.ctrlq[i].q);
3492}
3493
3494static void sync_txq_pidx(struct adapter *adap, struct sge_txq *q)
3495{
3496 u16 hw_pidx, hw_cidx;
3497 int ret;
3498
3499 spin_lock_bh(&q->db_lock);
3500 ret = read_eq_indices(adap, (u16)q->cntxt_id, &hw_pidx, &hw_cidx);
3501 if (ret)
3502 goto out;
3503 if (q->db_pidx != hw_pidx) {
3504 u16 delta;
3505
3506 if (q->db_pidx >= hw_pidx)
3507 delta = q->db_pidx - hw_pidx;
3508 else
3509 delta = q->size - hw_pidx + q->db_pidx;
3510 wmb();
3511 t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL),
3512 QID(q->cntxt_id) | PIDX(delta));
3513 }
3514out:
3515 q->db_disabled = 0;
3516 spin_unlock_bh(&q->db_lock);
3517 if (ret)
3518 CH_WARN(adap, "DB drop recovery failed.\n");
3519}
3520static void recover_all_queues(struct adapter *adap)
3521{
3522 int i;
3523
3524 for_each_ethrxq(&adap->sge, i)
3525 sync_txq_pidx(adap, &adap->sge.ethtxq[i].q);
3526 for_each_ofldrxq(&adap->sge, i)
3527 sync_txq_pidx(adap, &adap->sge.ofldtxq[i].q);
3528 for_each_port(adap, i)
3529 sync_txq_pidx(adap, &adap->sge.ctrlq[i].q);
3530}
3531
3532static void notify_rdma_uld(struct adapter *adap, enum cxgb4_control cmd)
3533{
3534 mutex_lock(&uld_mutex);
3535 if (adap->uld_handle[CXGB4_ULD_RDMA])
3536 ulds[CXGB4_ULD_RDMA].control(adap->uld_handle[CXGB4_ULD_RDMA],
3537 cmd);
3538 mutex_unlock(&uld_mutex);
3539}
3540
3541static void process_db_full(struct work_struct *work)
3542{
3543 struct adapter *adap;
3544
3545 adap = container_of(work, struct adapter, db_full_task);
3546
3547 notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL);
3548 drain_db_fifo(adap, dbfifo_drain_delay);
3549 t4_set_reg_field(adap, SGE_INT_ENABLE3,
3550 DBFIFO_HP_INT | DBFIFO_LP_INT,
3551 DBFIFO_HP_INT | DBFIFO_LP_INT);
3552 notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY);
3553}
3554
3555static void process_db_drop(struct work_struct *work)
3556{
3557 struct adapter *adap;
3558
3559 adap = container_of(work, struct adapter, db_drop_task);
3560
3561 if (is_t4(adap->chip)) {
3562 disable_dbs(adap);
3563 notify_rdma_uld(adap, CXGB4_CONTROL_DB_DROP);
3564 drain_db_fifo(adap, 1);
3565 recover_all_queues(adap);
3566 enable_dbs(adap);
3567 } else {
3568 u32 dropped_db = t4_read_reg(adap, 0x010ac);
3569 u16 qid = (dropped_db >> 15) & 0x1ffff;
3570 u16 pidx_inc = dropped_db & 0x1fff;
3571 unsigned int s_qpp;
3572 unsigned short udb_density;
3573 unsigned long qpshift;
3574 int page;
3575 u32 udb;
3576
3577 dev_warn(adap->pdev_dev,
3578 "Dropped DB 0x%x qid %d bar2 %d coalesce %d pidx %d\n",
3579 dropped_db, qid,
3580 (dropped_db >> 14) & 1,
3581 (dropped_db >> 13) & 1,
3582 pidx_inc);
3583
3584 drain_db_fifo(adap, 1);
3585
3586 s_qpp = QUEUESPERPAGEPF1 * adap->fn;
3587 udb_density = 1 << QUEUESPERPAGEPF0_GET(t4_read_reg(adap,
3588 SGE_EGRESS_QUEUES_PER_PAGE_PF) >> s_qpp);
3589 qpshift = PAGE_SHIFT - ilog2(udb_density);
3590 udb = qid << qpshift;
3591 udb &= PAGE_MASK;
3592 page = udb / PAGE_SIZE;
3593 udb += (qid - (page * udb_density)) * 128;
3594
3595 writel(PIDX(pidx_inc), adap->bar2 + udb + 8);
3596
3597 /* Re-enable BAR2 WC */
3598 t4_set_reg_field(adap, 0x10b0, 1<<15, 1<<15);
3599 }
3600
3601 t4_set_reg_field(adap, A_SGE_DOORBELL_CONTROL, F_DROPPED_DB, 0);
3602}
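/*
 * T5 recovery math above (illustrative): with udb_density doorbells
 * per page, qpshift = PAGE_SHIFT - ilog2(udb_density); qid << qpshift
 * masked with PAGE_MASK locates the queue's user-doorbell page, each
 * queue then owns a 128-byte slot in that page, and the missed PIDX
 * increment is replayed at byte offset 8 of the slot.
 */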
3603
3604void t4_db_full(struct adapter *adap)
3605{
3606 if (is_t4(adap->chip)) {
3607 t4_set_reg_field(adap, SGE_INT_ENABLE3,
3608 DBFIFO_HP_INT | DBFIFO_LP_INT, 0);
3609 queue_work(workq, &adap->db_full_task);
3610 }
3611}
3612
3613void t4_db_dropped(struct adapter *adap)
3614{
3615 if (is_t4(adap->chip))
3616 queue_work(workq, &adap->db_drop_task);
3617}
3618
3619static void uld_attach(struct adapter *adap, unsigned int uld)
3620{
3621 void *handle;
3622 struct cxgb4_lld_info lli;
3623 unsigned short i;
3624
3625 lli.pdev = adap->pdev;
3626 lli.l2t = adap->l2t;
3627 lli.tids = &adap->tids;
3628 lli.ports = adap->port;
3629 lli.vr = &adap->vres;
3630 lli.mtus = adap->params.mtus;
3631 if (uld == CXGB4_ULD_RDMA) {
3632 lli.rxq_ids = adap->sge.rdma_rxq;
3633 lli.nrxq = adap->sge.rdmaqs;
3634 } else if (uld == CXGB4_ULD_ISCSI) {
3635 lli.rxq_ids = adap->sge.ofld_rxq;
3636 lli.nrxq = adap->sge.ofldqsets;
3637 }
3638 lli.ntxq = adap->sge.ofldqsets;
3639 lli.nchan = adap->params.nports;
3640 lli.nports = adap->params.nports;
3641 lli.wr_cred = adap->params.ofldq_wr_cred;
3642 lli.adapter_type = adap->params.rev;
3643 lli.iscsi_iolen = MAXRXDATA_GET(t4_read_reg(adap, TP_PARA_REG2));
3644 lli.udb_density = 1 << QUEUESPERPAGEPF0_GET(
3645 t4_read_reg(adap, SGE_EGRESS_QUEUES_PER_PAGE_PF) >>
3646 (adap->fn * 4));
3647 lli.ucq_density = 1 << QUEUESPERPAGEPF0_GET(
3648 t4_read_reg(adap, SGE_INGRESS_QUEUES_PER_PAGE_PF) >>
3649 (adap->fn * 4));
3650 lli.filt_mode = adap->filter_mode;
3651 /* MODQ_REQ_MAP sets queues 0-3 to chan 0-3 */
3652 for (i = 0; i < NCHAN; i++)
3653 lli.tx_modq[i] = i;
3654 lli.gts_reg = adap->regs + MYPF_REG(SGE_PF_GTS);
3655 lli.db_reg = adap->regs + MYPF_REG(SGE_PF_KDOORBELL);
3656 lli.fw_vers = adap->params.fw_vers;
3657 lli.dbfifo_int_thresh = dbfifo_int_thresh;
3658 lli.sge_pktshift = adap->sge.pktshift;
3659 lli.enable_fw_ofld_conn = adap->flags & FW_OFLD_CONN;
3660
3661 handle = ulds[uld].add(&lli);
3662 if (IS_ERR(handle)) {
3663 dev_warn(adap->pdev_dev,
3664 "could not attach to the %s driver, error %ld\n",
3665 uld_str[uld], PTR_ERR(handle));
3666 return;
3667 }
3668
3669 adap->uld_handle[uld] = handle;
3670
3671 if (!netevent_registered) {
3672 register_netevent_notifier(&cxgb4_netevent_nb);
3673 netevent_registered = true;
3674 }
3675
3676 if (adap->flags & FULL_INIT_DONE)
3677 ulds[uld].state_change(handle, CXGB4_STATE_UP);
3678}
3679
3680static void attach_ulds(struct adapter *adap)
3681{
3682 unsigned int i;
3683
3684 mutex_lock(&uld_mutex);
3685 list_add_tail(&adap->list_node, &adapter_list);
3686 for (i = 0; i < CXGB4_ULD_MAX; i++)
3687 if (ulds[i].add)
3688 uld_attach(adap, i);
3689 mutex_unlock(&uld_mutex);
3690}
3691
3692static void detach_ulds(struct adapter *adap)
3693{
3694 unsigned int i;
3695
3696 mutex_lock(&uld_mutex);
3697 list_del(&adap->list_node);
3698 for (i = 0; i < CXGB4_ULD_MAX; i++)
3699 if (adap->uld_handle[i]) {
3700 ulds[i].state_change(adap->uld_handle[i],
3701 CXGB4_STATE_DETACH);
3702 adap->uld_handle[i] = NULL;
3703 }
3704 if (netevent_registered && list_empty(&adapter_list)) {
3705 unregister_netevent_notifier(&cxgb4_netevent_nb);
3706 netevent_registered = false;
3707 }
3708 mutex_unlock(&uld_mutex);
3709}
3710
3711static void notify_ulds(struct adapter *adap, enum cxgb4_state new_state)
3712{
3713 unsigned int i;
3714
3715 mutex_lock(&uld_mutex);
3716 for (i = 0; i < CXGB4_ULD_MAX; i++)
3717 if (adap->uld_handle[i])
3718 ulds[i].state_change(adap->uld_handle[i], new_state);
3719 mutex_unlock(&uld_mutex);
3720}
3721
3722/**
3723 * cxgb4_register_uld - register an upper-layer driver
3724 * @type: the ULD type
3725 * @p: the ULD methods
3726 *
3727 * Registers an upper-layer driver with this driver and notifies the ULD
3728 * about any presently available devices that support its type. Returns
3729 * %-EBUSY if a ULD of the same type is already registered.
3730 */
3731int cxgb4_register_uld(enum cxgb4_uld type, const struct cxgb4_uld_info *p)
3732{
3733 int ret = 0;
3734 struct adapter *adap;
3735
3736 if (type >= CXGB4_ULD_MAX)
3737 return -EINVAL;
3738 mutex_lock(&uld_mutex);
3739 if (ulds[type].add) {
3740 ret = -EBUSY;
3741 goto out;
3742 }
3743 ulds[type] = *p;
3744 list_for_each_entry(adap, &adapter_list, list_node)
3745 uld_attach(adap, type);
3746out: mutex_unlock(&uld_mutex);
3747 return ret;
3748}
3749EXPORT_SYMBOL(cxgb4_register_uld);
3750
3751/**
3752 * cxgb4_unregister_uld - unregister an upper-layer driver
3753 * @type: the ULD type
3754 *
3755 * Unregisters an existing upper-layer driver.
3756 */
3757int cxgb4_unregister_uld(enum cxgb4_uld type)
3758{
3759 struct adapter *adap;
3760
3761 if (type >= CXGB4_ULD_MAX)
3762 return -EINVAL;
3763 mutex_lock(&uld_mutex);
3764 list_for_each_entry(adap, &adapter_list, list_node)
3765 adap->uld_handle[type] = NULL;
3766 ulds[type].add = NULL;
3767 mutex_unlock(&uld_mutex);
3768 return 0;
3769}
3770EXPORT_SYMBOL(cxgb4_unregister_uld);
3771
3772/**
3773 * cxgb_up - enable the adapter
3774 * @adap: adapter being enabled
3775 *
3776 * Called when the first port is enabled, this function performs the
3777 * actions necessary to make an adapter operational, such as completing
3778 * the initialization of HW modules, and enabling interrupts.
3779 *
3780 * Must be called with the rtnl lock held.
3781 */
3782static int cxgb_up(struct adapter *adap)
3783{
aaefae9b 3784 int err;
b8ff05a9 3785
aaefae9b
DM
3786 err = setup_sge_queues(adap);
3787 if (err)
3788 goto out;
3789 err = setup_rss(adap);
3790 if (err)
3791 goto freeq;
b8ff05a9
DM
3792
3793 if (adap->flags & USING_MSIX) {
aaefae9b 3794 name_msix_vecs(adap);
b8ff05a9
DM
3795 err = request_irq(adap->msix_info[0].vec, t4_nondata_intr, 0,
3796 adap->msix_info[0].desc, adap);
3797 if (err)
3798 goto irq_err;
3799
3800 err = request_msix_queue_irqs(adap);
3801 if (err) {
3802 free_irq(adap->msix_info[0].vec, adap);
3803 goto irq_err;
3804 }
3805 } else {
3806 err = request_irq(adap->pdev->irq, t4_intr_handler(adap),
3807 (adap->flags & USING_MSI) ? 0 : IRQF_SHARED,
b1a3c2b6 3808 adap->port[0]->name, adap);
b8ff05a9
DM
3809 if (err)
3810 goto irq_err;
3811 }
3812 enable_rx(adap);
3813 t4_sge_start(adap);
3814 t4_intr_enable(adap);
aaefae9b 3815 adap->flags |= FULL_INIT_DONE;
b8ff05a9
DM
3816 notify_ulds(adap, CXGB4_STATE_UP);
3817 out:
3818 return err;
3819 irq_err:
3820 dev_err(adap->pdev_dev, "request_irq failed, err %d\n", err);
aaefae9b
DM
3821 freeq:
3822 t4_free_sge_resources(adap);
b8ff05a9
DM
3823 goto out;
3824}
3825
3826static void cxgb_down(struct adapter *adapter)
3827{
3828 t4_intr_disable(adapter);
3829 cancel_work_sync(&adapter->tid_release_task);
881806bc
VP
3830 cancel_work_sync(&adapter->db_full_task);
3831 cancel_work_sync(&adapter->db_drop_task);
b8ff05a9 3832 adapter->tid_release_task_busy = false;
204dc3c0 3833 adapter->tid_release_head = NULL;
b8ff05a9
DM
3834
3835 if (adapter->flags & USING_MSIX) {
3836 free_msix_queue_irqs(adapter);
3837 free_irq(adapter->msix_info[0].vec, adapter);
3838 } else
3839 free_irq(adapter->pdev->irq, adapter);
3840 quiesce_rx(adapter);
aaefae9b
DM
3841 t4_sge_stop(adapter);
3842 t4_free_sge_resources(adapter);
3843 adapter->flags &= ~FULL_INIT_DONE;
b8ff05a9
DM
3844}
3845
3846/*
3847 * net_device operations
3848 */
3849static int cxgb_open(struct net_device *dev)
3850{
3851 int err;
3852 struct port_info *pi = netdev_priv(dev);
3853 struct adapter *adapter = pi->adapter;
3854
6a3c869a
DM
3855 netif_carrier_off(dev);
3856
aaefae9b
DM
3857 if (!(adapter->flags & FULL_INIT_DONE)) {
3858 err = cxgb_up(adapter);
3859 if (err < 0)
3860 return err;
3861 }
b8ff05a9 3862
f68707b8
DM
3863 err = link_start(dev);
3864 if (!err)
3865 netif_tx_start_all_queues(dev);
3866 return err;
b8ff05a9
DM
3867}
3868
3869static int cxgb_close(struct net_device *dev)
3870{
b8ff05a9
DM
3871 struct port_info *pi = netdev_priv(dev);
3872 struct adapter *adapter = pi->adapter;
3873
3874 netif_tx_stop_all_queues(dev);
3875 netif_carrier_off(dev);
060e0c75 3876 return t4_enable_vi(adapter, adapter->fn, pi->viid, false, false);
b8ff05a9
DM
3877}
3878
f2b7e78d
VP
3879/* Return an error number if the indicated filter isn't writable ...
3880 */
3881static int writable_filter(struct filter_entry *f)
3882{
3883 if (f->locked)
3884 return -EPERM;
3885 if (f->pending)
3886 return -EBUSY;
3887
3888 return 0;
3889}
3890
3891 /* Delete the filter at the specified index (if valid).  This checks for
3892 * all the common problems with doing this, like the filter being locked or
3893 * currently pending in another operation.
3894 */
3895static int delete_filter(struct adapter *adapter, unsigned int fidx)
3896{
3897 struct filter_entry *f;
3898 int ret;
3899
dca4faeb 3900 if (fidx >= adapter->tids.nftids + adapter->tids.nsftids)
f2b7e78d
VP
3901 return -EINVAL;
3902
3903 f = &adapter->tids.ftid_tab[fidx];
3904 ret = writable_filter(f);
3905 if (ret)
3906 return ret;
3907 if (f->valid)
3908 return del_filter_wr(adapter, fidx);
3909
3910 return 0;
3911}
3912
dca4faeb 3913int cxgb4_create_server_filter(const struct net_device *dev, unsigned int stid,
793dad94
VP
3914 __be32 sip, __be16 sport, __be16 vlan,
3915 unsigned int queue, unsigned char port, unsigned char mask)
dca4faeb
VP
3916{
3917 int ret;
3918 struct filter_entry *f;
3919 struct adapter *adap;
3920 int i;
3921 u8 *val;
3922
3923 adap = netdev2adap(dev);
3924
1cab775c
VP
3925 /* Adjust stid to correct filter index */
3926 stid -= adap->tids.nstids;
3927 stid += adap->tids.nftids;
3928
dca4faeb
VP
3929 /* Check to make sure the filter requested is writable ...
3930 */
3931 f = &adap->tids.ftid_tab[stid];
3932 ret = writable_filter(f);
3933 if (ret)
3934 return ret;
3935
3936 /* Clear out any old resources being used by the filter before
3937 * we start constructing the new filter.
3938 */
3939 if (f->valid)
3940 clear_filter(adap, f);
3941
3942 /* Clear out filter specifications */
3943 memset(&f->fs, 0, sizeof(struct ch_filter_specification));
3944 f->fs.val.lport = cpu_to_be16(sport);
3945 f->fs.mask.lport = ~0;
3946 val = (u8 *)&sip;
793dad94 3947 if ((val[0] | val[1] | val[2] | val[3]) != 0) {
dca4faeb
VP
3948 for (i = 0; i < 4; i++) {
3949 f->fs.val.lip[i] = val[i];
3950 f->fs.mask.lip[i] = ~0;
3951 }
793dad94
VP
3952 if (adap->filter_mode & F_PORT) {
3953 f->fs.val.iport = port;
3954 f->fs.mask.iport = mask;
3955 }
3956 }
dca4faeb
VP
3957
3958 f->fs.dirsteer = 1;
3959 f->fs.iq = queue;
3960 /* Mark filter as locked */
3961 f->locked = 1;
3962 f->fs.rpttid = 1;
3963
3964 ret = set_filter_wr(adap, stid);
3965 if (ret) {
3966 clear_filter(adap, f);
3967 return ret;
3968 }
3969
3970 return 0;
3971}
3972EXPORT_SYMBOL(cxgb4_create_server_filter);
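/*
 * Worked example of the stid -> filter-index adjustment performed above
 * (the numbers are illustrative, not real provisioning): with
 * tids.nstids = 2048 and tids.nftids = 496, server TID 2100 lands in
 * ftid_tab[2100 - 2048 + 496] = ftid_tab[548].  A hypothetical helper
 * making the mapping explicit:
 */
#if 0
static unsigned int example_stid_to_fidx(const struct tid_info *t,
					 unsigned int stid)
{
	/* Same arithmetic as cxgb4_create_server_filter() above. */
	return stid - t->nstids + t->nftids;
}
#endif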
3973
3974int cxgb4_remove_server_filter(const struct net_device *dev, unsigned int stid,
3975 unsigned int queue, bool ipv6)
3976{
3977 int ret;
3978 struct filter_entry *f;
3979 struct adapter *adap;
3980
3981 adap = netdev2adap(dev);
1cab775c
VP
3982
3983 /* Adjust stid to correct filter index */
3984 stid -= adap->tids.nstids;
3985 stid += adap->tids.nftids;
3986
dca4faeb
VP
3987 f = &adap->tids.ftid_tab[stid];
3988 /* Unlock the filter */
3989 f->locked = 0;
3990
3991 ret = delete_filter(adap, stid);
3992 if (ret)
3993 return ret;
3994
3995 return 0;
3996}
3997EXPORT_SYMBOL(cxgb4_remove_server_filter);
3998
f5152c90
DM
3999static struct rtnl_link_stats64 *cxgb_get_stats(struct net_device *dev,
4000 struct rtnl_link_stats64 *ns)
b8ff05a9
DM
4001{
4002 struct port_stats stats;
4003 struct port_info *p = netdev_priv(dev);
4004 struct adapter *adapter = p->adapter;
b8ff05a9
DM
4005
4006 spin_lock(&adapter->stats_lock);
4007 t4_get_port_stats(adapter, p->tx_chan, &stats);
4008 spin_unlock(&adapter->stats_lock);
4009
4010 ns->tx_bytes = stats.tx_octets;
4011 ns->tx_packets = stats.tx_frames;
4012 ns->rx_bytes = stats.rx_octets;
4013 ns->rx_packets = stats.rx_frames;
4014 ns->multicast = stats.rx_mcast_frames;
4015
4016 /* detailed rx_errors */
4017 ns->rx_length_errors = stats.rx_jabber + stats.rx_too_long +
4018 stats.rx_runt;
4019 ns->rx_over_errors = 0;
4020 ns->rx_crc_errors = stats.rx_fcs_err;
4021 ns->rx_frame_errors = stats.rx_symbol_err;
4022 ns->rx_fifo_errors = stats.rx_ovflow0 + stats.rx_ovflow1 +
4023 stats.rx_ovflow2 + stats.rx_ovflow3 +
4024 stats.rx_trunc0 + stats.rx_trunc1 +
4025 stats.rx_trunc2 + stats.rx_trunc3;
4026 ns->rx_missed_errors = 0;
4027
4028 /* detailed tx_errors */
4029 ns->tx_aborted_errors = 0;
4030 ns->tx_carrier_errors = 0;
4031 ns->tx_fifo_errors = 0;
4032 ns->tx_heartbeat_errors = 0;
4033 ns->tx_window_errors = 0;
4034
4035 ns->tx_errors = stats.tx_error_frames;
4036 ns->rx_errors = stats.rx_symbol_err + stats.rx_fcs_err +
4037 ns->rx_length_errors + stats.rx_len_err + ns->rx_fifo_errors;
4038 return ns;
4039}
4040
4041static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
4042{
060e0c75 4043 unsigned int mbox;
b8ff05a9
DM
4044 int ret = 0, prtad, devad;
4045 struct port_info *pi = netdev_priv(dev);
4046 struct mii_ioctl_data *data = (struct mii_ioctl_data *)&req->ifr_data;
4047
4048 switch (cmd) {
4049 case SIOCGMIIPHY:
4050 if (pi->mdio_addr < 0)
4051 return -EOPNOTSUPP;
4052 data->phy_id = pi->mdio_addr;
4053 break;
4054 case SIOCGMIIREG:
4055 case SIOCSMIIREG:
4056 if (mdio_phy_id_is_c45(data->phy_id)) {
4057 prtad = mdio_phy_id_prtad(data->phy_id);
4058 devad = mdio_phy_id_devad(data->phy_id);
4059 } else if (data->phy_id < 32) {
4060 prtad = data->phy_id;
4061 devad = 0;
4062 data->reg_num &= 0x1f;
4063 } else
4064 return -EINVAL;
4065
060e0c75 4066 mbox = pi->adapter->fn;
b8ff05a9 4067 if (cmd == SIOCGMIIREG)
060e0c75 4068 ret = t4_mdio_rd(pi->adapter, mbox, prtad, devad,
b8ff05a9
DM
4069 data->reg_num, &data->val_out);
4070 else
060e0c75 4071 ret = t4_mdio_wr(pi->adapter, mbox, prtad, devad,
b8ff05a9
DM
4072 data->reg_num, data->val_in);
4073 break;
4074 default:
4075 return -EOPNOTSUPP;
4076 }
4077 return ret;
4078}
4079
4080static void cxgb_set_rxmode(struct net_device *dev)
4081{
4082 /* unfortunately we can't return errors to the stack */
4083 set_rxmode(dev, -1, false);
4084}
4085
4086static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
4087{
4088 int ret;
4089 struct port_info *pi = netdev_priv(dev);
4090
4091 if (new_mtu < 81 || new_mtu > MAX_MTU) /* accommodate SACK */
4092 return -EINVAL;
060e0c75
DM
4093 ret = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, new_mtu, -1,
4094 -1, -1, -1, true);
b8ff05a9
DM
4095 if (!ret)
4096 dev->mtu = new_mtu;
4097 return ret;
4098}
4099
4100static int cxgb_set_mac_addr(struct net_device *dev, void *p)
4101{
4102 int ret;
4103 struct sockaddr *addr = p;
4104 struct port_info *pi = netdev_priv(dev);
4105
4106 if (!is_valid_ether_addr(addr->sa_data))
504f9b5a 4107 return -EADDRNOTAVAIL;
b8ff05a9 4108
060e0c75
DM
4109 ret = t4_change_mac(pi->adapter, pi->adapter->fn, pi->viid,
4110 pi->xact_addr_filt, addr->sa_data, true, true);
b8ff05a9
DM
4111 if (ret < 0)
4112 return ret;
4113
4114 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
4115 pi->xact_addr_filt = ret;
4116 return 0;
4117}
4118
b8ff05a9
DM
4119#ifdef CONFIG_NET_POLL_CONTROLLER
4120static void cxgb_netpoll(struct net_device *dev)
4121{
4122 struct port_info *pi = netdev_priv(dev);
4123 struct adapter *adap = pi->adapter;
4124
4125 if (adap->flags & USING_MSIX) {
4126 int i;
4127 struct sge_eth_rxq *rx = &adap->sge.ethrxq[pi->first_qset];
4128
4129 for (i = pi->nqsets; i; i--, rx++)
4130 t4_sge_intr_msix(0, &rx->rspq);
4131 } else
4132 t4_intr_handler(adap)(0, adap);
4133}
4134#endif
4135
4136static const struct net_device_ops cxgb4_netdev_ops = {
4137 .ndo_open = cxgb_open,
4138 .ndo_stop = cxgb_close,
4139 .ndo_start_xmit = t4_eth_xmit,
9be793bf 4140 .ndo_get_stats64 = cxgb_get_stats,
b8ff05a9
DM
4141 .ndo_set_rx_mode = cxgb_set_rxmode,
4142 .ndo_set_mac_address = cxgb_set_mac_addr,
2ed28baa 4143 .ndo_set_features = cxgb_set_features,
b8ff05a9
DM
4144 .ndo_validate_addr = eth_validate_addr,
4145 .ndo_do_ioctl = cxgb_ioctl,
4146 .ndo_change_mtu = cxgb_change_mtu,
b8ff05a9
DM
4147#ifdef CONFIG_NET_POLL_CONTROLLER
4148 .ndo_poll_controller = cxgb_netpoll,
4149#endif
4150};
4151
4152void t4_fatal_err(struct adapter *adap)
4153{
4154 t4_set_reg_field(adap, SGE_CONTROL, GLOBALENABLE, 0);
4155 t4_intr_disable(adap);
4156 dev_alert(adap->pdev_dev, "encountered fatal error, adapter stopped\n");
4157}
4158
4159static void setup_memwin(struct adapter *adap)
4160{
19dd37ba 4161 u32 bar0, mem_win0_base, mem_win1_base, mem_win2_base;
b8ff05a9
DM
4162
4163 bar0 = pci_resource_start(adap->pdev, 0); /* truncation intentional */
19dd37ba
SR
4164 if (is_t4(adap->chip)) {
4165 mem_win0_base = bar0 + MEMWIN0_BASE;
4166 mem_win1_base = bar0 + MEMWIN1_BASE;
4167 mem_win2_base = bar0 + MEMWIN2_BASE;
4168 } else {
4169 /* For T5, only relative offset inside the PCIe BAR is passed */
4170 mem_win0_base = MEMWIN0_BASE;
4171 mem_win1_base = MEMWIN1_BASE_T5;
4172 mem_win2_base = MEMWIN2_BASE_T5;
4173 }
b8ff05a9 4174 t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 0),
19dd37ba 4175 mem_win0_base | BIR(0) |
b8ff05a9
DM
4176 WINDOW(ilog2(MEMWIN0_APERTURE) - 10));
4177 t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 1),
19dd37ba 4178 mem_win1_base | BIR(0) |
b8ff05a9
DM
4179 WINDOW(ilog2(MEMWIN1_APERTURE) - 10));
4180 t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 2),
19dd37ba 4181 mem_win2_base | BIR(0) |
b8ff05a9 4182 WINDOW(ilog2(MEMWIN2_APERTURE) - 10));
636f9d37
VP
4183}
4184
4185static void setup_memwin_rdma(struct adapter *adap)
4186{
1ae970e0
DM
4187 if (adap->vres.ocq.size) {
4188 unsigned int start, sz_kb;
4189
4190 start = pci_resource_start(adap->pdev, 2) +
4191 OCQ_WIN_OFFSET(adap->pdev, &adap->vres);
4192 sz_kb = roundup_pow_of_two(adap->vres.ocq.size) >> 10;
4193 t4_write_reg(adap,
4194 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 3),
4195 start | BIR(1) | WINDOW(ilog2(sz_kb)));
4196 t4_write_reg(adap,
4197 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, 3),
4198 adap->vres.ocq.start);
4199 t4_read_reg(adap,
4200 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, 3));
4201 }
b8ff05a9
DM
4202}
4203
02b5fb8e
DM
4204static int adap_init1(struct adapter *adap, struct fw_caps_config_cmd *c)
4205{
4206 u32 v;
4207 int ret;
4208
4209 /* get device capabilities */
4210 memset(c, 0, sizeof(*c));
4211 c->op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
4212 FW_CMD_REQUEST | FW_CMD_READ);
ce91a923 4213 c->cfvalid_to_len16 = htonl(FW_LEN16(*c));
060e0c75 4214 ret = t4_wr_mbox(adap, adap->fn, c, sizeof(*c), c);
02b5fb8e
DM
4215 if (ret < 0)
4216 return ret;
4217
4218 /* select capabilities we'll be using */
4219 if (c->niccaps & htons(FW_CAPS_CONFIG_NIC_VM)) {
4220 if (!vf_acls)
4221 c->niccaps ^= htons(FW_CAPS_CONFIG_NIC_VM);
4222 else
4223 c->niccaps = htons(FW_CAPS_CONFIG_NIC_VM);
4224 } else if (vf_acls) {
4225 dev_err(adap->pdev_dev, "virtualization ACLs not supported");
4226 return ret;
4227 }
4228 c->op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
4229 FW_CMD_REQUEST | FW_CMD_WRITE);
060e0c75 4230 ret = t4_wr_mbox(adap, adap->fn, c, sizeof(*c), NULL);
02b5fb8e
DM
4231 if (ret < 0)
4232 return ret;
4233
060e0c75 4234 ret = t4_config_glbl_rss(adap, adap->fn,
02b5fb8e
DM
4235 FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL,
4236 FW_RSS_GLB_CONFIG_CMD_TNLMAPEN |
4237 FW_RSS_GLB_CONFIG_CMD_TNLALLLKP);
4238 if (ret < 0)
4239 return ret;
4240
060e0c75
DM
4241 ret = t4_cfg_pfvf(adap, adap->fn, adap->fn, 0, MAX_EGRQ, 64, MAX_INGQ,
4242 0, 0, 4, 0xf, 0xf, 16, FW_CMD_CAP_PF, FW_CMD_CAP_PF);
02b5fb8e
DM
4243 if (ret < 0)
4244 return ret;
4245
4246 t4_sge_init(adap);
4247
02b5fb8e
DM
4248 /* tweak some settings */
4249 t4_write_reg(adap, TP_SHIFT_CNT, 0x64f8849);
4250 t4_write_reg(adap, ULP_RX_TDDP_PSZ, HPZ0(PAGE_SHIFT - 12));
4251 t4_write_reg(adap, TP_PIO_ADDR, TP_INGRESS_CONFIG);
4252 v = t4_read_reg(adap, TP_PIO_DATA);
4253 t4_write_reg(adap, TP_PIO_DATA, v & ~CSUM_HAS_PSEUDO_HDR);
060e0c75 4254
dca4faeb
VP
4255 /* first 4 Tx modulation queues point to consecutive Tx channels */
4256 adap->params.tp.tx_modq_map = 0xE4;
4257 t4_write_reg(adap, A_TP_TX_MOD_QUEUE_REQ_MAP,
4258 V_TX_MOD_QUEUE_REQ_MAP(adap->params.tp.tx_modq_map));
4259
4260 /* associate each Tx modulation queue with consecutive Tx channels */
4261 v = 0x84218421;
4262 t4_write_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
4263 &v, 1, A_TP_TX_SCHED_HDR);
4264 t4_write_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
4265 &v, 1, A_TP_TX_SCHED_FIFO);
4266 t4_write_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
4267 &v, 1, A_TP_TX_SCHED_PCMD);
4268
4269#define T4_TX_MODQ_10G_WEIGHT_DEFAULT 16 /* in KB units */
4270 if (is_offload(adap)) {
4271 t4_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT0,
4272 V_TX_MODQ_WEIGHT0(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
4273 V_TX_MODQ_WEIGHT1(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
4274 V_TX_MODQ_WEIGHT2(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
4275 V_TX_MODQ_WEIGHT3(T4_TX_MODQ_10G_WEIGHT_DEFAULT));
4276 t4_write_reg(adap, A_TP_TX_MOD_CHANNEL_WEIGHT,
4277 V_TX_MODQ_WEIGHT0(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
4278 V_TX_MODQ_WEIGHT1(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
4279 V_TX_MODQ_WEIGHT2(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
4280 V_TX_MODQ_WEIGHT3(T4_TX_MODQ_10G_WEIGHT_DEFAULT));
4281 }
4282
060e0c75
DM
4283 /* get basic stuff going */
4284 return t4_early_init(adap, adap->fn);
02b5fb8e
DM
4285}
4286
b8ff05a9
DM
4287/*
4288 * Max # of ATIDs. The absolute HW max is 16K but we keep it lower.
4289 */
4290#define MAX_ATIDS 8192U
4291
636f9d37
VP
4292/*
4293 * Phase 0 of initialization: contact FW, obtain config, perform basic init.
4294 *
4295 * If the firmware we're dealing with has Configuration File support, then
4296 * we use that to perform all configuration
4297 */
4298
4299/*
4300 * Tweak configuration based on module parameters, etc. Most of these have
4301 * defaults assigned to them by Firmware Configuration Files (if we're using
4302 * them) but need to be explicitly set if we're using hard-coded
4303 * initialization. But even in the case of using Firmware Configuration
4304 * Files, we'd like to expose the ability to change these via module
4305 * parameters so these are essentially common tweaks/settings for
4306 * Configuration Files and hard-coded initialization ...
4307 */
4308static int adap_init0_tweaks(struct adapter *adapter)
4309{
4310 /*
4311 * Fix up various Host-Dependent Parameters like Page Size, Cache
4312 * Line Size, etc. The firmware default is for a 4KB Page Size and
4313 * 64B Cache Line Size ...
4314 */
4315 t4_fixup_host_params(adapter, PAGE_SIZE, L1_CACHE_BYTES);
4316
4317 /*
4318 * Process module parameters which affect early initialization.
4319 */
4320 if (rx_dma_offset != 2 && rx_dma_offset != 0) {
4321 dev_err(&adapter->pdev->dev,
4322 "Ignoring illegal rx_dma_offset=%d, using 2\n",
4323 rx_dma_offset);
4324 rx_dma_offset = 2;
4325 }
4326 t4_set_reg_field(adapter, SGE_CONTROL,
4327 PKTSHIFT_MASK,
4328 PKTSHIFT(rx_dma_offset));
4329
4330 /*
4331 * Don't include the "IP Pseudo Header" in CPL_RX_PKT checksums: Linux
4332 * adds the pseudo header itself.
4333 */
4334 t4_tp_wr_bits_indirect(adapter, TP_INGRESS_CONFIG,
4335 CSUM_HAS_PSEUDO_HDR, 0);
4336
4337 return 0;
4338}
4339
4340/*
4341 * Attempt to initialize the adapter via a Firmware Configuration File.
4342 */
4343static int adap_init0_config(struct adapter *adapter, int reset)
4344{
4345 struct fw_caps_config_cmd caps_cmd;
4346 const struct firmware *cf;
4347 unsigned long mtype = 0, maddr = 0;
4348 u32 finiver, finicsum, cfcsum;
4349 int ret, using_flash;
0a57a536 4350 char *fw_config_file, fw_config_file_path[256];
636f9d37
VP
4351
4352 /*
4353 * Reset device if necessary.
4354 */
4355 if (reset) {
4356 ret = t4_fw_reset(adapter, adapter->mbox,
4357 PIORSTMODE | PIORST);
4358 if (ret < 0)
4359 goto bye;
4360 }
4361
4362 /*
4363 * If we have a T4 configuration file under /lib/firmware/cxgb4/,
4364 * then use that. Otherwise, use the configuration file stored
4365 * in the adapter flash ...
4366 */
0a57a536
SR
4367 switch (CHELSIO_CHIP_VERSION(adapter->chip)) {
4368 case CHELSIO_T4:
4369 fw_config_file = FW_CFNAME;
4370 break;
4371 case CHELSIO_T5:
4372 fw_config_file = FW5_CFNAME;
4373 break;
4374 default:
4375 dev_err(adapter->pdev_dev, "Device %d is not supported\n",
4376 adapter->pdev->device);
4377 ret = -EINVAL;
4378 goto bye;
4379 }
4380
4381 ret = request_firmware(&cf, fw_config_file, adapter->pdev_dev);
636f9d37
VP
4382 if (ret < 0) {
4383 using_flash = 1;
4384 mtype = FW_MEMTYPE_CF_FLASH;
4385 maddr = t4_flash_cfg_addr(adapter);
4386 } else {
4387 u32 params[7], val[7];
4388
4389 using_flash = 0;
4390 if (cf->size >= FLASH_CFG_MAX_SIZE)
4391 ret = -ENOMEM;
4392 else {
4393 params[0] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
4394 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CF));
4395 ret = t4_query_params(adapter, adapter->mbox,
4396 adapter->fn, 0, 1, params, val);
4397 if (ret == 0) {
4398 /*
4399 * For t4_memory_write() below, addresses and
4400 * sizes have to be in terms of multiples of 4
4401 * bytes. So, if the Configuration File isn't
4402 * a multiple of 4 bytes in length we'll have
4403 * to write that out separately since we can't
4404 * guarantee that the bytes following the
4405 * residual byte in the buffer returned by
4406 * request_firmware() are zeroed out ...
4407 */
4408 size_t resid = cf->size & 0x3;
4409 size_t size = cf->size & ~0x3;
4410 __be32 *data = (__be32 *)cf->data;
4411
4412 mtype = FW_PARAMS_PARAM_Y_GET(val[0]);
4413 maddr = FW_PARAMS_PARAM_Z_GET(val[0]) << 16;
4414
4415 ret = t4_memory_write(adapter, mtype, maddr,
4416 size, data);
4417 if (ret == 0 && resid != 0) {
4418 union {
4419 __be32 word;
4420 char buf[4];
4421 } last;
4422 int i;
4423
4424 last.word = data[size >> 2];
4425 for (i = resid; i < 4; i++)
4426 last.buf[i] = 0;
4427 ret = t4_memory_write(adapter, mtype,
4428 maddr + size,
4429 4, &last.word);
4430 }
4431 }
4432 }
4433
4434 release_firmware(cf);
4435 if (ret)
4436 goto bye;
4437 }
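/*
 * Illustrative arithmetic for the residual handling above (the length is
 * made up): a 1031-byte Configuration File gives size = 1031 & ~0x3 =
 * 1028 and resid = 1031 & 0x3 = 3, so the first 1028 bytes go out in one
 * t4_memory_write() and the last 3 bytes are zero-padded into a final
 * 4-byte word.
 */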
4438
4439 /*
4440 * Issue a Capability Configuration command to the firmware to get it
4441 * to parse the Configuration File. We don't use t4_fw_config_file()
4442 * because we want the ability to modify various features after we've
4443 * processed the configuration file ...
4444 */
4445 memset(&caps_cmd, 0, sizeof(caps_cmd));
4446 caps_cmd.op_to_write =
4447 htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
4448 FW_CMD_REQUEST |
4449 FW_CMD_READ);
ce91a923 4450 caps_cmd.cfvalid_to_len16 =
636f9d37
VP
4451 htonl(FW_CAPS_CONFIG_CMD_CFVALID |
4452 FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) |
4453 FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(maddr >> 16) |
4454 FW_LEN16(caps_cmd));
4455 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
4456 &caps_cmd);
4457 if (ret < 0)
4458 goto bye;
4459
4460 finiver = ntohl(caps_cmd.finiver);
4461 finicsum = ntohl(caps_cmd.finicsum);
4462 cfcsum = ntohl(caps_cmd.cfcsum);
4463 if (finicsum != cfcsum)
4464 dev_warn(adapter->pdev_dev, "Configuration File checksum "\
4465 "mismatch: [fini] csum=%#x, computed csum=%#x\n",
4466 finicsum, cfcsum);
4467
636f9d37
VP
4468 /*
4469 * And now tell the firmware to use the configuration we just loaded.
4470 */
4471 caps_cmd.op_to_write =
4472 htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
4473 FW_CMD_REQUEST |
4474 FW_CMD_WRITE);
ce91a923 4475 caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
636f9d37
VP
4476 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
4477 NULL);
4478 if (ret < 0)
4479 goto bye;
4480
4481 /*
4482 * Tweak configuration based on system architecture, module
4483 * parameters, etc.
4484 */
4485 ret = adap_init0_tweaks(adapter);
4486 if (ret < 0)
4487 goto bye;
4488
4489 /*
4490 * And finally tell the firmware to initialize itself using the
4491 * parameters from the Configuration File.
4492 */
4493 ret = t4_fw_initialize(adapter, adapter->mbox);
4494 if (ret < 0)
4495 goto bye;
4496
0a57a536 4497 sprintf(fw_config_file_path, "/lib/firmware/%s", fw_config_file);
636f9d37
VP
4498 /*
4499 * Return successfully and note that we're operating with parameters
4500 * not supplied by the driver, rather than from hard-wired
4501 * initialization constants buried in the driver.
4502 */
4503 adapter->flags |= USING_SOFT_PARAMS;
4504 dev_info(adapter->pdev_dev, "Successfully configured using Firmware "\
4505 "Configuration File %s, version %#x, computed checksum %#x\n",
4506 (using_flash
4507 ? "in device FLASH"
0a57a536 4508 : fw_config_file_path),
636f9d37
VP
4509 finiver, cfcsum);
4510 return 0;
4511
4512 /*
4513 * Something bad happened. Return the error ... (If the "error"
4514 * is that there's no Configuration File on the adapter we don't
4515 * want to issue a warning since this is fairly common.)
4516 */
4517bye:
4518 if (ret != -ENOENT)
4519 dev_warn(adapter->pdev_dev, "Configuration file error %d\n",
4520 -ret);
4521 return ret;
4522}
4523
13ee15d3
VP
4524/*
4525 * Attempt to initialize the adapter via hard-coded, driver supplied
4526 * parameters ...
4527 */
4528static int adap_init0_no_config(struct adapter *adapter, int reset)
4529{
4530 struct sge *s = &adapter->sge;
4531 struct fw_caps_config_cmd caps_cmd;
4532 u32 v;
4533 int i, ret;
4534
4535 /*
4536 * Reset device if necessary
4537 */
4538 if (reset) {
4539 ret = t4_fw_reset(adapter, adapter->mbox,
4540 PIORSTMODE | PIORST);
4541 if (ret < 0)
4542 goto bye;
4543 }
4544
4545 /*
4546 * Get device capabilities and select which we'll be using.
4547 */
4548 memset(&caps_cmd, 0, sizeof(caps_cmd));
4549 caps_cmd.op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
4550 FW_CMD_REQUEST | FW_CMD_READ);
ce91a923 4551 caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
13ee15d3
VP
4552 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
4553 &caps_cmd);
4554 if (ret < 0)
4555 goto bye;
4556
13ee15d3
VP
4557 if (caps_cmd.niccaps & htons(FW_CAPS_CONFIG_NIC_VM)) {
4558 if (!vf_acls)
4559 caps_cmd.niccaps ^= htons(FW_CAPS_CONFIG_NIC_VM);
4560 else
4561 caps_cmd.niccaps = htons(FW_CAPS_CONFIG_NIC_VM);
4562 } else if (vf_acls) {
4563 dev_err(adapter->pdev_dev, "virtualization ACLs not supported");
4564 goto bye;
4565 }
4566 caps_cmd.op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
4567 FW_CMD_REQUEST | FW_CMD_WRITE);
4568 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
4569 NULL);
4570 if (ret < 0)
4571 goto bye;
4572
4573 /*
4574 * Tweak configuration based on system architecture, module
4575 * parameters, etc.
4576 */
4577 ret = adap_init0_tweaks(adapter);
4578 if (ret < 0)
4579 goto bye;
4580
4581 /*
4582 * Select RSS Global Mode we want to use. We use "Basic Virtual"
4583 * mode which maps each Virtual Interface to its own section of
4584 * the RSS Table and we turn on all map and hash enables ...
4585 */
4586 adapter->flags |= RSS_TNLALLLOOKUP;
4587 ret = t4_config_glbl_rss(adapter, adapter->mbox,
4588 FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL,
4589 FW_RSS_GLB_CONFIG_CMD_TNLMAPEN |
4590 FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ |
4591 ((adapter->flags & RSS_TNLALLLOOKUP) ?
4592 FW_RSS_GLB_CONFIG_CMD_TNLALLLKP : 0));
4593 if (ret < 0)
4594 goto bye;
4595
4596 /*
4597 * Set up our own fundamental resource provisioning ...
4598 */
4599 ret = t4_cfg_pfvf(adapter, adapter->mbox, adapter->fn, 0,
4600 PFRES_NEQ, PFRES_NETHCTRL,
4601 PFRES_NIQFLINT, PFRES_NIQ,
4602 PFRES_TC, PFRES_NVI,
4603 FW_PFVF_CMD_CMASK_MASK,
4604 pfvfres_pmask(adapter, adapter->fn, 0),
4605 PFRES_NEXACTF,
4606 PFRES_R_CAPS, PFRES_WX_CAPS);
4607 if (ret < 0)
4608 goto bye;
4609
4610 /*
4611 * Perform low level SGE initialization. We need to do this before we
4612 * send the firmware the INITIALIZE command because that will cause
4613 * any other PF Drivers which are waiting for the Master
4614 * Initialization to proceed forward.
4615 */
4616 for (i = 0; i < SGE_NTIMERS - 1; i++)
4617 s->timer_val[i] = min(intr_holdoff[i], MAX_SGE_TIMERVAL);
4618 s->timer_val[SGE_NTIMERS - 1] = MAX_SGE_TIMERVAL;
4619 s->counter_val[0] = 1;
4620 for (i = 1; i < SGE_NCOUNTERS; i++)
4621 s->counter_val[i] = min(intr_cnt[i - 1],
4622 THRESHOLD_0_GET(THRESHOLD_0_MASK));
4623 t4_sge_init(adapter);
4624
4625#ifdef CONFIG_PCI_IOV
4626 /*
4627 * Provision resource limits for Virtual Functions. We currently
4628 * grant them all the same static resource limits except for the Port
4629 * Access Rights Mask which we're assigning based on the PF. All of
4630 * the static provisioning stuff for both the PF and VF really needs
4631 * to be managed in a persistent manner for each device which the
4632 * firmware controls.
4633 */
4634 {
4635 int pf, vf;
0a57a536
SR
4636 int max_no_pf = is_t4(adapter->chip) ? NUM_OF_PF_WITH_SRIOV_T4 :
4637 NUM_OF_PF_WITH_SRIOV_T5;
13ee15d3 4638
0a57a536 4639 for (pf = 0; pf < max_no_pf; pf++) {
13ee15d3
VP
4640 if (num_vf[pf] <= 0)
4641 continue;
4642
4643 /* VF numbering starts at 1! */
4644 for (vf = 1; vf <= num_vf[pf]; vf++) {
4645 ret = t4_cfg_pfvf(adapter, adapter->mbox,
4646 pf, vf,
4647 VFRES_NEQ, VFRES_NETHCTRL,
4648 VFRES_NIQFLINT, VFRES_NIQ,
4649 VFRES_TC, VFRES_NVI,
1f1e4958 4650 FW_PFVF_CMD_CMASK_MASK,
13ee15d3
VP
4651 pfvfres_pmask(
4652 adapter, pf, vf),
4653 VFRES_NEXACTF,
4654 VFRES_R_CAPS, VFRES_WX_CAPS);
4655 if (ret < 0)
4656 dev_warn(adapter->pdev_dev,
4657 "failed to "\
4658 "provision pf/vf=%d/%d; "
4659 "err=%d\n", pf, vf, ret);
4660 }
4661 }
4662 }
4663#endif
4664
4665 /*
4666 * Set up the default filter mode. Later we'll want to implement this
4667 * via a firmware command, etc. ... This needs to be done before the
4668 * firmware initialization command ... If the selected set of fields
4669 * isn't equal to the default value, we'll need to make sure that the
4670 * field selections will fit in the 36-bit budget.
4671 */
4672 if (tp_vlan_pri_map != TP_VLAN_PRI_MAP_DEFAULT) {
404d9e3f 4673 int j, bits = 0;
13ee15d3 4674
404d9e3f
VP
4675 for (j = TP_VLAN_PRI_MAP_FIRST; j <= TP_VLAN_PRI_MAP_LAST; j++)
4676 switch (tp_vlan_pri_map & (1 << j)) {
13ee15d3
VP
4677 case 0:
4678 /* compressed filter field not enabled */
4679 break;
4680 case FCOE_MASK:
4681 bits += 1;
4682 break;
4683 case PORT_MASK:
4684 bits += 3;
4685 break;
4686 case VNIC_ID_MASK:
4687 bits += 17;
4688 break;
4689 case VLAN_MASK:
4690 bits += 17;
4691 break;
4692 case TOS_MASK:
4693 bits += 8;
4694 break;
4695 case PROTOCOL_MASK:
4696 bits += 8;
4697 break;
4698 case ETHERTYPE_MASK:
4699 bits += 16;
4700 break;
4701 case MACMATCH_MASK:
4702 bits += 9;
4703 break;
4704 case MPSHITTYPE_MASK:
4705 bits += 3;
4706 break;
4707 case FRAGMENTATION_MASK:
4708 bits += 1;
4709 break;
4710 }
4711
4712 if (bits > 36) {
4713 dev_err(adapter->pdev_dev,
4714 "tp_vlan_pri_map=%#x needs %d bits > 36;"\
4715 " using %#x\n", tp_vlan_pri_map, bits,
4716 TP_VLAN_PRI_MAP_DEFAULT);
4717 tp_vlan_pri_map = TP_VLAN_PRI_MAP_DEFAULT;
4718 }
4719 }
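/*
 * Worked example of the 36-bit budget above (an illustration, not a
 * recommended setting): VLAN (17) + PORT (3) + PROTOCOL (8) +
 * MPSHITTYPE (3) costs 31 bits and fits, whereas VLAN (17) +
 * VNIC_ID (17) + TOS (8) would need 42 bits and is rejected in favor
 * of TP_VLAN_PRI_MAP_DEFAULT.
 */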
4720 v = tp_vlan_pri_map;
4721 t4_write_indirect(adapter, TP_PIO_ADDR, TP_PIO_DATA,
4722 &v, 1, TP_VLAN_PRI_MAP);
4723
4724 /*
4725 * We need Five Tuple Lookup mode to be set in TP_GLOBAL_CONFIG in order
4726 * to support any of the compressed filter fields above. Newer
4727 * versions of the firmware do this automatically but it doesn't hurt
4728 * to set it here. Meanwhile, we do _not_ need to set Lookup Every
4729 * Packet in TP_INGRESS_CONFIG to support matching non-TCP packets
4730 * since the firmware automatically turns this on and off when we have
4731 * a non-zero number of filters active (since it does have a
4732 * performance impact).
4733 */
4734 if (tp_vlan_pri_map)
4735 t4_set_reg_field(adapter, TP_GLOBAL_CONFIG,
4736 FIVETUPLELOOKUP_MASK,
4737 FIVETUPLELOOKUP_MASK);
4738
4739 /*
4740 * Tweak some settings.
4741 */
4742 t4_write_reg(adapter, TP_SHIFT_CNT, SYNSHIFTMAX(6) |
4743 RXTSHIFTMAXR1(4) | RXTSHIFTMAXR2(15) |
4744 PERSHIFTBACKOFFMAX(8) | PERSHIFTMAX(8) |
4745 KEEPALIVEMAXR1(4) | KEEPALIVEMAXR2(9));
4746
4747 /*
4748 * Get basic stuff going by issuing the Firmware Initialize command.
4749 * Note that this _must_ be after all PFVF commands ...
4750 */
4751 ret = t4_fw_initialize(adapter, adapter->mbox);
4752 if (ret < 0)
4753 goto bye;
4754
4755 /*
4756 * Return successfully!
4757 */
4758 dev_info(adapter->pdev_dev, "Successfully configured using built-in "\
4759 "driver parameters\n");
4760 return 0;
4761
4762 /*
4763 * Something bad happened. Return the error ...
4764 */
4765bye:
4766 return ret;
4767}
4768
b8ff05a9
DM
4769/*
4770 * Phase 0 of initialization: contact FW, obtain config, perform basic init.
4771 */
4772static int adap_init0(struct adapter *adap)
4773{
4774 int ret;
4775 u32 v, port_vec;
4776 enum dev_state state;
4777 u32 params[7], val[7];
9a4da2cd 4778 struct fw_caps_config_cmd caps_cmd;
636f9d37 4779 int reset = 1, j;
b8ff05a9 4780
636f9d37
VP
4781 /*
4782 * Contact FW, advertising Master capability (and potentially forcing
4783 * ourselves as the Master PF if our module parameter force_init is
4784 * set).
4785 */
4786 ret = t4_fw_hello(adap, adap->mbox, adap->fn,
4787 force_init ? MASTER_MUST : MASTER_MAY,
4788 &state);
b8ff05a9
DM
4789 if (ret < 0) {
4790 dev_err(adap->pdev_dev, "could not connect to FW, error %d\n",
4791 ret);
4792 return ret;
4793 }
636f9d37
VP
4794 if (ret == adap->mbox)
4795 adap->flags |= MASTER_PF;
4796 if (force_init && state == DEV_STATE_INIT)
4797 state = DEV_STATE_UNINIT;
b8ff05a9 4798
636f9d37
VP
4799 /*
4800 * If we're the Master PF Driver and the device is uninitialized,
4801 * then let's consider upgrading the firmware ... (We always want
4802 * to check the firmware version number in order to A. get it for
4803 * later reporting and B. to warn if the currently loaded firmware
4804 * is excessively mismatched relative to the driver.)
4805 */
4806 ret = t4_check_fw_version(adap);
4807 if ((adap->flags & MASTER_PF) && state != DEV_STATE_INIT) {
4808 if (ret == -EINVAL || ret > 0) {
4809 if (upgrade_fw(adap) >= 0) {
4810 /*
4811 * Note that the chip was reset as part of the
4812 * firmware upgrade so we don't reset it again
4813 * below and grab the new firmware version.
4814 */
4815 reset = 0;
4816 ret = t4_check_fw_version(adap);
4817 }
4818 }
4819 if (ret < 0)
4820 return ret;
4821 }
b8ff05a9 4822
636f9d37
VP
4823 /*
4824 * Grab VPD parameters. This should be done after we establish a
4825 * connection to the firmware since some of the VPD parameters
4826 * (notably the Core Clock frequency) are retrieved via requests to
4827 * the firmware. On the other hand, we need these fairly early on
4828 * so we do this right after getting ahold of the firmware.
4829 */
4830 ret = get_vpd_params(adap, &adap->params.vpd);
a0881cab
DM
4831 if (ret < 0)
4832 goto bye;
a0881cab 4833
636f9d37 4834 /*
13ee15d3
VP
4835 * Find out what ports are available to us. Note that we need to do
4836 * this before calling adap_init0_no_config() since it needs nports
4837 * and portvec ...
636f9d37
VP
4838 */
4839 v =
4840 FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
4841 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_PORTVEC);
4842 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 1, &v, &port_vec);
a0881cab
DM
4843 if (ret < 0)
4844 goto bye;
4845
636f9d37
VP
4846 adap->params.nports = hweight32(port_vec);
4847 adap->params.portvec = port_vec;
4848
4849 /*
4850 * If the firmware is initialized already (and we're not forcing a
4851 * master initialization), note that we're living with existing
4852 * adapter parameters. Otherwise, it's time to try initializing the
4853 * adapter ...
4854 */
4855 if (state == DEV_STATE_INIT) {
4856 dev_info(adap->pdev_dev, "Coming up as %s: "\
4857 "Adapter already initialized\n",
4858 adap->flags & MASTER_PF ? "MASTER" : "SLAVE");
4859 adap->flags |= USING_SOFT_PARAMS;
4860 } else {
4861 dev_info(adap->pdev_dev, "Coming up as MASTER: "\
4862 "Initializing adapter\n");
636f9d37
VP
4863
4864 /*
4865 * If the firmware doesn't support Configuration
4866 * Files, warn the user.
4867 */
4868 if (ret < 0)
13ee15d3 4869 dev_warn(adap->pdev_dev, "Firmware doesn't support "
636f9d37 4870 "configuration file.\n");
13ee15d3
VP
4871 if (force_old_init)
4872 ret = adap_init0_no_config(adap, reset);
636f9d37
VP
4873 else {
4874 /*
13ee15d3
VP
4875 * Find out whether we're dealing with a version of
4876 * the firmware which has configuration file support.
636f9d37 4877 */
13ee15d3
VP
4878 params[0] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
4879 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CF));
4880 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 1,
4881 params, val);
636f9d37 4882
13ee15d3
VP
4883 /*
4884 * If the firmware doesn't support Configuration
4885 * Files, use the old Driver-based, hard-wired
4886 * initialization. Otherwise, try using the
4887 * Configuration File support and fall back to the
4888 * Driver-based initialization if there's no
4889 * Configuration File found.
4890 */
4891 if (ret < 0)
4892 ret = adap_init0_no_config(adap, reset);
4893 else {
4894 /*
4895 * The firmware provides us with a memory
4896 * buffer where we can load a Configuration
4897 * File from the host if we want to override
4898 * the Configuration File in flash.
4899 */
4900
4901 ret = adap_init0_config(adap, reset);
4902 if (ret == -ENOENT) {
4903 dev_info(adap->pdev_dev,
4904 "No Configuration File present "
4905 "on adapter. Using hard-wired "
4906 "configuration parameters.\n");
4907 ret = adap_init0_no_config(adap, reset);
4908 }
636f9d37
VP
4909 }
4910 }
4911 if (ret < 0) {
4912 dev_err(adap->pdev_dev,
4913 "could not initialize adapter, error %d\n",
4914 -ret);
4915 goto bye;
4916 }
4917 }
4918
4919 /*
4920 * If we're living with non-hard-coded parameters (either from a
4921 * Firmware Configuration File or values programmed by a different PF
4922 * Driver), give the SGE code a chance to pull in anything that it
4923 * needs ... Note that this must be called after we retrieve our VPD
4924 * parameters in order to know how to convert core ticks to seconds.
4925 */
4926 if (adap->flags & USING_SOFT_PARAMS) {
4927 ret = t4_sge_init(adap);
4928 if (ret < 0)
4929 goto bye;
4930 }
4931
9a4da2cd
VP
4932 if (is_bypass_device(adap->pdev->device))
4933 adap->params.bypass = 1;
4934
636f9d37
VP
4935 /*
4936 * Grab some of our basic fundamental operating parameters.
4937 */
4938#define FW_PARAM_DEV(param) \
4939 (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \
4940 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param))
4941
b8ff05a9 4942#define FW_PARAM_PFVF(param) \
636f9d37
VP
4943 FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \
4944 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param)| \
4945 FW_PARAMS_PARAM_Y(0) | \
4946 FW_PARAMS_PARAM_Z(0)
b8ff05a9 4947
636f9d37 4948 params[0] = FW_PARAM_PFVF(EQ_START);
b8ff05a9
DM
4949 params[1] = FW_PARAM_PFVF(L2T_START);
4950 params[2] = FW_PARAM_PFVF(L2T_END);
4951 params[3] = FW_PARAM_PFVF(FILTER_START);
4952 params[4] = FW_PARAM_PFVF(FILTER_END);
e46dab4d 4953 params[5] = FW_PARAM_PFVF(IQFLINT_START);
636f9d37 4954 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6, params, val);
b8ff05a9
DM
4955 if (ret < 0)
4956 goto bye;
636f9d37
VP
4957 adap->sge.egr_start = val[0];
4958 adap->l2t_start = val[1];
4959 adap->l2t_end = val[2];
b8ff05a9
DM
4960 adap->tids.ftid_base = val[3];
4961 adap->tids.nftids = val[4] - val[3] + 1;
e46dab4d 4962 adap->sge.ingr_start = val[5];
b8ff05a9 4963
636f9d37
VP
4964 /* query params related to active filter region */
4965 params[0] = FW_PARAM_PFVF(ACTIVE_FILTER_START);
4966 params[1] = FW_PARAM_PFVF(ACTIVE_FILTER_END);
4967 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2, params, val);
4968 /* If the Active filter region is non-empty we enable establishing
4969 * offload connections through firmware work requests.
4970 */
4971 if ((val[0] != val[1]) && (ret >= 0)) {
4972 adap->flags |= FW_OFLD_CONN;
4973 adap->tids.aftid_base = val[0];
4974 adap->tids.aftid_end = val[1];
4975 }
4976
636f9d37
VP
4977 /*
4978 * Get device capabilities so we can determine what resources we need
4979 * to manage.
4980 */
4981 memset(&caps_cmd, 0, sizeof(caps_cmd));
9a4da2cd 4982 caps_cmd.op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
13ee15d3 4983 FW_CMD_REQUEST | FW_CMD_READ);
ce91a923 4984 caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
636f9d37
VP
4985 ret = t4_wr_mbox(adap, adap->mbox, &caps_cmd, sizeof(caps_cmd),
4986 &caps_cmd);
4987 if (ret < 0)
4988 goto bye;
4989
13ee15d3 4990 if (caps_cmd.ofldcaps) {
b8ff05a9
DM
4991 /* query offload-related parameters */
4992 params[0] = FW_PARAM_DEV(NTID);
4993 params[1] = FW_PARAM_PFVF(SERVER_START);
4994 params[2] = FW_PARAM_PFVF(SERVER_END);
4995 params[3] = FW_PARAM_PFVF(TDDP_START);
4996 params[4] = FW_PARAM_PFVF(TDDP_END);
4997 params[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
636f9d37
VP
4998 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6,
4999 params, val);
b8ff05a9
DM
5000 if (ret < 0)
5001 goto bye;
5002 adap->tids.ntids = val[0];
5003 adap->tids.natids = min(adap->tids.ntids / 2, MAX_ATIDS);
5004 adap->tids.stid_base = val[1];
5005 adap->tids.nstids = val[2] - val[1] + 1;
636f9d37
VP
5006 /*
5007 * Setup server filter region. Divide the available filter
5008 * region into two parts. Regular filters get 1/3rd and server
5009 * filters get 2/3rd part. This is only enabled if the workaround
5010 * path is enabled.
5011 * 1. Regular filters.
5012 * 2. Server filters: these are special filters used to redirect
5013 * SYN packets to the offload queue.
5014 */
5015 if (adap->flags & FW_OFLD_CONN && !is_bypass(adap)) {
5016 adap->tids.sftid_base = adap->tids.ftid_base +
5017 DIV_ROUND_UP(adap->tids.nftids, 3);
5018 adap->tids.nsftids = adap->tids.nftids -
5019 DIV_ROUND_UP(adap->tids.nftids, 3);
5020 adap->tids.nftids = adap->tids.sftid_base -
5021 adap->tids.ftid_base;
5022 }
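/*
 * Worked example of the 1/3 : 2/3 split above (illustrative numbers):
 * with ftid_base = 0 and nftids = 496, DIV_ROUND_UP(496, 3) = 166, so
 * sftid_base becomes 166, nsftids = 496 - 166 = 330 server filters,
 * and nftids shrinks to 166 - 0 = 166 regular filters.
 */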
b8ff05a9
DM
5023 adap->vres.ddp.start = val[3];
5024 adap->vres.ddp.size = val[4] - val[3] + 1;
5025 adap->params.ofldq_wr_cred = val[5];
636f9d37 5026
b8ff05a9
DM
5027 adap->params.offload = 1;
5028 }
636f9d37 5029 if (caps_cmd.rdmacaps) {
b8ff05a9
DM
5030 params[0] = FW_PARAM_PFVF(STAG_START);
5031 params[1] = FW_PARAM_PFVF(STAG_END);
5032 params[2] = FW_PARAM_PFVF(RQ_START);
5033 params[3] = FW_PARAM_PFVF(RQ_END);
5034 params[4] = FW_PARAM_PFVF(PBL_START);
5035 params[5] = FW_PARAM_PFVF(PBL_END);
636f9d37
VP
5036 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6,
5037 params, val);
b8ff05a9
DM
5038 if (ret < 0)
5039 goto bye;
5040 adap->vres.stag.start = val[0];
5041 adap->vres.stag.size = val[1] - val[0] + 1;
5042 adap->vres.rq.start = val[2];
5043 adap->vres.rq.size = val[3] - val[2] + 1;
5044 adap->vres.pbl.start = val[4];
5045 adap->vres.pbl.size = val[5] - val[4] + 1;
a0881cab
DM
5046
5047 params[0] = FW_PARAM_PFVF(SQRQ_START);
5048 params[1] = FW_PARAM_PFVF(SQRQ_END);
5049 params[2] = FW_PARAM_PFVF(CQ_START);
5050 params[3] = FW_PARAM_PFVF(CQ_END);
1ae970e0
DM
5051 params[4] = FW_PARAM_PFVF(OCQ_START);
5052 params[5] = FW_PARAM_PFVF(OCQ_END);
636f9d37 5053 ret = t4_query_params(adap, 0, 0, 0, 6, params, val);
a0881cab
DM
5054 if (ret < 0)
5055 goto bye;
5056 adap->vres.qp.start = val[0];
5057 adap->vres.qp.size = val[1] - val[0] + 1;
5058 adap->vres.cq.start = val[2];
5059 adap->vres.cq.size = val[3] - val[2] + 1;
1ae970e0
DM
5060 adap->vres.ocq.start = val[4];
5061 adap->vres.ocq.size = val[5] - val[4] + 1;
b8ff05a9 5062 }
636f9d37 5063 if (caps_cmd.iscsicaps) {
b8ff05a9
DM
5064 params[0] = FW_PARAM_PFVF(ISCSI_START);
5065 params[1] = FW_PARAM_PFVF(ISCSI_END);
636f9d37
VP
5066 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2,
5067 params, val);
b8ff05a9
DM
5068 if (ret < 0)
5069 goto bye;
5070 adap->vres.iscsi.start = val[0];
5071 adap->vres.iscsi.size = val[1] - val[0] + 1;
5072 }
5073#undef FW_PARAM_PFVF
5074#undef FW_PARAM_DEV
5075
636f9d37
VP
5076 /*
5077 * These are finalized by FW initialization; load their values now.
5078 */
b8ff05a9
DM
5079 v = t4_read_reg(adap, TP_TIMER_RESOLUTION);
5080 adap->params.tp.tre = TIMERRESOLUTION_GET(v);
636f9d37 5081 adap->params.tp.dack_re = DELAYEDACKRESOLUTION_GET(v);
b8ff05a9
DM
5082 t4_read_mtu_tbl(adap, adap->params.mtus, NULL);
5083 t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
5084 adap->params.b_wnd);
7ee9ff94 5085
636f9d37
VP
5086 /* MODQ_REQ_MAP defaults to setting queues 0-3 to chan 0-3 */
5087 for (j = 0; j < NCHAN; j++)
5088 adap->params.tp.tx_modq[j] = j;
7ee9ff94 5089
793dad94
VP
5090 t4_read_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
5091 &adap->filter_mode, 1,
5092 TP_VLAN_PRI_MAP);
5093
636f9d37 5094 adap->flags |= FW_OK;
b8ff05a9
DM
5095 return 0;
5096
5097 /*
636f9d37
VP
5098 * Something bad happened. If a command timed out or failed with EIO,
5099 * the FW is not operating within its spec or something catastrophic
5100 * happened to HW/FW; stop issuing commands.
b8ff05a9 5101 */
636f9d37
VP
5102bye:
5103 if (ret != -ETIMEDOUT && ret != -EIO)
5104 t4_fw_bye(adap, adap->mbox);
b8ff05a9
DM
5105 return ret;
5106}
5107
204dc3c0
DM
5108/* EEH callbacks */
5109
5110static pci_ers_result_t eeh_err_detected(struct pci_dev *pdev,
5111 pci_channel_state_t state)
5112{
5113 int i;
5114 struct adapter *adap = pci_get_drvdata(pdev);
5115
5116 if (!adap)
5117 goto out;
5118
5119 rtnl_lock();
5120 adap->flags &= ~FW_OK;
5121 notify_ulds(adap, CXGB4_STATE_START_RECOVERY);
5122 for_each_port(adap, i) {
5123 struct net_device *dev = adap->port[i];
5124
5125 netif_device_detach(dev);
5126 netif_carrier_off(dev);
5127 }
5128 if (adap->flags & FULL_INIT_DONE)
5129 cxgb_down(adap);
5130 rtnl_unlock();
5131 pci_disable_device(pdev);
5132out: return state == pci_channel_io_perm_failure ?
5133 PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
5134}
5135
5136static pci_ers_result_t eeh_slot_reset(struct pci_dev *pdev)
5137{
5138 int i, ret;
5139 struct fw_caps_config_cmd c;
5140 struct adapter *adap = pci_get_drvdata(pdev);
5141
5142 if (!adap) {
5143 pci_restore_state(pdev);
5144 pci_save_state(pdev);
5145 return PCI_ERS_RESULT_RECOVERED;
5146 }
5147
5148 if (pci_enable_device(pdev)) {
5149 dev_err(&pdev->dev, "cannot reenable PCI device after reset\n");
5150 return PCI_ERS_RESULT_DISCONNECT;
5151 }
5152
5153 pci_set_master(pdev);
5154 pci_restore_state(pdev);
5155 pci_save_state(pdev);
5156 pci_cleanup_aer_uncorrect_error_status(pdev);
5157
5158 if (t4_wait_dev_ready(adap) < 0)
5159 return PCI_ERS_RESULT_DISCONNECT;
060e0c75 5160 if (t4_fw_hello(adap, adap->fn, adap->fn, MASTER_MUST, NULL))
204dc3c0
DM
5161 return PCI_ERS_RESULT_DISCONNECT;
5162 adap->flags |= FW_OK;
5163 if (adap_init1(adap, &c))
5164 return PCI_ERS_RESULT_DISCONNECT;
5165
5166 for_each_port(adap, i) {
5167 struct port_info *p = adap2pinfo(adap, i);
5168
060e0c75
DM
5169 ret = t4_alloc_vi(adap, adap->fn, p->tx_chan, adap->fn, 0, 1,
5170 NULL, NULL);
204dc3c0
DM
5171 if (ret < 0)
5172 return PCI_ERS_RESULT_DISCONNECT;
5173 p->viid = ret;
5174 p->xact_addr_filt = -1;
5175 }
5176
5177 t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
5178 adap->params.b_wnd);
1ae970e0 5179 setup_memwin(adap);
204dc3c0
DM
5180 if (cxgb_up(adap))
5181 return PCI_ERS_RESULT_DISCONNECT;
5182 return PCI_ERS_RESULT_RECOVERED;
5183}
5184
5185static void eeh_resume(struct pci_dev *pdev)
5186{
5187 int i;
5188 struct adapter *adap = pci_get_drvdata(pdev);
5189
5190 if (!adap)
5191 return;
5192
5193 rtnl_lock();
5194 for_each_port(adap, i) {
5195 struct net_device *dev = adap->port[i];
5196
5197 if (netif_running(dev)) {
5198 link_start(dev);
5199 cxgb_set_rxmode(dev);
5200 }
5201 netif_device_attach(dev);
5202 }
5203 rtnl_unlock();
5204}
5205
3646f0e5 5206static const struct pci_error_handlers cxgb4_eeh = {
204dc3c0
DM
5207 .error_detected = eeh_err_detected,
5208 .slot_reset = eeh_slot_reset,
5209 .resume = eeh_resume,
5210};
5211
b8ff05a9
DM
5212static inline bool is_10g_port(const struct link_config *lc)
5213{
5214 return (lc->supported & FW_PORT_CAP_SPEED_10G) != 0;
5215}
5216
5217static inline void init_rspq(struct sge_rspq *q, u8 timer_idx, u8 pkt_cnt_idx,
5218 unsigned int size, unsigned int iqe_size)
5219{
5220 q->intr_params = QINTR_TIMER_IDX(timer_idx) |
5221 (pkt_cnt_idx < SGE_NCOUNTERS ? QINTR_CNT_EN : 0);
5222 q->pktcnt_idx = pkt_cnt_idx < SGE_NCOUNTERS ? pkt_cnt_idx : 0;
5223 q->iqe_len = iqe_size;
5224 q->size = size;
5225}
5226
5227/*
5228 * Perform default configuration of DMA queues depending on the number and type
5229 * of ports we found and the number of available CPUs. Most settings can be
5230 * modified by the admin prior to actual use.
5231 */
91744948 5232static void cfg_queues(struct adapter *adap)
b8ff05a9
DM
5233{
5234 struct sge *s = &adap->sge;
5235 int i, q10g = 0, n10g = 0, qidx = 0;
5236
5237 for_each_port(adap, i)
5238 n10g += is_10g_port(&adap2pinfo(adap, i)->link_cfg);
5239
5240 /*
5241 * We default to 1 queue per non-10G port and up to as many queues as
5242 * there are CPU cores per 10G port.
5243 */
5244 if (n10g)
5245 q10g = (MAX_ETH_QSETS - (adap->params.nports - n10g)) / n10g;
5952dde7
YM
5246 if (q10g > netif_get_num_default_rss_queues())
5247 q10g = netif_get_num_default_rss_queues();
b8ff05a9
DM
5248
5249 for_each_port(adap, i) {
5250 struct port_info *pi = adap2pinfo(adap, i);
5251
5252 pi->first_qset = qidx;
5253 pi->nqsets = is_10g_port(&pi->link_cfg) ? q10g : 1;
5254 qidx += pi->nqsets;
5255 }
5256
5257 s->ethqsets = qidx;
5258 s->max_ethqsets = qidx; /* MSI-X may lower it later */
5259
5260 if (is_offload(adap)) {
5261 /*
5262 * For offload we use 1 queue/channel if all ports are up to 1G,
5263 * otherwise we divide all available queues amongst the channels
5264 * capped by the number of available cores.
5265 */
5266 if (n10g) {
5267 i = min_t(int, ARRAY_SIZE(s->ofldrxq),
5268 num_online_cpus());
5269 s->ofldqsets = roundup(i, adap->params.nports);
5270 } else
5271 s->ofldqsets = adap->params.nports;
5272 /* For RDMA one Rx queue per channel suffices */
5273 s->rdmaqs = adap->params.nports;
5274 }
5275
5276 for (i = 0; i < ARRAY_SIZE(s->ethrxq); i++) {
5277 struct sge_eth_rxq *r = &s->ethrxq[i];
5278
5279 init_rspq(&r->rspq, 0, 0, 1024, 64);
5280 r->fl.size = 72;
5281 }
5282
5283 for (i = 0; i < ARRAY_SIZE(s->ethtxq); i++)
5284 s->ethtxq[i].q.size = 1024;
5285
5286 for (i = 0; i < ARRAY_SIZE(s->ctrlq); i++)
5287 s->ctrlq[i].q.size = 512;
5288
5289 for (i = 0; i < ARRAY_SIZE(s->ofldtxq); i++)
5290 s->ofldtxq[i].q.size = 1024;
5291
5292 for (i = 0; i < ARRAY_SIZE(s->ofldrxq); i++) {
5293 struct sge_ofld_rxq *r = &s->ofldrxq[i];
5294
5295 init_rspq(&r->rspq, 0, 0, 1024, 64);
5296 r->rspq.uld = CXGB4_ULD_ISCSI;
5297 r->fl.size = 72;
5298 }
5299
5300 for (i = 0; i < ARRAY_SIZE(s->rdmarxq); i++) {
5301 struct sge_ofld_rxq *r = &s->rdmarxq[i];
5302
5303 init_rspq(&r->rspq, 0, 0, 511, 64);
5304 r->rspq.uld = CXGB4_ULD_RDMA;
5305 r->fl.size = 72;
5306 }
5307
5308 init_rspq(&s->fw_evtq, 6, 0, 512, 64);
5309 init_rspq(&s->intrq, 6, 0, 2 * MAX_INGQ, 64);
5310}
5311
5312/*
5313 * Reduce the number of Ethernet queues across all ports to at most n.
5314 * n provides at least one queue per port.
5315 */
91744948 5316static void reduce_ethqs(struct adapter *adap, int n)
b8ff05a9
DM
5317{
5318 int i;
5319 struct port_info *pi;
5320
5321 while (n < adap->sge.ethqsets)
5322 for_each_port(adap, i) {
5323 pi = adap2pinfo(adap, i);
5324 if (pi->nqsets > 1) {
5325 pi->nqsets--;
5326 adap->sge.ethqsets--;
5327 if (adap->sge.ethqsets <= n)
5328 break;
5329 }
5330 }
5331
5332 n = 0;
5333 for_each_port(adap, i) {
5334 pi = adap2pinfo(adap, i);
5335 pi->first_qset = n;
5336 n += pi->nqsets;
5337 }
5338}
5339
5340/* 2 MSI-X vectors needed for the FW queue and non-data interrupts */
5341#define EXTRA_VECS 2
5342
91744948 5343static int enable_msix(struct adapter *adap)
b8ff05a9
DM
5344{
5345 int ofld_need = 0;
5346 int i, err, want, need;
5347 struct sge *s = &adap->sge;
5348 unsigned int nchan = adap->params.nports;
5349 struct msix_entry entries[MAX_INGQ + 1];
5350
5351 for (i = 0; i < ARRAY_SIZE(entries); ++i)
5352 entries[i].entry = i;
5353
5354 want = s->max_ethqsets + EXTRA_VECS;
5355 if (is_offload(adap)) {
5356 want += s->rdmaqs + s->ofldqsets;
5357 /* need nchan for each possible ULD */
5358 ofld_need = 2 * nchan;
5359 }
5360 need = adap->params.nports + EXTRA_VECS + ofld_need;
5361
5362 while ((err = pci_enable_msix(adap->pdev, entries, want)) >= need)
5363 want = err;
5364
5365 if (!err) {
5366 /*
5367 * Distribute available vectors to the various queue groups.
5368 * Every group gets its minimum requirement and NIC gets top
5369 * priority for leftovers.
5370 */
5371 i = want - EXTRA_VECS - ofld_need;
5372 if (i < s->max_ethqsets) {
5373 s->max_ethqsets = i;
5374 if (i < s->ethqsets)
5375 reduce_ethqs(adap, i);
5376 }
5377 if (is_offload(adap)) {
5378 i = want - EXTRA_VECS - s->max_ethqsets;
5379 i -= ofld_need - nchan;
5380 s->ofldqsets = (i / nchan) * nchan; /* round down */
5381 }
5382 for (i = 0; i < want; ++i)
5383 adap->msix_info[i].vec = entries[i].vector;
5384 } else if (err > 0)
5385 dev_info(adap->pdev_dev,
5386 "only %d MSI-X vectors left, not using MSI-X\n", err);
5387 return err;
5388}
5389
5390#undef EXTRA_VECS
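/*
 * Illustrative MSI-X accounting (assuming max_ethqsets = 16,
 * ofldqsets = 8 and rdmaqs = 4 on a 4-port offload adapter):
 * want = 16 + 2 + 4 + 8 = 30 vectors and need = 4 + 2 + 2*4 = 14;
 * pci_enable_msix() is retried with the count it reports available
 * until that falls below "need".
 */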
5391
91744948 5392static int init_rss(struct adapter *adap)
671b0060
DM
5393{
5394 unsigned int i, j;
5395
5396 for_each_port(adap, i) {
5397 struct port_info *pi = adap2pinfo(adap, i);
5398
5399 pi->rss = kcalloc(pi->rss_size, sizeof(u16), GFP_KERNEL);
5400 if (!pi->rss)
5401 return -ENOMEM;
5402 for (j = 0; j < pi->rss_size; j++)
278bc429 5403 pi->rss[j] = ethtool_rxfh_indir_default(j, pi->nqsets);
671b0060
DM
5404 }
5405 return 0;
5406}
5407
91744948 5408static void print_port_info(const struct net_device *dev)
b8ff05a9
DM
5409{
5410 static const char *base[] = {
a0881cab 5411 "R XFI", "R XAUI", "T SGMII", "T XFI", "T XAUI", "KX4", "CX4",
7d5e77aa 5412 "KX", "KR", "R SFP+", "KR/KX", "KR/KX/KX4"
b8ff05a9
DM
5413 };
5414
b8ff05a9 5415 char buf[80];
118969ed 5416 char *bufp = buf;
f1a051b9 5417 const char *spd = "";
118969ed
DM
5418 const struct port_info *pi = netdev_priv(dev);
5419 const struct adapter *adap = pi->adapter;
f1a051b9
DM
5420
5421 if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_2_5GB)
5422 spd = " 2.5 GT/s";
5423 else if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_5_0GB)
5424 spd = " 5 GT/s";
b8ff05a9 5425
118969ed
DM
5426 if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_100M)
5427 bufp += sprintf(bufp, "100/");
5428 if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_1G)
5429 bufp += sprintf(bufp, "1000/");
5430 if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_10G)
5431 bufp += sprintf(bufp, "10G/");
5432 if (bufp != buf)
5433 --bufp;
5434 sprintf(bufp, "BASE-%s", base[pi->port_type]);
5435
5436 netdev_info(dev, "Chelsio %s rev %d %s %sNIC PCIe x%d%s%s\n",
0a57a536
SR
5437 adap->params.vpd.id,
5438 CHELSIO_CHIP_RELEASE(adap->params.rev), buf,
118969ed
DM
5439 is_offload(adap) ? "R" : "", adap->params.pci.width, spd,
5440 (adap->flags & USING_MSIX) ? " MSI-X" :
5441 (adap->flags & USING_MSI) ? " MSI" : "");
5442 netdev_info(dev, "S/N: %s, E/C: %s\n",
5443 adap->params.vpd.sn, adap->params.vpd.ec);
b8ff05a9
DM
5444}
5445
91744948 5446static void enable_pcie_relaxed_ordering(struct pci_dev *dev)
ef306b50 5447{
e5c8ae5f 5448 pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_RELAX_EN);
ef306b50
DM
5449}
5450
06546391
DM
5451/*
5452 * Free the following resources:
5453 * - memory used for tables
5454 * - MSI/MSI-X
5455 * - net devices
5456 * - resources FW is holding for us
5457 */
5458static void free_some_resources(struct adapter *adapter)
5459{
5460 unsigned int i;
5461
5462 t4_free_mem(adapter->l2t);
5463 t4_free_mem(adapter->tids.tid_tab);
5464 disable_msi(adapter);
5465
5466 for_each_port(adapter, i)
671b0060
DM
5467 if (adapter->port[i]) {
5468 kfree(adap2pinfo(adapter, i)->rss);
06546391 5469 free_netdev(adapter->port[i]);
671b0060 5470 }
06546391 5471 if (adapter->flags & FW_OK)
060e0c75 5472 t4_fw_bye(adapter, adapter->fn);
06546391
DM
5473}
5474
2ed28baa 5475#define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
35d35682 5476#define VLAN_FEAT (NETIF_F_SG | NETIF_F_IP_CSUM | TSO_FLAGS | \
b8ff05a9 5477 NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA)
22adfe0a 5478#define SEGMENT_SIZE 128
b8ff05a9 5479
1dd06ae8 5480static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
b8ff05a9 5481{
22adfe0a 5482 int func, i, err, s_qpp, qpp, num_seg;
b8ff05a9 5483 struct port_info *pi;
c8f44aff 5484 bool highdma = false;
b8ff05a9 5485 struct adapter *adapter = NULL;
0a57a536
SR
5486#ifdef CONFIG_PCI_IOV
5487 int max_no_pf;
5488#endif
b8ff05a9
DM
5489
5490 printk_once(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);
5491
5492 err = pci_request_regions(pdev, KBUILD_MODNAME);
5493 if (err) {
5494 /* Just info, some other driver may have claimed the device. */
5495 dev_info(&pdev->dev, "cannot obtain PCI resources\n");
5496 return err;
5497 }
5498
060e0c75 5499 /* We control everything through one PF */
b8ff05a9 5500 func = PCI_FUNC(pdev->devfn);
060e0c75 5501 if (func != ent->driver_data) {
204dc3c0 5502 pci_save_state(pdev); /* to restore SR-IOV later */
b8ff05a9 5503 goto sriov;
204dc3c0 5504 }
b8ff05a9
DM
5505
5506 err = pci_enable_device(pdev);
5507 if (err) {
5508 dev_err(&pdev->dev, "cannot enable PCI device\n");
5509 goto out_release_regions;
5510 }
5511
5512 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
c8f44aff 5513 highdma = true;
b8ff05a9
DM
5514 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
5515 if (err) {
5516 dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
5517 "coherent allocations\n");
5518 goto out_disable_device;
5519 }
5520 } else {
5521 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
5522 if (err) {
5523 dev_err(&pdev->dev, "no usable DMA configuration\n");
5524 goto out_disable_device;
5525 }
5526 }
5527
5528 pci_enable_pcie_error_reporting(pdev);
ef306b50 5529 enable_pcie_relaxed_ordering(pdev);
b8ff05a9
DM
5530 pci_set_master(pdev);
5531 pci_save_state(pdev);
5532
5533 adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
5534 if (!adapter) {
5535 err = -ENOMEM;
5536 goto out_disable_device;
5537 }
5538
	adapter->regs = pci_ioremap_bar(pdev, 0);
	if (!adapter->regs) {
		dev_err(&pdev->dev, "cannot map device registers\n");
		err = -ENOMEM;
		goto out_free_adapter;
	}

	adapter->pdev = pdev;
	adapter->pdev_dev = &pdev->dev;
	adapter->mbox = func;
	adapter->fn = func;
	adapter->msg_enable = dflt_msg_enable;
	memset(adapter->chan_map, 0xff, sizeof(adapter->chan_map));

	spin_lock_init(&adapter->stats_lock);
	spin_lock_init(&adapter->tid_release_lock);

	INIT_WORK(&adapter->tid_release_task, process_tid_release_list);
	INIT_WORK(&adapter->db_full_task, process_db_full);
	INIT_WORK(&adapter->db_drop_task, process_db_drop);

	err = t4_prep_adapter(adapter);
	if (err)
		goto out_unmap_bar0;

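	/* T5 and later expose SGE doorbells through BAR2.  Sanity-check
	 * the egress-queues-per-page setting, then map BAR2 with write
	 * combining so coalesced doorbell writes can actually merge.
	 */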
	if (!is_t4(adapter->chip)) {
		s_qpp = QUEUESPERPAGEPF1 * adapter->fn;
		qpp = 1 << QUEUESPERPAGEPF0_GET(t4_read_reg(adapter,
		      SGE_EGRESS_QUEUES_PER_PAGE_PF) >> s_qpp);
		num_seg = PAGE_SIZE / SEGMENT_SIZE;

		/* Each segment is 128B.  Write coalescing is usable only
		 * if the number of egress queues per page configured for
		 * this PF in SGE_EGRESS_QUEUES_PER_PAGE_PF does not exceed
		 * the number of 128B segments that fit in a page.
		 */
		if (qpp > num_seg) {
			dev_err(&pdev->dev,
				"Incorrect number of egress queues per page\n");
			err = -EINVAL;
			goto out_unmap_bar0;
		}
		adapter->bar2 = ioremap_wc(pci_resource_start(pdev, 2),
					   pci_resource_len(pdev, 2));
		if (!adapter->bar2) {
			dev_err(&pdev->dev, "cannot map device bar2 region\n");
			err = -ENOMEM;
			goto out_unmap_bar0;
		}
	}

	setup_memwin(adapter);
	err = adap_init0(adapter);
	setup_memwin_rdma(adapter);
	if (err)
		goto out_unmap_bar;

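	/* Allocate and wire up one net device per port; port_info is the
	 * netdev private area and ties each port back to the adapter.
	 */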
	for_each_port(adapter, i) {
		struct net_device *netdev;

		netdev = alloc_etherdev_mq(sizeof(struct port_info),
					   MAX_ETH_QSETS);
		if (!netdev) {
			err = -ENOMEM;
			goto out_free_dev;
		}

		SET_NETDEV_DEV(netdev, &pdev->dev);

		adapter->port[i] = netdev;
		pi = netdev_priv(netdev);
		pi->adapter = adapter;
		pi->xact_addr_filt = -1;
		pi->port_id = i;
		netdev->irq = pdev->irq;

		netdev->hw_features = NETIF_F_SG | TSO_FLAGS |
			NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
			NETIF_F_RXCSUM | NETIF_F_RXHASH |
			NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
		if (highdma)
			netdev->hw_features |= NETIF_F_HIGHDMA;
		netdev->features |= netdev->hw_features;
		netdev->vlan_features = netdev->features & VLAN_FEAT;

		netdev->priv_flags |= IFF_UNICAST_FLT;

		netdev->netdev_ops = &cxgb4_netdev_ops;
		SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops);
	}

	pci_set_drvdata(pdev, adapter);

	if (adapter->flags & FW_OK) {
		err = t4_port_init(adapter, func, func, 0);
		if (err)
			goto out_free_dev;
	}

	/*
	 * Configure queues and allocate tables now; they may be needed as
	 * soon as the first register_netdev completes.
	 */
	cfg_queues(adapter);

	adapter->l2t = t4_init_l2t();
	if (!adapter->l2t) {
		/* We tolerate a lack of L2T, giving up some functionality */
		dev_warn(&pdev->dev, "could not allocate L2T, continuing\n");
		adapter->params.offload = 0;
	}

	if (is_offload(adapter) && tid_init(&adapter->tids) < 0) {
		dev_warn(&pdev->dev, "could not allocate TID table, "
			 "continuing\n");
		adapter->params.offload = 0;
	}

	/* See what interrupts we'll be using */
	if (msi > 1 && enable_msix(adapter) == 0)
		adapter->flags |= USING_MSIX;
	else if (msi > 0 && pci_enable_msi(pdev) == 0)
		adapter->flags |= USING_MSI;

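	/* Set up the per-port RSS state before any netdev can go live.
	 * (Assumes init_rss() allocates the pi->rss indirection tables
	 * that free_some_resources() above releases.)
	 */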
	err = init_rss(adapter);
	if (err)
		goto out_free_dev;

	/*
	 * The card is now ready to go.  If any errors occur during device
	 * registration we do not fail the whole card but rather proceed only
	 * with the ports we manage to register successfully.  However we must
	 * register at least one net device.
	 */
	for_each_port(adapter, i) {
		pi = adap2pinfo(adapter, i);
		netif_set_real_num_tx_queues(adapter->port[i], pi->nqsets);
		netif_set_real_num_rx_queues(adapter->port[i], pi->nqsets);

		err = register_netdev(adapter->port[i]);
		if (err)
			break;
		adapter->chan_map[pi->tx_chan] = i;
		print_port_info(adapter->port[i]);
	}
	if (i == 0) {
		dev_err(&pdev->dev, "could not register any net devices\n");
		goto out_free_dev;
	}
	if (err) {
		dev_warn(&pdev->dev, "only %d net devices registered\n", i);
		err = 0;
	}

	if (cxgb4_debugfs_root) {
		adapter->debugfs_root = debugfs_create_dir(pci_name(pdev),
							   cxgb4_debugfs_root);
		setup_debugfs(adapter);
	}

	/* PCIe EEH recovery on powerpc platforms needs fundamental reset */
	pdev->needs_freset = 1;

	if (is_offload(adapter))
		attach_ulds(adapter);

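/*
 * Both paths land here: the fully-initialized primary function falls
 * through, while secondary functions jumped straight down.  Per-PF VF
 * counts come from the num_vf module parameter array; e.g. (assuming
 * the stock parameter name)
 *
 *	modprobe cxgb4 num_vf=4,0,0,0
 *
 * would instantiate four virtual functions on physical function 0.
 */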
sriov:
#ifdef CONFIG_PCI_IOV
	/* On the "goto sriov" path above no adapter was allocated, so
	 * adapter->chip must not be dereferenced here.  As a hedge we
	 * derive the chip family from the PCI device ID instead, assuming
	 * Chelsio's numbering (top nibble 0x4 on T4, 0x5 on T5 parts).
	 */
	max_no_pf = ((pdev->device >> 12) & 0xf) == 0x5 ?
		    NUM_OF_PF_WITH_SRIOV_T5 : NUM_OF_PF_WITH_SRIOV_T4;

	if (func < max_no_pf && num_vf[func] > 0)
		if (pci_enable_sriov(pdev, num_vf[func]) == 0)
			dev_info(&pdev->dev,
				 "instantiated %u virtual functions\n",
				 num_vf[func]);
#endif
	return 0;

 out_free_dev:
	free_some_resources(adapter);
 out_unmap_bar:
	if (!is_t4(adapter->chip))
		iounmap(adapter->bar2);
 out_unmap_bar0:
	iounmap(adapter->regs);
 out_free_adapter:
	kfree(adapter);
 out_disable_device:
	pci_disable_pcie_error_reporting(pdev);
	pci_disable_device(pdev);
 out_release_regions:
	pci_release_regions(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}

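/*
 * remove_one - PCI remove routine.  Undoes init_one() in roughly
 * reverse order: ULDs, net devices, debugfs, filter state, queues,
 * and finally the PCI-level resources.
 */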
static void remove_one(struct pci_dev *pdev)
{
	struct adapter *adapter = pci_get_drvdata(pdev);

#ifdef CONFIG_PCI_IOV
	pci_disable_sriov(pdev);

#endif

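	/* Secondary functions never allocated an adapter or set drvdata,
	 * so everything below is guarded by the drvdata pointer; for them
	 * only the PCI regions need releasing.
	 */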
	if (adapter) {
		int i;

		if (is_offload(adapter))
			detach_ulds(adapter);

		for_each_port(adapter, i)
			if (adapter->port[i]->reg_state == NETREG_REGISTERED)
				unregister_netdev(adapter->port[i]);

		if (adapter->debugfs_root)
			debugfs_remove_recursive(adapter->debugfs_root);

		/* If we allocated filters, free up state associated with any
		 * valid filters ...
		 */
		if (adapter->tids.ftid_tab) {
			struct filter_entry *f = &adapter->tids.ftid_tab[0];
			for (i = 0; i < (adapter->tids.nftids +
					adapter->tids.nsftids); i++, f++)
				if (f->valid)
					clear_filter(adapter, f);
		}

		if (adapter->flags & FULL_INIT_DONE)
			cxgb_down(adapter);

		free_some_resources(adapter);
		iounmap(adapter->regs);
		if (!is_t4(adapter->chip))
			iounmap(adapter->bar2);
		kfree(adapter);
		pci_disable_pcie_error_reporting(pdev);
		pci_disable_device(pdev);
		pci_release_regions(pdev);
		pci_set_drvdata(pdev, NULL);
	} else
		pci_release_regions(pdev);
}

static struct pci_driver cxgb4_driver = {
	.name     = KBUILD_MODNAME,
	.id_table = cxgb4_pci_tbl,
	.probe    = init_one,
	.remove   = remove_one,
	.err_handler = &cxgb4_eeh,
};

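/* Module init/exit: bring up the shared workqueue and the debugfs
 * root before registering the PCI driver, and tear them down in the
 * opposite order on unload.
 */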
static int __init cxgb4_init_module(void)
{
	int ret;

	workq = create_singlethread_workqueue("cxgb4");
	if (!workq)
		return -ENOMEM;

	/* Debugfs support is optional, just warn if this fails */
	cxgb4_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL);
	if (!cxgb4_debugfs_root)
		pr_warn("could not create debugfs entry, continuing\n");

	ret = pci_register_driver(&cxgb4_driver);
	if (ret < 0)
		debugfs_remove(cxgb4_debugfs_root);
	return ret;
}

static void __exit cxgb4_cleanup_module(void)
{
	pci_unregister_driver(&cxgb4_driver);
	debugfs_remove(cxgb4_debugfs_root);	/* NULL ok */
	flush_workqueue(workq);
	destroy_workqueue(workq);
}

module_init(cxgb4_init_module);
module_exit(cxgb4_cleanup_module);