/*
 * This file is part of the Chelsio T4 Ethernet driver for Linux.
 *
 * Copyright (c) 2003-2016 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

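/* Illustrative note (not part of the original source): defining pr_fmt()
 * before the includes makes every pr_*() call in this file prefix its
 * output with the module name, so a call such as
 *
 *	pr_info("unsupported speed: %d\n", speed);
 *
 * prints "cxgb4: unsupported speed: ..." since KBUILD_MODNAME is "cxgb4".
 */
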
#include <linux/bitmap.h>
#include <linux/crc32.h>
#include <linux/ctype.h>
#include <linux/debugfs.h>
#include <linux/err.h>
#include <linux/etherdevice.h>
#include <linux/firmware.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/init.h>
#include <linux/log2.h>
#include <linux/mdio.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mutex.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/aer.h>
#include <linux/rtnetlink.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/sockios.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>
#include <net/neighbour.h>
#include <net/netevent.h>
#include <net/addrconf.h>
#include <net/bonding.h>
#include <linux/uaccess.h>
#include <linux/crash_dump.h>
#include <net/udp_tunnel.h>
#include <net/xfrm.h>
#if IS_ENABLED(CONFIG_CHELSIO_TLS_DEVICE)
#include <net/tls.h>
#endif

#include "cxgb4.h"
#include "cxgb4_filter.h"
#include "t4_regs.h"
#include "t4_values.h"
#include "t4_msg.h"
#include "t4fw_api.h"
#include "t4fw_version.h"
#include "cxgb4_dcb.h"
#include "srq.h"
#include "cxgb4_debugfs.h"
#include "clip_tbl.h"
#include "l2t.h"
#include "smt.h"
#include "sched.h"
#include "cxgb4_tc_u32.h"
#include "cxgb4_tc_flower.h"
#include "cxgb4_tc_mqprio.h"
#include "cxgb4_tc_matchall.h"
#include "cxgb4_ptp.h"
#include "cxgb4_cudbg.h"

char cxgb4_driver_name[] = KBUILD_MODNAME;

#define DRV_DESC "Chelsio T4/T5/T6 Network Driver"

#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
			 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
			 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)

/* Macros needed to support the PCI Device ID Table ...
 */
#define CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN \
	static const struct pci_device_id cxgb4_pci_tbl[] = {
#define CXGB4_UNIFIED_PF 0x4

#define CH_PCI_DEVICE_ID_FUNCTION CXGB4_UNIFIED_PF

/* Include PCI Device IDs for both PF4 and PF0-3 so our PCI probe() routine is
 * called for both.
 */
#define CH_PCI_DEVICE_ID_FUNCTION2 0x0

#define CH_PCI_ID_TABLE_ENTRY(devid) \
		{PCI_VDEVICE(CHELSIO, (devid)), CXGB4_UNIFIED_PF}

#define CH_PCI_DEVICE_ID_TABLE_DEFINE_END \
		{ 0, } \
	}

#include "t4_pci_id_tbl.h"

#define FW4_FNAME "cxgb4/t4fw.bin"
#define FW5_FNAME "cxgb4/t5fw.bin"
#define FW6_FNAME "cxgb4/t6fw.bin"
#define FW4_CFNAME "cxgb4/t4-config.txt"
#define FW5_CFNAME "cxgb4/t5-config.txt"
#define FW6_CFNAME "cxgb4/t6-config.txt"
#define PHY_AQ1202_FIRMWARE "cxgb4/aq1202_fw.cld"
#define PHY_BCM84834_FIRMWARE "cxgb4/bcm8483.bin"
#define PHY_AQ1202_DEVICEID 0x4409
#define PHY_BCM84834_DEVICEID 0x4486

MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR("Chelsio Communications");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_DEVICE_TABLE(pci, cxgb4_pci_tbl);
MODULE_FIRMWARE(FW4_FNAME);
MODULE_FIRMWARE(FW5_FNAME);
MODULE_FIRMWARE(FW6_FNAME);

/*
 * The driver uses the best interrupt scheme available on a platform in the
 * order MSI-X, MSI, legacy INTx interrupts.  This parameter determines which
 * of these schemes the driver may consider as follows:
 *
 * msi = 2: choose from among all three options
 * msi = 1: only consider MSI and INTx interrupts
 * msi = 0: force INTx interrupts
 */
static int msi = 2;

module_param(msi, int, 0644);
MODULE_PARM_DESC(msi, "whether to use INTx (0), MSI (1) or MSI-X (2)");

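/* Usage sketch (illustrative, not part of the original source): the
 * parameter can be given at load time, e.g. "modprobe cxgb4 msi=1", or,
 * since it is registered with mode 0644, changed afterwards through
 * /sys/module/cxgb4/parameters/msi; a later write only influences
 * interrupt setup performed after the write.
 */
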
/*
 * Normally we tell the chip to deliver Ingress Packets into our DMA buffers
 * offset by 2 bytes in order to have the IP headers line up on 4-byte
 * boundaries.  This is a requirement for many architectures which will throw
 * a machine check fault if an attempt is made to access one of the 4-byte IP
 * header fields on a non-4-byte boundary.  And it's a major performance issue
 * even on some architectures which allow it like some implementations of the
 * x86 ISA.  However, some architectures don't mind this and for some very
 * edge-case performance sensitive applications (like forwarding large volumes
 * of small packets), setting this DMA offset to 0 will decrease the number of
 * PCI-E Bus transfers enough to measurably affect performance.
 */
static int rx_dma_offset = 2;

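/* Worked example of the alignment above (illustrative): an Ethernet header
 * is 14 bytes, so with rx_dma_offset = 2 the IP header begins at buffer
 * offset 2 + 14 = 16, a 4-byte boundary; with an offset of 0 it would begin
 * at offset 14, and every 32-bit IP header field would straddle a 4-byte
 * boundary.
 */
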
/* TX Queue select used to determine what algorithm to use for selecting TX
 * queue. Select between the kernel provided function (select_queue=0) or user
 * cxgb_select_queue function (select_queue=1)
 *
 * Default: select_queue=0
 */
static int select_queue;
module_param(select_queue, int, 0644);
MODULE_PARM_DESC(select_queue,
		 "Select between kernel provided method of selecting or driver method of selecting TX queue. Default is kernel method.");

static struct dentry *cxgb4_debugfs_root;

LIST_HEAD(adapter_list);
DEFINE_MUTEX(uld_mutex);
LIST_HEAD(uld_list);

static int cfg_queues(struct adapter *adap);

static void link_report(struct net_device *dev)
{
	if (!netif_carrier_ok(dev))
		netdev_info(dev, "link down\n");
	else {
		static const char *fc[] = { "no", "Rx", "Tx", "Tx/Rx" };

		const char *s;
		const struct port_info *p = netdev_priv(dev);

		switch (p->link_cfg.speed) {
		case 100:
			s = "100Mbps";
			break;
		case 1000:
			s = "1Gbps";
			break;
		case 10000:
			s = "10Gbps";
			break;
		case 25000:
			s = "25Gbps";
			break;
		case 40000:
			s = "40Gbps";
			break;
		case 50000:
			s = "50Gbps";
			break;
		case 100000:
			s = "100Gbps";
			break;
		default:
			pr_info("%s: unsupported speed: %d\n",
				dev->name, p->link_cfg.speed);
			return;
		}

		netdev_info(dev, "link up, %s, full-duplex, %s PAUSE\n", s,
			    fc[p->link_cfg.fc]);
	}
}

#ifdef CONFIG_CHELSIO_T4_DCB
/* Set up/tear down Data Center Bridging Priority mapping for a net device. */
static void dcb_tx_queue_prio_enable(struct net_device *dev, int enable)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;
	struct sge_eth_txq *txq = &adap->sge.ethtxq[pi->first_qset];
	int i;

	/* We use a simple mapping of Port TX Queue Index to DCB
	 * Priority when we're enabling DCB.
	 */
	for (i = 0; i < pi->nqsets; i++, txq++) {
		u32 name, value;
		int err;

		name = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
			FW_PARAMS_PARAM_X_V(
				FW_PARAMS_PARAM_DMAQ_EQ_DCBPRIO_ETH) |
			FW_PARAMS_PARAM_YZ_V(txq->q.cntxt_id));
		value = enable ? i : 0xffffffff;

		/* Since we can be called while atomic (from "interrupt
		 * level") we need to issue the Set Parameters Command
		 * without sleeping (timeout < 0).
		 */
		err = t4_set_params_timeout(adap, adap->mbox, adap->pf, 0, 1,
					    &name, &value,
					    -FW_CMD_MAX_TIMEOUT);

		if (err)
			dev_err(adap->pdev_dev,
				"Can't %s DCB Priority on port %d, TX Queue %d: err=%d\n",
				enable ? "set" : "unset", pi->port_id, i, -err);
		else
			txq->dcb_prio = enable ? value : 0;
	}
}

int cxgb4_dcb_enabled(const struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);

	if (!pi->dcb.enabled)
		return 0;

	return ((pi->dcb.state == CXGB4_DCB_STATE_FW_ALLSYNCED) ||
		(pi->dcb.state == CXGB4_DCB_STATE_HOST));
}
#endif /* CONFIG_CHELSIO_T4_DCB */

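/* Illustrative example (an assumption, not from the original source): with
 * DCB enabled on a port configured with nqsets = 8, the loop in
 * dcb_tx_queue_prio_enable() binds TX queue 0 to DCB Priority 0, queue 1 to
 * Priority 1, and so on; on disable it writes 0xffffffff, which the
 * firmware appears to treat as "no priority bound to this queue".
 */
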
void t4_os_link_changed(struct adapter *adapter, int port_id, int link_stat)
{
	struct net_device *dev = adapter->port[port_id];

	/* Skip changes from disabled ports. */
	if (netif_running(dev) && link_stat != netif_carrier_ok(dev)) {
		if (link_stat)
			netif_carrier_on(dev);
		else {
#ifdef CONFIG_CHELSIO_T4_DCB
			if (cxgb4_dcb_enabled(dev)) {
				cxgb4_dcb_reset(dev);
				dcb_tx_queue_prio_enable(dev, false);
			}
#endif /* CONFIG_CHELSIO_T4_DCB */
			netif_carrier_off(dev);
		}

		link_report(dev);
	}
}

void t4_os_portmod_changed(struct adapter *adap, int port_id)
{
	static const char *mod_str[] = {
		NULL, "LR", "SR", "ER", "passive DA", "active DA", "LRM"
	};

	struct net_device *dev = adap->port[port_id];
	struct port_info *pi = netdev_priv(dev);

	if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
		netdev_info(dev, "port module unplugged\n");
	else if (pi->mod_type < ARRAY_SIZE(mod_str))
		netdev_info(dev, "%s module inserted\n", mod_str[pi->mod_type]);
	else if (pi->mod_type == FW_PORT_MOD_TYPE_NOTSUPPORTED)
		netdev_info(dev, "%s: unsupported port module inserted\n",
			    dev->name);
	else if (pi->mod_type == FW_PORT_MOD_TYPE_UNKNOWN)
		netdev_info(dev, "%s: unknown port module inserted\n",
			    dev->name);
	else if (pi->mod_type == FW_PORT_MOD_TYPE_ERROR)
		netdev_info(dev, "%s: transceiver module error\n", dev->name);
	else
		netdev_info(dev, "%s: unknown module type %d inserted\n",
			    dev->name, pi->mod_type);

	/* If the interface is running, then we'll need any "sticky" Link
	 * Parameters redone with a new Transceiver Module.
	 */
	pi->link_cfg.redo_l1cfg = netif_running(dev);
}

int dbfifo_int_thresh = 10; /* 10 == 640 entry threshold */
module_param(dbfifo_int_thresh, int, 0644);
MODULE_PARM_DESC(dbfifo_int_thresh, "doorbell fifo interrupt threshold");

/*
 * usecs to sleep while draining the dbfifo
 */
static int dbfifo_drain_delay = 1000;
module_param(dbfifo_drain_delay, int, 0644);
MODULE_PARM_DESC(dbfifo_drain_delay,
		 "usecs to sleep while draining the dbfifo");

static inline int cxgb4_set_addr_hash(struct port_info *pi)
{
	struct adapter *adap = pi->adapter;
	u64 vec = 0;
	bool ucast = false;
	struct hash_mac_addr *entry;

	/* Calculate the hash vector for the updated list and program it */
	list_for_each_entry(entry, &adap->mac_hlist, list) {
		ucast |= is_unicast_ether_addr(entry->addr);
		vec |= (1ULL << hash_mac_addr(entry->addr));
	}
	return t4_set_addr_hash(adap, adap->mbox, pi->viid, ucast,
				vec, false);
}

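/* Worked example (illustrative): hash_mac_addr() folds each 6-byte MAC
 * address down to a bucket index, and every list entry sets one bit of the
 * 64-bit vector handed to t4_set_addr_hash().  Two addresses hashing to
 * buckets 3 and 10 yield vec = (1ULL << 3) | (1ULL << 10); any address
 * that hashes to a set bucket then passes this (inexact) hardware filter.
 */
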
static int cxgb4_mac_sync(struct net_device *netdev, const u8 *mac_addr)
{
	struct port_info *pi = netdev_priv(netdev);
	struct adapter *adap = pi->adapter;
	int ret;
	u64 mhash = 0;
	u64 uhash = 0;
	/* idx stores the index of allocated filters,
	 * its size should be modified based on the number of
	 * MAC addresses that we allocate filters for
	 */

	u16 idx[1] = {};
	bool free = false;
	bool ucast = is_unicast_ether_addr(mac_addr);
	const u8 *maclist[1] = {mac_addr};
	struct hash_mac_addr *new_entry;

	ret = cxgb4_alloc_mac_filt(adap, pi->viid, free, 1, maclist,
				   idx, ucast ? &uhash : &mhash, false);
	if (ret < 0)
		goto out;
	/* If the address was placed in the hash region (uhash/mhash
	 * non-zero), add it to the hash address list so that at the end
	 * we can recalculate the hash vector for the whole list and
	 * program it.
	 */
	if (uhash || mhash) {
		new_entry = kzalloc(sizeof(*new_entry), GFP_ATOMIC);
		if (!new_entry)
			return -ENOMEM;
		ether_addr_copy(new_entry->addr, mac_addr);
		list_add_tail(&new_entry->list, &adap->mac_hlist);
		ret = cxgb4_set_addr_hash(pi);
	}
out:
	return ret < 0 ? ret : 0;
}

static int cxgb4_mac_unsync(struct net_device *netdev, const u8 *mac_addr)
{
	struct port_info *pi = netdev_priv(netdev);
	struct adapter *adap = pi->adapter;
	int ret;
	const u8 *maclist[1] = {mac_addr};
	struct hash_mac_addr *entry, *tmp;

	/* If the MAC address to be removed is in the hash addr
	 * list, delete it from the list and update hash vector
	 */
	list_for_each_entry_safe(entry, tmp, &adap->mac_hlist, list) {
		if (ether_addr_equal(entry->addr, mac_addr)) {
			list_del(&entry->list);
			kfree(entry);
			return cxgb4_set_addr_hash(pi);
		}
	}

	ret = cxgb4_free_mac_filt(adap, pi->viid, 1, maclist, false);
	return ret < 0 ? -EINVAL : 0;
}

/*
 * Set Rx properties of a port, such as promiscuity, address filters, and MTU.
 * If @mtu is -1 it is left unchanged.
 */
static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	__dev_uc_sync(dev, cxgb4_mac_sync, cxgb4_mac_unsync);
	__dev_mc_sync(dev, cxgb4_mac_sync, cxgb4_mac_unsync);

	return t4_set_rxmode(adapter, adapter->mbox, pi->viid, pi->viid_mirror,
			     mtu, (dev->flags & IFF_PROMISC) ? 1 : 0,
			     (dev->flags & IFF_ALLMULTI) ? 1 : 0, 1, -1,
			     sleep_ok);
}

/**
 *	cxgb4_change_mac - Update match filter for a MAC address.
 *	@pi: the port_info
 *	@viid: the VI id
 *	@tcam_idx: TCAM index of existing filter for old value of MAC address,
 *		   or -1
 *	@addr: the new MAC address value
 *	@persist: whether a new MAC allocation should be persistent
 *	@smt_idx: the destination to store the new SMT index.
 *
 *	Modifies an MPS filter and sets it to the new MAC address if
 *	@tcam_idx >= 0, or adds the MAC address to a new filter if
 *	@tcam_idx < 0. In the latter case the address is added persistently
 *	if @persist is %true.
 *	Addresses are programmed into the hash region if the TCAM runs out
 *	of entries.
 */
int cxgb4_change_mac(struct port_info *pi, unsigned int viid,
		     int *tcam_idx, const u8 *addr, bool persist,
		     u8 *smt_idx)
{
	struct adapter *adapter = pi->adapter;
	struct hash_mac_addr *entry, *new_entry;
	int ret;

	ret = t4_change_mac(adapter, adapter->mbox, viid,
			    *tcam_idx, addr, persist, smt_idx);
	/* We ran out of TCAM entries. try programming hash region. */
	if (ret == -ENOMEM) {
		/* If the MAC address to be updated is in the hash addr
		 * list, update it from the list
		 */
		list_for_each_entry(entry, &adapter->mac_hlist, list) {
			if (entry->iface_mac) {
				ether_addr_copy(entry->addr, addr);
				goto set_hash;
			}
		}
		new_entry = kzalloc(sizeof(*new_entry), GFP_KERNEL);
		if (!new_entry)
			return -ENOMEM;
		ether_addr_copy(new_entry->addr, addr);
		new_entry->iface_mac = true;
		list_add_tail(&new_entry->list, &adapter->mac_hlist);
set_hash:
		ret = cxgb4_set_addr_hash(pi);
	} else if (ret >= 0) {
		*tcam_idx = ret;
		ret = 0;
	}

	return ret;
}

/*
 *	link_start - enable a port
 *	@dev: the port to enable
 *
 *	Performs the MAC and PHY actions needed to enable a port.
 */
static int link_start(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	unsigned int mb = pi->adapter->mbox;
	int ret;

	/*
	 * We do not set address filters and promiscuity here, the stack does
	 * that step explicitly.
	 */
	ret = t4_set_rxmode(pi->adapter, mb, pi->viid, pi->viid_mirror,
			    dev->mtu, -1, -1, -1,
			    !!(dev->features & NETIF_F_HW_VLAN_CTAG_RX), true);
	if (ret == 0)
		ret = cxgb4_update_mac_filt(pi, pi->viid, &pi->xact_addr_filt,
					    dev->dev_addr, true, &pi->smt_idx);
	if (ret == 0)
		ret = t4_link_l1cfg(pi->adapter, mb, pi->tx_chan,
				    &pi->link_cfg);
	if (ret == 0) {
		local_bh_disable();
		ret = t4_enable_pi_params(pi->adapter, mb, pi, true,
					  true, CXGB4_DCB_ENABLED);
		local_bh_enable();
	}

	return ret;
}

#ifdef CONFIG_CHELSIO_T4_DCB
/* Handle a Data Center Bridging update message from the firmware. */
static void dcb_rpl(struct adapter *adap, const struct fw_port_cmd *pcmd)
{
	int port = FW_PORT_CMD_PORTID_G(ntohl(pcmd->op_to_portid));
	struct net_device *dev = adap->port[adap->chan_map[port]];
	int old_dcb_enabled = cxgb4_dcb_enabled(dev);
	int new_dcb_enabled;

	cxgb4_dcb_handle_fw_update(adap, pcmd);
	new_dcb_enabled = cxgb4_dcb_enabled(dev);

	/* If the DCB has become enabled or disabled on the port then we're
	 * going to need to set up/tear down DCB Priority parameters for the
	 * TX Queues associated with the port.
	 */
	if (new_dcb_enabled != old_dcb_enabled)
		dcb_tx_queue_prio_enable(dev, new_dcb_enabled);
}
#endif /* CONFIG_CHELSIO_T4_DCB */

/* Response queue handler for the FW event queue.
 */
static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp,
			  const struct pkt_gl *gl)
{
	u8 opcode = ((const struct rss_header *)rsp)->opcode;

	rsp++;                                          /* skip RSS header */

	/* FW can send EGR_UPDATEs encapsulated in a CPL_FW4_MSG.
	 */
	if (unlikely(opcode == CPL_FW4_MSG &&
		     ((const struct cpl_fw4_msg *)rsp)->type == FW_TYPE_RSSCPL)) {
		rsp++;
		opcode = ((const struct rss_header *)rsp)->opcode;
		rsp++;
		if (opcode != CPL_SGE_EGR_UPDATE) {
			dev_err(q->adap->pdev_dev, "unexpected FW4/CPL %#x on FW event queue\n",
				opcode);
			goto out;
		}
	}

	if (likely(opcode == CPL_SGE_EGR_UPDATE)) {
		const struct cpl_sge_egr_update *p = (void *)rsp;
		unsigned int qid = EGR_QID_G(ntohl(p->opcode_qid));
		struct sge_txq *txq;

		txq = q->adap->sge.egr_map[qid - q->adap->sge.egr_start];
		txq->restarts++;
		if (txq->q_type == CXGB4_TXQ_ETH) {
			struct sge_eth_txq *eq;

			eq = container_of(txq, struct sge_eth_txq, q);
			t4_sge_eth_txq_egress_update(q->adap, eq, -1);
		} else {
			struct sge_uld_txq *oq;

			oq = container_of(txq, struct sge_uld_txq, q);
			tasklet_schedule(&oq->qresume_tsk);
		}
	} else if (opcode == CPL_FW6_MSG || opcode == CPL_FW4_MSG) {
		const struct cpl_fw6_msg *p = (void *)rsp;

#ifdef CONFIG_CHELSIO_T4_DCB
		const struct fw_port_cmd *pcmd = (const void *)p->data;
		unsigned int cmd = FW_CMD_OP_G(ntohl(pcmd->op_to_portid));
		unsigned int action =
			FW_PORT_CMD_ACTION_G(ntohl(pcmd->action_to_len16));

		if (cmd == FW_PORT_CMD &&
		    (action == FW_PORT_ACTION_GET_PORT_INFO ||
		     action == FW_PORT_ACTION_GET_PORT_INFO32)) {
			int port = FW_PORT_CMD_PORTID_G(
					be32_to_cpu(pcmd->op_to_portid));
			struct net_device *dev;
			int dcbxdis, state_input;

			dev = q->adap->port[q->adap->chan_map[port]];
			dcbxdis = (action == FW_PORT_ACTION_GET_PORT_INFO
				   ? !!(pcmd->u.info.dcbxdis_pkd & FW_PORT_CMD_DCBXDIS_F)
				   : !!(be32_to_cpu(pcmd->u.info32.lstatus32_to_cbllen32)
					& FW_PORT_CMD_DCBXDIS32_F));
			state_input = (dcbxdis
				       ? CXGB4_DCB_INPUT_FW_DISABLED
				       : CXGB4_DCB_INPUT_FW_ENABLED);

			cxgb4_dcb_state_fsm(dev, state_input);
		}

		if (cmd == FW_PORT_CMD &&
		    action == FW_PORT_ACTION_L2_DCB_CFG)
			dcb_rpl(q->adap, pcmd);
		else
#endif
			if (p->type == 0)
				t4_handle_fw_rpl(q->adap, p->data);
	} else if (opcode == CPL_L2T_WRITE_RPL) {
		const struct cpl_l2t_write_rpl *p = (void *)rsp;

		do_l2t_write_rpl(q->adap, p);
	} else if (opcode == CPL_SMT_WRITE_RPL) {
		const struct cpl_smt_write_rpl *p = (void *)rsp;

		do_smt_write_rpl(q->adap, p);
	} else if (opcode == CPL_SET_TCB_RPL) {
		const struct cpl_set_tcb_rpl *p = (void *)rsp;

		filter_rpl(q->adap, p);
	} else if (opcode == CPL_ACT_OPEN_RPL) {
		const struct cpl_act_open_rpl *p = (void *)rsp;

		hash_filter_rpl(q->adap, p);
	} else if (opcode == CPL_ABORT_RPL_RSS) {
		const struct cpl_abort_rpl_rss *p = (void *)rsp;

		hash_del_filter_rpl(q->adap, p);
	} else if (opcode == CPL_SRQ_TABLE_RPL) {
		const struct cpl_srq_table_rpl *p = (void *)rsp;

		do_srq_table_rpl(q->adap, p);
	} else
		dev_err(q->adap->pdev_dev,
			"unexpected CPL %#x on FW event queue\n", opcode);
out:
	return 0;
}

static void disable_msi(struct adapter *adapter)
{
	if (adapter->flags & CXGB4_USING_MSIX) {
		pci_disable_msix(adapter->pdev);
		adapter->flags &= ~CXGB4_USING_MSIX;
	} else if (adapter->flags & CXGB4_USING_MSI) {
		pci_disable_msi(adapter->pdev);
		adapter->flags &= ~CXGB4_USING_MSI;
	}
}

/*
 * Interrupt handler for non-data events used with MSI-X.
 */
static irqreturn_t t4_nondata_intr(int irq, void *cookie)
{
	struct adapter *adap = cookie;
	u32 v = t4_read_reg(adap, MYPF_REG(PL_PF_INT_CAUSE_A));

	if (v & PFSW_F) {
		adap->swintr = 1;
		t4_write_reg(adap, MYPF_REG(PL_PF_INT_CAUSE_A), v);
	}
	if (adap->flags & CXGB4_MASTER_PF)
		t4_slow_intr_handler(adap);
	return IRQ_HANDLED;
}

int cxgb4_set_msix_aff(struct adapter *adap, unsigned short vec,
		       cpumask_var_t *aff_mask, int idx)
{
	int rv;

	if (!zalloc_cpumask_var(aff_mask, GFP_KERNEL)) {
		dev_err(adap->pdev_dev, "alloc_cpumask_var failed\n");
		return -ENOMEM;
	}

	cpumask_set_cpu(cpumask_local_spread(idx, dev_to_node(adap->pdev_dev)),
			*aff_mask);

	rv = irq_set_affinity_hint(vec, *aff_mask);
	if (rv)
		dev_warn(adap->pdev_dev,
			 "irq_set_affinity_hint %u failed %d\n",
			 vec, rv);

	return 0;
}

void cxgb4_clear_msix_aff(unsigned short vec, cpumask_var_t aff_mask)
{
	irq_set_affinity_hint(vec, NULL);
	free_cpumask_var(aff_mask);
}

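/* Example of the CPU spreading above (illustrative sketch): for an adapter
 * attached to NUMA node 1 whose CPUs are 8-15, cpumask_local_spread(0, 1)
 * returns 8, index 1 returns 9, and so on, wrapping to remote CPUs only
 * after the local node is exhausted.  The resulting hint is exposed to
 * userspace via /proc/irq/<vec>/affinity_hint.
 */
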
static int request_msix_queue_irqs(struct adapter *adap)
{
	struct sge *s = &adap->sge;
	struct msix_info *minfo;
	int err, ethqidx;

	if (s->fwevtq_msix_idx < 0)
		return -ENOMEM;

	err = request_irq(adap->msix_info[s->fwevtq_msix_idx].vec,
			  t4_sge_intr_msix, 0,
			  adap->msix_info[s->fwevtq_msix_idx].desc,
			  &s->fw_evtq);
	if (err)
		return err;

	for_each_ethrxq(s, ethqidx) {
		minfo = s->ethrxq[ethqidx].msix;
		err = request_irq(minfo->vec,
				  t4_sge_intr_msix, 0,
				  minfo->desc,
				  &s->ethrxq[ethqidx].rspq);
		if (err)
			goto unwind;

		cxgb4_set_msix_aff(adap, minfo->vec,
				   &minfo->aff_mask, ethqidx);
	}
	return 0;

unwind:
	while (--ethqidx >= 0) {
		minfo = s->ethrxq[ethqidx].msix;
		cxgb4_clear_msix_aff(minfo->vec, minfo->aff_mask);
		free_irq(minfo->vec, &s->ethrxq[ethqidx].rspq);
	}
	free_irq(adap->msix_info[s->fwevtq_msix_idx].vec, &s->fw_evtq);
	return err;
}

static void free_msix_queue_irqs(struct adapter *adap)
{
	struct sge *s = &adap->sge;
	struct msix_info *minfo;
	int i;

	free_irq(adap->msix_info[s->fwevtq_msix_idx].vec, &s->fw_evtq);
	for_each_ethrxq(s, i) {
		minfo = s->ethrxq[i].msix;
		cxgb4_clear_msix_aff(minfo->vec, minfo->aff_mask);
		free_irq(minfo->vec, &s->ethrxq[i].rspq);
	}
}

static int setup_ppod_edram(struct adapter *adap)
{
	unsigned int param, val;
	int ret;

	/* Driver sends FW_PARAMS_PARAM_DEV_PPOD_EDRAM read command to check
	 * if firmware supports ppod edram feature or not. If firmware
	 * returns 1, then driver can enable this feature by sending
	 * FW_PARAMS_PARAM_DEV_PPOD_EDRAM write command with value 1 to
	 * enable ppod edram feature.
	 */
	param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
		 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PPOD_EDRAM));

	ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, &param, &val);
	if (ret < 0) {
		dev_warn(adap->pdev_dev,
			 "querying PPOD_EDRAM support failed: %d\n",
			 ret);
		return -1;
	}

	if (val != 1)
		return -1;

	ret = t4_set_params(adap, adap->mbox, adap->pf, 0, 1, &param, &val);
	if (ret < 0) {
		dev_err(adap->pdev_dev,
			"setting PPOD_EDRAM failed: %d\n", ret);
		return -1;
	}
	return 0;
}

static void adap_config_hpfilter(struct adapter *adapter)
{
	u32 param, val = 0;
	int ret;

	/* Enable HP filter region. Older fw will fail this request and
	 * it is fine.
	 */
	param = FW_PARAM_DEV(HPFILTER_REGION_SUPPORT);
	ret = t4_set_params(adapter, adapter->mbox, adapter->pf, 0,
			    1, &param, &val);

	/* An error means FW doesn't know about HP filter support,
	 * it's not a problem, don't return an error.
	 */
	if (ret < 0)
		dev_err(adapter->pdev_dev,
			"HP filter region isn't supported by FW\n");
}

static int cxgb4_config_rss(const struct port_info *pi, u16 *rss,
			    u16 rss_size, u16 viid)
{
	struct adapter *adap = pi->adapter;
	int ret;

	ret = t4_config_rss_range(adap, adap->mbox, viid, 0, rss_size, rss,
				  rss_size);
	if (ret)
		return ret;

	/* If Tunnel All Lookup isn't specified in the global RSS
	 * Configuration, then we need to specify a default Ingress
	 * Queue for any ingress packets which aren't hashed.  We'll
	 * use our first ingress queue ...
	 */
	return t4_config_vi_rss(adap, adap->mbox, viid,
				FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_F |
				FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F |
				FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_F |
				FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F |
				FW_RSS_VI_CONFIG_CMD_UDPEN_F,
				rss[0]);
}

/**
 *	cxgb4_write_rss - write the RSS table for a given port
 *	@pi: the port
 *	@queues: array of queue indices for RSS
 *
 *	Sets up the portion of the HW RSS table for the port's VI to distribute
 *	packets to the Rx queues in @queues.
 *	Should never be called before setting up sge eth rx queues
 */
int cxgb4_write_rss(const struct port_info *pi, const u16 *queues)
{
	struct adapter *adapter = pi->adapter;
	const struct sge_eth_rxq *rxq;
	int i, err;
	u16 *rss;

	rxq = &adapter->sge.ethrxq[pi->first_qset];
	rss = kmalloc_array(pi->rss_size, sizeof(u16), GFP_KERNEL);
	if (!rss)
		return -ENOMEM;

	/* map the queue indices to queue ids */
	for (i = 0; i < pi->rss_size; i++, queues++)
		rss[i] = rxq[*queues].rspq.abs_id;

	err = cxgb4_config_rss(pi, rss, pi->rss_size, pi->viid);
	kfree(rss);
	return err;
}

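/* Usage sketch (illustrative, not part of the original source): callers
 * such as the ethtool RSS-indirection path hand in pi->rss_size queue
 * indices relative to the port's first queue set, e.g. with four queue
 * sets:
 *
 *	for (i = 0; i < pi->rss_size; i++)
 *		pi->rss[i] = i % 4;
 *	err = cxgb4_write_rss(pi, pi->rss);
 *
 * cxgb4_write_rss() then translates each index into the absolute response
 * queue id the hardware expects before programming the table.
 */
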
/**
 *	setup_rss - configure RSS
 *	@adap: the adapter
 *
 *	Sets up RSS for each port.
 */
static int setup_rss(struct adapter *adap)
{
	int i, j, err;

	for_each_port(adap, i) {
		const struct port_info *pi = adap2pinfo(adap, i);

		/* Fill default values with equal distribution */
		for (j = 0; j < pi->rss_size; j++)
			pi->rss[j] = j % pi->nqsets;

		err = cxgb4_write_rss(pi, pi->rss);
		if (err)
			return err;
	}
	return 0;
}

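/* Worked example of the default fill above (illustrative): with
 * pi->rss_size = 64 and pi->nqsets = 4, the table becomes
 * 0, 1, 2, 3, 0, 1, 2, 3, ..., so RSS hash buckets are spread evenly
 * across the port's four Rx queue sets.
 */
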
/*
 * Return the channel of the ingress queue with the given qid.
 */
static unsigned int rxq_to_chan(const struct sge *p, unsigned int qid)
{
	qid -= p->ingr_start;
	return netdev2pinfo(p->ingr_map[qid]->netdev)->tx_chan;
}

void cxgb4_quiesce_rx(struct sge_rspq *q)
{
	if (q->handler)
		napi_disable(&q->napi);
}

/*
 * Wait until all NAPI handlers are descheduled.
 */
static void quiesce_rx(struct adapter *adap)
{
	int i;

	for (i = 0; i < adap->sge.ingr_sz; i++) {
		struct sge_rspq *q = adap->sge.ingr_map[i];

		if (!q)
			continue;

		cxgb4_quiesce_rx(q);
	}
}

/* Disable interrupt and napi handler */
static void disable_interrupts(struct adapter *adap)
{
	struct sge *s = &adap->sge;

	if (adap->flags & CXGB4_FULL_INIT_DONE) {
		t4_intr_disable(adap);
		if (adap->flags & CXGB4_USING_MSIX) {
			free_msix_queue_irqs(adap);
			free_irq(adap->msix_info[s->nd_msix_idx].vec,
				 adap);
		} else {
			free_irq(adap->pdev->irq, adap);
		}
		quiesce_rx(adap);
	}
}

void cxgb4_enable_rx(struct adapter *adap, struct sge_rspq *q)
{
	if (q->handler)
		napi_enable(&q->napi);

	/* 0-increment GTS to start the timer and enable interrupts */
	t4_write_reg(adap, MYPF_REG(SGE_PF_GTS_A),
		     SEINTARM_V(q->intr_params) |
		     INGRESSQID_V(q->cntxt_id));
}

/*
 * Enable NAPI scheduling and interrupt generation for all Rx queues.
 */
static void enable_rx(struct adapter *adap)
{
	int i;

	for (i = 0; i < adap->sge.ingr_sz; i++) {
		struct sge_rspq *q = adap->sge.ingr_map[i];

		if (!q)
			continue;

		cxgb4_enable_rx(adap, q);
	}
}

static int setup_non_data_intr(struct adapter *adap)
{
	int msix;

	adap->sge.nd_msix_idx = -1;
	if (!(adap->flags & CXGB4_USING_MSIX))
		return 0;

	/* Request MSI-X vector for non-data interrupt */
	msix = cxgb4_get_msix_idx_from_bmap(adap);
	if (msix < 0)
		return -ENOMEM;

	snprintf(adap->msix_info[msix].desc,
		 sizeof(adap->msix_info[msix].desc),
		 "%s", adap->port[0]->name);

	adap->sge.nd_msix_idx = msix;
	return 0;
}

static int setup_fw_sge_queues(struct adapter *adap)
{
	struct sge *s = &adap->sge;
	int msix, err = 0;

	bitmap_zero(s->starving_fl, s->egr_sz);
	bitmap_zero(s->txq_maperr, s->egr_sz);

	if (adap->flags & CXGB4_USING_MSIX) {
		s->fwevtq_msix_idx = -1;
		msix = cxgb4_get_msix_idx_from_bmap(adap);
		if (msix < 0)
			return -ENOMEM;

		snprintf(adap->msix_info[msix].desc,
			 sizeof(adap->msix_info[msix].desc),
			 "%s-FWeventq", adap->port[0]->name);
	} else {
		err = t4_sge_alloc_rxq(adap, &s->intrq, false, adap->port[0], 0,
				       NULL, NULL, NULL, -1);
		if (err)
			return err;
		msix = -((int)s->intrq.abs_id + 1);
	}

	err = t4_sge_alloc_rxq(adap, &s->fw_evtq, true, adap->port[0],
			       msix, NULL, fwevtq_handler, NULL, -1);
	if (err && msix >= 0)
		cxgb4_free_msix_idx_in_bmap(adap, msix);

	s->fwevtq_msix_idx = msix;
	return err;
}

/**
 *	setup_sge_queues - configure SGE Tx/Rx/response queues
 *	@adap: the adapter
 *
 *	Determines how many sets of SGE queues to use and initializes them.
 *	We support multiple queue sets per port if we have MSI-X, otherwise
 *	just one queue set per port.
 */
static int setup_sge_queues(struct adapter *adap)
{
	struct sge_uld_rxq_info *rxq_info = NULL;
	struct sge *s = &adap->sge;
	unsigned int cmplqid = 0;
	int err, i, j, msix = 0;

	if (is_uld(adap))
		rxq_info = s->uld_rxq_info[CXGB4_ULD_RDMA];

	if (!(adap->flags & CXGB4_USING_MSIX))
		msix = -((int)s->intrq.abs_id + 1);

	for_each_port(adap, i) {
		struct net_device *dev = adap->port[i];
		struct port_info *pi = netdev_priv(dev);
		struct sge_eth_rxq *q = &s->ethrxq[pi->first_qset];
		struct sge_eth_txq *t = &s->ethtxq[pi->first_qset];

		for (j = 0; j < pi->nqsets; j++, q++) {
			if (msix >= 0) {
				msix = cxgb4_get_msix_idx_from_bmap(adap);
				if (msix < 0) {
					err = msix;
					goto freeout;
				}

				snprintf(adap->msix_info[msix].desc,
					 sizeof(adap->msix_info[msix].desc),
					 "%s-Rx%d", dev->name, j);
				q->msix = &adap->msix_info[msix];
			}

			err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev,
					       msix, &q->fl,
					       t4_ethrx_handler,
					       NULL,
					       t4_get_tp_ch_map(adap,
								pi->tx_chan));
			if (err)
				goto freeout;
			q->rspq.idx = j;
			memset(&q->stats, 0, sizeof(q->stats));
		}

		q = &s->ethrxq[pi->first_qset];
		for (j = 0; j < pi->nqsets; j++, t++, q++) {
			err = t4_sge_alloc_eth_txq(adap, t, dev,
						   netdev_get_tx_queue(dev, j),
						   q->rspq.cntxt_id,
						   !!(adap->flags & CXGB4_SGE_DBQ_TIMER));
			if (err)
				goto freeout;
		}
	}

	for_each_port(adap, i) {
		/* Note that cmplqid below is 0 if we don't
		 * have RDMA queues, and that's the right value.
		 */
		if (rxq_info)
			cmplqid = rxq_info->uldrxq[i].rspq.cntxt_id;

		err = t4_sge_alloc_ctrl_txq(adap, &s->ctrlq[i], adap->port[i],
					    s->fw_evtq.cntxt_id, cmplqid);
		if (err)
			goto freeout;
	}

	if (!is_t4(adap->params.chip)) {
		err = t4_sge_alloc_eth_txq(adap, &s->ptptxq, adap->port[0],
					   netdev_get_tx_queue(adap->port[0], 0),
					   s->fw_evtq.cntxt_id, false);
		if (err)
			goto freeout;
	}

	t4_write_reg(adap, is_t4(adap->params.chip) ?
				MPS_TRC_RSS_CONTROL_A :
				MPS_T5_TRC_RSS_CONTROL_A,
		     RSSCONTROL_V(netdev2pinfo(adap->port[0])->tx_chan) |
		     QUEUENUMBER_V(s->ethrxq[0].rspq.abs_id));
	return 0;
freeout:
	dev_err(adap->pdev_dev, "Can't allocate queues, err=%d\n", -err);
	t4_free_sge_resources(adap);
	return err;
}

static u16 cxgb_select_queue(struct net_device *dev, struct sk_buff *skb,
			     struct net_device *sb_dev)
{
	int txq;

#ifdef CONFIG_CHELSIO_T4_DCB
	/* If Data Center Bridging has been successfully negotiated on this
	 * link then we'll use the skb's priority to map it to a TX Queue.
	 * The skb's priority is determined via the VLAN Tag Priority Code
	 * Point field.
	 */
	if (cxgb4_dcb_enabled(dev) && !is_kdump_kernel()) {
		u16 vlan_tci;
		int err;

		err = vlan_get_tag(skb, &vlan_tci);
		if (unlikely(err)) {
			if (net_ratelimit())
				netdev_warn(dev,
					    "TX Packet without VLAN Tag on DCB Link\n");
			txq = 0;
		} else {
			txq = (vlan_tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
#ifdef CONFIG_CHELSIO_T4_FCOE
			if (skb->protocol == htons(ETH_P_FCOE))
				txq = skb->priority & 0x7;
#endif /* CONFIG_CHELSIO_T4_FCOE */
		}
		return txq;
	}
#endif /* CONFIG_CHELSIO_T4_DCB */

	if (dev->num_tc) {
		struct port_info *pi = netdev2pinfo(dev);
		u8 ver, proto;

		ver = ip_hdr(skb)->version;
		proto = (ver == 6) ? ipv6_hdr(skb)->nexthdr :
				     ip_hdr(skb)->protocol;

		/* Send unsupported traffic pattern to normal NIC queues. */
		txq = netdev_pick_tx(dev, skb, sb_dev);
		if (xfrm_offload(skb) || is_ptp_enabled(skb, dev) ||
		    skb->encapsulation ||
		    cxgb4_is_ktls_skb(skb) ||
		    (proto != IPPROTO_TCP && proto != IPPROTO_UDP))
			txq = txq % pi->nqsets;

		return txq;
	}

	if (select_queue) {
		txq = (skb_rx_queue_recorded(skb)
			? skb_get_rx_queue(skb)
			: smp_processor_id());

		while (unlikely(txq >= dev->real_num_tx_queues))
			txq -= dev->real_num_tx_queues;

		return txq;
	}

	return netdev_pick_tx(dev, skb, NULL) % dev->real_num_tx_queues;
}

static int closest_timer(const struct sge *s, int time)
{
	int i, delta, match = 0, min_delta = INT_MAX;

	for (i = 0; i < ARRAY_SIZE(s->timer_val); i++) {
		delta = time - s->timer_val[i];
		if (delta < 0)
			delta = -delta;
		if (delta < min_delta) {
			min_delta = delta;
			match = i;
		}
	}
	return match;
}

static int closest_thres(const struct sge *s, int thres)
{
	int i, delta, match = 0, min_delta = INT_MAX;

	for (i = 0; i < ARRAY_SIZE(s->counter_val); i++) {
		delta = thres - s->counter_val[i];
		if (delta < 0)
			delta = -delta;
		if (delta < min_delta) {
			min_delta = delta;
			match = i;
		}
	}
	return match;
}

/**
 *	cxgb4_set_rspq_intr_params - set a queue's interrupt holdoff parameters
 *	@q: the Rx queue
 *	@us: the hold-off time in us, or 0 to disable timer
 *	@cnt: the hold-off packet count, or 0 to disable counter
 *
 *	Sets an Rx queue's interrupt hold-off time and packet count.  At least
 *	one of the two needs to be enabled for the queue to generate interrupts.
 */
int cxgb4_set_rspq_intr_params(struct sge_rspq *q,
			       unsigned int us, unsigned int cnt)
{
	struct adapter *adap = q->adap;

	if ((us | cnt) == 0)
		cnt = 1;

	if (cnt) {
		int err;
		u32 v, new_idx;

		new_idx = closest_thres(&adap->sge, cnt);
		if (q->desc && q->pktcnt_idx != new_idx) {
			/* the queue has already been created, update it */
			v = FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
			    FW_PARAMS_PARAM_X_V(
					FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH) |
			    FW_PARAMS_PARAM_YZ_V(q->cntxt_id);
			err = t4_set_params(adap, adap->mbox, adap->pf, 0, 1,
					    &v, &new_idx);
			if (err)
				return err;
		}
		q->pktcnt_idx = new_idx;
	}

	us = us == 0 ? 6 : closest_timer(&adap->sge, us);
	q->intr_params = QINTR_TIMER_IDX_V(us) | QINTR_CNT_EN_V(cnt > 0);
	return 0;
}

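/* Usage sketch (illustrative, not part of the original source): interrupt
 * coalescing requests such as "ethtool -C ethX rx-usecs 50 rx-frames 32"
 * end up as a call along the lines of
 *
 *	err = cxgb4_set_rspq_intr_params(&q->rspq, 50, 32);
 *
 * where both values are snapped to the nearest entries of the adapter's
 * sge.timer_val[] and sge.counter_val[] tables by the closest_*() helpers
 * above.
 */
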
static int cxgb_set_features(struct net_device *dev, netdev_features_t features)
{
	netdev_features_t changed = dev->features ^ features;
	const struct port_info *pi = netdev_priv(dev);
	int err;

	if (!(changed & NETIF_F_HW_VLAN_CTAG_RX))
		return 0;

	err = t4_set_rxmode(pi->adapter, pi->adapter->mbox, pi->viid,
			    pi->viid_mirror, -1, -1, -1, -1,
			    !!(features & NETIF_F_HW_VLAN_CTAG_RX), true);
	if (unlikely(err))
		dev->features = features ^ NETIF_F_HW_VLAN_CTAG_RX;
	return err;
}

static int setup_debugfs(struct adapter *adap)
{
	if (IS_ERR_OR_NULL(adap->debugfs_root))
		return -1;

#ifdef CONFIG_DEBUG_FS
	t4_setup_debugfs(adap);
#endif
	return 0;
}

static void cxgb4_port_mirror_free_rxq(struct adapter *adap,
				       struct sge_eth_rxq *mirror_rxq)
{
	if ((adap->flags & CXGB4_FULL_INIT_DONE) &&
	    !(adap->flags & CXGB4_SHUTTING_DOWN))
		cxgb4_quiesce_rx(&mirror_rxq->rspq);

	if (adap->flags & CXGB4_USING_MSIX) {
		cxgb4_clear_msix_aff(mirror_rxq->msix->vec,
				     mirror_rxq->msix->aff_mask);
		free_irq(mirror_rxq->msix->vec, &mirror_rxq->rspq);
		cxgb4_free_msix_idx_in_bmap(adap, mirror_rxq->msix->idx);
	}

	free_rspq_fl(adap, &mirror_rxq->rspq, &mirror_rxq->fl);
}

1317
1318static int cxgb4_port_mirror_alloc_queues(struct net_device *dev)
1319{
1320 struct port_info *pi = netdev2pinfo(dev);
1321 struct adapter *adap = netdev2adap(dev);
1322 struct sge_eth_rxq *mirror_rxq;
1323 struct sge *s = &adap->sge;
1324 int ret = 0, msix = 0;
1325 u16 i, rxqid;
1326 u16 *rss;
1327
1328 if (!pi->vi_mirror_count)
1329 return 0;
1330
1331 if (s->mirror_rxq[pi->port_id])
1332 return 0;
1333
1334 mirror_rxq = kcalloc(pi->nmirrorqsets, sizeof(*mirror_rxq), GFP_KERNEL);
1335 if (!mirror_rxq)
1336 return -ENOMEM;
1337
1338 s->mirror_rxq[pi->port_id] = mirror_rxq;
1339
1340 if (!(adap->flags & CXGB4_USING_MSIX))
1341 msix = -((int)adap->sge.intrq.abs_id + 1);
1342
1343 for (i = 0, rxqid = 0; i < pi->nmirrorqsets; i++, rxqid++) {
1344 mirror_rxq = &s->mirror_rxq[pi->port_id][i];
1345
1346 /* Allocate Mirror Rxqs */
1347 if (msix >= 0) {
1348 msix = cxgb4_get_msix_idx_from_bmap(adap);
1349 if (msix < 0) {
1350 ret = msix;
1351 goto out_free_queues;
1352 }
1353
1354 mirror_rxq->msix = &adap->msix_info[msix];
1355 snprintf(mirror_rxq->msix->desc,
1356 sizeof(mirror_rxq->msix->desc),
1357 "%s-mirrorrxq%d", dev->name, i);
1358 }
1359
1360 init_rspq(adap, &mirror_rxq->rspq,
1361 CXGB4_MIRROR_RXQ_DEFAULT_INTR_USEC,
1362 CXGB4_MIRROR_RXQ_DEFAULT_PKT_CNT,
1363 CXGB4_MIRROR_RXQ_DEFAULT_DESC_NUM,
1364 CXGB4_MIRROR_RXQ_DEFAULT_DESC_SIZE);
1365
1366 mirror_rxq->fl.size = CXGB4_MIRROR_FLQ_DEFAULT_DESC_NUM;
1367
1368 ret = t4_sge_alloc_rxq(adap, &mirror_rxq->rspq, false,
1369 dev, msix, &mirror_rxq->fl,
1370 t4_ethrx_handler, NULL, 0);
1371 if (ret)
1372 goto out_free_msix_idx;
1373
1374 /* Setup MSI-X vectors for Mirror Rxqs */
1375 if (adap->flags & CXGB4_USING_MSIX) {
1376 ret = request_irq(mirror_rxq->msix->vec,
1377 t4_sge_intr_msix, 0,
1378 mirror_rxq->msix->desc,
1379 &mirror_rxq->rspq);
1380 if (ret)
1381 goto out_free_rxq;
1382
1383 cxgb4_set_msix_aff(adap, mirror_rxq->msix->vec,
1384 &mirror_rxq->msix->aff_mask, i);
1385 }
1386
1387 /* Start NAPI for Mirror Rxqs */
1388 cxgb4_enable_rx(adap, &mirror_rxq->rspq);
1389 }
1390
1391 /* Setup RSS for Mirror Rxqs */
1392 rss = kcalloc(pi->rss_size, sizeof(u16), GFP_KERNEL);
1393 if (!rss) {
1394 ret = -ENOMEM;
1395 goto out_free_queues;
1396 }
1397
1398 mirror_rxq = &s->mirror_rxq[pi->port_id][0];
1399 for (i = 0; i < pi->rss_size; i++)
1400 rss[i] = mirror_rxq[i % pi->nmirrorqsets].rspq.abs_id;
1401
1402 ret = cxgb4_config_rss(pi, rss, pi->rss_size, pi->viid_mirror);
1403 kfree(rss);
1404 if (ret)
1405 goto out_free_queues;
1406
1407 return 0;
1408
1409out_free_rxq:
1410 free_rspq_fl(adap, &mirror_rxq->rspq, &mirror_rxq->fl);
1411
1412out_free_msix_idx:
1413 cxgb4_free_msix_idx_in_bmap(adap, mirror_rxq->msix->idx);
1414
1415out_free_queues:
1416 while (rxqid-- > 0)
1417 cxgb4_port_mirror_free_rxq(adap,
1418 &s->mirror_rxq[pi->port_id][rxqid]);
1419
1420 kfree(s->mirror_rxq[pi->port_id]);
1421 s->mirror_rxq[pi->port_id] = NULL;
1422 return ret;
1423}
1424
static void cxgb4_port_mirror_free_queues(struct net_device *dev)
{
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = netdev2adap(dev);
	struct sge *s = &adap->sge;
	u16 i;

	if (!pi->vi_mirror_count)
		return;

	if (!s->mirror_rxq[pi->port_id])
		return;

	for (i = 0; i < pi->nmirrorqsets; i++)
		cxgb4_port_mirror_free_rxq(adap,
					   &s->mirror_rxq[pi->port_id][i]);

	kfree(s->mirror_rxq[pi->port_id]);
	s->mirror_rxq[pi->port_id] = NULL;
}

static int cxgb4_port_mirror_start(struct net_device *dev)
{
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = netdev2adap(dev);
	int ret, idx = -1;

	if (!pi->vi_mirror_count)
		return 0;

	/* Mirror VIs can be created dynamically after the stack has already
	 * set up Rx modes like MTU, promisc, allmulti, etc. on the main VI.
	 * So parse what the stack set up on the main VI and apply the same
	 * to the mirror VI.
	 */
	ret = t4_set_rxmode(adap, adap->mbox, pi->viid, pi->viid_mirror,
			    dev->mtu, (dev->flags & IFF_PROMISC) ? 1 : 0,
			    (dev->flags & IFF_ALLMULTI) ? 1 : 0, 1,
			    !!(dev->features & NETIF_F_HW_VLAN_CTAG_RX), true);
	if (ret) {
		dev_err(adap->pdev_dev,
			"Failed start up Rx mode for Mirror VI 0x%x, ret: %d\n",
			pi->viid_mirror, ret);
		return ret;
	}

	/* Enable replication bit for the device's MAC address
	 * in MPS TCAM, so that the packets for the main VI are
	 * replicated to mirror VI.
	 */
	ret = cxgb4_update_mac_filt(pi, pi->viid_mirror, &idx,
				    dev->dev_addr, true, NULL);
	if (ret) {
		dev_err(adap->pdev_dev,
			"Failed updating MAC filter for Mirror VI 0x%x, ret: %d\n",
			pi->viid_mirror, ret);
		return ret;
	}

	/* Enabling a Virtual Interface can result in an interrupt
	 * during the processing of the VI Enable command and, in some
	 * paths, result in an attempt to issue another command in the
	 * interrupt context.  Thus, we disable interrupts during the
	 * course of the VI Enable command ...
	 */
	local_bh_disable();
	ret = t4_enable_vi_params(adap, adap->mbox, pi->viid_mirror, true, true,
				  false);
	local_bh_enable();
	if (ret)
		dev_err(adap->pdev_dev,
			"Failed starting Mirror VI 0x%x, ret: %d\n",
			pi->viid_mirror, ret);

	return ret;
}

static void cxgb4_port_mirror_stop(struct net_device *dev)
{
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = netdev2adap(dev);

	if (!pi->vi_mirror_count)
		return;

	t4_enable_vi_params(adap, adap->mbox, pi->viid_mirror, false, false,
			    false);
}

int cxgb4_port_mirror_alloc(struct net_device *dev)
{
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = netdev2adap(dev);
	int ret = 0;

	if (!pi->nmirrorqsets)
		return -EOPNOTSUPP;

	mutex_lock(&pi->vi_mirror_mutex);
	if (pi->viid_mirror) {
		pi->vi_mirror_count++;
		goto out_unlock;
	}

	ret = t4_init_port_mirror(pi, adap->mbox, pi->port_id, adap->pf, 0,
				  &pi->viid_mirror);
	if (ret)
		goto out_unlock;

	pi->vi_mirror_count = 1;

	if (adap->flags & CXGB4_FULL_INIT_DONE) {
		ret = cxgb4_port_mirror_alloc_queues(dev);
		if (ret)
			goto out_free_vi;

		ret = cxgb4_port_mirror_start(dev);
		if (ret)
			goto out_free_queues;
	}

	mutex_unlock(&pi->vi_mirror_mutex);
	return 0;

out_free_queues:
	cxgb4_port_mirror_free_queues(dev);

out_free_vi:
	pi->vi_mirror_count = 0;
	t4_free_vi(adap, adap->mbox, adap->pf, 0, pi->viid_mirror);
	pi->viid_mirror = 0;

out_unlock:
	mutex_unlock(&pi->vi_mirror_mutex);
	return ret;
}

void cxgb4_port_mirror_free(struct net_device *dev)
{
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = netdev2adap(dev);

	mutex_lock(&pi->vi_mirror_mutex);
	if (!pi->viid_mirror)
		goto out_unlock;

	if (pi->vi_mirror_count > 1) {
		pi->vi_mirror_count--;
		goto out_unlock;
	}

	cxgb4_port_mirror_stop(dev);
	cxgb4_port_mirror_free_queues(dev);

	pi->vi_mirror_count = 0;
	t4_free_vi(adap, adap->mbox, adap->pf, 0, pi->viid_mirror);
	pi->viid_mirror = 0;

out_unlock:
	mutex_unlock(&pi->vi_mirror_mutex);
}

/*
 * upper-layer driver support
 */

/*
 * Allocate an active-open TID and set it to the supplied value.
 */
int cxgb4_alloc_atid(struct tid_info *t, void *data)
{
	int atid = -1;

	spin_lock_bh(&t->atid_lock);
	if (t->afree) {
		union aopen_entry *p = t->afree;

		atid = (p - t->atid_tab) + t->atid_base;
		t->afree = p->next;
		p->data = data;
		t->atids_in_use++;
	}
	spin_unlock_bh(&t->atid_lock);
	return atid;
}
EXPORT_SYMBOL(cxgb4_alloc_atid);

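/* Usage sketch for upper-layer (offload) drivers (illustrative; the names
 * below are hypothetical, not part of this driver):
 *
 *	int atid = cxgb4_alloc_atid(&adap->tids, my_conn_ctx);
 *
 *	if (atid < 0)
 *		return -ENOMEM;			- atid table exhausted
 *	... send a CPL_ACT_OPEN_REQ carrying atid ...
 *	cxgb4_free_atid(&adap->tids, atid);	- once a hardware TID replaces it
 *
 * my_conn_ctx is whatever per-connection state the ULD wants handed back
 * when the firmware reply for this atid arrives.
 */
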
/*
 * Release an active-open TID.
 */
void cxgb4_free_atid(struct tid_info *t, unsigned int atid)
{
	union aopen_entry *p = &t->atid_tab[atid - t->atid_base];

	spin_lock_bh(&t->atid_lock);
	p->next = t->afree;
	t->afree = p;
	t->atids_in_use--;
	spin_unlock_bh(&t->atid_lock);
}
EXPORT_SYMBOL(cxgb4_free_atid);

/*
 * Allocate a server TID and set it to the supplied value.
 */
int cxgb4_alloc_stid(struct tid_info *t, int family, void *data)
{
	int stid;

	spin_lock_bh(&t->stid_lock);
	if (family == PF_INET) {
		stid = find_first_zero_bit(t->stid_bmap, t->nstids);
		if (stid < t->nstids)
			__set_bit(stid, t->stid_bmap);
		else
			stid = -1;
	} else {
		stid = bitmap_find_free_region(t->stid_bmap, t->nstids, 1);
		if (stid < 0)
			stid = -1;
	}
	if (stid >= 0) {
		t->stid_tab[stid].data = data;
		stid += t->stid_base;
		/* IPv6 requires max of 520 bits or 16 cells in TCAM
		 * This is equivalent to 4 TIDs. With CLIP enabled it
		 * needs 2 TIDs.
		 */
		if (family == PF_INET6) {
			t->stids_in_use += 2;
			t->v6_stids_in_use += 2;
		} else {
			t->stids_in_use++;
		}
	}
	spin_unlock_bh(&t->stid_lock);
	return stid;
}
EXPORT_SYMBOL(cxgb4_alloc_stid);

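/* Note on the IPv6 path above (illustrative): bitmap_find_free_region()
 * with order 1 claims 2^1 = 2 naturally aligned bits in one step, matching
 * the two stids an IPv6 server consumes with CLIP enabled.  For example,
 * if bits 0-3 are busy, it returns 4 and marks bits 4 and 5 as used;
 * cxgb4_free_stid() releases the pair with bitmap_release_region().
 */
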
/* Allocate a server filter TID and set it to the supplied value.
 */
int cxgb4_alloc_sftid(struct tid_info *t, int family, void *data)
{
	int stid;

	spin_lock_bh(&t->stid_lock);
	if (family == PF_INET) {
		stid = find_next_zero_bit(t->stid_bmap,
					  t->nstids + t->nsftids, t->nstids);
		if (stid < (t->nstids + t->nsftids))
			__set_bit(stid, t->stid_bmap);
		else
			stid = -1;
	} else {
		stid = -1;
	}
	if (stid >= 0) {
		t->stid_tab[stid].data = data;
		stid -= t->nstids;
		stid += t->sftid_base;
		t->sftids_in_use++;
	}
	spin_unlock_bh(&t->stid_lock);
	return stid;
}
EXPORT_SYMBOL(cxgb4_alloc_sftid);

/* Release a server TID.
 */
void cxgb4_free_stid(struct tid_info *t, unsigned int stid, int family)
{
	/* Is it a server filter TID? */
	if (t->nsftids && (stid >= t->sftid_base)) {
		stid -= t->sftid_base;
		stid += t->nstids;
	} else {
		stid -= t->stid_base;
	}

	spin_lock_bh(&t->stid_lock);
	if (family == PF_INET)
		__clear_bit(stid, t->stid_bmap);
	else
		bitmap_release_region(t->stid_bmap, stid, 1);
	t->stid_tab[stid].data = NULL;
	if (stid < t->nstids) {
		if (family == PF_INET6) {
			t->stids_in_use -= 2;
			t->v6_stids_in_use -= 2;
		} else {
			t->stids_in_use--;
		}
	} else {
		t->sftids_in_use--;
	}

	spin_unlock_bh(&t->stid_lock);
}
EXPORT_SYMBOL(cxgb4_free_stid);

/*
 * Populate a TID_RELEASE WR.  Caller must properly size the skb.
 */
static void mk_tid_release(struct sk_buff *skb, unsigned int chan,
			   unsigned int tid)
{
	struct cpl_tid_release *req;

	set_wr_txq(skb, CPL_PRIORITY_SETUP, chan);
	req = __skb_put(skb, sizeof(*req));
	INIT_TP_WR(req, tid);
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, tid));
}

1739
1740/*
1741 * Queue a TID release request and if necessary schedule a work queue to
1742 * process it.
1743 */
31b9c19b 1744static void cxgb4_queue_tid_release(struct tid_info *t, unsigned int chan,
1745 unsigned int tid)
b8ff05a9 1746{
b8ff05a9 1747 struct adapter *adap = container_of(t, struct adapter, tids);
59437d78 1748 void **p = &t->tid_tab[tid - t->tid_base];
b8ff05a9
DM
1749
1750 spin_lock_bh(&adap->tid_release_lock);
1751 *p = adap->tid_release_head;
1752 /* Low 2 bits encode the Tx channel number */
1753 adap->tid_release_head = (void **)((uintptr_t)p | chan);
1754 if (!adap->tid_release_task_busy) {
1755 adap->tid_release_task_busy = true;
29aaee65 1756 queue_work(adap->workq, &adap->tid_release_task);
b8ff05a9
DM
1757 }
1758 spin_unlock_bh(&adap->tid_release_lock);
1759}
b8ff05a9
DM
1760
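/* Worked example of the pointer tagging above (illustrative): tid_tab
 * entries are pointer-sized and pointer-aligned, so the low two bits of
 * p = &t->tid_tab[i] are always zero and can carry the Tx channel (0-3).
 * If p ends in ...0x8 and chan == 2, tid_release_head ends in ...0xa;
 * process_tid_release_list() below recovers the channel with
 * "(uintptr_t)p & 3" and the real pointer by subtracting it back off.
 */
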
/*
 * Process the list of pending TID release requests.
 */
static void process_tid_release_list(struct work_struct *work)
{
	struct sk_buff *skb;
	struct adapter *adap;

	adap = container_of(work, struct adapter, tid_release_task);

	spin_lock_bh(&adap->tid_release_lock);
	while (adap->tid_release_head) {
		void **p = adap->tid_release_head;
		unsigned int chan = (uintptr_t)p & 3;
		p = (void *)p - chan;

		adap->tid_release_head = *p;
		*p = NULL;
		spin_unlock_bh(&adap->tid_release_lock);

		while (!(skb = alloc_skb(sizeof(struct cpl_tid_release),
					 GFP_KERNEL)))
			schedule_timeout_uninterruptible(1);

		mk_tid_release(skb, chan, p - adap->tids.tid_tab);
		t4_ofld_send(adap, skb);
		spin_lock_bh(&adap->tid_release_lock);
	}
	adap->tid_release_task_busy = false;
	spin_unlock_bh(&adap->tid_release_lock);
}

1793/*
1794 * Release a TID and inform HW. If we are unable to allocate the release
1795 * message we defer to a work queue.
1796 */
1dec4cec
GG
1797void cxgb4_remove_tid(struct tid_info *t, unsigned int chan, unsigned int tid,
1798 unsigned short family)
b8ff05a9 1799{
b8ff05a9 1800 struct adapter *adap = container_of(t, struct adapter, tids);
59437d78 1801 struct sk_buff *skb;
b8ff05a9 1802
59437d78 1803 WARN_ON(tid_out_of_range(&adap->tids, tid));
9a1bb9f6 1804
59437d78
SAH
1805 if (t->tid_tab[tid - adap->tids.tid_base]) {
1806 t->tid_tab[tid - adap->tids.tid_base] = NULL;
1dec4cec
GG
1807 atomic_dec(&t->conns_in_use);
1808 if (t->hash_base && (tid >= t->hash_base)) {
1809 if (family == AF_INET6)
1810 atomic_sub(2, &t->hash_tids_in_use);
1811 else
1812 atomic_dec(&t->hash_tids_in_use);
1813 } else {
1814 if (family == AF_INET6)
1815 atomic_sub(2, &t->tids_in_use);
1816 else
1817 atomic_dec(&t->tids_in_use);
1818 }
9a1bb9f6
HS
1819 }
1820
b8ff05a9
DM
1821 skb = alloc_skb(sizeof(struct cpl_tid_release), GFP_ATOMIC);
1822 if (likely(skb)) {
b8ff05a9
DM
1823 mk_tid_release(skb, chan, tid);
1824 t4_ofld_send(adap, skb);
1825 } else
1826 cxgb4_queue_tid_release(t, chan, tid);
b8ff05a9
DM
1827}
1828EXPORT_SYMBOL(cxgb4_remove_tid);
1829
1830/*
1831 * Allocate and initialize the TID tables. Returns 0 on success.
1832 */
1833static int tid_init(struct tid_info *t)
1834{
b6f8eaec 1835 struct adapter *adap = container_of(t, struct adapter, tids);
578b46b9
RL
1836 unsigned int max_ftids = t->nftids + t->nsftids;
1837 unsigned int natids = t->natids;
c2193999 1838 unsigned int hpftid_bmap_size;
ab0367ea 1839 unsigned int eotid_bmap_size;
578b46b9
RL
1840 unsigned int stid_bmap_size;
1841 unsigned int ftid_bmap_size;
1842 size_t size;
b8ff05a9 1843
dca4faeb 1844 stid_bmap_size = BITS_TO_LONGS(t->nstids + t->nsftids);
578b46b9 1845 ftid_bmap_size = BITS_TO_LONGS(t->nftids);
c2193999 1846 hpftid_bmap_size = BITS_TO_LONGS(t->nhpftids);
ab0367ea 1847 eotid_bmap_size = BITS_TO_LONGS(t->neotids);
f2b7e78d
VP
1848 size = t->ntids * sizeof(*t->tid_tab) +
1849 natids * sizeof(*t->atid_tab) +
b8ff05a9 1850 t->nstids * sizeof(*t->stid_tab) +
dca4faeb 1851 t->nsftids * sizeof(*t->stid_tab) +
f2b7e78d 1852 stid_bmap_size * sizeof(long) +
c2193999
SAH
1853 t->nhpftids * sizeof(*t->hpftid_tab) +
1854 hpftid_bmap_size * sizeof(long) +
578b46b9 1855 max_ftids * sizeof(*t->ftid_tab) +
ab0367ea
RL
1856 ftid_bmap_size * sizeof(long) +
1857 t->neotids * sizeof(*t->eotid_tab) +
1858 eotid_bmap_size * sizeof(long);
f2b7e78d 1859
752ade68 1860 t->tid_tab = kvzalloc(size, GFP_KERNEL);
b8ff05a9
DM
1861 if (!t->tid_tab)
1862 return -ENOMEM;
1863
1864 t->atid_tab = (union aopen_entry *)&t->tid_tab[t->ntids];
1865 t->stid_tab = (struct serv_entry *)&t->atid_tab[natids];
dca4faeb 1866 t->stid_bmap = (unsigned long *)&t->stid_tab[t->nstids + t->nsftids];
c2193999
SAH
1867 t->hpftid_tab = (struct filter_entry *)&t->stid_bmap[stid_bmap_size];
1868 t->hpftid_bmap = (unsigned long *)&t->hpftid_tab[t->nhpftids];
1869 t->ftid_tab = (struct filter_entry *)&t->hpftid_bmap[hpftid_bmap_size];
578b46b9 1870 t->ftid_bmap = (unsigned long *)&t->ftid_tab[max_ftids];
ab0367ea
RL
1871 t->eotid_tab = (struct eotid_entry *)&t->ftid_bmap[ftid_bmap_size];
1872 t->eotid_bmap = (unsigned long *)&t->eotid_tab[t->neotids];
b8ff05a9
DM
1873 spin_lock_init(&t->stid_lock);
1874 spin_lock_init(&t->atid_lock);
578b46b9 1875 spin_lock_init(&t->ftid_lock);
b8ff05a9
DM
1876
1877 t->stids_in_use = 0;
1dec4cec 1878 t->v6_stids_in_use = 0;
2248b293 1879 t->sftids_in_use = 0;
b8ff05a9
DM
1880 t->afree = NULL;
1881 t->atids_in_use = 0;
1882 atomic_set(&t->tids_in_use, 0);
1dec4cec 1883 atomic_set(&t->conns_in_use, 0);
9a1bb9f6 1884 atomic_set(&t->hash_tids_in_use, 0);
5148e595 1885 atomic_set(&t->eotids_in_use, 0);
b8ff05a9
DM
1886
1887 /* Setup the free list for atid_tab and clear the stid bitmap. */
1888 if (natids) {
1889 while (--natids)
1890 t->atid_tab[natids - 1].next = &t->atid_tab[natids];
1891 t->afree = t->atid_tab;
1892 }
b6f8eaec 1893
578b46b9
RL
1894 if (is_offload(adap)) {
1895 bitmap_zero(t->stid_bmap, t->nstids + t->nsftids);
1896 /* Reserve stid 0 for T4/T5 adapters */
1897 if (!t->stid_base &&
1898 CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
1899 __set_bit(0, t->stid_bmap);
ab0367ea
RL
1900
1901 if (t->neotids)
1902 bitmap_zero(t->eotid_bmap, t->neotids);
578b46b9
RL
1903 }
1904
c2193999
SAH
1905 if (t->nhpftids)
1906 bitmap_zero(t->hpftid_bmap, t->nhpftids);
578b46b9 1907 bitmap_zero(t->ftid_bmap, t->nftids);
b8ff05a9
DM
1908 return 0;
1909}
1910
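/* A condensed sketch of the single-allocation layout that tid_init()
 * builds above: every table lives in one kvzalloc() block, and each
 * table pointer is derived from the end of the previous one. Only two
 * of the chained tables are shown; all names here are illustrative.
 */
struct example_tid_layout {
        void **tid_tab;
        unsigned long *stid_bmap;
};

static int example_tid_layout_init(struct example_tid_layout *e,
                                   unsigned int ntids, unsigned int nstids)
{
        size_t size = ntids * sizeof(void *) +
                      BITS_TO_LONGS(nstids) * sizeof(long);

        e->tid_tab = kvzalloc(size, GFP_KERNEL);
        if (!e->tid_tab)
                return -ENOMEM;

        /* The stid bitmap starts right after the last tid_tab entry */
        e->stid_bmap = (unsigned long *)&e->tid_tab[ntids];
        return 0;
}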
1911/**
1912 * cxgb4_create_server - create an IP server
1913 * @dev: the device
1914 * @stid: the server TID
1915 * @sip: local IP address to bind server to
1916 * @sport: the server's TCP port
29bbf5d7 1917 * @vlan: the VLAN header information
b8ff05a9
DM
1918 * @queue: queue to direct messages from this server to
1919 *
1920 * Create an IP server for the given port and address.
1921 * Returns <0 on error and one of the %NET_XMIT_* values on success.
1922 */
1923int cxgb4_create_server(const struct net_device *dev, unsigned int stid,
793dad94
VP
1924 __be32 sip, __be16 sport, __be16 vlan,
1925 unsigned int queue)
b8ff05a9
DM
1926{
1927 unsigned int chan;
1928 struct sk_buff *skb;
1929 struct adapter *adap;
1930 struct cpl_pass_open_req *req;
80f40c1f 1931 int ret;
b8ff05a9
DM
1932
1933 skb = alloc_skb(sizeof(*req), GFP_KERNEL);
1934 if (!skb)
1935 return -ENOMEM;
1936
1937 adap = netdev2adap(dev);
4df864c1 1938 req = __skb_put(skb, sizeof(*req));
b8ff05a9
DM
1939 INIT_TP_WR(req, 0);
1940 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ, stid));
1941 req->local_port = sport;
1942 req->peer_port = htons(0);
1943 req->local_ip = sip;
1944 req->peer_ip = htonl(0);
e46dab4d 1945 chan = rxq_to_chan(&adap->sge, queue);
d7990b0c 1946 req->opt0 = cpu_to_be64(TX_CHAN_V(chan));
6c53e938
HS
1947 req->opt1 = cpu_to_be64(CONN_POLICY_V(CPL_CONN_POLICY_ASK) |
1948 SYN_RSS_ENABLE_F | SYN_RSS_QUEUE_V(queue));
80f40c1f
VP
1949 ret = t4_mgmt_tx(adap, skb);
1950 return net_xmit_eval(ret);
b8ff05a9
DM
1951}
1952EXPORT_SYMBOL(cxgb4_create_server);
1953
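/* A hedged usage sketch: how a ULD might bind a hardware listener for
 * TCP port 80 on all local addresses, steering CPL messages to rx
 * queue 0. The stid would normally come from cxgb4_alloc_stid();
 * error handling is trimmed and the function name is illustrative.
 */
static int example_listen_http(struct net_device *dev, unsigned int stid)
{
        int ret = cxgb4_create_server(dev, stid, htonl(INADDR_ANY),
                                      htons(80), 0, 0);

        /* <0 is an allocation error, otherwise a NET_XMIT_* value */
        return ret < 0 ? ret : 0;
}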
80f40c1f
VP
 1954/**
 * cxgb4_create_server6 - create an IPv6 server
1955 * @dev: the device
1956 * @stid: the server TID
1957 * @sip: local IPv6 address to bind server to
1958 * @sport: the server's TCP port
1959 * @queue: queue to direct messages from this server to
1960 *
1961 * Create an IPv6 server for the given port and address.
1962 * Returns <0 on error and one of the %NET_XMIT_* values on success.
1963 */
1964int cxgb4_create_server6(const struct net_device *dev, unsigned int stid,
1965 const struct in6_addr *sip, __be16 sport,
1966 unsigned int queue)
1967{
1968 unsigned int chan;
1969 struct sk_buff *skb;
1970 struct adapter *adap;
1971 struct cpl_pass_open_req6 *req;
1972 int ret;
1973
1974 skb = alloc_skb(sizeof(*req), GFP_KERNEL);
1975 if (!skb)
1976 return -ENOMEM;
1977
1978 adap = netdev2adap(dev);
4df864c1 1979 req = __skb_put(skb, sizeof(*req));
80f40c1f
VP
1980 INIT_TP_WR(req, 0);
1981 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ6, stid));
1982 req->local_port = sport;
1983 req->peer_port = htons(0);
1984 req->local_ip_hi = *(__be64 *)(sip->s6_addr);
1985 req->local_ip_lo = *(__be64 *)(sip->s6_addr + 8);
1986 req->peer_ip_hi = cpu_to_be64(0);
1987 req->peer_ip_lo = cpu_to_be64(0);
1988 chan = rxq_to_chan(&adap->sge, queue);
d7990b0c 1989 req->opt0 = cpu_to_be64(TX_CHAN_V(chan));
6c53e938
HS
1990 req->opt1 = cpu_to_be64(CONN_POLICY_V(CPL_CONN_POLICY_ASK) |
1991 SYN_RSS_ENABLE_F | SYN_RSS_QUEUE_V(queue));
80f40c1f
VP
1992 ret = t4_mgmt_tx(adap, skb);
1993 return net_xmit_eval(ret);
1994}
1995EXPORT_SYMBOL(cxgb4_create_server6);
1996
1997int cxgb4_remove_server(const struct net_device *dev, unsigned int stid,
1998 unsigned int queue, bool ipv6)
1999{
2000 struct sk_buff *skb;
2001 struct adapter *adap;
2002 struct cpl_close_listsvr_req *req;
2003 int ret;
2004
2005 adap = netdev2adap(dev);
2006
2007 skb = alloc_skb(sizeof(*req), GFP_KERNEL);
2008 if (!skb)
2009 return -ENOMEM;
2010
4df864c1 2011 req = __skb_put(skb, sizeof(*req));
80f40c1f
VP
2012 INIT_TP_WR(req, 0);
2013 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_LISTSRV_REQ, stid));
bdc590b9
HS
2014 req->reply_ctrl = htons(NO_REPLY_V(0) | (ipv6 ? LISTSVR_IPV6_V(1) :
2015 LISTSVR_IPV6_V(0)) | QUEUENO_V(queue));
80f40c1f
VP
2016 ret = t4_mgmt_tx(adap, skb);
2017 return net_xmit_eval(ret);
2018}
2019EXPORT_SYMBOL(cxgb4_remove_server);
2020
b8ff05a9
DM
2021/**
2022 * cxgb4_best_mtu - find the entry in the MTU table closest to an MTU
2023 * @mtus: the HW MTU table
2024 * @mtu: the target MTU
2025 * @idx: index of selected entry in the MTU table
2026 *
2027 * Returns the index and the value in the HW MTU table that is closest to
2028 * but does not exceed @mtu, unless @mtu is smaller than any value in the
2029 * table, in which case that smallest available value is selected.
2030 */
2031unsigned int cxgb4_best_mtu(const unsigned short *mtus, unsigned short mtu,
2032 unsigned int *idx)
2033{
2034 unsigned int i = 0;
2035
2036 while (i < NMTUS - 1 && mtus[i + 1] <= mtu)
2037 ++i;
2038 if (idx)
2039 *idx = i;
2040 return mtus[i];
2041}
2042EXPORT_SYMBOL(cxgb4_best_mtu);
2043
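/* Worked example (hypothetical table fragment): with entries
 * {1500, 2002, 4096, ...}, asking for 9000 returns 4096 (the largest
 * entry not exceeding the target within this fragment), while asking
 * for 1400 returns 1500, the smallest entry, since 1400 undershoots
 * the whole table. As a sketch:
 */
static unsigned int example_pick_mtu(struct adapter *adap)
{
        unsigned int idx;

        /* params.mtus is the HW MTU table read at initialization */
        return cxgb4_best_mtu(adap->params.mtus, 1400, &idx);
}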
92e7ae71
HS
2044/**
2045 * cxgb4_best_aligned_mtu - find best MTU, [hopefully] data size aligned
2046 * @mtus: the HW MTU table
2047 * @header_size: Header Size
2048 * @data_size_max: maximum Data Segment Size
2049 * @data_size_align: desired Data Segment Size Alignment (2^N)
2050 * @mtu_idxp: HW MTU Table Index return value pointer (possibly NULL)
2051 *
2052 * Similar to cxgb4_best_mtu() but instead of searching the Hardware
2053 * MTU Table based solely on a Maximum MTU parameter, we break that
2054 * parameter up into a Header Size and Maximum Data Segment Size, and
2055 * provide a desired Data Segment Size Alignment. If we find an MTU in
2056 * the Hardware MTU Table which will result in a Data Segment Size with
2057 * the requested alignment _and_ that MTU isn't "too far" from the
2058 * closest MTU, then we'll return that rather than the closest MTU.
2059 */
2060unsigned int cxgb4_best_aligned_mtu(const unsigned short *mtus,
2061 unsigned short header_size,
2062 unsigned short data_size_max,
2063 unsigned short data_size_align,
2064 unsigned int *mtu_idxp)
2065{
2066 unsigned short max_mtu = header_size + data_size_max;
2067 unsigned short data_size_align_mask = data_size_align - 1;
2068 int mtu_idx, aligned_mtu_idx;
2069
2070 /* Scan the MTU Table till we find an MTU which is larger than our
2071 * Maximum MTU or we reach the end of the table. Along the way,
2072 * record the last MTU found, if any, which will result in a Data
2073 * Segment Length matching the requested alignment.
2074 */
2075 for (mtu_idx = 0, aligned_mtu_idx = -1; mtu_idx < NMTUS; mtu_idx++) {
2076 unsigned short data_size = mtus[mtu_idx] - header_size;
2077
2078 /* If this MTU minus the Header Size would result in a
2079 * Data Segment Size of the desired alignment, remember it.
2080 */
2081 if ((data_size & data_size_align_mask) == 0)
2082 aligned_mtu_idx = mtu_idx;
2083
2084 /* If we're not at the end of the Hardware MTU Table and the
2085 * next element is larger than our Maximum MTU, drop out of
2086 * the loop.
2087 */
2088 if (mtu_idx+1 < NMTUS && mtus[mtu_idx+1] > max_mtu)
2089 break;
2090 }
2091
2092 /* If we fell out of the loop because we ran to the end of the table,
2093 * then we just have to use the last [largest] entry.
2094 */
2095 if (mtu_idx == NMTUS)
2096 mtu_idx--;
2097
2098 /* If we found an MTU which resulted in the requested Data Segment
2099 * Length alignment and that's "not far" from the largest MTU which is
2100 * less than or equal to the maximum MTU, then use that.
2101 */
2102 if (aligned_mtu_idx >= 0 &&
2103 mtu_idx - aligned_mtu_idx <= 1)
2104 mtu_idx = aligned_mtu_idx;
2105
2106 /* If the caller has passed in an MTU Index pointer, pass the
2107 * MTU Index back. Return the MTU value.
2108 */
2109 if (mtu_idxp)
2110 *mtu_idxp = mtu_idx;
2111 return mtus[mtu_idx];
2112}
2113EXPORT_SYMBOL(cxgb4_best_aligned_mtu);
2114
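/* A hedged companion example: pick an MTU whose TCP payload stays
 * 8-byte aligned under a plain IPv4+TCP header, capping the payload at
 * 9000 bytes. The function name is illustrative.
 */
static unsigned int example_pick_aligned_mtu(struct adapter *adap)
{
        unsigned int idx;

        return cxgb4_best_aligned_mtu(adap->params.mtus,
                                      sizeof(struct iphdr) +
                                      sizeof(struct tcphdr),
                                      9000, 8, &idx);
}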
b8ff05a9
DM
2115/**
2116 * cxgb4_port_chan - get the HW channel of a port
2117 * @dev: the net device for the port
2118 *
2119 * Return the HW Tx channel of the given port.
2120 */
2121unsigned int cxgb4_port_chan(const struct net_device *dev)
2122{
2123 return netdev2pinfo(dev)->tx_chan;
2124}
2125EXPORT_SYMBOL(cxgb4_port_chan);
2126
74dd5aa1
VK
2127/**
2128 * cxgb4_port_e2cchan - get the HW c-channel of a port
2129 * @dev: the net device for the port
2130 *
2131 * Return the HW RX c-channel of the given port.
2132 */
2133unsigned int cxgb4_port_e2cchan(const struct net_device *dev)
2134{
2135 return netdev2pinfo(dev)->rx_cchan;
2136}
2137EXPORT_SYMBOL(cxgb4_port_e2cchan);
2138
881806bc
VP
2139unsigned int cxgb4_dbfifo_count(const struct net_device *dev, int lpfifo)
2140{
2141 struct adapter *adap = netdev2adap(dev);
2cc301d2 2142 u32 v1, v2, lp_count, hp_count;
881806bc 2143
f061de42
HS
2144 v1 = t4_read_reg(adap, SGE_DBFIFO_STATUS_A);
2145 v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2_A);
d14807dd 2146 if (is_t4(adap->params.chip)) {
f061de42
HS
2147 lp_count = LP_COUNT_G(v1);
2148 hp_count = HP_COUNT_G(v1);
2cc301d2 2149 } else {
f061de42
HS
2150 lp_count = LP_COUNT_T5_G(v1);
2151 hp_count = HP_COUNT_T5_G(v2);
2cc301d2
SR
2152 }
2153 return lpfifo ? lp_count : hp_count;
881806bc
VP
2154}
2155EXPORT_SYMBOL(cxgb4_dbfifo_count);
2156
b8ff05a9
DM
2157/**
2158 * cxgb4_port_viid - get the VI id of a port
2159 * @dev: the net device for the port
2160 *
2161 * Return the VI id of the given port.
2162 */
2163unsigned int cxgb4_port_viid(const struct net_device *dev)
2164{
2165 return netdev2pinfo(dev)->viid;
2166}
2167EXPORT_SYMBOL(cxgb4_port_viid);
2168
2169/**
2170 * cxgb4_port_idx - get the index of a port
2171 * @dev: the net device for the port
2172 *
2173 * Return the index of the given port.
2174 */
2175unsigned int cxgb4_port_idx(const struct net_device *dev)
2176{
2177 return netdev2pinfo(dev)->port_id;
2178}
2179EXPORT_SYMBOL(cxgb4_port_idx);
2180
b8ff05a9
DM
2181void cxgb4_get_tcp_stats(struct pci_dev *pdev, struct tp_tcp_stats *v4,
2182 struct tp_tcp_stats *v6)
2183{
2184 struct adapter *adap = pci_get_drvdata(pdev);
2185
2186 spin_lock(&adap->stats_lock);
5ccf9d04 2187 t4_tp_get_tcp_stats(adap, v4, v6, false);
b8ff05a9
DM
2188 spin_unlock(&adap->stats_lock);
2189}
2190EXPORT_SYMBOL(cxgb4_get_tcp_stats);
2191
2192void cxgb4_iscsi_init(struct net_device *dev, unsigned int tag_mask,
2193 const unsigned int *pgsz_order)
2194{
2195 struct adapter *adap = netdev2adap(dev);
2196
0d804338
HS
2197 t4_write_reg(adap, ULP_RX_ISCSI_TAGMASK_A, tag_mask);
2198 t4_write_reg(adap, ULP_RX_ISCSI_PSZ_A, HPZ0_V(pgsz_order[0]) |
2199 HPZ1_V(pgsz_order[1]) | HPZ2_V(pgsz_order[2]) |
2200 HPZ3_V(pgsz_order[3]));
b8ff05a9
DM
2201}
2202EXPORT_SYMBOL(cxgb4_iscsi_init);
2203
3069ee9b
VP
2204int cxgb4_flush_eq_cache(struct net_device *dev)
2205{
2206 struct adapter *adap = netdev2adap(dev);
3069ee9b 2207
736c3b94 2208 return t4_sge_ctxt_flush(adap, adap->mbox, CTXT_EGRESS);
3069ee9b
VP
2209}
2210EXPORT_SYMBOL(cxgb4_flush_eq_cache);
2211
2212static int read_eq_indices(struct adapter *adap, u16 qid, u16 *pidx, u16 *cidx)
2213{
f061de42 2214 u32 addr = t4_read_reg(adap, SGE_DBQ_CTXT_BADDR_A) + 24 * qid + 8;
3069ee9b
VP
2215 __be64 indices;
2216 int ret;
2217
fc5ab020
HS
2218 spin_lock(&adap->win0_lock);
2219 ret = t4_memory_rw(adap, 0, MEM_EDC0, addr,
2220 sizeof(indices), (__be32 *)&indices,
2221 T4_MEMORY_READ);
2222 spin_unlock(&adap->win0_lock);
3069ee9b 2223 if (!ret) {
404d9e3f
VP
2224 *cidx = (be64_to_cpu(indices) >> 25) & 0xffff;
2225 *pidx = (be64_to_cpu(indices) >> 9) & 0xffff;
3069ee9b
VP
2226 }
2227 return ret;
2228}
2229
2230int cxgb4_sync_txq_pidx(struct net_device *dev, u16 qid, u16 pidx,
2231 u16 size)
2232{
2233 struct adapter *adap = netdev2adap(dev);
2234 u16 hw_pidx, hw_cidx;
2235 int ret;
2236
2237 ret = read_eq_indices(adap, qid, &hw_pidx, &hw_cidx);
2238 if (ret)
2239 goto out;
2240
2241 if (pidx != hw_pidx) {
2242 u16 delta;
f612b815 2243 u32 val;
3069ee9b
VP
2244
2245 if (pidx >= hw_pidx)
2246 delta = pidx - hw_pidx;
2247 else
2248 delta = size - hw_pidx + pidx;
f612b815
HS
2249
2250 if (is_t4(adap->params.chip))
2251 val = PIDX_V(delta);
2252 else
2253 val = PIDX_T5_V(delta);
3069ee9b 2254 wmb();
f612b815
HS
2255 t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
2256 QID_V(qid) | val);
3069ee9b
VP
2257 }
2258out:
2259 return ret;
2260}
2261EXPORT_SYMBOL(cxgb4_sync_txq_pidx);
2262
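/* The delta computation above handles producer-index wraparound: when
 * the software pidx has wrapped past the hardware's, the distance is
 * measured through the end of the ring. A self-contained restatement:
 */
static inline u16 example_pidx_delta(u16 sw_pidx, u16 hw_pidx, u16 qsize)
{
        return sw_pidx >= hw_pidx ? sw_pidx - hw_pidx
                                  : qsize - hw_pidx + sw_pidx;
}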
031cf476
HS
2263int cxgb4_read_tpte(struct net_device *dev, u32 stag, __be32 *tpte)
2264{
6559a7e8 2265 u32 edc0_size, edc1_size, mc0_size, mc1_size, size;
031cf476 2266 u32 edc0_end, edc1_end, mc0_end, mc1_end;
8b4e6b3c
AV
2267 u32 offset, memtype, memaddr;
2268 struct adapter *adap;
2269 u32 hma_size = 0;
031cf476
HS
2270 int ret;
2271
2272 adap = netdev2adap(dev);
2273
2274 offset = ((stag >> 8) * 32) + adap->vres.stag.start;
2275
2276 /* Figure out where the offset lands in the Memory Type/Address scheme.
2277 * This code assumes that the memory is laid out starting at offset 0
2278 * with no breaks as: EDC0, EDC1, MC0, MC1. All cards have both EDC0
2279 * and EDC1. Some cards will have neither MC0 nor MC1, most cards have
2280 * MC0, and some have both MC0 and MC1.
2281 */
6559a7e8
HS
2282 size = t4_read_reg(adap, MA_EDRAM0_BAR_A);
2283 edc0_size = EDRAM0_SIZE_G(size) << 20;
2284 size = t4_read_reg(adap, MA_EDRAM1_BAR_A);
2285 edc1_size = EDRAM1_SIZE_G(size) << 20;
2286 size = t4_read_reg(adap, MA_EXT_MEMORY0_BAR_A);
2287 mc0_size = EXT_MEM0_SIZE_G(size) << 20;
031cf476 2288
8b4e6b3c
AV
2289 if (t4_read_reg(adap, MA_TARGET_MEM_ENABLE_A) & HMA_MUX_F) {
2290 size = t4_read_reg(adap, MA_EXT_MEMORY1_BAR_A);
2291 hma_size = EXT_MEM1_SIZE_G(size) << 20;
2292 }
031cf476
HS
2293 edc0_end = edc0_size;
2294 edc1_end = edc0_end + edc1_size;
2295 mc0_end = edc1_end + mc0_size;
2296
2297 if (offset < edc0_end) {
2298 memtype = MEM_EDC0;
2299 memaddr = offset;
2300 } else if (offset < edc1_end) {
2301 memtype = MEM_EDC1;
2302 memaddr = offset - edc0_end;
2303 } else {
8b4e6b3c
AV
2304 if (hma_size && (offset < (edc1_end + hma_size))) {
2305 memtype = MEM_HMA;
2306 memaddr = offset - edc1_end;
2307 } else if (offset < mc0_end) {
031cf476
HS
2308 memtype = MEM_MC0;
2309 memaddr = offset - edc1_end;
3ccc6cf7 2310 } else if (is_t5(adap->params.chip)) {
6559a7e8
HS
2311 size = t4_read_reg(adap, MA_EXT_MEMORY1_BAR_A);
2312 mc1_size = EXT_MEM1_SIZE_G(size) << 20;
031cf476
HS
2313 mc1_end = mc0_end + mc1_size;
2314 if (offset < mc1_end) {
2315 memtype = MEM_MC1;
2316 memaddr = offset - mc0_end;
2317 } else {
2318 /* offset beyond the end of any memory */
2319 goto err;
2320 }
3ccc6cf7
HS
2321 } else {
2322 /* T4/T6 only has a single memory channel */
2323 goto err;
031cf476
HS
2324 }
2325 }
2326
2327 spin_lock(&adap->win0_lock);
2328 ret = t4_memory_rw(adap, 0, memtype, memaddr, 32, tpte, T4_MEMORY_READ);
2329 spin_unlock(&adap->win0_lock);
2330 return ret;
2331
2332err:
2333 dev_err(adap->pdev_dev, "stag %#x, offset %#x out of range\n",
2334 stag, offset);
2335 return -EINVAL;
2336}
2337EXPORT_SYMBOL(cxgb4_read_tpte);
2338
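/* A condensed restatement of the address-resolution walk in
 * cxgb4_read_tpte() above, assuming the same contiguous
 * EDC0 | EDC1 | MC0 ordering and omitting the HMA/MC1 branches. Sizes
 * are in bytes; the helper name is illustrative.
 */
static int example_resolve_mem(u32 offset, u32 edc0, u32 edc1, u32 mc0,
                               int *memtype, u32 *memaddr)
{
        if (offset < edc0) {
                *memtype = MEM_EDC0;
                *memaddr = offset;
        } else if (offset < edc0 + edc1) {
                *memtype = MEM_EDC1;
                *memaddr = offset - edc0;
        } else if (offset < edc0 + edc1 + mc0) {
                *memtype = MEM_MC0;
                *memaddr = offset - (edc0 + edc1);
        } else {
                return -EINVAL; /* beyond every memory */
        }
        return 0;
}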
7730b4c7
HS
2339u64 cxgb4_read_sge_timestamp(struct net_device *dev)
2340{
2341 u32 hi, lo;
2342 struct adapter *adap;
2343
2344 adap = netdev2adap(dev);
f612b815
HS
2345 lo = t4_read_reg(adap, SGE_TIMESTAMP_LO_A);
2346 hi = TSVAL_G(t4_read_reg(adap, SGE_TIMESTAMP_HI_A));
7730b4c7
HS
2347
2348 return ((u64)hi << 32) | (u64)lo;
2349}
2350EXPORT_SYMBOL(cxgb4_read_sge_timestamp);
2351
df64e4d3
HS
2352int cxgb4_bar2_sge_qregs(struct net_device *dev,
2353 unsigned int qid,
2354 enum cxgb4_bar2_qtype qtype,
66cf188e 2355 int user,
df64e4d3
HS
2356 u64 *pbar2_qoffset,
2357 unsigned int *pbar2_qid)
2358{
b2612722 2359 return t4_bar2_sge_qregs(netdev2adap(dev),
df64e4d3
HS
2360 qid,
2361 (qtype == CXGB4_BAR2_QTYPE_EGRESS
2362 ? T4_BAR2_QTYPE_EGRESS
2363 : T4_BAR2_QTYPE_INGRESS),
66cf188e 2364 user,
df64e4d3
HS
2365 pbar2_qoffset,
2366 pbar2_qid);
2367}
2368EXPORT_SYMBOL(cxgb4_bar2_sge_qregs);
2369
b8ff05a9
DM
2370static struct pci_driver cxgb4_driver;
2371
2372static void check_neigh_update(struct neighbour *neigh)
2373{
2374 const struct device *parent;
2375 const struct net_device *netdev = neigh->dev;
2376
d0d7b10b 2377 if (is_vlan_dev(netdev))
b8ff05a9
DM
2378 netdev = vlan_dev_real_dev(netdev);
2379 parent = netdev->dev.parent;
2380 if (parent && parent->driver == &cxgb4_driver.driver)
2381 t4_l2t_update(dev_get_drvdata(parent), neigh);
2382}
2383
2384static int netevent_cb(struct notifier_block *nb, unsigned long event,
2385 void *data)
2386{
2387 switch (event) {
2388 case NETEVENT_NEIGH_UPDATE:
2389 check_neigh_update(data);
2390 break;
b8ff05a9
DM
2391 case NETEVENT_REDIRECT:
2392 default:
2393 break;
2394 }
2395 return 0;
2396}
2397
2398static bool netevent_registered;
2399static struct notifier_block cxgb4_netevent_nb = {
2400 .notifier_call = netevent_cb
2401};
2402
3069ee9b
VP
2403static void drain_db_fifo(struct adapter *adap, int usecs)
2404{
2cc301d2 2405 u32 v1, v2, lp_count, hp_count;
3069ee9b
VP
2406
2407 do {
f061de42
HS
2408 v1 = t4_read_reg(adap, SGE_DBFIFO_STATUS_A);
2409 v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2_A);
d14807dd 2410 if (is_t4(adap->params.chip)) {
f061de42
HS
2411 lp_count = LP_COUNT_G(v1);
2412 hp_count = HP_COUNT_G(v1);
2cc301d2 2413 } else {
f061de42
HS
2414 lp_count = LP_COUNT_T5_G(v1);
2415 hp_count = HP_COUNT_T5_G(v2);
2cc301d2
SR
2416 }
2417
2418 if (lp_count == 0 && hp_count == 0)
2419 break;
3069ee9b
VP
2420 set_current_state(TASK_UNINTERRUPTIBLE);
2421 schedule_timeout(usecs_to_jiffies(usecs));
3069ee9b
VP
2422 } while (1);
2423}
2424
2425static void disable_txq_db(struct sge_txq *q)
2426{
05eb2389
SW
2427 unsigned long flags;
2428
2429 spin_lock_irqsave(&q->db_lock, flags);
3069ee9b 2430 q->db_disabled = 1;
05eb2389 2431 spin_unlock_irqrestore(&q->db_lock, flags);
3069ee9b
VP
2432}
2433
05eb2389 2434static void enable_txq_db(struct adapter *adap, struct sge_txq *q)
3069ee9b
VP
2435{
2436 spin_lock_irq(&q->db_lock);
05eb2389
SW
2437 if (q->db_pidx_inc) {
2438 /* Make sure that all writes to the TX descriptors
2439 * are committed before we tell HW about them.
2440 */
2441 wmb();
f612b815
HS
2442 t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
2443 QID_V(q->cntxt_id) | PIDX_V(q->db_pidx_inc));
05eb2389
SW
2444 q->db_pidx_inc = 0;
2445 }
3069ee9b
VP
2446 q->db_disabled = 0;
2447 spin_unlock_irq(&q->db_lock);
2448}
2449
2450static void disable_dbs(struct adapter *adap)
2451{
2452 int i;
2453
2454 for_each_ethrxq(&adap->sge, i)
2455 disable_txq_db(&adap->sge.ethtxq[i].q);
ab677ff4
HS
2456 if (is_offload(adap)) {
2457 struct sge_uld_txq_info *txq_info =
2458 adap->sge.uld_txq_info[CXGB4_TX_OFLD];
2459
2460 if (txq_info) {
2461 for_each_ofldtxq(&adap->sge, i) {
2462 struct sge_uld_txq *txq = &txq_info->uldtxq[i];
2463
2464 disable_txq_db(&txq->q);
2465 }
2466 }
2467 }
3069ee9b
VP
2468 for_each_port(adap, i)
2469 disable_txq_db(&adap->sge.ctrlq[i].q);
2470}
2471
2472static void enable_dbs(struct adapter *adap)
2473{
2474 int i;
2475
2476 for_each_ethrxq(&adap->sge, i)
05eb2389 2477 enable_txq_db(adap, &adap->sge.ethtxq[i].q);
ab677ff4
HS
2478 if (is_offload(adap)) {
2479 struct sge_uld_txq_info *txq_info =
2480 adap->sge.uld_txq_info[CXGB4_TX_OFLD];
2481
2482 if (txq_info) {
2483 for_each_ofldtxq(&adap->sge, i) {
2484 struct sge_uld_txq *txq = &txq_info->uldtxq[i];
2485
2486 enable_txq_db(adap, &txq->q);
2487 }
2488 }
2489 }
3069ee9b 2490 for_each_port(adap, i)
05eb2389
SW
2491 enable_txq_db(adap, &adap->sge.ctrlq[i].q);
2492}
2493
2494static void notify_rdma_uld(struct adapter *adap, enum cxgb4_control cmd)
2495{
0fbc81b3
HS
2496 enum cxgb4_uld type = CXGB4_ULD_RDMA;
2497
2498 if (adap->uld && adap->uld[type].handle)
2499 adap->uld[type].control(adap->uld[type].handle, cmd);
05eb2389
SW
2500}
2501
2502static void process_db_full(struct work_struct *work)
2503{
2504 struct adapter *adap;
2505
2506 adap = container_of(work, struct adapter, db_full_task);
2507
2508 drain_db_fifo(adap, dbfifo_drain_delay);
2509 enable_dbs(adap);
2510 notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY);
3ccc6cf7
HS
2511 if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
2512 t4_set_reg_field(adap, SGE_INT_ENABLE3_A,
2513 DBFIFO_HP_INT_F | DBFIFO_LP_INT_F,
2514 DBFIFO_HP_INT_F | DBFIFO_LP_INT_F);
2515 else
2516 t4_set_reg_field(adap, SGE_INT_ENABLE3_A,
2517 DBFIFO_LP_INT_F, DBFIFO_LP_INT_F);
3069ee9b
VP
2518}
2519
2520static void sync_txq_pidx(struct adapter *adap, struct sge_txq *q)
2521{
2522 u16 hw_pidx, hw_cidx;
2523 int ret;
2524
05eb2389 2525 spin_lock_irq(&q->db_lock);
3069ee9b
VP
2526 ret = read_eq_indices(adap, (u16)q->cntxt_id, &hw_pidx, &hw_cidx);
2527 if (ret)
2528 goto out;
2529 if (q->db_pidx != hw_pidx) {
2530 u16 delta;
f612b815 2531 u32 val;
3069ee9b
VP
2532
2533 if (q->db_pidx >= hw_pidx)
2534 delta = q->db_pidx - hw_pidx;
2535 else
2536 delta = q->size - hw_pidx + q->db_pidx;
f612b815
HS
2537
2538 if (is_t4(adap->params.chip))
2539 val = PIDX_V(delta);
2540 else
2541 val = PIDX_T5_V(delta);
3069ee9b 2542 wmb();
f612b815
HS
2543 t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
2544 QID_V(q->cntxt_id) | val);
3069ee9b
VP
2545 }
2546out:
2547 q->db_disabled = 0;
05eb2389
SW
2548 q->db_pidx_inc = 0;
2549 spin_unlock_irq(&q->db_lock);
3069ee9b
VP
2550 if (ret)
2551 CH_WARN(adap, "DB drop recovery failed.\n");
2552}
0fbc81b3 2553
3069ee9b
VP
2554static void recover_all_queues(struct adapter *adap)
2555{
2556 int i;
2557
2558 for_each_ethrxq(&adap->sge, i)
2559 sync_txq_pidx(adap, &adap->sge.ethtxq[i].q);
ab677ff4
HS
2560 if (is_offload(adap)) {
2561 struct sge_uld_txq_info *txq_info =
2562 adap->sge.uld_txq_info[CXGB4_TX_OFLD];
2563 if (txq_info) {
2564 for_each_ofldtxq(&adap->sge, i) {
2565 struct sge_uld_txq *txq = &txq_info->uldtxq[i];
2566
2567 sync_txq_pidx(adap, &txq->q);
2568 }
2569 }
2570 }
3069ee9b
VP
2571 for_each_port(adap, i)
2572 sync_txq_pidx(adap, &adap->sge.ctrlq[i].q);
2573}
2574
881806bc
VP
2575static void process_db_drop(struct work_struct *work)
2576{
2577 struct adapter *adap;
881806bc 2578
3069ee9b 2579 adap = container_of(work, struct adapter, db_drop_task);
881806bc 2580
d14807dd 2581 if (is_t4(adap->params.chip)) {
05eb2389 2582 drain_db_fifo(adap, dbfifo_drain_delay);
2cc301d2 2583 notify_rdma_uld(adap, CXGB4_CONTROL_DB_DROP);
05eb2389 2584 drain_db_fifo(adap, dbfifo_drain_delay);
2cc301d2 2585 recover_all_queues(adap);
05eb2389 2586 drain_db_fifo(adap, dbfifo_drain_delay);
2cc301d2 2587 enable_dbs(adap);
05eb2389 2588 notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY);
3ccc6cf7 2589 } else if (is_t5(adap->params.chip)) {
2cc301d2
SR
2590 u32 dropped_db = t4_read_reg(adap, 0x010ac);
2591 u16 qid = (dropped_db >> 15) & 0x1ffff;
2592 u16 pidx_inc = dropped_db & 0x1fff;
df64e4d3
HS
2593 u64 bar2_qoffset;
2594 unsigned int bar2_qid;
2595 int ret;
2cc301d2 2596
b2612722 2597 ret = t4_bar2_sge_qregs(adap, qid, T4_BAR2_QTYPE_EGRESS,
e0456717 2598 0, &bar2_qoffset, &bar2_qid);
df64e4d3
HS
2599 if (ret)
2600 dev_err(adap->pdev_dev, "doorbell drop recovery: "
2601 "qid=%d, pidx_inc=%d\n", qid, pidx_inc);
2602 else
f612b815 2603 writel(PIDX_T5_V(pidx_inc) | QID_V(bar2_qid),
df64e4d3 2604 adap->bar2 + bar2_qoffset + SGE_UDB_KDOORBELL);
2cc301d2
SR
2605
2606 /* Re-enable BAR2 WC */
2607 t4_set_reg_field(adap, 0x10b0, 1<<15, 1<<15);
2608 }
2609
3ccc6cf7
HS
2610 if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
2611 t4_set_reg_field(adap, SGE_DOORBELL_CONTROL_A, DROPPED_DB_F, 0);
881806bc
VP
2612}
2613
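/* A hedged sketch of the T5 dropped-doorbell decode used in
 * process_db_drop() above: the status register packs the egress qid in
 * the bits above 15 and the lost producer-index increment in the low
 * 13 bits, and recovery replays that increment through the queue's
 * BAR2 doorbell.
 */
static inline void example_decode_dropped_db(u32 dropped_db, u16 *qid,
                                             u16 *pidx_inc)
{
        *qid = (dropped_db >> 15) & 0x1ffff;    /* queue that lost a DB */
        *pidx_inc = dropped_db & 0x1fff;        /* pending pidx increment */
}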
2614void t4_db_full(struct adapter *adap)
2615{
d14807dd 2616 if (is_t4(adap->params.chip)) {
05eb2389
SW
2617 disable_dbs(adap);
2618 notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL);
f612b815
HS
2619 t4_set_reg_field(adap, SGE_INT_ENABLE3_A,
2620 DBFIFO_HP_INT_F | DBFIFO_LP_INT_F, 0);
29aaee65 2621 queue_work(adap->workq, &adap->db_full_task);
2cc301d2 2622 }
881806bc
VP
2623}
2624
2625void t4_db_dropped(struct adapter *adap)
2626{
05eb2389
SW
2627 if (is_t4(adap->params.chip)) {
2628 disable_dbs(adap);
2629 notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL);
2630 }
29aaee65 2631 queue_work(adap->workq, &adap->db_drop_task);
881806bc
VP
2632}
2633
0fbc81b3
HS
2634void t4_register_netevent_notifier(void)
2635{
b8ff05a9
DM
2636 if (!netevent_registered) {
2637 register_netevent_notifier(&cxgb4_netevent_nb);
2638 netevent_registered = true;
2639 }
b8ff05a9
DM
2640}
2641
2642static void detach_ulds(struct adapter *adap)
2643{
2644 unsigned int i;
2645
015fe6fd
SAH
2646 if (!is_uld(adap))
2647 return;
2648
b8ff05a9
DM
2649 mutex_lock(&uld_mutex);
2650 list_del(&adap->list_node);
6a146f3a 2651
b8ff05a9 2652 for (i = 0; i < CXGB4_ULD_MAX; i++)
6a146f3a 2653 if (adap->uld && adap->uld[i].handle)
94cdb8bb
HS
2654 adap->uld[i].state_change(adap->uld[i].handle,
2655 CXGB4_STATE_DETACH);
6a146f3a 2656
b8ff05a9
DM
2657 if (netevent_registered && list_empty(&adapter_list)) {
2658 unregister_netevent_notifier(&cxgb4_netevent_nb);
2659 netevent_registered = false;
2660 }
2661 mutex_unlock(&uld_mutex);
2662}
2663
2664static void notify_ulds(struct adapter *adap, enum cxgb4_state new_state)
2665{
2666 unsigned int i;
2667
2668 mutex_lock(&uld_mutex);
2669 for (i = 0; i < CXGB4_ULD_MAX; i++)
94cdb8bb
HS
2670 if (adap->uld && adap->uld[i].handle)
2671 adap->uld[i].state_change(adap->uld[i].handle,
2672 new_state);
b8ff05a9
DM
2673 mutex_unlock(&uld_mutex);
2674}
2675
1bb60376 2676#if IS_ENABLED(CONFIG_IPV6)
b5a02f50
AB
2677static int cxgb4_inet6addr_handler(struct notifier_block *this,
2678 unsigned long event, void *data)
01bcca68 2679{
b5a02f50
AB
2680 struct inet6_ifaddr *ifa = data;
2681 struct net_device *event_dev = ifa->idev->dev;
2682 const struct device *parent = NULL;
2683#if IS_ENABLED(CONFIG_BONDING)
01bcca68 2684 struct adapter *adap;
b5a02f50 2685#endif
d0d7b10b 2686 if (is_vlan_dev(event_dev))
b5a02f50
AB
2687 event_dev = vlan_dev_real_dev(event_dev);
2688#if IS_ENABLED(CONFIG_BONDING)
2689 if (event_dev->flags & IFF_MASTER) {
2690 list_for_each_entry(adap, &adapter_list, list_node) {
2691 switch (event) {
2692 case NETDEV_UP:
2693 cxgb4_clip_get(adap->port[0],
2694 (const u32 *)ifa, 1);
2695 break;
2696 case NETDEV_DOWN:
2697 cxgb4_clip_release(adap->port[0],
2698 (const u32 *)ifa, 1);
2699 break;
2700 default:
2701 break;
2702 }
2703 }
2704 return NOTIFY_OK;
2705 }
2706#endif
01bcca68 2707
b5a02f50
AB
2708 if (event_dev)
2709 parent = event_dev->dev.parent;
01bcca68 2710
b5a02f50 2711 if (parent && parent->driver == &cxgb4_driver.driver) {
01bcca68
VP
2712 switch (event) {
2713 case NETDEV_UP:
b5a02f50 2714 cxgb4_clip_get(event_dev, (const u32 *)ifa, 1);
01bcca68
VP
2715 break;
2716 case NETDEV_DOWN:
b5a02f50 2717 cxgb4_clip_release(event_dev, (const u32 *)ifa, 1);
01bcca68
VP
2718 break;
2719 default:
2720 break;
2721 }
2722 }
b5a02f50 2723 return NOTIFY_OK;
01bcca68
VP
2724}
2725
b5a02f50 2726static bool inet6addr_registered;
01bcca68
VP
2727static struct notifier_block cxgb4_inet6addr_notifier = {
2728 .notifier_call = cxgb4_inet6addr_handler
2729};
2730
01bcca68
VP
2731static void update_clip(const struct adapter *adap)
2732{
2733 int i;
2734 struct net_device *dev;
2735 int ret;
2736
2737 rcu_read_lock();
2738
2739 for (i = 0; i < MAX_NPORTS; i++) {
2740 dev = adap->port[i];
2741 ret = 0;
2742
2743 if (dev)
b5a02f50 2744 ret = cxgb4_update_root_dev_clip(dev);
01bcca68
VP
2745
2746 if (ret < 0)
2747 break;
2748 }
2749 rcu_read_unlock();
2750}
1bb60376 2751#endif /* IS_ENABLED(CONFIG_IPV6) */
01bcca68 2752
b8ff05a9
DM
2753/**
2754 * cxgb_up - enable the adapter
2755 * @adap: adapter being enabled
2756 *
2757 * Called when the first port is enabled, this function performs the
2758 * actions necessary to make an adapter operational, such as completing
2759 * the initialization of HW modules, and enabling interrupts.
2760 *
2761 * Must be called with the rtnl lock held.
2762 */
2763static int cxgb_up(struct adapter *adap)
2764{
76c3a552 2765 struct sge *s = &adap->sge;
aaefae9b 2766 int err;
b8ff05a9 2767
91060381 2768 mutex_lock(&uld_mutex);
aaefae9b
DM
2769 err = setup_sge_queues(adap);
2770 if (err)
91060381 2771 goto rel_lock;
aaefae9b
DM
2772 err = setup_rss(adap);
2773 if (err)
2774 goto freeq;
b8ff05a9 2775
80f61f19 2776 if (adap->flags & CXGB4_USING_MSIX) {
76c3a552
RL
2777 if (s->nd_msix_idx < 0) {
2778 err = -ENOMEM;
2779 goto irq_err;
2780 }
2781
2782 err = request_irq(adap->msix_info[s->nd_msix_idx].vec,
2783 t4_nondata_intr, 0,
2784 adap->msix_info[s->nd_msix_idx].desc, adap);
b8ff05a9
DM
2785 if (err)
2786 goto irq_err;
76c3a552 2787
b8ff05a9 2788 err = request_msix_queue_irqs(adap);
76c3a552
RL
2789 if (err)
2790 goto irq_err_free_nd_msix;
b8ff05a9
DM
2791 } else {
2792 err = request_irq(adap->pdev->irq, t4_intr_handler(adap),
80f61f19
AV
2793 (adap->flags & CXGB4_USING_MSI) ? 0
2794 : IRQF_SHARED,
b1a3c2b6 2795 adap->port[0]->name, adap);
b8ff05a9
DM
2796 if (err)
2797 goto irq_err;
2798 }
e7519f99 2799
b8ff05a9
DM
2800 enable_rx(adap);
2801 t4_sge_start(adap);
2802 t4_intr_enable(adap);
80f61f19 2803 adap->flags |= CXGB4_FULL_INIT_DONE;
e7519f99
GG
2804 mutex_unlock(&uld_mutex);
2805
b8ff05a9 2806 notify_ulds(adap, CXGB4_STATE_UP);
1bb60376 2807#if IS_ENABLED(CONFIG_IPV6)
01bcca68 2808 update_clip(adap);
1bb60376 2809#endif
b8ff05a9 2810 return err;
91060381 2811
76c3a552
RL
2812irq_err_free_nd_msix:
2813 free_irq(adap->msix_info[s->nd_msix_idx].vec, adap);
2814irq_err:
b8ff05a9 2815 dev_err(adap->pdev_dev, "request_irq failed, err %d\n", err);
76c3a552 2816freeq:
aaefae9b 2817 t4_free_sge_resources(adap);
76c3a552 2818rel_lock:
91060381
RR
2819 mutex_unlock(&uld_mutex);
2820 return err;
b8ff05a9
DM
2821}
2822
2823static void cxgb_down(struct adapter *adapter)
2824{
b8ff05a9 2825 cancel_work_sync(&adapter->tid_release_task);
881806bc
VP
2826 cancel_work_sync(&adapter->db_full_task);
2827 cancel_work_sync(&adapter->db_drop_task);
b8ff05a9 2828 adapter->tid_release_task_busy = false;
204dc3c0 2829 adapter->tid_release_head = NULL;
b8ff05a9 2830
aaefae9b
DM
2831 t4_sge_stop(adapter);
2832 t4_free_sge_resources(adapter);
2a8d84bf 2833
80f61f19 2834 adapter->flags &= ~CXGB4_FULL_INIT_DONE;
b8ff05a9
DM
2835}
2836
2837/*
2838 * net_device operations
2839 */
3822d067 2840static int cxgb_open(struct net_device *dev)
b8ff05a9 2841{
b8ff05a9
DM
2842 struct port_info *pi = netdev_priv(dev);
2843 struct adapter *adapter = pi->adapter;
b1396c2b 2844 int err;
b8ff05a9 2845
6a3c869a
DM
2846 netif_carrier_off(dev);
2847
80f61f19 2848 if (!(adapter->flags & CXGB4_FULL_INIT_DONE)) {
aaefae9b
DM
2849 err = cxgb_up(adapter);
2850 if (err < 0)
2851 return err;
2852 }
b8ff05a9 2853
2061ec3f
GG
2854 /* It's possible that the basic port information could have
2855 * changed since we first read it.
2856 */
2857 err = t4_update_port_info(pi);
2858 if (err < 0)
2859 return err;
2860
f68707b8 2861 err = link_start(dev);
2b465ed0
RL
2862 if (err)
2863 return err;
2864
2865 if (pi->nmirrorqsets) {
2866 mutex_lock(&pi->vi_mirror_mutex);
2867 err = cxgb4_port_mirror_alloc_queues(dev);
2868 if (err)
2869 goto out_unlock;
696c278f
RL
2870
2871 err = cxgb4_port_mirror_start(dev);
2872 if (err)
2873 goto out_free_queues;
2b465ed0
RL
2874 mutex_unlock(&pi->vi_mirror_mutex);
2875 }
2876
2877 netif_tx_start_all_queues(dev);
2878 return 0;
2879
696c278f
RL
2880out_free_queues:
2881 cxgb4_port_mirror_free_queues(dev);
2882
2b465ed0
RL
2883out_unlock:
2884 mutex_unlock(&pi->vi_mirror_mutex);
f68707b8 2885 return err;
b8ff05a9
DM
2886}
2887
3822d067 2888static int cxgb_close(struct net_device *dev)
b8ff05a9 2889{
b8ff05a9
DM
2890 struct port_info *pi = netdev_priv(dev);
2891 struct adapter *adapter = pi->adapter;
ba581f77 2892 int ret;
b8ff05a9
DM
2893
2894 netif_tx_stop_all_queues(dev);
2895 netif_carrier_off(dev);
e2f4f4e9
AV
2896 ret = t4_enable_pi_params(adapter, adapter->pf, pi,
2897 false, false, false);
ba581f77
GG
2898#ifdef CONFIG_CHELSIO_T4_DCB
2899 cxgb4_dcb_reset(dev);
2900 dcb_tx_queue_prio_enable(dev, false);
2901#endif
2b465ed0
RL
2902 if (ret)
2903 return ret;
2904
2905 if (pi->nmirrorqsets) {
2906 mutex_lock(&pi->vi_mirror_mutex);
696c278f 2907 cxgb4_port_mirror_stop(dev);
2b465ed0
RL
2908 cxgb4_port_mirror_free_queues(dev);
2909 mutex_unlock(&pi->vi_mirror_mutex);
2910 }
2911
2912 return 0;
b8ff05a9
DM
2913}
2914
dca4faeb 2915int cxgb4_create_server_filter(const struct net_device *dev, unsigned int stid,
793dad94
VP
2916 __be32 sip, __be16 sport, __be16 vlan,
2917 unsigned int queue, unsigned char port, unsigned char mask)
dca4faeb
VP
2918{
2919 int ret;
2920 struct filter_entry *f;
2921 struct adapter *adap;
2922 int i;
2923 u8 *val;
2924
2925 adap = netdev2adap(dev);
2926
1cab775c 2927 /* Adjust stid to correct filter index */
470c60c4 2928 stid -= adap->tids.sftid_base;
1cab775c
VP
2929 stid += adap->tids.nftids;
2930
dca4faeb
VP
2931 /* Check to make sure the filter requested is writable ...
2932 */
2933 f = &adap->tids.ftid_tab[stid];
2934 ret = writable_filter(f);
2935 if (ret)
2936 return ret;
2937
2938 /* Clear out any old resources being used by the filter before
2939 * we start constructing the new filter.
2940 */
2941 if (f->valid)
2942 clear_filter(adap, f);
2943
2944 /* Clear out filter specifications */
2945 memset(&f->fs, 0, sizeof(struct ch_filter_specification));
63b53b0b 2946 f->fs.val.lport = be16_to_cpu(sport);
dca4faeb
VP
2947 f->fs.mask.lport = ~0;
2948 val = (u8 *)&sip;
793dad94 2949 if ((val[0] | val[1] | val[2] | val[3]) != 0) {
dca4faeb
VP
2950 for (i = 0; i < 4; i++) {
2951 f->fs.val.lip[i] = val[i];
2952 f->fs.mask.lip[i] = ~0;
2953 }
0d804338 2954 if (adap->params.tp.vlan_pri_map & PORT_F) {
793dad94
VP
2955 f->fs.val.iport = port;
2956 f->fs.mask.iport = mask;
2957 }
2958 }
dca4faeb 2959
0d804338 2960 if (adap->params.tp.vlan_pri_map & PROTOCOL_F) {
7c89e555
KS
2961 f->fs.val.proto = IPPROTO_TCP;
2962 f->fs.mask.proto = ~0;
2963 }
2964
dca4faeb
VP
2965 f->fs.dirsteer = 1;
2966 f->fs.iq = queue;
2967 /* Mark filter as locked */
2968 f->locked = 1;
2969 f->fs.rpttid = 1;
2970
6b254afd
GG
2971 /* Save the actual tid. We need this to get the corresponding
2972 * filter entry structure in filter_rpl.
2973 */
2974 f->tid = stid + adap->tids.ftid_base;
dca4faeb
VP
2975 ret = set_filter_wr(adap, stid);
2976 if (ret) {
2977 clear_filter(adap, f);
2978 return ret;
2979 }
2980
2981 return 0;
2982}
2983EXPORT_SYMBOL(cxgb4_create_server_filter);
2984
2985int cxgb4_remove_server_filter(const struct net_device *dev, unsigned int stid,
2986 unsigned int queue, bool ipv6)
2987{
dca4faeb
VP
2988 struct filter_entry *f;
2989 struct adapter *adap;
2990
2991 adap = netdev2adap(dev);
1cab775c
VP
2992
2993 /* Adjust stid to correct filter index */
470c60c4 2994 stid -= adap->tids.sftid_base;
1cab775c
VP
2995 stid += adap->tids.nftids;
2996
dca4faeb
VP
2997 f = &adap->tids.ftid_tab[stid];
2998 /* Unlock the filter */
2999 f->locked = 0;
3000
8c14846d 3001 return delete_filter(adap, stid);
dca4faeb
VP
3002}
3003EXPORT_SYMBOL(cxgb4_remove_server_filter);
3004
bc1f4470 3005static void cxgb_get_stats(struct net_device *dev,
3006 struct rtnl_link_stats64 *ns)
b8ff05a9
DM
3007{
3008 struct port_stats stats;
3009 struct port_info *p = netdev_priv(dev);
3010 struct adapter *adapter = p->adapter;
b8ff05a9 3011
9fe6cb58
GS
3012 /* Block retrieving statistics during EEH error
3013 * recovery. Otherwise, the recovery might fail
3014 * and the PCI device will be removed permanently
3015 */
b8ff05a9 3016 spin_lock(&adapter->stats_lock);
9fe6cb58
GS
3017 if (!netif_device_present(dev)) {
3018 spin_unlock(&adapter->stats_lock);
bc1f4470 3019 return;
9fe6cb58 3020 }
a4cfd929
HS
3021 t4_get_port_stats_offset(adapter, p->tx_chan, &stats,
3022 &p->stats_base);
b8ff05a9
DM
3023 spin_unlock(&adapter->stats_lock);
3024
3025 ns->tx_bytes = stats.tx_octets;
3026 ns->tx_packets = stats.tx_frames;
3027 ns->rx_bytes = stats.rx_octets;
3028 ns->rx_packets = stats.rx_frames;
3029 ns->multicast = stats.rx_mcast_frames;
3030
3031 /* detailed rx_errors */
3032 ns->rx_length_errors = stats.rx_jabber + stats.rx_too_long +
3033 stats.rx_runt;
3034 ns->rx_over_errors = 0;
3035 ns->rx_crc_errors = stats.rx_fcs_err;
3036 ns->rx_frame_errors = stats.rx_symbol_err;
b93f79be 3037 ns->rx_dropped = stats.rx_ovflow0 + stats.rx_ovflow1 +
b8ff05a9
DM
3038 stats.rx_ovflow2 + stats.rx_ovflow3 +
3039 stats.rx_trunc0 + stats.rx_trunc1 +
3040 stats.rx_trunc2 + stats.rx_trunc3;
3041 ns->rx_missed_errors = 0;
3042
3043 /* detailed tx_errors */
3044 ns->tx_aborted_errors = 0;
3045 ns->tx_carrier_errors = 0;
3046 ns->tx_fifo_errors = 0;
3047 ns->tx_heartbeat_errors = 0;
3048 ns->tx_window_errors = 0;
3049
3050 ns->tx_errors = stats.tx_error_frames;
3051 ns->rx_errors = stats.rx_symbol_err + stats.rx_fcs_err +
3052 ns->rx_length_errors + stats.rx_len_err + ns->rx_fifo_errors;
b8ff05a9
DM
3053}
3054
3055static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
3056{
060e0c75 3057 unsigned int mbox;
b8ff05a9
DM
3058 int ret = 0, prtad, devad;
3059 struct port_info *pi = netdev_priv(dev);
a4569504 3060 struct adapter *adapter = pi->adapter;
b8ff05a9
DM
3061 struct mii_ioctl_data *data = (struct mii_ioctl_data *)&req->ifr_data;
3062
3063 switch (cmd) {
3064 case SIOCGMIIPHY:
3065 if (pi->mdio_addr < 0)
3066 return -EOPNOTSUPP;
3067 data->phy_id = pi->mdio_addr;
3068 break;
3069 case SIOCGMIIREG:
3070 case SIOCSMIIREG:
3071 if (mdio_phy_id_is_c45(data->phy_id)) {
3072 prtad = mdio_phy_id_prtad(data->phy_id);
3073 devad = mdio_phy_id_devad(data->phy_id);
3074 } else if (data->phy_id < 32) {
3075 prtad = data->phy_id;
3076 devad = 0;
3077 data->reg_num &= 0x1f;
3078 } else
3079 return -EINVAL;
3080
b2612722 3081 mbox = pi->adapter->pf;
b8ff05a9 3082 if (cmd == SIOCGMIIREG)
060e0c75 3083 ret = t4_mdio_rd(pi->adapter, mbox, prtad, devad,
b8ff05a9
DM
3084 data->reg_num, &data->val_out);
3085 else
060e0c75 3086 ret = t4_mdio_wr(pi->adapter, mbox, prtad, devad,
b8ff05a9
DM
3087 data->reg_num, data->val_in);
3088 break;
5e2a5ebc
HS
3089 case SIOCGHWTSTAMP:
3090 return copy_to_user(req->ifr_data, &pi->tstamp_config,
3091 sizeof(pi->tstamp_config)) ?
3092 -EFAULT : 0;
3093 case SIOCSHWTSTAMP:
3094 if (copy_from_user(&pi->tstamp_config, req->ifr_data,
3095 sizeof(pi->tstamp_config)))
3096 return -EFAULT;
3097
a4569504
AG
3098 if (!is_t4(adapter->params.chip)) {
3099 switch (pi->tstamp_config.tx_type) {
3100 case HWTSTAMP_TX_OFF:
3101 case HWTSTAMP_TX_ON:
3102 break;
3103 default:
3104 return -ERANGE;
3105 }
3106
3107 switch (pi->tstamp_config.rx_filter) {
3108 case HWTSTAMP_FILTER_NONE:
3109 pi->rxtstamp = false;
3110 break;
3111 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
3112 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
3113 cxgb4_ptprx_timestamping(pi, pi->port_id,
3114 PTP_TS_L4);
3115 break;
3116 case HWTSTAMP_FILTER_PTP_V2_EVENT:
3117 cxgb4_ptprx_timestamping(pi, pi->port_id,
3118 PTP_TS_L2_L4);
3119 break;
3120 case HWTSTAMP_FILTER_ALL:
3121 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
3122 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
3123 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
3124 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
3125 pi->rxtstamp = true;
3126 break;
3127 default:
3128 pi->tstamp_config.rx_filter =
3129 HWTSTAMP_FILTER_NONE;
3130 return -ERANGE;
3131 }
3132
3133 if ((pi->tstamp_config.tx_type == HWTSTAMP_TX_OFF) &&
3134 (pi->tstamp_config.rx_filter ==
3135 HWTSTAMP_FILTER_NONE)) {
3136 if (cxgb4_ptp_txtype(adapter, pi->port_id) >= 0)
3137 pi->ptp_enable = false;
3138 }
3139
3140 if (pi->tstamp_config.rx_filter !=
3141 HWTSTAMP_FILTER_NONE) {
3142 if (cxgb4_ptp_redirect_rx_packet(adapter,
3143 pi) >= 0)
3144 pi->ptp_enable = true;
3145 }
3146 } else {
3147 /* For T4 Adapters */
3148 switch (pi->tstamp_config.rx_filter) {
3149 case HWTSTAMP_FILTER_NONE:
5e2a5ebc
HS
3150 pi->rxtstamp = false;
3151 break;
a4569504 3152 case HWTSTAMP_FILTER_ALL:
5e2a5ebc
HS
3153 pi->rxtstamp = true;
3154 break;
a4569504
AG
3155 default:
3156 pi->tstamp_config.rx_filter =
3157 HWTSTAMP_FILTER_NONE;
5e2a5ebc 3158 return -ERANGE;
a4569504 3159 }
5e2a5ebc 3160 }
5e2a5ebc
HS
3161 return copy_to_user(req->ifr_data, &pi->tstamp_config,
3162 sizeof(pi->tstamp_config)) ?
3163 -EFAULT : 0;
b8ff05a9
DM
3164 default:
3165 return -EOPNOTSUPP;
3166 }
3167 return ret;
3168}
3169
3170static void cxgb_set_rxmode(struct net_device *dev)
3171{
3172 /* unfortunately we can't return errors to the stack */
3173 set_rxmode(dev, -1, false);
3174}
3175
3176static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
3177{
b8ff05a9 3178 struct port_info *pi = netdev_priv(dev);
696c278f 3179 int ret;
b8ff05a9 3180
696c278f
RL
3181 ret = t4_set_rxmode(pi->adapter, pi->adapter->mbox, pi->viid,
3182 pi->viid_mirror, new_mtu, -1, -1, -1, -1, true);
b8ff05a9
DM
3183 if (!ret)
3184 dev->mtu = new_mtu;
3185 return ret;
3186}
3187
858aa65c 3188#ifdef CONFIG_PCI_IOV
baf50868 3189static int cxgb4_mgmt_open(struct net_device *dev)
e7b48a32
HS
3190{
3191 /* Turn carrier off since we don't have to transmit anything on this
3192 * interface.
3193 */
3194 netif_carrier_off(dev);
3195 return 0;
3196}
3197
661dbeb9 3198/* Fill the MAC addresses that the FW will assign to the VFs */
baf50868 3199static void cxgb4_mgmt_fill_vf_station_mac_addr(struct adapter *adap)
661dbeb9 3200{
661dbeb9 3201 u8 hw_addr[ETH_ALEN], macaddr[ETH_ALEN];
baf50868
GG
3202 unsigned int i, vf, nvfs;
3203 u16 a, b;
661dbeb9
HS
3204 int err;
3205 u8 *na;
661dbeb9
HS
3206
3207 err = t4_get_raw_vpd_params(adap, &adap->params.vpd);
baf50868
GG
3208 if (err)
3209 return;
3210
3211 na = adap->params.vpd.na;
3212 for (i = 0; i < ETH_ALEN; i++)
3213 hw_addr[i] = (hex2val(na[2 * i + 0]) * 16 +
3214 hex2val(na[2 * i + 1]));
3215
3216 a = (hw_addr[0] << 8) | hw_addr[1];
3217 b = (hw_addr[1] << 8) | hw_addr[2];
3218 a ^= b;
3219 a |= 0x0200; /* locally assigned Ethernet MAC address */
3220 a &= ~0x0100; /* not a multicast Ethernet MAC address */
3221 macaddr[0] = a >> 8;
3222 macaddr[1] = a & 0xff;
3223
3224 for (i = 2; i < 5; i++)
3225 macaddr[i] = hw_addr[i + 1];
3226
3227 for (vf = 0, nvfs = pci_sriov_get_totalvfs(adap->pdev);
3228 vf < nvfs; vf++) {
1b974aa4 3229 macaddr[5] = adap->pf * nvfs + vf;
baf50868 3230 ether_addr_copy(adap->vfinfo[vf].vf_mac_addr, macaddr);
661dbeb9
HS
3231 }
3232}
3233
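/* Worked example of the derivation above, with hypothetical VPD
 * contents: for a node-address string "0007433A5C00" (hw_addr
 * 00:07:43:3a:5c:00), a = 0x0007, b = 0x0743, a ^= b gives 0x0744,
 * OR-ing in 0x0200 and clearing 0x0100 yields 0x0646, so every VF gets
 * a locally administered, unicast base MAC of 06:46:3a:5c:00:xx with
 * the last byte enumerating pf * nvfs + vf.
 */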
baf50868 3234static int cxgb4_mgmt_set_vf_mac(struct net_device *dev, int vf, u8 *mac)
858aa65c
HS
3235{
3236 struct port_info *pi = netdev_priv(dev);
3237 struct adapter *adap = pi->adapter;
661dbeb9 3238 int ret;
858aa65c
HS
3239
3240 /* verify MAC addr is valid */
3241 if (!is_valid_ether_addr(mac)) {
3242 dev_err(pi->adapter->pdev_dev,
3243 "Invalid Ethernet address %pM for VF %d\n",
3244 mac, vf);
3245 return -EINVAL;
3246 }
3247
3248 dev_info(pi->adapter->pdev_dev,
3249 "Setting MAC %pM on VF %d\n", mac, vf);
661dbeb9
HS
3250 ret = t4_set_vf_mac_acl(adap, vf + 1, 1, mac);
3251 if (!ret)
3252 ether_addr_copy(adap->vfinfo[vf].vf_mac_addr, mac);
3253 return ret;
3254}
3255
baf50868
GG
3256static int cxgb4_mgmt_get_vf_config(struct net_device *dev,
3257 int vf, struct ifla_vf_info *ivi)
661dbeb9
HS
3258{
3259 struct port_info *pi = netdev_priv(dev);
3260 struct adapter *adap = pi->adapter;
bd79acee 3261 struct vf_info *vfinfo;
661dbeb9
HS
3262
3263 if (vf >= adap->num_vfs)
3264 return -EINVAL;
bd79acee
AV
3265 vfinfo = &adap->vfinfo[vf];
3266
661dbeb9 3267 ivi->vf = vf;
bd79acee 3268 ivi->max_tx_rate = vfinfo->tx_rate;
8ea4fae9 3269 ivi->min_tx_rate = 0;
bd79acee
AV
3270 ether_addr_copy(ivi->mac, vfinfo->vf_mac_addr);
3271 ivi->vlan = vfinfo->vlan;
8b965f3f 3272 ivi->linkstate = vfinfo->link_state;
661dbeb9 3273 return 0;
858aa65c 3274}
96fe11f2 3275
baf50868
GG
3276static int cxgb4_mgmt_get_phys_port_id(struct net_device *dev,
3277 struct netdev_phys_item_id *ppid)
96fe11f2
GG
3278{
3279 struct port_info *pi = netdev_priv(dev);
3280 unsigned int phy_port_id;
3281
3282 phy_port_id = pi->adapter->adap_idx * 10 + pi->port_id;
3283 ppid->id_len = sizeof(phy_port_id);
3284 memcpy(ppid->id, &phy_port_id, ppid->id_len);
3285 return 0;
3286}
3287
baf50868
GG
3288static int cxgb4_mgmt_set_vf_rate(struct net_device *dev, int vf,
3289 int min_tx_rate, int max_tx_rate)
8ea4fae9
GG
3290{
3291 struct port_info *pi = netdev_priv(dev);
3292 struct adapter *adap = pi->adapter;
c3168cab 3293 unsigned int link_ok, speed, mtu;
8ea4fae9
GG
3294 u32 fw_pfvf, fw_class;
3295 int class_id = vf;
c3168cab 3296 int ret;
8ea4fae9
GG
3297 u16 pktsize;
3298
3299 if (vf >= adap->num_vfs)
3300 return -EINVAL;
3301
3302 if (min_tx_rate) {
3303 dev_err(adap->pdev_dev,
3304 "Min tx rate (%d) (> 0) for VF %d is Invalid.\n",
3305 min_tx_rate, vf);
3306 return -EINVAL;
3307 }
c3168cab 3308
b5e281ab
GG
3309 if (max_tx_rate == 0) {
 3310 /* unbind VF from any Traffic Class */
3311 fw_pfvf =
3312 (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_PFVF) |
3313 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_PFVF_SCHEDCLASS_ETH));
3314 fw_class = 0xffffffff;
3315 ret = t4_set_params(adap, adap->mbox, adap->pf, vf + 1, 1,
3316 &fw_pfvf, &fw_class);
3317 if (ret) {
3318 dev_err(adap->pdev_dev,
3319 "Err %d in unbinding PF %d VF %d from TX Rate Limiting\n",
3320 ret, adap->pf, vf);
3321 return -EINVAL;
3322 }
3323 dev_info(adap->pdev_dev,
3324 "PF %d VF %d is unbound from TX Rate Limiting\n",
3325 adap->pf, vf);
3326 adap->vfinfo[vf].tx_rate = 0;
3327 return 0;
3328 }
3329
c3168cab 3330 ret = t4_get_link_params(pi, &link_ok, &speed, &mtu);
8ea4fae9
GG
3331 if (ret != FW_SUCCESS) {
3332 dev_err(adap->pdev_dev,
c3168cab 3333 "Failed to get link information for VF %d\n", vf);
8ea4fae9
GG
3334 return -EINVAL;
3335 }
c3168cab 3336
8ea4fae9
GG
3337 if (!link_ok) {
3338 dev_err(adap->pdev_dev, "Link down for VF %d\n", vf);
3339 return -EINVAL;
3340 }
8ea4fae9
GG
3341
3342 if (max_tx_rate > speed) {
3343 dev_err(adap->pdev_dev,
3344 "Max tx rate %d for VF %d can't be > link-speed %u",
3345 max_tx_rate, vf, speed);
3346 return -EINVAL;
3347 }
c3168cab
GG
3348
3349 pktsize = mtu;
8ea4fae9
GG
 3350 /* subtract ethhdr size and 4 bytes crc since f/w appends them */
3351 pktsize = pktsize - sizeof(struct ethhdr) - 4;
3352 /* subtract ipv4 hdr size, tcp hdr size to get typical IPv4 MSS size */
3353 pktsize = pktsize - sizeof(struct iphdr) - sizeof(struct tcphdr);
3354 /* configure Traffic Class for rate-limiting */
3355 ret = t4_sched_params(adap, SCHED_CLASS_TYPE_PACKET,
3356 SCHED_CLASS_LEVEL_CL_RL,
3357 SCHED_CLASS_MODE_CLASS,
3358 SCHED_CLASS_RATEUNIT_BITS,
3359 SCHED_CLASS_RATEMODE_ABS,
c3168cab 3360 pi->tx_chan, class_id, 0,
4bccfc03 3361 max_tx_rate * 1000, 0, pktsize, 0);
8ea4fae9
GG
3362 if (ret) {
3363 dev_err(adap->pdev_dev, "Err %d for Traffic Class config\n",
3364 ret);
3365 return -EINVAL;
3366 }
3367 dev_info(adap->pdev_dev,
3368 "Class %d with MSS %u configured with rate %u\n",
3369 class_id, pktsize, max_tx_rate);
3370
3371 /* bind VF to configured Traffic Class */
3372 fw_pfvf = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_PFVF) |
3373 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_PFVF_SCHEDCLASS_ETH));
3374 fw_class = class_id;
3375 ret = t4_set_params(adap, adap->mbox, adap->pf, vf + 1, 1, &fw_pfvf,
3376 &fw_class);
3377 if (ret) {
3378 dev_err(adap->pdev_dev,
b5e281ab
GG
3379 "Err %d in binding PF %d VF %d to Traffic Class %d\n",
3380 ret, adap->pf, vf, class_id);
8ea4fae9
GG
3381 return -EINVAL;
3382 }
3383 dev_info(adap->pdev_dev, "PF %d VF %d is bound to Class %d\n",
3384 adap->pf, vf, class_id);
3385 adap->vfinfo[vf].tx_rate = max_tx_rate;
3386 return 0;
3387}
3388
9d5fd927
GG
3389static int cxgb4_mgmt_set_vf_vlan(struct net_device *dev, int vf,
3390 u16 vlan, u8 qos, __be16 vlan_proto)
3391{
3392 struct port_info *pi = netdev_priv(dev);
3393 struct adapter *adap = pi->adapter;
3394 int ret;
3395
3396 if (vf >= adap->num_vfs || vlan > 4095 || qos > 7)
3397 return -EINVAL;
3398
3399 if (vlan_proto != htons(ETH_P_8021Q) || qos != 0)
3400 return -EPROTONOSUPPORT;
3401
3402 ret = t4_set_vlan_acl(adap, adap->mbox, vf + 1, vlan);
3403 if (!ret) {
3404 adap->vfinfo[vf].vlan = vlan;
3405 return 0;
3406 }
3407
3408 dev_err(adap->pdev_dev, "Err %d %s VLAN ACL for PF/VF %d/%d\n",
3409 ret, (vlan ? "setting" : "clearing"), adap->pf, vf);
3410 return ret;
3411}
8b965f3f
AV
3412
3413static int cxgb4_mgmt_set_vf_link_state(struct net_device *dev, int vf,
3414 int link)
3415{
3416 struct port_info *pi = netdev_priv(dev);
3417 struct adapter *adap = pi->adapter;
3418 u32 param, val;
3419 int ret = 0;
3420
3421 if (vf >= adap->num_vfs)
3422 return -EINVAL;
3423
3424 switch (link) {
3425 case IFLA_VF_LINK_STATE_AUTO:
3426 val = FW_VF_LINK_STATE_AUTO;
3427 break;
3428
3429 case IFLA_VF_LINK_STATE_ENABLE:
3430 val = FW_VF_LINK_STATE_ENABLE;
3431 break;
3432
3433 case IFLA_VF_LINK_STATE_DISABLE:
3434 val = FW_VF_LINK_STATE_DISABLE;
3435 break;
3436
3437 default:
3438 return -EINVAL;
3439 }
3440
3441 param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_PFVF) |
3442 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_PFVF_LINK_STATE));
3443 ret = t4_set_params(adap, adap->mbox, adap->pf, vf + 1, 1,
3444 &param, &val);
3445 if (ret) {
3446 dev_err(adap->pdev_dev,
3447 "Error %d in setting PF %d VF %d link state\n",
3448 ret, adap->pf, vf);
3449 return -EINVAL;
3450 }
3451
3452 adap->vfinfo[vf].link_state = link;
3453 return ret;
3454}
9d5fd927 3455#endif /* CONFIG_PCI_IOV */
858aa65c 3456
b8ff05a9
DM
3457static int cxgb_set_mac_addr(struct net_device *dev, void *p)
3458{
3459 int ret;
3460 struct sockaddr *addr = p;
3461 struct port_info *pi = netdev_priv(dev);
3462
3463 if (!is_valid_ether_addr(addr->sa_data))
504f9b5a 3464 return -EADDRNOTAVAIL;
b8ff05a9 3465
2f0b9406
RR
3466 ret = cxgb4_update_mac_filt(pi, pi->viid, &pi->xact_addr_filt,
3467 addr->sa_data, true, &pi->smt_idx);
b8ff05a9
DM
3468 if (ret < 0)
3469 return ret;
3470
3471 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
b8ff05a9
DM
3472 return 0;
3473}
3474
b8ff05a9
DM
3475#ifdef CONFIG_NET_POLL_CONTROLLER
3476static void cxgb_netpoll(struct net_device *dev)
3477{
3478 struct port_info *pi = netdev_priv(dev);
3479 struct adapter *adap = pi->adapter;
3480
80f61f19 3481 if (adap->flags & CXGB4_USING_MSIX) {
b8ff05a9
DM
3482 int i;
3483 struct sge_eth_rxq *rx = &adap->sge.ethrxq[pi->first_qset];
3484
3485 for (i = pi->nqsets; i; i--, rx++)
3486 t4_sge_intr_msix(0, &rx->rspq);
3487 } else
3488 t4_intr_handler(adap)(0, adap);
3489}
3490#endif
3491
10a2604e
RL
3492static int cxgb_set_tx_maxrate(struct net_device *dev, int index, u32 rate)
3493{
3494 struct port_info *pi = netdev_priv(dev);
3495 struct adapter *adap = pi->adapter;
c856e2b6
RL
3496 struct ch_sched_queue qe = { 0 };
3497 struct ch_sched_params p = { 0 };
10a2604e 3498 struct sched_class *e;
10a2604e
RL
3499 u32 req_rate;
3500 int err = 0;
3501
3502 if (!can_sched(dev))
3503 return -ENOTSUPP;
3504
3505 if (index < 0 || index > pi->nqsets - 1)
3506 return -EINVAL;
3507
80f61f19 3508 if (!(adap->flags & CXGB4_FULL_INIT_DONE)) {
10a2604e
RL
3509 dev_err(adap->pdev_dev,
3510 "Failed to rate limit on queue %d. Link Down?\n",
3511 index);
3512 return -EINVAL;
3513 }
3514
c856e2b6
RL
3515 qe.queue = index;
3516 e = cxgb4_sched_queue_lookup(dev, &qe);
3517 if (e && e->info.u.params.level != SCHED_CLASS_LEVEL_CL_RL) {
3518 dev_err(adap->pdev_dev,
3519 "Queue %u already bound to class %u of type: %u\n",
3520 index, e->idx, e->info.u.params.level);
3521 return -EBUSY;
3522 }
3523
10a2604e 3524 /* Convert from Mbps to Kbps */
b3c594ab 3525 req_rate = rate * 1000;
10a2604e 3526
d185efc1 3527 /* Max rate is 100 Gbps */
b3c594ab 3528 if (req_rate > SCHED_MAX_RATE_KBPS) {
10a2604e 3529 dev_err(adap->pdev_dev,
d185efc1 3530 "Invalid rate %u Mbps, Max rate is %u Mbps\n",
b3c594ab 3531 rate, SCHED_MAX_RATE_KBPS / 1000);
10a2604e
RL
3532 return -ERANGE;
3533 }
3534
3535 /* First unbind the queue from any existing class */
3536 memset(&qe, 0, sizeof(qe));
3537 qe.queue = index;
3538 qe.class = SCHED_CLS_NONE;
3539
3540 err = cxgb4_sched_class_unbind(dev, (void *)(&qe), SCHED_QUEUE);
3541 if (err) {
3542 dev_err(adap->pdev_dev,
3543 "Unbinding Queue %d on port %d fail. Err: %d\n",
3544 index, pi->port_id, err);
3545 return err;
3546 }
3547
3548 /* Queue already unbound */
3549 if (!req_rate)
3550 return 0;
3551
3552 /* Fetch any available unused or matching scheduling class */
10a2604e
RL
3553 p.type = SCHED_CLASS_TYPE_PACKET;
3554 p.u.params.level = SCHED_CLASS_LEVEL_CL_RL;
3555 p.u.params.mode = SCHED_CLASS_MODE_CLASS;
3556 p.u.params.rateunit = SCHED_CLASS_RATEUNIT_BITS;
3557 p.u.params.ratemode = SCHED_CLASS_RATEMODE_ABS;
3558 p.u.params.channel = pi->tx_chan;
3559 p.u.params.class = SCHED_CLS_NONE;
3560 p.u.params.minrate = 0;
3561 p.u.params.maxrate = req_rate;
3562 p.u.params.weight = 0;
3563 p.u.params.pktsize = dev->mtu;
3564
3565 e = cxgb4_sched_class_alloc(dev, &p);
3566 if (!e)
3567 return -ENOMEM;
3568
3569 /* Bind the queue to a scheduling class */
3570 memset(&qe, 0, sizeof(qe));
3571 qe.queue = index;
3572 qe.class = e->idx;
3573
3574 err = cxgb4_sched_class_bind(dev, (void *)(&qe), SCHED_QUEUE);
3575 if (err)
3576 dev_err(adap->pdev_dev,
3577 "Queue rate limiting failed. Err: %d\n", err);
3578 return err;
3579}
3580
6a345b3d 3581static int cxgb_setup_tc_flower(struct net_device *dev,
f9e30088 3582 struct flow_cls_offload *cls_flower)
6a345b3d 3583{
6a345b3d 3584 switch (cls_flower->command) {
f9e30088 3585 case FLOW_CLS_REPLACE:
6a345b3d 3586 return cxgb4_tc_flower_replace(dev, cls_flower);
f9e30088 3587 case FLOW_CLS_DESTROY:
6a345b3d 3588 return cxgb4_tc_flower_destroy(dev, cls_flower);
f9e30088 3589 case FLOW_CLS_STATS:
3590 return cxgb4_tc_flower_stats(dev, cls_flower);
3591 default:
3592 return -EOPNOTSUPP;
3593 }
3594}
3595
f7323043 3596static int cxgb_setup_tc_cls_u32(struct net_device *dev,
3597 struct tc_cls_u32_offload *cls_u32)
3598{
3599 switch (cls_u32->command) {
3600 case TC_CLSU32_NEW_KNODE:
3601 case TC_CLSU32_REPLACE_KNODE:
5fd9fc4e 3602 return cxgb4_config_knode(dev, cls_u32);
f7323043 3603 case TC_CLSU32_DELETE_KNODE:
5fd9fc4e 3604 return cxgb4_delete_knode(dev, cls_u32);
3605 default:
3606 return -EOPNOTSUPP;
3607 }
3608}
3609
4ec4762d 3610static int cxgb_setup_tc_matchall(struct net_device *dev,
3611 struct tc_cls_matchall_offload *cls_matchall,
3612 bool ingress)
3613{
3614 struct adapter *adap = netdev2adap(dev);
3615
3616 if (!adap->tc_matchall)
3617 return -ENOMEM;
3618
3619 switch (cls_matchall->command) {
3620 case TC_CLSMATCHALL_REPLACE:
21c4c60b 3621 return cxgb4_tc_matchall_replace(dev, cls_matchall, ingress);
4ec4762d 3622 case TC_CLSMATCHALL_DESTROY:
3623 return cxgb4_tc_matchall_destroy(dev, cls_matchall, ingress);
3624 case TC_CLSMATCHALL_STATS:
3625 if (ingress)
3626 return cxgb4_tc_matchall_stats(dev, cls_matchall);
3627 break;
3628 default:
3629 break;
3630 }
3631
3632 return -EOPNOTSUPP;
3633}
3634
3635static int cxgb_setup_tc_block_ingress_cb(enum tc_setup_type type,
3636 void *type_data, void *cb_priv)
d8931847 3637{
cd019e91 3638 struct net_device *dev = cb_priv;
3639 struct port_info *pi = netdev2pinfo(dev);
3640 struct adapter *adap = netdev2adap(dev);
3641
80f61f19 3642 if (!(adap->flags & CXGB4_FULL_INIT_DONE)) {
3643 dev_err(adap->pdev_dev,
3644 "Failed to setup tc on port %d. Link Down?\n",
3645 pi->port_id);
3646 return -EINVAL;
3647 }
3648
2a84bbaf 3649 if (!tc_cls_can_offload_and_chain0(dev, type_data))
3650 return -EOPNOTSUPP;
3651
3652 switch (type) {
3653 case TC_SETUP_CLSU32:
de4784ca 3654 return cxgb_setup_tc_cls_u32(dev, type_data);
3655 case TC_SETUP_CLSFLOWER:
3656 return cxgb_setup_tc_flower(dev, type_data);
3657 case TC_SETUP_CLSMATCHALL:
3658 return cxgb_setup_tc_matchall(dev, type_data, true);
3659 default:
3660 return -EOPNOTSUPP;
d8931847 3661 }
3662}
3663
3664static int cxgb_setup_tc_block_egress_cb(enum tc_setup_type type,
3665 void *type_data, void *cb_priv)
3666{
3667 struct net_device *dev = cb_priv;
3668 struct port_info *pi = netdev2pinfo(dev);
3669 struct adapter *adap = netdev2adap(dev);
3670
3671 if (!(adap->flags & CXGB4_FULL_INIT_DONE)) {
3672 dev_err(adap->pdev_dev,
3673 "Failed to setup tc on port %d. Link Down?\n",
3674 pi->port_id);
3675 return -EINVAL;
3676 }
3677
3678 if (!tc_cls_can_offload_and_chain0(dev, type_data))
3679 return -EOPNOTSUPP;
3680
3681 switch (type) {
3682 case TC_SETUP_CLSMATCHALL:
21c4c60b 3683 return cxgb_setup_tc_matchall(dev, type_data, false);
3684 default:
3685 break;
3686 }
3687
3688 return -EOPNOTSUPP;
3689}
3690
3691static int cxgb_setup_tc_mqprio(struct net_device *dev,
3692 struct tc_mqprio_qopt_offload *mqprio)
3693{
3694 struct adapter *adap = netdev2adap(dev);
3695
3696 if (!is_ethofld(adap) || !adap->tc_mqprio)
3697 return -ENOMEM;
3698
3699 return cxgb4_setup_tc_mqprio(dev, mqprio);
3700}
3701
3702static LIST_HEAD(cxgb_block_cb_list);
3703
3704static int cxgb_setup_tc_block(struct net_device *dev,
3705 struct flow_block_offload *f)
3706{
3707 struct port_info *pi = netdev_priv(dev);
3708 flow_setup_cb_t *cb;
3709 bool ingress_only;
3710
3711 pi->tc_block_shared = f->block_shared;
3712 if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS) {
3713 cb = cxgb_setup_tc_block_egress_cb;
3714 ingress_only = false;
3715 } else {
3716 cb = cxgb_setup_tc_block_ingress_cb;
3717 ingress_only = true;
3718 }
3719
3720 return flow_block_cb_setup_simple(f, &cxgb_block_cb_list,
3721 cb, pi, dev, ingress_only);
3722}
3723
3724static int cxgb_setup_tc(struct net_device *dev, enum tc_setup_type type,
3725 void *type_data)
3726{
3727 switch (type) {
3728 case TC_SETUP_QDISC_MQPRIO:
3729 return cxgb_setup_tc_mqprio(dev, type_data);
cd019e91 3730 case TC_SETUP_BLOCK:
4ec4762d 3731 return cxgb_setup_tc_block(dev, type_data);
3732 default:
3733 return -EOPNOTSUPP;
3734 }
3735}
3736
3737static int cxgb_udp_tunnel_unset_port(struct net_device *netdev,
3738 unsigned int table, unsigned int entry,
3739 struct udp_tunnel_info *ti)
3740{
3741 struct port_info *pi = netdev_priv(netdev);
3742 struct adapter *adapter = pi->adapter;
3743 u8 match_all_mac[] = { 0, 0, 0, 0, 0, 0 };
3744 int ret = 0, i;
3745
3746 switch (ti->type) {
3747 case UDP_TUNNEL_TYPE_VXLAN:
3748 adapter->vxlan_port = 0;
3749 t4_write_reg(adapter, MPS_RX_VXLAN_TYPE_A, 0);
3750 break;
c746fc0e 3751 case UDP_TUNNEL_TYPE_GENEVE:
3752 adapter->geneve_port = 0;
3753 t4_write_reg(adapter, MPS_RX_GENEVE_TYPE_A, 0);
942a656f 3754 break;
846eac3f 3755 default:
ad166a8e 3756 return -EINVAL;
3757 }
3758
3759 /* Matchall mac entries can be deleted only after all tunnel ports
3760 * are brought down or removed.
3761 */
3762 if (!adapter->rawf_cnt)
ad166a8e 3763 return 0;
3764 for_each_port(adapter, i) {
3765 pi = adap2pinfo(adapter, i);
3766 ret = t4_free_raw_mac_filt(adapter, pi->viid,
3767 match_all_mac, match_all_mac,
ad166a8e 3768 adapter->rawf_start + pi->port_id,
443e2dab 3769 1, pi->port_id, false);
3770 if (ret < 0) {
3771 netdev_info(netdev, "Failed to free mac filter entry, for port %d\n",
3772 i);
ad166a8e 3773 return ret;
846eac3f 3774 }
846eac3f 3775 }
3776
3777 return 0;
3778}
3779
3780static int cxgb_udp_tunnel_set_port(struct net_device *netdev,
3781 unsigned int table, unsigned int entry,
3782 struct udp_tunnel_info *ti)
3783{
3784 struct port_info *pi = netdev_priv(netdev);
3785 struct adapter *adapter = pi->adapter;
3786 u8 match_all_mac[] = { 0, 0, 0, 0, 0, 0 };
3787 int i, ret;
3788
3789 switch (ti->type) {
3790 case UDP_TUNNEL_TYPE_VXLAN:
846eac3f 3791 adapter->vxlan_port = ti->port;
3792 t4_write_reg(adapter, MPS_RX_VXLAN_TYPE_A,
3793 VXLAN_V(be16_to_cpu(ti->port)) | VXLAN_EN_F);
3794 break;
c746fc0e 3795 case UDP_TUNNEL_TYPE_GENEVE:
c746fc0e 3796 adapter->geneve_port = ti->port;
3797 t4_write_reg(adapter, MPS_RX_GENEVE_TYPE_A,
3798 GENEVE_V(be16_to_cpu(ti->port)) | GENEVE_EN_F);
942a656f 3799 break;
846eac3f 3800 default:
ad166a8e 3801 return -EINVAL;
3802 }
3803
3804 /* Create a 'match all' mac filter entry for inner mac,
3805 * if raw mac interface is supported. Once the linux kernel provides
3806 * driver entry points for adding/deleting the inner mac addresses,
3807 * we will remove this 'match all' entry and fall back to adding
3808 * exact match filters.
3809 */
3810 for_each_port(adapter, i) {
3811 pi = adap2pinfo(adapter, i);
3812
3813 ret = t4_alloc_raw_mac_filt(adapter, pi->viid,
3814 match_all_mac,
3815 match_all_mac,
ad166a8e 3816 adapter->rawf_start + pi->port_id,
443e2dab 3817 1, pi->port_id, false);
3818 if (ret < 0) {
3819 netdev_info(netdev, "Failed to allocate a mac filter entry, not adding port %d\n",
3820 be16_to_cpu(ti->port));
ad166a8e 3821 return ret;
3822 }
3823 }
3824
3825 return 0;
3826}
3827
3828static const struct udp_tunnel_nic_info cxgb_udp_tunnels = {
3829 .set_port = cxgb_udp_tunnel_set_port,
3830 .unset_port = cxgb_udp_tunnel_unset_port,
3831 .tables = {
3832 { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
3833 { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, },
3834 },
3835};
3836
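 /* Usage note: the udp_tunnel core drives this table. When a VXLAN or
  * GENEVE socket is opened or closed it invokes .set_port/.unset_port
  * for the matching table entry, so the driver never has to track
  * tunnel sockets itself; the probe path elsewhere in this driver
  * points netdev->udp_tunnel_nic_info at this structure to opt in.
  */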
3837static netdev_features_t cxgb_features_check(struct sk_buff *skb,
3838 struct net_device *dev,
3839 netdev_features_t features)
3840{
3841 struct port_info *pi = netdev_priv(dev);
3842 struct adapter *adapter = pi->adapter;
3843
3844 if (CHELSIO_CHIP_VERSION(adapter->params.chip) < CHELSIO_T6)
3845 return features;
3846
3847 /* Check if hw supports offload for this packet */
3848 if (!skb->encapsulation || cxgb_encap_offload_supported(skb))
3849 return features;
3850
3851 /* Offload is not supported for this encapsulated packet */
3852 return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
3853}
3854
3855static netdev_features_t cxgb_fix_features(struct net_device *dev,
3856 netdev_features_t features)
3857{
3858 /* Disable GRO, if RX_CSUM is disabled */
3859 if (!(features & NETIF_F_RXCSUM))
3860 features &= ~NETIF_F_GRO;
3861
3862 return features;
3863}
3864
3865static const struct net_device_ops cxgb4_netdev_ops = {
3866 .ndo_open = cxgb_open,
3867 .ndo_stop = cxgb_close,
d5fbda61 3868 .ndo_start_xmit = t4_start_xmit,
688848b1 3869 .ndo_select_queue = cxgb_select_queue,
9be793bf 3870 .ndo_get_stats64 = cxgb_get_stats,
3871 .ndo_set_rx_mode = cxgb_set_rxmode,
3872 .ndo_set_mac_address = cxgb_set_mac_addr,
2ed28baa 3873 .ndo_set_features = cxgb_set_features,
b8ff05a9 3874 .ndo_validate_addr = eth_validate_addr,
a7605370 3875 .ndo_eth_ioctl = cxgb_ioctl,
b8ff05a9 3876 .ndo_change_mtu = cxgb_change_mtu,
3877#ifdef CONFIG_NET_POLL_CONTROLLER
3878 .ndo_poll_controller = cxgb_netpoll,
3879#endif
3880#ifdef CONFIG_CHELSIO_T4_FCOE
3881 .ndo_fcoe_enable = cxgb_fcoe_enable,
3882 .ndo_fcoe_disable = cxgb_fcoe_disable,
3883#endif /* CONFIG_CHELSIO_T4_FCOE */
10a2604e 3884 .ndo_set_tx_maxrate = cxgb_set_tx_maxrate,
d8931847 3885 .ndo_setup_tc = cxgb_setup_tc,
4621ffd6 3886 .ndo_features_check = cxgb_features_check,
90592b9a 3887 .ndo_fix_features = cxgb_fix_features,
3888};
3889
858aa65c 3890#ifdef CONFIG_PCI_IOV
e7b48a32 3891static const struct net_device_ops cxgb4_mgmt_netdev_ops = {
3892 .ndo_open = cxgb4_mgmt_open,
3893 .ndo_set_vf_mac = cxgb4_mgmt_set_vf_mac,
3894 .ndo_get_vf_config = cxgb4_mgmt_get_vf_config,
3895 .ndo_set_vf_rate = cxgb4_mgmt_set_vf_rate,
3896 .ndo_get_phys_port_id = cxgb4_mgmt_get_phys_port_id,
3897 .ndo_set_vf_vlan = cxgb4_mgmt_set_vf_vlan,
3898 .ndo_set_vf_link_state = cxgb4_mgmt_set_vf_link_state,
3899};
3900
3901static void cxgb4_mgmt_get_drvinfo(struct net_device *dev,
3902 struct ethtool_drvinfo *info)
3903{
3904 struct adapter *adapter = netdev2adap(dev);
3905
3906 strlcpy(info->driver, cxgb4_driver_name, sizeof(info->driver));
3907 strlcpy(info->bus_info, pci_name(adapter->pdev),
3908 sizeof(info->bus_info));
3909}
3910
3911static const struct ethtool_ops cxgb4_mgmt_ethtool_ops = {
baf50868 3912 .get_drvinfo = cxgb4_mgmt_get_drvinfo,
7829451c 3913};
6990c7f4 3914#endif
7829451c 3915
3916static void notify_fatal_err(struct work_struct *work)
3917{
3918 struct adapter *adap;
3919
3920 adap = container_of(work, struct adapter, fatal_err_notify_task);
3921 notify_ulds(adap, CXGB4_STATE_FATAL_ERROR);
3922}
3923
3924void t4_fatal_err(struct adapter *adap)
3925{
3926 int port;
3927
3928 if (pci_channel_offline(adap->pdev))
3929 return;
3930
3931 /* Disable the SGE since ULDs are going to free resources that
3932 * could be exposed to the adapter. RDMA MWs for example...
3933 */
3934 t4_shutdown_adapter(adap);
3935 for_each_port(adap, port) {
3936 struct net_device *dev = adap->port[port];
3937
3938 /* If we get here in very early initialization the network
3939 * devices may not have been set up yet.
3940 */
3941 if (!dev)
3942 continue;
3943
3944 netif_tx_stop_all_queues(dev);
3945 netif_carrier_off(dev);
3946 }
b8ff05a9 3947 dev_alert(adap->pdev_dev, "encountered fatal error, adapter stopped\n");
8b7372c1 3948 queue_work(adap->workq, &adap->fatal_err_notify_task);
3949}
3950
3951static void setup_memwin(struct adapter *adap)
3952{
b562fc37 3953 u32 nic_win_base = t4_get_util_window(adap);
b8ff05a9 3954
b562fc37 3955 t4_setup_memwin(adap, nic_win_base, MEMWIN_NIC);
3956}
3957
3958static void setup_memwin_rdma(struct adapter *adap)
3959{
1ae970e0 3960 if (adap->vres.ocq.size) {
3961 u32 start;
3962 unsigned int sz_kb;
1ae970e0 3963
3964 start = t4_read_pcie_cfg4(adap, PCI_BASE_ADDRESS_2);
3965 start &= PCI_BASE_ADDRESS_MEM_MASK;
3966 start += OCQ_WIN_OFFSET(adap->pdev, &adap->vres);
3967 sz_kb = roundup_pow_of_two(adap->vres.ocq.size) >> 10;
3968 t4_write_reg(adap,
3969 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, 3),
3970 start | BIR_V(1) | WINDOW_V(ilog2(sz_kb)));
1ae970e0 3971 t4_write_reg(adap,
f061de42 3972 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A, 3),
3973 adap->vres.ocq.start);
3974 t4_read_reg(adap,
f061de42 3975 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A, 3));
1ae970e0 3976 }
3977}
3978
3979/* HMA Definitions */
3980
3981/* The maximum number of address that can be send in a single FW cmd */
3982#define HMA_MAX_ADDR_IN_CMD 5
3983
3984#define HMA_PAGE_SIZE PAGE_SIZE
3985
3986#define HMA_MAX_NO_FW_ADDRESS (16 << 10) /* FW supports 16K addresses */
3987
3988#define HMA_PAGE_ORDER \
3989 ((HMA_PAGE_SIZE < HMA_MAX_NO_FW_ADDRESS) ? \
3990 ilog2(HMA_MAX_NO_FW_ADDRESS / HMA_PAGE_SIZE) : 0)
3991
3992/* The minimum and maximum possible HMA sizes that can be specified in the FW
3993 * configuration (in units of MB).
3994 */
3995#define HMA_MIN_TOTAL_SIZE 1
3996#define HMA_MAX_TOTAL_SIZE \
3997 (((HMA_PAGE_SIZE << HMA_PAGE_ORDER) * \
3998 HMA_MAX_NO_FW_ADDRESS) >> 20)
3999
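/* Worked example of the macros above, assuming the common 4KB PAGE_SIZE:
 * HMA_PAGE_ORDER = ilog2(16384 / 4096) = 2, so each HMA chunk is an
 * order-2 (16KB) allocation, and
 * HMA_MAX_TOTAL_SIZE = ((4096 << 2) * 16384) >> 20 = 256MB.
 */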
4000static void adap_free_hma_mem(struct adapter *adapter)
4001{
4002 struct scatterlist *iter;
4003 struct page *page;
4004 int i;
4005
4006 if (!adapter->hma.sgt)
4007 return;
4008
4009 if (adapter->hma.flags & HMA_DMA_MAPPED_FLAG) {
4010 dma_unmap_sg(adapter->pdev_dev, adapter->hma.sgt->sgl,
4011 adapter->hma.sgt->nents, PCI_DMA_BIDIRECTIONAL);
4012 adapter->hma.flags &= ~HMA_DMA_MAPPED_FLAG;
4013 }
4014
4015 for_each_sg(adapter->hma.sgt->sgl, iter,
4016 adapter->hma.sgt->orig_nents, i) {
4017 page = sg_page(iter);
4018 if (page)
4019 __free_pages(page, HMA_PAGE_ORDER);
4020 }
4021
4022 kfree(adapter->hma.phy_addr);
4023 sg_free_table(adapter->hma.sgt);
4024 kfree(adapter->hma.sgt);
4025 adapter->hma.sgt = NULL;
4026}
4027
4028static int adap_config_hma(struct adapter *adapter)
4029{
4030 struct scatterlist *sgl, *iter;
4031 struct sg_table *sgt;
4032 struct page *newpage;
4033 unsigned int i, j, k;
4034 u32 param, hma_size;
4035 unsigned int ncmds;
4036 size_t page_size;
4037 u32 page_order;
4038 int node, ret;
4039
4040 /* HMA is supported only for T6+ cards.
4041 * Avoid initializing HMA in kdump kernels.
4042 */
4043 if (is_kdump_kernel() ||
4044 CHELSIO_CHIP_VERSION(adapter->params.chip) < CHELSIO_T6)
4045 return 0;
4046
4047 /* Get the HMA region size required by fw */
4048 param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
4049 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_HMA_SIZE));
4050 ret = t4_query_params(adapter, adapter->mbox, adapter->pf, 0,
4051 1, &param, &hma_size);
4052 /* An error means the card has its own memory or HMA is not supported by
4053 * the firmware. Return without any errors.
4054 */
4055 if (ret || !hma_size)
4056 return 0;
4057
4058 if (hma_size < HMA_MIN_TOTAL_SIZE ||
4059 hma_size > HMA_MAX_TOTAL_SIZE) {
4060 dev_err(adapter->pdev_dev,
4061 "HMA size %uMB beyond bounds(%u-%lu)MB\n",
4062 hma_size, HMA_MIN_TOTAL_SIZE, HMA_MAX_TOTAL_SIZE);
4063 return -EINVAL;
4064 }
4065
4066 page_size = HMA_PAGE_SIZE;
4067 page_order = HMA_PAGE_ORDER;
4068 adapter->hma.sgt = kzalloc(sizeof(*adapter->hma.sgt), GFP_KERNEL);
4069 if (unlikely(!adapter->hma.sgt)) {
4070 dev_err(adapter->pdev_dev, "HMA SG table allocation failed\n");
4071 return -ENOMEM;
4072 }
4073 sgt = adapter->hma.sgt;
4074 /* FW returned value will be in MB's
4075 */
4076 sgt->orig_nents = (hma_size << 20) / (page_size << page_order);
4077 if (sg_alloc_table(sgt, sgt->orig_nents, GFP_KERNEL)) {
4078 dev_err(adapter->pdev_dev, "HMA SGL allocation failed\n");
4079 kfree(adapter->hma.sgt);
4080 adapter->hma.sgt = NULL;
4081 return -ENOMEM;
4082 }
4083
4084 sgl = adapter->hma.sgt->sgl;
4085 node = dev_to_node(adapter->pdev_dev);
4086 for_each_sg(sgl, iter, sgt->orig_nents, i) {
4087 newpage = alloc_pages_node(node, __GFP_NOWARN | GFP_KERNEL |
4088 __GFP_ZERO, page_order);
4089 if (!newpage) {
4090 dev_err(adapter->pdev_dev,
4091 "Not enough memory for HMA page allocation\n");
4092 ret = -ENOMEM;
4093 goto free_hma;
4094 }
4095 sg_set_page(iter, newpage, page_size << page_order, 0);
4096 }
4097
4098 sgt->nents = dma_map_sg(adapter->pdev_dev, sgl, sgt->orig_nents,
4099 DMA_BIDIRECTIONAL);
4100 if (!sgt->nents) {
4101 dev_err(adapter->pdev_dev,
4102 "Not enough memory for HMA DMA mapping");
4103 ret = -ENOMEM;
4104 goto free_hma;
4105 }
4106 adapter->hma.flags |= HMA_DMA_MAPPED_FLAG;
4107
4108 adapter->hma.phy_addr = kcalloc(sgt->nents, sizeof(dma_addr_t),
4109 GFP_KERNEL);
4110 if (unlikely(!adapter->hma.phy_addr))
4111 goto free_hma;
4112
4113 for_each_sg(sgl, iter, sgt->nents, i) {
4114 newpage = sg_page(iter);
4115 adapter->hma.phy_addr[i] = sg_dma_address(iter);
4116 }
4117
4118 ncmds = DIV_ROUND_UP(sgt->nents, HMA_MAX_ADDR_IN_CMD);
4119 /* Pass on the addresses to firmware */
4120 for (i = 0, k = 0; i < ncmds; i++, k += HMA_MAX_ADDR_IN_CMD) {
4121 struct fw_hma_cmd hma_cmd;
4122 u8 naddr = HMA_MAX_ADDR_IN_CMD;
4123 u8 soc = 0, eoc = 0;
4124 u8 hma_mode = 1; /* Presently we support only Page table mode */
4125
4126 soc = (i == 0) ? 1 : 0;
4127 eoc = (i == ncmds - 1) ? 1 : 0;
4128
4129 /* For last cmd, set naddr corresponding to remaining
4130 * addresses
4131 */
4132 if (i == ncmds - 1) {
4133 naddr = sgt->nents % HMA_MAX_ADDR_IN_CMD;
4134 naddr = naddr ? naddr : HMA_MAX_ADDR_IN_CMD;
4135 }
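 /* e.g. 12 DMA-mapped entries yield ncmds = DIV_ROUND_UP(12, 5) = 3
  * commands carrying 5, 5 and finally naddr = 12 % 5 = 2 addresses.
  */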
4136 memset(&hma_cmd, 0, sizeof(hma_cmd));
4137 hma_cmd.op_pkd = htonl(FW_CMD_OP_V(FW_HMA_CMD) |
4138 FW_CMD_REQUEST_F | FW_CMD_WRITE_F);
4139 hma_cmd.retval_len16 = htonl(FW_LEN16(hma_cmd));
4140
4141 hma_cmd.mode_to_pcie_params =
4142 htonl(FW_HMA_CMD_MODE_V(hma_mode) |
4143 FW_HMA_CMD_SOC_V(soc) | FW_HMA_CMD_EOC_V(eoc));
4144
4145 /* HMA cmd size specified in MB's */
4146 hma_cmd.naddr_size =
4147 htonl(FW_HMA_CMD_SIZE_V(hma_size) |
4148 FW_HMA_CMD_NADDR_V(naddr));
4149
4150 /* Total Page size specified in units of 4K */
4151 hma_cmd.addr_size_pkd =
4152 htonl(FW_HMA_CMD_ADDR_SIZE_V
4153 ((page_size << page_order) >> 12));
4154
4155 /* Fill the 5 addresses */
4156 for (j = 0; j < naddr; j++) {
4157 hma_cmd.phy_address[j] =
4158 cpu_to_be64(adapter->hma.phy_addr[j + k]);
4159 }
4160 ret = t4_wr_mbox(adapter, adapter->mbox, &hma_cmd,
4161 sizeof(hma_cmd), &hma_cmd);
4162 if (ret) {
4163 dev_err(adapter->pdev_dev,
4164 "HMA FW command failed with err %d\n", ret);
4165 goto free_hma;
4166 }
4167 }
4168
4169 if (!ret)
4170 dev_info(adapter->pdev_dev,
4171 "Reserved %uMB host memory for HMA\n", hma_size);
4172 return ret;
4173
4174free_hma:
4175 adap_free_hma_mem(adapter);
4176 return ret;
4177}
4178
4179static int adap_init1(struct adapter *adap, struct fw_caps_config_cmd *c)
4180{
4181 u32 v;
4182 int ret;
4183
4184 /* Now that we've successfully configured and initialized the adapter
4185 * we can ask the Firmware what resources it has provisioned for us.
4186 */
4187 ret = t4_get_pfres(adap);
4188 if (ret) {
4189 dev_err(adap->pdev_dev,
4190 "Unable to retrieve resource provisioning information\n");
4191 return ret;
4192 }
4193
4194 /* get device capabilities */
4195 memset(c, 0, sizeof(*c));
4196 c->op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
4197 FW_CMD_REQUEST_F | FW_CMD_READ_F);
ce91a923 4198 c->cfvalid_to_len16 = htonl(FW_LEN16(*c));
b2612722 4199 ret = t4_wr_mbox(adap, adap->mbox, c, sizeof(*c), c);
4200 if (ret < 0)
4201 return ret;
4202
4203 c->op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
4204 FW_CMD_REQUEST_F | FW_CMD_WRITE_F);
b2612722 4205 ret = t4_wr_mbox(adap, adap->mbox, c, sizeof(*c), NULL);
4206 if (ret < 0)
4207 return ret;
4208
b2612722 4209 ret = t4_config_glbl_rss(adap, adap->pf,
02b5fb8e 4210 FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL,
4211 FW_RSS_GLB_CONFIG_CMD_TNLMAPEN_F |
4212 FW_RSS_GLB_CONFIG_CMD_TNLALLLKP_F);
4213 if (ret < 0)
4214 return ret;
4215
b2612722 4216 ret = t4_cfg_pfvf(adap, adap->mbox, adap->pf, 0, adap->sge.egr_sz, 64,
4217 MAX_INGQ, 0, 0, 4, 0xf, 0xf, 16, FW_CMD_CAP_PF,
4218 FW_CMD_CAP_PF);
4219 if (ret < 0)
4220 return ret;
4221
4222 t4_sge_init(adap);
4223
02b5fb8e 4224 /* tweak some settings */
837e4a42 4225 t4_write_reg(adap, TP_SHIFT_CNT_A, 0x64f8849);
0d804338 4226 t4_write_reg(adap, ULP_RX_TDDP_PSZ_A, HPZ0_V(PAGE_SHIFT - 12));
4227 t4_write_reg(adap, TP_PIO_ADDR_A, TP_INGRESS_CONFIG_A);
4228 v = t4_read_reg(adap, TP_PIO_DATA_A);
4229 t4_write_reg(adap, TP_PIO_DATA_A, v & ~CSUM_HAS_PSEUDO_HDR_F);
060e0c75 4230
4231 /* first 4 Tx modulation queues point to consecutive Tx channels */
4232 adap->params.tp.tx_modq_map = 0xE4;
4233 t4_write_reg(adap, TP_TX_MOD_QUEUE_REQ_MAP_A,
4234 TX_MOD_QUEUE_REQ_MAP_V(adap->params.tp.tx_modq_map));
4235
4236 /* associate each Tx modulation queue with consecutive Tx channels */
4237 v = 0x84218421;
837e4a42 4238 t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A,
0d804338 4239 &v, 1, TP_TX_SCHED_HDR_A);
837e4a42 4240 t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A,
0d804338 4241 &v, 1, TP_TX_SCHED_FIFO_A);
837e4a42 4242 t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A,
0d804338 4243 &v, 1, TP_TX_SCHED_PCMD_A);
4244
4245#define T4_TX_MODQ_10G_WEIGHT_DEFAULT 16 /* in KB units */
4246 if (is_offload(adap)) {
4247 t4_write_reg(adap, TP_TX_MOD_QUEUE_WEIGHT0_A,
4248 TX_MODQ_WEIGHT0_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
4249 TX_MODQ_WEIGHT1_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
4250 TX_MODQ_WEIGHT2_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
4251 TX_MODQ_WEIGHT3_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT));
4252 t4_write_reg(adap, TP_TX_MOD_CHANNEL_WEIGHT_A,
4253 TX_MODQ_WEIGHT0_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
4254 TX_MODQ_WEIGHT1_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
4255 TX_MODQ_WEIGHT2_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
4256 TX_MODQ_WEIGHT3_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT));
4257 }
4258
060e0c75 4259 /* get basic stuff going */
b2612722 4260 return t4_early_init(adap, adap->pf);
4261}
4262
4263/*
4264 * Max # of ATIDs. The absolute HW max is 16K but we keep it lower.
4265 */
4266#define MAX_ATIDS 8192U
4267
4268/*
4269 * Phase 0 of initialization: contact FW, obtain config, perform basic init.
4270 *
4271 * If the firmware we're dealing with has Configuration File support, then
4272 * we use that to perform all configuration
4273 */
4274
4275/*
4276 * Tweak configuration based on module parameters, etc. Most of these have
4277 * defaults assigned to them by Firmware Configuration Files (if we're using
4278 * them) but need to be explicitly set if we're using hard-coded
4279 * initialization. But even in the case of using Firmware Configuration
4280 * Files, we'd like to expose the ability to change these via module
4281 * parameters so these are essentially common tweaks/settings for
4282 * Configuration Files and hard-coded initialization ...
4283 */
4284static int adap_init0_tweaks(struct adapter *adapter)
4285{
4286 /*
4287 * Fix up various Host-Dependent Parameters like Page Size, Cache
4288 * Line Size, etc. The firmware default is for a 4KB Page Size and
4289 * 64B Cache Line Size ...
4290 */
4291 t4_fixup_host_params(adapter, PAGE_SIZE, L1_CACHE_BYTES);
4292
4293 /*
4294 * Process module parameters which affect early initialization.
4295 */
4296 if (rx_dma_offset != 2 && rx_dma_offset != 0) {
4297 dev_err(&adapter->pdev->dev,
4298 "Ignoring illegal rx_dma_offset=%d, using 2\n",
4299 rx_dma_offset);
4300 rx_dma_offset = 2;
4301 }
4302 t4_set_reg_field(adapter, SGE_CONTROL_A,
4303 PKTSHIFT_V(PKTSHIFT_M),
4304 PKTSHIFT_V(rx_dma_offset));
4305
4306 /*
4307 * Don't include the "IP Pseudo Header" in CPL_RX_PKT checksums: Linux
4308 * adds the pseudo header itself.
4309 */
4310 t4_tp_wr_bits_indirect(adapter, TP_INGRESS_CONFIG_A,
4311 CSUM_HAS_PSEUDO_HDR_F, 0);
4312
4313 return 0;
4314}
4315
4316/* 10Gb/s-BT PHY Support. chip-external 10Gb/s-BT PHYs are complex chips
4317 * unto themselves and they contain their own firmware to perform their
4318 * tasks ...
4319 */
4320static int phy_aq1202_version(const u8 *phy_fw_data,
4321 size_t phy_fw_size)
4322{
4323 int offset;
4324
4325 /* At offset 0x8 you're looking for the primary image's
4326 * starting offset which is 3 Bytes wide
4327 *
4328 * At offset 0xa of the primary image, you look for the offset
4329 * of the DRAM segment which is 3 Bytes wide.
4330 *
4331 * The FW version is at offset 0x27e of the DRAM and is 2 Bytes
4332 * wide
4333 */
4334 #define be16(__p) (((__p)[0] << 8) | (__p)[1])
4335 #define le16(__p) ((__p)[0] | ((__p)[1] << 8))
4336 #define le24(__p) (le16(__p) | ((__p)[2] << 16))
4337
4338 offset = le24(phy_fw_data + 0x8) << 12;
4339 offset = le24(phy_fw_data + offset + 0xa);
4340 return be16(phy_fw_data + offset + 0x27e);
4341
4342 #undef be16
4343 #undef le16
4344 #undef le24
4345}
4346
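/* Worked example of the lookup above, with hypothetical contents: if the
 * 3 bytes at 0x8 hold the little-endian value 0x10, the primary image
 * starts at 0x10 << 12 = 0x10000; the 3 bytes at 0x1000a then give the
 * DRAM segment offset, and the big-endian halfword at that segment
 * offset + 0x27e is the version that gets returned.
 */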
4347static struct info_10gbt_phy_fw {
4348 unsigned int phy_fw_id; /* PCI Device ID */
4349 char *phy_fw_file; /* /lib/firmware/ PHY Firmware file */
4350 int (*phy_fw_version)(const u8 *phy_fw_data, size_t phy_fw_size);
4351 int phy_flash; /* Has FLASH for PHY Firmware */
4352} phy_info_array[] = {
4353 {
4354 PHY_AQ1202_DEVICEID,
4355 PHY_AQ1202_FIRMWARE,
4356 phy_aq1202_version,
4357 1,
4358 },
4359 {
4360 PHY_BCM84834_DEVICEID,
4361 PHY_BCM84834_FIRMWARE,
4362 NULL,
4363 0,
4364 },
4365 { 0, NULL, NULL },
4366};
4367
4368static struct info_10gbt_phy_fw *find_phy_info(int devid)
4369{
4370 int i;
4371
4372 for (i = 0; i < ARRAY_SIZE(phy_info_array); i++) {
4373 if (phy_info_array[i].phy_fw_id == devid)
4374 return &phy_info_array[i];
4375 }
4376 return NULL;
4377}
4378
4379/* Handle updating of chip-external 10Gb/s-BT PHY firmware. This needs to
4380 * happen after the FW_RESET_CMD but before the FW_INITIALIZE_CMD. On error
4381 * we return a negative error number. If we transfer new firmware we return 1
4382 * (from t4_load_phy_fw()). If we don't do anything we return 0.
4383 */
4384static int adap_init0_phy(struct adapter *adap)
4385{
4386 const struct firmware *phyf;
4387 int ret;
4388 struct info_10gbt_phy_fw *phy_info;
4389
4390 /* Use the device ID to determine which PHY file to flash.
4391 */
4392 phy_info = find_phy_info(adap->pdev->device);
4393 if (!phy_info) {
4394 dev_warn(adap->pdev_dev,
4395 "No PHY Firmware file found for this PHY\n");
4396 return -EOPNOTSUPP;
4397 }
4398
4399 /* If we have a T4 PHY firmware file under /lib/firmware/cxgb4/, then
4400 * use that. The adapter firmware provides us with a memory buffer
4401 * where we can load a PHY firmware file from the host if we want to
4402 * override the PHY firmware File in flash.
4403 */
4404 ret = request_firmware_direct(&phyf, phy_info->phy_fw_file,
4405 adap->pdev_dev);
4406 if (ret < 0) {
4407 /* For adapters without FLASH attached to PHY for their
4408 * firmware, it's obviously a fatal error if we can't get the
4409 * firmware to the adapter. For adapters with PHY firmware
4410 * FLASH storage, it's worth a warning if we can't find the
4411 * PHY Firmware but we'll neuter the error ...
4412 */
4413 dev_err(adap->pdev_dev, "unable to find PHY Firmware image "
4414 "/lib/firmware/%s, error %d\n",
4415 phy_info->phy_fw_file, -ret);
4416 if (phy_info->phy_flash) {
4417 int cur_phy_fw_ver = 0;
4418
4419 t4_phy_fw_ver(adap, &cur_phy_fw_ver);
4420 dev_warn(adap->pdev_dev, "continuing with, on-adapter "
4421 "FLASH copy, version %#x\n", cur_phy_fw_ver);
4422 ret = 0;
4423 }
4424
4425 return ret;
4426 }
4427
4428 /* Load PHY Firmware onto adapter.
4429 */
5fff701c 4430 ret = t4_load_phy_fw(adap, MEMWIN_NIC, phy_info->phy_fw_version,
4431 (u8 *)phyf->data, phyf->size);
4432 if (ret < 0)
4433 dev_err(adap->pdev_dev, "PHY Firmware transfer error %d\n",
4434 -ret);
4435 else if (ret > 0) {
4436 int new_phy_fw_ver = 0;
4437
4438 if (phy_info->phy_fw_version)
4439 new_phy_fw_ver = phy_info->phy_fw_version(phyf->data,
4440 phyf->size);
4441 dev_info(adap->pdev_dev, "Successfully transferred PHY "
4442 "Firmware /lib/firmware/%s, version %#x\n",
4443 phy_info->phy_fw_file, new_phy_fw_ver);
4444 }
4445
4446 release_firmware(phyf);
4447
4448 return ret;
4449}
4450
4451/*
4452 * Attempt to initialize the adapter via a Firmware Configuration File.
4453 */
4454static int adap_init0_config(struct adapter *adapter, int reset)
4455{
4456 char *fw_config_file, fw_config_file_path[256];
4457 u32 finiver, finicsum, cfcsum, param, val;
636f9d37 4458 struct fw_caps_config_cmd caps_cmd;
636f9d37 4459 unsigned long mtype = 0, maddr = 0;
74dd5aa1 4460 const struct firmware *cf;
16e47624 4461 char *config_name = NULL;
4462 int config_issued = 0;
4463 int ret;
4464
4465 /*
4466 * Reset device if necessary.
4467 */
4468 if (reset) {
4469 ret = t4_fw_reset(adapter, adapter->mbox,
0d804338 4470 PIORSTMODE_F | PIORST_F);
4471 if (ret < 0)
4472 goto bye;
4473 }
4474
4475 /* If this is a 10Gb/s-BT adapter make sure the chip-external
4476 * 10Gb/s-BT PHYs have up-to-date firmware. Note that this step needs
4477 * to be performed after any global adapter RESET above since some
4478 * PHYs only have local RAM copies of the PHY firmware.
4479 */
4480 if (is_10gbt_device(adapter->pdev->device)) {
4481 ret = adap_init0_phy(adapter);
4482 if (ret < 0)
4483 goto bye;
4484 }
4485 /*
4486 * If we have a T4 configuration file under /lib/firmware/cxgb4/,
4487 * then use that. Otherwise, use the configuration file stored
4488 * in the adapter flash ...
4489 */
d14807dd 4490 switch (CHELSIO_CHIP_VERSION(adapter->params.chip)) {
0a57a536 4491 case CHELSIO_T4:
16e47624 4492 fw_config_file = FW4_CFNAME;
4493 break;
4494 case CHELSIO_T5:
4495 fw_config_file = FW5_CFNAME;
4496 break;
4497 case CHELSIO_T6:
4498 fw_config_file = FW6_CFNAME;
4499 break;
4500 default:
4501 dev_err(adapter->pdev_dev, "Device %d is not supported\n",
4502 adapter->pdev->device);
4503 ret = -EINVAL;
4504 goto bye;
4505 }
4506
4507 ret = request_firmware(&cf, fw_config_file, adapter->pdev_dev);
636f9d37 4508 if (ret < 0) {
16e47624 4509 config_name = "On FLASH";
4510 mtype = FW_MEMTYPE_CF_FLASH;
4511 maddr = t4_flash_cfg_addr(adapter);
4512 } else {
4513 u32 params[7], val[7];
4514
4515 sprintf(fw_config_file_path,
4516 "/lib/firmware/%s", fw_config_file);
4517 config_name = fw_config_file_path;
4518
4519 if (cf->size >= FLASH_CFG_MAX_SIZE)
4520 ret = -ENOMEM;
4521 else {
4522 params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
4523 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_CF));
636f9d37 4524 ret = t4_query_params(adapter, adapter->mbox,
b2612722 4525 adapter->pf, 0, 1, params, val);
4526 if (ret == 0) {
4527 /*
fc5ab020 4528 * For t4_memory_rw() below addresses and
4529 * sizes have to be in terms of multiples of 4
4530 * bytes. So, if the Configuration File isn't
4531 * a multiple of 4 bytes in length we'll have
4532 * to write that out separately since we can't
4533 * guarantee that the bytes following the
4534 * residual byte in the buffer returned by
4535 * request_firmware() are zeroed out ...
4536 */
4537 size_t resid = cf->size & 0x3;
4538 size_t size = cf->size & ~0x3;
4539 __be32 *data = (__be32 *)cf->data;
4540
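 /* Worked example: a 103-byte Configuration File gives size = 100
  * and resid = 3, so the first 100 bytes go out in the bulk
  * t4_memory_rw() below and the final word is written separately
  * with its trailing byte zeroed.
  */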
4541 mtype = FW_PARAMS_PARAM_Y_G(val[0]);
4542 maddr = FW_PARAMS_PARAM_Z_G(val[0]) << 16;
636f9d37 4543
4544 spin_lock(&adapter->win0_lock);
4545 ret = t4_memory_rw(adapter, 0, mtype, maddr,
4546 size, data, T4_MEMORY_WRITE);
4547 if (ret == 0 && resid != 0) {
4548 union {
4549 __be32 word;
4550 char buf[4];
4551 } last;
4552 int i;
4553
4554 last.word = data[size >> 2];
4555 for (i = resid; i < 4; i++)
4556 last.buf[i] = 0;
4557 ret = t4_memory_rw(adapter, 0, mtype,
4558 maddr + size,
4559 4, &last.word,
4560 T4_MEMORY_WRITE);
636f9d37 4561 }
fc5ab020 4562 spin_unlock(&adapter->win0_lock);
4563 }
4564 }
4565
4566 release_firmware(cf);
4567 if (ret)
4568 goto bye;
4569 }
4570
4571 val = 0;
4572
4573 /* Ofld + Hash filter is supported. Older fw will fail this request and
4574 * it is fine.
4575 */
4576 param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
4577 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_HASHFILTER_WITH_OFLD));
4578 ret = t4_set_params(adapter, adapter->mbox, adapter->pf, 0,
4579 1, &param, &val);
4580
4581 /* FW doesn't know about Hash filter + ofld support,
4582 * it's not a problem, don't return an error.
4583 */
4584 if (ret < 0) {
4585 dev_warn(adapter->pdev_dev,
4586 "Hash filter with ofld is not supported by FW\n");
4587 }
4588
4589 /*
4590 * Issue a Capability Configuration command to the firmware to get it
4591 * to parse the Configuration File. We don't use t4_fw_config_file()
4592 * because we want the ability to modify various features after we've
4593 * processed the configuration file ...
4594 */
4595 memset(&caps_cmd, 0, sizeof(caps_cmd));
4596 caps_cmd.op_to_write =
4597 htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
4598 FW_CMD_REQUEST_F |
4599 FW_CMD_READ_F);
ce91a923 4600 caps_cmd.cfvalid_to_len16 =
4601 htonl(FW_CAPS_CONFIG_CMD_CFVALID_F |
4602 FW_CAPS_CONFIG_CMD_MEMTYPE_CF_V(mtype) |
4603 FW_CAPS_CONFIG_CMD_MEMADDR64K_CF_V(maddr >> 16) |
4604 FW_LEN16(caps_cmd));
4605 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
4606 &caps_cmd);
4607
4608 /* If the CAPS_CONFIG failed with an ENOENT (for a Firmware
4609 * Configuration File in FLASH), our last gasp effort is to use the
4610 * Firmware Configuration File which is embedded in the firmware. A
4611 * very few early versions of the firmware didn't have one embedded
4612 * but we can ignore those.
4613 */
4614 if (ret == -ENOENT) {
4615 memset(&caps_cmd, 0, sizeof(caps_cmd));
4616 caps_cmd.op_to_write =
4617 htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
4618 FW_CMD_REQUEST_F |
4619 FW_CMD_READ_F);
4620 caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
4621 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd,
4622 sizeof(caps_cmd), &caps_cmd);
4623 config_name = "Firmware Default";
4624 }
4625
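 /* At this point config_name records which of the three possible
  * sources was used, in order of preference: the /lib/firmware file,
  * the copy in adapter FLASH ("On FLASH"), or the configuration
  * embedded in the firmware image itself ("Firmware Default").
  */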
4626 config_issued = 1;
4627 if (ret < 0)
4628 goto bye;
4629
4630 finiver = ntohl(caps_cmd.finiver);
4631 finicsum = ntohl(caps_cmd.finicsum);
4632 cfcsum = ntohl(caps_cmd.cfcsum);
4633 if (finicsum != cfcsum)
4634 dev_warn(adapter->pdev_dev, "Configuration File checksum "\
4635 "mismatch: [fini] csum=%#x, computed csum=%#x\n",
4636 finicsum, cfcsum);
4637
4638 /*
4639 * And now tell the firmware to use the configuration we just loaded.
4640 */
4641 caps_cmd.op_to_write =
4642 htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
4643 FW_CMD_REQUEST_F |
4644 FW_CMD_WRITE_F);
ce91a923 4645 caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
4646 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
4647 NULL);
4648 if (ret < 0)
4649 goto bye;
4650
4651 /*
4652 * Tweak configuration based on system architecture, module
4653 * parameters, etc.
4654 */
4655 ret = adap_init0_tweaks(adapter);
4656 if (ret < 0)
4657 goto bye;
4658
4659 /* We will proceed even if HMA init fails. */
4660 ret = adap_config_hma(adapter);
4661 if (ret)
4662 dev_err(adapter->pdev_dev,
4663 "HMA configuration failed with error %d\n", ret);
4664
a248384e 4665 if (is_t6(adapter->params.chip)) {
c2193999 4666 adap_config_hpfilter(adapter);
4667 ret = setup_ppod_edram(adapter);
4668 if (!ret)
4669 dev_info(adapter->pdev_dev, "Successfully enabled "
4670 "ppod edram feature\n");
4671 }
4672
4673 /*
4674 * And finally tell the firmware to initialize itself using the
4675 * parameters from the Configuration File.
4676 */
4677 ret = t4_fw_initialize(adapter, adapter->mbox);
4678 if (ret < 0)
4679 goto bye;
4680
4681 /* Emit Firmware Configuration File information and return
4682 * successfully.
636f9d37 4683 */
636f9d37 4684 dev_info(adapter->pdev_dev, "Successfully configured using Firmware "\
4685 "Configuration File \"%s\", version %#x, computed checksum %#x\n",
4686 config_name, finiver, cfcsum);
4687 return 0;
4688
4689 /*
4690 * Something bad happened. Return the error ... (If the "error"
4691 * is that there's no Configuration File on the adapter we don't
4692 * want to issue a warning since this is fairly common.)
4693 */
4694bye:
4695 if (config_issued && ret != -ENOENT)
4696 dev_warn(adapter->pdev_dev, "\"%s\" configuration file error %d\n",
4697 config_name, -ret);
4698 return ret;
4699}
4700
4701static struct fw_info fw_info_array[] = {
4702 {
4703 .chip = CHELSIO_T4,
4704 .fs_name = FW4_CFNAME,
4705 .fw_mod_name = FW4_FNAME,
4706 .fw_hdr = {
4707 .chip = FW_HDR_CHIP_T4,
4708 .fw_ver = __cpu_to_be32(FW_VERSION(T4)),
4709 .intfver_nic = FW_INTFVER(T4, NIC),
4710 .intfver_vnic = FW_INTFVER(T4, VNIC),
4711 .intfver_ri = FW_INTFVER(T4, RI),
4712 .intfver_iscsi = FW_INTFVER(T4, ISCSI),
4713 .intfver_fcoe = FW_INTFVER(T4, FCOE),
4714 },
4715 }, {
4716 .chip = CHELSIO_T5,
4717 .fs_name = FW5_CFNAME,
4718 .fw_mod_name = FW5_FNAME,
4719 .fw_hdr = {
4720 .chip = FW_HDR_CHIP_T5,
4721 .fw_ver = __cpu_to_be32(FW_VERSION(T5)),
4722 .intfver_nic = FW_INTFVER(T5, NIC),
4723 .intfver_vnic = FW_INTFVER(T5, VNIC),
4724 .intfver_ri = FW_INTFVER(T5, RI),
4725 .intfver_iscsi = FW_INTFVER(T5, ISCSI),
4726 .intfver_fcoe = FW_INTFVER(T5, FCOE),
4727 },
4728 }, {
4729 .chip = CHELSIO_T6,
4730 .fs_name = FW6_CFNAME,
4731 .fw_mod_name = FW6_FNAME,
4732 .fw_hdr = {
4733 .chip = FW_HDR_CHIP_T6,
4734 .fw_ver = __cpu_to_be32(FW_VERSION(T6)),
4735 .intfver_nic = FW_INTFVER(T6, NIC),
4736 .intfver_vnic = FW_INTFVER(T6, VNIC),
4737 .intfver_ofld = FW_INTFVER(T6, OFLD),
4738 .intfver_ri = FW_INTFVER(T6, RI),
4739 .intfver_iscsipdu = FW_INTFVER(T6, ISCSIPDU),
4740 .intfver_iscsi = FW_INTFVER(T6, ISCSI),
4741 .intfver_fcoepdu = FW_INTFVER(T6, FCOEPDU),
4742 .intfver_fcoe = FW_INTFVER(T6, FCOE),
4743 },
16e47624 4744 }
3ccc6cf7 4745
4746};
4747
4748static struct fw_info *find_fw_info(int chip)
4749{
4750 int i;
4751
4752 for (i = 0; i < ARRAY_SIZE(fw_info_array); i++) {
4753 if (fw_info_array[i].chip == chip)
4754 return &fw_info_array[i];
4755 }
4756 return NULL;
4757}
4758
4759/*
4760 * Phase 0 of initialization: contact FW, obtain config, perform basic init.
4761 */
86e8f298 4762static int adap_init0(struct adapter *adap, int vpd_skip)
b8ff05a9 4763{
9a4da2cd 4764 struct fw_caps_config_cmd caps_cmd;
4765 u32 params[7], val[7];
4766 enum dev_state state;
4767 u32 v, port_vec;
dcf7b6f5 4768 int reset = 1;
86e8f298 4769 int ret;
b8ff05a9 4770
4771 /* Grab Firmware Device Log parameters as early as possible so we have
4772 * access to it for debugging, etc.
4773 */
4774 ret = t4_init_devlog_params(adap);
4775 if (ret < 0)
4776 return ret;
4777
666224d4 4778 /* Contact FW, advertising Master capability */
4779 ret = t4_fw_hello(adap, adap->mbox, adap->mbox,
4780 is_kdump_kernel() ? MASTER_MUST : MASTER_MAY, &state);
4781 if (ret < 0) {
4782 dev_err(adap->pdev_dev, "could not connect to FW, error %d\n",
4783 ret);
4784 return ret;
4785 }
636f9d37 4786 if (ret == adap->mbox)
80f61f19 4787 adap->flags |= CXGB4_MASTER_PF;
b8ff05a9 4788
4789 /*
4790 * If we're the Master PF Driver and the device is uninitialized,
4791 * then let's consider upgrading the firmware ... (We always want
4792 * to check the firmware version number in order to A. get it for
4793 * later reporting and B. to warn if the currently loaded firmware
4794 * is excessively mismatched relative to the driver.)
4795 */
0de72738 4796
760446f9 4797 t4_get_version_info(adap);
4798 ret = t4_check_fw_version(adap);
4799 /* If firmware is too old (not supported by driver) force an update. */
21d11bd6 4800 if (ret)
a69265e9 4801 state = DEV_STATE_UNINIT;
80f61f19 4802 if ((adap->flags & CXGB4_MASTER_PF) && state != DEV_STATE_INIT) {
4803 struct fw_info *fw_info;
4804 struct fw_hdr *card_fw;
4805 const struct firmware *fw;
4806 const u8 *fw_data = NULL;
4807 unsigned int fw_size = 0;
4808
4809 /* This is the firmware whose headers the driver was compiled
4810 * against
4811 */
4812 fw_info = find_fw_info(CHELSIO_CHIP_VERSION(adap->params.chip));
4813 if (fw_info == NULL) {
4814 dev_err(adap->pdev_dev,
4815 "unable to get firmware info for chip %d.\n",
4816 CHELSIO_CHIP_VERSION(adap->params.chip));
4817 return -EINVAL;
636f9d37 4818 }
4819
4820 /* allocate memory to read the header of the firmware on the
4821 * card
4822 */
752ade68 4823 card_fw = kvzalloc(sizeof(*card_fw), GFP_KERNEL);
4824 if (!card_fw) {
4825 ret = -ENOMEM;
4826 goto bye;
4827 }
4828
4829 /* Get FW from /lib/firmware/ */
4830 ret = request_firmware(&fw, fw_info->fw_mod_name,
4831 adap->pdev_dev);
4832 if (ret < 0) {
4833 dev_err(adap->pdev_dev,
4834 "unable to load firmware image %s, error %d\n",
4835 fw_info->fw_mod_name, ret);
4836 } else {
4837 fw_data = fw->data;
4838 fw_size = fw->size;
4839 }
4840
4841 /* upgrade FW logic */
4842 ret = t4_prep_fw(adap, fw_info, fw_data, fw_size, card_fw,
4843 state, &reset);
4844
4845 /* Cleaning up */
0b5b6bee 4846 release_firmware(fw);
752ade68 4847 kvfree(card_fw);
16e47624 4848
636f9d37 4849 if (ret < 0)
16e47624 4850 goto bye;
636f9d37 4851 }
b8ff05a9 4852
4853 /* If the firmware is initialized already, emit a simple note to that
4854 * effect. Otherwise, it's time to try initializing the adapter.
4855 */
4856 if (state == DEV_STATE_INIT) {
4857 ret = adap_config_hma(adap);
4858 if (ret)
4859 dev_err(adap->pdev_dev,
4860 "HMA configuration failed with error %d\n",
4861 ret);
4862 dev_info(adap->pdev_dev, "Coming up as %s: "\
4863 "Adapter already initialized\n",
80f61f19 4864 adap->flags & CXGB4_MASTER_PF ? "MASTER" : "SLAVE");
4865 } else {
4866 dev_info(adap->pdev_dev, "Coming up as MASTER: "\
4867 "Initializing adapter\n");
4868
4869 /* Find out whether we're dealing with a version of the
4870 * firmware which has configuration file support.
636f9d37 4871 */
4872 params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
4873 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_CF));
b2612722 4874 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1,
06640310 4875 params, val);
13ee15d3 4876
4877 /* If the firmware doesn't support Configuration Files,
4878 * return an error.
4879 */
4880 if (ret < 0) {
4881 dev_err(adap->pdev_dev, "firmware doesn't support "
4882 "Firmware Configuration Files\n");
4883 goto bye;
4884 }
4885
4886 /* The firmware provides us with a memory buffer where we can
4887 * load a Configuration File from the host if we want to
4888 * override the Configuration File in flash.
4889 */
4890 ret = adap_init0_config(adap, reset);
4891 if (ret == -ENOENT) {
4892 dev_err(adap->pdev_dev, "no Configuration File "
4893 "present on adapter.\n");
4894 goto bye;
4895 }
4896 if (ret < 0) {
4897 dev_err(adap->pdev_dev, "could not initialize "
4898 "adapter, error %d\n", -ret);
4899 goto bye;
4900 }
4901 }
4902
4903 /* Now that we've successfully configured and initialized the adapter
4904 * (or found it already initialized), we can ask the Firmware what
4905 * resources it has provisioned for us.
4906 */
4907 ret = t4_get_pfres(adap);
4908 if (ret) {
4909 dev_err(adap->pdev_dev,
4910 "Unable to retrieve resource provisioning information\n");
4911 goto bye;
4912 }
4913
4914 /* Grab VPD parameters. This should be done after we establish a
4915 * connection to the firmware since some of the VPD parameters
4916 * (notably the Core Clock frequency) are retrieved via requests to
4917 * the firmware. On the other hand, we need these fairly early on
4918 * so we do this right after getting ahold of the firmware.
4919 *
4920 * We need to do this after initializing the adapter because someone
4921 * could have FLASHed a new VPD which won't be read by the firmware
4922 * until we do the RESET ...
4923 */
4924 if (!vpd_skip) {
4925 ret = t4_get_vpd_params(adap, &adap->params.vpd);
4926 if (ret < 0)
4927 goto bye;
4928 }
4929
4930 /* Find out what ports are available to us. Note that we need to do
4931 * this before calling adap_init0_no_config() since it needs nports
4932 * and portvec ...
4933 */
4934 v =
4935 FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
4936 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PORTVEC);
4937 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, &v, &port_vec);
4938 if (ret < 0)
4939 goto bye;
4940
4941 adap->params.nports = hweight32(port_vec);
4942 adap->params.portvec = port_vec;
4943
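 /* hweight32() counts the set bits, so e.g. a port_vec of 0x5
  * (ports 0 and 2 present) yields nports = 2.
  */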
4944 /* Give the SGE code a chance to pull in anything that it needs ...
4945 * Note that this must be called after we retrieve our VPD parameters
4946 * in order to know how to convert core ticks to seconds, etc.
636f9d37 4947 */
4948 ret = t4_sge_init(adap);
4949 if (ret < 0)
4950 goto bye;
636f9d37 4951
4952 /* Grab the SGE Doorbell Queue Timer values. If successful, that
4953 * indicates that the Firmware and Hardware support this.
4954 */
4955 params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
4956 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_DBQ_TIMERTICK));
4957 ret = t4_query_params(adap, adap->mbox, adap->pf, 0,
4958 1, params, val);
4959
4960 if (!ret) {
4961 adap->sge.dbqtimer_tick = val[0];
4962 ret = t4_read_sge_dbqtimers(adap,
4963 ARRAY_SIZE(adap->sge.dbqtimer_val),
4964 adap->sge.dbqtimer_val);
4965 }
4966
d429005f 4967 if (!ret)
80f61f19 4968 adap->flags |= CXGB4_SGE_DBQ_TIMER;
d429005f 4969
4970 if (is_bypass_device(adap->pdev->device))
4971 adap->params.bypass = 1;
4972
4973 /*
4974 * Grab some of our basic fundamental operating parameters.
4975 */
636f9d37 4976 params[0] = FW_PARAM_PFVF(EQ_START);
4977 params[1] = FW_PARAM_PFVF(L2T_START);
4978 params[2] = FW_PARAM_PFVF(L2T_END);
4979 params[3] = FW_PARAM_PFVF(FILTER_START);
4980 params[4] = FW_PARAM_PFVF(FILTER_END);
e46dab4d 4981 params[5] = FW_PARAM_PFVF(IQFLINT_START);
b2612722 4982 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 6, params, val);
4983 if (ret < 0)
4984 goto bye;
4985 adap->sge.egr_start = val[0];
4986 adap->l2t_start = val[1];
4987 adap->l2t_end = val[2];
4988 adap->tids.ftid_base = val[3];
4989 adap->tids.nftids = val[4] - val[3] + 1;
e46dab4d 4990 adap->sge.ingr_start = val[5];
b8ff05a9 4991
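 /* The *_START/*_END parameters above denote inclusive ranges, hence
  * the "end - start + 1" sizing; the same convention applies to the
  * ranges queried below.
  */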
0e249898 4992 if (CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5) {
4993 params[0] = FW_PARAM_PFVF(HPFILTER_START);
4994 params[1] = FW_PARAM_PFVF(HPFILTER_END);
4995 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2,
4996 params, val);
4997 if (ret < 0)
4998 goto bye;
4999
5000 adap->tids.hpftid_base = val[0];
5001 adap->tids.nhpftids = val[1] - val[0] + 1;
5002
5003 /* Read the raw mps entries. In T6, the last 2 tcam entries
5004 * are reserved for raw mac addresses (rawf = 2, one per port).
5005 */
5006 params[0] = FW_PARAM_PFVF(RAWF_START);
5007 params[1] = FW_PARAM_PFVF(RAWF_END);
5008 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2,
5009 params, val);
5010 if (ret == 0) {
5011 adap->rawf_start = val[0];
5012 adap->rawf_cnt = val[1] - val[0] + 1;
5013 }
5014
5015 adap->tids.tid_base =
5016 t4_read_reg(adap, LE_DB_ACTIVE_TABLE_START_INDEX_A);
5017 }
5018
5019 /* qids (ingress/egress) returned from firmware can be anywhere
5020 * in the range from EQ(IQFLINT)_START to EQ(IQFLINT)_END.
5021 * Hence driver needs to allocate memory for this range to
5022 * store the queue info. Get the highest IQFLINT/EQ index returned
5023 * in FW_EQ_*_CMD.alloc command.
5024 */
5025 params[0] = FW_PARAM_PFVF(EQ_END);
5026 params[1] = FW_PARAM_PFVF(IQFLINT_END);
b2612722 5027 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params, val);
5028 if (ret < 0)
5029 goto bye;
5030 adap->sge.egr_sz = val[0] - adap->sge.egr_start + 1;
5031 adap->sge.ingr_sz = val[1] - adap->sge.ingr_start + 1;
5032
5033 adap->sge.egr_map = kcalloc(adap->sge.egr_sz,
5034 sizeof(*adap->sge.egr_map), GFP_KERNEL);
5035 if (!adap->sge.egr_map) {
5036 ret = -ENOMEM;
5037 goto bye;
5038 }
5039
5040 adap->sge.ingr_map = kcalloc(adap->sge.ingr_sz,
5041 sizeof(*adap->sge.ingr_map), GFP_KERNEL);
5042 if (!adap->sge.ingr_map) {
5043 ret = -ENOMEM;
5044 goto bye;
5045 }
5046
5047 /* Allocate the memory for the various egress queue bitmaps,
5b377d11 5048 * i.e. starving_fl, txq_maperr and blocked_fl.
5049 */
5050 adap->sge.starving_fl = kcalloc(BITS_TO_LONGS(adap->sge.egr_sz),
5051 sizeof(long), GFP_KERNEL);
5052 if (!adap->sge.starving_fl) {
5053 ret = -ENOMEM;
5054 goto bye;
5055 }
5056
5057 adap->sge.txq_maperr = kcalloc(BITS_TO_LONGS(adap->sge.egr_sz),
5058 sizeof(long), GFP_KERNEL);
5059 if (!adap->sge.txq_maperr) {
5060 ret = -ENOMEM;
5061 goto bye;
5062 }
5063
5064#ifdef CONFIG_DEBUG_FS
5065 adap->sge.blocked_fl = kcalloc(BITS_TO_LONGS(adap->sge.egr_sz),
5066 sizeof(long), GFP_KERNEL);
5067 if (!adap->sge.blocked_fl) {
5068 ret = -ENOMEM;
5069 goto bye;
5070 }
5071#endif
5072
5073 params[0] = FW_PARAM_PFVF(CLIP_START);
5074 params[1] = FW_PARAM_PFVF(CLIP_END);
b2612722 5075 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params, val);
5076 if (ret < 0)
5077 goto bye;
5078 adap->clipt_start = val[0];
5079 adap->clipt_end = val[1];
5080
5081 /* Get the supported number of traffic classes */
5082 params[0] = FW_PARAM_DEV(NUM_TM_CLASS);
5083 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, params, val);
5084 if (ret < 0) {
5085 /* We couldn't retrieve the number of Traffic Classes
5086 * supported by the hardware/firmware. So we hard
5087 * code it here.
5088 */
5089 adap->params.nsched_cls = is_t4(adap->params.chip) ? 15 : 16;
5090 } else {
5091 adap->params.nsched_cls = val[0];
5092 }
b72a32da 5093
5094 /* query params related to active filter region */
5095 params[0] = FW_PARAM_PFVF(ACTIVE_FILTER_START);
5096 params[1] = FW_PARAM_PFVF(ACTIVE_FILTER_END);
b2612722 5097 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params, val);
5098 /* If the active filter region size is set, we enable establishing
5099 * offload connections through firmware work requests.
5100 */
5101 if ((val[0] != val[1]) && (ret >= 0)) {
80f61f19 5102 adap->flags |= CXGB4_FW_OFLD_CONN;
5103 adap->tids.aftid_base = val[0];
5104 adap->tids.aftid_end = val[1];
5105 }
5106
5107 /* If we're running on newer firmware, let it know that we're
5108 * prepared to deal with encapsulated CPL messages. Older
5109 * firmware won't understand this and we'll just get
5110 * unencapsulated messages ...
5111 */
5112 params[0] = FW_PARAM_PFVF(CPLFW4MSG_ENCAP);
5113 val[0] = 1;
b2612722 5114 (void)t4_set_params(adap, adap->mbox, adap->pf, 0, 1, params, val);
b407a4a9 5115
5116 /*
5117 * Find out whether we're allowed to use the T5+ ULPTX MEMWRITE DSGL
5118 * capability. Earlier versions of the firmware didn't have the
5119 * ULPTX_MEMWRITE_DSGL so we'll interpret a query failure as no
5120 * permission to use ULPTX MEMWRITE DSGL.
5121 */
5122 if (is_t4(adap->params.chip)) {
5123 adap->params.ulptx_memwrite_dsgl = false;
5124 } else {
5125 params[0] = FW_PARAM_DEV(ULPTX_MEMWRITE_DSGL);
b2612722 5126 ret = t4_query_params(adap, adap->mbox, adap->pf, 0,
5127 1, params, val);
5128 adap->params.ulptx_memwrite_dsgl = (ret == 0 && val[0] != 0);
5129 }
5130
5131 /* See if FW supports FW_RI_FR_NSMR_TPTE_WR work request */
5132 params[0] = FW_PARAM_DEV(RI_FR_NSMR_TPTE_WR);
5133 ret = t4_query_params(adap, adap->mbox, adap->pf, 0,
5134 1, params, val);
5135 adap->params.fr_nsmr_tpte_wr_support = (ret == 0 && val[0] != 0);
5136
5137 /* See if FW supports FW_FILTER2 work request */
5138 if (is_t4(adap->params.chip)) {
fdb6b338 5139 adap->params.filter2_wr_support = false;
5140 } else {
5141 params[0] = FW_PARAM_DEV(FILTER2_WR);
5142 ret = t4_query_params(adap, adap->mbox, adap->pf, 0,
5143 1, params, val);
5144 adap->params.filter2_wr_support = (ret == 0 && val[0] != 0);
5145 }
5146
5147 /* Check if FW supports returning vin and smt index.
5148 * If this is not supported, driver will interpret
5149 * these values from viid.
5150 */
5151 params[0] = FW_PARAM_DEV(OPAQUE_VIID_SMT_EXTN);
5152 ret = t4_query_params(adap, adap->mbox, adap->pf, 0,
5153 1, params, val);
5154 adap->params.viid_smt_extn_support = (ret == 0 && val[0] != 0);
5155
5156 /*
5157 * Get device capabilities so we can determine what resources we need
5158 * to manage.
5159 */
5160 memset(&caps_cmd, 0, sizeof(caps_cmd));
5161 caps_cmd.op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
5162 FW_CMD_REQUEST_F | FW_CMD_READ_F);
ce91a923 5163 caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
5164 ret = t4_wr_mbox(adap, adap->mbox, &caps_cmd, sizeof(caps_cmd),
5165 &caps_cmd);
5166 if (ret < 0)
5167 goto bye;
5168
5169 /* Hash filter has some mandatory register settings to be tested, and
5170 * for that it needs to know whether offload is enabled, hence
5171 * checking and setting it here.
5172 */
5173 if (caps_cmd.ofldcaps)
5174 adap->params.offload = 1;
5175
5c31254e 5176 if (caps_cmd.ofldcaps ||
5177 (caps_cmd.niccaps & htons(FW_CAPS_CONFIG_NIC_HASHFILTER)) ||
5178 (caps_cmd.niccaps & htons(FW_CAPS_CONFIG_NIC_ETHOFLD))) {
5179 /* query offload-related parameters */
5180 params[0] = FW_PARAM_DEV(NTID);
5181 params[1] = FW_PARAM_PFVF(SERVER_START);
5182 params[2] = FW_PARAM_PFVF(SERVER_END);
5183 params[3] = FW_PARAM_PFVF(TDDP_START);
5184 params[4] = FW_PARAM_PFVF(TDDP_END);
5185 params[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
b2612722 5186 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 6,
636f9d37 5187 params, val);
5188 if (ret < 0)
5189 goto bye;
5190 adap->tids.ntids = val[0];
5191 adap->tids.natids = min(adap->tids.ntids / 2, MAX_ATIDS);
5192 adap->tids.stid_base = val[1];
5193 adap->tids.nstids = val[2] - val[1] + 1;
636f9d37 5194 /*
dbedd44e 5195 * Setup server filter region. Divide the available filter
5196 * region into two parts. Regular filters get 1/3rd and server
5197 * filters get 2/3rd. This is only enabled if the workaround
5198 * path is enabled.
5199 * 1. For regular filters.
5200 * 2. Server filters: these are special filters used to redirect
5201 * SYN packets to the offload queue.
5202 */
80f61f19 5203 if (adap->flags & CXGB4_FW_OFLD_CONN && !is_bypass(adap)) {
5204 adap->tids.sftid_base = adap->tids.ftid_base +
5205 DIV_ROUND_UP(adap->tids.nftids, 3);
5206 adap->tids.nsftids = adap->tids.nftids -
5207 DIV_ROUND_UP(adap->tids.nftids, 3);
5208 adap->tids.nftids = adap->tids.sftid_base -
5209 adap->tids.ftid_base;
5210 }
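 /* e.g. with nftids = 300 the split above leaves 100 regular filter
  * IDs (ftid_base .. ftid_base + 99) and nsftids = 200 server filter
  * IDs starting at sftid_base = ftid_base + 100.
  */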
5211 adap->vres.ddp.start = val[3];
5212 adap->vres.ddp.size = val[4] - val[3] + 1;
5213 adap->params.ofldq_wr_cred = val[5];
636f9d37 5214
5c31254e 5215 if (caps_cmd.niccaps & htons(FW_CAPS_CONFIG_NIC_HASHFILTER)) {
74dd5aa1 5216 init_hash_filter(adap);
5c31254e 5217 } else {
5218 adap->num_ofld_uld += 1;
5219 }
5220
5221 if (caps_cmd.niccaps & htons(FW_CAPS_CONFIG_NIC_ETHOFLD)) {
5222 params[0] = FW_PARAM_PFVF(ETHOFLD_START);
5223 params[1] = FW_PARAM_PFVF(ETHOFLD_END);
5224 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2,
5225 params, val);
5226 if (!ret) {
5227 adap->tids.eotid_base = val[0];
5228 adap->tids.neotids = min_t(u32, MAX_ATIDS,
5229 val[1] - val[0] + 1);
5230 adap->params.ethofld = 1;
5231 }
5232 }
b8ff05a9 5233 }
636f9d37 5234 if (caps_cmd.rdmacaps) {
b8ff05a9
DM
5235 params[0] = FW_PARAM_PFVF(STAG_START);
5236 params[1] = FW_PARAM_PFVF(STAG_END);
5237 params[2] = FW_PARAM_PFVF(RQ_START);
5238 params[3] = FW_PARAM_PFVF(RQ_END);
5239 params[4] = FW_PARAM_PFVF(PBL_START);
5240 params[5] = FW_PARAM_PFVF(PBL_END);
b2612722 5241 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 6,
636f9d37 5242 params, val);
b8ff05a9
DM
5243 if (ret < 0)
5244 goto bye;
5245 adap->vres.stag.start = val[0];
5246 adap->vres.stag.size = val[1] - val[0] + 1;
5247 adap->vres.rq.start = val[2];
5248 adap->vres.rq.size = val[3] - val[2] + 1;
5249 adap->vres.pbl.start = val[4];
5250 adap->vres.pbl.size = val[5] - val[4] + 1;
a0881cab 5251
c68644ef
RR
5252 params[0] = FW_PARAM_PFVF(SRQ_START);
5253 params[1] = FW_PARAM_PFVF(SRQ_END);
5254 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2,
5255 params, val);
5256 if (!ret) {
5257 adap->vres.srq.start = val[0];
5258 adap->vres.srq.size = val[1] - val[0] + 1;
5259 }
5260 if (adap->vres.srq.size) {
5261 adap->srq = t4_init_srq(adap->vres.srq.size);
5262 if (!adap->srq)
5263 dev_warn(&adap->pdev->dev, "could not allocate SRQ, continuing\n");
5264 }
5265
a0881cab
DM
5266 params[0] = FW_PARAM_PFVF(SQRQ_START);
5267 params[1] = FW_PARAM_PFVF(SQRQ_END);
5268 params[2] = FW_PARAM_PFVF(CQ_START);
5269 params[3] = FW_PARAM_PFVF(CQ_END);
1ae970e0
DM
5270 params[4] = FW_PARAM_PFVF(OCQ_START);
5271 params[5] = FW_PARAM_PFVF(OCQ_END);
b2612722 5272 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 6, params,
5c937dd3 5273 val);
a0881cab
DM
5274 if (ret < 0)
5275 goto bye;
5276 adap->vres.qp.start = val[0];
5277 adap->vres.qp.size = val[1] - val[0] + 1;
5278 adap->vres.cq.start = val[2];
5279 adap->vres.cq.size = val[3] - val[2] + 1;
1ae970e0
DM
5280 adap->vres.ocq.start = val[4];
5281 adap->vres.ocq.size = val[5] - val[4] + 1;
4c2c5763
HS
5282
5283 params[0] = FW_PARAM_DEV(MAXORDIRD_QP);
5284 params[1] = FW_PARAM_DEV(MAXIRD_ADAPTER);
b2612722 5285 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params,
5c937dd3 5286 val);
4c2c5763
HS
5287 if (ret < 0) {
5288 adap->params.max_ordird_qp = 8;
5289 adap->params.max_ird_adapter = 32 * adap->tids.ntids;
5290 ret = 0;
5291 } else {
5292 adap->params.max_ordird_qp = val[0];
5293 adap->params.max_ird_adapter = val[1];
5294 }
5295 dev_info(adap->pdev_dev,
5296 "max_ordird_qp %d max_ird_adapter %d\n",
5297 adap->params.max_ordird_qp,
5298 adap->params.max_ird_adapter);
43db9296
RR
5299
5300 /* Enable write_with_immediate if FW supports it */
5301 params[0] = FW_PARAM_DEV(RDMA_WRITE_WITH_IMM);
5302 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, params,
5303 val);
5304 adap->params.write_w_imm_support = (ret == 0 && val[0] != 0);
f3910c62
RR
5305
5306 /* Enable write_cmpl if FW supports it */
5307 params[0] = FW_PARAM_DEV(RI_WRITE_CMPL_WR);
5308 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, params,
5309 val);
5310 adap->params.write_cmpl_support = (ret == 0 && val[0] != 0);
0fbc81b3 5311 adap->num_ofld_uld += 2;
b8ff05a9 5312 }
636f9d37 5313 if (caps_cmd.iscsicaps) {
b8ff05a9
DM
5314 params[0] = FW_PARAM_PFVF(ISCSI_START);
5315 params[1] = FW_PARAM_PFVF(ISCSI_END);
b2612722 5316 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2,
636f9d37 5317 params, val);
b8ff05a9
DM
5318 if (ret < 0)
5319 goto bye;
5320 adap->vres.iscsi.start = val[0];
5321 adap->vres.iscsi.size = val[1] - val[0] + 1;
a248384e
VP
5322 if (is_t6(adap->params.chip)) {
5323 params[0] = FW_PARAM_PFVF(PPOD_EDRAM_START);
5324 params[1] = FW_PARAM_PFVF(PPOD_EDRAM_END);
5325 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2,
5326 params, val);
5327 if (!ret) {
5328 adap->vres.ppod_edram.start = val[0];
5329 adap->vres.ppod_edram.size =
5330 val[1] - val[0] + 1;
5331
5332 dev_info(adap->pdev_dev,
5333 "ppod edram start 0x%x end 0x%x size 0x%x\n",
5334 val[0], val[1],
5335 adap->vres.ppod_edram.size);
5336 }
5337 }
0fbc81b3
HS
5338 /* LIO target and cxgb4i initiator */
5339 adap->num_ofld_uld += 2;
b8ff05a9 5340 }
94cdb8bb 5341 if (caps_cmd.cryptocaps) {
e383f248
AG
5342 if (ntohs(caps_cmd.cryptocaps) &
5343 FW_CAPS_CONFIG_CRYPTO_LOOKASIDE) {
5344 params[0] = FW_PARAM_PFVF(NCRYPTO_LOOKASIDE);
5345 ret = t4_query_params(adap, adap->mbox, adap->pf, 0,
5346 2, params, val);
5347 if (ret < 0) {
5348 if (ret != -EINVAL)
5349 goto bye;
5350 } else {
5351 adap->vres.ncrypto_fc = val[0];
5352 }
5353 adap->num_ofld_uld += 1;
5354 }
5355 if (ntohs(caps_cmd.cryptocaps) &
5356 FW_CAPS_CONFIG_TLS_INLINE) {
5357 params[0] = FW_PARAM_PFVF(TLS_START);
5358 params[1] = FW_PARAM_PFVF(TLS_END);
5359 ret = t4_query_params(adap, adap->mbox, adap->pf, 0,
5360 2, params, val);
5361 if (ret < 0)
72a56ca9 5362 goto bye;
e383f248
AG
5363 adap->vres.key.start = val[0];
5364 adap->vres.key.size = val[1] - val[0] + 1;
5365 adap->num_uld += 1;
72a56ca9 5366 }
a6ec572b 5367 adap->params.crypto = ntohs(caps_cmd.cryptocaps);
94cdb8bb 5368 }
b8ff05a9 5369
92e7ae71
HS
5370 /* The MTU/MSS Table is initialized by now, so read back its values. If
5371 * we're initializing the adapter, then we'll make any modifications
5372 * we want to the MTU/MSS Table and also initialize the congestion
5373 * parameters.
636f9d37 5374 */
b8ff05a9 5375 t4_read_mtu_tbl(adap, adap->params.mtus, NULL);
92e7ae71
HS
5376 if (state != DEV_STATE_INIT) {
5377 int i;
5378
5379 /* The default MTU Table contains values 1492 and 1500.
5380 * However, for TCP, it's better to have two values which are
5381 * a multiple of 8 +/- 4 bytes apart near this popular MTU.
5382 * This allows us to have a TCP Data Payload which is a
5383 * multiple of 8 regardless of what combination of TCP Options
5384 * are in use (always a multiple of 4 bytes) which is
5385 * important for performance reasons. For instance, if no
5386 * options are in use, then we have a 20-byte IP header and a
5387 * 20-byte TCP header. In this case, a 1500-byte MSS would
5388 * result in a TCP Data Payload of 1500 - 40 == 1460 bytes
5389 * which is not a multiple of 8. So using an MSS of 1488 in
5390 * this case results in a TCP Data Payload of 1448 bytes which
5391 * is a multiple of 8. On the other hand, if 12-byte TCP Time
5392 * Stamps have been negotiated, then an MTU of 1500 bytes
5393 * results in a TCP Data Payload of 1448 bytes which, as
5394 * above, is a multiple of 8 bytes ...
5395 */
5396 for (i = 0; i < NMTUS; i++)
5397 if (adap->params.mtus[i] == 1492) {
5398 adap->params.mtus[i] = 1488;
5399 break;
5400 }
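/* Editor's note (illustrative arithmetic): with no TCP options the MSS
 * is MTU - 40, and 1488 - 40 = 1448 = 181 * 8; with 12-byte timestamps
 * the MSS is MTU - 52, and 1500 - 52 = 1448 as well, so both popular
 * cases leave the TCP Data Payload 8-byte aligned.
 */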
7ee9ff94 5401
92e7ae71
HS
5402 t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
5403 adap->params.b_wnd);
5404 }
df64e4d3 5405 t4_init_sge_params(adap);
80f61f19 5406 adap->flags |= CXGB4_FW_OK;
5ccf9d04 5407 t4_init_tp_params(adap, true);
b8ff05a9
DM
5408 return 0;
5409
5410 /*
636f9d37
VP
5411 * Something bad happened. If a command timed out or failed with EIO,
5412 * the FW is not operating within its spec or something catastrophic
5413 * happened to the HW/FW, so stop issuing commands.
b8ff05a9 5414 */
636f9d37 5415bye:
8b4e6b3c 5416 adap_free_hma_mem(adap);
4b8e27a8
HS
5417 kfree(adap->sge.egr_map);
5418 kfree(adap->sge.ingr_map);
5419 kfree(adap->sge.starving_fl);
5420 kfree(adap->sge.txq_maperr);
5b377d11
HS
5421#ifdef CONFIG_DEBUG_FS
5422 kfree(adap->sge.blocked_fl);
5423#endif
636f9d37
VP
5424 if (ret != -ETIMEDOUT && ret != -EIO)
5425 t4_fw_bye(adap, adap->mbox);
b8ff05a9
DM
5426 return ret;
5427}
5428
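/* Editor's sketch (not upstream code): adap_init0() above probes many
 * optional firmware features with the same idiom -- query one FW_PARAMS
 * device parameter and treat "query succeeded and value is non-zero" as
 * "feature supported". A hypothetical helper capturing that idiom:
 */
static bool __maybe_unused fw_feature_supported(struct adapter *adap,
						u32 param)
{
	u32 val = 0;
	int ret;

	/* Single-parameter query through the PF's mailbox, as above */
	ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1,
			      &param, &val);
	return ret == 0 && val != 0;
}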
204dc3c0
DM
5429/* EEH callbacks */
5430
5431static pci_ers_result_t eeh_err_detected(struct pci_dev *pdev,
5432 pci_channel_state_t state)
5433{
5434 int i;
5435 struct adapter *adap = pci_get_drvdata(pdev);
5436
5437 if (!adap)
5438 goto out;
5439
5440 rtnl_lock();
80f61f19 5441 adap->flags &= ~CXGB4_FW_OK;
204dc3c0 5442 notify_ulds(adap, CXGB4_STATE_START_RECOVERY);
9fe6cb58 5443 spin_lock(&adap->stats_lock);
204dc3c0
DM
5444 for_each_port(adap, i) {
5445 struct net_device *dev = adap->port[i];
025d0973
GP
5446 if (dev) {
5447 netif_device_detach(dev);
5448 netif_carrier_off(dev);
5449 }
204dc3c0 5450 }
9fe6cb58 5451 spin_unlock(&adap->stats_lock);
b37987e8 5452 disable_interrupts(adap);
80f61f19 5453 if (adap->flags & CXGB4_FULL_INIT_DONE)
204dc3c0
DM
5454 cxgb_down(adap);
5455 rtnl_unlock();
80f61f19 5456 if ((adap->flags & CXGB4_DEV_ENABLED)) {
144be3d9 5457 pci_disable_device(pdev);
80f61f19 5458 adap->flags &= ~CXGB4_DEV_ENABLED;
144be3d9 5459 }
204dc3c0
DM
5460out: return state == pci_channel_io_perm_failure ?
5461 PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
5462}
5463
5464static pci_ers_result_t eeh_slot_reset(struct pci_dev *pdev)
5465{
5466 int i, ret;
5467 struct fw_caps_config_cmd c;
5468 struct adapter *adap = pci_get_drvdata(pdev);
5469
5470 if (!adap) {
5471 pci_restore_state(pdev);
5472 pci_save_state(pdev);
5473 return PCI_ERS_RESULT_RECOVERED;
5474 }
5475
80f61f19 5476 if (!(adap->flags & CXGB4_DEV_ENABLED)) {
144be3d9
GS
5477 if (pci_enable_device(pdev)) {
5478 dev_err(&pdev->dev, "Cannot reenable PCI "
5479 "device after reset\n");
5480 return PCI_ERS_RESULT_DISCONNECT;
5481 }
80f61f19 5482 adap->flags |= CXGB4_DEV_ENABLED;
204dc3c0
DM
5483 }
5484
5485 pci_set_master(pdev);
5486 pci_restore_state(pdev);
5487 pci_save_state(pdev);
204dc3c0 5488
8203b509 5489 if (t4_wait_dev_ready(adap->regs) < 0)
204dc3c0 5490 return PCI_ERS_RESULT_DISCONNECT;
b2612722 5491 if (t4_fw_hello(adap, adap->mbox, adap->pf, MASTER_MUST, NULL) < 0)
204dc3c0 5492 return PCI_ERS_RESULT_DISCONNECT;
80f61f19 5493 adap->flags |= CXGB4_FW_OK;
204dc3c0
DM
5494 if (adap_init1(adap, &c))
5495 return PCI_ERS_RESULT_DISCONNECT;
5496
5497 for_each_port(adap, i) {
02d805dc
SR
5498 struct port_info *pi = adap2pinfo(adap, i);
5499 u8 vivld = 0, vin = 0;
204dc3c0 5500
02d805dc
SR
5501 ret = t4_alloc_vi(adap, adap->mbox, pi->tx_chan, adap->pf, 0, 1,
5502 NULL, NULL, &vivld, &vin);
204dc3c0
DM
5503 if (ret < 0)
5504 return PCI_ERS_RESULT_DISCONNECT;
02d805dc
SR
5505 pi->viid = ret;
5506 pi->xact_addr_filt = -1;
5507 /* If fw supports returning the VIN as part of FW_VI_CMD,
5508 * save the returned values.
5509 */
5510 if (adap->params.viid_smt_extn_support) {
5511 pi->vivld = vivld;
5512 pi->vin = vin;
5513 } else {
5514 /* Retrieve the values from VIID */
5515 pi->vivld = FW_VIID_VIVLD_G(pi->viid);
5516 pi->vin = FW_VIID_VIN_G(pi->viid);
5517 }
204dc3c0
DM
5518 }
5519
5520 t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
5521 adap->params.b_wnd);
1ae970e0 5522 setup_memwin(adap);
204dc3c0
DM
5523 if (cxgb_up(adap))
5524 return PCI_ERS_RESULT_DISCONNECT;
5525 return PCI_ERS_RESULT_RECOVERED;
5526}
5527
5528static void eeh_resume(struct pci_dev *pdev)
5529{
5530 int i;
5531 struct adapter *adap = pci_get_drvdata(pdev);
5532
5533 if (!adap)
5534 return;
5535
5536 rtnl_lock();
5537 for_each_port(adap, i) {
5538 struct net_device *dev = adap->port[i];
025d0973
GP
5539 if (dev) {
5540 if (netif_running(dev)) {
5541 link_start(dev);
5542 cxgb_set_rxmode(dev);
5543 }
5544 netif_device_attach(dev);
204dc3c0 5545 }
204dc3c0
DM
5546 }
5547 rtnl_unlock();
5548}
5549
86e8f298
VK
5550static void eeh_reset_prepare(struct pci_dev *pdev)
5551{
5552 struct adapter *adapter = pci_get_drvdata(pdev);
5553 int i;
5554
5555 if (adapter->pf != 4)
5556 return;
5557
5558 adapter->flags &= ~CXGB4_FW_OK;
5559
5560 notify_ulds(adapter, CXGB4_STATE_DOWN);
5561
5562 for_each_port(adapter, i)
5563 if (adapter->port[i]->reg_state == NETREG_REGISTERED)
5564 cxgb_close(adapter->port[i]);
5565
5566 disable_interrupts(adapter);
5567 cxgb4_free_mps_ref_entries(adapter);
5568
5569 adap_free_hma_mem(adapter);
5570
5571 if (adapter->flags & CXGB4_FULL_INIT_DONE)
5572 cxgb_down(adapter);
5573}
5574
5575static void eeh_reset_done(struct pci_dev *pdev)
5576{
5577 struct adapter *adapter = pci_get_drvdata(pdev);
5578 int err, i;
5579
5580 if (adapter->pf != 4)
5581 return;
5582
5583 err = t4_wait_dev_ready(adapter->regs);
5584 if (err < 0) {
5585 dev_err(adapter->pdev_dev,
5586 "Device not ready, err %d", err);
5587 return;
5588 }
5589
5590 setup_memwin(adapter);
5591
5592 err = adap_init0(adapter, 1);
5593 if (err) {
5594 dev_err(adapter->pdev_dev,
5595 "Adapter init failed, err %d", err);
5596 return;
5597 }
5598
5599 setup_memwin_rdma(adapter);
5600
5601 if (adapter->flags & CXGB4_FW_OK) {
5602 err = t4_port_init(adapter, adapter->pf, adapter->pf, 0);
5603 if (err) {
5604 dev_err(adapter->pdev_dev,
5605 "Port init failed, err %d", err);
5606 return;
5607 }
5608 }
5609
5610 err = cfg_queues(adapter);
5611 if (err) {
5612 dev_err(adapter->pdev_dev,
5613 "Config queues failed, err %d", err);
5614 return;
5615 }
5616
5617 cxgb4_init_mps_ref_entries(adapter);
5618
5619 err = setup_fw_sge_queues(adapter);
5620 if (err) {
5621 dev_err(adapter->pdev_dev,
5622 "FW sge queue allocation failed, err %d", err);
5623 return;
5624 }
5625
5626 for_each_port(adapter, i)
5627 if (adapter->port[i]->reg_state == NETREG_REGISTERED)
5628 cxgb_open(adapter->port[i]);
5629}
5630
3646f0e5 5631static const struct pci_error_handlers cxgb4_eeh = {
204dc3c0
DM
5632 .error_detected = eeh_err_detected,
5633 .slot_reset = eeh_slot_reset,
5634 .resume = eeh_resume,
86e8f298
VK
5635 .reset_prepare = eeh_reset_prepare,
5636 .reset_done = eeh_reset_done,
204dc3c0
DM
5637};
5638
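/* Editor's note (illustrative): during EEH recovery the PCI core invokes
 * these callbacks roughly as error_detected -> slot_reset -> resume, or
 * reset_prepare -> reset_done around a controlled reset; returning
 * PCI_ERS_RESULT_DISCONNECT from the first two aborts the recovery.
 */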
9b86a8d1
HS
5639/* Return true if the Link Configuration supports "High Speeds" (those greater
5640 * than 1Gb/s).
5641 */
57d8b764 5642static inline bool is_x_10g_port(const struct link_config *lc)
b8ff05a9 5643{
9b86a8d1
HS
5644 unsigned int speeds, high_speeds;
5645
c3168cab
GG
5646 speeds = FW_PORT_CAP32_SPEED_V(FW_PORT_CAP32_SPEED_G(lc->pcaps));
5647 high_speeds = speeds &
5648 ~(FW_PORT_CAP32_SPEED_100M | FW_PORT_CAP32_SPEED_1G);
9b86a8d1
HS
5649
5650 return high_speeds != 0;
b8ff05a9
DM
5651}
5652
76c3a552 5653/* Perform default configuration of DMA queues depending on the number and type
b8ff05a9
DM
5654 * of ports we found and the number of available CPUs. Most settings can be
5655 * modified by the admin prior to actual use.
5656 */
0eaec62a 5657static int cfg_queues(struct adapter *adap)
b8ff05a9 5658{
76c3a552 5659 u32 avail_qsets, avail_eth_qsets, avail_uld_qsets;
116ca924 5660 u32 ncpus = num_online_cpus();
76c3a552 5661 u32 niqflint, neq, num_ulds;
b8ff05a9 5662 struct sge *s = &adap->sge;
00e31cfc 5663 u32 i, n10g = 0, qidx = 0;
116ca924 5664 u32 q10g = 0, q1g;
b8ff05a9 5665
76c3a552 5666 /* Reduce memory usage in kdump environment, disable all offload. */
85eacf3f 5667 if (is_kdump_kernel() || (is_uld(adap) && t4_uld_mem_alloc(adap))) {
0fbc81b3 5668 adap->params.offload = 0;
94cdb8bb 5669 adap->params.crypto = 0;
76c3a552 5670 adap->params.ethofld = 0;
94cdb8bb
HS
5671 }
5672
0eaec62a
CL
5673 /* Calculate the number of Ethernet Queue Sets available based on
5674 * resources provisioned for us. We always have an Asynchronous
5675 * Firmware Event Ingress Queue. If we're operating in MSI or Legacy
5676 * IRQ Pin Interrupt mode, then we'll also have a Forwarded Interrupt
5677 * Ingress Queue. Meanwhile, we need two Egress Queues for each
5678 * Queue Set: one for the Free List and one for the Ethernet TX Queue.
5679 *
5680 * Note that we should also take into account all of the various
5681 * Offload Queues. But, in any situation where we're operating in
5682 * a Resource Constrained Provisioning environment, doing any Offload
5683 * at all is problematic ...
5684 */
5685 niqflint = adap->params.pfres.niqflint - 1;
80f61f19 5686 if (!(adap->flags & CXGB4_USING_MSIX))
0eaec62a
CL
5687 niqflint--;
5688 neq = adap->params.pfres.neq / 2;
76c3a552 5689 avail_qsets = min(niqflint, neq);
0eaec62a 5690
76c3a552 5691 if (avail_qsets < adap->params.nports) {
0eaec62a 5692 dev_err(adap->pdev_dev, "avail_eth_qsets=%d < nports=%d\n",
76c3a552 5693 avail_qsets, adap->params.nports);
0eaec62a
CL
5694 return -ENOMEM;
5695 }
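/* Editor's illustration (hypothetical numbers): with pfres.niqflint = 66
 * and pfres.neq = 128 under MSI-X, niqflint = 66 - 1 = 65 and
 * neq = 128 / 2 = 64, giving avail_qsets = min(65, 64) = 64.
 */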
5696
5697 /* Count the number of 10Gb/s or better ports */
5698 for_each_port(adap, i)
5699 n10g += is_x_10g_port(&adap2pinfo(adap, i)->link_cfg);
5700
76c3a552 5701 avail_eth_qsets = min_t(u32, avail_qsets, MAX_ETH_QSETS);
116ca924
VK
5702
5703 /* We default to 1 queue per non-10G port, and up to one queue per
5704 * core for each 10G or faster port.
5705 */
5706 if (n10g)
5707 q10g = (avail_eth_qsets - (adap->params.nports - n10g)) / n10g;
5708
688848b1
AB
5709#ifdef CONFIG_CHELSIO_T4_DCB
5710 /* For Data Center Bridging support we need to be able to support up
5711 * to 8 Traffic Priorities; each of which will be assigned to its
5712 * own TX Queue in order to prevent Head-Of-Line Blocking.
5713 */
116ca924 5714 q1g = 8;
0eaec62a
CL
5715 if (adap->params.nports * 8 > avail_eth_qsets) {
5716 dev_err(adap->pdev_dev, "DCB avail_eth_qsets=%d < %d!\n",
5717 avail_eth_qsets, adap->params.nports * 8);
5718 return -ENOMEM;
688848b1 5719 }
b8ff05a9 5720
116ca924
VK
5721 if (adap->params.nports * ncpus < avail_eth_qsets)
5722 q10g = max(8U, ncpus);
5723 else
5724 q10g = max(8U, q10g);
688848b1 5725
00e31cfc
RL
5726 while ((q10g * n10g) >
5727 (avail_eth_qsets - (adap->params.nports - n10g) * q1g))
116ca924 5728 q10g--;
b8ff05a9 5729
116ca924
VK
5730#else /* !CONFIG_CHELSIO_T4_DCB */
5731 q1g = 1;
5732 q10g = min(q10g, ncpus);
5733#endif /* !CONFIG_CHELSIO_T4_DCB */
5734 if (is_kdump_kernel()) {
85eacf3f 5735 q10g = 1;
116ca924
VK
5736 q1g = 1;
5737 }
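/* Editor's illustration (hypothetical, non-DCB build): a 2-port 10G
 * adapter on an 8-core machine with avail_eth_qsets = 32 computes
 * q10g = (32 - 0) / 2 = 16, then caps it at ncpus, so each 10G port
 * gets 8 queue sets while q1g stays 1.
 */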
85eacf3f 5738
b8ff05a9
DM
5739 for_each_port(adap, i) {
5740 struct port_info *pi = adap2pinfo(adap, i);
5741
5742 pi->first_qset = qidx;
116ca924 5743 pi->nqsets = is_x_10g_port(&pi->link_cfg) ? q10g : q1g;
b8ff05a9
DM
5744 qidx += pi->nqsets;
5745 }
5746
5747 s->ethqsets = qidx;
5748 s->max_ethqsets = qidx; /* MSI-X may lower it later */
76c3a552 5749 avail_qsets -= qidx;
b8ff05a9 5750
0fbc81b3 5751 if (is_uld(adap)) {
76c3a552 5752 /* For offload we use 1 queue/channel if all ports are up to 1G,
b8ff05a9
DM
5753 * otherwise we divide all available queues amongst the channels
5754 * capped by the number of available cores.
5755 */
76c3a552 5756 num_ulds = adap->num_uld + adap->num_ofld_uld;
116ca924 5757 i = min_t(u32, MAX_OFLD_QSETS, ncpus);
76c3a552
RL
5758 avail_uld_qsets = roundup(i, adap->params.nports);
5759 if (avail_qsets < num_ulds * adap->params.nports) {
5760 adap->params.offload = 0;
5761 adap->params.crypto = 0;
5762 s->ofldqsets = 0;
5763 } else if (avail_qsets < num_ulds * avail_uld_qsets || !n10g) {
0fbc81b3 5764 s->ofldqsets = adap->params.nports;
76c3a552
RL
5765 } else {
5766 s->ofldqsets = avail_uld_qsets;
0fbc81b3 5767 }
76c3a552
RL
5768
5769 avail_qsets -= num_ulds * s->ofldqsets;
b8ff05a9
DM
5770 }
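/* Editor's illustration (hypothetical): 2 ports, 8 CPUs, 2 ULDs and
 * avail_qsets = 48: i = 8 (assuming MAX_OFLD_QSETS >= 8), so
 * avail_uld_qsets = roundup(8, 2) = 8; 48 is not below 2 * 8, so each
 * ULD gets 8 queue sets and 16 queue sets are consumed in total.
 */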
5771
2d0cb84d
RL
5772 /* ETHOFLD Queues used for QoS offload should follow the same
5773 * allocation scheme as normal Ethernet Queues.
5774 */
5775 if (is_ethofld(adap)) {
5776 if (avail_qsets < s->max_ethqsets) {
5777 adap->params.ethofld = 0;
5778 s->eoqsets = 0;
5779 } else {
5780 s->eoqsets = s->max_ethqsets;
5781 }
5782 avail_qsets -= s->eoqsets;
5783 }
5784
fd2261d8
RL
5785 /* Mirror queues must follow the same scheme as normal Ethernet
5786 * Queues, when there are enough queues available. Otherwise,
5787 * allocate at least 1 queue per port. If even 1 queue is not
5788 * available, then disable mirror queues support.
5789 */
5790 if (avail_qsets >= s->max_ethqsets)
5791 s->mirrorqsets = s->max_ethqsets;
5792 else if (avail_qsets >= adap->params.nports)
5793 s->mirrorqsets = adap->params.nports;
5794 else
5795 s->mirrorqsets = 0;
5796 avail_qsets -= s->mirrorqsets;
5797
b8ff05a9
DM
5798 for (i = 0; i < ARRAY_SIZE(s->ethrxq); i++) {
5799 struct sge_eth_rxq *r = &s->ethrxq[i];
5800
c887ad0e 5801 init_rspq(adap, &r->rspq, 5, 10, 1024, 64);
b8ff05a9
DM
5802 r->fl.size = 72;
5803 }
5804
5805 for (i = 0; i < ARRAY_SIZE(s->ethtxq); i++)
5806 s->ethtxq[i].q.size = 1024;
5807
5808 for (i = 0; i < ARRAY_SIZE(s->ctrlq); i++)
5809 s->ctrlq[i].q.size = 512;
5810
a4569504
AG
5811 if (!is_t4(adap->params.chip))
5812 s->ptptxq.q.size = 8;
5813
c887ad0e 5814 init_rspq(adap, &s->fw_evtq, 0, 1, 1024, 64);
0fbc81b3 5815 init_rspq(adap, &s->intrq, 0, 1, 512, 64);
0eaec62a
CL
5816
5817 return 0;
b8ff05a9
DM
5818}
5819
5820/*
5821 * Reduce the number of Ethernet queues across all ports to at most n.
5822 * n provides at least one queue per port.
5823 */
91744948 5824static void reduce_ethqs(struct adapter *adap, int n)
b8ff05a9
DM
5825{
5826 int i;
5827 struct port_info *pi;
5828
5829 while (n < adap->sge.ethqsets)
5830 for_each_port(adap, i) {
5831 pi = adap2pinfo(adap, i);
5832 if (pi->nqsets > 1) {
5833 pi->nqsets--;
5834 adap->sge.ethqsets--;
5835 if (adap->sge.ethqsets <= n)
5836 break;
5837 }
5838 }
5839
5840 n = 0;
5841 for_each_port(adap, i) {
5842 pi = adap2pinfo(adap, i);
5843 pi->first_qset = n;
5844 n += pi->nqsets;
5845 }
5846}
5847
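/* Editor's illustration (hypothetical): with 2 ports at 8 queue sets
 * each, reduce_ethqs(adap, 12) strips one queue set per port per pass
 * until ethqsets reaches 12, leaving 6 sets per port, then recomputes
 * every port's first_qset so the per-port ranges stay contiguous.
 */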
76c3a552 5848static int alloc_msix_info(struct adapter *adap, u32 num_vec)
94cdb8bb 5849{
76c3a552 5850 struct msix_info *msix_info;
94cdb8bb 5851
76c3a552 5852 msix_info = kcalloc(num_vec, sizeof(*msix_info), GFP_KERNEL);
94cdb8bb
HS
5853 if (!msix_info)
5854 return -ENOMEM;
5855
76c3a552
RL
5856 adap->msix_bmap.msix_bmap = kcalloc(BITS_TO_LONGS(num_vec),
5857 sizeof(long), GFP_KERNEL);
5858 if (!adap->msix_bmap.msix_bmap) {
94cdb8bb
HS
5859 kfree(msix_info);
5860 return -ENOMEM;
5861 }
76c3a552
RL
5862
5863 spin_lock_init(&adap->msix_bmap.lock);
5864 adap->msix_bmap.mapsize = num_vec;
5865
5866 adap->msix_info = msix_info;
94cdb8bb
HS
5867 return 0;
5868}
5869
5870static void free_msix_info(struct adapter *adap)
5871{
76c3a552
RL
5872 kfree(adap->msix_bmap.msix_bmap);
5873 kfree(adap->msix_info);
5874}
5875
5876int cxgb4_get_msix_idx_from_bmap(struct adapter *adap)
5877{
5878 struct msix_bmap *bmap = &adap->msix_bmap;
5879 unsigned int msix_idx;
5880 unsigned long flags;
94cdb8bb 5881
76c3a552
RL
5882 spin_lock_irqsave(&bmap->lock, flags);
5883 msix_idx = find_first_zero_bit(bmap->msix_bmap, bmap->mapsize);
5884 if (msix_idx < bmap->mapsize) {
5885 __set_bit(msix_idx, bmap->msix_bmap);
5886 } else {
5887 spin_unlock_irqrestore(&bmap->lock, flags);
5888 return -ENOSPC;
5889 }
5890
5891 spin_unlock_irqrestore(&bmap->lock, flags);
5892 return msix_idx;
5893}
5894
5895void cxgb4_free_msix_idx_in_bmap(struct adapter *adap,
5896 unsigned int msix_idx)
5897{
5898 struct msix_bmap *bmap = &adap->msix_bmap;
5899 unsigned long flags;
5900
5901 spin_lock_irqsave(&bmap->lock, flags);
5902 __clear_bit(msix_idx, bmap->msix_bmap);
5903 spin_unlock_irqrestore(&bmap->lock, flags);
94cdb8bb
HS
5904}
5905
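/* Editor's sketch (not upstream code): typical pairing of the MSI-X
 * index bitmap helpers above -- reserve an index for a queue interrupt
 * and release it again on teardown. request_irq() wiring is elided.
 */
static int __maybe_unused example_claim_msix(struct adapter *adap)
{
	int idx = cxgb4_get_msix_idx_from_bmap(adap);

	if (idx < 0)
		return idx;	/* -ENOSPC: every vector is in use */

	/* ... request_irq(adap->msix_info[idx].vec, ...) would go here ... */

	cxgb4_free_msix_idx_in_bmap(adap, idx);
	return 0;
}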
b8ff05a9
DM
5906/* 2 MSI-X vectors needed for the FW queue and non-data interrupts */
5907#define EXTRA_VECS 2
5908
91744948 5909static int enable_msix(struct adapter *adap)
b8ff05a9 5910{
fd2261d8
RL
5911 u32 eth_need, uld_need = 0, ethofld_need = 0, mirror_need = 0;
5912 u32 ethqsets = 0, ofldqsets = 0, eoqsets = 0, mirrorqsets = 0;
76c3a552 5913 u8 num_uld = 0, nchan = adap->params.nports;
76c3a552 5914 u32 i, want, need, num_vec;
b8ff05a9 5915 struct sge *s = &adap->sge;
f36e58e5 5916 struct msix_entry *entries;
76c3a552
RL
5917 struct port_info *pi;
5918 int allocated, ret;
b8ff05a9 5919
76c3a552 5920 want = s->max_ethqsets;
688848b1
AB
5921#ifdef CONFIG_CHELSIO_T4_DCB
5922 /* For Data Center Bridging we need 8 Ethernet TX Priority Queues for
5923 * each port.
5924 */
76c3a552 5925 need = 8 * nchan;
688848b1 5926#else
76c3a552 5927 need = nchan;
688848b1 5928#endif
76c3a552
RL
5929 eth_need = need;
5930 if (is_uld(adap)) {
5931 num_uld = adap->num_ofld_uld + adap->num_uld;
5932 want += num_uld * s->ofldqsets;
5933 uld_need = num_uld * nchan;
5934 need += uld_need;
5935 }
5936
2d0cb84d
RL
5937 if (is_ethofld(adap)) {
5938 want += s->eoqsets;
5939 ethofld_need = eth_need;
5940 need += ethofld_need;
5941 }
5942
fd2261d8
RL
5943 if (s->mirrorqsets) {
5944 want += s->mirrorqsets;
5945 mirror_need = nchan;
5946 need += mirror_need;
5947 }
5948
76c3a552
RL
5949 want += EXTRA_VECS;
5950 need += EXTRA_VECS;
5951
5952 entries = kmalloc_array(want, sizeof(*entries), GFP_KERNEL);
5953 if (!entries)
5954 return -ENOMEM;
5955
5956 for (i = 0; i < want; i++)
5957 entries[i].entry = i;
5958
f36e58e5
HS
5959 allocated = pci_enable_msix_range(adap->pdev, entries, need, want);
5960 if (allocated < 0) {
76c3a552
RL
5961 /* Disable offload and attempt to get vectors for NIC
5962 * only mode.
5963 */
5964 want = s->max_ethqsets + EXTRA_VECS;
5965 need = eth_need + EXTRA_VECS;
5966 allocated = pci_enable_msix_range(adap->pdev, entries,
5967 need, want);
5968 if (allocated < 0) {
5969 dev_info(adap->pdev_dev,
5970 "Disabling MSI-X due to insufficient MSI-X vectors\n");
5971 ret = allocated;
5972 goto out_free;
5973 }
5974
5975 dev_info(adap->pdev_dev,
5976 "Disabling offload due to insufficient MSI-X vectors\n");
5977 adap->params.offload = 0;
5978 adap->params.crypto = 0;
5979 adap->params.ethofld = 0;
5980 s->ofldqsets = 0;
2d0cb84d 5981 s->eoqsets = 0;
fd2261d8 5982 s->mirrorqsets = 0;
76c3a552 5983 uld_need = 0;
2d0cb84d 5984 ethofld_need = 0;
fd2261d8 5985 mirror_need = 0;
f36e58e5 5986 }
b8ff05a9 5987
76c3a552
RL
5988 num_vec = allocated;
5989 if (num_vec < want) {
5990 /* Distribute available vectors to the various queue groups.
5991 * Every group gets its minimum requirement and NIC gets top
5992 * priority for leftovers.
5993 */
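/* Editor's illustration (hypothetical): want = 40, need = 20,
 * allocated = 30 -> after every group takes its minimum, the
 * 10 leftover vectors are handed to the NIC queue sets (and to
 * ETHOFLD alongside, when enabled) one port at a time.
 */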
5994 ethqsets = eth_need;
5995 if (is_uld(adap))
5996 ofldqsets = nchan;
2d0cb84d
RL
5997 if (is_ethofld(adap))
5998 eoqsets = ethofld_need;
fd2261d8
RL
5999 if (s->mirrorqsets)
6000 mirrorqsets = mirror_need;
76c3a552
RL
6001
6002 num_vec -= need;
6003 while (num_vec) {
2d0cb84d 6004 if (num_vec < eth_need + ethofld_need ||
76c3a552
RL
6005 ethqsets > s->max_ethqsets)
6006 break;
6007
6008 for_each_port(adap, i) {
6009 pi = adap2pinfo(adap, i);
6010 if (pi->nqsets < 2)
6011 continue;
6012
6013 ethqsets++;
6014 num_vec--;
2d0cb84d
RL
6015 if (ethofld_need) {
6016 eoqsets++;
6017 num_vec--;
6018 }
76c3a552
RL
6019 }
6020 }
6021
6022 if (is_uld(adap)) {
6023 while (num_vec) {
6024 if (num_vec < uld_need ||
6025 ofldqsets > s->ofldqsets)
6026 break;
6027
6028 ofldqsets++;
6029 num_vec -= uld_need;
6030 }
6031 }
fd2261d8
RL
6032
6033 if (s->mirrorqsets) {
6034 while (num_vec) {
6035 if (num_vec < mirror_need ||
6036 mirrorqsets > s->mirrorqsets)
6037 break;
6038
6039 mirrorqsets++;
6040 num_vec -= mirror_need;
6041 }
6042 }
76c3a552
RL
6043 } else {
6044 ethqsets = s->max_ethqsets;
6045 if (is_uld(adap))
6046 ofldqsets = s->ofldqsets;
2d0cb84d
RL
6047 if (is_ethofld(adap))
6048 eoqsets = s->eoqsets;
fd2261d8
RL
6049 if (s->mirrorqsets)
6050 mirrorqsets = s->mirrorqsets;
76c3a552
RL
6051 }
6052
6053 if (ethqsets < s->max_ethqsets) {
6054 s->max_ethqsets = ethqsets;
6055 reduce_ethqs(adap, ethqsets);
c32ad224 6056 }
76c3a552 6057
0fbc81b3 6058 if (is_uld(adap)) {
76c3a552
RL
6059 s->ofldqsets = ofldqsets;
6060 s->nqs_per_uld = s->ofldqsets;
94cdb8bb
HS
6061 }
6062
2d0cb84d
RL
6063 if (is_ethofld(adap))
6064 s->eoqsets = eoqsets;
6065
fd2261d8
RL
6066 if (s->mirrorqsets) {
6067 s->mirrorqsets = mirrorqsets;
6068 for_each_port(adap, i) {
6069 pi = adap2pinfo(adap, i);
6070 pi->nmirrorqsets = s->mirrorqsets / nchan;
6071 mutex_init(&pi->vi_mirror_mutex);
6072 }
6073 }
6074
76c3a552
RL
6075 /* map for msix */
6076 ret = alloc_msix_info(adap, allocated);
6077 if (ret)
6078 goto out_disable_msix;
6079
6080 for (i = 0; i < allocated; i++) {
c32ad224 6081 adap->msix_info[i].vec = entries[i].vector;
76c3a552 6082 adap->msix_info[i].idx = i;
94cdb8bb 6083 }
76c3a552
RL
6084
6085 dev_info(adap->pdev_dev,
fd2261d8
RL
6086 "%d MSI-X vectors allocated, nic %d eoqsets %d per uld %d mirrorqsets %d\n",
6087 allocated, s->max_ethqsets, s->eoqsets, s->nqs_per_uld,
6088 s->mirrorqsets);
c32ad224 6089
f36e58e5 6090 kfree(entries);
c32ad224 6091 return 0;
76c3a552
RL
6092
6093out_disable_msix:
6094 pci_disable_msix(adap->pdev);
6095
6096out_free:
6097 kfree(entries);
6098 return ret;
b8ff05a9
DM
6099}
6100
6101#undef EXTRA_VECS
6102
91744948 6103static int init_rss(struct adapter *adap)
671b0060 6104{
c035e183
HS
6105 unsigned int i;
6106 int err;
6107
6108 err = t4_init_rss_mode(adap, adap->mbox);
6109 if (err)
6110 return err;
671b0060
DM
6111
6112 for_each_port(adap, i) {
6113 struct port_info *pi = adap2pinfo(adap, i);
6114
6115 pi->rss = kcalloc(pi->rss_size, sizeof(u16), GFP_KERNEL);
6116 if (!pi->rss)
6117 return -ENOMEM;
671b0060
DM
6118 }
6119 return 0;
6120}
6121
0de72738
HS
6122/* Dump basic information about the adapter */
6123static void print_adapter_info(struct adapter *adapter)
6124{
760446f9
GG
6125 /* Hardware/Firmware/etc. Version/Revision IDs */
6126 t4_dump_version_info(adapter);
0de72738
HS
6127
6128 /* Software/Hardware configuration */
6129 dev_info(adapter->pdev_dev, "Configuration: %sNIC %s, %s capable\n",
6130 is_offload(adapter) ? "R" : "",
80f61f19
AV
6131 ((adapter->flags & CXGB4_USING_MSIX) ? "MSI-X" :
6132 (adapter->flags & CXGB4_USING_MSI) ? "MSI" : ""),
0de72738
HS
6133 is_offload(adapter) ? "Offload" : "non-Offload");
6134}
6135
91744948 6136static void print_port_info(const struct net_device *dev)
b8ff05a9 6137{
b8ff05a9 6138 char buf[80];
118969ed 6139 char *bufp = buf;
118969ed
DM
6140 const struct port_info *pi = netdev_priv(dev);
6141 const struct adapter *adap = pi->adapter;
f1a051b9 6142
c3168cab 6143 if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_100M)
5e78f7fd 6144 bufp += sprintf(bufp, "100M/");
c3168cab 6145 if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_1G)
5e78f7fd 6146 bufp += sprintf(bufp, "1G/");
c3168cab 6147 if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_10G)
118969ed 6148 bufp += sprintf(bufp, "10G/");
c3168cab 6149 if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_25G)
9b86a8d1 6150 bufp += sprintf(bufp, "25G/");
c3168cab 6151 if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_40G)
72aca4bf 6152 bufp += sprintf(bufp, "40G/");
c3168cab
GG
6153 if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_50G)
6154 bufp += sprintf(bufp, "50G/");
6155 if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_100G)
9b86a8d1 6156 bufp += sprintf(bufp, "100G/");
c3168cab
GG
6157 if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_200G)
6158 bufp += sprintf(bufp, "200G/");
6159 if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_400G)
6160 bufp += sprintf(bufp, "400G/");
118969ed
DM
6161 if (bufp != buf)
6162 --bufp;
72aca4bf 6163 sprintf(bufp, "BASE-%s", t4_get_port_type_description(pi->port_type));
118969ed 6164
0de72738
HS
6165 netdev_info(dev, "%s: Chelsio %s (%s) %s\n",
6166 dev->name, adap->params.vpd.id, adap->name, buf);
b8ff05a9
DM
6167}
6168
06546391
DM
6169/*
6170 * Free the following resources:
6171 * - memory used for tables
6172 * - MSI/MSI-X
6173 * - net devices
6174 * - resources FW is holding for us
6175 */
6176static void free_some_resources(struct adapter *adapter)
6177{
6178 unsigned int i;
6179
3bdb376e 6180 kvfree(adapter->smt);
752ade68 6181 kvfree(adapter->l2t);
c68644ef 6182 kvfree(adapter->srq);
b72a32da 6183 t4_cleanup_sched(adapter);
752ade68 6184 kvfree(adapter->tids.tid_tab);
4ec4762d 6185 cxgb4_cleanup_tc_matchall(adapter);
b1396c2b 6186 cxgb4_cleanup_tc_mqprio(adapter);
e0f911c8 6187 cxgb4_cleanup_tc_flower(adapter);
d8931847 6188 cxgb4_cleanup_tc_u32(adapter);
d915c299 6189 cxgb4_cleanup_ethtool_filters(adapter);
4b8e27a8
HS
6190 kfree(adapter->sge.egr_map);
6191 kfree(adapter->sge.ingr_map);
6192 kfree(adapter->sge.starving_fl);
6193 kfree(adapter->sge.txq_maperr);
5b377d11
HS
6194#ifdef CONFIG_DEBUG_FS
6195 kfree(adapter->sge.blocked_fl);
6196#endif
06546391
DM
6197 disable_msi(adapter);
6198
6199 for_each_port(adapter, i)
671b0060 6200 if (adapter->port[i]) {
4f3a0fcf
HS
6201 struct port_info *pi = adap2pinfo(adapter, i);
6202
6203 if (pi->viid != 0)
6204 t4_free_vi(adapter, adapter->mbox, adapter->pf,
6205 0, pi->viid);
671b0060 6206 kfree(adap2pinfo(adapter, i)->rss);
06546391 6207 free_netdev(adapter->port[i]);
671b0060 6208 }
80f61f19 6209 if (adapter->flags & CXGB4_FW_OK)
b2612722 6210 t4_fw_bye(adapter, adapter->pf);
06546391
DM
6211}
6212
1a2a14fb
RL
6213#define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN | \
6214 NETIF_F_GSO_UDP_L4)
35d35682 6215#define VLAN_FEAT (NETIF_F_SG | NETIF_F_IP_CSUM | TSO_FLAGS | \
012475e3 6216 NETIF_F_GRO | NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA)
22adfe0a 6217#define SEGMENT_SIZE 128
b8ff05a9 6218
e8d45292 6219static int t4_get_chip_type(struct adapter *adap, int ver)
d86bd29e 6220{
e8d45292 6221 u32 pl_rev = REV_G(t4_read_reg(adap, PL_REV_A));
46cdc9be 6222
e8d45292 6223 switch (ver) {
d86bd29e 6224 case CHELSIO_T4:
46cdc9be 6225 return CHELSIO_CHIP_CODE(CHELSIO_T4, pl_rev);
d86bd29e 6226 case CHELSIO_T5:
46cdc9be 6227 return CHELSIO_CHIP_CODE(CHELSIO_T5, pl_rev);
d86bd29e 6228 case CHELSIO_T6:
46cdc9be 6229 return CHELSIO_CHIP_CODE(CHELSIO_T6, pl_rev);
d86bd29e 6230 default:
e8d45292 6231 break;
d86bd29e 6232 }
46cdc9be 6233 return -EINVAL;
d86bd29e
HS
6234}
6235
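/* Editor's note (illustrative): the "ver" argument is the chip version
 * recovered from the PCI device ID (e.g. a T5 part yields CHELSIO_T5),
 * while PL_REV supplies the silicon revision that CHELSIO_CHIP_CODE()
 * folds into the final chip code.
 */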
b6244201 6236#ifdef CONFIG_PCI_IOV
baf50868 6237static void cxgb4_mgmt_setup(struct net_device *dev)
e7b48a32
HS
6238{
6239 dev->type = ARPHRD_NONE;
6240 dev->mtu = 0;
6241 dev->hard_header_len = 0;
6242 dev->addr_len = 0;
6243 dev->tx_queue_len = 0;
6244 dev->flags |= IFF_NOARP;
6245 dev->priv_flags |= IFF_NO_QUEUE;
6246
6247 /* Initialize the device structure. */
6248 dev->netdev_ops = &cxgb4_mgmt_netdev_ops;
6249 dev->ethtool_ops = &cxgb4_mgmt_ethtool_ops;
e7b48a32
HS
6250}
6251
b6244201
HS
6252static int cxgb4_iov_configure(struct pci_dev *pdev, int num_vfs)
6253{
7829451c 6254 struct adapter *adap = pci_get_drvdata(pdev);
b6244201
HS
6255 int err = 0;
6256 int current_vfs = pci_num_vf(pdev);
6257 u32 pcie_fw;
b6244201 6258
7829451c 6259 pcie_fw = readl(adap->regs + PCIE_FW_A);
7cfac881
AV
6260 /* Check if fw is initialized */
6261 if (!(pcie_fw & PCIE_FW_INIT_F)) {
6262 dev_warn(&pdev->dev, "Device not initialized\n");
b6244201
HS
6263 return -EOPNOTSUPP;
6264 }
6265
6266 /* If any of the VFs is already assigned to a Guest OS, then
6267 * SR-IOV for this device cannot be modified
6268 */
6269 if (current_vfs && pci_vfs_assigned(pdev)) {
6270 dev_err(&pdev->dev,
6271 "Cannot modify SR-IOV while VFs are assigned\n");
baf50868 6272 return current_vfs;
b6244201 6273 }
baf50868
GG
6274 /* Note that the upper-level code ensures that we're never called with
6275 * a non-zero "num_vfs" when we already have VFs instantiated. But
6276 * it never hurts to code defensively.
b6244201 6277 */
baf50868
GG
6278 if (num_vfs != 0 && current_vfs != 0)
6279 return -EBUSY;
6280
6281 /* Nothing to do for no change. */
6282 if (num_vfs == current_vfs)
6283 return num_vfs;
6284
6285 /* Disable SRIOV when zero is passed. */
b6244201
HS
6286 if (!num_vfs) {
6287 pci_disable_sriov(pdev);
baf50868
GG
6288 /* free VF Management Interface */
6289 unregister_netdev(adap->port[0]);
6290 free_netdev(adap->port[0]);
6291 adap->port[0] = NULL;
6292
661dbeb9 6293 /* free VF resources */
baf50868 6294 adap->num_vfs = 0;
661dbeb9
HS
6295 kfree(adap->vfinfo);
6296 adap->vfinfo = NULL;
baf50868 6297 return 0;
b6244201
HS
6298 }
6299
baf50868
GG
6300 if (!current_vfs) {
6301 struct fw_pfvf_cmd port_cmd, port_rpl;
6302 struct net_device *netdev;
6303 unsigned int pmask, port;
6304 struct pci_dev *pbridge;
6305 struct port_info *pi;
6306 char name[IFNAMSIZ];
6307 u32 devcap2;
6308 u16 flags;
baf50868
GG
6309
6310 /* If we want to instantiate Virtual Functions, then our
6311 * parent bridge's PCI-E needs to support Alternative Routing
6312 * ID (ARI) because our VFs will show up at function offset 8
6313 * and above.
6314 */
6315 pbridge = pdev->bus->self;
6133b920
FL
6316 pcie_capability_read_word(pbridge, PCI_EXP_FLAGS, &flags);
6317 pcie_capability_read_dword(pbridge, PCI_EXP_DEVCAP2, &devcap2);
baf50868
GG
6318
6319 if ((flags & PCI_EXP_FLAGS_VERS) < 2 ||
6320 !(devcap2 & PCI_EXP_DEVCAP2_ARI)) {
6321 /* Our parent bridge does not support ARI so issue a
6322 * warning and skip instantiating the VFs. They
6323 * won't be reachable.
6324 */
6325 dev_warn(&pdev->dev, "Parent bridge %02x:%02x.%x doesn't support ARI; can't instantiate Virtual Functions\n",
6326 pbridge->bus->number, PCI_SLOT(pbridge->devfn),
6327 PCI_FUNC(pbridge->devfn));
6328 return -ENOTSUPP;
6329 }
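/* Editor's note (illustrative): under a non-ARI bridge only functions
 * 0-7 of device 0 are routable, so VFs starting at function offset 8
 * could never be addressed; ARI widens the function field to 8 bits.
 */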
6330 memset(&port_cmd, 0, sizeof(port_cmd));
6331 port_cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_PFVF_CMD) |
6332 FW_CMD_REQUEST_F |
6333 FW_CMD_READ_F |
6334 FW_PFVF_CMD_PFN_V(adap->pf) |
6335 FW_PFVF_CMD_VFN_V(0));
6336 port_cmd.retval_len16 = cpu_to_be32(FW_LEN16(port_cmd));
6337 err = t4_wr_mbox(adap, adap->mbox, &port_cmd, sizeof(port_cmd),
6338 &port_rpl);
b6244201
HS
6339 if (err)
6340 return err;
baf50868
GG
6341 pmask = FW_PFVF_CMD_PMASK_G(be32_to_cpu(port_rpl.type_to_neq));
6342 port = ffs(pmask) - 1;
6343 /* Allocate VF Management Interface. */
6344 snprintf(name, IFNAMSIZ, "mgmtpf%d,%d", adap->adap_idx,
6345 adap->pf);
6346 netdev = alloc_netdev(sizeof(struct port_info),
6347 name, NET_NAME_UNKNOWN, cxgb4_mgmt_setup);
6348 if (!netdev)
6349 return -ENOMEM;
7829451c 6350
baf50868
GG
6351 pi = netdev_priv(netdev);
6352 pi->adapter = adap;
6353 pi->lport = port;
6354 pi->tx_chan = port;
6355 SET_NETDEV_DEV(netdev, &pdev->dev);
6356
6357 adap->port[0] = netdev;
6358 pi->port_id = 0;
6359
6360 err = register_netdev(adap->port[0]);
6361 if (err) {
6362 pr_info("Unable to register VF mgmt netdev %s\n", name);
6363 free_netdev(adap->port[0]);
6364 adap->port[0] = NULL;
e7b48a32 6365 return err;
baf50868
GG
6366 }
6367 /* Allocate and set up VF Information. */
6368 adap->vfinfo = kcalloc(pci_sriov_get_totalvfs(pdev),
6369 sizeof(struct vf_info), GFP_KERNEL);
6370 if (!adap->vfinfo) {
6371 unregister_netdev(adap->port[0]);
6372 free_netdev(adap->port[0]);
6373 adap->port[0] = NULL;
6374 return -ENOMEM;
6375 }
6376 cxgb4_mgmt_fill_vf_station_mac_addr(adap);
6377 }
6378 /* Instantiate the requested number of VFs. */
6379 err = pci_enable_sriov(pdev, num_vfs);
6380 if (err) {
6381 pr_info("Unable to instantiate %d VFs\n", num_vfs);
6382 if (!current_vfs) {
6383 unregister_netdev(adap->port[0]);
6384 free_netdev(adap->port[0]);
6385 adap->port[0] = NULL;
6386 kfree(adap->vfinfo);
6387 adap->vfinfo = NULL;
6388 }
6389 return err;
b6244201 6390 }
661dbeb9 6391
baf50868 6392 adap->num_vfs = num_vfs;
b6244201
HS
6393 return num_vfs;
6394}
baf50868 6395#endif /* CONFIG_PCI_IOV */
b6244201 6396
a8c16e8e 6397#if IS_ENABLED(CONFIG_CHELSIO_TLS_DEVICE) || IS_ENABLED(CONFIG_CHELSIO_IPSEC_INLINE)
76f919eb
AS
6398
6399static int chcr_offload_state(struct adapter *adap,
6400 enum cxgb4_netdev_tls_ops op_val)
6401{
6402 switch (op_val) {
a8c16e8e 6403#if IS_ENABLED(CONFIG_CHELSIO_TLS_DEVICE)
76f919eb 6404 case CXGB4_TLSDEV_OPS:
a8c16e8e
RM
6405 if (!adap->uld[CXGB4_ULD_KTLS].handle) {
6406 dev_dbg(adap->pdev_dev, "ch_ktls driver is not loaded\n");
76f919eb
AS
6407 return -EOPNOTSUPP;
6408 }
a8c16e8e 6409 if (!adap->uld[CXGB4_ULD_KTLS].tlsdev_ops) {
76f919eb 6410 dev_dbg(adap->pdev_dev,
a8c16e8e 6411 "ch_ktls driver has no registered tlsdev_ops\n");
76f919eb
AS
6412 return -EOPNOTSUPP;
6413 }
6414 break;
6415#endif /* CONFIG_CHELSIO_TLS_DEVICE */
6416#if IS_ENABLED(CONFIG_CHELSIO_IPSEC_INLINE)
6417 case CXGB4_XFRMDEV_OPS:
6418 if (!adap->uld[CXGB4_ULD_IPSEC].handle) {
6419 dev_dbg(adap->pdev_dev, "chipsec driver is not loaded\n");
6420 return -EOPNOTSUPP;
6421 }
6422 if (!adap->uld[CXGB4_ULD_IPSEC].xfrmdev_ops) {
6423 dev_dbg(adap->pdev_dev,
6424 "chipsec driver has no registered xfrmdev_ops\n");
6425 return -EOPNOTSUPP;
6426 }
6427 break;
6428#endif /* CONFIG_CHELSIO_IPSEC_INLINE */
6429 default:
6430 dev_dbg(adap->pdev_dev,
6431 "driver has no support for offload %d\n", op_val);
6432 return -EOPNOTSUPP;
6433 }
6434
6435 return 0;
6436}
6437
6438#endif /* CONFIG_CHELSIO_TLS_DEVICE || CONFIG_CHELSIO_IPSEC_INLINE */
6439
a8c16e8e 6440#if IS_ENABLED(CONFIG_CHELSIO_TLS_DEVICE)
a3ac249a
RM
6441
6442static int cxgb4_ktls_dev_add(struct net_device *netdev, struct sock *sk,
6443 enum tls_offload_ctx_dir direction,
6444 struct tls_crypto_info *crypto_info,
6445 u32 tcp_sn)
6446{
6447 struct adapter *adap = netdev2adap(netdev);
76f919eb 6448 int ret;
a3ac249a
RM
6449
6450 mutex_lock(&uld_mutex);
76f919eb
AS
6451 ret = chcr_offload_state(adap, CXGB4_TLSDEV_OPS);
6452 if (ret)
a3ac249a 6453 goto out_unlock;
a3ac249a
RM
6454
6455 ret = cxgb4_set_ktls_feature(adap, FW_PARAMS_PARAM_DEV_KTLS_HW_ENABLE);
6456 if (ret)
6457 goto out_unlock;
6458
a8c16e8e
RM
6459 ret = adap->uld[CXGB4_ULD_KTLS].tlsdev_ops->tls_dev_add(netdev, sk,
6460 direction,
6461 crypto_info,
6462 tcp_sn);
a3ac249a
RM
6463 /* if there is a failure, clear the refcount */
6464 if (ret)
6465 cxgb4_set_ktls_feature(adap,
6466 FW_PARAMS_PARAM_DEV_KTLS_HW_DISABLE);
6467out_unlock:
6468 mutex_unlock(&uld_mutex);
6469 return ret;
6470}
6471
6472static void cxgb4_ktls_dev_del(struct net_device *netdev,
6473 struct tls_context *tls_ctx,
6474 enum tls_offload_ctx_dir direction)
6475{
6476 struct adapter *adap = netdev2adap(netdev);
6477
6478 mutex_lock(&uld_mutex);
76f919eb 6479 if (chcr_offload_state(adap, CXGB4_TLSDEV_OPS))
a3ac249a 6480 goto out_unlock;
76f919eb 6481
a8c16e8e
RM
6482 adap->uld[CXGB4_ULD_KTLS].tlsdev_ops->tls_dev_del(netdev, tls_ctx,
6483 direction);
76f919eb
AS
6484
6485out_unlock:
65e302a9 6486 cxgb4_set_ktls_feature(adap, FW_PARAMS_PARAM_DEV_KTLS_HW_DISABLE);
76f919eb
AS
6487 mutex_unlock(&uld_mutex);
6488}
6489
a8c16e8e
RM
6490static const struct tlsdev_ops cxgb4_ktls_ops = {
6491 .tls_dev_add = cxgb4_ktls_dev_add,
6492 .tls_dev_del = cxgb4_ktls_dev_del,
6493};
6494#endif /* CONFIG_CHELSIO_TLS_DEVICE */
6495
76f919eb
AS
6496#if IS_ENABLED(CONFIG_CHELSIO_IPSEC_INLINE)
6497
6498static int cxgb4_xfrm_add_state(struct xfrm_state *x)
6499{
6500 struct adapter *adap = netdev2adap(x->xso.dev);
6501 int ret;
6502
6503 if (!mutex_trylock(&uld_mutex)) {
6504 dev_dbg(adap->pdev_dev,
6505 "crypto uld critical resource is under use\n");
6506 return -EBUSY;
a3ac249a 6507 }
76f919eb
AS
6508 ret = chcr_offload_state(adap, CXGB4_XFRMDEV_OPS);
6509 if (ret)
6510 goto out_unlock;
a3ac249a 6511
76f919eb
AS
6512 ret = adap->uld[CXGB4_ULD_IPSEC].xfrmdev_ops->xdo_dev_state_add(x);
6513
6514out_unlock:
6515 mutex_unlock(&uld_mutex);
6516
6517 return ret;
6518}
6519
6520static void cxgb4_xfrm_del_state(struct xfrm_state *x)
6521{
6522 struct adapter *adap = netdev2adap(x->xso.dev);
6523
6524 if (!mutex_trylock(&uld_mutex)) {
6525 dev_dbg(adap->pdev_dev,
6526 "crypto uld critical resource is under use\n");
6527 return;
6528 }
6529 if (chcr_offload_state(adap, CXGB4_XFRMDEV_OPS))
a3ac249a 6530 goto out_unlock;
76f919eb
AS
6531
6532 adap->uld[CXGB4_ULD_IPSEC].xfrmdev_ops->xdo_dev_state_delete(x);
6533
6534out_unlock:
6535 mutex_unlock(&uld_mutex);
6536}
6537
6538static void cxgb4_xfrm_free_state(struct xfrm_state *x)
6539{
6540 struct adapter *adap = netdev2adap(x->xso.dev);
6541
6542 if (!mutex_trylock(&uld_mutex)) {
6543 dev_dbg(adap->pdev_dev,
6544 "crypto uld critical resource is under use\n");
6545 return;
a3ac249a 6546 }
76f919eb
AS
6547 if (chcr_offload_state(adap, CXGB4_XFRMDEV_OPS))
6548 goto out_unlock;
a3ac249a 6549
76f919eb
AS
6550 adap->uld[CXGB4_ULD_IPSEC].xfrmdev_ops->xdo_dev_state_free(x);
6551
6552out_unlock:
6553 mutex_unlock(&uld_mutex);
6554}
6555
6556static bool cxgb4_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
6557{
6558 struct adapter *adap = netdev2adap(x->xso.dev);
6559 bool ret = false;
6560
6561 if (!mutex_trylock(&uld_mutex)) {
6562 dev_dbg(adap->pdev_dev,
6563 "crypto uld critical resource is under use\n");
6564 return ret;
6565 }
6566 if (chcr_offload_state(adap, CXGB4_XFRMDEV_OPS))
6567 goto out_unlock;
6568
6569 ret = adap->uld[CXGB4_ULD_IPSEC].xfrmdev_ops->xdo_dev_offload_ok(skb, x);
a3ac249a
RM
6570
6571out_unlock:
6572 mutex_unlock(&uld_mutex);
76f919eb 6573 return ret;
a3ac249a
RM
6574}
6575
76f919eb
AS
6576static void cxgb4_advance_esn_state(struct xfrm_state *x)
6577{
6578 struct adapter *adap = netdev2adap(x->xso.dev);
6579
6580 if (!mutex_trylock(&uld_mutex)) {
6581 dev_dbg(adap->pdev_dev,
6582 "crypto uld critical resource is under use\n");
6583 return;
6584 }
6585 if (chcr_offload_state(adap, CXGB4_XFRMDEV_OPS))
6586 goto out_unlock;
6587
6588 adap->uld[CXGB4_ULD_IPSEC].xfrmdev_ops->xdo_dev_state_advance_esn(x);
6589
6590out_unlock:
6591 mutex_unlock(&uld_mutex);
6592}
6593
6594static const struct xfrmdev_ops cxgb4_xfrmdev_ops = {
6595 .xdo_dev_state_add = cxgb4_xfrm_add_state,
6596 .xdo_dev_state_delete = cxgb4_xfrm_del_state,
6597 .xdo_dev_state_free = cxgb4_xfrm_free_state,
6598 .xdo_dev_offload_ok = cxgb4_ipsec_offload_ok,
6599 .xdo_dev_state_advance_esn = cxgb4_advance_esn_state,
6600};
6601
6602#endif /* CONFIG_CHELSIO_IPSEC_INLINE */
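/* Editor's note (illustrative): each wrapper above follows one dispatch
 * pattern -- mutex_trylock(&uld_mutex) so a busy ULD cannot deadlock
 * the caller, chcr_offload_state() to confirm the ULD and its ops table
 * are registered, then the indirect call into the ULD's handler.
 */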
6603
1dd06ae8 6604static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
b8ff05a9 6605{
e8d45292
GG
6606 struct net_device *netdev;
6607 struct adapter *adapter;
6608 static int adap_idx = 1;
6609 int s_qpp, qpp, num_seg;
b8ff05a9 6610 struct port_info *pi;
c8f44aff 6611 bool highdma = false;
d86bd29e 6612 enum chip_type chip;
e8d45292
GG
6613 void __iomem *regs;
6614 int func, chip_ver;
6615 u16 device_id;
6616 int i, err;
6617 u32 whoami;
b8ff05a9 6618
b8ff05a9
DM
6619 err = pci_request_regions(pdev, KBUILD_MODNAME);
6620 if (err) {
6621 /* Just info, some other driver may have claimed the device. */
6622 dev_info(&pdev->dev, "cannot obtain PCI resources\n");
6623 return err;
6624 }
6625
b8ff05a9
DM
6626 err = pci_enable_device(pdev);
6627 if (err) {
6628 dev_err(&pdev->dev, "cannot enable PCI device\n");
6629 goto out_release_regions;
6630 }
6631
d6ce2628
HS
6632 regs = pci_ioremap_bar(pdev, 0);
6633 if (!regs) {
6634 dev_err(&pdev->dev, "cannot map device registers\n");
6635 err = -ENOMEM;
6636 goto out_disable_device;
6637 }
6638
baf50868
GG
6639 adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
6640 if (!adapter) {
6641 err = -ENOMEM;
6642 goto out_unmap_bar0;
6643 }
6644
6645 adapter->regs = regs;
8203b509
HS
6646 err = t4_wait_dev_ready(regs);
6647 if (err < 0)
e729452e 6648 goto out_free_adapter;
8203b509 6649
d6ce2628 6650 /* We control everything through one PF */
e8d45292
GG
6651 whoami = t4_read_reg(adapter, PL_WHOAMI_A);
6652 pci_read_config_word(pdev, PCI_DEVICE_ID, &device_id);
6653 chip = t4_get_chip_type(adapter, CHELSIO_PCI_ID_VER(device_id));
28618314 6654 if ((int)chip < 0) {
e8d45292
GG
6655 dev_err(&pdev->dev, "Device %d is not supported\n", device_id);
6656 err = chip;
6657 goto out_free_adapter;
6658 }
6659 chip_ver = CHELSIO_CHIP_VERSION(chip);
6660 func = chip_ver <= CHELSIO_T5 ?
6661 SOURCEPF_G(whoami) : T6_SOURCEPF_G(whoami);
baf50868
GG
6662
6663 adapter->pdev = pdev;
6664 adapter->pdev_dev = &pdev->dev;
6665 adapter->name = pci_name(pdev);
6666 adapter->mbox = func;
6667 adapter->pf = func;
016764de
GG
6668 adapter->params.chip = chip;
6669 adapter->adap_idx = adap_idx;
baf50868
GG
6670 adapter->msg_enable = DFLT_MSG_ENABLE;
6671 adapter->mbox_log = kzalloc(sizeof(*adapter->mbox_log) +
6672 (sizeof(struct mbox_cmd) *
6673 T4_OS_LOG_MBOX_CMDS),
6674 GFP_KERNEL);
6675 if (!adapter->mbox_log) {
6676 err = -ENOMEM;
6677 goto out_free_adapter;
6678 }
6679 spin_lock_init(&adapter->mbox_lock);
6680 INIT_LIST_HEAD(&adapter->mlist.list);
aca06eaf 6681 adapter->mbox_log->size = T4_OS_LOG_MBOX_CMDS;
baf50868
GG
6682 pci_set_drvdata(pdev, adapter);
6683
d6ce2628 6684 if (func != ent->driver_data) {
d6ce2628
HS
6685 pci_disable_device(pdev);
6686 pci_save_state(pdev); /* to restore SR-IOV later */
baf50868 6687 return 0;
d6ce2628
HS
6688 }
6689
b8ff05a9 6690 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
c8f44aff 6691 highdma = true;
b8ff05a9
DM
6692 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
6693 if (err) {
6694 dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
6695 "coherent allocations\n");
baf50868 6696 goto out_free_adapter;
b8ff05a9
DM
6697 }
6698 } else {
6699 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
6700 if (err) {
6701 dev_err(&pdev->dev, "no usable DMA configuration\n");
baf50868 6702 goto out_free_adapter;
b8ff05a9
DM
6703 }
6704 }
6705
6706 pci_enable_pcie_error_reporting(pdev);
6707 pci_set_master(pdev);
6708 pci_save_state(pdev);
7829451c 6709 adap_idx++;
29aaee65
AB
6710 adapter->workq = create_singlethread_workqueue("cxgb4");
6711 if (!adapter->workq) {
6712 err = -ENOMEM;
6713 goto out_free_adapter;
6714 }
6715
144be3d9 6716 /* PCI device has been enabled */
80f61f19 6717 adapter->flags |= CXGB4_DEV_ENABLED;
b8ff05a9
DM
6718 memset(adapter->chan_map, 0xff, sizeof(adapter->chan_map));
6719
b0ba9d5f
CL
6720 /* If possible, we use PCIe Relaxed Ordering Attribute to deliver
6721 * Ingress Packet Data to Free List Buffers in order to allow for
6722 * chipset performance optimizations between the Root Complex and
6723 * Memory Controllers. (Messages to the associated Ingress Queue
6724 * notifying new Packet Placement in the Free List Buffers will be
6725 * sent without the Relaxed Ordering Attribute, thus guaranteeing that
6726 * all preceding PCIe Transaction Layer Packets will be processed
6727 * first.) But some Root Complexes have various issues with Upstream
6728 * Transaction Layer Packets with the Relaxed Ordering Attribute set.
6729 * For PCIe devices below such Root Complexes the kernel clears the
6730 * Relaxed Ordering bit in the configuration space, so we check our
6731 * PCIe configuration space to see if it's flagged with advice against
6732 * using Relaxed Ordering.
6733 */
6734 if (!pcie_relaxed_ordering_enabled(pdev))
80f61f19 6735 adapter->flags |= CXGB4_ROOT_NO_RELAXED_ORDERING;
b0ba9d5f 6736
b8ff05a9
DM
6737 spin_lock_init(&adapter->stats_lock);
6738 spin_lock_init(&adapter->tid_release_lock);
e327c225 6739 spin_lock_init(&adapter->win0_lock);
b8ff05a9
DM
6740
6741 INIT_WORK(&adapter->tid_release_task, process_tid_release_list);
881806bc
VP
6742 INIT_WORK(&adapter->db_full_task, process_db_full);
6743 INIT_WORK(&adapter->db_drop_task, process_db_drop);
8b7372c1 6744 INIT_WORK(&adapter->fatal_err_notify_task, notify_fatal_err);
b8ff05a9
DM
6745
6746 err = t4_prep_adapter(adapter);
6747 if (err)
d6ce2628
HS
6748 goto out_free_adapter;
6749
1dde532d
RL
6750 if (is_kdump_kernel()) {
6751 /* Collect hardware state and append to /proc/vmcore */
6752 err = cxgb4_cudbg_vmcore_add_dump(adapter);
6753 if (err) {
6754 dev_warn(adapter->pdev_dev,
6755 "Fail collecting vmcore device dump, err: %d. Continuing\n",
6756 err);
6757 err = 0;
6758 }
6759 }
22adfe0a 6760
d14807dd 6761 if (!is_t4(adapter->params.chip)) {
f612b815
HS
6762 s_qpp = (QUEUESPERPAGEPF0_S +
6763 (QUEUESPERPAGEPF1_S - QUEUESPERPAGEPF0_S) *
b2612722 6764 adapter->pf);
f612b815
HS
6765 qpp = 1 << QUEUESPERPAGEPF0_G(t4_read_reg(adapter,
6766 SGE_EGRESS_QUEUES_PER_PAGE_PF_A) >> s_qpp);
22adfe0a
SR
6767 num_seg = PAGE_SIZE / SEGMENT_SIZE;
6768
6769 /* Each segment size is 128B. Write coalescing is enabled only
6770 * when SGE_EGRESS_QUEUES_PER_PAGE_PF reg value for the
6771 * queue is less than the number of segments that can be
6772 * accommodated in a page.
6773 */
6774 if (qpp > num_seg) {
6775 dev_err(&pdev->dev,
6776 "Incorrect number of egress queues per page\n");
6777 err = -EINVAL;
d6ce2628 6778 goto out_free_adapter;
22adfe0a
SR
6779 }
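/* Editor's illustration: with 4KB pages, num_seg = 4096 / 128 = 32,
 * so write coalescing remains usable as long as the PF is configured
 * for at most 32 egress queues per page.
 */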
6780 adapter->bar2 = ioremap_wc(pci_resource_start(pdev, 2),
6781 pci_resource_len(pdev, 2));
6782 if (!adapter->bar2) {
6783 dev_err(&pdev->dev, "cannot map device bar2 region\n");
6784 err = -ENOMEM;
d6ce2628 6785 goto out_free_adapter;
22adfe0a
SR
6786 }
6787 }
6788
636f9d37 6789 setup_memwin(adapter);
86e8f298 6790 err = adap_init0(adapter, 0);
5b377d11
HS
6791#ifdef CONFIG_DEBUG_FS
6792 bitmap_zero(adapter->sge.blocked_fl, adapter->sge.egr_sz);
6793#endif
636f9d37 6794 setup_memwin_rdma(adapter);
b8ff05a9
DM
6795 if (err)
6796 goto out_unmap_bar;
6797
2a485cf7
HS
6798 /* configure SGE_STAT_CFG_A to read WC stats */
6799 if (!is_t4(adapter->params.chip))
676d6a75
HS
6800 t4_write_reg(adapter, SGE_STAT_CFG_A, STATSOURCE_T5_V(7) |
6801 (is_t5(adapter->params.chip) ? STATMODE_V(0) :
6802 T6_STATMODE_V(0)));
2a485cf7 6803
b539ea60
AV
6804 /* Initialize hash mac addr list */
6805 INIT_LIST_HEAD(&adapter->mac_hlist);
6806
b8ff05a9 6807 for_each_port(adapter, i) {
ab0367ea
RL
6808 /* To support MQPRIO Offload, we need some extra
6809 * queues for the ETHOFLD TIDs. Keep it equal to
6810 * MAX_ATIDS for now. Once we connect to the firmware
6811 * later and query the EOTID params, we'll learn the
6812 * actual # of EOTIDs supported.
6813 */
b8ff05a9 6814 netdev = alloc_etherdev_mq(sizeof(struct port_info),
ab0367ea 6815 MAX_ETH_QSETS + MAX_ATIDS);
b8ff05a9
DM
6816 if (!netdev) {
6817 err = -ENOMEM;
6818 goto out_free_dev;
6819 }
6820
6821 SET_NETDEV_DEV(netdev, &pdev->dev);
6822
6823 adapter->port[i] = netdev;
6824 pi = netdev_priv(netdev);
6825 pi->adapter = adapter;
6826 pi->xact_addr_filt = -1;
b8ff05a9 6827 pi->port_id = i;
b8ff05a9
DM
6828 netdev->irq = pdev->irq;
6829
2ed28baa
MM
6830 netdev->hw_features = NETIF_F_SG | TSO_FLAGS |
6831 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
012475e3 6832 NETIF_F_RXCSUM | NETIF_F_RXHASH | NETIF_F_GRO |
d8931847 6833 NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
c8729cac 6834 NETIF_F_HW_TC | NETIF_F_NTUPLE;
d0a1299c 6835
e8d45292 6836 if (chip_ver > CHELSIO_T5) {
c50ae55e
GG
6837 netdev->hw_enc_features |= NETIF_F_IP_CSUM |
6838 NETIF_F_IPV6_CSUM |
6839 NETIF_F_RXCSUM |
6840 NETIF_F_GSO_UDP_TUNNEL |
64f40cdd 6841 NETIF_F_GSO_UDP_TUNNEL_CSUM |
c50ae55e
GG
6842 NETIF_F_TSO | NETIF_F_TSO6;
6843
1435d997 6844 netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL |
64f40cdd 6845 NETIF_F_GSO_UDP_TUNNEL_CSUM |
1435d997 6846 NETIF_F_HW_TLS_RECORD;
ad166a8e
JK
6847
6848 if (adapter->rawf_cnt)
6849 netdev->udp_tunnel_nic_info = &cxgb_udp_tunnels;
c50ae55e 6850 }
d0a1299c 6851
c8f44aff
MM
6852 if (highdma)
6853 netdev->hw_features |= NETIF_F_HIGHDMA;
6854 netdev->features |= netdev->hw_features;
b8ff05a9 6855 netdev->vlan_features = netdev->features & VLAN_FEAT;
a8c16e8e 6856#if IS_ENABLED(CONFIG_CHELSIO_TLS_DEVICE)
a3ac249a
RM
6857 if (pi->adapter->params.crypto & FW_CAPS_CONFIG_TLS_HW) {
6858 netdev->hw_features |= NETIF_F_HW_TLS_TX;
6859 netdev->tlsdev_ops = &cxgb4_ktls_ops;
6860 /* initialize the refcount */
6861 refcount_set(&pi->adapter->chcr_ktls.ktls_refcount, 0);
6862 }
76f919eb
AS
6863#endif /* CONFIG_CHELSIO_TLS_DEVICE */
6864#if IS_ENABLED(CONFIG_CHELSIO_IPSEC_INLINE)
6865 if (pi->adapter->params.crypto & FW_CAPS_CONFIG_IPSEC_INLINE) {
6866 netdev->hw_enc_features |= NETIF_F_HW_ESP;
6867 netdev->features |= NETIF_F_HW_ESP;
6868 netdev->xfrmdev_ops = &cxgb4_xfrmdev_ops;
6869 }
6870#endif /* CONFIG_CHELSIO_IPSEC_INLINE */
6871
01789349
JP
6872 netdev->priv_flags |= IFF_UNICAST_FLT;
6873
d894be57 6874 /* MTU range: 81 - 9600 */
a047fbae 6875 netdev->min_mtu = 81; /* accommodate SACK */
d894be57
JW
6876 netdev->max_mtu = MAX_MTU;
6877
b8ff05a9 6878 netdev->netdev_ops = &cxgb4_netdev_ops;
688848b1
AB
6879#ifdef CONFIG_CHELSIO_T4_DCB
6880 netdev->dcbnl_ops = &cxgb4_dcb_ops;
6881 cxgb4_dcb_state_init(netdev);
ebddd97a 6882 cxgb4_dcb_version_init(netdev);
688848b1 6883#endif
812034f1 6884 cxgb4_set_ethtool_ops(netdev);
b8ff05a9
DM
6885 }
6886
ad75b7d3
RL
6887 cxgb4_init_ethtool_dump(adapter);
6888
b8ff05a9
DM
6889 pci_set_drvdata(pdev, adapter);
6890
80f61f19 6891 if (adapter->flags & CXGB4_FW_OK) {
060e0c75 6892 err = t4_port_init(adapter, func, func, 0);
b8ff05a9
DM
6893 if (err)
6894 goto out_free_dev;
098ef6c2
HS
6895 } else if (adapter->params.nports == 1) {
6896 /* If we don't have a connection to the firmware -- possibly
6897 * because of an error -- grab the raw VPD parameters so we
6898 * can set the proper MAC Address on the debug network
6899 * interface that we've created.
6900 */
6901 u8 hw_addr[ETH_ALEN];
6902 u8 *na = adapter->params.vpd.na;
6903
6904 err = t4_get_raw_vpd_params(adapter, &adapter->params.vpd);
6905 if (!err) {
6906 for (i = 0; i < ETH_ALEN; i++)
6907 hw_addr[i] = (hex2val(na[2 * i + 0]) * 16 +
6908 hex2val(na[2 * i + 1]));
6909 t4_set_hw_addr(adapter, 0, hw_addr);
6910 }
b8ff05a9
DM
6911 }
6912
80f61f19 6913 if (!(adapter->flags & CXGB4_FW_OK))
0eaec62a
CL
6914 goto fw_attach_fail;
6915
098ef6c2 6916 /* Configure queues and allocate tables now, they can be needed as
b8ff05a9
DM
6917 * soon as the first register_netdev completes.
6918 */
0eaec62a
CL
6919 err = cfg_queues(adapter);
6920 if (err)
6921 goto out_free_dev;
b8ff05a9 6922
	adapter->smt = t4_init_smt();
	if (!adapter->smt) {
		/* We tolerate a lack of SMT, giving up some functionality */
		dev_warn(&pdev->dev, "could not allocate SMT, continuing\n");
	}

	adapter->l2t = t4_init_l2t(adapter->l2t_start, adapter->l2t_end);
	if (!adapter->l2t) {
		/* We tolerate a lack of L2T, giving up some functionality */
		dev_warn(&pdev->dev, "could not allocate L2T, continuing\n");
		adapter->params.offload = 0;
	}

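	/* For reference: the SMT (Source MAC Table) and L2T (Layer-2 Table)
	 * back hardware-offloaded connections -- the L2T caches next-hop
	 * MAC/VLAN state, so without it full offload is disabled
	 * (params.offload = 0) while plain NIC operation continues.
	 */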
#if IS_ENABLED(CONFIG_IPV6)
	if (chip_ver <= CHELSIO_T5 &&
	    (!(t4_read_reg(adapter, LE_DB_CONFIG_A) & ASLIPCOMPEN_F))) {
		/* CLIP functionality is not present in hardware,
		 * hence disable all offload features
		 */
		dev_warn(&pdev->dev,
			 "CLIP not enabled in hardware, continuing\n");
		adapter->params.offload = 0;
	} else {
		adapter->clipt = t4_init_clip_tbl(adapter->clipt_start,
						  adapter->clipt_end);
		if (!adapter->clipt) {
			/* We tolerate a lack of clip_table, giving up
			 * some functionality
			 */
			dev_warn(&pdev->dev,
				 "could not allocate Clip table, continuing\n");
			adapter->params.offload = 0;
		}
	}
#endif
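	/* CLIP is the Compressed Local IP table: it stores local IPv6
	 * addresses in compressed form so the lookup engine can match them,
	 * which is why its absence matters only when IPv6 support is built
	 * in.
	 */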

	for_each_port(adapter, i) {
		pi = adap2pinfo(adapter, i);
		pi->sched_tbl = t4_init_sched(adapter->params.nsched_cls);
		if (!pi->sched_tbl)
			dev_warn(&pdev->dev,
				 "could not activate scheduling on port %d\n",
				 i);
	}

	if (is_offload(adapter) || is_hashfilter(adapter)) {
		if (t4_read_reg(adapter, LE_DB_CONFIG_A) & HASHEN_F) {
			u32 v;

			v = t4_read_reg(adapter, LE_DB_HASH_CONFIG_A);
			if (chip_ver <= CHELSIO_T5) {
				adapter->tids.nhash = 1 << HASHTIDSIZE_G(v);
				v = t4_read_reg(adapter, LE_DB_TID_HASHBASE_A);
				adapter->tids.hash_base = v / 4;
			} else {
				adapter->tids.nhash = HASHTBLSIZE_G(v) << 3;
				v = t4_read_reg(adapter,
						T6_LE_DB_HASH_TID_BASE_A);
				adapter->tids.hash_base = v;
			}
		}
	}

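	/* The two branches above reflect a register-layout change: on T4/T5
	 * the hash-table size is a power-of-two exponent (hence 1 << ...)
	 * and the base register apparently holds a byte address that is
	 * divided by 4 to yield a TID index, while on T6 the size field is a
	 * count of 8-entry units (<< 3) and the base register reports the
	 * TID index directly.
	 */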
	if (tid_init(&adapter->tids) < 0) {
		dev_warn(&pdev->dev, "could not allocate TID table, continuing\n");
		adapter->params.offload = 0;
	} else {
		adapter->tc_u32 = cxgb4_init_tc_u32(adapter);
		if (!adapter->tc_u32)
			dev_warn(&pdev->dev,
				 "could not offload tc u32, continuing\n");

		if (cxgb4_init_tc_flower(adapter))
			dev_warn(&pdev->dev,
				 "could not offload tc flower, continuing\n");

		if (cxgb4_init_tc_mqprio(adapter))
			dev_warn(&pdev->dev,
				 "could not offload tc mqprio, continuing\n");

		if (cxgb4_init_tc_matchall(adapter))
			dev_warn(&pdev->dev,
				 "could not offload tc matchall, continuing\n");
		if (cxgb4_init_ethtool_filters(adapter))
			dev_warn(&pdev->dev,
				 "could not initialize ethtool filters, continuing\n");
	}

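	/* All of the tc/ethtool offload initialisations above are
	 * deliberately non-fatal: each failure costs one offload feature but
	 * the probe carries on, consistent with the driver's
	 * degrade-gracefully policy.
	 */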
	/* See what interrupts we'll be using */
	if (msi > 1 && enable_msix(adapter) == 0)
		adapter->flags |= CXGB4_USING_MSIX;
	else if (msi > 0 && pci_enable_msi(pdev) == 0) {
		adapter->flags |= CXGB4_USING_MSI;
		if (msi > 1)
			free_msix_info(adapter);
	}

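	/* As the tests above imply, the "msi" module parameter (defined
	 * earlier in this file) is a preference level: msi > 1 tries MSI-X
	 * first, msi > 0 falls back to MSI, and msi == 0 leaves the device
	 * on legacy INTx; a failed MSI-X attempt also frees the MSI-X state
	 * before settling for MSI.
	 */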
	/* check for PCI Express bandwidth capabilities */
	pcie_print_link_status(pdev);

	cxgb4_init_mps_ref_entries(adapter);

	err = init_rss(adapter);
	if (err)
		goto out_free_dev;

	err = setup_non_data_intr(adapter);
	if (err) {
		dev_err(adapter->pdev_dev,
			"Non Data interrupt allocation failed, err: %d\n", err);
		goto out_free_dev;
	}

	err = setup_fw_sge_queues(adapter);
	if (err) {
		dev_err(adapter->pdev_dev,
			"FW sge queue allocation failed, err %d\n", err);
		goto out_free_dev;
	}

fw_attach_fail:
	/*
	 * The card is now ready to go. If any errors occur during device
	 * registration we do not fail the whole card but rather proceed only
	 * with the ports we manage to register successfully. However we must
	 * register at least one net device.
	 */
	for_each_port(adapter, i) {
		pi = adap2pinfo(adapter, i);
		adapter->port[i]->dev_port = pi->lport;
		netif_set_real_num_tx_queues(adapter->port[i], pi->nqsets);
		netif_set_real_num_rx_queues(adapter->port[i], pi->nqsets);

		netif_carrier_off(adapter->port[i]);

		err = register_netdev(adapter->port[i]);
		if (err)
			break;
		adapter->chan_map[pi->tx_chan] = i;
		print_port_info(adapter->port[i]);
	}
	if (i == 0) {
		dev_err(&pdev->dev, "could not register any net devices\n");
		goto out_free_dev;
	}
	if (err) {
		dev_warn(&pdev->dev, "only %d net devices registered\n", i);
		err = 0;
	}

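	/* Note the partial-success policy implemented above: a
	 * register_netdev() failure stops the loop, anything already
	 * registered is kept, and err is cleared so the probe still
	 * succeeds -- unless the very first port failed, in which case no
	 * netdev exists and the probe is aborted.
	 */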
	if (cxgb4_debugfs_root) {
		adapter->debugfs_root = debugfs_create_dir(pci_name(pdev),
							   cxgb4_debugfs_root);
		setup_debugfs(adapter);
	}

	/* PCIe EEH recovery on powerpc platforms needs fundamental reset */
	pdev->needs_freset = 1;

	if (is_uld(adapter))
		cxgb4_uld_enable(adapter);

	if (!is_t4(adapter->params.chip))
		cxgb4_ptp_init(adapter);

	if (IS_REACHABLE(CONFIG_THERMAL) &&
	    !is_t4(adapter->params.chip) && (adapter->flags & CXGB4_FW_OK))
		cxgb4_thermal_init(adapter);

	print_adapter_info(adapter);
	return 0;

 out_free_dev:
	t4_free_sge_resources(adapter);
	free_some_resources(adapter);
	if (adapter->flags & CXGB4_USING_MSIX)
		free_msix_info(adapter);
	if (adapter->num_uld || adapter->num_ofld_uld)
		t4_uld_mem_free(adapter);
 out_unmap_bar:
	if (!is_t4(adapter->params.chip))
		iounmap(adapter->bar2);
 out_free_adapter:
	if (adapter->workq)
		destroy_workqueue(adapter->workq);

	kfree(adapter->mbox_log);
	kfree(adapter);
 out_unmap_bar0:
	iounmap(regs);
 out_disable_device:
	pci_disable_pcie_error_reporting(pdev);
	pci_disable_device(pdev);
 out_release_regions:
	pci_release_regions(pdev);
	return err;
}

static void remove_one(struct pci_dev *pdev)
{
	struct adapter *adapter = pci_get_drvdata(pdev);
	struct hash_mac_addr *entry, *tmp;

	if (!adapter) {
		pci_release_regions(pdev);
		return;
	}

	/* If we allocated filters, free up state associated with any
	 * valid filters ...
	 */
	clear_all_filters(adapter);

	adapter->flags |= CXGB4_SHUTTING_DOWN;

	if (adapter->pf == 4) {
		int i;

		/* Tear down per-adapter Work Queue first since it can contain
		 * references to our adapter data structure.
		 */
		destroy_workqueue(adapter->workq);

		detach_ulds(adapter);

		for_each_port(adapter, i)
			if (adapter->port[i]->reg_state == NETREG_REGISTERED)
				unregister_netdev(adapter->port[i]);

		t4_uld_clean_up(adapter);

		adap_free_hma_mem(adapter);

		disable_interrupts(adapter);

		cxgb4_free_mps_ref_entries(adapter);

		debugfs_remove_recursive(adapter->debugfs_root);

		if (!is_t4(adapter->params.chip))
			cxgb4_ptp_stop(adapter);
		if (IS_REACHABLE(CONFIG_THERMAL))
			cxgb4_thermal_remove(adapter);

		if (adapter->flags & CXGB4_FULL_INIT_DONE)
			cxgb_down(adapter);

		if (adapter->flags & CXGB4_USING_MSIX)
			free_msix_info(adapter);
		if (adapter->num_uld || adapter->num_ofld_uld)
			t4_uld_mem_free(adapter);
		free_some_resources(adapter);
		list_for_each_entry_safe(entry, tmp, &adapter->mac_hlist,
					 list) {
			list_del(&entry->list);
			kfree(entry);
		}
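		/* list_for_each_entry_safe() is required here because each
		 * entry is freed inside the loop; the _safe variant caches
		 * the next pointer in tmp before the current node vanishes.
		 */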

#if IS_ENABLED(CONFIG_IPV6)
		t4_cleanup_clip_tbl(adapter);
#endif
		if (!is_t4(adapter->params.chip))
			iounmap(adapter->bar2);
	}
#ifdef CONFIG_PCI_IOV
	else {
		cxgb4_iov_configure(adapter->pdev, 0);
	}
#endif
	iounmap(adapter->regs);
	pci_disable_pcie_error_reporting(pdev);
	if ((adapter->flags & CXGB4_DEV_ENABLED)) {
		pci_disable_device(pdev);
		adapter->flags &= ~CXGB4_DEV_ENABLED;
	}
	pci_release_regions(pdev);
	kfree(adapter->mbox_log);
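	/* synchronize_rcu() below waits out any RCU read-side critical
	 * sections that may still dereference the adapter before the final
	 * kfree(), so readers never see freed memory.
	 */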
	synchronize_rcu();
	kfree(adapter);
}

/* "Shutdown" quiesces the device, stopping Ingress Packet and Interrupt
 * delivery. This is essentially a stripped-down version of the PCI remove()
 * function where we do the minimal amount of work necessary to shut down any
 * further activity.
 */
static void shutdown_one(struct pci_dev *pdev)
{
	struct adapter *adapter = pci_get_drvdata(pdev);

	/* As with remove_one() above (see extended comment), we only want to
	 * do cleanup on PCI Devices which went all the way through init_one()
	 * ...
	 */
	if (!adapter) {
		pci_release_regions(pdev);
		return;
	}

	adapter->flags |= CXGB4_SHUTTING_DOWN;

	if (adapter->pf == 4) {
		int i;

		for_each_port(adapter, i)
			if (adapter->port[i]->reg_state == NETREG_REGISTERED)
				cxgb_close(adapter->port[i]);

		rtnl_lock();
		cxgb4_mqprio_stop_offload(adapter);
		rtnl_unlock();

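		/* The RTNL lock is taken around the mqprio teardown,
		 * presumably because it manipulates per-netdev TX
		 * queue/qdisc offload state that the stack otherwise only
		 * touches under RTNL.
		 */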
		if (is_uld(adapter)) {
			detach_ulds(adapter);
			t4_uld_clean_up(adapter);
		}

		disable_interrupts(adapter);
		disable_msi(adapter);

		t4_sge_stop(adapter);
		if (adapter->flags & CXGB4_FW_OK)
			t4_fw_bye(adapter, adapter->mbox);
	}
}

static struct pci_driver cxgb4_driver = {
	.name     = KBUILD_MODNAME,
	.id_table = cxgb4_pci_tbl,
	.probe    = init_one,
	.remove   = remove_one,
	.shutdown = shutdown_one,
#ifdef CONFIG_PCI_IOV
	.sriov_configure = cxgb4_iov_configure,
#endif
	.err_handler = &cxgb4_eeh,
};

static int __init cxgb4_init_module(void)
{
	int ret;

	cxgb4_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL);

	ret = pci_register_driver(&cxgb4_driver);
	if (ret < 0)
		goto err_pci;

#if IS_ENABLED(CONFIG_IPV6)
	if (!inet6addr_registered) {
		ret = register_inet6addr_notifier(&cxgb4_inet6addr_notifier);
		if (ret)
			pci_unregister_driver(&cxgb4_driver);
		else
			inet6addr_registered = true;
	}
#endif
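	/* The inet6addr notifier registered above keeps the CLIP table in
	 * sync: whenever a local IPv6 address is added or removed anywhere
	 * in the system, the callback updates the adapter's compressed-IP
	 * entries so offloaded IPv6 traffic keeps matching.
	 */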

	if (ret == 0)
		return ret;

err_pci:
	debugfs_remove(cxgb4_debugfs_root);

	return ret;
}

static void __exit cxgb4_cleanup_module(void)
{
#if IS_ENABLED(CONFIG_IPV6)
	if (inet6addr_registered) {
		unregister_inet6addr_notifier(&cxgb4_inet6addr_notifier);
		inet6addr_registered = false;
	}
#endif
	pci_unregister_driver(&cxgb4_driver);
	debugfs_remove(cxgb4_debugfs_root);  /* NULL ok */
}

module_init(cxgb4_init_module);
module_exit(cxgb4_cleanup_module);