drivers/net/cxgb4/cxgb4_main.c
1/*
2 * This file is part of the Chelsio T4 Ethernet driver for Linux.
3 *
4 * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved.
5 *
6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenIB.org BSD license below:
11 *
12 * Redistribution and use in source and binary forms, with or
13 * without modification, are permitted provided that the following
14 * conditions are met:
15 *
16 * - Redistributions of source code must retain the above
17 * copyright notice, this list of conditions and the following
18 * disclaimer.
19 *
20 * - Redistributions in binary form must reproduce the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer in the documentation and/or other materials
23 * provided with the distribution.
24 *
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 * SOFTWARE.
33 */
34
35#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
36
37#include <linux/bitmap.h>
38#include <linux/crc32.h>
39#include <linux/ctype.h>
40#include <linux/debugfs.h>
41#include <linux/err.h>
42#include <linux/etherdevice.h>
43#include <linux/firmware.h>
44#include <linux/if_vlan.h>
45#include <linux/init.h>
46#include <linux/log2.h>
47#include <linux/mdio.h>
48#include <linux/module.h>
49#include <linux/moduleparam.h>
50#include <linux/mutex.h>
51#include <linux/netdevice.h>
52#include <linux/pci.h>
53#include <linux/aer.h>
54#include <linux/rtnetlink.h>
55#include <linux/sched.h>
56#include <linux/seq_file.h>
57#include <linux/sockios.h>
58#include <linux/vmalloc.h>
59#include <linux/workqueue.h>
60#include <net/neighbour.h>
61#include <net/netevent.h>
62#include <asm/uaccess.h>
63
64#include "cxgb4.h"
65#include "t4_regs.h"
66#include "t4_msg.h"
67#include "t4fw_api.h"
68#include "l2t.h"
69
70#define DRV_VERSION "1.3.0-ko"
71#define DRV_DESC "Chelsio T4 Network Driver"
72
73/*
74 * Max interrupt hold-off timer value in us. Queues fall back to this value
75 * under extreme memory pressure so it's largish to give the system time to
76 * recover.
77 */
78#define MAX_SGE_TIMERVAL 200U
79
80#ifdef CONFIG_PCI_IOV
81/*
82 * Virtual Function provisioning constants. We need two extra Ingress Queues
83 * with Interrupt capability to serve as the VF's Firmware Event Queue and
84 * Forwarded Interrupt Queue (when using MSI mode) -- neither will have Free
 85 * Lists associated with them. For each Ethernet/Control Egress Queue and
86 * for each Free List, we need an Egress Context.
87 */
88enum {
89 VFRES_NPORTS = 1, /* # of "ports" per VF */
90 VFRES_NQSETS = 2, /* # of "Queue Sets" per VF */
91
92 VFRES_NVI = VFRES_NPORTS, /* # of Virtual Interfaces */
93 VFRES_NETHCTRL = VFRES_NQSETS, /* # of EQs used for ETH or CTRL Qs */
94 VFRES_NIQFLINT = VFRES_NQSETS+2,/* # of ingress Qs/w Free List(s)/intr */
95 VFRES_NIQ = 0, /* # of non-fl/int ingress queues */
96 VFRES_NEQ = VFRES_NQSETS*2, /* # of egress queues */
97 VFRES_TC = 0, /* PCI-E traffic class */
98 VFRES_NEXACTF = 16, /* # of exact MPS filters */
99
100 VFRES_R_CAPS = FW_CMD_CAP_DMAQ|FW_CMD_CAP_VF|FW_CMD_CAP_PORT,
101 VFRES_WX_CAPS = FW_CMD_CAP_DMAQ|FW_CMD_CAP_VF,
102};
103
104/*
105 * Provide a Port Access Rights Mask for the specified PF/VF. This is very
106 * static and likely not to be useful in the long run. We really need to
107 * implement some form of persistent configuration which the firmware
108 * controls.
109 */
110static unsigned int pfvfres_pmask(struct adapter *adapter,
111 unsigned int pf, unsigned int vf)
112{
113 unsigned int portn, portvec;
114
115 /*
116 * Give PF's access to all of the ports.
117 */
118 if (vf == 0)
119 return FW_PFVF_CMD_PMASK_MASK;
120
121 /*
122 * For VFs, we'll assign them access to the ports based purely on the
123 * PF. We assign active ports in order, wrapping around if there are
124 * fewer active ports than PFs: e.g. active port[pf % nports].
125 * Unfortunately the adapter's port_info structs haven't been
126 * initialized yet so we have to compute this.
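 * (For example, with two active ports the VFs of PFs 0 and 2 are given
 * port 0 while the VFs of PFs 1 and 3 are given port 1.)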
127 */
128 if (adapter->params.nports == 0)
129 return 0;
130
131 portn = pf % adapter->params.nports;
132 portvec = adapter->params.portvec;
133 for (;;) {
134 /*
135 * Isolate the lowest set bit in the port vector. If we're at
 136 * the port number that we want, return that as the pmask;
 137 * otherwise mask that bit out of the port vector and
138 * decrement our port number ...
139 */
140 unsigned int pmask = portvec ^ (portvec & (portvec-1));
141 if (portn == 0)
142 return pmask;
143 portn--;
144 portvec &= ~pmask;
145 }
146 /*NOTREACHED*/
147}
148#endif
149
150enum {
151 MEMWIN0_APERTURE = 65536,
152 MEMWIN0_BASE = 0x30000,
153 MEMWIN1_APERTURE = 32768,
154 MEMWIN1_BASE = 0x28000,
155 MEMWIN2_APERTURE = 2048,
156 MEMWIN2_BASE = 0x1b800,
157};
158
159enum {
160 MAX_TXQ_ENTRIES = 16384,
161 MAX_CTRL_TXQ_ENTRIES = 1024,
162 MAX_RSPQ_ENTRIES = 16384,
163 MAX_RX_BUFFERS = 16384,
164 MIN_TXQ_ENTRIES = 32,
165 MIN_CTRL_TXQ_ENTRIES = 32,
166 MIN_RSPQ_ENTRIES = 128,
167 MIN_FL_ENTRIES = 16
168};
169
170#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
171 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
172 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
173
174#define CH_DEVICE(devid, data) { PCI_VDEVICE(CHELSIO, devid), (data) }
175
176static DEFINE_PCI_DEVICE_TABLE(cxgb4_pci_tbl) = {
177 CH_DEVICE(0xa000, 0), /* PE10K */
178 CH_DEVICE(0x4001, 0),
179 CH_DEVICE(0x4002, 0),
180 CH_DEVICE(0x4003, 0),
181 CH_DEVICE(0x4004, 0),
182 CH_DEVICE(0x4005, 0),
183 CH_DEVICE(0x4006, 0),
184 CH_DEVICE(0x4007, 0),
185 CH_DEVICE(0x4008, 0),
186 CH_DEVICE(0x4009, 0),
187 CH_DEVICE(0x400a, 0),
188 { 0, }
189};
190
191#define FW_FNAME "cxgb4/t4fw.bin"
192
193MODULE_DESCRIPTION(DRV_DESC);
194MODULE_AUTHOR("Chelsio Communications");
195MODULE_LICENSE("Dual BSD/GPL");
196MODULE_VERSION(DRV_VERSION);
197MODULE_DEVICE_TABLE(pci, cxgb4_pci_tbl);
198MODULE_FIRMWARE(FW_FNAME);
199
200static int dflt_msg_enable = DFLT_MSG_ENABLE;
201
202module_param(dflt_msg_enable, int, 0644);
203MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T4 default message enable bitmap");
204
205/*
206 * The driver uses the best interrupt scheme available on a platform in the
207 * order MSI-X, MSI, legacy INTx interrupts. This parameter determines which
208 * of these schemes the driver may consider as follows:
209 *
210 * msi = 2: choose from among all three options
211 * msi = 1: only consider MSI and INTx interrupts
212 * msi = 0: force INTx interrupts
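 * (For example, loading the module with msi=1 limits the driver to MSI or
 * INTx even on platforms where MSI-X is available.)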
213 */
214static int msi = 2;
215
216module_param(msi, int, 0644);
217MODULE_PARM_DESC(msi, "whether to use INTx (0), MSI (1) or MSI-X (2)");
218
219/*
220 * Queue interrupt hold-off timer values. Queues default to the first of these
221 * upon creation.
222 */
223static unsigned int intr_holdoff[SGE_NTIMERS - 1] = { 5, 10, 20, 50, 100 };
224
225module_param_array(intr_holdoff, uint, NULL, 0644);
226MODULE_PARM_DESC(intr_holdoff, "values for queue interrupt hold-off timers "
227 "0..4 in microseconds");
228
229static unsigned int intr_cnt[SGE_NCOUNTERS - 1] = { 4, 8, 16 };
230
231module_param_array(intr_cnt, uint, NULL, 0644);
232MODULE_PARM_DESC(intr_cnt,
233 "thresholds 1..3 for queue interrupt packet counters");
234
235static int vf_acls;
236
237#ifdef CONFIG_PCI_IOV
238module_param(vf_acls, bool, 0644);
239MODULE_PARM_DESC(vf_acls, "if set enable virtualization L2 ACL enforcement");
240
241static unsigned int num_vf[4];
242
243module_param_array(num_vf, uint, NULL, 0644);
244MODULE_PARM_DESC(num_vf, "number of VFs for each of PFs 0-3");
245#endif
246
247static struct dentry *cxgb4_debugfs_root;
248
249static LIST_HEAD(adapter_list);
250static DEFINE_MUTEX(uld_mutex);
251static struct cxgb4_uld_info ulds[CXGB4_ULD_MAX];
252static const char *uld_str[] = { "RDMA", "iSCSI" };
253
254static void link_report(struct net_device *dev)
255{
256 if (!netif_carrier_ok(dev))
257 netdev_info(dev, "link down\n");
258 else {
259 static const char *fc[] = { "no", "Rx", "Tx", "Tx/Rx" };
260
261 const char *s = "10Mbps";
262 const struct port_info *p = netdev_priv(dev);
263
264 switch (p->link_cfg.speed) {
265 case SPEED_10000:
266 s = "10Gbps";
267 break;
268 case SPEED_1000:
269 s = "1000Mbps";
270 break;
271 case SPEED_100:
272 s = "100Mbps";
273 break;
274 }
275
276 netdev_info(dev, "link up, %s, full-duplex, %s PAUSE\n", s,
277 fc[p->link_cfg.fc]);
278 }
279}
280
281void t4_os_link_changed(struct adapter *adapter, int port_id, int link_stat)
282{
283 struct net_device *dev = adapter->port[port_id];
284
285 /* Skip changes from disabled ports. */
286 if (netif_running(dev) && link_stat != netif_carrier_ok(dev)) {
287 if (link_stat)
288 netif_carrier_on(dev);
289 else
290 netif_carrier_off(dev);
291
292 link_report(dev);
293 }
294}
295
296void t4_os_portmod_changed(const struct adapter *adap, int port_id)
297{
298 static const char *mod_str[] = {
299 NULL, "LR", "SR", "ER", "passive DA", "active DA", "LRM"
300 };
301
302 const struct net_device *dev = adap->port[port_id];
303 const struct port_info *pi = netdev_priv(dev);
304
305 if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
306 netdev_info(dev, "port module unplugged\n");
307 else if (pi->mod_type < ARRAY_SIZE(mod_str))
308 netdev_info(dev, "%s module inserted\n", mod_str[pi->mod_type]);
309}
310
311/*
312 * Configure the exact and hash address filters to handle a port's multicast
313 * and secondary unicast MAC addresses.
314 */
315static int set_addr_filters(const struct net_device *dev, bool sleep)
316{
317 u64 mhash = 0;
318 u64 uhash = 0;
319 bool free = true;
320 u16 filt_idx[7];
321 const u8 *addr[7];
322 int ret, naddr = 0;
323 const struct netdev_hw_addr *ha;
324 int uc_cnt = netdev_uc_count(dev);
325 int mc_cnt = netdev_mc_count(dev);
326 const struct port_info *pi = netdev_priv(dev);
327 unsigned int mb = pi->adapter->fn;
328
329 /* first do the secondary unicast addresses */
330 netdev_for_each_uc_addr(ha, dev) {
331 addr[naddr++] = ha->addr;
332 if (--uc_cnt == 0 || naddr >= ARRAY_SIZE(addr)) {
333 ret = t4_alloc_mac_filt(pi->adapter, mb, pi->viid, free,
334 naddr, addr, filt_idx, &uhash, sleep);
335 if (ret < 0)
336 return ret;
337
338 free = false;
339 naddr = 0;
340 }
341 }
342
343 /* next set up the multicast addresses */
344 netdev_for_each_mc_addr(ha, dev) {
345 addr[naddr++] = ha->addr;
346 if (--mc_cnt == 0 || naddr >= ARRAY_SIZE(addr)) {
347 ret = t4_alloc_mac_filt(pi->adapter, mb, pi->viid, free,
348 naddr, addr, filt_idx, &mhash, sleep);
349 if (ret < 0)
350 return ret;
351
352 free = false;
353 naddr = 0;
354 }
355 }
356
357 return t4_set_addr_hash(pi->adapter, mb, pi->viid, uhash != 0,
358 uhash | mhash, sleep);
359}
360
361/*
 362 * Set Rx properties of a port, such as promiscuity, address filters, and MTU.
363 * If @mtu is -1 it is left unchanged.
364 */
365static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok)
366{
367 int ret;
368 struct port_info *pi = netdev_priv(dev);
369
370 ret = set_addr_filters(dev, sleep_ok);
371 if (ret == 0)
372 ret = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, mtu,
373 (dev->flags & IFF_PROMISC) ? 1 : 0,
374 (dev->flags & IFF_ALLMULTI) ? 1 : 0, 1, -1,
375 sleep_ok);
376 return ret;
377}
378
379/**
380 * link_start - enable a port
381 * @dev: the port to enable
382 *
383 * Performs the MAC and PHY actions needed to enable a port.
384 */
385static int link_start(struct net_device *dev)
386{
387 int ret;
388 struct port_info *pi = netdev_priv(dev);
389 unsigned int mb = pi->adapter->fn;
390
391 /*
392 * We do not set address filters and promiscuity here, the stack does
393 * that step explicitly.
394 */
395 ret = t4_set_rxmode(pi->adapter, mb, pi->viid, dev->mtu, -1, -1, -1,
396 pi->vlan_grp != NULL, true);
397 if (ret == 0) {
398 ret = t4_change_mac(pi->adapter, mb, pi->viid,
399 pi->xact_addr_filt, dev->dev_addr, true,
400 true);
401 if (ret >= 0) {
402 pi->xact_addr_filt = ret;
403 ret = 0;
404 }
405 }
406 if (ret == 0)
407 ret = t4_link_start(pi->adapter, mb, pi->tx_chan,
408 &pi->link_cfg);
409 if (ret == 0)
410 ret = t4_enable_vi(pi->adapter, mb, pi->viid, true, true);
411 return ret;
412}
413
414/*
415 * Response queue handler for the FW event queue.
416 */
417static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp,
418 const struct pkt_gl *gl)
419{
420 u8 opcode = ((const struct rss_header *)rsp)->opcode;
421
422 rsp++; /* skip RSS header */
423 if (likely(opcode == CPL_SGE_EGR_UPDATE)) {
424 const struct cpl_sge_egr_update *p = (void *)rsp;
425 unsigned int qid = EGR_QID(ntohl(p->opcode_qid));
426 struct sge_txq *txq = q->adap->sge.egr_map[qid];
427
428 txq->restarts++;
429 if ((u8 *)txq < (u8 *)q->adap->sge.ethrxq) {
430 struct sge_eth_txq *eq;
431
432 eq = container_of(txq, struct sge_eth_txq, q);
433 netif_tx_wake_queue(eq->txq);
434 } else {
435 struct sge_ofld_txq *oq;
436
437 oq = container_of(txq, struct sge_ofld_txq, q);
438 tasklet_schedule(&oq->qresume_tsk);
439 }
440 } else if (opcode == CPL_FW6_MSG || opcode == CPL_FW4_MSG) {
441 const struct cpl_fw6_msg *p = (void *)rsp;
442
443 if (p->type == 0)
444 t4_handle_fw_rpl(q->adap, p->data);
445 } else if (opcode == CPL_L2T_WRITE_RPL) {
446 const struct cpl_l2t_write_rpl *p = (void *)rsp;
447
448 do_l2t_write_rpl(q->adap, p);
449 } else
450 dev_err(q->adap->pdev_dev,
451 "unexpected CPL %#x on FW event queue\n", opcode);
452 return 0;
453}
454
455/**
456 * uldrx_handler - response queue handler for ULD queues
457 * @q: the response queue that received the packet
458 * @rsp: the response queue descriptor holding the offload message
459 * @gl: the gather list of packet fragments
460 *
461 * Deliver an ingress offload packet to a ULD. All processing is done by
 462 * the ULD; we just maintain statistics.
463 */
464static int uldrx_handler(struct sge_rspq *q, const __be64 *rsp,
465 const struct pkt_gl *gl)
466{
467 struct sge_ofld_rxq *rxq = container_of(q, struct sge_ofld_rxq, rspq);
468
469 if (ulds[q->uld].rx_handler(q->adap->uld_handle[q->uld], rsp, gl)) {
470 rxq->stats.nomem++;
471 return -1;
472 }
473 if (gl == NULL)
474 rxq->stats.imm++;
475 else if (gl == CXGB4_MSG_AN)
476 rxq->stats.an++;
477 else
478 rxq->stats.pkts++;
479 return 0;
480}
481
482static void disable_msi(struct adapter *adapter)
483{
484 if (adapter->flags & USING_MSIX) {
485 pci_disable_msix(adapter->pdev);
486 adapter->flags &= ~USING_MSIX;
487 } else if (adapter->flags & USING_MSI) {
488 pci_disable_msi(adapter->pdev);
489 adapter->flags &= ~USING_MSI;
490 }
491}
492
493/*
494 * Interrupt handler for non-data events used with MSI-X.
495 */
496static irqreturn_t t4_nondata_intr(int irq, void *cookie)
497{
498 struct adapter *adap = cookie;
499
500 u32 v = t4_read_reg(adap, MYPF_REG(PL_PF_INT_CAUSE));
501 if (v & PFSW) {
502 adap->swintr = 1;
503 t4_write_reg(adap, MYPF_REG(PL_PF_INT_CAUSE), v);
504 }
505 t4_slow_intr_handler(adap);
506 return IRQ_HANDLED;
507}
508
509/*
510 * Name the MSI-X interrupts.
511 */
512static void name_msix_vecs(struct adapter *adap)
513{
514 int i, j, msi_idx = 2, n = sizeof(adap->msix_info[0].desc) - 1;
515
516 /* non-data interrupts */
517 snprintf(adap->msix_info[0].desc, n, "%s", adap->name);
518 adap->msix_info[0].desc[n] = 0;
519
520 /* FW events */
521 snprintf(adap->msix_info[1].desc, n, "%s-FWeventq", adap->name);
522 adap->msix_info[1].desc[n] = 0;
523
524 /* Ethernet queues */
525 for_each_port(adap, j) {
526 struct net_device *d = adap->port[j];
527 const struct port_info *pi = netdev_priv(d);
528
529 for (i = 0; i < pi->nqsets; i++, msi_idx++) {
530 snprintf(adap->msix_info[msi_idx].desc, n, "%s-Rx%d",
531 d->name, i);
532 adap->msix_info[msi_idx].desc[n] = 0;
533 }
534 }
535
536 /* offload queues */
537 for_each_ofldrxq(&adap->sge, i) {
538 snprintf(adap->msix_info[msi_idx].desc, n, "%s-ofld%d",
539 adap->name, i);
540 adap->msix_info[msi_idx++].desc[n] = 0;
541 }
542 for_each_rdmarxq(&adap->sge, i) {
543 snprintf(adap->msix_info[msi_idx].desc, n, "%s-rdma%d",
544 adap->name, i);
545 adap->msix_info[msi_idx++].desc[n] = 0;
546 }
547}
548
549static int request_msix_queue_irqs(struct adapter *adap)
550{
551 struct sge *s = &adap->sge;
552 int err, ethqidx, ofldqidx = 0, rdmaqidx = 0, msi = 2;
553
554 err = request_irq(adap->msix_info[1].vec, t4_sge_intr_msix, 0,
555 adap->msix_info[1].desc, &s->fw_evtq);
556 if (err)
557 return err;
558
559 for_each_ethrxq(s, ethqidx) {
560 err = request_irq(adap->msix_info[msi].vec, t4_sge_intr_msix, 0,
561 adap->msix_info[msi].desc,
562 &s->ethrxq[ethqidx].rspq);
563 if (err)
564 goto unwind;
565 msi++;
566 }
567 for_each_ofldrxq(s, ofldqidx) {
568 err = request_irq(adap->msix_info[msi].vec, t4_sge_intr_msix, 0,
569 adap->msix_info[msi].desc,
570 &s->ofldrxq[ofldqidx].rspq);
571 if (err)
572 goto unwind;
573 msi++;
574 }
575 for_each_rdmarxq(s, rdmaqidx) {
576 err = request_irq(adap->msix_info[msi].vec, t4_sge_intr_msix, 0,
577 adap->msix_info[msi].desc,
578 &s->rdmarxq[rdmaqidx].rspq);
579 if (err)
580 goto unwind;
581 msi++;
582 }
583 return 0;
584
585unwind:
586 while (--rdmaqidx >= 0)
587 free_irq(adap->msix_info[--msi].vec,
588 &s->rdmarxq[rdmaqidx].rspq);
589 while (--ofldqidx >= 0)
590 free_irq(adap->msix_info[--msi].vec,
591 &s->ofldrxq[ofldqidx].rspq);
592 while (--ethqidx >= 0)
593 free_irq(adap->msix_info[--msi].vec, &s->ethrxq[ethqidx].rspq);
594 free_irq(adap->msix_info[1].vec, &s->fw_evtq);
595 return err;
596}
597
598static void free_msix_queue_irqs(struct adapter *adap)
599{
600 int i, msi = 2;
601 struct sge *s = &adap->sge;
602
603 free_irq(adap->msix_info[1].vec, &s->fw_evtq);
604 for_each_ethrxq(s, i)
605 free_irq(adap->msix_info[msi++].vec, &s->ethrxq[i].rspq);
606 for_each_ofldrxq(s, i)
607 free_irq(adap->msix_info[msi++].vec, &s->ofldrxq[i].rspq);
608 for_each_rdmarxq(s, i)
609 free_irq(adap->msix_info[msi++].vec, &s->rdmarxq[i].rspq);
610}
611
612/**
613 * write_rss - write the RSS table for a given port
614 * @pi: the port
615 * @queues: array of queue indices for RSS
616 *
617 * Sets up the portion of the HW RSS table for the port's VI to distribute
618 * packets to the Rx queues in @queues.
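 * (Each entry of @queues is an index into the port's Rx queue sets; it is
 * translated to the corresponding absolute queue id before being written.)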
619 */
620static int write_rss(const struct port_info *pi, const u16 *queues)
621{
622 u16 *rss;
623 int i, err;
624 const struct sge_eth_rxq *q = &pi->adapter->sge.ethrxq[pi->first_qset];
625
626 rss = kmalloc(pi->rss_size * sizeof(u16), GFP_KERNEL);
627 if (!rss)
628 return -ENOMEM;
629
630 /* map the queue indices to queue ids */
631 for (i = 0; i < pi->rss_size; i++, queues++)
632 rss[i] = q[*queues].rspq.abs_id;
633
634 err = t4_config_rss_range(pi->adapter, pi->adapter->fn, pi->viid, 0,
635 pi->rss_size, rss, pi->rss_size);
636 kfree(rss);
637 return err;
638}
639
640/**
641 * setup_rss - configure RSS
642 * @adap: the adapter
643 *
644 * Sets up RSS for each port.
645 */
646static int setup_rss(struct adapter *adap)
647{
648 int i, err;
649
650 for_each_port(adap, i) {
651 const struct port_info *pi = adap2pinfo(adap, i);
652
653 err = write_rss(pi, pi->rss);
654 if (err)
655 return err;
656 }
657 return 0;
658}
659
660/*
661 * Wait until all NAPI handlers are descheduled.
662 */
663static void quiesce_rx(struct adapter *adap)
664{
665 int i;
666
667 for (i = 0; i < ARRAY_SIZE(adap->sge.ingr_map); i++) {
668 struct sge_rspq *q = adap->sge.ingr_map[i];
669
670 if (q && q->handler)
671 napi_disable(&q->napi);
672 }
673}
674
675/*
676 * Enable NAPI scheduling and interrupt generation for all Rx queues.
677 */
678static void enable_rx(struct adapter *adap)
679{
680 int i;
681
682 for (i = 0; i < ARRAY_SIZE(adap->sge.ingr_map); i++) {
683 struct sge_rspq *q = adap->sge.ingr_map[i];
684
685 if (!q)
686 continue;
687 if (q->handler)
688 napi_enable(&q->napi);
689 /* 0-increment GTS to start the timer and enable interrupts */
690 t4_write_reg(adap, MYPF_REG(SGE_PF_GTS),
691 SEINTARM(q->intr_params) |
692 INGRESSQID(q->cntxt_id));
693 }
694}
695
696/**
697 * setup_sge_queues - configure SGE Tx/Rx/response queues
698 * @adap: the adapter
699 *
700 * Determines how many sets of SGE queues to use and initializes them.
701 * We support multiple queue sets per port if we have MSI-X, otherwise
702 * just one queue set per port.
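 * (Without MSI-X, all response queues forward their interrupts to the
 * single interrupt queue (intrq) allocated below.)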
703 */
704static int setup_sge_queues(struct adapter *adap)
705{
706 int err, msi_idx, i, j;
707 struct sge *s = &adap->sge;
708
709 bitmap_zero(s->starving_fl, MAX_EGRQ);
710 bitmap_zero(s->txq_maperr, MAX_EGRQ);
711
712 if (adap->flags & USING_MSIX)
713 msi_idx = 1; /* vector 0 is for non-queue interrupts */
714 else {
715 err = t4_sge_alloc_rxq(adap, &s->intrq, false, adap->port[0], 0,
716 NULL, NULL);
717 if (err)
718 return err;
719 msi_idx = -((int)s->intrq.abs_id + 1);
720 }
721
722 err = t4_sge_alloc_rxq(adap, &s->fw_evtq, true, adap->port[0],
723 msi_idx, NULL, fwevtq_handler);
724 if (err) {
725freeout: t4_free_sge_resources(adap);
726 return err;
727 }
728
729 for_each_port(adap, i) {
730 struct net_device *dev = adap->port[i];
731 struct port_info *pi = netdev_priv(dev);
732 struct sge_eth_rxq *q = &s->ethrxq[pi->first_qset];
733 struct sge_eth_txq *t = &s->ethtxq[pi->first_qset];
734
735 for (j = 0; j < pi->nqsets; j++, q++) {
736 if (msi_idx > 0)
737 msi_idx++;
738 err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev,
739 msi_idx, &q->fl,
740 t4_ethrx_handler);
741 if (err)
742 goto freeout;
743 q->rspq.idx = j;
744 memset(&q->stats, 0, sizeof(q->stats));
745 }
746 for (j = 0; j < pi->nqsets; j++, t++) {
747 err = t4_sge_alloc_eth_txq(adap, t, dev,
748 netdev_get_tx_queue(dev, j),
749 s->fw_evtq.cntxt_id);
750 if (err)
751 goto freeout;
752 }
753 }
754
755 j = s->ofldqsets / adap->params.nports; /* ofld queues per channel */
756 for_each_ofldrxq(s, i) {
757 struct sge_ofld_rxq *q = &s->ofldrxq[i];
758 struct net_device *dev = adap->port[i / j];
759
760 if (msi_idx > 0)
761 msi_idx++;
762 err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev, msi_idx,
763 &q->fl, uldrx_handler);
764 if (err)
765 goto freeout;
766 memset(&q->stats, 0, sizeof(q->stats));
767 s->ofld_rxq[i] = q->rspq.abs_id;
768 err = t4_sge_alloc_ofld_txq(adap, &s->ofldtxq[i], dev,
769 s->fw_evtq.cntxt_id);
770 if (err)
771 goto freeout;
772 }
773
774 for_each_rdmarxq(s, i) {
775 struct sge_ofld_rxq *q = &s->rdmarxq[i];
776
777 if (msi_idx > 0)
778 msi_idx++;
779 err = t4_sge_alloc_rxq(adap, &q->rspq, false, adap->port[i],
780 msi_idx, &q->fl, uldrx_handler);
781 if (err)
782 goto freeout;
783 memset(&q->stats, 0, sizeof(q->stats));
784 s->rdma_rxq[i] = q->rspq.abs_id;
785 }
786
787 for_each_port(adap, i) {
788 /*
789 * Note that ->rdmarxq[i].rspq.cntxt_id below is 0 if we don't
790 * have RDMA queues, and that's the right value.
791 */
792 err = t4_sge_alloc_ctrl_txq(adap, &s->ctrlq[i], adap->port[i],
793 s->fw_evtq.cntxt_id,
794 s->rdmarxq[i].rspq.cntxt_id);
795 if (err)
796 goto freeout;
797 }
798
799 t4_write_reg(adap, MPS_TRC_RSS_CONTROL,
800 RSSCONTROL(netdev2pinfo(adap->port[0])->tx_chan) |
801 QUEUENUMBER(s->ethrxq[0].rspq.abs_id));
802 return 0;
803}
804
805/*
806 * Returns 0 if new FW was successfully loaded, a positive errno if a load was
807 * started but failed, and a negative errno if flash load couldn't start.
808 */
809static int upgrade_fw(struct adapter *adap)
810{
811 int ret;
812 u32 vers;
813 const struct fw_hdr *hdr;
814 const struct firmware *fw;
815 struct device *dev = adap->pdev_dev;
816
817 ret = request_firmware(&fw, FW_FNAME, dev);
818 if (ret < 0) {
819 dev_err(dev, "unable to load firmware image " FW_FNAME
820 ", error %d\n", ret);
821 return ret;
822 }
823
824 hdr = (const struct fw_hdr *)fw->data;
825 vers = ntohl(hdr->fw_ver);
826 if (FW_HDR_FW_VER_MAJOR_GET(vers) != FW_VERSION_MAJOR) {
827 ret = -EINVAL; /* wrong major version, won't do */
828 goto out;
829 }
830
831 /*
832 * If the flash FW is unusable or we found something newer, load it.
833 */
834 if (FW_HDR_FW_VER_MAJOR_GET(adap->params.fw_vers) != FW_VERSION_MAJOR ||
835 vers > adap->params.fw_vers) {
836 ret = -t4_load_fw(adap, fw->data, fw->size);
837 if (!ret)
838 dev_info(dev, "firmware upgraded to version %pI4 from "
839 FW_FNAME "\n", &hdr->fw_ver);
840 }
841out: release_firmware(fw);
842 return ret;
843}
844
845/*
846 * Allocate a chunk of memory using kmalloc or, if that fails, vmalloc.
847 * The allocated memory is cleared.
848 */
849void *t4_alloc_mem(size_t size)
850{
851 void *p = kmalloc(size, GFP_KERNEL);
852
853 if (!p)
854 p = vmalloc(size);
855 if (p)
856 memset(p, 0, size);
857 return p;
858}
859
860/*
 861 * Free memory allocated through t4_alloc_mem().
862 */
863void t4_free_mem(void *addr)
864{
865 if (is_vmalloc_addr(addr))
866 vfree(addr);
867 else
868 kfree(addr);
869}
870
871static inline int is_offload(const struct adapter *adap)
872{
873 return adap->params.offload;
874}
875
876/*
877 * Implementation of ethtool operations.
878 */
879
880static u32 get_msglevel(struct net_device *dev)
881{
882 return netdev2adap(dev)->msg_enable;
883}
884
885static void set_msglevel(struct net_device *dev, u32 val)
886{
887 netdev2adap(dev)->msg_enable = val;
888}
889
890static char stats_strings[][ETH_GSTRING_LEN] = {
891 "TxOctetsOK ",
892 "TxFramesOK ",
893 "TxBroadcastFrames ",
894 "TxMulticastFrames ",
895 "TxUnicastFrames ",
896 "TxErrorFrames ",
897
898 "TxFrames64 ",
899 "TxFrames65To127 ",
900 "TxFrames128To255 ",
901 "TxFrames256To511 ",
902 "TxFrames512To1023 ",
903 "TxFrames1024To1518 ",
904 "TxFrames1519ToMax ",
905
906 "TxFramesDropped ",
907 "TxPauseFrames ",
908 "TxPPP0Frames ",
909 "TxPPP1Frames ",
910 "TxPPP2Frames ",
911 "TxPPP3Frames ",
912 "TxPPP4Frames ",
913 "TxPPP5Frames ",
914 "TxPPP6Frames ",
915 "TxPPP7Frames ",
916
917 "RxOctetsOK ",
918 "RxFramesOK ",
919 "RxBroadcastFrames ",
920 "RxMulticastFrames ",
921 "RxUnicastFrames ",
922
923 "RxFramesTooLong ",
924 "RxJabberErrors ",
925 "RxFCSErrors ",
926 "RxLengthErrors ",
927 "RxSymbolErrors ",
928 "RxRuntFrames ",
929
930 "RxFrames64 ",
931 "RxFrames65To127 ",
932 "RxFrames128To255 ",
933 "RxFrames256To511 ",
934 "RxFrames512To1023 ",
935 "RxFrames1024To1518 ",
936 "RxFrames1519ToMax ",
937
938 "RxPauseFrames ",
939 "RxPPP0Frames ",
940 "RxPPP1Frames ",
941 "RxPPP2Frames ",
942 "RxPPP3Frames ",
943 "RxPPP4Frames ",
944 "RxPPP5Frames ",
945 "RxPPP6Frames ",
946 "RxPPP7Frames ",
947
948 "RxBG0FramesDropped ",
949 "RxBG1FramesDropped ",
950 "RxBG2FramesDropped ",
951 "RxBG3FramesDropped ",
952 "RxBG0FramesTrunc ",
953 "RxBG1FramesTrunc ",
954 "RxBG2FramesTrunc ",
955 "RxBG3FramesTrunc ",
956
957 "TSO ",
958 "TxCsumOffload ",
959 "RxCsumGood ",
960 "VLANextractions ",
961 "VLANinsertions ",
962 "GROpackets ",
963 "GROmerged ",
964};
965
966static int get_sset_count(struct net_device *dev, int sset)
967{
968 switch (sset) {
969 case ETH_SS_STATS:
970 return ARRAY_SIZE(stats_strings);
971 default:
972 return -EOPNOTSUPP;
973 }
974}
975
976#define T4_REGMAP_SIZE (160 * 1024)
977
978static int get_regs_len(struct net_device *dev)
979{
980 return T4_REGMAP_SIZE;
981}
982
983static int get_eeprom_len(struct net_device *dev)
984{
985 return EEPROMSIZE;
986}
987
988static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
989{
990 struct adapter *adapter = netdev2adap(dev);
991
992 strcpy(info->driver, KBUILD_MODNAME);
993 strcpy(info->version, DRV_VERSION);
994 strcpy(info->bus_info, pci_name(adapter->pdev));
995
996 if (!adapter->params.fw_vers)
997 strcpy(info->fw_version, "N/A");
998 else
999 snprintf(info->fw_version, sizeof(info->fw_version),
1000 "%u.%u.%u.%u, TP %u.%u.%u.%u",
1001 FW_HDR_FW_VER_MAJOR_GET(adapter->params.fw_vers),
1002 FW_HDR_FW_VER_MINOR_GET(adapter->params.fw_vers),
1003 FW_HDR_FW_VER_MICRO_GET(adapter->params.fw_vers),
1004 FW_HDR_FW_VER_BUILD_GET(adapter->params.fw_vers),
1005 FW_HDR_FW_VER_MAJOR_GET(adapter->params.tp_vers),
1006 FW_HDR_FW_VER_MINOR_GET(adapter->params.tp_vers),
1007 FW_HDR_FW_VER_MICRO_GET(adapter->params.tp_vers),
1008 FW_HDR_FW_VER_BUILD_GET(adapter->params.tp_vers));
1009}
1010
1011static void get_strings(struct net_device *dev, u32 stringset, u8 *data)
1012{
1013 if (stringset == ETH_SS_STATS)
1014 memcpy(data, stats_strings, sizeof(stats_strings));
1015}
1016
1017/*
1018 * port stats maintained per queue of the port. They should be in the same
1019 * order as in stats_strings above.
1020 */
1021struct queue_port_stats {
1022 u64 tso;
1023 u64 tx_csum;
1024 u64 rx_csum;
1025 u64 vlan_ex;
1026 u64 vlan_ins;
1027 u64 gro_pkts;
1028 u64 gro_merged;
1029};
1030
1031static void collect_sge_port_stats(const struct adapter *adap,
1032 const struct port_info *p, struct queue_port_stats *s)
1033{
1034 int i;
1035 const struct sge_eth_txq *tx = &adap->sge.ethtxq[p->first_qset];
1036 const struct sge_eth_rxq *rx = &adap->sge.ethrxq[p->first_qset];
1037
1038 memset(s, 0, sizeof(*s));
1039 for (i = 0; i < p->nqsets; i++, rx++, tx++) {
1040 s->tso += tx->tso;
1041 s->tx_csum += tx->tx_cso;
1042 s->rx_csum += rx->stats.rx_cso;
1043 s->vlan_ex += rx->stats.vlan_ex;
1044 s->vlan_ins += tx->vlan_ins;
1045 s->gro_pkts += rx->stats.lro_pkts;
1046 s->gro_merged += rx->stats.lro_merged;
1047 }
1048}
1049
1050static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
1051 u64 *data)
1052{
1053 struct port_info *pi = netdev_priv(dev);
1054 struct adapter *adapter = pi->adapter;
1055
1056 t4_get_port_stats(adapter, pi->tx_chan, (struct port_stats *)data);
1057
1058 data += sizeof(struct port_stats) / sizeof(u64);
1059 collect_sge_port_stats(adapter, pi, (struct queue_port_stats *)data);
1060}
1061
1062/*
1063 * Return a version number to identify the type of adapter. The scheme is:
1064 * - bits 0..9: chip version
1065 * - bits 10..15: chip revision
1066 * - bits 16..23: register dump version
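 * For example, chip version 4, revision 0 and dump version 1 encode as 0x10004.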
1067 */
1068static inline unsigned int mk_adap_vers(const struct adapter *ap)
1069{
1070 return 4 | (ap->params.rev << 10) | (1 << 16);
1071}
1072
1073static void reg_block_dump(struct adapter *ap, void *buf, unsigned int start,
1074 unsigned int end)
1075{
1076 u32 *p = buf + start;
1077
1078 for ( ; start <= end; start += sizeof(u32))
1079 *p++ = t4_read_reg(ap, start);
1080}
1081
1082static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
1083 void *buf)
1084{
1085 static const unsigned int reg_ranges[] = {
1086 0x1008, 0x1108,
1087 0x1180, 0x11b4,
1088 0x11fc, 0x123c,
1089 0x1300, 0x173c,
1090 0x1800, 0x18fc,
1091 0x3000, 0x30d8,
1092 0x30e0, 0x5924,
1093 0x5960, 0x59d4,
1094 0x5a00, 0x5af8,
1095 0x6000, 0x6098,
1096 0x6100, 0x6150,
1097 0x6200, 0x6208,
1098 0x6240, 0x6248,
1099 0x6280, 0x6338,
1100 0x6370, 0x638c,
1101 0x6400, 0x643c,
1102 0x6500, 0x6524,
1103 0x6a00, 0x6a38,
1104 0x6a60, 0x6a78,
1105 0x6b00, 0x6b84,
1106 0x6bf0, 0x6c84,
1107 0x6cf0, 0x6d84,
1108 0x6df0, 0x6e84,
1109 0x6ef0, 0x6f84,
1110 0x6ff0, 0x7084,
1111 0x70f0, 0x7184,
1112 0x71f0, 0x7284,
1113 0x72f0, 0x7384,
1114 0x73f0, 0x7450,
1115 0x7500, 0x7530,
1116 0x7600, 0x761c,
1117 0x7680, 0x76cc,
1118 0x7700, 0x7798,
1119 0x77c0, 0x77fc,
1120 0x7900, 0x79fc,
1121 0x7b00, 0x7c38,
1122 0x7d00, 0x7efc,
1123 0x8dc0, 0x8e1c,
1124 0x8e30, 0x8e78,
1125 0x8ea0, 0x8f6c,
1126 0x8fc0, 0x9074,
1127 0x90fc, 0x90fc,
1128 0x9400, 0x9458,
1129 0x9600, 0x96bc,
1130 0x9800, 0x9808,
1131 0x9820, 0x983c,
1132 0x9850, 0x9864,
1133 0x9c00, 0x9c6c,
1134 0x9c80, 0x9cec,
1135 0x9d00, 0x9d6c,
1136 0x9d80, 0x9dec,
1137 0x9e00, 0x9e6c,
1138 0x9e80, 0x9eec,
1139 0x9f00, 0x9f6c,
1140 0x9f80, 0x9fec,
1141 0xd004, 0xd03c,
1142 0xdfc0, 0xdfe0,
1143 0xe000, 0xea7c,
1144 0xf000, 0x11190,
1145 0x19040, 0x1906c,
1146 0x19078, 0x19080,
1147 0x1908c, 0x19124,
1148 0x19150, 0x191b0,
1149 0x191d0, 0x191e8,
1150 0x19238, 0x1924c,
1151 0x193f8, 0x19474,
1152 0x19490, 0x194f8,
1153 0x19800, 0x19f30,
1154 0x1a000, 0x1a06c,
1155 0x1a0b0, 0x1a120,
1156 0x1a128, 0x1a138,
1157 0x1a190, 0x1a1c4,
1158 0x1a1fc, 0x1a1fc,
1159 0x1e040, 0x1e04c,
1160 0x1e284, 0x1e28c,
1161 0x1e2c0, 0x1e2c0,
1162 0x1e2e0, 0x1e2e0,
1163 0x1e300, 0x1e384,
1164 0x1e3c0, 0x1e3c8,
1165 0x1e440, 0x1e44c,
1166 0x1e684, 0x1e68c,
1167 0x1e6c0, 0x1e6c0,
1168 0x1e6e0, 0x1e6e0,
1169 0x1e700, 0x1e784,
1170 0x1e7c0, 0x1e7c8,
1171 0x1e840, 0x1e84c,
1172 0x1ea84, 0x1ea8c,
1173 0x1eac0, 0x1eac0,
1174 0x1eae0, 0x1eae0,
1175 0x1eb00, 0x1eb84,
1176 0x1ebc0, 0x1ebc8,
1177 0x1ec40, 0x1ec4c,
1178 0x1ee84, 0x1ee8c,
1179 0x1eec0, 0x1eec0,
1180 0x1eee0, 0x1eee0,
1181 0x1ef00, 0x1ef84,
1182 0x1efc0, 0x1efc8,
1183 0x1f040, 0x1f04c,
1184 0x1f284, 0x1f28c,
1185 0x1f2c0, 0x1f2c0,
1186 0x1f2e0, 0x1f2e0,
1187 0x1f300, 0x1f384,
1188 0x1f3c0, 0x1f3c8,
1189 0x1f440, 0x1f44c,
1190 0x1f684, 0x1f68c,
1191 0x1f6c0, 0x1f6c0,
1192 0x1f6e0, 0x1f6e0,
1193 0x1f700, 0x1f784,
1194 0x1f7c0, 0x1f7c8,
1195 0x1f840, 0x1f84c,
1196 0x1fa84, 0x1fa8c,
1197 0x1fac0, 0x1fac0,
1198 0x1fae0, 0x1fae0,
1199 0x1fb00, 0x1fb84,
1200 0x1fbc0, 0x1fbc8,
1201 0x1fc40, 0x1fc4c,
1202 0x1fe84, 0x1fe8c,
1203 0x1fec0, 0x1fec0,
1204 0x1fee0, 0x1fee0,
1205 0x1ff00, 0x1ff84,
1206 0x1ffc0, 0x1ffc8,
1207 0x20000, 0x2002c,
1208 0x20100, 0x2013c,
1209 0x20190, 0x201c8,
1210 0x20200, 0x20318,
1211 0x20400, 0x20528,
1212 0x20540, 0x20614,
1213 0x21000, 0x21040,
1214 0x2104c, 0x21060,
1215 0x210c0, 0x210ec,
1216 0x21200, 0x21268,
1217 0x21270, 0x21284,
1218 0x212fc, 0x21388,
1219 0x21400, 0x21404,
1220 0x21500, 0x21518,
1221 0x2152c, 0x2153c,
1222 0x21550, 0x21554,
1223 0x21600, 0x21600,
1224 0x21608, 0x21628,
1225 0x21630, 0x2163c,
1226 0x21700, 0x2171c,
1227 0x21780, 0x2178c,
1228 0x21800, 0x21c38,
1229 0x21c80, 0x21d7c,
1230 0x21e00, 0x21e04,
1231 0x22000, 0x2202c,
1232 0x22100, 0x2213c,
1233 0x22190, 0x221c8,
1234 0x22200, 0x22318,
1235 0x22400, 0x22528,
1236 0x22540, 0x22614,
1237 0x23000, 0x23040,
1238 0x2304c, 0x23060,
1239 0x230c0, 0x230ec,
1240 0x23200, 0x23268,
1241 0x23270, 0x23284,
1242 0x232fc, 0x23388,
1243 0x23400, 0x23404,
1244 0x23500, 0x23518,
1245 0x2352c, 0x2353c,
1246 0x23550, 0x23554,
1247 0x23600, 0x23600,
1248 0x23608, 0x23628,
1249 0x23630, 0x2363c,
1250 0x23700, 0x2371c,
1251 0x23780, 0x2378c,
1252 0x23800, 0x23c38,
1253 0x23c80, 0x23d7c,
1254 0x23e00, 0x23e04,
1255 0x24000, 0x2402c,
1256 0x24100, 0x2413c,
1257 0x24190, 0x241c8,
1258 0x24200, 0x24318,
1259 0x24400, 0x24528,
1260 0x24540, 0x24614,
1261 0x25000, 0x25040,
1262 0x2504c, 0x25060,
1263 0x250c0, 0x250ec,
1264 0x25200, 0x25268,
1265 0x25270, 0x25284,
1266 0x252fc, 0x25388,
1267 0x25400, 0x25404,
1268 0x25500, 0x25518,
1269 0x2552c, 0x2553c,
1270 0x25550, 0x25554,
1271 0x25600, 0x25600,
1272 0x25608, 0x25628,
1273 0x25630, 0x2563c,
1274 0x25700, 0x2571c,
1275 0x25780, 0x2578c,
1276 0x25800, 0x25c38,
1277 0x25c80, 0x25d7c,
1278 0x25e00, 0x25e04,
1279 0x26000, 0x2602c,
1280 0x26100, 0x2613c,
1281 0x26190, 0x261c8,
1282 0x26200, 0x26318,
1283 0x26400, 0x26528,
1284 0x26540, 0x26614,
1285 0x27000, 0x27040,
1286 0x2704c, 0x27060,
1287 0x270c0, 0x270ec,
1288 0x27200, 0x27268,
1289 0x27270, 0x27284,
1290 0x272fc, 0x27388,
1291 0x27400, 0x27404,
1292 0x27500, 0x27518,
1293 0x2752c, 0x2753c,
1294 0x27550, 0x27554,
1295 0x27600, 0x27600,
1296 0x27608, 0x27628,
1297 0x27630, 0x2763c,
1298 0x27700, 0x2771c,
1299 0x27780, 0x2778c,
1300 0x27800, 0x27c38,
1301 0x27c80, 0x27d7c,
1302 0x27e00, 0x27e04
1303 };
1304
1305 int i;
1306 struct adapter *ap = netdev2adap(dev);
1307
1308 regs->version = mk_adap_vers(ap);
1309
1310 memset(buf, 0, T4_REGMAP_SIZE);
1311 for (i = 0; i < ARRAY_SIZE(reg_ranges); i += 2)
1312 reg_block_dump(ap, buf, reg_ranges[i], reg_ranges[i + 1]);
1313}
1314
1315static int restart_autoneg(struct net_device *dev)
1316{
1317 struct port_info *p = netdev_priv(dev);
1318
1319 if (!netif_running(dev))
1320 return -EAGAIN;
1321 if (p->link_cfg.autoneg != AUTONEG_ENABLE)
1322 return -EINVAL;
1323 t4_restart_aneg(p->adapter, p->adapter->fn, p->tx_chan);
1324 return 0;
1325}
1326
1327static int identify_port(struct net_device *dev, u32 data)
1328{
1329 struct adapter *adap = netdev2adap(dev);
1330
1331 if (data == 0)
1332 data = 2; /* default to 2 seconds */
1333
1334 return t4_identify_port(adap, adap->fn, netdev2pinfo(dev)->viid,
1335 data * 5);
1336}
1337
1338static unsigned int from_fw_linkcaps(unsigned int type, unsigned int caps)
1339{
1340 unsigned int v = 0;
1341
1342 if (type == FW_PORT_TYPE_BT_SGMII || type == FW_PORT_TYPE_BT_XFI ||
1343 type == FW_PORT_TYPE_BT_XAUI) {
1344 v |= SUPPORTED_TP;
1345 if (caps & FW_PORT_CAP_SPEED_100M)
1346 v |= SUPPORTED_100baseT_Full;
1347 if (caps & FW_PORT_CAP_SPEED_1G)
1348 v |= SUPPORTED_1000baseT_Full;
1349 if (caps & FW_PORT_CAP_SPEED_10G)
1350 v |= SUPPORTED_10000baseT_Full;
1351 } else if (type == FW_PORT_TYPE_KX4 || type == FW_PORT_TYPE_KX) {
1352 v |= SUPPORTED_Backplane;
1353 if (caps & FW_PORT_CAP_SPEED_1G)
1354 v |= SUPPORTED_1000baseKX_Full;
1355 if (caps & FW_PORT_CAP_SPEED_10G)
1356 v |= SUPPORTED_10000baseKX4_Full;
1357 } else if (type == FW_PORT_TYPE_KR)
1358 v |= SUPPORTED_Backplane | SUPPORTED_10000baseKR_Full;
1359 else if (type == FW_PORT_TYPE_BP_AP)
1360 v |= SUPPORTED_Backplane | SUPPORTED_10000baseR_FEC;
1361 else if (type == FW_PORT_TYPE_FIBER_XFI ||
1362 type == FW_PORT_TYPE_FIBER_XAUI || type == FW_PORT_TYPE_SFP)
1363 v |= SUPPORTED_FIBRE;
1364
1365 if (caps & FW_PORT_CAP_ANEG)
1366 v |= SUPPORTED_Autoneg;
1367 return v;
1368}
1369
1370static unsigned int to_fw_linkcaps(unsigned int caps)
1371{
1372 unsigned int v = 0;
1373
1374 if (caps & ADVERTISED_100baseT_Full)
1375 v |= FW_PORT_CAP_SPEED_100M;
1376 if (caps & ADVERTISED_1000baseT_Full)
1377 v |= FW_PORT_CAP_SPEED_1G;
1378 if (caps & ADVERTISED_10000baseT_Full)
1379 v |= FW_PORT_CAP_SPEED_10G;
1380 return v;
1381}
1382
1383static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1384{
1385 const struct port_info *p = netdev_priv(dev);
1386
1387 if (p->port_type == FW_PORT_TYPE_BT_SGMII ||
1388 p->port_type == FW_PORT_TYPE_BT_XFI ||
1389 p->port_type == FW_PORT_TYPE_BT_XAUI)
1390 cmd->port = PORT_TP;
1391 else if (p->port_type == FW_PORT_TYPE_FIBER_XFI ||
1392 p->port_type == FW_PORT_TYPE_FIBER_XAUI)
1393 cmd->port = PORT_FIBRE;
1394 else if (p->port_type == FW_PORT_TYPE_SFP) {
1395 if (p->mod_type == FW_PORT_MOD_TYPE_TWINAX_PASSIVE ||
1396 p->mod_type == FW_PORT_MOD_TYPE_TWINAX_ACTIVE)
1397 cmd->port = PORT_DA;
1398 else
1399 cmd->port = PORT_FIBRE;
1400 } else
1401 cmd->port = PORT_OTHER;
1402
1403 if (p->mdio_addr >= 0) {
1404 cmd->phy_address = p->mdio_addr;
1405 cmd->transceiver = XCVR_EXTERNAL;
1406 cmd->mdio_support = p->port_type == FW_PORT_TYPE_BT_SGMII ?
1407 MDIO_SUPPORTS_C22 : MDIO_SUPPORTS_C45;
1408 } else {
1409 cmd->phy_address = 0; /* not really, but no better option */
1410 cmd->transceiver = XCVR_INTERNAL;
1411 cmd->mdio_support = 0;
1412 }
1413
1414 cmd->supported = from_fw_linkcaps(p->port_type, p->link_cfg.supported);
1415 cmd->advertising = from_fw_linkcaps(p->port_type,
1416 p->link_cfg.advertising);
1417 cmd->speed = netif_carrier_ok(dev) ? p->link_cfg.speed : 0;
1418 cmd->duplex = DUPLEX_FULL;
1419 cmd->autoneg = p->link_cfg.autoneg;
1420 cmd->maxtxpkt = 0;
1421 cmd->maxrxpkt = 0;
1422 return 0;
1423}
1424
1425static unsigned int speed_to_caps(int speed)
1426{
1427 if (speed == SPEED_100)
1428 return FW_PORT_CAP_SPEED_100M;
1429 if (speed == SPEED_1000)
1430 return FW_PORT_CAP_SPEED_1G;
1431 if (speed == SPEED_10000)
1432 return FW_PORT_CAP_SPEED_10G;
1433 return 0;
1434}
1435
1436static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1437{
1438 unsigned int cap;
1439 struct port_info *p = netdev_priv(dev);
1440 struct link_config *lc = &p->link_cfg;
1441
1442 if (cmd->duplex != DUPLEX_FULL) /* only full-duplex supported */
1443 return -EINVAL;
1444
1445 if (!(lc->supported & FW_PORT_CAP_ANEG)) {
1446 /*
1447 * PHY offers a single speed. See if that's what's
1448 * being requested.
1449 */
1450 if (cmd->autoneg == AUTONEG_DISABLE &&
1451 (lc->supported & speed_to_caps(cmd->speed)))
1452 return 0;
1453 return -EINVAL;
1454 }
1455
1456 if (cmd->autoneg == AUTONEG_DISABLE) {
1457 cap = speed_to_caps(cmd->speed);
1458
1459 if (!(lc->supported & cap) || cmd->speed == SPEED_1000 ||
1460 cmd->speed == SPEED_10000)
1461 return -EINVAL;
1462 lc->requested_speed = cap;
1463 lc->advertising = 0;
1464 } else {
1465 cap = to_fw_linkcaps(cmd->advertising);
1466 if (!(lc->supported & cap))
1467 return -EINVAL;
1468 lc->requested_speed = 0;
1469 lc->advertising = cap | FW_PORT_CAP_ANEG;
1470 }
1471 lc->autoneg = cmd->autoneg;
1472
1473 if (netif_running(dev))
1474 return t4_link_start(p->adapter, p->adapter->fn, p->tx_chan,
1475 lc);
1476 return 0;
1477}
1478
1479static void get_pauseparam(struct net_device *dev,
1480 struct ethtool_pauseparam *epause)
1481{
1482 struct port_info *p = netdev_priv(dev);
1483
1484 epause->autoneg = (p->link_cfg.requested_fc & PAUSE_AUTONEG) != 0;
1485 epause->rx_pause = (p->link_cfg.fc & PAUSE_RX) != 0;
1486 epause->tx_pause = (p->link_cfg.fc & PAUSE_TX) != 0;
1487}
1488
1489static int set_pauseparam(struct net_device *dev,
1490 struct ethtool_pauseparam *epause)
1491{
1492 struct port_info *p = netdev_priv(dev);
1493 struct link_config *lc = &p->link_cfg;
1494
1495 if (epause->autoneg == AUTONEG_DISABLE)
1496 lc->requested_fc = 0;
1497 else if (lc->supported & FW_PORT_CAP_ANEG)
1498 lc->requested_fc = PAUSE_AUTONEG;
1499 else
1500 return -EINVAL;
1501
1502 if (epause->rx_pause)
1503 lc->requested_fc |= PAUSE_RX;
1504 if (epause->tx_pause)
1505 lc->requested_fc |= PAUSE_TX;
1506 if (netif_running(dev))
1507 return t4_link_start(p->adapter, p->adapter->fn, p->tx_chan,
1508 lc);
1509 return 0;
1510}
1511
1512static u32 get_rx_csum(struct net_device *dev)
1513{
1514 struct port_info *p = netdev_priv(dev);
1515
1516 return p->rx_offload & RX_CSO;
1517}
1518
1519static int set_rx_csum(struct net_device *dev, u32 data)
1520{
1521 struct port_info *p = netdev_priv(dev);
1522
1523 if (data)
1524 p->rx_offload |= RX_CSO;
1525 else
1526 p->rx_offload &= ~RX_CSO;
1527 return 0;
1528}
1529
1530static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
1531{
1532 const struct port_info *pi = netdev_priv(dev);
1533 const struct sge *s = &pi->adapter->sge;
1534
1535 e->rx_max_pending = MAX_RX_BUFFERS;
1536 e->rx_mini_max_pending = MAX_RSPQ_ENTRIES;
1537 e->rx_jumbo_max_pending = 0;
1538 e->tx_max_pending = MAX_TXQ_ENTRIES;
1539
1540 e->rx_pending = s->ethrxq[pi->first_qset].fl.size - 8;
1541 e->rx_mini_pending = s->ethrxq[pi->first_qset].rspq.size;
1542 e->rx_jumbo_pending = 0;
1543 e->tx_pending = s->ethtxq[pi->first_qset].q.size;
1544}
1545
1546static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
1547{
1548 int i;
1549 const struct port_info *pi = netdev_priv(dev);
1550 struct adapter *adapter = pi->adapter;
1551 struct sge *s = &adapter->sge;
1552
1553 if (e->rx_pending > MAX_RX_BUFFERS || e->rx_jumbo_pending ||
1554 e->tx_pending > MAX_TXQ_ENTRIES ||
1555 e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
1556 e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
1557 e->rx_pending < MIN_FL_ENTRIES || e->tx_pending < MIN_TXQ_ENTRIES)
1558 return -EINVAL;
1559
1560 if (adapter->flags & FULL_INIT_DONE)
1561 return -EBUSY;
1562
1563 for (i = 0; i < pi->nqsets; ++i) {
1564 s->ethtxq[pi->first_qset + i].q.size = e->tx_pending;
1565 s->ethrxq[pi->first_qset + i].fl.size = e->rx_pending + 8;
1566 s->ethrxq[pi->first_qset + i].rspq.size = e->rx_mini_pending;
1567 }
1568 return 0;
1569}
1570
1571static int closest_timer(const struct sge *s, int time)
1572{
1573 int i, delta, match = 0, min_delta = INT_MAX;
1574
1575 for (i = 0; i < ARRAY_SIZE(s->timer_val); i++) {
1576 delta = time - s->timer_val[i];
1577 if (delta < 0)
1578 delta = -delta;
1579 if (delta < min_delta) {
1580 min_delta = delta;
1581 match = i;
1582 }
1583 }
1584 return match;
1585}
1586
1587static int closest_thres(const struct sge *s, int thres)
1588{
1589 int i, delta, match = 0, min_delta = INT_MAX;
1590
1591 for (i = 0; i < ARRAY_SIZE(s->counter_val); i++) {
1592 delta = thres - s->counter_val[i];
1593 if (delta < 0)
1594 delta = -delta;
1595 if (delta < min_delta) {
1596 min_delta = delta;
1597 match = i;
1598 }
1599 }
1600 return match;
1601}
1602
1603/*
1604 * Return a queue's interrupt hold-off time in us. 0 means no timer.
1605 */
1606static unsigned int qtimer_val(const struct adapter *adap,
1607 const struct sge_rspq *q)
1608{
1609 unsigned int idx = q->intr_params >> 1;
1610
1611 return idx < SGE_NTIMERS ? adap->sge.timer_val[idx] : 0;
1612}
1613
1614/**
1615 * set_rxq_intr_params - set a queue's interrupt holdoff parameters
1616 * @adap: the adapter
1617 * @q: the Rx queue
1618 * @us: the hold-off time in us, or 0 to disable timer
1619 * @cnt: the hold-off packet count, or 0 to disable counter
1620 *
1621 * Sets an Rx queue's interrupt hold-off time and packet count. At least
1622 * one of the two needs to be enabled for the queue to generate interrupts.
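 * The supplied values are rounded to the closest supported timer value and
 * packet-count threshold (see closest_timer() and closest_thres() below).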
1623 */
1624static int set_rxq_intr_params(struct adapter *adap, struct sge_rspq *q,
1625 unsigned int us, unsigned int cnt)
1626{
1627 if ((us | cnt) == 0)
1628 cnt = 1;
1629
1630 if (cnt) {
1631 int err;
1632 u32 v, new_idx;
1633
1634 new_idx = closest_thres(&adap->sge, cnt);
1635 if (q->desc && q->pktcnt_idx != new_idx) {
1636 /* the queue has already been created, update it */
1637 v = FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) |
1638 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH) |
1639 FW_PARAMS_PARAM_YZ(q->cntxt_id);
1640 err = t4_set_params(adap, adap->fn, adap->fn, 0, 1, &v,
1641 &new_idx);
1642 if (err)
1643 return err;
1644 }
1645 q->pktcnt_idx = new_idx;
1646 }
1647
1648 us = us == 0 ? 6 : closest_timer(&adap->sge, us);
1649 q->intr_params = QINTR_TIMER_IDX(us) | (cnt > 0 ? QINTR_CNT_EN : 0);
1650 return 0;
1651}
1652
1653static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
1654{
1655 const struct port_info *pi = netdev_priv(dev);
1656 struct adapter *adap = pi->adapter;
1657
1658 return set_rxq_intr_params(adap, &adap->sge.ethrxq[pi->first_qset].rspq,
1659 c->rx_coalesce_usecs, c->rx_max_coalesced_frames);
1660}
1661
1662static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
1663{
1664 const struct port_info *pi = netdev_priv(dev);
1665 const struct adapter *adap = pi->adapter;
1666 const struct sge_rspq *rq = &adap->sge.ethrxq[pi->first_qset].rspq;
1667
1668 c->rx_coalesce_usecs = qtimer_val(adap, rq);
1669 c->rx_max_coalesced_frames = (rq->intr_params & QINTR_CNT_EN) ?
1670 adap->sge.counter_val[rq->pktcnt_idx] : 0;
1671 return 0;
1672}
1673
1674/*
1675 * Translate a physical EEPROM address to virtual. The first 1K is accessed
 1676 * through virtual addresses starting at 31K; the rest is accessed through
1677 * virtual addresses starting at 0. This mapping is correct only for PF0.
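 * For example, physical address 0x0 maps to virtual 0x7c00 and physical
 * address 0x400 maps to virtual 0x0.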
1678 */
1679static int eeprom_ptov(unsigned int phys_addr)
1680{
1681 if (phys_addr < 1024)
1682 return phys_addr + (31 << 10);
1683 if (phys_addr < EEPROMSIZE)
1684 return phys_addr - 1024;
1685 return -EINVAL;
1686}
1687
1688/*
1689 * The next two routines implement eeprom read/write from physical addresses.
1690 * The physical->virtual translation is correct only for PF0.
1691 */
1692static int eeprom_rd_phys(struct adapter *adap, unsigned int phys_addr, u32 *v)
1693{
1694 int vaddr = eeprom_ptov(phys_addr);
1695
1696 if (vaddr >= 0)
1697 vaddr = pci_read_vpd(adap->pdev, vaddr, sizeof(u32), v);
1698 return vaddr < 0 ? vaddr : 0;
1699}
1700
1701static int eeprom_wr_phys(struct adapter *adap, unsigned int phys_addr, u32 v)
1702{
1703 int vaddr = eeprom_ptov(phys_addr);
1704
1705 if (vaddr >= 0)
1706 vaddr = pci_write_vpd(adap->pdev, vaddr, sizeof(u32), &v);
1707 return vaddr < 0 ? vaddr : 0;
1708}
1709
1710#define EEPROM_MAGIC 0x38E2F10C
1711
1712static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
1713 u8 *data)
1714{
1715 int i, err = 0;
1716 struct adapter *adapter = netdev2adap(dev);
1717
1718 u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL);
1719 if (!buf)
1720 return -ENOMEM;
1721
1722 e->magic = EEPROM_MAGIC;
1723 for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
1724 err = eeprom_rd_phys(adapter, i, (u32 *)&buf[i]);
1725
1726 if (!err)
1727 memcpy(data, buf + e->offset, e->len);
1728 kfree(buf);
1729 return err;
1730}
1731
1732static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
1733 u8 *data)
1734{
1735 u8 *buf;
1736 int err = 0;
1737 u32 aligned_offset, aligned_len, *p;
1738 struct adapter *adapter = netdev2adap(dev);
1739
1740 if (eeprom->magic != EEPROM_MAGIC)
1741 return -EINVAL;
1742
1743 aligned_offset = eeprom->offset & ~3;
1744 aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;
1745
1746 if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
1747 /*
1748 * RMW possibly needed for first or last words.
1749 */
1750 buf = kmalloc(aligned_len, GFP_KERNEL);
1751 if (!buf)
1752 return -ENOMEM;
1753 err = eeprom_rd_phys(adapter, aligned_offset, (u32 *)buf);
1754 if (!err && aligned_len > 4)
1755 err = eeprom_rd_phys(adapter,
1756 aligned_offset + aligned_len - 4,
1757 (u32 *)&buf[aligned_len - 4]);
1758 if (err)
1759 goto out;
1760 memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
1761 } else
1762 buf = data;
1763
1764 err = t4_seeprom_wp(adapter, false);
1765 if (err)
1766 goto out;
1767
1768 for (p = (u32 *)buf; !err && aligned_len; aligned_len -= 4, p++) {
1769 err = eeprom_wr_phys(adapter, aligned_offset, *p);
1770 aligned_offset += 4;
1771 }
1772
1773 if (!err)
1774 err = t4_seeprom_wp(adapter, true);
1775out:
1776 if (buf != data)
1777 kfree(buf);
1778 return err;
1779}
1780
1781static int set_flash(struct net_device *netdev, struct ethtool_flash *ef)
1782{
1783 int ret;
1784 const struct firmware *fw;
1785 struct adapter *adap = netdev2adap(netdev);
1786
1787 ef->data[sizeof(ef->data) - 1] = '\0';
1788 ret = request_firmware(&fw, ef->data, adap->pdev_dev);
1789 if (ret < 0)
1790 return ret;
1791
1792 ret = t4_load_fw(adap, fw->data, fw->size);
1793 release_firmware(fw);
1794 if (!ret)
1795 dev_info(adap->pdev_dev, "loaded firmware %s\n", ef->data);
1796 return ret;
1797}
1798
1799#define WOL_SUPPORTED (WAKE_BCAST | WAKE_MAGIC)
1800#define BCAST_CRC 0xa0ccc1a6
1801
1802static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1803{
1804 wol->supported = WAKE_BCAST | WAKE_MAGIC;
1805 wol->wolopts = netdev2adap(dev)->wol;
1806 memset(&wol->sopass, 0, sizeof(wol->sopass));
1807}
1808
1809static int set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1810{
1811 int err = 0;
1812 struct port_info *pi = netdev_priv(dev);
1813
1814 if (wol->wolopts & ~WOL_SUPPORTED)
1815 return -EINVAL;
1816 t4_wol_magic_enable(pi->adapter, pi->tx_chan,
1817 (wol->wolopts & WAKE_MAGIC) ? dev->dev_addr : NULL);
1818 if (wol->wolopts & WAKE_BCAST) {
1819 err = t4_wol_pat_enable(pi->adapter, pi->tx_chan, 0xfe, ~0ULL,
1820 ~0ULL, 0, false);
1821 if (!err)
1822 err = t4_wol_pat_enable(pi->adapter, pi->tx_chan, 1,
1823 ~6ULL, ~0ULL, BCAST_CRC, true);
1824 } else
1825 t4_wol_pat_enable(pi->adapter, pi->tx_chan, 0, 0, 0, 0, false);
1826 return err;
1827}
1828
1829#define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
1830
1831static int set_tso(struct net_device *dev, u32 value)
1832{
1833 if (value)
1834 dev->features |= TSO_FLAGS;
1835 else
1836 dev->features &= ~TSO_FLAGS;
1837 return 0;
1838}
1839
1840static int set_flags(struct net_device *dev, u32 flags)
1841{
1842 return ethtool_op_set_flags(dev, flags, ETH_FLAG_RXHASH);
1843}
1844
1845static int get_rss_table(struct net_device *dev, struct ethtool_rxfh_indir *p)
1846{
1847 const struct port_info *pi = netdev_priv(dev);
1848 unsigned int n = min_t(unsigned int, p->size, pi->rss_size);
1849
1850 p->size = pi->rss_size;
1851 while (n--)
1852 p->ring_index[n] = pi->rss[n];
1853 return 0;
1854}
1855
1856static int set_rss_table(struct net_device *dev,
1857 const struct ethtool_rxfh_indir *p)
1858{
1859 unsigned int i;
1860 struct port_info *pi = netdev_priv(dev);
1861
1862 if (p->size != pi->rss_size)
1863 return -EINVAL;
1864 for (i = 0; i < p->size; i++)
1865 if (p->ring_index[i] >= pi->nqsets)
1866 return -EINVAL;
1867 for (i = 0; i < p->size; i++)
1868 pi->rss[i] = p->ring_index[i];
1869 if (pi->adapter->flags & FULL_INIT_DONE)
1870 return write_rss(pi, pi->rss);
1871 return 0;
1872}
1873
1874static int get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
1875 void *rules)
1876{
1877 const struct port_info *pi = netdev_priv(dev);
1878
1879 switch (info->cmd) {
1880 case ETHTOOL_GRXFH: {
1881 unsigned int v = pi->rss_mode;
1882
1883 info->data = 0;
1884 switch (info->flow_type) {
1885 case TCP_V4_FLOW:
1886 if (v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN)
1887 info->data = RXH_IP_SRC | RXH_IP_DST |
1888 RXH_L4_B_0_1 | RXH_L4_B_2_3;
1889 else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
1890 info->data = RXH_IP_SRC | RXH_IP_DST;
1891 break;
1892 case UDP_V4_FLOW:
1893 if ((v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN) &&
1894 (v & FW_RSS_VI_CONFIG_CMD_UDPEN))
1895 info->data = RXH_IP_SRC | RXH_IP_DST |
1896 RXH_L4_B_0_1 | RXH_L4_B_2_3;
1897 else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
1898 info->data = RXH_IP_SRC | RXH_IP_DST;
1899 break;
1900 case SCTP_V4_FLOW:
1901 case AH_ESP_V4_FLOW:
1902 case IPV4_FLOW:
1903 if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
1904 info->data = RXH_IP_SRC | RXH_IP_DST;
1905 break;
1906 case TCP_V6_FLOW:
1907 if (v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN)
1908 info->data = RXH_IP_SRC | RXH_IP_DST |
1909 RXH_L4_B_0_1 | RXH_L4_B_2_3;
1910 else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
1911 info->data = RXH_IP_SRC | RXH_IP_DST;
1912 break;
1913 case UDP_V6_FLOW:
1914 if ((v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN) &&
1915 (v & FW_RSS_VI_CONFIG_CMD_UDPEN))
1916 info->data = RXH_IP_SRC | RXH_IP_DST |
1917 RXH_L4_B_0_1 | RXH_L4_B_2_3;
1918 else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
1919 info->data = RXH_IP_SRC | RXH_IP_DST;
1920 break;
1921 case SCTP_V6_FLOW:
1922 case AH_ESP_V6_FLOW:
1923 case IPV6_FLOW:
1924 if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
1925 info->data = RXH_IP_SRC | RXH_IP_DST;
1926 break;
1927 }
1928 return 0;
1929 }
1930 case ETHTOOL_GRXRINGS:
1931 info->data = pi->nqsets;
1932 return 0;
1933 }
1934 return -EOPNOTSUPP;
1935}
1936
1937static struct ethtool_ops cxgb_ethtool_ops = {
1938 .get_settings = get_settings,
1939 .set_settings = set_settings,
1940 .get_drvinfo = get_drvinfo,
1941 .get_msglevel = get_msglevel,
1942 .set_msglevel = set_msglevel,
1943 .get_ringparam = get_sge_param,
1944 .set_ringparam = set_sge_param,
1945 .get_coalesce = get_coalesce,
1946 .set_coalesce = set_coalesce,
1947 .get_eeprom_len = get_eeprom_len,
1948 .get_eeprom = get_eeprom,
1949 .set_eeprom = set_eeprom,
1950 .get_pauseparam = get_pauseparam,
1951 .set_pauseparam = set_pauseparam,
1952 .get_rx_csum = get_rx_csum,
1953 .set_rx_csum = set_rx_csum,
1954 .set_tx_csum = ethtool_op_set_tx_ipv6_csum,
1955 .set_sg = ethtool_op_set_sg,
1956 .get_link = ethtool_op_get_link,
1957 .get_strings = get_strings,
1958 .phys_id = identify_port,
1959 .nway_reset = restart_autoneg,
1960 .get_sset_count = get_sset_count,
1961 .get_ethtool_stats = get_stats,
1962 .get_regs_len = get_regs_len,
1963 .get_regs = get_regs,
1964 .get_wol = get_wol,
1965 .set_wol = set_wol,
1966 .set_tso = set_tso,
87b6cf51 1967 .set_flags = set_flags,
671b0060
DM
1968 .get_rxnfc = get_rxnfc,
1969 .get_rxfh_indir = get_rss_table,
1970 .set_rxfh_indir = set_rss_table,
b8ff05a9
DM
1971 .flash_device = set_flash,
1972};
1973
1974/*
1975 * debugfs support
1976 */
1977
1978static int mem_open(struct inode *inode, struct file *file)
1979{
1980 file->private_data = inode->i_private;
1981 return 0;
1982}
1983
1984static ssize_t mem_read(struct file *file, char __user *buf, size_t count,
1985 loff_t *ppos)
1986{
1987 loff_t pos = *ppos;
1988 loff_t avail = file->f_path.dentry->d_inode->i_size;
1989 unsigned int mem = (uintptr_t)file->private_data & 3;
1990 struct adapter *adap = file->private_data - mem;
1991
1992 if (pos < 0)
1993 return -EINVAL;
1994 if (pos >= avail)
1995 return 0;
1996 if (count > avail - pos)
1997 count = avail - pos;
1998
1999 while (count) {
2000 size_t len;
2001 int ret, ofst;
2002 __be32 data[16];
2003
2004 if (mem == MEM_MC)
2005 ret = t4_mc_read(adap, pos, data, NULL);
2006 else
2007 ret = t4_edc_read(adap, mem, pos, data, NULL);
2008 if (ret)
2009 return ret;
2010
2011 ofst = pos % sizeof(data);
2012 len = min(count, sizeof(data) - ofst);
2013 if (copy_to_user(buf, (u8 *)data + ofst, len))
2014 return -EFAULT;
2015
2016 buf += len;
2017 pos += len;
2018 count -= len;
2019 }
2020 count = pos - *ppos;
2021 *ppos = pos;
2022 return count;
2023}
2024
2025static const struct file_operations mem_debugfs_fops = {
2026 .owner = THIS_MODULE,
2027 .open = mem_open,
2028 .read = mem_read,
6038f373 2029 .llseek = default_llseek,
b8ff05a9
DM
2030};
2031
2032static void __devinit add_debugfs_mem(struct adapter *adap, const char *name,
2033 unsigned int idx, unsigned int size_mb)
2034{
2035 struct dentry *de;
2036
2037 de = debugfs_create_file(name, S_IRUSR, adap->debugfs_root,
2038 (void *)adap + idx, &mem_debugfs_fops);
2039 if (de && de->d_inode)
2040 de->d_inode->i_size = size_mb << 20;
2041}
2042
2043static int __devinit setup_debugfs(struct adapter *adap)
2044{
2045 int i;
2046
2047 if (IS_ERR_OR_NULL(adap->debugfs_root))
2048 return -1;
2049
2050 i = t4_read_reg(adap, MA_TARGET_MEM_ENABLE);
2051 if (i & EDRAM0_ENABLE)
2052 add_debugfs_mem(adap, "edc0", MEM_EDC0, 5);
2053 if (i & EDRAM1_ENABLE)
2054 add_debugfs_mem(adap, "edc1", MEM_EDC1, 5);
2055 if (i & EXT_MEM_ENABLE)
2056 add_debugfs_mem(adap, "mc", MEM_MC,
2057 EXT_MEM_SIZE_GET(t4_read_reg(adap, MA_EXT_MEMORY_BAR)));
2058 if (adap->l2t)
2059 debugfs_create_file("l2t", S_IRUSR, adap->debugfs_root, adap,
2060 &t4_l2t_fops);
2061 return 0;
2062}
2063
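/*
 * Editor's note -- illustrative example, not part of the driver source.
 * mem_read() above exposes adapter EDC/MC memory as flat debugfs files
 * whose size is set by add_debugfs_mem(); a userspace reader just pread()s
 * at increasing offsets until it hits that size. The path below assumes
 * debugfs is mounted at /sys/kernel/debug and a device named 0000:01:00.0;
 * adjust both for a real system.
 */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	const char *path = "/sys/kernel/debug/cxgb4/0000:01:00.0/edc0";
	unsigned char buf[4096];
	off_t pos = 0;
	ssize_t n;
	int fd = open(path, O_RDONLY);

	if (fd < 0) {
		perror(path);
		return 1;
	}

	/* like mem_read(), stop at the i_size set by add_debugfs_mem() */
	while ((n = pread(fd, buf, sizeof(buf), pos)) > 0) {
		pos += n;
		/* ... parse or dump the chunk here ... */
	}
	printf("read %lld bytes of adapter memory\n", (long long)pos);
	close(fd);
	return 0;
}
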
2064/*
2065 * upper-layer driver support
2066 */
2067
2068/*
2069 * Allocate an active-open TID and set it to the supplied value.
2070 */
2071int cxgb4_alloc_atid(struct tid_info *t, void *data)
2072{
2073 int atid = -1;
2074
2075 spin_lock_bh(&t->atid_lock);
2076 if (t->afree) {
2077 union aopen_entry *p = t->afree;
2078
2079 atid = p - t->atid_tab;
2080 t->afree = p->next;
2081 p->data = data;
2082 t->atids_in_use++;
2083 }
2084 spin_unlock_bh(&t->atid_lock);
2085 return atid;
2086}
2087EXPORT_SYMBOL(cxgb4_alloc_atid);
2088
2089/*
2090 * Release an active-open TID.
2091 */
2092void cxgb4_free_atid(struct tid_info *t, unsigned int atid)
2093{
2094 union aopen_entry *p = &t->atid_tab[atid];
2095
2096 spin_lock_bh(&t->atid_lock);
2097 p->next = t->afree;
2098 t->afree = p;
2099 t->atids_in_use--;
2100 spin_unlock_bh(&t->atid_lock);
2101}
2102EXPORT_SYMBOL(cxgb4_free_atid);
2103
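/*
 * Editor's note -- illustrative example, not part of the driver source.
 * The atid table above doubles as its own free list: a free entry holds a
 * pointer to the next free entry, an allocated entry holds the caller's
 * data, so no extra bookkeeping memory is needed. The same trick,
 * standalone and with made-up names:
 */
#include <stdio.h>

union slot {
	union slot *next;	/* valid while the slot is free      */
	void *data;		/* valid while the slot is allocated */
};

static union slot table[8];
static union slot *freelist;

static void slots_init(void)
{
	int i;

	for (i = 0; i < 7; i++)
		table[i].next = &table[i + 1];
	table[7].next = NULL;
	freelist = &table[0];
}

static int slot_alloc(void *data)
{
	union slot *p = freelist;

	if (!p)
		return -1;
	freelist = p->next;
	p->data = data;
	return p - table;		/* the index doubles as the ID */
}

static void slot_free(int id)
{
	table[id].next = freelist;
	freelist = &table[id];
}

int main(void)
{
	int msg = 42;
	int id;

	slots_init();
	id = slot_alloc(&msg);
	printf("allocated id %d\n", id);
	slot_free(id);
	return 0;
}
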
2104/*
2105 * Allocate a server TID and set it to the supplied value.
2106 */
2107int cxgb4_alloc_stid(struct tid_info *t, int family, void *data)
2108{
2109 int stid;
2110
2111 spin_lock_bh(&t->stid_lock);
2112 if (family == PF_INET) {
2113 stid = find_first_zero_bit(t->stid_bmap, t->nstids);
2114 if (stid < t->nstids)
2115 __set_bit(stid, t->stid_bmap);
2116 else
2117 stid = -1;
2118 } else {
2119 stid = bitmap_find_free_region(t->stid_bmap, t->nstids, 2);
2120 if (stid < 0)
2121 stid = -1;
2122 }
2123 if (stid >= 0) {
2124 t->stid_tab[stid].data = data;
2125 stid += t->stid_base;
2126 t->stids_in_use++;
2127 }
2128 spin_unlock_bh(&t->stid_lock);
2129 return stid;
2130}
2131EXPORT_SYMBOL(cxgb4_alloc_stid);
2132
2133/*
2134 * Release a server TID.
2135 */
2136void cxgb4_free_stid(struct tid_info *t, unsigned int stid, int family)
2137{
2138 stid -= t->stid_base;
2139 spin_lock_bh(&t->stid_lock);
2140 if (family == PF_INET)
2141 __clear_bit(stid, t->stid_bmap);
2142 else
2143 bitmap_release_region(t->stid_bmap, stid, 2);
2144 t->stid_tab[stid].data = NULL;
2145 t->stids_in_use--;
2146 spin_unlock_bh(&t->stid_lock);
2147}
2148EXPORT_SYMBOL(cxgb4_free_stid);
2149
2150/*
2151 * Populate a TID_RELEASE WR. Caller must properly size the skb.
2152 */
2153static void mk_tid_release(struct sk_buff *skb, unsigned int chan,
2154 unsigned int tid)
2155{
2156 struct cpl_tid_release *req;
2157
2158 set_wr_txq(skb, CPL_PRIORITY_SETUP, chan);
2159 req = (struct cpl_tid_release *)__skb_put(skb, sizeof(*req));
2160 INIT_TP_WR(req, tid);
2161 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, tid));
2162}
2163
2164/*
2165 * Queue a TID release request and if necessary schedule a work queue to
2166 * process it.
2167 */
2168void cxgb4_queue_tid_release(struct tid_info *t, unsigned int chan,
2169 unsigned int tid)
2170{
2171 void **p = &t->tid_tab[tid];
2172 struct adapter *adap = container_of(t, struct adapter, tids);
2173
2174 spin_lock_bh(&adap->tid_release_lock);
2175 *p = adap->tid_release_head;
2176 /* Low 2 bits encode the Tx channel number */
2177 adap->tid_release_head = (void **)((uintptr_t)p | chan);
2178 if (!adap->tid_release_task_busy) {
2179 adap->tid_release_task_busy = true;
2180 schedule_work(&adap->tid_release_task);
2181 }
2182 spin_unlock_bh(&adap->tid_release_lock);
2183}
2184EXPORT_SYMBOL(cxgb4_queue_tid_release);
2185
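/*
 * Editor's note -- illustrative example, not part of the driver source.
 * cxgb4_queue_tid_release() above hides the Tx channel in the low two bits
 * of a list pointer (the entries are pointer-aligned, so those bits are
 * always zero) and process_tid_release_list() masks them off again. The
 * same tagging trick, standalone:
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	void *slots[4];			/* stands in for tid_tab entries */
	void **p = &slots[2];
	unsigned int chan = 3;		/* must fit in the low 2 bits */

	/* tag: pointer | channel */
	void **tagged = (void **)((uintptr_t)p | chan);

	/* untag: recover both halves */
	unsigned int got_chan = (uintptr_t)tagged & 3;
	void **got_p = (void **)((uintptr_t)tagged - got_chan);

	printf("chan=%u, index=%td\n", got_chan, got_p - slots);
	return 0;
}
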
2186/*
2187 * Process the list of pending TID release requests.
2188 */
2189static void process_tid_release_list(struct work_struct *work)
2190{
2191 struct sk_buff *skb;
2192 struct adapter *adap;
2193
2194 adap = container_of(work, struct adapter, tid_release_task);
2195
2196 spin_lock_bh(&adap->tid_release_lock);
2197 while (adap->tid_release_head) {
2198 void **p = adap->tid_release_head;
2199 unsigned int chan = (uintptr_t)p & 3;
2200 p = (void *)p - chan;
2201
2202 adap->tid_release_head = *p;
2203 *p = NULL;
2204 spin_unlock_bh(&adap->tid_release_lock);
2205
2206 while (!(skb = alloc_skb(sizeof(struct cpl_tid_release),
2207 GFP_KERNEL)))
2208 schedule_timeout_uninterruptible(1);
2209
2210 mk_tid_release(skb, chan, p - adap->tids.tid_tab);
2211 t4_ofld_send(adap, skb);
2212 spin_lock_bh(&adap->tid_release_lock);
2213 }
2214 adap->tid_release_task_busy = false;
2215 spin_unlock_bh(&adap->tid_release_lock);
2216}
2217
2218/*
2219 * Release a TID and inform HW. If we are unable to allocate the release
2220 * message, we defer it to a work queue.
2221 */
2222void cxgb4_remove_tid(struct tid_info *t, unsigned int chan, unsigned int tid)
2223{
2224 void *old;
2225 struct sk_buff *skb;
2226 struct adapter *adap = container_of(t, struct adapter, tids);
2227
2228 old = t->tid_tab[tid];
2229 skb = alloc_skb(sizeof(struct cpl_tid_release), GFP_ATOMIC);
2230 if (likely(skb)) {
2231 t->tid_tab[tid] = NULL;
2232 mk_tid_release(skb, chan, tid);
2233 t4_ofld_send(adap, skb);
2234 } else
2235 cxgb4_queue_tid_release(t, chan, tid);
2236 if (old)
2237 atomic_dec(&t->tids_in_use);
2238}
2239EXPORT_SYMBOL(cxgb4_remove_tid);
2240
2241/*
2242 * Allocate and initialize the TID tables. Returns 0 on success.
2243 */
2244static int tid_init(struct tid_info *t)
2245{
2246 size_t size;
2247 unsigned int natids = t->natids;
2248
2249 size = t->ntids * sizeof(*t->tid_tab) + natids * sizeof(*t->atid_tab) +
2250 t->nstids * sizeof(*t->stid_tab) +
2251 BITS_TO_LONGS(t->nstids) * sizeof(long);
2252 t->tid_tab = t4_alloc_mem(size);
2253 if (!t->tid_tab)
2254 return -ENOMEM;
2255
2256 t->atid_tab = (union aopen_entry *)&t->tid_tab[t->ntids];
2257 t->stid_tab = (struct serv_entry *)&t->atid_tab[natids];
2258 t->stid_bmap = (unsigned long *)&t->stid_tab[t->nstids];
2259 spin_lock_init(&t->stid_lock);
2260 spin_lock_init(&t->atid_lock);
2261
2262 t->stids_in_use = 0;
2263 t->afree = NULL;
2264 t->atids_in_use = 0;
2265 atomic_set(&t->tids_in_use, 0);
2266
2267 /* Setup the free list for atid_tab and clear the stid bitmap. */
2268 if (natids) {
2269 while (--natids)
2270 t->atid_tab[natids - 1].next = &t->atid_tab[natids];
2271 t->afree = t->atid_tab;
2272 }
2273 bitmap_zero(t->stid_bmap, t->nstids);
2274 return 0;
2275}
2276
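/*
 * Editor's note -- illustrative example, not part of the driver source.
 * tid_init() above makes a single allocation and carves it into the TID
 * array, ATID array, STID array and STID bitmap. The same layout
 * arithmetic, standalone and with made-up element types and counts:
 */
#include <stdio.h>
#include <stdlib.h>
#include <limits.h>

#define NTIDS  1024
#define NATIDS 128
#define NSTIDS 256
#define BITS_TO_LONGS(n) (((n) + CHAR_BIT * sizeof(long) - 1) / \
			  (CHAR_BIT * sizeof(long)))

int main(void)
{
	void **tid_tab, **atid_tab, **stid_tab;
	unsigned long *stid_bmap;
	size_t size;

	size = NTIDS  * sizeof(void *) +			/* tid_tab  */
	       NATIDS * sizeof(void *) +			/* atid_tab */
	       NSTIDS * sizeof(void *) +			/* stid_tab */
	       BITS_TO_LONGS(NSTIDS) * sizeof(long);		/* bitmap   */
	tid_tab = calloc(1, size);
	if (!tid_tab)
		return 1;

	/* each table starts where the previous one ends */
	atid_tab = &tid_tab[NTIDS];
	stid_tab = &atid_tab[NATIDS];
	stid_bmap = (unsigned long *)&stid_tab[NSTIDS];

	printf("one %zu-byte block, stid bitmap at offset %zu\n",
	       size, (size_t)((char *)stid_bmap - (char *)tid_tab));
	free(tid_tab);
	return 0;
}
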
2277/**
2278 * cxgb4_create_server - create an IP server
2279 * @dev: the device
2280 * @stid: the server TID
2281 * @sip: local IP address to bind server to
2282 * @sport: the server's TCP port
2283 * @queue: queue to direct messages from this server to
2284 *
2285 * Create an IP server for the given port and address.
2286 * Returns <0 on error and one of the %NET_XMIT_* values on success.
2287 */
2288int cxgb4_create_server(const struct net_device *dev, unsigned int stid,
2289 __be32 sip, __be16 sport, unsigned int queue)
2290{
2291 unsigned int chan;
2292 struct sk_buff *skb;
2293 struct adapter *adap;
2294 struct cpl_pass_open_req *req;
2295
2296 skb = alloc_skb(sizeof(*req), GFP_KERNEL);
2297 if (!skb)
2298 return -ENOMEM;
2299
2300 adap = netdev2adap(dev);
2301 req = (struct cpl_pass_open_req *)__skb_put(skb, sizeof(*req));
2302 INIT_TP_WR(req, 0);
2303 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ, stid));
2304 req->local_port = sport;
2305 req->peer_port = htons(0);
2306 req->local_ip = sip;
2307 req->peer_ip = htonl(0);
2308 chan = netdev2pinfo(adap->sge.ingr_map[queue]->netdev)->tx_chan;
2309 req->opt0 = cpu_to_be64(TX_CHAN(chan));
2310 req->opt1 = cpu_to_be64(CONN_POLICY_ASK |
2311 SYN_RSS_ENABLE | SYN_RSS_QUEUE(queue));
2312 return t4_mgmt_tx(adap, skb);
2313}
2314EXPORT_SYMBOL(cxgb4_create_server);
2315
2316/**
2317 * cxgb4_create_server6 - create an IPv6 server
2318 * @dev: the device
2319 * @stid: the server TID
2320 * @sip: local IPv6 address to bind server to
2321 * @sport: the server's TCP port
2322 * @queue: queue to direct messages from this server to
2323 *
2324 * Create an IPv6 server for the given port and address.
2325 * Returns <0 on error and one of the %NET_XMIT_* values on success.
2326 */
2327int cxgb4_create_server6(const struct net_device *dev, unsigned int stid,
2328 const struct in6_addr *sip, __be16 sport,
2329 unsigned int queue)
2330{
2331 unsigned int chan;
2332 struct sk_buff *skb;
2333 struct adapter *adap;
2334 struct cpl_pass_open_req6 *req;
2335
2336 skb = alloc_skb(sizeof(*req), GFP_KERNEL);
2337 if (!skb)
2338 return -ENOMEM;
2339
2340 adap = netdev2adap(dev);
2341 req = (struct cpl_pass_open_req6 *)__skb_put(skb, sizeof(*req));
2342 INIT_TP_WR(req, 0);
2343 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ6, stid));
2344 req->local_port = sport;
2345 req->peer_port = htons(0);
2346 req->local_ip_hi = *(__be64 *)(sip->s6_addr);
2347 req->local_ip_lo = *(__be64 *)(sip->s6_addr + 8);
2348 req->peer_ip_hi = cpu_to_be64(0);
2349 req->peer_ip_lo = cpu_to_be64(0);
2350 chan = netdev2pinfo(adap->sge.ingr_map[queue]->netdev)->tx_chan;
2351 req->opt0 = cpu_to_be64(TX_CHAN(chan));
2352 req->opt1 = cpu_to_be64(CONN_POLICY_ASK |
2353 SYN_RSS_ENABLE | SYN_RSS_QUEUE(queue));
2354 return t4_mgmt_tx(adap, skb);
2355}
2356EXPORT_SYMBOL(cxgb4_create_server6);
2357
2358/**
2359 * cxgb4_best_mtu - find the entry in the MTU table closest to an MTU
2360 * @mtus: the HW MTU table
2361 * @mtu: the target MTU
2362 * @idx: index of selected entry in the MTU table
2363 *
2364 * Returns the index and the value in the HW MTU table that is closest to
2365 * but does not exceed @mtu, unless @mtu is smaller than any value in the
2366 * table, in which case the smallest available value is selected.
2367 */
2368unsigned int cxgb4_best_mtu(const unsigned short *mtus, unsigned short mtu,
2369 unsigned int *idx)
2370{
2371 unsigned int i = 0;
2372
2373 while (i < NMTUS - 1 && mtus[i + 1] <= mtu)
2374 ++i;
2375 if (idx)
2376 *idx = i;
2377 return mtus[i];
2378}
2379EXPORT_SYMBOL(cxgb4_best_mtu);
2380
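/*
 * Editor's note -- illustrative example, not part of the driver source. A
 * standalone copy of the cxgb4_best_mtu() search over a sample MTU table,
 * runnable in userspace. The table contents are typical but made up; the
 * real table lives in adap->params.mtus.
 */
#include <stdio.h>

#define NMTUS 16

static const unsigned short mtus[NMTUS] = {
	88, 256, 512, 576, 808, 1024, 1280, 1488, 1500, 2002, 2048, 4096,
	4352, 8192, 9000, 9600
};

static unsigned int best_mtu(const unsigned short *tbl, unsigned short mtu,
			     unsigned int *idx)
{
	unsigned int i = 0;

	/* walk forward while the next entry still fits */
	while (i < NMTUS - 1 && tbl[i + 1] <= mtu)
		++i;
	if (idx)
		*idx = i;
	return tbl[i];
}

int main(void)
{
	unsigned int idx;

	printf("mtu 1500 -> %u (idx %u)\n", best_mtu(mtus, 1500, &idx), idx);
	printf("mtu 60   -> %u (idx %u)\n", best_mtu(mtus, 60, &idx), idx);
	return 0;
}
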
2381/**
2382 * cxgb4_port_chan - get the HW channel of a port
2383 * @dev: the net device for the port
2384 *
2385 * Return the HW Tx channel of the given port.
2386 */
2387unsigned int cxgb4_port_chan(const struct net_device *dev)
2388{
2389 return netdev2pinfo(dev)->tx_chan;
2390}
2391EXPORT_SYMBOL(cxgb4_port_chan);
2392
2393/**
2394 * cxgb4_port_viid - get the VI id of a port
2395 * @dev: the net device for the port
2396 *
2397 * Return the VI id of the given port.
2398 */
2399unsigned int cxgb4_port_viid(const struct net_device *dev)
2400{
2401 return netdev2pinfo(dev)->viid;
2402}
2403EXPORT_SYMBOL(cxgb4_port_viid);
2404
2405/**
2406 * cxgb4_port_idx - get the index of a port
2407 * @dev: the net device for the port
2408 *
2409 * Return the index of the given port.
2410 */
2411unsigned int cxgb4_port_idx(const struct net_device *dev)
2412{
2413 return netdev2pinfo(dev)->port_id;
2414}
2415EXPORT_SYMBOL(cxgb4_port_idx);
2416
2417/**
2418 * cxgb4_netdev_by_hwid - return the net device of a HW port
2419 * @pdev: identifies the adapter
2420 * @id: the HW port id
2421 *
2422 * Return the net device associated with the interface with the given HW
2423 * id.
2424 */
2425struct net_device *cxgb4_netdev_by_hwid(struct pci_dev *pdev, unsigned int id)
2426{
2427 const struct adapter *adap = pci_get_drvdata(pdev);
2428
2429 if (!adap || id >= NCHAN)
2430 return NULL;
2431 id = adap->chan_map[id];
2432 return id < MAX_NPORTS ? adap->port[id] : NULL;
2433}
2434EXPORT_SYMBOL(cxgb4_netdev_by_hwid);
2435
2436void cxgb4_get_tcp_stats(struct pci_dev *pdev, struct tp_tcp_stats *v4,
2437 struct tp_tcp_stats *v6)
2438{
2439 struct adapter *adap = pci_get_drvdata(pdev);
2440
2441 spin_lock(&adap->stats_lock);
2442 t4_tp_get_tcp_stats(adap, v4, v6);
2443 spin_unlock(&adap->stats_lock);
2444}
2445EXPORT_SYMBOL(cxgb4_get_tcp_stats);
2446
2447void cxgb4_iscsi_init(struct net_device *dev, unsigned int tag_mask,
2448 const unsigned int *pgsz_order)
2449{
2450 struct adapter *adap = netdev2adap(dev);
2451
2452 t4_write_reg(adap, ULP_RX_ISCSI_TAGMASK, tag_mask);
2453 t4_write_reg(adap, ULP_RX_ISCSI_PSZ, HPZ0(pgsz_order[0]) |
2454 HPZ1(pgsz_order[1]) | HPZ2(pgsz_order[2]) |
2455 HPZ3(pgsz_order[3]));
2456}
2457EXPORT_SYMBOL(cxgb4_iscsi_init);
2458
2459static struct pci_driver cxgb4_driver;
2460
2461static void check_neigh_update(struct neighbour *neigh)
2462{
2463 const struct device *parent;
2464 const struct net_device *netdev = neigh->dev;
2465
2466 if (netdev->priv_flags & IFF_802_1Q_VLAN)
2467 netdev = vlan_dev_real_dev(netdev);
2468 parent = netdev->dev.parent;
2469 if (parent && parent->driver == &cxgb4_driver.driver)
2470 t4_l2t_update(dev_get_drvdata(parent), neigh);
2471}
2472
2473static int netevent_cb(struct notifier_block *nb, unsigned long event,
2474 void *data)
2475{
2476 switch (event) {
2477 case NETEVENT_NEIGH_UPDATE:
2478 check_neigh_update(data);
2479 break;
2480 case NETEVENT_PMTU_UPDATE:
2481 case NETEVENT_REDIRECT:
2482 default:
2483 break;
2484 }
2485 return 0;
2486}
2487
2488static bool netevent_registered;
2489static struct notifier_block cxgb4_netevent_nb = {
2490 .notifier_call = netevent_cb
2491};
2492
2493static void uld_attach(struct adapter *adap, unsigned int uld)
2494{
2495 void *handle;
2496 struct cxgb4_lld_info lli;
2497
2498 lli.pdev = adap->pdev;
2499 lli.l2t = adap->l2t;
2500 lli.tids = &adap->tids;
2501 lli.ports = adap->port;
2502 lli.vr = &adap->vres;
2503 lli.mtus = adap->params.mtus;
2504 if (uld == CXGB4_ULD_RDMA) {
2505 lli.rxq_ids = adap->sge.rdma_rxq;
2506 lli.nrxq = adap->sge.rdmaqs;
2507 } else if (uld == CXGB4_ULD_ISCSI) {
2508 lli.rxq_ids = adap->sge.ofld_rxq;
2509 lli.nrxq = adap->sge.ofldqsets;
2510 }
2511 lli.ntxq = adap->sge.ofldqsets;
2512 lli.nchan = adap->params.nports;
2513 lli.nports = adap->params.nports;
2514 lli.wr_cred = adap->params.ofldq_wr_cred;
2515 lli.adapter_type = adap->params.rev;
2516 lli.iscsi_iolen = MAXRXDATA_GET(t4_read_reg(adap, TP_PARA_REG2));
2517 lli.udb_density = 1 << QUEUESPERPAGEPF0_GET(
060e0c75
DM
2518 t4_read_reg(adap, SGE_EGRESS_QUEUES_PER_PAGE_PF) >>
2519 (adap->fn * 4));
b8ff05a9 2520 lli.ucq_density = 1 << QUEUESPERPAGEPF0_GET(
060e0c75
DM
2521 t4_read_reg(adap, SGE_INGRESS_QUEUES_PER_PAGE_PF) >>
2522 (adap->fn * 4));
b8ff05a9
DM
2523 lli.gts_reg = adap->regs + MYPF_REG(SGE_PF_GTS);
2524 lli.db_reg = adap->regs + MYPF_REG(SGE_PF_KDOORBELL);
2525 lli.fw_vers = adap->params.fw_vers;
2526
2527 handle = ulds[uld].add(&lli);
2528 if (IS_ERR(handle)) {
2529 dev_warn(adap->pdev_dev,
2530 "could not attach to the %s driver, error %ld\n",
2531 uld_str[uld], PTR_ERR(handle));
2532 return;
2533 }
2534
2535 adap->uld_handle[uld] = handle;
2536
2537 if (!netevent_registered) {
2538 register_netevent_notifier(&cxgb4_netevent_nb);
2539 netevent_registered = true;
2540 }
e29f5dbc
DM
2541
2542 if (adap->flags & FULL_INIT_DONE)
2543 ulds[uld].state_change(handle, CXGB4_STATE_UP);
b8ff05a9
DM
2544}
2545
2546static void attach_ulds(struct adapter *adap)
2547{
2548 unsigned int i;
2549
2550 mutex_lock(&uld_mutex);
2551 list_add_tail(&adap->list_node, &adapter_list);
2552 for (i = 0; i < CXGB4_ULD_MAX; i++)
2553 if (ulds[i].add)
2554 uld_attach(adap, i);
2555 mutex_unlock(&uld_mutex);
2556}
2557
2558static void detach_ulds(struct adapter *adap)
2559{
2560 unsigned int i;
2561
2562 mutex_lock(&uld_mutex);
2563 list_del(&adap->list_node);
2564 for (i = 0; i < CXGB4_ULD_MAX; i++)
2565 if (adap->uld_handle[i]) {
2566 ulds[i].state_change(adap->uld_handle[i],
2567 CXGB4_STATE_DETACH);
2568 adap->uld_handle[i] = NULL;
2569 }
2570 if (netevent_registered && list_empty(&adapter_list)) {
2571 unregister_netevent_notifier(&cxgb4_netevent_nb);
2572 netevent_registered = false;
2573 }
2574 mutex_unlock(&uld_mutex);
2575}
2576
2577static void notify_ulds(struct adapter *adap, enum cxgb4_state new_state)
2578{
2579 unsigned int i;
2580
2581 mutex_lock(&uld_mutex);
2582 for (i = 0; i < CXGB4_ULD_MAX; i++)
2583 if (adap->uld_handle[i])
2584 ulds[i].state_change(adap->uld_handle[i], new_state);
2585 mutex_unlock(&uld_mutex);
2586}
2587
2588/**
2589 * cxgb4_register_uld - register an upper-layer driver
2590 * @type: the ULD type
2591 * @p: the ULD methods
2592 *
2593 * Registers an upper-layer driver with this driver and notifies the ULD
2594 * about any presently available devices that support its type. Returns
2595 * %-EBUSY if a ULD of the same type is already registered.
2596 */
2597int cxgb4_register_uld(enum cxgb4_uld type, const struct cxgb4_uld_info *p)
2598{
2599 int ret = 0;
2600 struct adapter *adap;
2601
2602 if (type >= CXGB4_ULD_MAX)
2603 return -EINVAL;
2604 mutex_lock(&uld_mutex);
2605 if (ulds[type].add) {
2606 ret = -EBUSY;
2607 goto out;
2608 }
2609 ulds[type] = *p;
2610 list_for_each_entry(adap, &adapter_list, list_node)
2611 uld_attach(adap, type);
2612out: mutex_unlock(&uld_mutex);
2613 return ret;
2614}
2615EXPORT_SYMBOL(cxgb4_register_uld);
2616
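/*
 * Editor's note -- illustrative example, not part of the driver source.
 * The ULD hookup above works in either order: attach_ulds() offers a newly
 * probed adapter to every already-registered ULD, while cxgb4_register_uld()
 * offers a newly registered ULD every already-probed adapter. A tiny
 * userspace model of that rendezvous, with made-up names and no locking
 * (the driver serializes with uld_mutex):
 */
#include <stdio.h>

#define MAX_ULDS     2
#define MAX_ADAPTERS 4

struct uld_info {
	const char *name;
	void (*add)(int adapter_id);	/* like cxgb4_uld_info.add */
};

static struct uld_info ulds[MAX_ULDS];
static int adapters[MAX_ADAPTERS];
static int nadapters;

static void register_uld(int type, struct uld_info info)
{
	int i;

	ulds[type] = info;
	for (i = 0; i < nadapters; i++)		/* adapters probed first */
		info.add(adapters[i]);
}

static void probe_adapter(int id)
{
	int i;

	adapters[nadapters++] = id;
	for (i = 0; i < MAX_ULDS; i++)		/* ULDs registered first */
		if (ulds[i].add)
			ulds[i].add(id);
}

static void iw_add(int id) { printf("iwarp attaches to adapter %d\n", id); }

int main(void)
{
	struct uld_info iw = { "iwarp", iw_add };

	probe_adapter(0);	/* adapter arrives before the ULD...  */
	register_uld(0, iw);	/* ...and the ULD still sees it       */
	probe_adapter(1);	/* later adapters are offered as well */
	return 0;
}
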
2617/**
2618 * cxgb4_unregister_uld - unregister an upper-layer driver
2619 * @type: the ULD type
2620 *
2621 * Unregisters an existing upper-layer driver.
2622 */
2623int cxgb4_unregister_uld(enum cxgb4_uld type)
2624{
2625 struct adapter *adap;
2626
2627 if (type >= CXGB4_ULD_MAX)
2628 return -EINVAL;
2629 mutex_lock(&uld_mutex);
2630 list_for_each_entry(adap, &adapter_list, list_node)
2631 adap->uld_handle[type] = NULL;
2632 ulds[type].add = NULL;
2633 mutex_unlock(&uld_mutex);
2634 return 0;
2635}
2636EXPORT_SYMBOL(cxgb4_unregister_uld);
2637
2638/**
2639 * cxgb_up - enable the adapter
2640 * @adap: adapter being enabled
2641 *
2642 * Called when the first port is enabled, this function performs the
2643 * actions necessary to make an adapter operational, such as completing
2644 * the initialization of HW modules, and enabling interrupts.
2645 *
2646 * Must be called with the rtnl lock held.
2647 */
2648static int cxgb_up(struct adapter *adap)
2649{
aaefae9b 2650 int err;
b8ff05a9 2651
aaefae9b
DM
2652 err = setup_sge_queues(adap);
2653 if (err)
2654 goto out;
2655 err = setup_rss(adap);
2656 if (err)
2657 goto freeq;
b8ff05a9
DM
2658
2659 if (adap->flags & USING_MSIX) {
aaefae9b 2660 name_msix_vecs(adap);
b8ff05a9
DM
2661 err = request_irq(adap->msix_info[0].vec, t4_nondata_intr, 0,
2662 adap->msix_info[0].desc, adap);
2663 if (err)
2664 goto irq_err;
2665
2666 err = request_msix_queue_irqs(adap);
2667 if (err) {
2668 free_irq(adap->msix_info[0].vec, adap);
2669 goto irq_err;
2670 }
2671 } else {
2672 err = request_irq(adap->pdev->irq, t4_intr_handler(adap),
2673 (adap->flags & USING_MSI) ? 0 : IRQF_SHARED,
2674 adap->name, adap);
2675 if (err)
2676 goto irq_err;
2677 }
2678 enable_rx(adap);
2679 t4_sge_start(adap);
2680 t4_intr_enable(adap);
aaefae9b 2681 adap->flags |= FULL_INIT_DONE;
b8ff05a9
DM
2682 notify_ulds(adap, CXGB4_STATE_UP);
2683 out:
2684 return err;
2685 irq_err:
2686 dev_err(adap->pdev_dev, "request_irq failed, err %d\n", err);
aaefae9b
DM
2687 freeq:
2688 t4_free_sge_resources(adap);
b8ff05a9
DM
2689 goto out;
2690}
2691
2692static void cxgb_down(struct adapter *adapter)
2693{
2694 t4_intr_disable(adapter);
2695 cancel_work_sync(&adapter->tid_release_task);
2696 adapter->tid_release_task_busy = false;
204dc3c0 2697 adapter->tid_release_head = NULL;
b8ff05a9
DM
2698
2699 if (adapter->flags & USING_MSIX) {
2700 free_msix_queue_irqs(adapter);
2701 free_irq(adapter->msix_info[0].vec, adapter);
2702 } else
2703 free_irq(adapter->pdev->irq, adapter);
2704 quiesce_rx(adapter);
aaefae9b
DM
2705 t4_sge_stop(adapter);
2706 t4_free_sge_resources(adapter);
2707 adapter->flags &= ~FULL_INIT_DONE;
b8ff05a9
DM
2708}
2709
2710/*
2711 * net_device operations
2712 */
2713static int cxgb_open(struct net_device *dev)
2714{
2715 int err;
2716 struct port_info *pi = netdev_priv(dev);
2717 struct adapter *adapter = pi->adapter;
2718
aaefae9b
DM
2719 if (!(adapter->flags & FULL_INIT_DONE)) {
2720 err = cxgb_up(adapter);
2721 if (err < 0)
2722 return err;
2723 }
b8ff05a9
DM
2724
2725 dev->real_num_tx_queues = pi->nqsets;
f68707b8
DM
2726 err = link_start(dev);
2727 if (!err)
2728 netif_tx_start_all_queues(dev);
2729 return err;
b8ff05a9
DM
2730}
2731
2732static int cxgb_close(struct net_device *dev)
2733{
b8ff05a9
DM
2734 struct port_info *pi = netdev_priv(dev);
2735 struct adapter *adapter = pi->adapter;
2736
2737 netif_tx_stop_all_queues(dev);
2738 netif_carrier_off(dev);
060e0c75 2739 return t4_enable_vi(adapter, adapter->fn, pi->viid, false, false);
b8ff05a9
DM
2740}
2741
f5152c90
DM
2742static struct rtnl_link_stats64 *cxgb_get_stats(struct net_device *dev,
2743 struct rtnl_link_stats64 *ns)
b8ff05a9
DM
2744{
2745 struct port_stats stats;
2746 struct port_info *p = netdev_priv(dev);
2747 struct adapter *adapter = p->adapter;
b8ff05a9
DM
2748
2749 spin_lock(&adapter->stats_lock);
2750 t4_get_port_stats(adapter, p->tx_chan, &stats);
2751 spin_unlock(&adapter->stats_lock);
2752
2753 ns->tx_bytes = stats.tx_octets;
2754 ns->tx_packets = stats.tx_frames;
2755 ns->rx_bytes = stats.rx_octets;
2756 ns->rx_packets = stats.rx_frames;
2757 ns->multicast = stats.rx_mcast_frames;
2758
2759 /* detailed rx_errors */
2760 ns->rx_length_errors = stats.rx_jabber + stats.rx_too_long +
2761 stats.rx_runt;
2762 ns->rx_over_errors = 0;
2763 ns->rx_crc_errors = stats.rx_fcs_err;
2764 ns->rx_frame_errors = stats.rx_symbol_err;
2765 ns->rx_fifo_errors = stats.rx_ovflow0 + stats.rx_ovflow1 +
2766 stats.rx_ovflow2 + stats.rx_ovflow3 +
2767 stats.rx_trunc0 + stats.rx_trunc1 +
2768 stats.rx_trunc2 + stats.rx_trunc3;
2769 ns->rx_missed_errors = 0;
2770
2771 /* detailed tx_errors */
2772 ns->tx_aborted_errors = 0;
2773 ns->tx_carrier_errors = 0;
2774 ns->tx_fifo_errors = 0;
2775 ns->tx_heartbeat_errors = 0;
2776 ns->tx_window_errors = 0;
2777
2778 ns->tx_errors = stats.tx_error_frames;
2779 ns->rx_errors = stats.rx_symbol_err + stats.rx_fcs_err +
2780 ns->rx_length_errors + stats.rx_len_err + ns->rx_fifo_errors;
2781 return ns;
2782}
2783
2784static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
2785{
060e0c75 2786 unsigned int mbox;
b8ff05a9
DM
2787 int ret = 0, prtad, devad;
2788 struct port_info *pi = netdev_priv(dev);
2789 struct mii_ioctl_data *data = (struct mii_ioctl_data *)&req->ifr_data;
2790
2791 switch (cmd) {
2792 case SIOCGMIIPHY:
2793 if (pi->mdio_addr < 0)
2794 return -EOPNOTSUPP;
2795 data->phy_id = pi->mdio_addr;
2796 break;
2797 case SIOCGMIIREG:
2798 case SIOCSMIIREG:
2799 if (mdio_phy_id_is_c45(data->phy_id)) {
2800 prtad = mdio_phy_id_prtad(data->phy_id);
2801 devad = mdio_phy_id_devad(data->phy_id);
2802 } else if (data->phy_id < 32) {
2803 prtad = data->phy_id;
2804 devad = 0;
2805 data->reg_num &= 0x1f;
2806 } else
2807 return -EINVAL;
2808
060e0c75 2809 mbox = pi->adapter->fn;
b8ff05a9 2810 if (cmd == SIOCGMIIREG)
060e0c75 2811 ret = t4_mdio_rd(pi->adapter, mbox, prtad, devad,
b8ff05a9
DM
2812 data->reg_num, &data->val_out);
2813 else
060e0c75 2814 ret = t4_mdio_wr(pi->adapter, mbox, prtad, devad,
b8ff05a9
DM
2815 data->reg_num, data->val_in);
2816 break;
2817 default:
2818 return -EOPNOTSUPP;
2819 }
2820 return ret;
2821}
2822
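/*
 * Editor's note -- illustrative example, not part of the driver source.
 * The SIOC[GS]MIIREG path above relies on the Clause-45 phy_id packing
 * from include/linux/mdio.h: bit 15 marks a C45 address, bits 9..5 carry
 * the port address (prtad) and bits 4..0 the device address (devad). A
 * standalone demo of that packing, with example values:
 */
#include <stdio.h>

#define MDIO_PHY_ID_C45    0x8000
#define MDIO_PHY_ID_PRTAD  0x03e0
#define MDIO_PHY_ID_DEVAD  0x001f

static unsigned int phy_id_c45(unsigned int prtad, unsigned int devad)
{
	return MDIO_PHY_ID_C45 | (prtad << 5) | devad;
}

int main(void)
{
	unsigned int id = phy_id_c45(4, 30);	/* port 4, MMD 30: examples */

	printf("phy_id=%#x prtad=%u devad=%u\n", id,
	       (id & MDIO_PHY_ID_PRTAD) >> 5,
	       id & MDIO_PHY_ID_DEVAD);
	return 0;
}
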
2823static void cxgb_set_rxmode(struct net_device *dev)
2824{
2825 /* unfortunately we can't return errors to the stack */
2826 set_rxmode(dev, -1, false);
2827}
2828
2829static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
2830{
2831 int ret;
2832 struct port_info *pi = netdev_priv(dev);
2833
2834 if (new_mtu < 81 || new_mtu > MAX_MTU) /* accommodate SACK */
2835 return -EINVAL;
060e0c75
DM
2836 ret = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, new_mtu, -1,
2837 -1, -1, -1, true);
b8ff05a9
DM
2838 if (!ret)
2839 dev->mtu = new_mtu;
2840 return ret;
2841}
2842
2843static int cxgb_set_mac_addr(struct net_device *dev, void *p)
2844{
2845 int ret;
2846 struct sockaddr *addr = p;
2847 struct port_info *pi = netdev_priv(dev);
2848
2849 if (!is_valid_ether_addr(addr->sa_data))
2850 return -EINVAL;
2851
060e0c75
DM
2852 ret = t4_change_mac(pi->adapter, pi->adapter->fn, pi->viid,
2853 pi->xact_addr_filt, addr->sa_data, true, true);
b8ff05a9
DM
2854 if (ret < 0)
2855 return ret;
2856
2857 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
2858 pi->xact_addr_filt = ret;
2859 return 0;
2860}
2861
2862static void vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
2863{
2864 struct port_info *pi = netdev_priv(dev);
2865
2866 pi->vlan_grp = grp;
060e0c75
DM
2867 t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, -1, -1, -1, -1,
2868 grp != NULL, true);
b8ff05a9
DM
2869}
2870
2871#ifdef CONFIG_NET_POLL_CONTROLLER
2872static void cxgb_netpoll(struct net_device *dev)
2873{
2874 struct port_info *pi = netdev_priv(dev);
2875 struct adapter *adap = pi->adapter;
2876
2877 if (adap->flags & USING_MSIX) {
2878 int i;
2879 struct sge_eth_rxq *rx = &adap->sge.ethrxq[pi->first_qset];
2880
2881 for (i = pi->nqsets; i; i--, rx++)
2882 t4_sge_intr_msix(0, &rx->rspq);
2883 } else
2884 t4_intr_handler(adap)(0, adap);
2885}
2886#endif
2887
2888static const struct net_device_ops cxgb4_netdev_ops = {
2889 .ndo_open = cxgb_open,
2890 .ndo_stop = cxgb_close,
2891 .ndo_start_xmit = t4_eth_xmit,
9be793bf 2892 .ndo_get_stats64 = cxgb_get_stats,
b8ff05a9
DM
2893 .ndo_set_rx_mode = cxgb_set_rxmode,
2894 .ndo_set_mac_address = cxgb_set_mac_addr,
2895 .ndo_validate_addr = eth_validate_addr,
2896 .ndo_do_ioctl = cxgb_ioctl,
2897 .ndo_change_mtu = cxgb_change_mtu,
2898 .ndo_vlan_rx_register = vlan_rx_register,
2899#ifdef CONFIG_NET_POLL_CONTROLLER
2900 .ndo_poll_controller = cxgb_netpoll,
2901#endif
2902};
2903
2904void t4_fatal_err(struct adapter *adap)
2905{
2906 t4_set_reg_field(adap, SGE_CONTROL, GLOBALENABLE, 0);
2907 t4_intr_disable(adap);
2908 dev_alert(adap->pdev_dev, "encountered fatal error, adapter stopped\n");
2909}
2910
2911static void setup_memwin(struct adapter *adap)
2912{
2913 u32 bar0;
2914
2915 bar0 = pci_resource_start(adap->pdev, 0); /* truncation intentional */
2916 t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 0),
2917 (bar0 + MEMWIN0_BASE) | BIR(0) |
2918 WINDOW(ilog2(MEMWIN0_APERTURE) - 10));
2919 t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 1),
2920 (bar0 + MEMWIN1_BASE) | BIR(0) |
2921 WINDOW(ilog2(MEMWIN1_APERTURE) - 10));
2922 t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 2),
2923 (bar0 + MEMWIN2_BASE) | BIR(0) |
2924 WINDOW(ilog2(MEMWIN2_APERTURE) - 10));
1ae970e0
DM
2925 if (adap->vres.ocq.size) {
2926 unsigned int start, sz_kb;
2927
2928 start = pci_resource_start(adap->pdev, 2) +
2929 OCQ_WIN_OFFSET(adap->pdev, &adap->vres);
2930 sz_kb = roundup_pow_of_two(adap->vres.ocq.size) >> 10;
2931 t4_write_reg(adap,
2932 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 3),
2933 start | BIR(1) | WINDOW(ilog2(sz_kb)));
2934 t4_write_reg(adap,
2935 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, 3),
2936 adap->vres.ocq.start);
2937 t4_read_reg(adap,
2938 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, 3));
2939 }
b8ff05a9
DM
2940}
2941
02b5fb8e
DM
2942static int adap_init1(struct adapter *adap, struct fw_caps_config_cmd *c)
2943{
2944 u32 v;
2945 int ret;
2946
2947 /* get device capabilities */
2948 memset(c, 0, sizeof(*c));
2949 c->op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
2950 FW_CMD_REQUEST | FW_CMD_READ);
2951 c->retval_len16 = htonl(FW_LEN16(*c));
060e0c75 2952 ret = t4_wr_mbox(adap, adap->fn, c, sizeof(*c), c);
02b5fb8e
DM
2953 if (ret < 0)
2954 return ret;
2955
2956 /* select capabilities we'll be using */
2957 if (c->niccaps & htons(FW_CAPS_CONFIG_NIC_VM)) {
2958 if (!vf_acls)
2959 c->niccaps ^= htons(FW_CAPS_CONFIG_NIC_VM);
2960 else
2961 c->niccaps = htons(FW_CAPS_CONFIG_NIC_VM);
2962 } else if (vf_acls) {
2963 dev_err(adap->pdev_dev, "virtualization ACLs not supported");
2964 return ret;
2965 }
2966 c->op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
2967 FW_CMD_REQUEST | FW_CMD_WRITE);
060e0c75 2968 ret = t4_wr_mbox(adap, adap->fn, c, sizeof(*c), NULL);
02b5fb8e
DM
2969 if (ret < 0)
2970 return ret;
2971
060e0c75 2972 ret = t4_config_glbl_rss(adap, adap->fn,
02b5fb8e
DM
2973 FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL,
2974 FW_RSS_GLB_CONFIG_CMD_TNLMAPEN |
2975 FW_RSS_GLB_CONFIG_CMD_TNLALLLKP);
2976 if (ret < 0)
2977 return ret;
2978
060e0c75
DM
2979 ret = t4_cfg_pfvf(adap, adap->fn, adap->fn, 0, MAX_EGRQ, 64, MAX_INGQ,
2980 0, 0, 4, 0xf, 0xf, 16, FW_CMD_CAP_PF, FW_CMD_CAP_PF);
02b5fb8e
DM
2981 if (ret < 0)
2982 return ret;
2983
2984 t4_sge_init(adap);
2985
02b5fb8e
DM
2986 /* tweak some settings */
2987 t4_write_reg(adap, TP_SHIFT_CNT, 0x64f8849);
2988 t4_write_reg(adap, ULP_RX_TDDP_PSZ, HPZ0(PAGE_SHIFT - 12));
2989 t4_write_reg(adap, TP_PIO_ADDR, TP_INGRESS_CONFIG);
2990 v = t4_read_reg(adap, TP_PIO_DATA);
2991 t4_write_reg(adap, TP_PIO_DATA, v & ~CSUM_HAS_PSEUDO_HDR);
060e0c75
DM
2992
2993 /* get basic stuff going */
2994 return t4_early_init(adap, adap->fn);
02b5fb8e
DM
2995}
2996
b8ff05a9
DM
2997/*
2998 * Max # of ATIDs. The absolute HW max is 16K but we keep it lower.
2999 */
3000#define MAX_ATIDS 8192U
3001
3002/*
3003 * Phase 0 of initialization: contact FW, obtain config, perform basic init.
3004 */
3005static int adap_init0(struct adapter *adap)
3006{
3007 int ret;
3008 u32 v, port_vec;
3009 enum dev_state state;
3010 u32 params[7], val[7];
3011 struct fw_caps_config_cmd c;
3012
3013 ret = t4_check_fw_version(adap);
3014 if (ret == -EINVAL || ret > 0) {
3015 if (upgrade_fw(adap) >= 0) /* recache FW version */
3016 ret = t4_check_fw_version(adap);
3017 }
3018 if (ret < 0)
3019 return ret;
3020
3021 /* contact FW, request master */
060e0c75 3022 ret = t4_fw_hello(adap, adap->fn, adap->fn, MASTER_MUST, &state);
b8ff05a9
DM
3023 if (ret < 0) {
3024 dev_err(adap->pdev_dev, "could not connect to FW, error %d\n",
3025 ret);
3026 return ret;
3027 }
3028
3029 /* reset device */
060e0c75 3030 ret = t4_fw_reset(adap, adap->fn, PIORSTMODE | PIORST);
b8ff05a9
DM
3031 if (ret < 0)
3032 goto bye;
3033
b8ff05a9
DM
3034 for (v = 0; v < SGE_NTIMERS - 1; v++)
3035 adap->sge.timer_val[v] = min(intr_holdoff[v], MAX_SGE_TIMERVAL);
3036 adap->sge.timer_val[SGE_NTIMERS - 1] = MAX_SGE_TIMERVAL;
3037 adap->sge.counter_val[0] = 1;
3038 for (v = 1; v < SGE_NCOUNTERS; v++)
3039 adap->sge.counter_val[v] = min(intr_cnt[v - 1],
3040 THRESHOLD_3_MASK);
b8ff05a9
DM
3041#define FW_PARAM_DEV(param) \
3042 (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \
3043 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param))
3044
a0881cab 3045 params[0] = FW_PARAM_DEV(CCLK);
060e0c75 3046 ret = t4_query_params(adap, adap->fn, adap->fn, 0, 1, params, val);
a0881cab
DM
3047 if (ret < 0)
3048 goto bye;
3049 adap->params.vpd.cclk = val[0];
3050
3051 ret = adap_init1(adap, &c);
3052 if (ret < 0)
3053 goto bye;
3054
b8ff05a9
DM
3055#define FW_PARAM_PFVF(param) \
3056 (FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \
060e0c75
DM
3057 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param) | \
3058 FW_PARAMS_PARAM_Y(adap->fn))
b8ff05a9
DM
3059
3060 params[0] = FW_PARAM_DEV(PORTVEC);
3061 params[1] = FW_PARAM_PFVF(L2T_START);
3062 params[2] = FW_PARAM_PFVF(L2T_END);
3063 params[3] = FW_PARAM_PFVF(FILTER_START);
3064 params[4] = FW_PARAM_PFVF(FILTER_END);
060e0c75 3065 ret = t4_query_params(adap, adap->fn, adap->fn, 0, 5, params, val);
b8ff05a9
DM
3066 if (ret < 0)
3067 goto bye;
3068 port_vec = val[0];
3069 adap->tids.ftid_base = val[3];
3070 adap->tids.nftids = val[4] - val[3] + 1;
3071
3072 if (c.ofldcaps) {
3073 /* query offload-related parameters */
3074 params[0] = FW_PARAM_DEV(NTID);
3075 params[1] = FW_PARAM_PFVF(SERVER_START);
3076 params[2] = FW_PARAM_PFVF(SERVER_END);
3077 params[3] = FW_PARAM_PFVF(TDDP_START);
3078 params[4] = FW_PARAM_PFVF(TDDP_END);
3079 params[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
060e0c75
DM
3080 ret = t4_query_params(adap, adap->fn, adap->fn, 0, 6, params,
3081 val);
b8ff05a9
DM
3082 if (ret < 0)
3083 goto bye;
3084 adap->tids.ntids = val[0];
3085 adap->tids.natids = min(adap->tids.ntids / 2, MAX_ATIDS);
3086 adap->tids.stid_base = val[1];
3087 adap->tids.nstids = val[2] - val[1] + 1;
3088 adap->vres.ddp.start = val[3];
3089 adap->vres.ddp.size = val[4] - val[3] + 1;
3090 adap->params.ofldq_wr_cred = val[5];
3091 adap->params.offload = 1;
3092 }
3093 if (c.rdmacaps) {
3094 params[0] = FW_PARAM_PFVF(STAG_START);
3095 params[1] = FW_PARAM_PFVF(STAG_END);
3096 params[2] = FW_PARAM_PFVF(RQ_START);
3097 params[3] = FW_PARAM_PFVF(RQ_END);
3098 params[4] = FW_PARAM_PFVF(PBL_START);
3099 params[5] = FW_PARAM_PFVF(PBL_END);
060e0c75
DM
3100 ret = t4_query_params(adap, adap->fn, adap->fn, 0, 6, params,
3101 val);
b8ff05a9
DM
3102 if (ret < 0)
3103 goto bye;
3104 adap->vres.stag.start = val[0];
3105 adap->vres.stag.size = val[1] - val[0] + 1;
3106 adap->vres.rq.start = val[2];
3107 adap->vres.rq.size = val[3] - val[2] + 1;
3108 adap->vres.pbl.start = val[4];
3109 adap->vres.pbl.size = val[5] - val[4] + 1;
a0881cab
DM
3110
3111 params[0] = FW_PARAM_PFVF(SQRQ_START);
3112 params[1] = FW_PARAM_PFVF(SQRQ_END);
3113 params[2] = FW_PARAM_PFVF(CQ_START);
3114 params[3] = FW_PARAM_PFVF(CQ_END);
1ae970e0
DM
3115 params[4] = FW_PARAM_PFVF(OCQ_START);
3116 params[5] = FW_PARAM_PFVF(OCQ_END);
060e0c75
DM
3117 ret = t4_query_params(adap, adap->fn, adap->fn, 0, 6, params,
3118 val);
a0881cab
DM
3119 if (ret < 0)
3120 goto bye;
3121 adap->vres.qp.start = val[0];
3122 adap->vres.qp.size = val[1] - val[0] + 1;
3123 adap->vres.cq.start = val[2];
3124 adap->vres.cq.size = val[3] - val[2] + 1;
1ae970e0
DM
3125 adap->vres.ocq.start = val[4];
3126 adap->vres.ocq.size = val[5] - val[4] + 1;
b8ff05a9
DM
3127 }
3128 if (c.iscsicaps) {
3129 params[0] = FW_PARAM_PFVF(ISCSI_START);
3130 params[1] = FW_PARAM_PFVF(ISCSI_END);
060e0c75
DM
3131 ret = t4_query_params(adap, adap->fn, adap->fn, 0, 2, params,
3132 val);
b8ff05a9
DM
3133 if (ret < 0)
3134 goto bye;
3135 adap->vres.iscsi.start = val[0];
3136 adap->vres.iscsi.size = val[1] - val[0] + 1;
3137 }
3138#undef FW_PARAM_PFVF
3139#undef FW_PARAM_DEV
3140
3141 adap->params.nports = hweight32(port_vec);
3142 adap->params.portvec = port_vec;
3143 adap->flags |= FW_OK;
3144
3145 /* These are finalized by FW initialization, load their values now */
3146 v = t4_read_reg(adap, TP_TIMER_RESOLUTION);
3147 adap->params.tp.tre = TIMERRESOLUTION_GET(v);
3148 t4_read_mtu_tbl(adap, adap->params.mtus, NULL);
3149 t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
3150 adap->params.b_wnd);
7ee9ff94
CL
3151
3152#ifdef CONFIG_PCI_IOV
3153 /*
3154 * Provision resource limits for Virtual Functions. We currently
3155 * grant them all the same static resource limits except for the Port
3156 * Access Rights Mask which we're assigning based on the PF. All of
3157 * the static provisioning stuff for both the PF and VF really needs
3158 * to be managed in a persistent manner for each device which the
3159 * firmware controls.
3160 */
3161 {
3162 int pf, vf;
3163
3164 for (pf = 0; pf < ARRAY_SIZE(num_vf); pf++) {
3165 if (num_vf[pf] <= 0)
3166 continue;
3167
3168 /* VF numbering starts at 1! */
3169 for (vf = 1; vf <= num_vf[pf]; vf++) {
060e0c75 3170 ret = t4_cfg_pfvf(adap, adap->fn, pf, vf,
7ee9ff94
CL
3171 VFRES_NEQ, VFRES_NETHCTRL,
3172 VFRES_NIQFLINT, VFRES_NIQ,
3173 VFRES_TC, VFRES_NVI,
3174 FW_PFVF_CMD_CMASK_MASK,
3175 pfvfres_pmask(adap, pf, vf),
3176 VFRES_NEXACTF,
3177 VFRES_R_CAPS, VFRES_WX_CAPS);
3178 if (ret < 0)
3179 dev_warn(adap->pdev_dev, "failed to "
3180 "provision pf/vf=%d/%d; "
3181 "err=%d\n", pf, vf, ret);
3182 }
3183 }
3184 }
3185#endif
3186
1ae970e0 3187 setup_memwin(adap);
b8ff05a9
DM
3188 return 0;
3189
3190 /*
3191 * If a command timed out or failed with EIO, the FW is not operating
3192 * within its spec or something catastrophic happened to the HW/FW; stop
3193 * issuing commands.
3194 */
3195bye: if (ret != -ETIMEDOUT && ret != -EIO)
060e0c75 3196 t4_fw_bye(adap, adap->fn);
b8ff05a9
DM
3197 return ret;
3198}
3199
204dc3c0
DM
3200/* EEH callbacks */
3201
3202static pci_ers_result_t eeh_err_detected(struct pci_dev *pdev,
3203 pci_channel_state_t state)
3204{
3205 int i;
3206 struct adapter *adap = pci_get_drvdata(pdev);
3207
3208 if (!adap)
3209 goto out;
3210
3211 rtnl_lock();
3212 adap->flags &= ~FW_OK;
3213 notify_ulds(adap, CXGB4_STATE_START_RECOVERY);
3214 for_each_port(adap, i) {
3215 struct net_device *dev = adap->port[i];
3216
3217 netif_device_detach(dev);
3218 netif_carrier_off(dev);
3219 }
3220 if (adap->flags & FULL_INIT_DONE)
3221 cxgb_down(adap);
3222 rtnl_unlock();
3223 pci_disable_device(pdev);
3224out: return state == pci_channel_io_perm_failure ?
3225 PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
3226}
3227
3228static pci_ers_result_t eeh_slot_reset(struct pci_dev *pdev)
3229{
3230 int i, ret;
3231 struct fw_caps_config_cmd c;
3232 struct adapter *adap = pci_get_drvdata(pdev);
3233
3234 if (!adap) {
3235 pci_restore_state(pdev);
3236 pci_save_state(pdev);
3237 return PCI_ERS_RESULT_RECOVERED;
3238 }
3239
3240 if (pci_enable_device(pdev)) {
3241 dev_err(&pdev->dev, "cannot reenable PCI device after reset\n");
3242 return PCI_ERS_RESULT_DISCONNECT;
3243 }
3244
3245 pci_set_master(pdev);
3246 pci_restore_state(pdev);
3247 pci_save_state(pdev);
3248 pci_cleanup_aer_uncorrect_error_status(pdev);
3249
3250 if (t4_wait_dev_ready(adap) < 0)
3251 return PCI_ERS_RESULT_DISCONNECT;
060e0c75 3252 if (t4_fw_hello(adap, adap->fn, adap->fn, MASTER_MUST, NULL))
204dc3c0
DM
3253 return PCI_ERS_RESULT_DISCONNECT;
3254 adap->flags |= FW_OK;
3255 if (adap_init1(adap, &c))
3256 return PCI_ERS_RESULT_DISCONNECT;
3257
3258 for_each_port(adap, i) {
3259 struct port_info *p = adap2pinfo(adap, i);
3260
060e0c75
DM
3261 ret = t4_alloc_vi(adap, adap->fn, p->tx_chan, adap->fn, 0, 1,
3262 NULL, NULL);
204dc3c0
DM
3263 if (ret < 0)
3264 return PCI_ERS_RESULT_DISCONNECT;
3265 p->viid = ret;
3266 p->xact_addr_filt = -1;
3267 }
3268
3269 t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
3270 adap->params.b_wnd);
1ae970e0 3271 setup_memwin(adap);
204dc3c0
DM
3272 if (cxgb_up(adap))
3273 return PCI_ERS_RESULT_DISCONNECT;
3274 return PCI_ERS_RESULT_RECOVERED;
3275}
3276
3277static void eeh_resume(struct pci_dev *pdev)
3278{
3279 int i;
3280 struct adapter *adap = pci_get_drvdata(pdev);
3281
3282 if (!adap)
3283 return;
3284
3285 rtnl_lock();
3286 for_each_port(adap, i) {
3287 struct net_device *dev = adap->port[i];
3288
3289 if (netif_running(dev)) {
3290 link_start(dev);
3291 cxgb_set_rxmode(dev);
3292 }
3293 netif_device_attach(dev);
3294 }
3295 rtnl_unlock();
3296}
3297
3298static struct pci_error_handlers cxgb4_eeh = {
3299 .error_detected = eeh_err_detected,
3300 .slot_reset = eeh_slot_reset,
3301 .resume = eeh_resume,
3302};
3303
b8ff05a9
DM
3304static inline bool is_10g_port(const struct link_config *lc)
3305{
3306 return (lc->supported & FW_PORT_CAP_SPEED_10G) != 0;
3307}
3308
3309static inline void init_rspq(struct sge_rspq *q, u8 timer_idx, u8 pkt_cnt_idx,
3310 unsigned int size, unsigned int iqe_size)
3311{
3312 q->intr_params = QINTR_TIMER_IDX(timer_idx) |
3313 (pkt_cnt_idx < SGE_NCOUNTERS ? QINTR_CNT_EN : 0);
3314 q->pktcnt_idx = pkt_cnt_idx < SGE_NCOUNTERS ? pkt_cnt_idx : 0;
3315 q->iqe_len = iqe_size;
3316 q->size = size;
3317}
3318
3319/*
3320 * Perform default configuration of DMA queues depending on the number and type
3321 * of ports we found and the number of available CPUs. Most settings can be
3322 * modified by the admin prior to actual use.
3323 */
3324static void __devinit cfg_queues(struct adapter *adap)
3325{
3326 struct sge *s = &adap->sge;
3327 int i, q10g = 0, n10g = 0, qidx = 0;
3328
3329 for_each_port(adap, i)
3330 n10g += is_10g_port(&adap2pinfo(adap, i)->link_cfg);
3331
3332 /*
3333 * We default to 1 queue per non-10G port and up to as many queues as
3334 * there are CPU cores per 10G port.
3335 */
3336 if (n10g)
3337 q10g = (MAX_ETH_QSETS - (adap->params.nports - n10g)) / n10g;
3338 if (q10g > num_online_cpus())
3339 q10g = num_online_cpus();
3340
3341 for_each_port(adap, i) {
3342 struct port_info *pi = adap2pinfo(adap, i);
3343
3344 pi->first_qset = qidx;
3345 pi->nqsets = is_10g_port(&pi->link_cfg) ? q10g : 1;
3346 qidx += pi->nqsets;
3347 }
3348
3349 s->ethqsets = qidx;
3350 s->max_ethqsets = qidx; /* MSI-X may lower it later */
3351
3352 if (is_offload(adap)) {
3353 /*
3354 * For offload we use 1 queue/channel if all ports are up to 1G,
3355 * otherwise we divide all available queues amongst the channels
3356 * capped by the number of available cores.
3357 */
3358 if (n10g) {
3359 i = min_t(int, ARRAY_SIZE(s->ofldrxq),
3360 num_online_cpus());
3361 s->ofldqsets = roundup(i, adap->params.nports);
3362 } else
3363 s->ofldqsets = adap->params.nports;
3364 /* For RDMA one Rx queue per channel suffices */
3365 s->rdmaqs = adap->params.nports;
3366 }
3367
3368 for (i = 0; i < ARRAY_SIZE(s->ethrxq); i++) {
3369 struct sge_eth_rxq *r = &s->ethrxq[i];
3370
3371 init_rspq(&r->rspq, 0, 0, 1024, 64);
3372 r->fl.size = 72;
3373 }
3374
3375 for (i = 0; i < ARRAY_SIZE(s->ethtxq); i++)
3376 s->ethtxq[i].q.size = 1024;
3377
3378 for (i = 0; i < ARRAY_SIZE(s->ctrlq); i++)
3379 s->ctrlq[i].q.size = 512;
3380
3381 for (i = 0; i < ARRAY_SIZE(s->ofldtxq); i++)
3382 s->ofldtxq[i].q.size = 1024;
3383
3384 for (i = 0; i < ARRAY_SIZE(s->ofldrxq); i++) {
3385 struct sge_ofld_rxq *r = &s->ofldrxq[i];
3386
3387 init_rspq(&r->rspq, 0, 0, 1024, 64);
3388 r->rspq.uld = CXGB4_ULD_ISCSI;
3389 r->fl.size = 72;
3390 }
3391
3392 for (i = 0; i < ARRAY_SIZE(s->rdmarxq); i++) {
3393 struct sge_ofld_rxq *r = &s->rdmarxq[i];
3394
3395 init_rspq(&r->rspq, 0, 0, 511, 64);
3396 r->rspq.uld = CXGB4_ULD_RDMA;
3397 r->fl.size = 72;
3398 }
3399
3400 init_rspq(&s->fw_evtq, 6, 0, 512, 64);
3401 init_rspq(&s->intrq, 6, 0, 2 * MAX_INGQ, 64);
3402}
3403
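/*
 * Editor's note -- illustrative example, not part of the driver source.
 * The queue provisioning arithmetic from cfg_queues() above, pulled out so
 * the split is easy to see: 1G ports get one queue set each and the 10G
 * ports share what is left of MAX_ETH_QSETS, capped by the online CPU
 * count. The MAX_ETH_QSETS value, port mix and CPU count below are assumed
 * inputs for the sketch.
 */
#include <stdio.h>

#define MAX_ETH_QSETS 32	/* assumed value for this sketch */

int main(void)
{
	int nports = 4, n10g = 2;	/* two 1G ports, two 10G ports */
	int online_cpus = 8;
	int q10g = 0;

	if (n10g)
		q10g = (MAX_ETH_QSETS - (nports - n10g)) / n10g;
	if (q10g > online_cpus)
		q10g = online_cpus;

	printf("each 1G port: 1 qset, each 10G port: %d qsets, total %d\n",
	       q10g, (nports - n10g) + n10g * q10g);
	return 0;
}
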
3404/*
3405 * Reduce the number of Ethernet queues across all ports to at most n.
3406 * The caller ensures n still allows at least one queue per port.
3407 */
3408static void __devinit reduce_ethqs(struct adapter *adap, int n)
3409{
3410 int i;
3411 struct port_info *pi;
3412
3413 while (n < adap->sge.ethqsets)
3414 for_each_port(adap, i) {
3415 pi = adap2pinfo(adap, i);
3416 if (pi->nqsets > 1) {
3417 pi->nqsets--;
3418 adap->sge.ethqsets--;
3419 if (adap->sge.ethqsets <= n)
3420 break;
3421 }
3422 }
3423
3424 n = 0;
3425 for_each_port(adap, i) {
3426 pi = adap2pinfo(adap, i);
3427 pi->first_qset = n;
3428 n += pi->nqsets;
3429 }
3430}
3431
3432/* 2 MSI-X vectors needed for the FW queue and non-data interrupts */
3433#define EXTRA_VECS 2
3434
3435static int __devinit enable_msix(struct adapter *adap)
3436{
3437 int ofld_need = 0;
3438 int i, err, want, need;
3439 struct sge *s = &adap->sge;
3440 unsigned int nchan = adap->params.nports;
3441 struct msix_entry entries[MAX_INGQ + 1];
3442
3443 for (i = 0; i < ARRAY_SIZE(entries); ++i)
3444 entries[i].entry = i;
3445
3446 want = s->max_ethqsets + EXTRA_VECS;
3447 if (is_offload(adap)) {
3448 want += s->rdmaqs + s->ofldqsets;
3449 /* need nchan for each possible ULD */
3450 ofld_need = 2 * nchan;
3451 }
3452 need = adap->params.nports + EXTRA_VECS + ofld_need;
3453
3454 while ((err = pci_enable_msix(adap->pdev, entries, want)) >= need)
3455 want = err;
3456
3457 if (!err) {
3458 /*
3459 * Distribute available vectors to the various queue groups.
3460 * Every group gets its minimum requirement and NIC gets top
3461 * priority for leftovers.
3462 */
3463 i = want - EXTRA_VECS - ofld_need;
3464 if (i < s->max_ethqsets) {
3465 s->max_ethqsets = i;
3466 if (i < s->ethqsets)
3467 reduce_ethqs(adap, i);
3468 }
3469 if (is_offload(adap)) {
3470 i = want - EXTRA_VECS - s->max_ethqsets;
3471 i -= ofld_need - nchan;
3472 s->ofldqsets = (i / nchan) * nchan; /* round down */
3473 }
3474 for (i = 0; i < want; ++i)
3475 adap->msix_info[i].vec = entries[i].vector;
3476 } else if (err > 0)
3477 dev_info(adap->pdev_dev,
3478 "only %d MSI-X vectors left, not using MSI-X\n", err);
3479 return err;
3480}
3481
3482#undef EXTRA_VECS
3483
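/*
 * Editor's note -- illustrative example, not part of the driver source. At
 * the time of this code, pci_enable_msix() returned 0 on success, a
 * negative errno on failure, or the number of vectors it *could* allocate
 * when the request was too large; enable_msix() above retries with that
 * smaller count as long as it still covers the minimum. A userspace model
 * of the same negotiation with a stubbed-out PCI call:
 */
#include <stdio.h>

static int vectors_available = 11;	/* pretend the platform has 11 */

/* stand-in for the old pci_enable_msix() return convention */
static int fake_enable_msix(int nvec)
{
	if (nvec <= vectors_available)
		return 0;		/* success                     */
	return vectors_available;	/* "try again with this many"  */
}

int main(void)
{
	int want = 18, need = 6, err;

	while ((err = fake_enable_msix(want)) >= need)
		want = err;

	if (!err)
		printf("got %d MSI-X vectors (needed %d)\n", want, need);
	else if (err > 0)
		printf("only %d vectors left, not using MSI-X\n", err);
	else
		printf("enable_msix failed: %d\n", err);
	return 0;
}
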
671b0060
DM
3484static int __devinit init_rss(struct adapter *adap)
3485{
3486 unsigned int i, j;
3487
3488 for_each_port(adap, i) {
3489 struct port_info *pi = adap2pinfo(adap, i);
3490
3491 pi->rss = kcalloc(pi->rss_size, sizeof(u16), GFP_KERNEL);
3492 if (!pi->rss)
3493 return -ENOMEM;
3494 for (j = 0; j < pi->rss_size; j++)
3495 pi->rss[j] = j % pi->nqsets;
3496 }
3497 return 0;
3498}
3499
b8ff05a9
DM
3500static void __devinit print_port_info(struct adapter *adap)
3501{
3502 static const char *base[] = {
a0881cab
DM
3503 "R XFI", "R XAUI", "T SGMII", "T XFI", "T XAUI", "KX4", "CX4",
3504 "KX", "KR", "KR SFP+", "KR FEC"
b8ff05a9
DM
3505 };
3506
3507 int i;
3508 char buf[80];
f1a051b9
DM
3509 const char *spd = "";
3510
3511 if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_2_5GB)
3512 spd = " 2.5 GT/s";
3513 else if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_5_0GB)
3514 spd = " 5 GT/s";
b8ff05a9
DM
3515
3516 for_each_port(adap, i) {
3517 struct net_device *dev = adap->port[i];
3518 const struct port_info *pi = netdev_priv(dev);
3519 char *bufp = buf;
3520
3521 if (!test_bit(i, &adap->registered_device_map))
3522 continue;
3523
3524 if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_100M)
3525 bufp += sprintf(bufp, "100/");
3526 if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_1G)
3527 bufp += sprintf(bufp, "1000/");
3528 if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_10G)
3529 bufp += sprintf(bufp, "10G/");
3530 if (bufp != buf)
3531 --bufp;
3532 sprintf(bufp, "BASE-%s", base[pi->port_type]);
3533
f1a051b9 3534 netdev_info(dev, "Chelsio %s rev %d %s %sNIC PCIe x%d%s%s\n",
b8ff05a9
DM
3535 adap->params.vpd.id, adap->params.rev,
3536 buf, is_offload(adap) ? "R" : "",
f1a051b9 3537 adap->params.pci.width, spd,
b8ff05a9
DM
3538 (adap->flags & USING_MSIX) ? " MSI-X" :
3539 (adap->flags & USING_MSI) ? " MSI" : "");
3540 if (adap->name == dev->name)
3541 netdev_info(dev, "S/N: %s, E/C: %s\n",
3542 adap->params.vpd.sn, adap->params.vpd.ec);
3543 }
3544}
3545
06546391
DM
3546/*
3547 * Free the following resources:
3548 * - memory used for tables
3549 * - MSI/MSI-X
3550 * - net devices
3551 * - resources FW is holding for us
3552 */
3553static void free_some_resources(struct adapter *adapter)
3554{
3555 unsigned int i;
3556
3557 t4_free_mem(adapter->l2t);
3558 t4_free_mem(adapter->tids.tid_tab);
3559 disable_msi(adapter);
3560
3561 for_each_port(adapter, i)
671b0060
DM
3562 if (adapter->port[i]) {
3563 kfree(adap2pinfo(adapter, i)->rss);
06546391 3564 free_netdev(adapter->port[i]);
671b0060 3565 }
06546391 3566 if (adapter->flags & FW_OK)
060e0c75 3567 t4_fw_bye(adapter, adapter->fn);
06546391
DM
3568}
3569
35d35682 3570#define VLAN_FEAT (NETIF_F_SG | NETIF_F_IP_CSUM | TSO_FLAGS | \
b8ff05a9
DM
3571 NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA)
3572
3573static int __devinit init_one(struct pci_dev *pdev,
3574 const struct pci_device_id *ent)
3575{
3576 int func, i, err;
3577 struct port_info *pi;
3578 unsigned int highdma = 0;
3579 struct adapter *adapter = NULL;
3580
3581 printk_once(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);
3582
3583 err = pci_request_regions(pdev, KBUILD_MODNAME);
3584 if (err) {
3585 /* Just info, some other driver may have claimed the device. */
3586 dev_info(&pdev->dev, "cannot obtain PCI resources\n");
3587 return err;
3588 }
3589
060e0c75 3590 /* We control everything through one PF */
b8ff05a9 3591 func = PCI_FUNC(pdev->devfn);
060e0c75 3592 if (func != ent->driver_data) {
204dc3c0 3593 pci_save_state(pdev); /* to restore SR-IOV later */
b8ff05a9 3594 goto sriov;
204dc3c0 3595 }
b8ff05a9
DM
3596
3597 err = pci_enable_device(pdev);
3598 if (err) {
3599 dev_err(&pdev->dev, "cannot enable PCI device\n");
3600 goto out_release_regions;
3601 }
3602
3603 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
3604 highdma = NETIF_F_HIGHDMA;
3605 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
3606 if (err) {
3607 dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
3608 "coherent allocations\n");
3609 goto out_disable_device;
3610 }
3611 } else {
3612 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
3613 if (err) {
3614 dev_err(&pdev->dev, "no usable DMA configuration\n");
3615 goto out_disable_device;
3616 }
3617 }
3618
3619 pci_enable_pcie_error_reporting(pdev);
3620 pci_set_master(pdev);
3621 pci_save_state(pdev);
3622
3623 adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
3624 if (!adapter) {
3625 err = -ENOMEM;
3626 goto out_disable_device;
3627 }
3628
3629 adapter->regs = pci_ioremap_bar(pdev, 0);
3630 if (!adapter->regs) {
3631 dev_err(&pdev->dev, "cannot map device registers\n");
3632 err = -ENOMEM;
3633 goto out_free_adapter;
3634 }
3635
3636 adapter->pdev = pdev;
3637 adapter->pdev_dev = &pdev->dev;
060e0c75 3638 adapter->fn = func;
b8ff05a9
DM
3639 adapter->name = pci_name(pdev);
3640 adapter->msg_enable = dflt_msg_enable;
3641 memset(adapter->chan_map, 0xff, sizeof(adapter->chan_map));
3642
3643 spin_lock_init(&adapter->stats_lock);
3644 spin_lock_init(&adapter->tid_release_lock);
3645
3646 INIT_WORK(&adapter->tid_release_task, process_tid_release_list);
3647
3648 err = t4_prep_adapter(adapter);
3649 if (err)
3650 goto out_unmap_bar;
3651 err = adap_init0(adapter);
3652 if (err)
3653 goto out_unmap_bar;
3654
3655 for_each_port(adapter, i) {
3656 struct net_device *netdev;
3657
3658 netdev = alloc_etherdev_mq(sizeof(struct port_info),
3659 MAX_ETH_QSETS);
3660 if (!netdev) {
3661 err = -ENOMEM;
3662 goto out_free_dev;
3663 }
3664
3665 SET_NETDEV_DEV(netdev, &pdev->dev);
3666
3667 adapter->port[i] = netdev;
3668 pi = netdev_priv(netdev);
3669 pi->adapter = adapter;
3670 pi->xact_addr_filt = -1;
3671 pi->rx_offload = RX_CSO;
3672 pi->port_id = i;
3673 netif_carrier_off(netdev);
3674 netif_tx_stop_all_queues(netdev);
3675 netdev->irq = pdev->irq;
3676
35d35682 3677 netdev->features |= NETIF_F_SG | TSO_FLAGS;
b8ff05a9 3678 netdev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
87b6cf51 3679 netdev->features |= NETIF_F_GRO | NETIF_F_RXHASH | highdma;
b8ff05a9
DM
3680 netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
3681 netdev->vlan_features = netdev->features & VLAN_FEAT;
3682
3683 netdev->netdev_ops = &cxgb4_netdev_ops;
3684 SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops);
3685 }
3686
3687 pci_set_drvdata(pdev, adapter);
3688
3689 if (adapter->flags & FW_OK) {
060e0c75 3690 err = t4_port_init(adapter, func, func, 0);
b8ff05a9
DM
3691 if (err)
3692 goto out_free_dev;
3693 }
3694
3695 /*
3696 * Configure queues and allocate tables now, they can be needed as
3697 * soon as the first register_netdev completes.
3698 */
3699 cfg_queues(adapter);
3700
3701 adapter->l2t = t4_init_l2t();
3702 if (!adapter->l2t) {
3703 /* We tolerate a lack of L2T, giving up some functionality */
3704 dev_warn(&pdev->dev, "could not allocate L2T, continuing\n");
3705 adapter->params.offload = 0;
3706 }
3707
3708 if (is_offload(adapter) && tid_init(&adapter->tids) < 0) {
3709 dev_warn(&pdev->dev, "could not allocate TID table, "
3710 "continuing\n");
3711 adapter->params.offload = 0;
3712 }
3713
f7cabcdd
DM
3714 /* See what interrupts we'll be using */
3715 if (msi > 1 && enable_msix(adapter) == 0)
3716 adapter->flags |= USING_MSIX;
3717 else if (msi > 0 && pci_enable_msi(pdev) == 0)
3718 adapter->flags |= USING_MSI;
3719
671b0060
DM
3720 err = init_rss(adapter);
3721 if (err)
3722 goto out_free_dev;
3723
b8ff05a9
DM
3724 /*
3725 * The card is now ready to go. If any errors occur during device
3726 * registration we do not fail the whole card but rather proceed only
3727 * with the ports we manage to register successfully. However we must
3728 * register at least one net device.
3729 */
3730 for_each_port(adapter, i) {
3731 err = register_netdev(adapter->port[i]);
3732 if (err)
3733 dev_warn(&pdev->dev,
3734 "cannot register net device %s, skipping\n",
3735 adapter->port[i]->name);
3736 else {
3737 /*
3738 * Change the name we use for messages to the name of
3739 * the first successfully registered interface.
3740 */
3741 if (!adapter->registered_device_map)
3742 adapter->name = adapter->port[i]->name;
3743
3744 __set_bit(i, &adapter->registered_device_map);
3745 adapter->chan_map[adap2pinfo(adapter, i)->tx_chan] = i;
3746 }
3747 }
3748 if (!adapter->registered_device_map) {
3749 dev_err(&pdev->dev, "could not register any net devices\n");
3750 goto out_free_dev;
3751 }
3752
3753 if (cxgb4_debugfs_root) {
3754 adapter->debugfs_root = debugfs_create_dir(pci_name(pdev),
3755 cxgb4_debugfs_root);
3756 setup_debugfs(adapter);
3757 }
3758
b8ff05a9
DM
3759 if (is_offload(adapter))
3760 attach_ulds(adapter);
3761
3762 print_port_info(adapter);
3763
3764sriov:
3765#ifdef CONFIG_PCI_IOV
3766 if (func < ARRAY_SIZE(num_vf) && num_vf[func] > 0)
3767 if (pci_enable_sriov(pdev, num_vf[func]) == 0)
3768 dev_info(&pdev->dev,
3769 "instantiated %u virtual functions\n",
3770 num_vf[func]);
3771#endif
3772 return 0;
3773
3774 out_free_dev:
06546391 3775 free_some_resources(adapter);
b8ff05a9
DM
3776 out_unmap_bar:
3777 iounmap(adapter->regs);
3778 out_free_adapter:
3779 kfree(adapter);
3780 out_disable_device:
3781 pci_disable_pcie_error_reporting(pdev);
3782 pci_disable_device(pdev);
3783 out_release_regions:
3784 pci_release_regions(pdev);
3785 pci_set_drvdata(pdev, NULL);
3786 return err;
3787}
3788
3789static void __devexit remove_one(struct pci_dev *pdev)
3790{
3791 struct adapter *adapter = pci_get_drvdata(pdev);
3792
3793 pci_disable_sriov(pdev);
3794
3795 if (adapter) {
3796 int i;
3797
3798 if (is_offload(adapter))
3799 detach_ulds(adapter);
3800
3801 for_each_port(adapter, i)
3802 if (test_bit(i, &adapter->registered_device_map))
3803 unregister_netdev(adapter->port[i]);
3804
3805 if (adapter->debugfs_root)
3806 debugfs_remove_recursive(adapter->debugfs_root);
3807
aaefae9b
DM
3808 if (adapter->flags & FULL_INIT_DONE)
3809 cxgb_down(adapter);
b8ff05a9 3810
06546391 3811 free_some_resources(adapter);
b8ff05a9
DM
3812 iounmap(adapter->regs);
3813 kfree(adapter);
3814 pci_disable_pcie_error_reporting(pdev);
3815 pci_disable_device(pdev);
3816 pci_release_regions(pdev);
3817 pci_set_drvdata(pdev, NULL);
3818 } else if (PCI_FUNC(pdev->devfn) > 0)
3819 pci_release_regions(pdev);
3820}
3821
3822static struct pci_driver cxgb4_driver = {
3823 .name = KBUILD_MODNAME,
3824 .id_table = cxgb4_pci_tbl,
3825 .probe = init_one,
3826 .remove = __devexit_p(remove_one),
204dc3c0 3827 .err_handler = &cxgb4_eeh,
b8ff05a9
DM
3828};
3829
3830static int __init cxgb4_init_module(void)
3831{
3832 int ret;
3833
3834 /* Debugfs support is optional, just warn if this fails */
3835 cxgb4_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL);
3836 if (!cxgb4_debugfs_root)
3837 pr_warning("could not create debugfs entry, continuing\n");
3838
3839 ret = pci_register_driver(&cxgb4_driver);
3840 if (ret < 0)
3841 debugfs_remove(cxgb4_debugfs_root);
3842 return ret;
3843}
3844
3845static void __exit cxgb4_cleanup_module(void)
3846{
3847 pci_unregister_driver(&cxgb4_driver);
3848 debugfs_remove(cxgb4_debugfs_root); /* NULL ok */
3849}
3850
3851module_init(cxgb4_init_module);
3852module_exit(cxgb4_cleanup_module);