bnxt_en: Add IPV6 hardware RFS support.
drivers/net/ethernet/broadcom/bnxt/bnxt.c
/* Broadcom NetXtreme-C/E network driver.
 *
 * Copyright (c) 2014-2016 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/module.h>

#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <asm/page.h>
#include <linux/time.h>
#include <linux/mii.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/rtc.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <net/udp_tunnel.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/cache.h>
#include <linux/log2.h>
#include <linux/aer.h>
#include <linux/bitmap.h>
#include <linux/cpu_rmap.h>

#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_ulp.h"
#include "bnxt_sriov.h"
#include "bnxt_ethtool.h"
#include "bnxt_dcb.h"

#define BNXT_TX_TIMEOUT		(5 * HZ)

static const char version[] =
	"Broadcom NetXtreme-C/E driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION "\n";

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Broadcom BCM573xx network driver");
MODULE_VERSION(DRV_MODULE_VERSION);

#define BNXT_RX_OFFSET (NET_SKB_PAD + NET_IP_ALIGN)
#define BNXT_RX_DMA_OFFSET NET_SKB_PAD
#define BNXT_RX_COPY_THRESH 256

#define BNXT_TX_PUSH_THRESH 164

enum board_idx {
	BCM57301,
	BCM57302,
	BCM57304,
	BCM57417_NPAR,
	BCM58700,
	BCM57311,
	BCM57312,
	BCM57402,
	BCM57404,
	BCM57406,
	BCM57402_NPAR,
	BCM57407,
	BCM57412,
	BCM57414,
	BCM57416,
	BCM57417,
	BCM57412_NPAR,
	BCM57314,
	BCM57417_SFP,
	BCM57416_SFP,
	BCM57404_NPAR,
	BCM57406_NPAR,
	BCM57407_SFP,
	BCM57407_NPAR,
	BCM57414_NPAR,
	BCM57416_NPAR,
	NETXTREME_E_VF,
	NETXTREME_C_VF,
};

/* indexed by enum above */
static const struct {
	char *name;
} board_info[] = {
	{ "Broadcom BCM57301 NetXtreme-C 10Gb Ethernet" },
	{ "Broadcom BCM57302 NetXtreme-C 10Gb/25Gb Ethernet" },
	{ "Broadcom BCM57304 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
	{ "Broadcom BCM57417 NetXtreme-E Ethernet Partition" },
	{ "Broadcom BCM58700 Nitro 1Gb/2.5Gb/10Gb Ethernet" },
	{ "Broadcom BCM57311 NetXtreme-C 10Gb Ethernet" },
	{ "Broadcom BCM57312 NetXtreme-C 10Gb/25Gb Ethernet" },
	{ "Broadcom BCM57402 NetXtreme-E 10Gb Ethernet" },
	{ "Broadcom BCM57404 NetXtreme-E 10Gb/25Gb Ethernet" },
	{ "Broadcom BCM57406 NetXtreme-E 10GBase-T Ethernet" },
	{ "Broadcom BCM57402 NetXtreme-E Ethernet Partition" },
	{ "Broadcom BCM57407 NetXtreme-E 10GBase-T Ethernet" },
	{ "Broadcom BCM57412 NetXtreme-E 10Gb Ethernet" },
	{ "Broadcom BCM57414 NetXtreme-E 10Gb/25Gb Ethernet" },
	{ "Broadcom BCM57416 NetXtreme-E 10GBase-T Ethernet" },
	{ "Broadcom BCM57417 NetXtreme-E 10GBase-T Ethernet" },
	{ "Broadcom BCM57412 NetXtreme-E Ethernet Partition" },
	{ "Broadcom BCM57314 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
	{ "Broadcom BCM57417 NetXtreme-E 10Gb/25Gb Ethernet" },
	{ "Broadcom BCM57416 NetXtreme-E 10Gb Ethernet" },
	{ "Broadcom BCM57404 NetXtreme-E Ethernet Partition" },
	{ "Broadcom BCM57406 NetXtreme-E Ethernet Partition" },
	{ "Broadcom BCM57407 NetXtreme-E 25Gb Ethernet" },
	{ "Broadcom BCM57407 NetXtreme-E Ethernet Partition" },
	{ "Broadcom BCM57414 NetXtreme-E Ethernet Partition" },
	{ "Broadcom BCM57416 NetXtreme-E Ethernet Partition" },
	{ "Broadcom NetXtreme-E Ethernet Virtual Function" },
	{ "Broadcom NetXtreme-C Ethernet Virtual Function" },
};

static const struct pci_device_id bnxt_pci_tbl[] = {
	{ PCI_VDEVICE(BROADCOM, 0x16c0), .driver_data = BCM57417_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16c8), .driver_data = BCM57301 },
	{ PCI_VDEVICE(BROADCOM, 0x16c9), .driver_data = BCM57302 },
	{ PCI_VDEVICE(BROADCOM, 0x16ca), .driver_data = BCM57304 },
	{ PCI_VDEVICE(BROADCOM, 0x16cc), .driver_data = BCM57417_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16cd), .driver_data = BCM58700 },
	{ PCI_VDEVICE(BROADCOM, 0x16ce), .driver_data = BCM57311 },
	{ PCI_VDEVICE(BROADCOM, 0x16cf), .driver_data = BCM57312 },
	{ PCI_VDEVICE(BROADCOM, 0x16d0), .driver_data = BCM57402 },
	{ PCI_VDEVICE(BROADCOM, 0x16d1), .driver_data = BCM57404 },
	{ PCI_VDEVICE(BROADCOM, 0x16d2), .driver_data = BCM57406 },
	{ PCI_VDEVICE(BROADCOM, 0x16d4), .driver_data = BCM57402_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16d5), .driver_data = BCM57407 },
	{ PCI_VDEVICE(BROADCOM, 0x16d6), .driver_data = BCM57412 },
	{ PCI_VDEVICE(BROADCOM, 0x16d7), .driver_data = BCM57414 },
	{ PCI_VDEVICE(BROADCOM, 0x16d8), .driver_data = BCM57416 },
	{ PCI_VDEVICE(BROADCOM, 0x16d9), .driver_data = BCM57417 },
	{ PCI_VDEVICE(BROADCOM, 0x16de), .driver_data = BCM57412_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16df), .driver_data = BCM57314 },
	{ PCI_VDEVICE(BROADCOM, 0x16e2), .driver_data = BCM57417_SFP },
	{ PCI_VDEVICE(BROADCOM, 0x16e3), .driver_data = BCM57416_SFP },
	{ PCI_VDEVICE(BROADCOM, 0x16e7), .driver_data = BCM57404_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16e8), .driver_data = BCM57406_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16e9), .driver_data = BCM57407_SFP },
	{ PCI_VDEVICE(BROADCOM, 0x16ea), .driver_data = BCM57407_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16eb), .driver_data = BCM57412_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16ec), .driver_data = BCM57414_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16ed), .driver_data = BCM57414_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16ee), .driver_data = BCM57416_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16ef), .driver_data = BCM57416_NPAR },
#ifdef CONFIG_BNXT_SRIOV
	{ PCI_VDEVICE(BROADCOM, 0x16c1), .driver_data = NETXTREME_E_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16cb), .driver_data = NETXTREME_C_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16d3), .driver_data = NETXTREME_E_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16dc), .driver_data = NETXTREME_E_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16e1), .driver_data = NETXTREME_C_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16e5), .driver_data = NETXTREME_C_VF },
#endif
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnxt_pci_tbl);

static const u16 bnxt_vf_req_snif[] = {
	HWRM_FUNC_CFG,
	HWRM_PORT_PHY_QCFG,
	HWRM_CFA_L2_FILTER_ALLOC,
};

static const u16 bnxt_async_events_arr[] = {
	ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE,
	ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD,
	ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED,
	ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE,
	ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE,
};

static bool bnxt_vf_pciid(enum board_idx idx)
{
	return (idx == NETXTREME_C_VF || idx == NETXTREME_E_VF);
}

#define DB_CP_REARM_FLAGS	(DB_KEY_CP | DB_IDX_VALID)
#define DB_CP_FLAGS		(DB_KEY_CP | DB_IDX_VALID | DB_IRQ_DIS)
#define DB_CP_IRQ_DIS_FLAGS	(DB_KEY_CP | DB_IRQ_DIS)

#define BNXT_CP_DB_REARM(db, raw_cons)					\
		writel(DB_CP_REARM_FLAGS | RING_CMP(raw_cons), db)

#define BNXT_CP_DB(db, raw_cons)					\
		writel(DB_CP_FLAGS | RING_CMP(raw_cons), db)

#define BNXT_CP_DB_IRQ_DIS(db)						\
		writel(DB_CP_IRQ_DIS_FLAGS, db)

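/* Note on the ring-space math below: tx_prod and tx_cons are free-running
 * u16 indices, so masking their difference with tx_ring_mask (ring size - 1,
 * a power of two) yields the in-flight descriptor count even across
 * wraparound, e.g. prod = 2, cons = 0xfffe gives (2 - 0xfffe) & mask = 4.
 */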
static inline u32 bnxt_tx_avail(struct bnxt *bp, struct bnxt_tx_ring_info *txr)
{
	/* Tell compiler to fetch tx indices from memory. */
	barrier();

	return bp->tx_ring_size -
		((txr->tx_prod - txr->tx_cons) & bp->tx_ring_mask);
}

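/* Note: this table is indexed by packet length divided by 512 (length >> 9
 * in bnxt_start_xmit()) and maps each size bucket to the TX_BD_FLAGS_LHINT_*
 * length-hint value placed in the TX BD.
 */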
static const u16 bnxt_lhint_arr[] = {
	TX_BD_FLAGS_LHINT_512_AND_SMALLER,
	TX_BD_FLAGS_LHINT_512_TO_1023,
	TX_BD_FLAGS_LHINT_1024_TO_2047,
	TX_BD_FLAGS_LHINT_1024_TO_2047,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
};

static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bnxt *bp = netdev_priv(dev);
	struct tx_bd *txbd;
	struct tx_bd_ext *txbd1;
	struct netdev_queue *txq;
	int i;
	dma_addr_t mapping;
	unsigned int length, pad = 0;
	u32 len, free_size, vlan_tag_flags, cfa_action, flags;
	u16 prod, last_frag;
	struct pci_dev *pdev = bp->pdev;
	struct bnxt_tx_ring_info *txr;
	struct bnxt_sw_tx_bd *tx_buf;

	i = skb_get_queue_mapping(skb);
	if (unlikely(i >= bp->tx_nr_rings)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	txr = &bp->tx_ring[i];
	txq = netdev_get_tx_queue(dev, i);
	prod = txr->tx_prod;

	free_size = bnxt_tx_avail(bp, txr);
	if (unlikely(free_size < skb_shinfo(skb)->nr_frags + 2)) {
		netif_tx_stop_queue(txq);
		return NETDEV_TX_BUSY;
	}

	length = skb->len;
	len = skb_headlen(skb);
	last_frag = skb_shinfo(skb)->nr_frags;

	txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];

	txbd->tx_bd_opaque = prod;

	tx_buf = &txr->tx_buf_ring[prod];
	tx_buf->skb = skb;
	tx_buf->nr_frags = last_frag;

	vlan_tag_flags = 0;
	cfa_action = 0;
	if (skb_vlan_tag_present(skb)) {
		vlan_tag_flags = TX_BD_CFA_META_KEY_VLAN |
				 skb_vlan_tag_get(skb);
		/* Currently supports 8021Q, 8021AD vlan offloads
		 * QINQ1, QINQ2, QINQ3 vlan headers are deprecated
		 */
		if (skb->vlan_proto == htons(ETH_P_8021Q))
			vlan_tag_flags |= 1 << TX_BD_CFA_META_TPID_SHIFT;
	}

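	/* Note: the "push" fast path below is taken only when the ring is
	 * completely empty and the packet fits under tx_push_thresh.  The
	 * packet and its BDs are written through the doorbell BAR with
	 * __iowrite64_copy()/__iowrite32_copy(), which lets the NIC send
	 * the frame without first issuing a DMA read of host memory.
	 */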
	if (free_size == bp->tx_ring_size && length <= bp->tx_push_thresh) {
		struct tx_push_buffer *tx_push_buf = txr->tx_push;
		struct tx_push_bd *tx_push = &tx_push_buf->push_bd;
		struct tx_bd_ext *tx_push1 = &tx_push->txbd2;
		void *pdata = tx_push_buf->data;
		u64 *end;
		int j, push_len;

		/* Set COAL_NOW to be ready quickly for the next push */
		tx_push->tx_bd_len_flags_type =
			cpu_to_le32((length << TX_BD_LEN_SHIFT) |
					TX_BD_TYPE_LONG_TX_BD |
					TX_BD_FLAGS_LHINT_512_AND_SMALLER |
					TX_BD_FLAGS_COAL_NOW |
					TX_BD_FLAGS_PACKET_END |
					(2 << TX_BD_FLAGS_BD_CNT_SHIFT));

		if (skb->ip_summed == CHECKSUM_PARTIAL)
			tx_push1->tx_bd_hsize_lflags =
					cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
		else
			tx_push1->tx_bd_hsize_lflags = 0;

		tx_push1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
		tx_push1->tx_bd_cfa_action = cpu_to_le32(cfa_action);

		end = pdata + length;
		end = PTR_ALIGN(end, 8) - 1;
		*end = 0;

		skb_copy_from_linear_data(skb, pdata, len);
		pdata += len;
		for (j = 0; j < last_frag; j++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
			void *fptr;

			fptr = skb_frag_address_safe(frag);
			if (!fptr)
				goto normal_tx;

			memcpy(pdata, fptr, skb_frag_size(frag));
			pdata += skb_frag_size(frag);
		}

		txbd->tx_bd_len_flags_type = tx_push->tx_bd_len_flags_type;
		txbd->tx_bd_haddr = txr->data_mapping;
		prod = NEXT_TX(prod);
		txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
		memcpy(txbd, tx_push1, sizeof(*txbd));
		prod = NEXT_TX(prod);
		tx_push->doorbell =
			cpu_to_le32(DB_KEY_TX_PUSH | DB_LONG_TX_PUSH | prod);
		txr->tx_prod = prod;

		tx_buf->is_push = 1;
		netdev_tx_sent_queue(txq, skb->len);
		wmb();	/* Sync is_push and byte queue before pushing data */

		push_len = (length + sizeof(*tx_push) + 7) / 8;
		if (push_len > 16) {
			__iowrite64_copy(txr->tx_doorbell, tx_push_buf, 16);
			__iowrite32_copy(txr->tx_doorbell + 4, tx_push_buf + 1,
					 (push_len - 16) << 1);
		} else {
			__iowrite64_copy(txr->tx_doorbell, tx_push_buf,
					 push_len);
		}

		goto tx_done;
	}

normal_tx:
	if (length < BNXT_MIN_PKT_SIZE) {
		pad = BNXT_MIN_PKT_SIZE - length;
		if (skb_pad(skb, pad)) {
			/* SKB already freed. */
			tx_buf->skb = NULL;
			return NETDEV_TX_OK;
		}
		length = BNXT_MIN_PKT_SIZE;
	}

	mapping = dma_map_single(&pdev->dev, skb->data, len, DMA_TO_DEVICE);

	if (unlikely(dma_mapping_error(&pdev->dev, mapping))) {
		dev_kfree_skb_any(skb);
		tx_buf->skb = NULL;
		return NETDEV_TX_OK;
	}

	dma_unmap_addr_set(tx_buf, mapping, mapping);
	flags = (len << TX_BD_LEN_SHIFT) | TX_BD_TYPE_LONG_TX_BD |
		((last_frag + 2) << TX_BD_FLAGS_BD_CNT_SHIFT);

	txbd->tx_bd_haddr = cpu_to_le64(mapping);

	prod = NEXT_TX(prod);
	txbd1 = (struct tx_bd_ext *)
		&txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];

	txbd1->tx_bd_hsize_lflags = 0;
	if (skb_is_gso(skb)) {
		u32 hdr_len;

		if (skb->encapsulation)
			hdr_len = skb_inner_network_offset(skb) +
				skb_inner_network_header_len(skb) +
				inner_tcp_hdrlen(skb);
		else
			hdr_len = skb_transport_offset(skb) +
				tcp_hdrlen(skb);

		txbd1->tx_bd_hsize_lflags = cpu_to_le32(TX_BD_FLAGS_LSO |
					TX_BD_FLAGS_T_IPID |
					(hdr_len << (TX_BD_HSIZE_SHIFT - 1)));
		length = skb_shinfo(skb)->gso_size;
		txbd1->tx_bd_mss = cpu_to_le32(length);
		length += hdr_len;
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		txbd1->tx_bd_hsize_lflags =
			cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
		txbd1->tx_bd_mss = 0;
	}

	length >>= 9;
	flags |= bnxt_lhint_arr[length];
	txbd->tx_bd_len_flags_type = cpu_to_le32(flags);

	txbd1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
	txbd1->tx_bd_cfa_action = cpu_to_le32(cfa_action);
	for (i = 0; i < last_frag; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		prod = NEXT_TX(prod);
		txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];

		len = skb_frag_size(frag);
		mapping = skb_frag_dma_map(&pdev->dev, frag, 0, len,
					   DMA_TO_DEVICE);

		if (unlikely(dma_mapping_error(&pdev->dev, mapping)))
			goto tx_dma_error;

		tx_buf = &txr->tx_buf_ring[prod];
		dma_unmap_addr_set(tx_buf, mapping, mapping);

		txbd->tx_bd_haddr = cpu_to_le64(mapping);

		flags = len << TX_BD_LEN_SHIFT;
		txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
	}

	flags &= ~TX_BD_LEN;
	txbd->tx_bd_len_flags_type =
		cpu_to_le32(((len + pad) << TX_BD_LEN_SHIFT) | flags |
			    TX_BD_FLAGS_PACKET_END);

	netdev_tx_sent_queue(txq, skb->len);

	/* Sync BD data before updating doorbell */
	wmb();

	prod = NEXT_TX(prod);
	txr->tx_prod = prod;

	writel(DB_KEY_TX | prod, txr->tx_doorbell);
	writel(DB_KEY_TX | prod, txr->tx_doorbell);

tx_done:

	mmiowb();

	if (unlikely(bnxt_tx_avail(bp, txr) <= MAX_SKB_FRAGS + 1)) {
		netif_tx_stop_queue(txq);

		/* netif_tx_stop_queue() must be done before checking
		 * tx index in bnxt_tx_avail() below, because in
		 * bnxt_tx_int(), we update tx index before checking for
		 * netif_tx_queue_stopped().
		 */
		smp_mb();
		if (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh)
			netif_tx_wake_queue(txq);
	}
	return NETDEV_TX_OK;

tx_dma_error:
	last_frag = i;

	/* start back at beginning and unmap skb */
	prod = txr->tx_prod;
	tx_buf = &txr->tx_buf_ring[prod];
	tx_buf->skb = NULL;
	dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
			 skb_headlen(skb), PCI_DMA_TODEVICE);
	prod = NEXT_TX(prod);

	/* unmap remaining mapped pages */
	for (i = 0; i < last_frag; i++) {
		prod = NEXT_TX(prod);
		tx_buf = &txr->tx_buf_ring[prod];
		dma_unmap_page(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
			       skb_frag_size(&skb_shinfo(skb)->frags[i]),
			       PCI_DMA_TODEVICE);
	}

	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}

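/* Note: bnxt_tx_int() reclaims completed TX descriptors.  Push-mode
 * packets carry no DMA mappings, so they skip straight to freeing the
 * skb; everything else is unmapped head-then-frags, mirroring the
 * mapping order in bnxt_start_xmit().
 */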
static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
{
	struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
	int index = txr - &bp->tx_ring[0];
	struct netdev_queue *txq = netdev_get_tx_queue(bp->dev, index);
	u16 cons = txr->tx_cons;
	struct pci_dev *pdev = bp->pdev;
	int i;
	unsigned int tx_bytes = 0;

	for (i = 0; i < nr_pkts; i++) {
		struct bnxt_sw_tx_bd *tx_buf;
		struct sk_buff *skb;
		int j, last;

		tx_buf = &txr->tx_buf_ring[cons];
		cons = NEXT_TX(cons);
		skb = tx_buf->skb;
		tx_buf->skb = NULL;

		if (tx_buf->is_push) {
			tx_buf->is_push = 0;
			goto next_tx_int;
		}

		dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
				 skb_headlen(skb), PCI_DMA_TODEVICE);
		last = tx_buf->nr_frags;

		for (j = 0; j < last; j++) {
			cons = NEXT_TX(cons);
			tx_buf = &txr->tx_buf_ring[cons];
			dma_unmap_page(
				&pdev->dev,
				dma_unmap_addr(tx_buf, mapping),
				skb_frag_size(&skb_shinfo(skb)->frags[j]),
				PCI_DMA_TODEVICE);
		}

next_tx_int:
		cons = NEXT_TX(cons);

		tx_bytes += skb->len;
		dev_kfree_skb_any(skb);
	}

	netdev_tx_completed_queue(txq, nr_pkts, tx_bytes);
	txr->tx_cons = cons;

	/* Need to make the tx_cons update visible to bnxt_start_xmit()
	 * before checking for netif_tx_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that bnxt_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	if (unlikely(netif_tx_queue_stopped(txq)) &&
	    (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
		__netif_tx_lock(txq, smp_processor_id());
		if (netif_tx_queue_stopped(txq) &&
		    bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh &&
		    txr->dev_state != BNXT_DEV_STATE_CLOSING)
			netif_tx_wake_queue(txq);
		__netif_tx_unlock(txq);
	}
}

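/* Note: RX buffers are plain kmalloc() buffers handed to build_skb() on
 * completion.  DMA mapping starts BNXT_RX_DMA_OFFSET bytes in, so the
 * hardware never writes into the headroom the stack expects in front of
 * the packet data.
 */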
static inline u8 *__bnxt_alloc_rx_data(struct bnxt *bp, dma_addr_t *mapping,
				       gfp_t gfp)
{
	u8 *data;
	struct pci_dev *pdev = bp->pdev;

	data = kmalloc(bp->rx_buf_size, gfp);
	if (!data)
		return NULL;

	*mapping = dma_map_single(&pdev->dev, data + BNXT_RX_DMA_OFFSET,
				  bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);

	if (dma_mapping_error(&pdev->dev, *mapping)) {
		kfree(data);
		data = NULL;
	}
	return data;
}

static inline int bnxt_alloc_rx_data(struct bnxt *bp,
				     struct bnxt_rx_ring_info *rxr,
				     u16 prod, gfp_t gfp)
{
	struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
	struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[prod];
	u8 *data;
	dma_addr_t mapping;

	data = __bnxt_alloc_rx_data(bp, &mapping, gfp);
	if (!data)
		return -ENOMEM;

	rx_buf->data = data;
	dma_unmap_addr_set(rx_buf, mapping, mapping);

	rxbd->rx_bd_haddr = cpu_to_le64(mapping);

	return 0;
}

static void bnxt_reuse_rx_data(struct bnxt_rx_ring_info *rxr, u16 cons,
			       u8 *data)
{
	u16 prod = rxr->rx_prod;
	struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
	struct rx_bd *cons_bd, *prod_bd;

	prod_rx_buf = &rxr->rx_buf_ring[prod];
	cons_rx_buf = &rxr->rx_buf_ring[cons];

	prod_rx_buf->data = data;

	dma_unmap_addr_set(prod_rx_buf, mapping,
			   dma_unmap_addr(cons_rx_buf, mapping));

	prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
	cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];

	prod_bd->rx_bd_haddr = cons_bd->rx_bd_haddr;
}

static inline u16 bnxt_find_next_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx)
{
	u16 next, max = rxr->rx_agg_bmap_size;

	next = find_next_zero_bit(rxr->rx_agg_bmap, max, idx);
	if (next >= max)
		next = find_first_zero_bit(rxr->rx_agg_bmap, max);
	return next;
}

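/* Note: aggregation-ring buffers are BNXT_RX_PAGE_SIZE chunks.  On
 * architectures where PAGE_SIZE is larger (e.g. 64K-page systems), one
 * page is handed out piecewise, with get_page() taking an extra
 * reference for each chunk still outstanding.
 */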
static inline int bnxt_alloc_rx_page(struct bnxt *bp,
				     struct bnxt_rx_ring_info *rxr,
				     u16 prod, gfp_t gfp)
{
	struct rx_bd *rxbd =
		&rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
	struct bnxt_sw_rx_agg_bd *rx_agg_buf;
	struct pci_dev *pdev = bp->pdev;
	struct page *page;
	dma_addr_t mapping;
	u16 sw_prod = rxr->rx_sw_agg_prod;
	unsigned int offset = 0;

	if (PAGE_SIZE > BNXT_RX_PAGE_SIZE) {
		page = rxr->rx_page;
		if (!page) {
			page = alloc_page(gfp);
			if (!page)
				return -ENOMEM;
			rxr->rx_page = page;
			rxr->rx_page_offset = 0;
		}
		offset = rxr->rx_page_offset;
		rxr->rx_page_offset += BNXT_RX_PAGE_SIZE;
		if (rxr->rx_page_offset == PAGE_SIZE)
			rxr->rx_page = NULL;
		else
			get_page(page);
	} else {
		page = alloc_page(gfp);
		if (!page)
			return -ENOMEM;
	}

	mapping = dma_map_page(&pdev->dev, page, offset, BNXT_RX_PAGE_SIZE,
			       PCI_DMA_FROMDEVICE);
	if (dma_mapping_error(&pdev->dev, mapping)) {
		__free_page(page);
		return -EIO;
	}

	if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
		sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);

	__set_bit(sw_prod, rxr->rx_agg_bmap);
	rx_agg_buf = &rxr->rx_agg_ring[sw_prod];
	rxr->rx_sw_agg_prod = NEXT_RX_AGG(sw_prod);

	rx_agg_buf->page = page;
	rx_agg_buf->offset = offset;
	rx_agg_buf->mapping = mapping;
	rxbd->rx_bd_haddr = cpu_to_le64(mapping);
	rxbd->rx_bd_opaque = sw_prod;
	return 0;
}

static void bnxt_reuse_rx_agg_bufs(struct bnxt_napi *bnapi, u16 cp_cons,
				   u32 agg_bufs)
{
	struct bnxt *bp = bnapi->bp;
	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
	struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
	u16 prod = rxr->rx_agg_prod;
	u16 sw_prod = rxr->rx_sw_agg_prod;
	u32 i;

	for (i = 0; i < agg_bufs; i++) {
		u16 cons;
		struct rx_agg_cmp *agg;
		struct bnxt_sw_rx_agg_bd *cons_rx_buf, *prod_rx_buf;
		struct rx_bd *prod_bd;
		struct page *page;

		agg = (struct rx_agg_cmp *)
			&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
		cons = agg->rx_agg_cmp_opaque;
		__clear_bit(cons, rxr->rx_agg_bmap);

		if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
			sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);

		__set_bit(sw_prod, rxr->rx_agg_bmap);
		prod_rx_buf = &rxr->rx_agg_ring[sw_prod];
		cons_rx_buf = &rxr->rx_agg_ring[cons];

		/* It is possible for sw_prod to be equal to cons, so
		 * set cons_rx_buf->page to NULL first.
		 */
		page = cons_rx_buf->page;
		cons_rx_buf->page = NULL;
		prod_rx_buf->page = page;
		prod_rx_buf->offset = cons_rx_buf->offset;

		prod_rx_buf->mapping = cons_rx_buf->mapping;

		prod_bd = &rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)];

		prod_bd->rx_bd_haddr = cpu_to_le64(cons_rx_buf->mapping);
		prod_bd->rx_bd_opaque = sw_prod;

		prod = NEXT_RX_AGG(prod);
		sw_prod = NEXT_RX_AGG(sw_prod);
		cp_cons = NEXT_CMP(cp_cons);
	}
	rxr->rx_agg_prod = prod;
	rxr->rx_sw_agg_prod = sw_prod;
}

static struct sk_buff *bnxt_rx_skb(struct bnxt *bp,
				   struct bnxt_rx_ring_info *rxr, u16 cons,
				   u16 prod, u8 *data, dma_addr_t dma_addr,
				   unsigned int len)
{
	int err;
	struct sk_buff *skb;

	err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
	if (unlikely(err)) {
		bnxt_reuse_rx_data(rxr, cons, data);
		return NULL;
	}

	skb = build_skb(data, 0);
	dma_unmap_single(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
			 PCI_DMA_FROMDEVICE);
	if (!skb) {
		kfree(data);
		return NULL;
	}

	skb_reserve(skb, BNXT_RX_OFFSET);
	skb_put(skb, len);
	return skb;
}

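/* Note: bnxt_rx_pages() attaches each aggregation buffer to the skb as a
 * page fragment.  If re-filling the agg ring fails mid-way, the partially
 * built skb is dropped and the remaining completions are recycled via
 * bnxt_reuse_rx_agg_bufs().
 */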
static struct sk_buff *bnxt_rx_pages(struct bnxt *bp, struct bnxt_napi *bnapi,
				     struct sk_buff *skb, u16 cp_cons,
				     u32 agg_bufs)
{
	struct pci_dev *pdev = bp->pdev;
	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
	struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
	u16 prod = rxr->rx_agg_prod;
	u32 i;

	for (i = 0; i < agg_bufs; i++) {
		u16 cons, frag_len;
		struct rx_agg_cmp *agg;
		struct bnxt_sw_rx_agg_bd *cons_rx_buf;
		struct page *page;
		dma_addr_t mapping;

		agg = (struct rx_agg_cmp *)
			&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
		cons = agg->rx_agg_cmp_opaque;
		frag_len = (le32_to_cpu(agg->rx_agg_cmp_len_flags_type) &
			    RX_AGG_CMP_LEN) >> RX_AGG_CMP_LEN_SHIFT;

		cons_rx_buf = &rxr->rx_agg_ring[cons];
		skb_fill_page_desc(skb, i, cons_rx_buf->page,
				   cons_rx_buf->offset, frag_len);
		__clear_bit(cons, rxr->rx_agg_bmap);

		/* It is possible for bnxt_alloc_rx_page() to allocate
		 * a sw_prod index that equals the cons index, so we
		 * need to clear the cons entry now.
		 */
		mapping = dma_unmap_addr(cons_rx_buf, mapping);
		page = cons_rx_buf->page;
		cons_rx_buf->page = NULL;

		if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_ATOMIC) != 0) {
			struct skb_shared_info *shinfo;
			unsigned int nr_frags;

			shinfo = skb_shinfo(skb);
			nr_frags = --shinfo->nr_frags;
			__skb_frag_set_page(&shinfo->frags[nr_frags], NULL);

			dev_kfree_skb(skb);

			cons_rx_buf->page = page;

			/* Update prod since possibly some pages have been
			 * allocated already.
			 */
			rxr->rx_agg_prod = prod;
			bnxt_reuse_rx_agg_bufs(bnapi, cp_cons, agg_bufs - i);
			return NULL;
		}

		dma_unmap_page(&pdev->dev, mapping, BNXT_RX_PAGE_SIZE,
			       PCI_DMA_FROMDEVICE);

		skb->data_len += frag_len;
		skb->len += frag_len;
		skb->truesize += PAGE_SIZE;

		prod = NEXT_RX_AGG(prod);
		cp_cons = NEXT_CMP(cp_cons);
	}
	rxr->rx_agg_prod = prod;
	return skb;
}

static int bnxt_agg_bufs_valid(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
			       u8 agg_bufs, u32 *raw_cons)
{
	u16 last;
	struct rx_agg_cmp *agg;

	*raw_cons = ADV_RAW_CMP(*raw_cons, agg_bufs);
	last = RING_CMP(*raw_cons);
	agg = (struct rx_agg_cmp *)
		&cpr->cp_desc_ring[CP_RING(last)][CP_IDX(last)];
	return RX_AGG_CMP_VALID(agg, *raw_cons);
}

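/* Note: for packets at or below rx_copy_thresh (BNXT_RX_COPY_THRESH,
 * 256 bytes), the data is copied into a fresh skb and the original ring
 * buffer is reused in place, trading one memcpy for a buffer allocation.
 */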
static inline struct sk_buff *bnxt_copy_skb(struct bnxt_napi *bnapi, u8 *data,
					    unsigned int len,
					    dma_addr_t mapping)
{
	struct bnxt *bp = bnapi->bp;
	struct pci_dev *pdev = bp->pdev;
	struct sk_buff *skb;

	skb = napi_alloc_skb(&bnapi->napi, len);
	if (!skb)
		return NULL;

	dma_sync_single_for_cpu(&pdev->dev, mapping,
				bp->rx_copy_thresh, PCI_DMA_FROMDEVICE);

	memcpy(skb->data - BNXT_RX_OFFSET, data, len + BNXT_RX_OFFSET);

	dma_sync_single_for_device(&pdev->dev, mapping,
				   bp->rx_copy_thresh,
				   PCI_DMA_FROMDEVICE);

	skb_put(skb, len);
	return skb;
}

static int bnxt_discard_rx(struct bnxt *bp, struct bnxt_napi *bnapi,
			   u32 *raw_cons, void *cmp)
{
	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
	struct rx_cmp *rxcmp = cmp;
	u32 tmp_raw_cons = *raw_cons;
	u8 cmp_type, agg_bufs = 0;

	cmp_type = RX_CMP_TYPE(rxcmp);

	if (cmp_type == CMP_TYPE_RX_L2_CMP) {
		agg_bufs = (le32_to_cpu(rxcmp->rx_cmp_misc_v1) &
			    RX_CMP_AGG_BUFS) >>
			   RX_CMP_AGG_BUFS_SHIFT;
	} else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
		struct rx_tpa_end_cmp *tpa_end = cmp;

		agg_bufs = (le32_to_cpu(tpa_end->rx_tpa_end_cmp_misc_v1) &
			    RX_TPA_END_CMP_AGG_BUFS) >>
			   RX_TPA_END_CMP_AGG_BUFS_SHIFT;
	}

	if (agg_bufs) {
		if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
			return -EBUSY;
	}
	*raw_cons = tmp_raw_cons;
	return 0;
}

static void bnxt_sched_reset(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
{
	if (!rxr->bnapi->in_reset) {
		rxr->bnapi->in_reset = true;
		set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
		schedule_work(&bp->sp_task);
	}
	rxr->rx_next_cons = 0xffff;
}

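/* Note: on a TPA (hardware receive aggregation) start event, the buffer
 * that matched the flow is moved into rx_tpa[agg_id] and its ring slot is
 * restocked with the spare buffer previously parked there, so the hardware
 * can keep appending segments until the TPA end completion arrives.
 */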
static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
			   struct rx_tpa_start_cmp *tpa_start,
			   struct rx_tpa_start_cmp_ext *tpa_start1)
{
	u8 agg_id = TPA_START_AGG_ID(tpa_start);
	u16 cons, prod;
	struct bnxt_tpa_info *tpa_info;
	struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
	struct rx_bd *prod_bd;
	dma_addr_t mapping;

	cons = tpa_start->rx_tpa_start_cmp_opaque;
	prod = rxr->rx_prod;
	cons_rx_buf = &rxr->rx_buf_ring[cons];
	prod_rx_buf = &rxr->rx_buf_ring[prod];
	tpa_info = &rxr->rx_tpa[agg_id];

	if (unlikely(cons != rxr->rx_next_cons)) {
		bnxt_sched_reset(bp, rxr);
		return;
	}

	prod_rx_buf->data = tpa_info->data;

	mapping = tpa_info->mapping;
	dma_unmap_addr_set(prod_rx_buf, mapping, mapping);

	prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];

	prod_bd->rx_bd_haddr = cpu_to_le64(mapping);

	tpa_info->data = cons_rx_buf->data;
	cons_rx_buf->data = NULL;
	tpa_info->mapping = dma_unmap_addr(cons_rx_buf, mapping);

	tpa_info->len =
		le32_to_cpu(tpa_start->rx_tpa_start_cmp_len_flags_type) >>
				RX_TPA_START_CMP_LEN_SHIFT;
	if (likely(TPA_START_HASH_VALID(tpa_start))) {
		u32 hash_type = TPA_START_HASH_TYPE(tpa_start);

		tpa_info->hash_type = PKT_HASH_TYPE_L4;
		tpa_info->gso_type = SKB_GSO_TCPV4;
		/* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
		if (hash_type == 3)
			tpa_info->gso_type = SKB_GSO_TCPV6;
		tpa_info->rss_hash =
			le32_to_cpu(tpa_start->rx_tpa_start_cmp_rss_hash);
	} else {
		tpa_info->hash_type = PKT_HASH_TYPE_NONE;
		tpa_info->gso_type = 0;
		if (netif_msg_rx_err(bp))
			netdev_warn(bp->dev, "TPA packet without valid hash\n");
	}
	tpa_info->flags2 = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_flags2);
	tpa_info->metadata = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_metadata);
	tpa_info->hdr_info = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_hdr_info);

	rxr->rx_prod = NEXT_RX(prod);
	cons = NEXT_RX(cons);
	rxr->rx_next_cons = NEXT_RX(cons);
	cons_rx_buf = &rxr->rx_buf_ring[cons];

	bnxt_reuse_rx_data(rxr, cons, cons_rx_buf->data);
	rxr->rx_prod = NEXT_RX(rxr->rx_prod);
	cons_rx_buf->data = NULL;
}

static void bnxt_abort_tpa(struct bnxt *bp, struct bnxt_napi *bnapi,
			   u16 cp_cons, u32 agg_bufs)
{
	if (agg_bufs)
		bnxt_reuse_rx_agg_bufs(bnapi, cp_cons, agg_bufs);
}

1005
94758f8d
MC
1006static struct sk_buff *bnxt_gro_func_5731x(struct bnxt_tpa_info *tpa_info,
1007 int payload_off, int tcp_ts,
1008 struct sk_buff *skb)
1009{
1010#ifdef CONFIG_INET
1011 struct tcphdr *th;
1012 int len, nw_off;
1013 u16 outer_ip_off, inner_ip_off, inner_mac_off;
1014 u32 hdr_info = tpa_info->hdr_info;
1015 bool loopback = false;
1016
1017 inner_ip_off = BNXT_TPA_INNER_L3_OFF(hdr_info);
1018 inner_mac_off = BNXT_TPA_INNER_L2_OFF(hdr_info);
1019 outer_ip_off = BNXT_TPA_OUTER_L3_OFF(hdr_info);
1020
1021 /* If the packet is an internal loopback packet, the offsets will
1022 * have an extra 4 bytes.
1023 */
1024 if (inner_mac_off == 4) {
1025 loopback = true;
1026 } else if (inner_mac_off > 4) {
1027 __be16 proto = *((__be16 *)(skb->data + inner_ip_off -
1028 ETH_HLEN - 2));
1029
1030 /* We only support inner iPv4/ipv6. If we don't see the
1031 * correct protocol ID, it must be a loopback packet where
1032 * the offsets are off by 4.
1033 */
09a7636a 1034 if (proto != htons(ETH_P_IP) && proto != htons(ETH_P_IPV6))
94758f8d
MC
1035 loopback = true;
1036 }
1037 if (loopback) {
1038 /* internal loopback packet, subtract all offsets by 4 */
1039 inner_ip_off -= 4;
1040 inner_mac_off -= 4;
1041 outer_ip_off -= 4;
1042 }
1043
1044 nw_off = inner_ip_off - ETH_HLEN;
1045 skb_set_network_header(skb, nw_off);
1046 if (tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_IP_TYPE) {
1047 struct ipv6hdr *iph = ipv6_hdr(skb);
1048
1049 skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
1050 len = skb->len - skb_transport_offset(skb);
1051 th = tcp_hdr(skb);
1052 th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
1053 } else {
1054 struct iphdr *iph = ip_hdr(skb);
1055
1056 skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
1057 len = skb->len - skb_transport_offset(skb);
1058 th = tcp_hdr(skb);
1059 th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
1060 }
1061
1062 if (inner_mac_off) { /* tunnel */
1063 struct udphdr *uh = NULL;
1064 __be16 proto = *((__be16 *)(skb->data + outer_ip_off -
1065 ETH_HLEN - 2));
1066
1067 if (proto == htons(ETH_P_IP)) {
1068 struct iphdr *iph = (struct iphdr *)skb->data;
1069
1070 if (iph->protocol == IPPROTO_UDP)
1071 uh = (struct udphdr *)(iph + 1);
1072 } else {
1073 struct ipv6hdr *iph = (struct ipv6hdr *)skb->data;
1074
1075 if (iph->nexthdr == IPPROTO_UDP)
1076 uh = (struct udphdr *)(iph + 1);
1077 }
1078 if (uh) {
1079 if (uh->check)
1080 skb_shinfo(skb)->gso_type |=
1081 SKB_GSO_UDP_TUNNEL_CSUM;
1082 else
1083 skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
1084 }
1085 }
1086#endif
1087 return skb;
1088}
1089
c0c050c5
MC
1090#define BNXT_IPV4_HDR_SIZE (sizeof(struct iphdr) + sizeof(struct tcphdr))
1091#define BNXT_IPV6_HDR_SIZE (sizeof(struct ipv6hdr) + sizeof(struct tcphdr))
1092
309369c9
MC
1093static struct sk_buff *bnxt_gro_func_5730x(struct bnxt_tpa_info *tpa_info,
1094 int payload_off, int tcp_ts,
c0c050c5
MC
1095 struct sk_buff *skb)
1096{
d1611c3a 1097#ifdef CONFIG_INET
c0c050c5 1098 struct tcphdr *th;
309369c9 1099 int len, nw_off, tcp_opt_len;
27e24189 1100
309369c9 1101 if (tcp_ts)
c0c050c5
MC
1102 tcp_opt_len = 12;
1103
c0c050c5
MC
1104 if (tpa_info->gso_type == SKB_GSO_TCPV4) {
1105 struct iphdr *iph;
1106
1107 nw_off = payload_off - BNXT_IPV4_HDR_SIZE - tcp_opt_len -
1108 ETH_HLEN;
1109 skb_set_network_header(skb, nw_off);
1110 iph = ip_hdr(skb);
1111 skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
1112 len = skb->len - skb_transport_offset(skb);
1113 th = tcp_hdr(skb);
1114 th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
1115 } else if (tpa_info->gso_type == SKB_GSO_TCPV6) {
1116 struct ipv6hdr *iph;
1117
1118 nw_off = payload_off - BNXT_IPV6_HDR_SIZE - tcp_opt_len -
1119 ETH_HLEN;
1120 skb_set_network_header(skb, nw_off);
1121 iph = ipv6_hdr(skb);
1122 skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
1123 len = skb->len - skb_transport_offset(skb);
1124 th = tcp_hdr(skb);
1125 th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
1126 } else {
1127 dev_kfree_skb_any(skb);
1128 return NULL;
1129 }
c0c050c5
MC
1130
1131 if (nw_off) { /* tunnel */
1132 struct udphdr *uh = NULL;
1133
1134 if (skb->protocol == htons(ETH_P_IP)) {
1135 struct iphdr *iph = (struct iphdr *)skb->data;
1136
1137 if (iph->protocol == IPPROTO_UDP)
1138 uh = (struct udphdr *)(iph + 1);
1139 } else {
1140 struct ipv6hdr *iph = (struct ipv6hdr *)skb->data;
1141
1142 if (iph->nexthdr == IPPROTO_UDP)
1143 uh = (struct udphdr *)(iph + 1);
1144 }
1145 if (uh) {
1146 if (uh->check)
1147 skb_shinfo(skb)->gso_type |=
1148 SKB_GSO_UDP_TUNNEL_CSUM;
1149 else
1150 skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
1151 }
1152 }
1153#endif
1154 return skb;
1155}
1156
309369c9
MC
1157static inline struct sk_buff *bnxt_gro_skb(struct bnxt *bp,
1158 struct bnxt_tpa_info *tpa_info,
1159 struct rx_tpa_end_cmp *tpa_end,
1160 struct rx_tpa_end_cmp_ext *tpa_end1,
1161 struct sk_buff *skb)
1162{
1163#ifdef CONFIG_INET
1164 int payload_off;
1165 u16 segs;
1166
1167 segs = TPA_END_TPA_SEGS(tpa_end);
1168 if (segs == 1)
1169 return skb;
1170
1171 NAPI_GRO_CB(skb)->count = segs;
1172 skb_shinfo(skb)->gso_size =
1173 le32_to_cpu(tpa_end1->rx_tpa_end_cmp_seg_len);
1174 skb_shinfo(skb)->gso_type = tpa_info->gso_type;
1175 payload_off = (le32_to_cpu(tpa_end->rx_tpa_end_cmp_misc_v1) &
1176 RX_TPA_END_CMP_PAYLOAD_OFFSET) >>
1177 RX_TPA_END_CMP_PAYLOAD_OFFSET_SHIFT;
1178 skb = bp->gro_func(tpa_info, payload_off, TPA_END_GRO_TS(tpa_end), skb);
5910906c
MC
1179 if (likely(skb))
1180 tcp_gro_complete(skb);
309369c9
MC
1181#endif
1182 return skb;
1183}
1184
c0c050c5
MC
1185static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
1186 struct bnxt_napi *bnapi,
1187 u32 *raw_cons,
1188 struct rx_tpa_end_cmp *tpa_end,
1189 struct rx_tpa_end_cmp_ext *tpa_end1,
1190 bool *agg_event)
1191{
1192 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
b6ab4b01 1193 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
c0c050c5
MC
1194 u8 agg_id = TPA_END_AGG_ID(tpa_end);
1195 u8 *data, agg_bufs;
1196 u16 cp_cons = RING_CMP(*raw_cons);
1197 unsigned int len;
1198 struct bnxt_tpa_info *tpa_info;
1199 dma_addr_t mapping;
1200 struct sk_buff *skb;
1201
fa7e2812
MC
1202 if (unlikely(bnapi->in_reset)) {
1203 int rc = bnxt_discard_rx(bp, bnapi, raw_cons, tpa_end);
1204
1205 if (rc < 0)
1206 return ERR_PTR(-EBUSY);
1207 return NULL;
1208 }
1209
c0c050c5
MC
1210 tpa_info = &rxr->rx_tpa[agg_id];
1211 data = tpa_info->data;
1212 prefetch(data);
1213 len = tpa_info->len;
1214 mapping = tpa_info->mapping;
1215
1216 agg_bufs = (le32_to_cpu(tpa_end->rx_tpa_end_cmp_misc_v1) &
1217 RX_TPA_END_CMP_AGG_BUFS) >> RX_TPA_END_CMP_AGG_BUFS_SHIFT;
1218
1219 if (agg_bufs) {
1220 if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, raw_cons))
1221 return ERR_PTR(-EBUSY);
1222
1223 *agg_event = true;
1224 cp_cons = NEXT_CMP(cp_cons);
1225 }
1226
1227 if (unlikely(agg_bufs > MAX_SKB_FRAGS)) {
1228 bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs);
1229 netdev_warn(bp->dev, "TPA frags %d exceeded MAX_SKB_FRAGS %d\n",
1230 agg_bufs, (int)MAX_SKB_FRAGS);
1231 return NULL;
1232 }
1233
1234 if (len <= bp->rx_copy_thresh) {
1235 skb = bnxt_copy_skb(bnapi, data, len, mapping);
1236 if (!skb) {
1237 bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs);
1238 return NULL;
1239 }
1240 } else {
1241 u8 *new_data;
1242 dma_addr_t new_mapping;
1243
1244 new_data = __bnxt_alloc_rx_data(bp, &new_mapping, GFP_ATOMIC);
1245 if (!new_data) {
1246 bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs);
1247 return NULL;
1248 }
1249
1250 tpa_info->data = new_data;
1251 tpa_info->mapping = new_mapping;
1252
1253 skb = build_skb(data, 0);
1254 dma_unmap_single(&bp->pdev->dev, mapping, bp->rx_buf_use_size,
1255 PCI_DMA_FROMDEVICE);
1256
1257 if (!skb) {
1258 kfree(data);
1259 bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs);
1260 return NULL;
1261 }
1262 skb_reserve(skb, BNXT_RX_OFFSET);
1263 skb_put(skb, len);
1264 }
1265
1266 if (agg_bufs) {
1267 skb = bnxt_rx_pages(bp, bnapi, skb, cp_cons, agg_bufs);
1268 if (!skb) {
1269 /* Page reuse already handled by bnxt_rx_pages(). */
1270 return NULL;
1271 }
1272 }
1273 skb->protocol = eth_type_trans(skb, bp->dev);
1274
1275 if (tpa_info->hash_type != PKT_HASH_TYPE_NONE)
1276 skb_set_hash(skb, tpa_info->rss_hash, tpa_info->hash_type);
1277
8852ddb4
MC
1278 if ((tpa_info->flags2 & RX_CMP_FLAGS2_META_FORMAT_VLAN) &&
1279 (skb->dev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
c0c050c5
MC
1280 u16 vlan_proto = tpa_info->metadata >>
1281 RX_CMP_FLAGS2_METADATA_TPID_SFT;
8852ddb4 1282 u16 vtag = tpa_info->metadata & RX_CMP_FLAGS2_METADATA_VID_MASK;
c0c050c5 1283
8852ddb4 1284 __vlan_hwaccel_put_tag(skb, htons(vlan_proto), vtag);
c0c050c5
MC
1285 }
1286
1287 skb_checksum_none_assert(skb);
1288 if (likely(tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_L4_CS_CALC)) {
1289 skb->ip_summed = CHECKSUM_UNNECESSARY;
1290 skb->csum_level =
1291 (tpa_info->flags2 & RX_CMP_FLAGS2_T_L4_CS_CALC) >> 3;
1292 }
1293
1294 if (TPA_END_GRO(tpa_end))
309369c9 1295 skb = bnxt_gro_skb(bp, tpa_info, tpa_end, tpa_end1, skb);
c0c050c5
MC
1296
1297 return skb;
1298}
1299
1300/* returns the following:
1301 * 1 - 1 packet successfully received
1302 * 0 - successful TPA_START, packet not completed yet
1303 * -EBUSY - completion ring does not have all the agg buffers yet
1304 * -ENOMEM - packet aborted due to out of memory
1305 * -EIO - packet aborted due to hw error indicated in BD
1306 */
1307static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons,
1308 bool *agg_event)
1309{
1310 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
b6ab4b01 1311 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
c0c050c5
MC
1312 struct net_device *dev = bp->dev;
1313 struct rx_cmp *rxcmp;
1314 struct rx_cmp_ext *rxcmp1;
1315 u32 tmp_raw_cons = *raw_cons;
1316 u16 cons, prod, cp_cons = RING_CMP(tmp_raw_cons);
1317 struct bnxt_sw_rx_bd *rx_buf;
1318 unsigned int len;
1319 u8 *data, agg_bufs, cmp_type;
1320 dma_addr_t dma_addr;
1321 struct sk_buff *skb;
1322 int rc = 0;
1323
1324 rxcmp = (struct rx_cmp *)
1325 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
1326
1327 tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
1328 cp_cons = RING_CMP(tmp_raw_cons);
1329 rxcmp1 = (struct rx_cmp_ext *)
1330 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
1331
1332 if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
1333 return -EBUSY;
1334
1335 cmp_type = RX_CMP_TYPE(rxcmp);
1336
1337 prod = rxr->rx_prod;
1338
1339 if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP) {
1340 bnxt_tpa_start(bp, rxr, (struct rx_tpa_start_cmp *)rxcmp,
1341 (struct rx_tpa_start_cmp_ext *)rxcmp1);
1342
1343 goto next_rx_no_prod;
1344
1345 } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
1346 skb = bnxt_tpa_end(bp, bnapi, &tmp_raw_cons,
1347 (struct rx_tpa_end_cmp *)rxcmp,
1348 (struct rx_tpa_end_cmp_ext *)rxcmp1,
1349 agg_event);
1350
1351 if (unlikely(IS_ERR(skb)))
1352 return -EBUSY;
1353
1354 rc = -ENOMEM;
1355 if (likely(skb)) {
1356 skb_record_rx_queue(skb, bnapi->index);
b356a2e7 1357 napi_gro_receive(&bnapi->napi, skb);
c0c050c5
MC
1358 rc = 1;
1359 }
1360 goto next_rx_no_prod;
1361 }
1362
1363 cons = rxcmp->rx_cmp_opaque;
1364 rx_buf = &rxr->rx_buf_ring[cons];
1365 data = rx_buf->data;
fa7e2812
MC
1366 if (unlikely(cons != rxr->rx_next_cons)) {
1367 int rc1 = bnxt_discard_rx(bp, bnapi, raw_cons, rxcmp);
1368
1369 bnxt_sched_reset(bp, rxr);
1370 return rc1;
1371 }
c0c050c5
MC
1372 prefetch(data);
1373
1374 agg_bufs = (le32_to_cpu(rxcmp->rx_cmp_misc_v1) & RX_CMP_AGG_BUFS) >>
1375 RX_CMP_AGG_BUFS_SHIFT;
1376
1377 if (agg_bufs) {
1378 if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
1379 return -EBUSY;
1380
1381 cp_cons = NEXT_CMP(cp_cons);
1382 *agg_event = true;
1383 }
1384
1385 rx_buf->data = NULL;
1386 if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L2_ERRORS) {
1387 bnxt_reuse_rx_data(rxr, cons, data);
1388 if (agg_bufs)
1389 bnxt_reuse_rx_agg_bufs(bnapi, cp_cons, agg_bufs);
1390
1391 rc = -EIO;
1392 goto next_rx;
1393 }
1394
1395 len = le32_to_cpu(rxcmp->rx_cmp_len_flags_type) >> RX_CMP_LEN_SHIFT;
1396 dma_addr = dma_unmap_addr(rx_buf, mapping);
1397
1398 if (len <= bp->rx_copy_thresh) {
1399 skb = bnxt_copy_skb(bnapi, data, len, dma_addr);
1400 bnxt_reuse_rx_data(rxr, cons, data);
1401 if (!skb) {
1402 rc = -ENOMEM;
1403 goto next_rx;
1404 }
1405 } else {
1406 skb = bnxt_rx_skb(bp, rxr, cons, prod, data, dma_addr, len);
1407 if (!skb) {
1408 rc = -ENOMEM;
1409 goto next_rx;
1410 }
1411 }
1412
1413 if (agg_bufs) {
1414 skb = bnxt_rx_pages(bp, bnapi, skb, cp_cons, agg_bufs);
1415 if (!skb) {
1416 rc = -ENOMEM;
1417 goto next_rx;
1418 }
1419 }
1420
1421 if (RX_CMP_HASH_VALID(rxcmp)) {
1422 u32 hash_type = RX_CMP_HASH_TYPE(rxcmp);
1423 enum pkt_hash_types type = PKT_HASH_TYPE_L4;
1424
1425 /* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
1426 if (hash_type != 1 && hash_type != 3)
1427 type = PKT_HASH_TYPE_L3;
1428 skb_set_hash(skb, le32_to_cpu(rxcmp->rx_cmp_rss_hash), type);
1429 }
1430
1431 skb->protocol = eth_type_trans(skb, dev);
1432
8852ddb4
MC
1433 if ((rxcmp1->rx_cmp_flags2 &
1434 cpu_to_le32(RX_CMP_FLAGS2_META_FORMAT_VLAN)) &&
1435 (skb->dev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
c0c050c5 1436 u32 meta_data = le32_to_cpu(rxcmp1->rx_cmp_meta_data);
8852ddb4 1437 u16 vtag = meta_data & RX_CMP_FLAGS2_METADATA_VID_MASK;
c0c050c5
MC
1438 u16 vlan_proto = meta_data >> RX_CMP_FLAGS2_METADATA_TPID_SFT;
1439
8852ddb4 1440 __vlan_hwaccel_put_tag(skb, htons(vlan_proto), vtag);
c0c050c5
MC
1441 }
1442
1443 skb_checksum_none_assert(skb);
1444 if (RX_CMP_L4_CS_OK(rxcmp1)) {
1445 if (dev->features & NETIF_F_RXCSUM) {
1446 skb->ip_summed = CHECKSUM_UNNECESSARY;
1447 skb->csum_level = RX_CMP_ENCAP(rxcmp1);
1448 }
1449 } else {
665e350d
SB
1450 if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L4_CS_ERR_BITS) {
1451 if (dev->features & NETIF_F_RXCSUM)
1452 cpr->rx_l4_csum_errors++;
1453 }
c0c050c5
MC
1454 }
1455
1456 skb_record_rx_queue(skb, bnapi->index);
b356a2e7 1457 napi_gro_receive(&bnapi->napi, skb);
c0c050c5
MC
1458 rc = 1;
1459
1460next_rx:
1461 rxr->rx_prod = NEXT_RX(prod);
376a5b86 1462 rxr->rx_next_cons = NEXT_RX(cons);
c0c050c5
MC
1463
1464next_rx_no_prod:
1465 *raw_cons = tmp_raw_cons;
1466
1467 return rc;
1468}
1469
4bb13abf 1470#define BNXT_GET_EVENT_PORT(data) \
87c374de
MC
1471 ((data) & \
1472 ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK)
4bb13abf 1473
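/* Note: async events arrive on the completion ring in IRQ/NAPI context;
 * this handler only sets sp_event bits and defers the real work (link
 * re-query, reset, etc.) to the bp->sp_task workqueue.
 */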
static int bnxt_async_event_process(struct bnxt *bp,
				    struct hwrm_async_event_cmpl *cmpl)
{
	u16 event_id = le16_to_cpu(cmpl->event_id);

	/* TODO CHIMP_FW: Define event id's for link change, error etc */
	switch (event_id) {
	case ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE: {
		u32 data1 = le32_to_cpu(cmpl->event_data1);
		struct bnxt_link_info *link_info = &bp->link_info;

		if (BNXT_VF(bp))
			goto async_event_process_exit;
		if (data1 & 0x20000) {
			u16 fw_speed = link_info->force_link_speed;
			u32 speed = bnxt_fw_to_ethtool_speed(fw_speed);

			netdev_warn(bp->dev, "Link speed %d no longer supported\n",
				    speed);
		}
		set_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT, &bp->sp_event);
		/* fall thru */
	}
	case ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE:
		set_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event);
		break;
	case ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD:
		set_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event);
		break;
	case ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED: {
		u32 data1 = le32_to_cpu(cmpl->event_data1);
		u16 port_id = BNXT_GET_EVENT_PORT(data1);

		if (BNXT_VF(bp))
			break;

		if (bp->pf.port_id != port_id)
			break;

		set_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event);
		break;
	}
	case ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE:
		if (BNXT_PF(bp))
			goto async_event_process_exit;
		set_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event);
		break;
	default:
		goto async_event_process_exit;
	}
	schedule_work(&bp->sp_task);
async_event_process_exit:
	bnxt_ulp_async_events(bp, cmpl);
	return 0;
}

static int bnxt_hwrm_handler(struct bnxt *bp, struct tx_cmp *txcmp)
{
	u16 cmpl_type = TX_CMP_TYPE(txcmp), vf_id, seq_id;
	struct hwrm_cmpl *h_cmpl = (struct hwrm_cmpl *)txcmp;
	struct hwrm_fwd_req_cmpl *fwd_req_cmpl =
		(struct hwrm_fwd_req_cmpl *)txcmp;

	switch (cmpl_type) {
	case CMPL_BASE_TYPE_HWRM_DONE:
		seq_id = le16_to_cpu(h_cmpl->sequence_id);
		if (seq_id == bp->hwrm_intr_seq_id)
			bp->hwrm_intr_seq_id = HWRM_SEQ_ID_INVALID;
		else
			netdev_err(bp->dev, "Invalid hwrm seq id %d\n", seq_id);
		break;

	case CMPL_BASE_TYPE_HWRM_FWD_REQ:
		vf_id = le16_to_cpu(fwd_req_cmpl->source_id);

		if ((vf_id < bp->pf.first_vf_id) ||
		    (vf_id >= bp->pf.first_vf_id + bp->pf.active_vfs)) {
			netdev_err(bp->dev, "Msg contains invalid VF id %x\n",
				   vf_id);
			return -EINVAL;
		}

		set_bit(vf_id - bp->pf.first_vf_id, bp->pf.vf_event_bmap);
		set_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event);
		schedule_work(&bp->sp_task);
		break;

	case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT:
		bnxt_async_event_process(bp,
					 (struct hwrm_async_event_cmpl *)txcmp);

	default:
		break;
	}

	return 0;
}

static irqreturn_t bnxt_msix(int irq, void *dev_instance)
{
	struct bnxt_napi *bnapi = dev_instance;
	struct bnxt *bp = bnapi->bp;
	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
	u32 cons = RING_CMP(cpr->cp_raw_cons);

	prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]);
	napi_schedule(&bnapi->napi);
	return IRQ_HANDLED;
}

static inline int bnxt_has_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
{
	u32 raw_cons = cpr->cp_raw_cons;
	u16 cons = RING_CMP(raw_cons);
	struct tx_cmp *txcmp;

	txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];

	return TX_CMP_VALID(txcmp, raw_cons);
}

static irqreturn_t bnxt_inta(int irq, void *dev_instance)
{
	struct bnxt_napi *bnapi = dev_instance;
	struct bnxt *bp = bnapi->bp;
	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
	u32 cons = RING_CMP(cpr->cp_raw_cons);
	u32 int_status;

	prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]);

	if (!bnxt_has_work(bp, cpr)) {
		int_status = readl(bp->bar0 + BNXT_CAG_REG_LEGACY_INT_STATUS);
		/* return if erroneous interrupt */
		if (!(int_status & (0x10000 << cpr->cp_ring_struct.fw_ring_id)))
			return IRQ_NONE;
	}

	/* disable ring IRQ */
	BNXT_CP_DB_IRQ_DIS(cpr->cp_doorbell);

	/* Return here if interrupt is shared and is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	napi_schedule(&bnapi->napi);
	return IRQ_HANDLED;
}

1622
1623static int bnxt_poll_work(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
1624{
1625 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
1626 u32 raw_cons = cpr->cp_raw_cons;
1627 u32 cons;
1628 int tx_pkts = 0;
1629 int rx_pkts = 0;
1630 bool rx_event = false;
1631 bool agg_event = false;
1632 struct tx_cmp *txcmp;
1633
1634 while (1) {
1635 int rc;
1636
1637 cons = RING_CMP(raw_cons);
1638 txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];
1639
1640 if (!TX_CMP_VALID(txcmp, raw_cons))
1641 break;
1642
67a95e20
MC
1643 /* The valid test of the entry must be done first before
1644 * reading any further.
1645 */
b67daab0 1646 dma_rmb();
c0c050c5
MC
1647 if (TX_CMP_TYPE(txcmp) == CMP_TYPE_TX_L2_CMP) {
1648 tx_pkts++;
1649 /* return full budget so NAPI will complete. */
1650 if (unlikely(tx_pkts > bp->tx_wake_thresh))
1651 rx_pkts = budget;
1652 } else if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
1653 rc = bnxt_rx_pkt(bp, bnapi, &raw_cons, &agg_event);
1654 if (likely(rc >= 0))
1655 rx_pkts += rc;
1656 else if (rc == -EBUSY) /* partial completion */
1657 break;
1658 rx_event = true;
1659 } else if (unlikely((TX_CMP_TYPE(txcmp) ==
1660 CMPL_BASE_TYPE_HWRM_DONE) ||
1661 (TX_CMP_TYPE(txcmp) ==
1662 CMPL_BASE_TYPE_HWRM_FWD_REQ) ||
1663 (TX_CMP_TYPE(txcmp) ==
1664 CMPL_BASE_TYPE_HWRM_ASYNC_EVENT))) {
1665 bnxt_hwrm_handler(bp, txcmp);
1666 }
1667 raw_cons = NEXT_RAW_CMP(raw_cons);
1668
1669 if (rx_pkts == budget)
1670 break;
1671 }
1672
1673 cpr->cp_raw_cons = raw_cons;
1674 /* ACK completion ring before freeing tx ring and producing new
1675 * buffers in rx/agg rings to prevent overflowing the completion
1676 * ring.
1677 */
1678 BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons);
1679
1680 if (tx_pkts)
1681 bnxt_tx_int(bp, bnapi, tx_pkts);
1682
1683 if (rx_event) {
b6ab4b01 1684 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
c0c050c5
MC
1685
1686 writel(DB_KEY_RX | rxr->rx_prod, rxr->rx_doorbell);
1687 writel(DB_KEY_RX | rxr->rx_prod, rxr->rx_doorbell);
1688 if (agg_event) {
1689 writel(DB_KEY_RX | rxr->rx_agg_prod,
1690 rxr->rx_agg_doorbell);
1691 writel(DB_KEY_RX | rxr->rx_agg_prod,
1692 rxr->rx_agg_doorbell);
1693 }
1694 }
1695 return rx_pkts;
1696}
1697
10bbdaf5
PS
1698static int bnxt_poll_nitroa0(struct napi_struct *napi, int budget)
1699{
1700 struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
1701 struct bnxt *bp = bnapi->bp;
1702 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
1703 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
1704 struct tx_cmp *txcmp;
1705 struct rx_cmp_ext *rxcmp1;
1706 u32 cp_cons, tmp_raw_cons;
1707 u32 raw_cons = cpr->cp_raw_cons;
1708 u32 rx_pkts = 0;
1709 bool agg_event = false;
1710
1711 while (1) {
1712 int rc;
1713
1714 cp_cons = RING_CMP(raw_cons);
1715 txcmp = &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
1716
1717 if (!TX_CMP_VALID(txcmp, raw_cons))
1718 break;
1719
1720 if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
1721 tmp_raw_cons = NEXT_RAW_CMP(raw_cons);
1722 cp_cons = RING_CMP(tmp_raw_cons);
1723 rxcmp1 = (struct rx_cmp_ext *)
1724 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
1725
1726 if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
1727 break;
1728
1729 /* force an error to recycle the buffer */
1730 rxcmp1->rx_cmp_cfa_code_errors_v2 |=
1731 cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR);
1732
1733 rc = bnxt_rx_pkt(bp, bnapi, &raw_cons, &agg_event);
1734 if (likely(rc == -EIO))
1735 rx_pkts++;
1736 else if (rc == -EBUSY) /* partial completion */
1737 break;
1738 } else if (unlikely(TX_CMP_TYPE(txcmp) ==
1739 CMPL_BASE_TYPE_HWRM_DONE)) {
1740 bnxt_hwrm_handler(bp, txcmp);
1741 } else {
1742 netdev_err(bp->dev,
1743 "Invalid completion received on special ring\n");
1744 }
1745 raw_cons = NEXT_RAW_CMP(raw_cons);
1746
1747 if (rx_pkts == budget)
1748 break;
1749 }
1750
1751 cpr->cp_raw_cons = raw_cons;
1752 BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons);
1753 writel(DB_KEY_RX | rxr->rx_prod, rxr->rx_doorbell);
1754 writel(DB_KEY_RX | rxr->rx_prod, rxr->rx_doorbell);
1755
1756 if (agg_event) {
1757 writel(DB_KEY_RX | rxr->rx_agg_prod, rxr->rx_agg_doorbell);
1758 writel(DB_KEY_RX | rxr->rx_agg_prod, rxr->rx_agg_doorbell);
1759 }
1760
1761 if (!bnxt_has_work(bp, cpr) && rx_pkts < budget) {
1762 napi_complete(napi);
1763 BNXT_CP_DB_REARM(cpr->cp_doorbell, cpr->cp_raw_cons);
1764 }
1765 return rx_pkts;
1766}
1767
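/* Note: the main NAPI handler.  Ring interrupts stay disabled while work
 * remains; only after a clean napi_complete_done() is the completion ring
 * doorbell re-armed to generate the next interrupt.
 */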
c0c050c5
MC
1768static int bnxt_poll(struct napi_struct *napi, int budget)
1769{
1770 struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
1771 struct bnxt *bp = bnapi->bp;
1772 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
1773 int work_done = 0;
1774
c0c050c5
MC
1775 while (1) {
1776 work_done += bnxt_poll_work(bp, bnapi, budget - work_done);
1777
1778 if (work_done >= budget)
1779 break;
1780
1781 if (!bnxt_has_work(bp, cpr)) {
e7b95691
MC
1782 if (napi_complete_done(napi, work_done))
1783 BNXT_CP_DB_REARM(cpr->cp_doorbell,
1784 cpr->cp_raw_cons);
c0c050c5
MC
1785 break;
1786 }
1787 }
1788 mmiowb();
c0c050c5
MC
1789 return work_done;
1790}
1791
c0c050c5
MC
1792static void bnxt_free_tx_skbs(struct bnxt *bp)
1793{
1794 int i, max_idx;
1795 struct pci_dev *pdev = bp->pdev;
1796
b6ab4b01 1797 if (!bp->tx_ring)
c0c050c5
MC
1798 return;
1799
1800 max_idx = bp->tx_nr_pages * TX_DESC_CNT;
1801 for (i = 0; i < bp->tx_nr_rings; i++) {
b6ab4b01 1802 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
c0c050c5
MC
1803 int j;
1804
c0c050c5
MC
1805 for (j = 0; j < max_idx;) {
1806 struct bnxt_sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
1807 struct sk_buff *skb = tx_buf->skb;
1808 int k, last;
1809
1810 if (!skb) {
1811 j++;
1812 continue;
1813 }
1814
1815 tx_buf->skb = NULL;
1816
1817 if (tx_buf->is_push) {
1818 dev_kfree_skb(skb);
1819 j += 2;
1820 continue;
1821 }
1822
1823 dma_unmap_single(&pdev->dev,
1824 dma_unmap_addr(tx_buf, mapping),
1825 skb_headlen(skb),
1826 PCI_DMA_TODEVICE);
1827
1828 last = tx_buf->nr_frags;
1829 j += 2;
1830 for (k = 0; k < last; k++, j++) {
1831 int ring_idx = j & bp->tx_ring_mask;
1832 skb_frag_t *frag = &skb_shinfo(skb)->frags[k];
1833
1834 tx_buf = &txr->tx_buf_ring[ring_idx];
1835 dma_unmap_page(
1836 &pdev->dev,
1837 dma_unmap_addr(tx_buf, mapping),
1838 skb_frag_size(frag), PCI_DMA_TODEVICE);
1839 }
1840 dev_kfree_skb(skb);
1841 }
1842 netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, i));
1843 }
1844}
1845
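/* Note on the unmap loop above: each transmitted skb consumes two BDs
 * for its head (the long TX BD plus the extended BD), hence j += 2,
 * followed by one BD per page fragment, hence the k loop advancing j
 * once per frag; push packets carry no mapped fragments, so only the
 * two head slots are skipped for them.
 */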
1846static void bnxt_free_rx_skbs(struct bnxt *bp)
1847{
1848 int i, max_idx, max_agg_idx;
1849 struct pci_dev *pdev = bp->pdev;
1850
1851 if (!bp->rx_ring)
1852 return;
1853
1854 max_idx = bp->rx_nr_pages * RX_DESC_CNT;
1855 max_agg_idx = bp->rx_agg_nr_pages * RX_DESC_CNT;
1856 for (i = 0; i < bp->rx_nr_rings; i++) {
1857 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
1858 int j;
1859
1860 if (rxr->rx_tpa) {
1861 for (j = 0; j < MAX_TPA; j++) {
1862 struct bnxt_tpa_info *tpa_info =
1863 &rxr->rx_tpa[j];
1864 u8 *data = tpa_info->data;
1865
1866 if (!data)
1867 continue;
1868
1869 dma_unmap_single(
1870 &pdev->dev,
1871 dma_unmap_addr(tpa_info, mapping),
1872 bp->rx_buf_use_size,
1873 PCI_DMA_FROMDEVICE);
1874
1875 tpa_info->data = NULL;
1876
1877 kfree(data);
1878 }
1879 }
1880
1881 for (j = 0; j < max_idx; j++) {
1882 struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[j];
1883 u8 *data = rx_buf->data;
1884
1885 if (!data)
1886 continue;
1887
1888 dma_unmap_single(&pdev->dev,
1889 dma_unmap_addr(rx_buf, mapping),
1890 bp->rx_buf_use_size,
1891 PCI_DMA_FROMDEVICE);
1892
1893 rx_buf->data = NULL;
1894
1895 kfree(data);
1896 }
1897
1898 for (j = 0; j < max_agg_idx; j++) {
1899 struct bnxt_sw_rx_agg_bd *rx_agg_buf =
1900 &rxr->rx_agg_ring[j];
1901 struct page *page = rx_agg_buf->page;
1902
1903 if (!page)
1904 continue;
1905
1906 dma_unmap_page(&pdev->dev,
1907 dma_unmap_addr(rx_agg_buf, mapping),
1908 BNXT_RX_PAGE_SIZE, PCI_DMA_FROMDEVICE);
1909
1910 rx_agg_buf->page = NULL;
1911 __clear_bit(j, rxr->rx_agg_bmap);
1912
1913 __free_page(page);
1914 }
1915 if (rxr->rx_page) {
1916 __free_page(rxr->rx_page);
1917 rxr->rx_page = NULL;
1918 }
1919 }
1920}
1921
1922static void bnxt_free_skbs(struct bnxt *bp)
1923{
1924 bnxt_free_tx_skbs(bp);
1925 bnxt_free_rx_skbs(bp);
1926}
1927
1928static void bnxt_free_ring(struct bnxt *bp, struct bnxt_ring_struct *ring)
1929{
1930 struct pci_dev *pdev = bp->pdev;
1931 int i;
1932
1933 for (i = 0; i < ring->nr_pages; i++) {
1934 if (!ring->pg_arr[i])
1935 continue;
1936
1937 dma_free_coherent(&pdev->dev, ring->page_size,
1938 ring->pg_arr[i], ring->dma_arr[i]);
1939
1940 ring->pg_arr[i] = NULL;
1941 }
1942 if (ring->pg_tbl) {
1943 dma_free_coherent(&pdev->dev, ring->nr_pages * 8,
1944 ring->pg_tbl, ring->pg_tbl_map);
1945 ring->pg_tbl = NULL;
1946 }
1947 if (ring->vmem_size && *ring->vmem) {
1948 vfree(*ring->vmem);
1949 *ring->vmem = NULL;
1950 }
1951}
1952
1953static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_struct *ring)
1954{
1955 int i;
1956 struct pci_dev *pdev = bp->pdev;
1957
1958 if (ring->nr_pages > 1) {
1959 ring->pg_tbl = dma_alloc_coherent(&pdev->dev,
1960 ring->nr_pages * 8,
1961 &ring->pg_tbl_map,
1962 GFP_KERNEL);
1963 if (!ring->pg_tbl)
1964 return -ENOMEM;
1965 }
1966
1967 for (i = 0; i < ring->nr_pages; i++) {
1968 ring->pg_arr[i] = dma_alloc_coherent(&pdev->dev,
1969 ring->page_size,
1970 &ring->dma_arr[i],
1971 GFP_KERNEL);
1972 if (!ring->pg_arr[i])
1973 return -ENOMEM;
1974
1975 if (ring->nr_pages > 1)
1976 ring->pg_tbl[i] = cpu_to_le64(ring->dma_arr[i]);
1977 }
1978
1979 if (ring->vmem_size) {
1980 *ring->vmem = vzalloc(ring->vmem_size);
1981 if (!(*ring->vmem))
1982 return -ENOMEM;
1983 }
1984 return 0;
1985}
1986
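/* Shape of a ring after bnxt_alloc_ring(), illustrated: a single-page
 * ring is described to the chip by the DMA address of page 0 alone,
 * while a multi-page ring also carries pg_tbl, a table of little-endian
 * 64-bit page addresses (hence the nr_pages * 8 sizing) that the chip
 * walks to locate the pages.  vmem, when vmem_size is set, is a
 * host-only vzalloc'ed shadow array for per-descriptor software state.
 */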
1987static void bnxt_free_rx_rings(struct bnxt *bp)
1988{
1989 int i;
1990
1991 if (!bp->rx_ring)
1992 return;
1993
1994 for (i = 0; i < bp->rx_nr_rings; i++) {
1995 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
1996 struct bnxt_ring_struct *ring;
1997
1998 kfree(rxr->rx_tpa);
1999 rxr->rx_tpa = NULL;
2000
2001 kfree(rxr->rx_agg_bmap);
2002 rxr->rx_agg_bmap = NULL;
2003
2004 ring = &rxr->rx_ring_struct;
2005 bnxt_free_ring(bp, ring);
2006
2007 ring = &rxr->rx_agg_ring_struct;
2008 bnxt_free_ring(bp, ring);
2009 }
2010}
2011
2012static int bnxt_alloc_rx_rings(struct bnxt *bp)
2013{
2014 int i, rc, agg_rings = 0, tpa_rings = 0;
2015
2016 if (!bp->rx_ring)
2017 return -ENOMEM;
2018
2019 if (bp->flags & BNXT_FLAG_AGG_RINGS)
2020 agg_rings = 1;
2021
2022 if (bp->flags & BNXT_FLAG_TPA)
2023 tpa_rings = 1;
2024
2025 for (i = 0; i < bp->rx_nr_rings; i++) {
2026 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
2027 struct bnxt_ring_struct *ring;
2028
2029 ring = &rxr->rx_ring_struct;
2030
2031 rc = bnxt_alloc_ring(bp, ring);
2032 if (rc)
2033 return rc;
2034
2035 if (agg_rings) {
2036 u16 mem_size;
2037
2038 ring = &rxr->rx_agg_ring_struct;
2039 rc = bnxt_alloc_ring(bp, ring);
2040 if (rc)
2041 return rc;
2042
2043 rxr->rx_agg_bmap_size = bp->rx_agg_ring_mask + 1;
2044 mem_size = rxr->rx_agg_bmap_size / 8;
2045 rxr->rx_agg_bmap = kzalloc(mem_size, GFP_KERNEL);
2046 if (!rxr->rx_agg_bmap)
2047 return -ENOMEM;
2048
2049 if (tpa_rings) {
2050 rxr->rx_tpa = kcalloc(MAX_TPA,
2051 sizeof(struct bnxt_tpa_info),
2052 GFP_KERNEL);
2053 if (!rxr->rx_tpa)
2054 return -ENOMEM;
2055 }
2056 }
2057 }
2058 return 0;
2059}
2060
2061static void bnxt_free_tx_rings(struct bnxt *bp)
2062{
2063 int i;
2064 struct pci_dev *pdev = bp->pdev;
2065
2066 if (!bp->tx_ring)
2067 return;
2068
2069 for (i = 0; i < bp->tx_nr_rings; i++) {
2070 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
2071 struct bnxt_ring_struct *ring;
2072
2073 if (txr->tx_push) {
2074 dma_free_coherent(&pdev->dev, bp->tx_push_size,
2075 txr->tx_push, txr->tx_push_mapping);
2076 txr->tx_push = NULL;
2077 }
2078
2079 ring = &txr->tx_ring_struct;
2080
2081 bnxt_free_ring(bp, ring);
2082 }
2083}
2084
2085static int bnxt_alloc_tx_rings(struct bnxt *bp)
2086{
2087 int i, j, rc;
2088 struct pci_dev *pdev = bp->pdev;
2089
2090 bp->tx_push_size = 0;
2091 if (bp->tx_push_thresh) {
2092 int push_size;
2093
2094 push_size = L1_CACHE_ALIGN(sizeof(struct tx_push_bd) +
2095 bp->tx_push_thresh);
2096
2097 if (push_size > 256) {
2098 push_size = 0;
2099 bp->tx_push_thresh = 0;
2100 }
2101
2102 bp->tx_push_size = push_size;
2103 }
2104
2105 for (i = 0, j = 0; i < bp->tx_nr_rings; i++) {
2106 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
2107 struct bnxt_ring_struct *ring;
2108
2109 ring = &txr->tx_ring_struct;
2110
2111 rc = bnxt_alloc_ring(bp, ring);
2112 if (rc)
2113 return rc;
2114
2115 if (bp->tx_push_size) {
2116 dma_addr_t mapping;
2117
2118 /* One pre-allocated DMA buffer to back up
2119 * TX push operations
2120 */
2121 txr->tx_push = dma_alloc_coherent(&pdev->dev,
2122 bp->tx_push_size,
2123 &txr->tx_push_mapping,
2124 GFP_KERNEL);
2125
2126 if (!txr->tx_push)
2127 return -ENOMEM;
2128
2129 mapping = txr->tx_push_mapping +
2130 sizeof(struct tx_push_bd);
2131 txr->data_mapping = cpu_to_le64(mapping);
2132
2133 memset(txr->tx_push, 0, sizeof(struct tx_push_bd));
2134 }
2135 ring->queue_id = bp->q_info[j].queue_id;
2136 if (i % bp->tx_nr_rings_per_tc == (bp->tx_nr_rings_per_tc - 1))
2137 j++;
2138 }
2139 return 0;
2140}
2141
2142static void bnxt_free_cp_rings(struct bnxt *bp)
2143{
2144 int i;
2145
2146 if (!bp->bnapi)
2147 return;
2148
2149 for (i = 0; i < bp->cp_nr_rings; i++) {
2150 struct bnxt_napi *bnapi = bp->bnapi[i];
2151 struct bnxt_cp_ring_info *cpr;
2152 struct bnxt_ring_struct *ring;
2153
2154 if (!bnapi)
2155 continue;
2156
2157 cpr = &bnapi->cp_ring;
2158 ring = &cpr->cp_ring_struct;
2159
2160 bnxt_free_ring(bp, ring);
2161 }
2162}
2163
2164static int bnxt_alloc_cp_rings(struct bnxt *bp)
2165{
2166 int i, rc;
2167
2168 for (i = 0; i < bp->cp_nr_rings; i++) {
2169 struct bnxt_napi *bnapi = bp->bnapi[i];
2170 struct bnxt_cp_ring_info *cpr;
2171 struct bnxt_ring_struct *ring;
2172
2173 if (!bnapi)
2174 continue;
2175
2176 cpr = &bnapi->cp_ring;
2177 ring = &cpr->cp_ring_struct;
2178
2179 rc = bnxt_alloc_ring(bp, ring);
2180 if (rc)
2181 return rc;
2182 }
2183 return 0;
2184}
2185
2186static void bnxt_init_ring_struct(struct bnxt *bp)
2187{
2188 int i;
2189
2190 for (i = 0; i < bp->cp_nr_rings; i++) {
2191 struct bnxt_napi *bnapi = bp->bnapi[i];
2192 struct bnxt_cp_ring_info *cpr;
2193 struct bnxt_rx_ring_info *rxr;
2194 struct bnxt_tx_ring_info *txr;
2195 struct bnxt_ring_struct *ring;
2196
2197 if (!bnapi)
2198 continue;
2199
2200 cpr = &bnapi->cp_ring;
2201 ring = &cpr->cp_ring_struct;
2202 ring->nr_pages = bp->cp_nr_pages;
2203 ring->page_size = HW_CMPD_RING_SIZE;
2204 ring->pg_arr = (void **)cpr->cp_desc_ring;
2205 ring->dma_arr = cpr->cp_desc_mapping;
2206 ring->vmem_size = 0;
2207
2208 rxr = bnapi->rx_ring;
2209 if (!rxr)
2210 goto skip_rx;
2211
2212 ring = &rxr->rx_ring_struct;
2213 ring->nr_pages = bp->rx_nr_pages;
2214 ring->page_size = HW_RXBD_RING_SIZE;
2215 ring->pg_arr = (void **)rxr->rx_desc_ring;
2216 ring->dma_arr = rxr->rx_desc_mapping;
2217 ring->vmem_size = SW_RXBD_RING_SIZE * bp->rx_nr_pages;
2218 ring->vmem = (void **)&rxr->rx_buf_ring;
2219
2220 ring = &rxr->rx_agg_ring_struct;
2221 ring->nr_pages = bp->rx_agg_nr_pages;
2222 ring->page_size = HW_RXBD_RING_SIZE;
2223 ring->pg_arr = (void **)rxr->rx_agg_desc_ring;
2224 ring->dma_arr = rxr->rx_agg_desc_mapping;
2225 ring->vmem_size = SW_RXBD_AGG_RING_SIZE * bp->rx_agg_nr_pages;
2226 ring->vmem = (void **)&rxr->rx_agg_ring;
2227
2228skip_rx:
2229 txr = bnapi->tx_ring;
2230 if (!txr)
2231 continue;
2232
2233 ring = &txr->tx_ring_struct;
2234 ring->nr_pages = bp->tx_nr_pages;
2235 ring->page_size = HW_RXBD_RING_SIZE;
2236 ring->pg_arr = (void **)txr->tx_desc_ring;
2237 ring->dma_arr = txr->tx_desc_mapping;
2238 ring->vmem_size = SW_TXBD_RING_SIZE * bp->tx_nr_pages;
2239 ring->vmem = (void **)&txr->tx_buf_ring;
2240 }
2241}
2242
2243static void bnxt_init_rxbd_pages(struct bnxt_ring_struct *ring, u32 type)
2244{
2245 int i;
2246 u32 prod;
2247 struct rx_bd **rx_buf_ring;
2248
2249 rx_buf_ring = (struct rx_bd **)ring->pg_arr;
2250 for (i = 0, prod = 0; i < ring->nr_pages; i++) {
2251 int j;
2252 struct rx_bd *rxbd;
2253
2254 rxbd = rx_buf_ring[i];
2255 if (!rxbd)
2256 continue;
2257
2258 for (j = 0; j < RX_DESC_CNT; j++, rxbd++, prod++) {
2259 rxbd->rx_bd_len_flags_type = cpu_to_le32(type);
2260 rxbd->rx_bd_opaque = prod;
2261 }
2262 }
2263}
2264
2265static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr)
2266{
2267 struct net_device *dev = bp->dev;
2268 struct bnxt_rx_ring_info *rxr;
2269 struct bnxt_ring_struct *ring;
2270 u32 prod, type;
2271 int i;
2272
2273 type = (bp->rx_buf_use_size << RX_BD_LEN_SHIFT) |
2274 RX_BD_TYPE_RX_PACKET_BD | RX_BD_FLAGS_EOP;
2275
2276 if (NET_IP_ALIGN == 2)
2277 type |= RX_BD_FLAGS_SOP;
2278
2279 rxr = &bp->rx_ring[ring_nr];
2280 ring = &rxr->rx_ring_struct;
2281 bnxt_init_rxbd_pages(ring, type);
2282
2283 prod = rxr->rx_prod;
2284 for (i = 0; i < bp->rx_ring_size; i++) {
2285 if (bnxt_alloc_rx_data(bp, rxr, prod, GFP_KERNEL) != 0) {
2286 netdev_warn(dev, "init'ed rx ring %d with %d/%d skbs only\n",
2287 ring_nr, i, bp->rx_ring_size);
2288 break;
2289 }
2290 prod = NEXT_RX(prod);
2291 }
2292 rxr->rx_prod = prod;
2293 ring->fw_ring_id = INVALID_HW_RING_ID;
2294
2295 ring = &rxr->rx_agg_ring_struct;
2296 ring->fw_ring_id = INVALID_HW_RING_ID;
2297
2298 if (!(bp->flags & BNXT_FLAG_AGG_RINGS))
2299 return 0;
2300
2301 type = ((u32)BNXT_RX_PAGE_SIZE << RX_BD_LEN_SHIFT) |
2302 RX_BD_TYPE_RX_AGG_BD | RX_BD_FLAGS_SOP;
2303
2304 bnxt_init_rxbd_pages(ring, type);
2305
2306 prod = rxr->rx_agg_prod;
2307 for (i = 0; i < bp->rx_agg_ring_size; i++) {
2308 if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_KERNEL) != 0) {
2309 netdev_warn(dev, "init'ed rx ring %d with %d/%d pages only\n",
2310 ring_nr, i, bp->rx_agg_ring_size);
2311 break;
2312 }
2313 prod = NEXT_RX_AGG(prod);
2314 }
2315 rxr->rx_agg_prod = prod;
2316
2317 if (bp->flags & BNXT_FLAG_TPA) {
2318 if (rxr->rx_tpa) {
2319 u8 *data;
2320 dma_addr_t mapping;
2321
2322 for (i = 0; i < MAX_TPA; i++) {
2323 data = __bnxt_alloc_rx_data(bp, &mapping,
2324 GFP_KERNEL);
2325 if (!data)
2326 return -ENOMEM;
2327
2328 rxr->rx_tpa[i].data = data;
2329 rxr->rx_tpa[i].mapping = mapping;
2330 }
2331 } else {
2332 netdev_err(bp->dev, "No resource allocated for LRO/GRO\n");
2333 return -ENOMEM;
2334 }
2335 }
2336
2337 return 0;
2338}
2339
2340static int bnxt_init_rx_rings(struct bnxt *bp)
2341{
2342 int i, rc = 0;
2343
2344 for (i = 0; i < bp->rx_nr_rings; i++) {
2345 rc = bnxt_init_one_rx_ring(bp, i);
2346 if (rc)
2347 break;
2348 }
2349
2350 return rc;
2351}
2352
2353static int bnxt_init_tx_rings(struct bnxt *bp)
2354{
2355 u16 i;
2356
2357 bp->tx_wake_thresh = max_t(int, bp->tx_ring_size / 2,
2358 MAX_SKB_FRAGS + 1);
2359
2360 for (i = 0; i < bp->tx_nr_rings; i++) {
2361 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
2362 struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
2363
2364 ring->fw_ring_id = INVALID_HW_RING_ID;
2365 }
2366
2367 return 0;
2368}
2369
2370static void bnxt_free_ring_grps(struct bnxt *bp)
2371{
2372 kfree(bp->grp_info);
2373 bp->grp_info = NULL;
2374}
2375
2376static int bnxt_init_ring_grps(struct bnxt *bp, bool irq_re_init)
2377{
2378 int i;
2379
2380 if (irq_re_init) {
2381 bp->grp_info = kcalloc(bp->cp_nr_rings,
2382 sizeof(struct bnxt_ring_grp_info),
2383 GFP_KERNEL);
2384 if (!bp->grp_info)
2385 return -ENOMEM;
2386 }
2387 for (i = 0; i < bp->cp_nr_rings; i++) {
2388 if (irq_re_init)
2389 bp->grp_info[i].fw_stats_ctx = INVALID_HW_RING_ID;
2390 bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
2391 bp->grp_info[i].rx_fw_ring_id = INVALID_HW_RING_ID;
2392 bp->grp_info[i].agg_fw_ring_id = INVALID_HW_RING_ID;
2393 bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
2394 }
2395 return 0;
2396}
2397
2398static void bnxt_free_vnics(struct bnxt *bp)
2399{
2400 kfree(bp->vnic_info);
2401 bp->vnic_info = NULL;
2402 bp->nr_vnics = 0;
2403}
2404
2405static int bnxt_alloc_vnics(struct bnxt *bp)
2406{
2407 int num_vnics = 1;
2408
2409#ifdef CONFIG_RFS_ACCEL
2410 if (bp->flags & BNXT_FLAG_RFS)
2411 num_vnics += bp->rx_nr_rings;
2412#endif
2413
2414 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
2415 num_vnics++;
2416
2417 bp->vnic_info = kcalloc(num_vnics, sizeof(struct bnxt_vnic_info),
2418 GFP_KERNEL);
2419 if (!bp->vnic_info)
2420 return -ENOMEM;
2421
2422 bp->nr_vnics = num_vnics;
2423 return 0;
2424}
2425
2426static void bnxt_init_vnics(struct bnxt *bp)
2427{
2428 int i;
2429
2430 for (i = 0; i < bp->nr_vnics; i++) {
2431 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
2432
2433 vnic->fw_vnic_id = INVALID_HW_RING_ID;
2434 vnic->fw_rss_cos_lb_ctx[0] = INVALID_HW_RING_ID;
2435 vnic->fw_rss_cos_lb_ctx[1] = INVALID_HW_RING_ID;
2436 vnic->fw_l2_ctx_id = INVALID_HW_RING_ID;
2437
2438 if (bp->vnic_info[i].rss_hash_key) {
2439 if (i == 0)
2440 prandom_bytes(vnic->rss_hash_key,
2441 HW_HASH_KEY_SIZE);
2442 else
2443 memcpy(vnic->rss_hash_key,
2444 bp->vnic_info[0].rss_hash_key,
2445 HW_HASH_KEY_SIZE);
2446 }
2447 }
2448}
2449
2450static int bnxt_calc_nr_ring_pages(u32 ring_size, int desc_per_pg)
2451{
2452 int pages;
2453
2454 pages = ring_size / desc_per_pg;
2455
2456 if (!pages)
2457 return 1;
2458
2459 pages++;
2460
2461 while (pages & (pages - 1))
2462 pages++;
2463
2464 return pages;
2465}
2466
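/* Worked example for bnxt_calc_nr_ring_pages() (numbers illustrative):
 * with ring_size = 2000 and desc_per_pg = 1024, pages = 2000 / 1024 = 1,
 * the increment makes it 2, and 2 is already a power of 2, so two
 * descriptor pages back the ring.  ring_size = 5000 gives 4 + 1 = 5,
 * which the while loop bumps to 8.  The result is always a power of 2,
 * so nr_pages * desc_per_pg - 1 can serve as a ring index mask.
 */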
2467static void bnxt_set_tpa_flags(struct bnxt *bp)
2468{
2469 bp->flags &= ~BNXT_FLAG_TPA;
2470 if (bp->dev->features & NETIF_F_LRO)
2471 bp->flags |= BNXT_FLAG_LRO;
2472 if (bp->dev->features & NETIF_F_GRO)
2473 bp->flags |= BNXT_FLAG_GRO;
2474}
2475
2476/* bp->rx_ring_size, bp->tx_ring_size, dev->mtu, BNXT_FLAG_{G|L}RO flags must
2477 * be set on entry.
2478 */
2479void bnxt_set_ring_params(struct bnxt *bp)
2480{
2481 u32 ring_size, rx_size, rx_space;
2482 u32 agg_factor = 0, agg_ring_size = 0;
2483
2484 /* 8 for CRC and VLAN */
2485 rx_size = SKB_DATA_ALIGN(bp->dev->mtu + ETH_HLEN + NET_IP_ALIGN + 8);
2486
2487 rx_space = rx_size + NET_SKB_PAD +
2488 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
2489
2490 bp->rx_copy_thresh = BNXT_RX_COPY_THRESH;
2491 ring_size = bp->rx_ring_size;
2492 bp->rx_agg_ring_size = 0;
2493 bp->rx_agg_nr_pages = 0;
2494
2495 if (bp->flags & BNXT_FLAG_TPA)
2496 agg_factor = min_t(u32, 4, 65536 / BNXT_RX_PAGE_SIZE);
2497
2498 bp->flags &= ~BNXT_FLAG_JUMBO;
2499 if (rx_space > PAGE_SIZE) {
2500 u32 jumbo_factor;
2501
2502 bp->flags |= BNXT_FLAG_JUMBO;
2503 jumbo_factor = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
2504 if (jumbo_factor > agg_factor)
2505 agg_factor = jumbo_factor;
2506 }
2507 agg_ring_size = ring_size * agg_factor;
2508
2509 if (agg_ring_size) {
2510 bp->rx_agg_nr_pages = bnxt_calc_nr_ring_pages(agg_ring_size,
2511 RX_DESC_CNT);
2512 if (bp->rx_agg_nr_pages > MAX_RX_AGG_PAGES) {
2513 u32 tmp = agg_ring_size;
2514
2515 bp->rx_agg_nr_pages = MAX_RX_AGG_PAGES;
2516 agg_ring_size = MAX_RX_AGG_PAGES * RX_DESC_CNT - 1;
2517 netdev_warn(bp->dev, "rx agg ring size %d reduced to %d.\n",
2518 tmp, agg_ring_size);
2519 }
2520 bp->rx_agg_ring_size = agg_ring_size;
2521 bp->rx_agg_ring_mask = (bp->rx_agg_nr_pages * RX_DESC_CNT) - 1;
2522 rx_size = SKB_DATA_ALIGN(BNXT_RX_COPY_THRESH + NET_IP_ALIGN);
2523 rx_space = rx_size + NET_SKB_PAD +
2524 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
2525 }
2526
2527 bp->rx_buf_use_size = rx_size;
2528 bp->rx_buf_size = rx_space;
2529
2530 bp->rx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, RX_DESC_CNT);
2531 bp->rx_ring_mask = (bp->rx_nr_pages * RX_DESC_CNT) - 1;
2532
2533 ring_size = bp->tx_ring_size;
2534 bp->tx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, TX_DESC_CNT);
2535 bp->tx_ring_mask = (bp->tx_nr_pages * TX_DESC_CNT) - 1;
2536
2537 ring_size = bp->rx_ring_size * (2 + agg_factor) + bp->tx_ring_size;
2538 bp->cp_ring_size = ring_size;
2539
2540 bp->cp_nr_pages = bnxt_calc_nr_ring_pages(ring_size, CP_DESC_CNT);
2541 if (bp->cp_nr_pages > MAX_CP_PAGES) {
2542 bp->cp_nr_pages = MAX_CP_PAGES;
2543 bp->cp_ring_size = MAX_CP_PAGES * CP_DESC_CNT - 1;
2544 netdev_warn(bp->dev, "completion ring size %d reduced to %d.\n",
2545 ring_size, bp->cp_ring_size);
2546 }
2547 bp->cp_bit = bp->cp_nr_pages * CP_DESC_CNT;
2548 bp->cp_ring_mask = bp->cp_bit - 1;
2549}
2550
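/* Sizing arithmetic above, illustrated with assumed values: with TPA on
 * and BNXT_RX_PAGE_SIZE = 4K, agg_factor = min(4, 65536 / 4096) = 4, so
 * a 1024-entry RX ring is paired with a 4096-entry aggregation ring.
 * The completion ring must absorb the worst case from every producer,
 * hence cp_ring_size = rx * (2 + agg_factor) + tx: each RX packet posts
 * a two-entry completion (rx_cmp plus rx_cmp_ext) and up to agg_factor
 * aggregation entries, and TX completions share the same ring.
 */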
2551static void bnxt_free_vnic_attributes(struct bnxt *bp)
2552{
2553 int i;
2554 struct bnxt_vnic_info *vnic;
2555 struct pci_dev *pdev = bp->pdev;
2556
2557 if (!bp->vnic_info)
2558 return;
2559
2560 for (i = 0; i < bp->nr_vnics; i++) {
2561 vnic = &bp->vnic_info[i];
2562
2563 kfree(vnic->fw_grp_ids);
2564 vnic->fw_grp_ids = NULL;
2565
2566 kfree(vnic->uc_list);
2567 vnic->uc_list = NULL;
2568
2569 if (vnic->mc_list) {
2570 dma_free_coherent(&pdev->dev, vnic->mc_list_size,
2571 vnic->mc_list, vnic->mc_list_mapping);
2572 vnic->mc_list = NULL;
2573 }
2574
2575 if (vnic->rss_table) {
2576 dma_free_coherent(&pdev->dev, PAGE_SIZE,
2577 vnic->rss_table,
2578 vnic->rss_table_dma_addr);
2579 vnic->rss_table = NULL;
2580 }
2581
2582 vnic->rss_hash_key = NULL;
2583 vnic->flags = 0;
2584 }
2585}
2586
2587static int bnxt_alloc_vnic_attributes(struct bnxt *bp)
2588{
2589 int i, rc = 0, size;
2590 struct bnxt_vnic_info *vnic;
2591 struct pci_dev *pdev = bp->pdev;
2592 int max_rings;
2593
2594 for (i = 0; i < bp->nr_vnics; i++) {
2595 vnic = &bp->vnic_info[i];
2596
2597 if (vnic->flags & BNXT_VNIC_UCAST_FLAG) {
2598 int mem_size = (BNXT_MAX_UC_ADDRS - 1) * ETH_ALEN;
2599
2600 if (mem_size > 0) {
2601 vnic->uc_list = kmalloc(mem_size, GFP_KERNEL);
2602 if (!vnic->uc_list) {
2603 rc = -ENOMEM;
2604 goto out;
2605 }
2606 }
2607 }
2608
2609 if (vnic->flags & BNXT_VNIC_MCAST_FLAG) {
2610 vnic->mc_list_size = BNXT_MAX_MC_ADDRS * ETH_ALEN;
2611 vnic->mc_list =
2612 dma_alloc_coherent(&pdev->dev,
2613 vnic->mc_list_size,
2614 &vnic->mc_list_mapping,
2615 GFP_KERNEL);
2616 if (!vnic->mc_list) {
2617 rc = -ENOMEM;
2618 goto out;
2619 }
2620 }
2621
2622 if (vnic->flags & BNXT_VNIC_RSS_FLAG)
2623 max_rings = bp->rx_nr_rings;
2624 else
2625 max_rings = 1;
2626
2627 vnic->fw_grp_ids = kcalloc(max_rings, sizeof(u16), GFP_KERNEL);
2628 if (!vnic->fw_grp_ids) {
2629 rc = -ENOMEM;
2630 goto out;
2631 }
2632
2633 if ((bp->flags & BNXT_FLAG_NEW_RSS_CAP) &&
2634 !(vnic->flags & BNXT_VNIC_RSS_FLAG))
2635 continue;
2636
2637 /* Allocate rss table and hash key */
2638 vnic->rss_table = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
2639 &vnic->rss_table_dma_addr,
2640 GFP_KERNEL);
2641 if (!vnic->rss_table) {
2642 rc = -ENOMEM;
2643 goto out;
2644 }
2645
2646 size = L1_CACHE_ALIGN(HW_HASH_INDEX_SIZE * sizeof(u16));
2647
2648 vnic->rss_hash_key = ((void *)vnic->rss_table) + size;
2649 vnic->rss_hash_key_dma_addr = vnic->rss_table_dma_addr + size;
2650 }
2651 return 0;
2652
2653out:
2654 return rc;
2655}
2656
2657static void bnxt_free_hwrm_resources(struct bnxt *bp)
2658{
2659 struct pci_dev *pdev = bp->pdev;
2660
2661 dma_free_coherent(&pdev->dev, PAGE_SIZE, bp->hwrm_cmd_resp_addr,
2662 bp->hwrm_cmd_resp_dma_addr);
2663
2664 bp->hwrm_cmd_resp_addr = NULL;
2665 if (bp->hwrm_dbg_resp_addr) {
2666 dma_free_coherent(&pdev->dev, HWRM_DBG_REG_BUF_SIZE,
2667 bp->hwrm_dbg_resp_addr,
2668 bp->hwrm_dbg_resp_dma_addr);
2669
2670 bp->hwrm_dbg_resp_addr = NULL;
2671 }
2672}
2673
2674static int bnxt_alloc_hwrm_resources(struct bnxt *bp)
2675{
2676 struct pci_dev *pdev = bp->pdev;
2677
2678 bp->hwrm_cmd_resp_addr = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
2679 &bp->hwrm_cmd_resp_dma_addr,
2680 GFP_KERNEL);
2681 if (!bp->hwrm_cmd_resp_addr)
2682 return -ENOMEM;
2683 bp->hwrm_dbg_resp_addr = dma_alloc_coherent(&pdev->dev,
2684 HWRM_DBG_REG_BUF_SIZE,
2685 &bp->hwrm_dbg_resp_dma_addr,
2686 GFP_KERNEL);
2687 if (!bp->hwrm_dbg_resp_addr)
2688 netdev_warn(bp->dev, "failed to allocate debug register DMA memory\n");
2689
2690 return 0;
2691}
2692
2693static void bnxt_free_stats(struct bnxt *bp)
2694{
2695 u32 size, i;
2696 struct pci_dev *pdev = bp->pdev;
2697
2698 if (bp->hw_rx_port_stats) {
2699 dma_free_coherent(&pdev->dev, bp->hw_port_stats_size,
2700 bp->hw_rx_port_stats,
2701 bp->hw_rx_port_stats_map);
2702 bp->hw_rx_port_stats = NULL;
2703 bp->flags &= ~BNXT_FLAG_PORT_STATS;
2704 }
2705
2706 if (!bp->bnapi)
2707 return;
2708
2709 size = sizeof(struct ctx_hw_stats);
2710
2711 for (i = 0; i < bp->cp_nr_rings; i++) {
2712 struct bnxt_napi *bnapi = bp->bnapi[i];
2713 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2714
2715 if (cpr->hw_stats) {
2716 dma_free_coherent(&pdev->dev, size, cpr->hw_stats,
2717 cpr->hw_stats_map);
2718 cpr->hw_stats = NULL;
2719 }
2720 }
2721}
2722
2723static int bnxt_alloc_stats(struct bnxt *bp)
2724{
2725 u32 size, i;
2726 struct pci_dev *pdev = bp->pdev;
2727
2728 size = sizeof(struct ctx_hw_stats);
2729
2730 for (i = 0; i < bp->cp_nr_rings; i++) {
2731 struct bnxt_napi *bnapi = bp->bnapi[i];
2732 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2733
2734 cpr->hw_stats = dma_alloc_coherent(&pdev->dev, size,
2735 &cpr->hw_stats_map,
2736 GFP_KERNEL);
2737 if (!cpr->hw_stats)
2738 return -ENOMEM;
2739
2740 cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
2741 }
2742
2743 if (BNXT_PF(bp) && bp->chip_num != CHIP_NUM_58700) {
2744 bp->hw_port_stats_size = sizeof(struct rx_port_stats) +
2745 sizeof(struct tx_port_stats) + 1024;
2746
2747 bp->hw_rx_port_stats =
2748 dma_alloc_coherent(&pdev->dev, bp->hw_port_stats_size,
2749 &bp->hw_rx_port_stats_map,
2750 GFP_KERNEL);
2751 if (!bp->hw_rx_port_stats)
2752 return -ENOMEM;
2753
2754 bp->hw_tx_port_stats = (void *)(bp->hw_rx_port_stats + 1) +
2755 512;
2756 bp->hw_tx_port_stats_map = bp->hw_rx_port_stats_map +
2757 sizeof(struct rx_port_stats) + 512;
2758 bp->flags |= BNXT_FLAG_PORT_STATS;
2759 }
2760 return 0;
2761}
2762
2763static void bnxt_clear_ring_indices(struct bnxt *bp)
2764{
2765 int i;
2766
2767 if (!bp->bnapi)
2768 return;
2769
2770 for (i = 0; i < bp->cp_nr_rings; i++) {
2771 struct bnxt_napi *bnapi = bp->bnapi[i];
2772 struct bnxt_cp_ring_info *cpr;
2773 struct bnxt_rx_ring_info *rxr;
2774 struct bnxt_tx_ring_info *txr;
2775
2776 if (!bnapi)
2777 continue;
2778
2779 cpr = &bnapi->cp_ring;
2780 cpr->cp_raw_cons = 0;
2781
2782 txr = bnapi->tx_ring;
2783 if (txr) {
2784 txr->tx_prod = 0;
2785 txr->tx_cons = 0;
2786 }
2787
2788 rxr = bnapi->rx_ring;
2789 if (rxr) {
2790 rxr->rx_prod = 0;
2791 rxr->rx_agg_prod = 0;
2792 rxr->rx_sw_agg_prod = 0;
2793 rxr->rx_next_cons = 0;
2794 }
2795 }
2796}
2797
2798static void bnxt_free_ntp_fltrs(struct bnxt *bp, bool irq_reinit)
2799{
2800#ifdef CONFIG_RFS_ACCEL
2801 int i;
2802
2803 /* Called under rtnl_lock with all our NAPIs disabled.  It is
2804 * safe to delete the hash table.
2805 */
2806 for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
2807 struct hlist_head *head;
2808 struct hlist_node *tmp;
2809 struct bnxt_ntuple_filter *fltr;
2810
2811 head = &bp->ntp_fltr_hash_tbl[i];
2812 hlist_for_each_entry_safe(fltr, tmp, head, hash) {
2813 hlist_del(&fltr->hash);
2814 kfree(fltr);
2815 }
2816 }
2817 if (irq_reinit) {
2818 kfree(bp->ntp_fltr_bmap);
2819 bp->ntp_fltr_bmap = NULL;
2820 }
2821 bp->ntp_fltr_count = 0;
2822#endif
2823}
2824
2825static int bnxt_alloc_ntp_fltrs(struct bnxt *bp)
2826{
2827#ifdef CONFIG_RFS_ACCEL
2828 int i, rc = 0;
2829
2830 if (!(bp->flags & BNXT_FLAG_RFS))
2831 return 0;
2832
2833 for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++)
2834 INIT_HLIST_HEAD(&bp->ntp_fltr_hash_tbl[i]);
2835
2836 bp->ntp_fltr_count = 0;
2837 bp->ntp_fltr_bmap = kcalloc(BITS_TO_LONGS(BNXT_NTP_FLTR_MAX_FLTR),
2838 sizeof(long), GFP_KERNEL);
2839
2840 if (!bp->ntp_fltr_bmap)
2841 rc = -ENOMEM;
2842
2843 return rc;
2844#else
2845 return 0;
2846#endif
2847}
2848
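/* Bitmap sizing note: BITS_TO_LONGS() returns a count of longs, not
 * bytes, so the allocation above must reserve that many longs (one bit
 * per possible ntuple filter, BNXT_NTP_FLTR_MAX_FLTR in total); passing
 * the raw BITS_TO_LONGS() value as a byte count would under-allocate.
 */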
2849static void bnxt_free_mem(struct bnxt *bp, bool irq_re_init)
2850{
2851 bnxt_free_vnic_attributes(bp);
2852 bnxt_free_tx_rings(bp);
2853 bnxt_free_rx_rings(bp);
2854 bnxt_free_cp_rings(bp);
2855 bnxt_free_ntp_fltrs(bp, irq_re_init);
2856 if (irq_re_init) {
2857 bnxt_free_stats(bp);
2858 bnxt_free_ring_grps(bp);
2859 bnxt_free_vnics(bp);
2860 kfree(bp->tx_ring);
2861 bp->tx_ring = NULL;
2862 kfree(bp->rx_ring);
2863 bp->rx_ring = NULL;
2864 kfree(bp->bnapi);
2865 bp->bnapi = NULL;
2866 } else {
2867 bnxt_clear_ring_indices(bp);
2868 }
2869}
2870
2871static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init)
2872{
2873 int i, j, rc, size, arr_size;
2874 void *bnapi;
2875
2876 if (irq_re_init) {
2877 /* Allocate bnapi mem pointer array and mem block for
2878 * all queues
2879 */
2880 arr_size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi *) *
2881 bp->cp_nr_rings);
2882 size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi));
2883 bnapi = kzalloc(arr_size + size * bp->cp_nr_rings, GFP_KERNEL);
2884 if (!bnapi)
2885 return -ENOMEM;
2886
2887 bp->bnapi = bnapi;
2888 bnapi += arr_size;
2889 for (i = 0; i < bp->cp_nr_rings; i++, bnapi += size) {
2890 bp->bnapi[i] = bnapi;
2891 bp->bnapi[i]->index = i;
2892 bp->bnapi[i]->bp = bp;
2893 }
2894
2895 bp->rx_ring = kcalloc(bp->rx_nr_rings,
2896 sizeof(struct bnxt_rx_ring_info),
2897 GFP_KERNEL);
2898 if (!bp->rx_ring)
2899 return -ENOMEM;
2900
2901 for (i = 0; i < bp->rx_nr_rings; i++) {
2902 bp->rx_ring[i].bnapi = bp->bnapi[i];
2903 bp->bnapi[i]->rx_ring = &bp->rx_ring[i];
2904 }
2905
2906 bp->tx_ring = kcalloc(bp->tx_nr_rings,
2907 sizeof(struct bnxt_tx_ring_info),
2908 GFP_KERNEL);
2909 if (!bp->tx_ring)
2910 return -ENOMEM;
2911
2912 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
2913 j = 0;
2914 else
2915 j = bp->rx_nr_rings;
2916
2917 for (i = 0; i < bp->tx_nr_rings; i++, j++) {
2918 bp->tx_ring[i].bnapi = bp->bnapi[j];
2919 bp->bnapi[j]->tx_ring = &bp->tx_ring[i];
2920 }
2921
2922 rc = bnxt_alloc_stats(bp);
2923 if (rc)
2924 goto alloc_mem_err;
2925
2926 rc = bnxt_alloc_ntp_fltrs(bp);
2927 if (rc)
2928 goto alloc_mem_err;
2929
2930 rc = bnxt_alloc_vnics(bp);
2931 if (rc)
2932 goto alloc_mem_err;
2933 }
2934
2935 bnxt_init_ring_struct(bp);
2936
2937 rc = bnxt_alloc_rx_rings(bp);
2938 if (rc)
2939 goto alloc_mem_err;
2940
2941 rc = bnxt_alloc_tx_rings(bp);
2942 if (rc)
2943 goto alloc_mem_err;
2944
2945 rc = bnxt_alloc_cp_rings(bp);
2946 if (rc)
2947 goto alloc_mem_err;
2948
2949 bp->vnic_info[0].flags |= BNXT_VNIC_RSS_FLAG | BNXT_VNIC_MCAST_FLAG |
2950 BNXT_VNIC_UCAST_FLAG;
2951 rc = bnxt_alloc_vnic_attributes(bp);
2952 if (rc)
2953 goto alloc_mem_err;
2954 return 0;
2955
2956alloc_mem_err:
2957 bnxt_free_mem(bp, true);
2958 return rc;
2959}
2960
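/* Memory layout note for the irq_re_init path above: the bnxt_napi
 * pointer array and every bnxt_napi struct come from one kzalloc; the
 * cache-aligned pointer array (arr_size) sits first and the per-ring
 * structs are packed behind it, so bp->bnapi[i] simply points "size"
 * bytes further into the same block for each successive ring.
 */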
2961static void bnxt_disable_int(struct bnxt *bp)
2962{
2963 int i;
2964
2965 if (!bp->bnapi)
2966 return;
2967
2968 for (i = 0; i < bp->cp_nr_rings; i++) {
2969 struct bnxt_napi *bnapi = bp->bnapi[i];
2970 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2971
2972 BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons);
2973 }
2974}
2975
2976static void bnxt_disable_int_sync(struct bnxt *bp)
2977{
2978 int i;
2979
2980 atomic_inc(&bp->intr_sem);
2981
2982 bnxt_disable_int(bp);
2983 for (i = 0; i < bp->cp_nr_rings; i++)
2984 synchronize_irq(bp->irq_tbl[i].vector);
2985}
2986
2987static void bnxt_enable_int(struct bnxt *bp)
2988{
2989 int i;
2990
2991 atomic_set(&bp->intr_sem, 0);
2992 for (i = 0; i < bp->cp_nr_rings; i++) {
2993 struct bnxt_napi *bnapi = bp->bnapi[i];
2994 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2995
2996 BNXT_CP_DB_REARM(cpr->cp_doorbell, cpr->cp_raw_cons);
2997 }
2998}
2999
3000void bnxt_hwrm_cmd_hdr_init(struct bnxt *bp, void *request, u16 req_type,
3001 u16 cmpl_ring, u16 target_id)
3002{
3003 struct input *req = request;
3004
3005 req->req_type = cpu_to_le16(req_type);
3006 req->cmpl_ring = cpu_to_le16(cmpl_ring);
3007 req->target_id = cpu_to_le16(target_id);
3008 req->resp_addr = cpu_to_le64(bp->hwrm_cmd_resp_dma_addr);
3009}
3010
3011static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
3012 int timeout, bool silent)
3013{
3014 int i, intr_process, rc, tmo_count;
3015 struct input *req = msg;
3016 u32 *data = msg;
3017 __le32 *resp_len, *valid;
3018 u16 cp_ring_id, len = 0;
3019 struct hwrm_err_output *resp = bp->hwrm_cmd_resp_addr;
3020
3021 req->seq_id = cpu_to_le16(bp->hwrm_cmd_seq++);
3022 memset(resp, 0, PAGE_SIZE);
3023 cp_ring_id = le16_to_cpu(req->cmpl_ring);
3024 intr_process = (cp_ring_id == INVALID_HW_RING_ID) ? 0 : 1;
3025
3026 /* Write request msg to hwrm channel */
3027 __iowrite32_copy(bp->bar0, data, msg_len / 4);
3028
3029 for (i = msg_len; i < BNXT_HWRM_MAX_REQ_LEN; i += 4)
3030 writel(0, bp->bar0 + i);
3031
3032 /* currently supports only one outstanding message */
3033 if (intr_process)
3034 bp->hwrm_intr_seq_id = le16_to_cpu(req->seq_id);
3035
3036 /* Ring channel doorbell */
3037 writel(1, bp->bar0 + 0x100);
3038
3039 if (!timeout)
3040 timeout = DFLT_HWRM_CMD_TIMEOUT;
3041
3042 i = 0;
3043 tmo_count = timeout * 40;
3044 if (intr_process) {
3045 /* Wait until hwrm response cmpl interrupt is processed */
3046 while (bp->hwrm_intr_seq_id != HWRM_SEQ_ID_INVALID &&
3047 i++ < tmo_count) {
3048 usleep_range(25, 40);
3049 }
3050
3051 if (bp->hwrm_intr_seq_id != HWRM_SEQ_ID_INVALID) {
3052 netdev_err(bp->dev, "Resp cmpl intr err msg: 0x%x\n",
3053 le16_to_cpu(req->req_type));
3054 return -1;
3055 }
3056 } else {
3057 /* Check if response len is updated */
3058 resp_len = bp->hwrm_cmd_resp_addr + HWRM_RESP_LEN_OFFSET;
3059 for (i = 0; i < tmo_count; i++) {
3060 len = (le32_to_cpu(*resp_len) & HWRM_RESP_LEN_MASK) >>
3061 HWRM_RESP_LEN_SFT;
3062 if (len)
3063 break;
3064 usleep_range(25, 40);
3065 }
3066
3067 if (i >= tmo_count) {
3068 netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d\n",
3069 timeout, le16_to_cpu(req->req_type),
3070 le16_to_cpu(req->seq_id), len);
3071 return -1;
3072 }
3073
3074 /* Last word of resp contains valid bit */
3075 valid = bp->hwrm_cmd_resp_addr + len - 4;
3076 for (i = 0; i < 5; i++) {
3077 if (le32_to_cpu(*valid) & HWRM_RESP_VALID_MASK)
3078 break;
3079 udelay(1);
3080 }
3081
3082 if (i >= 5) {
3083 netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d v:%d\n",
3084 timeout, le16_to_cpu(req->req_type),
3085 le16_to_cpu(req->seq_id), len, *valid);
3086 return -1;
3087 }
3088 }
3089
3090 rc = le16_to_cpu(resp->error_code);
3091 if (rc && !silent)
3092 netdev_err(bp->dev, "hwrm req_type 0x%x seq id 0x%x error 0x%x\n",
3093 le16_to_cpu(resp->req_type),
3094 le16_to_cpu(resp->seq_id), rc);
3095 return rc;
3096}
3097
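/* HWRM channel recap for the wrappers below: the request is copied into
 * BAR0 with __iowrite32_copy(), the rest of the maximum request window
 * is zeroed, and writing 1 to BAR0 + 0x100 rings the doorbell.
 * Completion is then detected either on the interrupt path (the handler
 * clears hwrm_intr_seq_id) or by polling the response length and
 * finally the valid bit in the last word of the DMA response buffer.
 */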
3098int _hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout)
3099{
3100 return bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, false);
3101}
3102
3103int hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout)
3104{
3105 int rc;
3106
3107 mutex_lock(&bp->hwrm_cmd_lock);
3108 rc = _hwrm_send_message(bp, msg, msg_len, timeout);
3109 mutex_unlock(&bp->hwrm_cmd_lock);
3110 return rc;
3111}
3112
3113int hwrm_send_message_silent(struct bnxt *bp, void *msg, u32 msg_len,
3114 int timeout)
3115{
3116 int rc;
3117
3118 mutex_lock(&bp->hwrm_cmd_lock);
3119 rc = bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, true);
3120 mutex_unlock(&bp->hwrm_cmd_lock);
3121 return rc;
3122}
3123
3124int bnxt_hwrm_func_rgtr_async_events(struct bnxt *bp, unsigned long *bmap,
3125 int bmap_size)
3126{
3127 struct hwrm_func_drv_rgtr_input req = {0};
3128 DECLARE_BITMAP(async_events_bmap, 256);
3129 u32 *events = (u32 *)async_events_bmap;
3130 int i;
3131
3132 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_RGTR, -1, -1);
3133
3134 req.enables =
3135 cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD);
3136
3137 memset(async_events_bmap, 0, sizeof(async_events_bmap));
3138 for (i = 0; i < ARRAY_SIZE(bnxt_async_events_arr); i++)
3139 __set_bit(bnxt_async_events_arr[i], async_events_bmap);
3140
3141 if (bmap && bmap_size) {
3142 for (i = 0; i < bmap_size; i++) {
3143 if (test_bit(i, bmap))
3144 __set_bit(i, async_events_bmap);
3145 }
3146 }
3147
3148 for (i = 0; i < 8; i++)
3149 req.async_event_fwd[i] |= cpu_to_le32(events[i]);
3150
3151 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3152}
3153
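/* The 256-bit async_events_bmap above reaches firmware as eight
 * little-endian 32-bit words.  A minimal sketch of the packing, using
 * a hypothetical event id:
 *
 *	DECLARE_BITMAP(bmap, 256);
 *	u32 *events = (u32 *)bmap;
 *
 *	bitmap_zero(bmap, 256);
 *	__set_bit(37, bmap);	// hypothetical event id 37
 *	// on a little-endian host, bit 37 lands in events[1] as (1 << 5)
 *	req.async_event_fwd[1] |= cpu_to_le32(events[1]);
 */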
3154static int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp)
3155{
3156 struct hwrm_func_drv_rgtr_input req = {0};
3157
3158 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_RGTR, -1, -1);
3159
3160 req.enables =
3161 cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE |
3162 FUNC_DRV_RGTR_REQ_ENABLES_VER);
3163
3164 req.os_type = cpu_to_le16(FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX);
3165 req.ver_maj = DRV_VER_MAJ;
3166 req.ver_min = DRV_VER_MIN;
3167 req.ver_upd = DRV_VER_UPD;
3168
3169 if (BNXT_PF(bp)) {
3170 DECLARE_BITMAP(vf_req_snif_bmap, 256);
3171 u32 *data = (u32 *)vf_req_snif_bmap;
3172 int i;
3173
3174 memset(vf_req_snif_bmap, 0, sizeof(vf_req_snif_bmap));
3175 for (i = 0; i < ARRAY_SIZE(bnxt_vf_req_snif); i++)
3176 __set_bit(bnxt_vf_req_snif[i], vf_req_snif_bmap);
3177
3178 for (i = 0; i < 8; i++)
3179 req.vf_req_fwd[i] = cpu_to_le32(data[i]);
3180
3181 req.enables |=
3182 cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_VF_REQ_FWD);
3183 }
3184
3185 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3186}
3187
3188static int bnxt_hwrm_func_drv_unrgtr(struct bnxt *bp)
3189{
3190 struct hwrm_func_drv_unrgtr_input req = {0};
3191
3192 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_UNRGTR, -1, -1);
3193 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3194}
3195
3196static int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, u8 tunnel_type)
3197{
3198 u32 rc = 0;
3199 struct hwrm_tunnel_dst_port_free_input req = {0};
3200
3201 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TUNNEL_DST_PORT_FREE, -1, -1);
3202 req.tunnel_type = tunnel_type;
3203
3204 switch (tunnel_type) {
3205 case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN:
3206 req.tunnel_dst_port_id = bp->vxlan_fw_dst_port_id;
3207 break;
3208 case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE:
3209 req.tunnel_dst_port_id = bp->nge_fw_dst_port_id;
3210 break;
3211 default:
3212 break;
3213 }
3214
3215 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3216 if (rc)
3217 netdev_err(bp->dev, "hwrm_tunnel_dst_port_free failed. rc:%d\n",
3218 rc);
3219 return rc;
3220}
3221
3222static int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, __be16 port,
3223 u8 tunnel_type)
3224{
3225 u32 rc = 0;
3226 struct hwrm_tunnel_dst_port_alloc_input req = {0};
3227 struct hwrm_tunnel_dst_port_alloc_output *resp = bp->hwrm_cmd_resp_addr;
3228
3229 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TUNNEL_DST_PORT_ALLOC, -1, -1);
3230
3231 req.tunnel_type = tunnel_type;
3232 req.tunnel_dst_port_val = port;
3233
3234 mutex_lock(&bp->hwrm_cmd_lock);
3235 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3236 if (rc) {
3237 netdev_err(bp->dev, "hwrm_tunnel_dst_port_alloc failed. rc:%d\n",
3238 rc);
3239 goto err_out;
3240 }
3241
3242 switch (tunnel_type) {
3243 case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN:
3244 bp->vxlan_fw_dst_port_id = resp->tunnel_dst_port_id;
3245 break;
3246 case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE:
3247 bp->nge_fw_dst_port_id = resp->tunnel_dst_port_id;
3248 break;
3249 default:
3250 break;
3251 }
3252
3253err_out:
3254 mutex_unlock(&bp->hwrm_cmd_lock);
3255 return rc;
3256}
3257
3258static int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, u16 vnic_id)
3259{
3260 struct hwrm_cfa_l2_set_rx_mask_input req = {0};
3261 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
3262
3263 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_SET_RX_MASK, -1, -1);
3264 req.vnic_id = cpu_to_le32(vnic->fw_vnic_id);
3265
3266 req.num_mc_entries = cpu_to_le32(vnic->mc_list_count);
3267 req.mc_tbl_addr = cpu_to_le64(vnic->mc_list_mapping);
3268 req.mask = cpu_to_le32(vnic->rx_mask);
3269 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3270}
3271
3272#ifdef CONFIG_RFS_ACCEL
3273static int bnxt_hwrm_cfa_ntuple_filter_free(struct bnxt *bp,
3274 struct bnxt_ntuple_filter *fltr)
3275{
3276 struct hwrm_cfa_ntuple_filter_free_input req = {0};
3277
3278 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_NTUPLE_FILTER_FREE, -1, -1);
3279 req.ntuple_filter_id = fltr->filter_id;
3280 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3281}
3282
3283#define BNXT_NTP_FLTR_FLAGS \
3284 (CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID | \
3285 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE | \
3286 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_MACADDR | \
3287 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE | \
3288 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR | \
3289 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR_MASK | \
3290 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR | \
3291 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR_MASK | \
3292 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL | \
3293 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT | \
3294 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT_MASK | \
3295 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT | \
3296 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT_MASK | \
3297 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_ID)
3298
3299static int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
3300 struct bnxt_ntuple_filter *fltr)
3301{
3302 int rc = 0;
3303 struct hwrm_cfa_ntuple_filter_alloc_input req = {0};
3304 struct hwrm_cfa_ntuple_filter_alloc_output *resp =
3305 bp->hwrm_cmd_resp_addr;
3306 struct flow_keys *keys = &fltr->fkeys;
3307 struct bnxt_vnic_info *vnic = &bp->vnic_info[fltr->rxq + 1];
3308
3309 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_NTUPLE_FILTER_ALLOC, -1, -1);
3310 req.l2_filter_id = bp->vnic_info[0].fw_l2_filter_id[fltr->l2_fltr_idx];
3311
3312 req.enables = cpu_to_le32(BNXT_NTP_FLTR_FLAGS);
3313
3314 req.ethertype = htons(ETH_P_IP);
3315 memcpy(req.src_macaddr, fltr->src_mac_addr, ETH_ALEN);
3316 req.ip_addr_type = CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4;
3317 req.ip_protocol = keys->basic.ip_proto;
3318
3319 if (keys->basic.n_proto == htons(ETH_P_IPV6)) {
3320 int i;
3321
3322 req.ethertype = htons(ETH_P_IPV6);
3323 req.ip_addr_type =
3324 CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6;
3325 *(struct in6_addr *)&req.src_ipaddr[0] =
3326 keys->addrs.v6addrs.src;
3327 *(struct in6_addr *)&req.dst_ipaddr[0] =
3328 keys->addrs.v6addrs.dst;
3329 for (i = 0; i < 4; i++) {
3330 req.src_ipaddr_mask[i] = cpu_to_be32(0xffffffff);
3331 req.dst_ipaddr_mask[i] = cpu_to_be32(0xffffffff);
3332 }
3333 } else {
3334 req.src_ipaddr[0] = keys->addrs.v4addrs.src;
3335 req.src_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
3336 req.dst_ipaddr[0] = keys->addrs.v4addrs.dst;
3337 req.dst_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
3338 }
3339
3340 req.src_port = keys->ports.src;
3341 req.src_port_mask = cpu_to_be16(0xffff);
3342 req.dst_port = keys->ports.dst;
3343 req.dst_port_mask = cpu_to_be16(0xffff);
3344
3345 req.dst_id = cpu_to_le16(vnic->fw_vnic_id);
3346 mutex_lock(&bp->hwrm_cmd_lock);
3347 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3348 if (!rc)
3349 fltr->filter_id = resp->ntuple_filter_id;
3350 mutex_unlock(&bp->hwrm_cmd_lock);
3351 return rc;
3352}
3353#endif
3354
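/* IPv6 RFS note for the filter setup above: when flow dissection
 * reports ETH_P_IPV6, the request switches ethertype and ip_addr_type
 * to their IPv6 values and copies the full 128-bit source and
 * destination addresses into req.src_ipaddr[] and req.dst_ipaddr[],
 * setting all four 32-bit words of each mask to 0xffffffff for an
 * exact-match 5-tuple, mirroring the IPv4 path.
 */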
3355static int bnxt_hwrm_set_vnic_filter(struct bnxt *bp, u16 vnic_id, u16 idx,
3356 u8 *mac_addr)
3357{
3358 u32 rc = 0;
3359 struct hwrm_cfa_l2_filter_alloc_input req = {0};
3360 struct hwrm_cfa_l2_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
3361
3362 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_FILTER_ALLOC, -1, -1);
3363 req.flags = cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX);
3364 if (!BNXT_CHIP_TYPE_NITRO_A0(bp))
3365 req.flags |=
3366 cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST);
3367 req.dst_id = cpu_to_le16(bp->vnic_info[vnic_id].fw_vnic_id);
3368 req.enables =
3369 cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR |
3370 CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_ID |
3371 CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK);
3372 memcpy(req.l2_addr, mac_addr, ETH_ALEN);
3373 req.l2_addr_mask[0] = 0xff;
3374 req.l2_addr_mask[1] = 0xff;
3375 req.l2_addr_mask[2] = 0xff;
3376 req.l2_addr_mask[3] = 0xff;
3377 req.l2_addr_mask[4] = 0xff;
3378 req.l2_addr_mask[5] = 0xff;
3379
3380 mutex_lock(&bp->hwrm_cmd_lock);
3381 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3382 if (!rc)
3383 bp->vnic_info[vnic_id].fw_l2_filter_id[idx] =
3384 resp->l2_filter_id;
3385 mutex_unlock(&bp->hwrm_cmd_lock);
3386 return rc;
3387}
3388
3389static int bnxt_hwrm_clear_vnic_filter(struct bnxt *bp)
3390{
3391 u16 i, j, num_of_vnics = 1; /* only vnic 0 supported */
3392 int rc = 0;
3393
3394 /* Any associated ntuple filters will also be cleared by firmware. */
3395 mutex_lock(&bp->hwrm_cmd_lock);
3396 for (i = 0; i < num_of_vnics; i++) {
3397 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
3398
3399 for (j = 0; j < vnic->uc_filter_count; j++) {
3400 struct hwrm_cfa_l2_filter_free_input req = {0};
3401
3402 bnxt_hwrm_cmd_hdr_init(bp, &req,
3403 HWRM_CFA_L2_FILTER_FREE, -1, -1);
3404
3405 req.l2_filter_id = vnic->fw_l2_filter_id[j];
3406
3407 rc = _hwrm_send_message(bp, &req, sizeof(req),
3408 HWRM_CMD_TIMEOUT);
3409 }
3410 vnic->uc_filter_count = 0;
3411 }
3412 mutex_unlock(&bp->hwrm_cmd_lock);
3413
3414 return rc;
3415}
3416
3417static int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, u16 vnic_id, u32 tpa_flags)
3418{
3419 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
3420 struct hwrm_vnic_tpa_cfg_input req = {0};
3421
3422 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_TPA_CFG, -1, -1);
3423
3424 if (tpa_flags) {
3425 u16 mss = bp->dev->mtu - 40;
3426 u32 nsegs, n, segs = 0, flags;
3427
3428 flags = VNIC_TPA_CFG_REQ_FLAGS_TPA |
3429 VNIC_TPA_CFG_REQ_FLAGS_ENCAP_TPA |
3430 VNIC_TPA_CFG_REQ_FLAGS_RSC_WND_UPDATE |
3431 VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_ECN |
3432 VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_SAME_GRE_SEQ;
3433 if (tpa_flags & BNXT_FLAG_GRO)
3434 flags |= VNIC_TPA_CFG_REQ_FLAGS_GRO;
3435
3436 req.flags = cpu_to_le32(flags);
3437
3438 req.enables =
3439 cpu_to_le32(VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_SEGS |
3440 VNIC_TPA_CFG_REQ_ENABLES_MAX_AGGS |
3441 VNIC_TPA_CFG_REQ_ENABLES_MIN_AGG_LEN);
3442
3443 /* The number of aggregation segments is in log2 units, and the
3444 * first packet is not included in this count.
3445 */
3446 if (mss <= BNXT_RX_PAGE_SIZE) {
3447 n = BNXT_RX_PAGE_SIZE / mss;
3448 nsegs = (MAX_SKB_FRAGS - 1) * n;
3449 } else {
3450 n = mss / BNXT_RX_PAGE_SIZE;
3451 if (mss & (BNXT_RX_PAGE_SIZE - 1))
3452 n++;
3453 nsegs = (MAX_SKB_FRAGS - n) / n;
3454 }
3455
3456 segs = ilog2(nsegs);
3457 req.max_agg_segs = cpu_to_le16(segs);
3458 req.max_aggs = cpu_to_le16(VNIC_TPA_CFG_REQ_MAX_AGGS_MAX);
3459
3460 req.min_agg_len = cpu_to_le32(512);
3461 }
3462 req.vnic_id = cpu_to_le16(vnic->fw_vnic_id);
3463
3464 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3465}
3466
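/* Worked example for the max_agg_segs math above (illustrative MTU):
 * with mtu = 1500, mss = 1460 fits in a 4K BNXT_RX_PAGE_SIZE, so
 * n = 4096 / 1460 = 2 buffers per page and, with a typical
 * MAX_SKB_FRAGS of 17, nsegs = (17 - 1) * 2 = 32; ilog2(32) = 5 is
 * what firmware receives, since the field is in log2 units.
 */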
3467static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, u16 vnic_id, bool set_rss)
3468{
3469 u32 i, j, max_rings;
3470 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
3471 struct hwrm_vnic_rss_cfg_input req = {0};
3472
3473 if (vnic->fw_rss_cos_lb_ctx[0] == INVALID_HW_RING_ID)
3474 return 0;
3475
3476 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_CFG, -1, -1);
3477 if (set_rss) {
3478 req.hash_type = cpu_to_le32(bp->rss_hash_cfg);
3479 if (vnic->flags & BNXT_VNIC_RSS_FLAG) {
3480 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
3481 max_rings = bp->rx_nr_rings - 1;
3482 else
3483 max_rings = bp->rx_nr_rings;
3484 } else {
3485 max_rings = 1;
3486 }
3487
3488 /* Fill the RSS indirection table with ring group ids */
3489 for (i = 0, j = 0; i < HW_HASH_INDEX_SIZE; i++, j++) {
3490 if (j == max_rings)
3491 j = 0;
3492 vnic->rss_table[i] = cpu_to_le16(vnic->fw_grp_ids[j]);
3493 }
3494
3495 req.ring_grp_tbl_addr = cpu_to_le64(vnic->rss_table_dma_addr);
3496 req.hash_key_tbl_addr =
3497 cpu_to_le64(vnic->rss_hash_key_dma_addr);
3498 }
3499 req.rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
3500 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3501}
3502
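/* Indirection table fill above, illustrated: with max_rings = 4, the
 * HW_HASH_INDEX_SIZE entries of rss_table become the ring group ids of
 * rings 0, 1, 2, 3, 0, 1, 2, ... so hashed RX flows spread round-robin
 * across the groups; the Nitro A0 case reserves the last ring and
 * therefore distributes over rx_nr_rings - 1 groups instead.
 */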
3503static int bnxt_hwrm_vnic_set_hds(struct bnxt *bp, u16 vnic_id)
3504{
3505 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
3506 struct hwrm_vnic_plcmodes_cfg_input req = {0};
3507
3508 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_PLCMODES_CFG, -1, -1);
3509 req.flags = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT |
3510 VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 |
3511 VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6);
3512 req.enables =
3513 cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID |
3514 VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID);
3515 /* thresholds not implemented in firmware yet */
3516 req.jumbo_thresh = cpu_to_le16(bp->rx_copy_thresh);
3517 req.hds_threshold = cpu_to_le16(bp->rx_copy_thresh);
3518 req.vnic_id = cpu_to_le32(vnic->fw_vnic_id);
3519 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3520}
3521
3522static void bnxt_hwrm_vnic_ctx_free_one(struct bnxt *bp, u16 vnic_id,
3523 u16 ctx_idx)
3524{
3525 struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {0};
3526
3527 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_COS_LB_CTX_FREE, -1, -1);
3528 req.rss_cos_lb_ctx_id =
3529 cpu_to_le16(bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx]);
3530
3531 hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3532 bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] = INVALID_HW_RING_ID;
3533}
3534
3535static void bnxt_hwrm_vnic_ctx_free(struct bnxt *bp)
3536{
3537 int i, j;
3538
3539 for (i = 0; i < bp->nr_vnics; i++) {
3540 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
3541
3542 for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++) {
3543 if (vnic->fw_rss_cos_lb_ctx[j] != INVALID_HW_RING_ID)
3544 bnxt_hwrm_vnic_ctx_free_one(bp, i, j);
3545 }
3546 }
3547 bp->rsscos_nr_ctxs = 0;
3548}
3549
3550 static int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, u16 vnic_id, u16 ctx_idx)
3551{
3552 int rc;
3553 struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {0};
3554 struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp =
3555 bp->hwrm_cmd_resp_addr;
3556
3557 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_COS_LB_CTX_ALLOC, -1,
3558 -1);
3559
3560 mutex_lock(&bp->hwrm_cmd_lock);
3561 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3562 if (!rc)
3563 bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] =
3564 le16_to_cpu(resp->rss_cos_lb_ctx_id);
3565 mutex_unlock(&bp->hwrm_cmd_lock);
3566
3567 return rc;
3568}
3569
3570 int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id)
3571 {
3572 unsigned int ring = 0, grp_idx;
3573 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
3574 struct hwrm_vnic_cfg_input req = {0};
3575 u16 def_vlan = 0;
3576
3577 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_CFG, -1, -1);
3578
3579 req.enables = cpu_to_le32(VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP);
3580 /* Only RSS is supported for now; COS & LB are TBD */
3581 if (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID) {
3582 req.rss_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
3583 req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE |
3584 VNIC_CFG_REQ_ENABLES_MRU);
3585 } else if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG) {
3586 req.rss_rule =
3587 cpu_to_le16(bp->vnic_info[0].fw_rss_cos_lb_ctx[0]);
3588 req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE |
3589 VNIC_CFG_REQ_ENABLES_MRU);
3590 req.flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_RSS_DFLT_CR_MODE);
3591 } else {
3592 req.rss_rule = cpu_to_le16(0xffff);
3593 }
3594
3595 if (BNXT_CHIP_TYPE_NITRO_A0(bp) &&
3596 (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID)) {
3597 req.cos_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[1]);
3598 req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_COS_RULE);
3599 } else {
3600 req.cos_rule = cpu_to_le16(0xffff);
3601 }
3602
3603 if (vnic->flags & BNXT_VNIC_RSS_FLAG)
3604 ring = 0;
3605 else if (vnic->flags & BNXT_VNIC_RFS_FLAG)
3606 ring = vnic_id - 1;
3607 else if ((vnic_id == 1) && BNXT_CHIP_TYPE_NITRO_A0(bp))
3608 ring = bp->rx_nr_rings - 1;
3609
3610 grp_idx = bp->rx_ring[ring].bnapi->index;
3611 req.vnic_id = cpu_to_le16(vnic->fw_vnic_id);
3612 req.dflt_ring_grp = cpu_to_le16(bp->grp_info[grp_idx].fw_grp_id);
3613
3614 req.lb_rule = cpu_to_le16(0xffff);
3615 req.mru = cpu_to_le16(bp->dev->mtu + ETH_HLEN + ETH_FCS_LEN +
3616 VLAN_HLEN);
3617
3618#ifdef CONFIG_BNXT_SRIOV
3619 if (BNXT_VF(bp))
3620 def_vlan = bp->vf.vlan;
3621#endif
3622 if ((bp->flags & BNXT_FLAG_STRIP_VLAN) || def_vlan)
3623 req.flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE);
3624 if (!vnic_id && bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP))
3625 req.flags |=
3626 cpu_to_le32(VNIC_CFG_REQ_FLAGS_ROCE_DUAL_VNIC_MODE);
3627
3628 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3629}
3630
3631static int bnxt_hwrm_vnic_free_one(struct bnxt *bp, u16 vnic_id)
3632{
3633 u32 rc = 0;
3634
3635 if (bp->vnic_info[vnic_id].fw_vnic_id != INVALID_HW_RING_ID) {
3636 struct hwrm_vnic_free_input req = {0};
3637
3638 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_FREE, -1, -1);
3639 req.vnic_id =
3640 cpu_to_le32(bp->vnic_info[vnic_id].fw_vnic_id);
3641
3642 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3643 if (rc)
3644 return rc;
3645 bp->vnic_info[vnic_id].fw_vnic_id = INVALID_HW_RING_ID;
3646 }
3647 return rc;
3648}
3649
3650static void bnxt_hwrm_vnic_free(struct bnxt *bp)
3651{
3652 u16 i;
3653
3654 for (i = 0; i < bp->nr_vnics; i++)
3655 bnxt_hwrm_vnic_free_one(bp, i);
3656}
3657
3658static int bnxt_hwrm_vnic_alloc(struct bnxt *bp, u16 vnic_id,
3659 unsigned int start_rx_ring_idx,
3660 unsigned int nr_rings)
3661{
3662 int rc = 0;
3663 unsigned int i, j, grp_idx, end_idx = start_rx_ring_idx + nr_rings;
3664 struct hwrm_vnic_alloc_input req = {0};
3665 struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr;
3666
3667 /* map ring groups to this vnic */
3668 for (i = start_rx_ring_idx, j = 0; i < end_idx; i++, j++) {
3669 grp_idx = bp->rx_ring[i].bnapi->index;
3670 if (bp->grp_info[grp_idx].fw_grp_id == INVALID_HW_RING_ID) {
3671 netdev_err(bp->dev, "Not enough ring groups avail:%x req:%x\n",
3672 j, nr_rings);
3673 break;
3674 }
3675 bp->vnic_info[vnic_id].fw_grp_ids[j] =
3676 bp->grp_info[grp_idx].fw_grp_id;
3677 }
3678
3679 bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[0] = INVALID_HW_RING_ID;
3680 bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[1] = INVALID_HW_RING_ID;
3681 if (vnic_id == 0)
3682 req.flags = cpu_to_le32(VNIC_ALLOC_REQ_FLAGS_DEFAULT);
3683
3684 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_ALLOC, -1, -1);
3685
3686 mutex_lock(&bp->hwrm_cmd_lock);
3687 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3688 if (!rc)
3689 bp->vnic_info[vnic_id].fw_vnic_id = le32_to_cpu(resp->vnic_id);
3690 mutex_unlock(&bp->hwrm_cmd_lock);
3691 return rc;
3692}
3693
3694static int bnxt_hwrm_vnic_qcaps(struct bnxt *bp)
3695{
3696 struct hwrm_vnic_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
3697 struct hwrm_vnic_qcaps_input req = {0};
3698 int rc;
3699
3700 if (bp->hwrm_spec_code < 0x10600)
3701 return 0;
3702
3703 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_QCAPS, -1, -1);
3704 mutex_lock(&bp->hwrm_cmd_lock);
3705 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3706 if (!rc) {
3707 if (resp->flags &
3708 cpu_to_le32(VNIC_QCAPS_RESP_FLAGS_RSS_DFLT_CR_CAP))
3709 bp->flags |= BNXT_FLAG_NEW_RSS_CAP;
3710 }
3711 mutex_unlock(&bp->hwrm_cmd_lock);
3712 return rc;
3713}
3714
3715static int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp)
3716{
3717 u16 i;
3718 u32 rc = 0;
3719
3720 mutex_lock(&bp->hwrm_cmd_lock);
3721 for (i = 0; i < bp->rx_nr_rings; i++) {
3722 struct hwrm_ring_grp_alloc_input req = {0};
3723 struct hwrm_ring_grp_alloc_output *resp =
3724 bp->hwrm_cmd_resp_addr;
3725 unsigned int grp_idx = bp->rx_ring[i].bnapi->index;
3726
3727 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_GRP_ALLOC, -1, -1);
3728
3729 req.cr = cpu_to_le16(bp->grp_info[grp_idx].cp_fw_ring_id);
3730 req.rr = cpu_to_le16(bp->grp_info[grp_idx].rx_fw_ring_id);
3731 req.ar = cpu_to_le16(bp->grp_info[grp_idx].agg_fw_ring_id);
3732 req.sc = cpu_to_le16(bp->grp_info[grp_idx].fw_stats_ctx);
c0c050c5
MC
3733
3734 rc = _hwrm_send_message(bp, &req, sizeof(req),
3735 HWRM_CMD_TIMEOUT);
3736 if (rc)
3737 break;
3738
3739 bp->grp_info[grp_idx].fw_grp_id =
3740 le32_to_cpu(resp->ring_group_id);
3741 }
3742 mutex_unlock(&bp->hwrm_cmd_lock);
3743 return rc;
3744}
3745
3746static int bnxt_hwrm_ring_grp_free(struct bnxt *bp)
3747{
3748 u16 i;
3749 int rc = 0;
3750 struct hwrm_ring_grp_free_input req = {0};
3751
3752 if (!bp->grp_info)
3753 return 0;
3754
3755 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_GRP_FREE, -1, -1);
3756
3757 mutex_lock(&bp->hwrm_cmd_lock);
3758 for (i = 0; i < bp->cp_nr_rings; i++) {
3759 if (bp->grp_info[i].fw_grp_id == INVALID_HW_RING_ID)
3760 continue;
3761 req.ring_group_id =
3762 cpu_to_le32(bp->grp_info[i].fw_grp_id);
3763
3764 rc = _hwrm_send_message(bp, &req, sizeof(req),
3765 HWRM_CMD_TIMEOUT);
3766 if (rc)
3767 break;
3768 bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
3769 }
3770 mutex_unlock(&bp->hwrm_cmd_lock);
3771 return rc;
3772}
3773
3774static int hwrm_ring_alloc_send_msg(struct bnxt *bp,
3775 struct bnxt_ring_struct *ring,
3776 u32 ring_type, u32 map_index,
3777 u32 stats_ctx_id)
3778{
3779 int rc = 0, err = 0;
3780 struct hwrm_ring_alloc_input req = {0};
3781 struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr;
3782 u16 ring_id;
3783
3784 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_ALLOC, -1, -1);
3785
3786 req.enables = 0;
3787 if (ring->nr_pages > 1) {
3788 req.page_tbl_addr = cpu_to_le64(ring->pg_tbl_map);
3789 /* Page size is in log2 units */
3790 req.page_size = BNXT_PAGE_SHIFT;
3791 req.page_tbl_depth = 1;
3792 } else {
3793 req.page_tbl_addr = cpu_to_le64(ring->dma_arr[0]);
3794 }
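/* Example (illustrative): if BNXT_PAGE_SHIFT is 12, page_size tells the
 * firmware that the page table entries point to 4K (1 << 12) pages.
 */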
3795 req.fbo = 0;
3796 /* Association of ring index with doorbell index and MSIX number */
3797 req.logical_id = cpu_to_le16(map_index);
3798
3799 switch (ring_type) {
3800 case HWRM_RING_ALLOC_TX:
3801 req.ring_type = RING_ALLOC_REQ_RING_TYPE_TX;
3802 /* Association of transmit ring with completion ring */
3803 req.cmpl_ring_id =
3804 cpu_to_le16(bp->grp_info[map_index].cp_fw_ring_id);
3805 req.length = cpu_to_le32(bp->tx_ring_mask + 1);
3806 req.stat_ctx_id = cpu_to_le32(stats_ctx_id);
3807 req.queue_id = cpu_to_le16(ring->queue_id);
3808 break;
3809 case HWRM_RING_ALLOC_RX:
3810 req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
3811 req.length = cpu_to_le32(bp->rx_ring_mask + 1);
3812 break;
3813 case HWRM_RING_ALLOC_AGG:
3814 req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
3815 req.length = cpu_to_le32(bp->rx_agg_ring_mask + 1);
3816 break;
3817 case HWRM_RING_ALLOC_CMPL:
3818 req.ring_type = RING_ALLOC_REQ_RING_TYPE_CMPL;
3819 req.length = cpu_to_le32(bp->cp_ring_mask + 1);
3820 if (bp->flags & BNXT_FLAG_USING_MSIX)
3821 req.int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
3822 break;
3823 default:
3824 netdev_err(bp->dev, "hwrm alloc invalid ring type %d\n",
3825 ring_type);
3826 return -1;
3827 }
3828
3829 mutex_lock(&bp->hwrm_cmd_lock);
3830 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3831 err = le16_to_cpu(resp->error_code);
3832 ring_id = le16_to_cpu(resp->ring_id);
3833 mutex_unlock(&bp->hwrm_cmd_lock);
3834
3835 if (rc || err) {
3836 switch (ring_type) {
3837 case RING_FREE_REQ_RING_TYPE_CMPL:
3838 netdev_err(bp->dev, "hwrm_ring_alloc cp failed. rc:%x err:%x\n",
3839 rc, err);
3840 return -1;
3841
3842 case RING_FREE_REQ_RING_TYPE_RX:
3843 netdev_err(bp->dev, "hwrm_ring_alloc rx failed. rc:%x err:%x\n",
3844 rc, err);
3845 return -1;
3846
3847 case RING_FREE_REQ_RING_TYPE_TX:
3848 netdev_err(bp->dev, "hwrm_ring_alloc tx failed. rc:%x err:%x\n",
3849 rc, err);
3850 return -1;
3851
3852 default:
3853 netdev_err(bp->dev, "Invalid ring\n");
3854 return -1;
3855 }
3856 }
3857 ring->fw_ring_id = ring_id;
3858 return rc;
3859}
3860
3861static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
3862{
3863 int i, rc = 0;
3864
edd0c2cc
MC
3865 for (i = 0; i < bp->cp_nr_rings; i++) {
3866 struct bnxt_napi *bnapi = bp->bnapi[i];
3867 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
3868 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
c0c050c5 3869
33e52d88 3870 cpr->cp_doorbell = bp->bar1 + i * 0x80;
edd0c2cc
MC
3871 rc = hwrm_ring_alloc_send_msg(bp, ring, HWRM_RING_ALLOC_CMPL, i,
3872 INVALID_STATS_CTX_ID);
3873 if (rc)
3874 goto err_out;
edd0c2cc
MC
3875 BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons);
3876 bp->grp_info[i].cp_fw_ring_id = ring->fw_ring_id;
c0c050c5
MC
3877 }
3878
edd0c2cc 3879 for (i = 0; i < bp->tx_nr_rings; i++) {
b6ab4b01 3880 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
edd0c2cc 3881 struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
b81a90d3
MC
3882 u32 map_idx = txr->bnapi->index;
3883 u16 fw_stats_ctx = bp->grp_info[map_idx].fw_stats_ctx;
c0c050c5 3884
b81a90d3
MC
3885 rc = hwrm_ring_alloc_send_msg(bp, ring, HWRM_RING_ALLOC_TX,
3886 map_idx, fw_stats_ctx);
edd0c2cc
MC
3887 if (rc)
3888 goto err_out;
b81a90d3 3889 txr->tx_doorbell = bp->bar1 + map_idx * 0x80;
c0c050c5
MC
3890 }
3891
edd0c2cc 3892 for (i = 0; i < bp->rx_nr_rings; i++) {
b6ab4b01 3893 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
edd0c2cc 3894 struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
b81a90d3 3895 u32 map_idx = rxr->bnapi->index;
c0c050c5 3896
b81a90d3
MC
3897 rc = hwrm_ring_alloc_send_msg(bp, ring, HWRM_RING_ALLOC_RX,
3898 map_idx, INVALID_STATS_CTX_ID);
edd0c2cc
MC
3899 if (rc)
3900 goto err_out;
b81a90d3 3901 rxr->rx_doorbell = bp->bar1 + map_idx * 0x80;
edd0c2cc 3902 writel(DB_KEY_RX | rxr->rx_prod, rxr->rx_doorbell);
b81a90d3 3903 bp->grp_info[map_idx].rx_fw_ring_id = ring->fw_ring_id;
c0c050c5
MC
3904 }
3905
3906 if (bp->flags & BNXT_FLAG_AGG_RINGS) {
3907 for (i = 0; i < bp->rx_nr_rings; i++) {
b6ab4b01 3908 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
c0c050c5
MC
3909 struct bnxt_ring_struct *ring =
3910 &rxr->rx_agg_ring_struct;
b81a90d3
MC
3911 u32 grp_idx = rxr->bnapi->index;
3912 u32 map_idx = grp_idx + bp->rx_nr_rings;
c0c050c5
MC
3913
3914 rc = hwrm_ring_alloc_send_msg(bp, ring,
3915 HWRM_RING_ALLOC_AGG,
b81a90d3 3916 map_idx,
c0c050c5
MC
3917 INVALID_STATS_CTX_ID);
3918 if (rc)
3919 goto err_out;
3920
b81a90d3 3921 rxr->rx_agg_doorbell = bp->bar1 + map_idx * 0x80;
c0c050c5
MC
3922 writel(DB_KEY_RX | rxr->rx_agg_prod,
3923 rxr->rx_agg_doorbell);
b81a90d3 3924 bp->grp_info[grp_idx].agg_fw_ring_id = ring->fw_ring_id;
c0c050c5
MC
3925 }
3926 }
3927err_out:
3928 return rc;
3929}
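/* Doorbell layout, as derived from the writes above: BAR1 holds one
 * 0x80-byte doorbell region per logical index, so completion, tx, rx and
 * agg rings all ring their doorbells at bar1 + map_idx * 0x80.
 */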
3930
3931static int hwrm_ring_free_send_msg(struct bnxt *bp,
3932 struct bnxt_ring_struct *ring,
3933 u32 ring_type, int cmpl_ring_id)
3934{
3935 int rc;
3936 struct hwrm_ring_free_input req = {0};
3937 struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr;
3938 u16 error_code;
3939
74608fc9 3940 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_FREE, cmpl_ring_id, -1);
c0c050c5
MC
3941 req.ring_type = ring_type;
3942 req.ring_id = cpu_to_le16(ring->fw_ring_id);
3943
3944 mutex_lock(&bp->hwrm_cmd_lock);
3945 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3946 error_code = le16_to_cpu(resp->error_code);
3947 mutex_unlock(&bp->hwrm_cmd_lock);
3948
3949 if (rc || error_code) {
3950 switch (ring_type) {
3951 case RING_FREE_REQ_RING_TYPE_CMPL:
3952 netdev_err(bp->dev, "hwrm_ring_free cp failed. rc:%d\n",
3953 rc);
3954 return rc;
3955 case RING_FREE_REQ_RING_TYPE_RX:
3956 netdev_err(bp->dev, "hwrm_ring_free rx failed. rc:%d\n",
3957 rc);
3958 return rc;
3959 case RING_FREE_REQ_RING_TYPE_TX:
3960 netdev_err(bp->dev, "hwrm_ring_free tx failed. rc:%d\n",
3961 rc);
3962 return rc;
3963 default:
3964 netdev_err(bp->dev, "Invalid ring\n");
3965 return -1;
3966 }
3967 }
3968 return 0;
3969}
3970
edd0c2cc 3971static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
c0c050c5 3972{
edd0c2cc 3973 int i;
c0c050c5
MC
3974
3975 if (!bp->bnapi)
edd0c2cc 3976 return;
c0c050c5 3977
edd0c2cc 3978 for (i = 0; i < bp->tx_nr_rings; i++) {
b6ab4b01 3979 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
edd0c2cc 3980 struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
b81a90d3
MC
3981 u32 grp_idx = txr->bnapi->index;
3982 u32 cmpl_ring_id = bp->grp_info[grp_idx].cp_fw_ring_id;
edd0c2cc
MC
3983
3984 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
3985 hwrm_ring_free_send_msg(bp, ring,
3986 RING_FREE_REQ_RING_TYPE_TX,
3987 close_path ? cmpl_ring_id :
3988 INVALID_HW_RING_ID);
3989 ring->fw_ring_id = INVALID_HW_RING_ID;
c0c050c5
MC
3990 }
3991 }
3992
edd0c2cc 3993 for (i = 0; i < bp->rx_nr_rings; i++) {
b6ab4b01 3994 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
edd0c2cc 3995 struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
b81a90d3
MC
3996 u32 grp_idx = rxr->bnapi->index;
3997 u32 cmpl_ring_id = bp->grp_info[grp_idx].cp_fw_ring_id;
edd0c2cc
MC
3998
3999 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
4000 hwrm_ring_free_send_msg(bp, ring,
4001 RING_FREE_REQ_RING_TYPE_RX,
4002 close_path ? cmpl_ring_id :
4003 INVALID_HW_RING_ID);
4004 ring->fw_ring_id = INVALID_HW_RING_ID;
b81a90d3
MC
4005 bp->grp_info[grp_idx].rx_fw_ring_id =
4006 INVALID_HW_RING_ID;
c0c050c5
MC
4007 }
4008 }
4009
edd0c2cc 4010 for (i = 0; i < bp->rx_nr_rings; i++) {
b6ab4b01 4011 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
edd0c2cc 4012 struct bnxt_ring_struct *ring = &rxr->rx_agg_ring_struct;
b81a90d3
MC
4013 u32 grp_idx = rxr->bnapi->index;
4014 u32 cmpl_ring_id = bp->grp_info[grp_idx].cp_fw_ring_id;
edd0c2cc
MC
4015
4016 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
4017 hwrm_ring_free_send_msg(bp, ring,
4018 RING_FREE_REQ_RING_TYPE_RX,
4019 close_path ? cmpl_ring_id :
4020 INVALID_HW_RING_ID);
4021 ring->fw_ring_id = INVALID_HW_RING_ID;
b81a90d3
MC
4022 bp->grp_info[grp_idx].agg_fw_ring_id =
4023 INVALID_HW_RING_ID;
c0c050c5
MC
4024 }
4025 }
4026
9d8bc097
MC
4027 /* The completion rings are about to be freed. After that the
4028 * IRQ doorbell will not work anymore. So we need to disable
4029 * IRQ here.
4030 */
4031 bnxt_disable_int_sync(bp);
4032
edd0c2cc
MC
4033 for (i = 0; i < bp->cp_nr_rings; i++) {
4034 struct bnxt_napi *bnapi = bp->bnapi[i];
4035 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
4036 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
4037
4038 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
4039 hwrm_ring_free_send_msg(bp, ring,
4040 RING_FREE_REQ_RING_TYPE_CMPL,
4041 INVALID_HW_RING_ID);
4042 ring->fw_ring_id = INVALID_HW_RING_ID;
4043 bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
c0c050c5
MC
4044 }
4045 }
c0c050c5
MC
4046}
4047
bb053f52
MC
4048static void bnxt_hwrm_set_coal_params(struct bnxt *bp, u32 max_bufs,
4049 u32 buf_tmrs, u16 flags,
4050 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
4051{
4052 req->flags = cpu_to_le16(flags);
4053 req->num_cmpl_dma_aggr = cpu_to_le16((u16)max_bufs);
4054 req->num_cmpl_dma_aggr_during_int = cpu_to_le16(max_bufs >> 16);
4055 req->cmpl_aggr_dma_tmr = cpu_to_le16((u16)buf_tmrs);
4056 req->cmpl_aggr_dma_tmr_during_int = cpu_to_le16(buf_tmrs >> 16);
4057 /* Minimum time between 2 interrupts set to buf_tmr x 2 */
4058 req->int_lat_tmr_min = cpu_to_le16((u16)buf_tmrs * 2);
4059 req->int_lat_tmr_max = cpu_to_le16((u16)buf_tmrs * 4);
4060 req->num_cmpl_aggr_int = cpu_to_le16((u16)max_bufs * 4);
4061}
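/* Worked example (illustrative): max_bufs = (8 << 16) | 4 and
 * buf_tmrs = (32 << 16) | 16 request DMA after 4 completions (8 while an
 * interrupt is pending) with a 16-tick aggregation timer (32 during
 * interrupt), giving int_lat_tmr_min = 32, int_lat_tmr_max = 64 and
 * num_cmpl_aggr_int = 16.
 */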
4062
c0c050c5
MC
4063int bnxt_hwrm_set_coal(struct bnxt *bp)
4064{
4065 int i, rc = 0;
dfc9c94a
MC
4066 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req_rx = {0},
4067 req_tx = {0}, *req;
c0c050c5
MC
4068 u16 max_buf, max_buf_irq;
4069 u16 buf_tmr, buf_tmr_irq;
4070 u32 flags;
4071
dfc9c94a
MC
4072 bnxt_hwrm_cmd_hdr_init(bp, &req_rx,
4073 HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1);
4074 bnxt_hwrm_cmd_hdr_init(bp, &req_tx,
4075 HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1);
c0c050c5 4076
dfb5b894
MC
4077 /* Each rx completion (2 records) should be DMAed immediately.
4078 * DMA 1/4 of the completion buffers at a time.
4079 */
4080 max_buf = min_t(u16, bp->rx_coal_bufs / 4, 2);
c0c050c5
MC
4081 /* max_buf must not be zero */
4082 max_buf = clamp_t(u16, max_buf, 1, 63);
dfb5b894
MC
4083 max_buf_irq = clamp_t(u16, bp->rx_coal_bufs_irq, 1, 63);
4084 buf_tmr = BNXT_USEC_TO_COAL_TIMER(bp->rx_coal_ticks);
4085 /* buf timer set to 1/4 of interrupt timer */
4086 buf_tmr = max_t(u16, buf_tmr / 4, 1);
4087 buf_tmr_irq = BNXT_USEC_TO_COAL_TIMER(bp->rx_coal_ticks_irq);
4088 buf_tmr_irq = max_t(u16, buf_tmr_irq, 1);
c0c050c5
MC
4089
4090 flags = RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;
4091
4092 /* RING_IDLE generates more IRQs for lower latency. Enable it only
4093 * if coal_ticks is less than 25 us.
4094 */
dfb5b894 4095 if (bp->rx_coal_ticks < 25)
c0c050c5
MC
4096 flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_RING_IDLE;
4097
bb053f52 4098 bnxt_hwrm_set_coal_params(bp, max_buf_irq << 16 | max_buf,
dfc9c94a
MC
4099 buf_tmr_irq << 16 | buf_tmr, flags, &req_rx);
4100
4101 /* max_buf must not be zero */
4102 max_buf = clamp_t(u16, bp->tx_coal_bufs, 1, 63);
4103 max_buf_irq = clamp_t(u16, bp->tx_coal_bufs_irq, 1, 63);
4104 buf_tmr = BNXT_USEC_TO_COAL_TIMER(bp->tx_coal_ticks);
4105 /* buf timer set to 1/4 of interrupt timer */
4106 buf_tmr = max_t(u16, buf_tmr / 4, 1);
4107 buf_tmr_irq = BNXT_USEC_TO_COAL_TIMER(bp->tx_coal_ticks_irq);
4108 buf_tmr_irq = max_t(u16, buf_tmr_irq, 1);
4109
4110 flags = RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;
4111 bnxt_hwrm_set_coal_params(bp, max_buf_irq << 16 | max_buf,
4112 buf_tmr_irq << 16 | buf_tmr, flags, &req_tx);
c0c050c5
MC
4113
4114 mutex_lock(&bp->hwrm_cmd_lock);
4115 for (i = 0; i < bp->cp_nr_rings; i++) {
dfc9c94a 4116 struct bnxt_napi *bnapi = bp->bnapi[i];
c0c050c5 4117
dfc9c94a
MC
4118 req = &req_rx;
4119 if (!bnapi->rx_ring)
4120 req = &req_tx;
4121 req->ring_id = cpu_to_le16(bp->grp_info[i].cp_fw_ring_id);
4122
4123 rc = _hwrm_send_message(bp, req, sizeof(*req),
c0c050c5
MC
4124 HWRM_CMD_TIMEOUT);
4125 if (rc)
4126 break;
4127 }
4128 mutex_unlock(&bp->hwrm_cmd_lock);
4129 return rc;
4130}
4131
4132static int bnxt_hwrm_stat_ctx_free(struct bnxt *bp)
4133{
4134 int rc = 0, i;
4135 struct hwrm_stat_ctx_free_input req = {0};
4136
4137 if (!bp->bnapi)
4138 return 0;
4139
3e8060fa
PS
4140 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
4141 return 0;
4142
c0c050c5
MC
4143 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_FREE, -1, -1);
4144
4145 mutex_lock(&bp->hwrm_cmd_lock);
4146 for (i = 0; i < bp->cp_nr_rings; i++) {
4147 struct bnxt_napi *bnapi = bp->bnapi[i];
4148 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
4149
4150 if (cpr->hw_stats_ctx_id != INVALID_STATS_CTX_ID) {
4151 req.stat_ctx_id = cpu_to_le32(cpr->hw_stats_ctx_id);
4152
4153 rc = _hwrm_send_message(bp, &req, sizeof(req),
4154 HWRM_CMD_TIMEOUT);
4155 if (rc)
4156 break;
4157
4158 cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
4159 }
4160 }
4161 mutex_unlock(&bp->hwrm_cmd_lock);
4162 return rc;
4163}
4164
4165static int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp)
4166{
4167 int rc = 0, i;
4168 struct hwrm_stat_ctx_alloc_input req = {0};
4169 struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr;
4170
3e8060fa
PS
4171 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
4172 return 0;
4173
c0c050c5
MC
4174 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_ALLOC, -1, -1);
4175
51f30785 4176 req.update_period_ms = cpu_to_le32(bp->stats_coal_ticks / 1000);
c0c050c5
MC
4177
4178 mutex_lock(&bp->hwrm_cmd_lock);
4179 for (i = 0; i < bp->cp_nr_rings; i++) {
4180 struct bnxt_napi *bnapi = bp->bnapi[i];
4181 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
4182
4183 req.stats_dma_addr = cpu_to_le64(cpr->hw_stats_map);
4184
4185 rc = _hwrm_send_message(bp, &req, sizeof(req),
4186 HWRM_CMD_TIMEOUT);
4187 if (rc)
4188 break;
4189
4190 cpr->hw_stats_ctx_id = le32_to_cpu(resp->stat_ctx_id);
4191
4192 bp->grp_info[i].fw_stats_ctx = cpr->hw_stats_ctx_id;
4193 }
4194 mutex_unlock(&bp->hwrm_cmd_lock);
89aa8445 4195 return rc;
c0c050c5
MC
4196}
4197
cf6645f8
MC
4198static int bnxt_hwrm_func_qcfg(struct bnxt *bp)
4199{
4200 struct hwrm_func_qcfg_input req = {0};
567b2abe 4201 struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
cf6645f8
MC
4202 int rc;
4203
4204 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1);
4205 req.fid = cpu_to_le16(0xffff);
4206 mutex_lock(&bp->hwrm_cmd_lock);
4207 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4208 if (rc)
4209 goto func_qcfg_exit;
4210
4211#ifdef CONFIG_BNXT_SRIOV
4212 if (BNXT_VF(bp)) {
cf6645f8
MC
4213 struct bnxt_vf_info *vf = &bp->vf;
4214
4215 vf->vlan = le16_to_cpu(resp->vlan) & VLAN_VID_MASK;
4216 }
4217#endif
567b2abe
SB
4218 switch (resp->port_partition_type) {
4219 case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0:
4220 case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_5:
4221 case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR2_0:
4222 bp->port_partition_type = resp->port_partition_type;
4223 break;
4224 }
cf6645f8
MC
4225
4226func_qcfg_exit:
4227 mutex_unlock(&bp->hwrm_cmd_lock);
4228 return rc;
4229}
4230
7b08f661 4231static int bnxt_hwrm_func_qcaps(struct bnxt *bp)
c0c050c5
MC
4232{
4233 int rc = 0;
4234 struct hwrm_func_qcaps_input req = {0};
4235 struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
4236
4237 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCAPS, -1, -1);
4238 req.fid = cpu_to_le16(0xffff);
4239
4240 mutex_lock(&bp->hwrm_cmd_lock);
4241 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4242 if (rc)
4243 goto hwrm_func_qcaps_exit;
4244
e4060d30
MC
4245 if (resp->flags & cpu_to_le32(FUNC_QCAPS_RESP_FLAGS_ROCE_V1_SUPPORTED))
4246 bp->flags |= BNXT_FLAG_ROCEV1_CAP;
4247 if (resp->flags & cpu_to_le32(FUNC_QCAPS_RESP_FLAGS_ROCE_V2_SUPPORTED))
4248 bp->flags |= BNXT_FLAG_ROCEV2_CAP;
4249
7cc5a20e
MC
4250 bp->tx_push_thresh = 0;
4251 if (resp->flags &
4252 cpu_to_le32(FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED))
4253 bp->tx_push_thresh = BNXT_TX_PUSH_THRESH;
4254
c0c050c5
MC
4255 if (BNXT_PF(bp)) {
4256 struct bnxt_pf_info *pf = &bp->pf;
4257
4258 pf->fw_fid = le16_to_cpu(resp->fid);
4259 pf->port_id = le16_to_cpu(resp->port_id);
87027db1 4260 bp->dev->dev_port = pf->port_id;
11f15ed3 4261 memcpy(pf->mac_addr, resp->mac_address, ETH_ALEN);
bdd4347b 4262 memcpy(bp->dev->dev_addr, pf->mac_addr, ETH_ALEN);
c0c050c5
MC
4263 pf->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
4264 pf->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
4265 pf->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
c0c050c5 4266 pf->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
b72d4a68
MC
4267 pf->max_hw_ring_grps = le32_to_cpu(resp->max_hw_ring_grps);
4268 if (!pf->max_hw_ring_grps)
4269 pf->max_hw_ring_grps = pf->max_tx_rings;
c0c050c5
MC
4270 pf->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
4271 pf->max_vnics = le16_to_cpu(resp->max_vnics);
4272 pf->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
4273 pf->first_vf_id = le16_to_cpu(resp->first_vf_id);
4274 pf->max_vfs = le16_to_cpu(resp->max_vfs);
4275 pf->max_encap_records = le32_to_cpu(resp->max_encap_records);
4276 pf->max_decap_records = le32_to_cpu(resp->max_decap_records);
4277 pf->max_tx_em_flows = le32_to_cpu(resp->max_tx_em_flows);
4278 pf->max_tx_wm_flows = le32_to_cpu(resp->max_tx_wm_flows);
4279 pf->max_rx_em_flows = le32_to_cpu(resp->max_rx_em_flows);
4280 pf->max_rx_wm_flows = le32_to_cpu(resp->max_rx_wm_flows);
4281 } else {
379a80a1 4282#ifdef CONFIG_BNXT_SRIOV
c0c050c5
MC
4283 struct bnxt_vf_info *vf = &bp->vf;
4284
4285 vf->fw_fid = le16_to_cpu(resp->fid);
c0c050c5
MC
4286
4287 vf->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
4288 vf->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
4289 vf->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
4290 vf->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
b72d4a68
MC
4291 vf->max_hw_ring_grps = le32_to_cpu(resp->max_hw_ring_grps);
4292 if (!vf->max_hw_ring_grps)
4293 vf->max_hw_ring_grps = vf->max_tx_rings;
c0c050c5
MC
4294 vf->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
4295 vf->max_vnics = le16_to_cpu(resp->max_vnics);
4296 vf->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
7cc5a20e
MC
4297
4298 memcpy(vf->mac_addr, resp->mac_address, ETH_ALEN);
001154eb
MC
4299 mutex_unlock(&bp->hwrm_cmd_lock);
4300
4301 if (is_valid_ether_addr(vf->mac_addr)) {
7cc5a20e
MC
4302 /* overwrite netdev dev_addr with the admin VF MAC */
4303 memcpy(bp->dev->dev_addr, vf->mac_addr, ETH_ALEN);
001154eb 4304 } else {
7cc5a20e 4305 random_ether_addr(bp->dev->dev_addr);
001154eb
MC
4306 rc = bnxt_approve_mac(bp, bp->dev->dev_addr);
4307 }
4308 return rc;
379a80a1 4309#endif
c0c050c5
MC
4310 }
4311
c0c050c5
MC
4312hwrm_func_qcaps_exit:
4313 mutex_unlock(&bp->hwrm_cmd_lock);
4314 return rc;
4315}
4316
4317static int bnxt_hwrm_func_reset(struct bnxt *bp)
4318{
4319 struct hwrm_func_reset_input req = {0};
4320
4321 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_RESET, -1, -1);
4322 req.enables = 0;
4323
4324 return hwrm_send_message(bp, &req, sizeof(req), HWRM_RESET_TIMEOUT);
4325}
4326
4327static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
4328{
4329 int rc = 0;
4330 struct hwrm_queue_qportcfg_input req = {0};
4331 struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr;
4332 u8 i, *qptr;
4333
4334 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_QPORTCFG, -1, -1);
4335
4336 mutex_lock(&bp->hwrm_cmd_lock);
4337 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4338 if (rc)
4339 goto qportcfg_exit;
4340
4341 if (!resp->max_configurable_queues) {
4342 rc = -EINVAL;
4343 goto qportcfg_exit;
4344 }
4345 bp->max_tc = resp->max_configurable_queues;
87c374de 4346 bp->max_lltc = resp->max_configurable_lossless_queues;
c0c050c5
MC
4347 if (bp->max_tc > BNXT_MAX_QUEUE)
4348 bp->max_tc = BNXT_MAX_QUEUE;
4349
441cabbb
MC
4350 if (resp->queue_cfg_info & QUEUE_QPORTCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG)
4351 bp->max_tc = 1;
4352
87c374de
MC
4353 if (bp->max_lltc > bp->max_tc)
4354 bp->max_lltc = bp->max_tc;
4355
c0c050c5
MC
4356 qptr = &resp->queue_id0;
4357 for (i = 0; i < bp->max_tc; i++) {
4358 bp->q_info[i].queue_id = *qptr++;
4359 bp->q_info[i].queue_profile = *qptr++;
4360 }
4361
4362qportcfg_exit:
4363 mutex_unlock(&bp->hwrm_cmd_lock);
4364 return rc;
4365}
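/* The firmware returns the queue data as a flat byte array starting at
 * queue_id0; the loop above consumes it pairwise as (queue_id,
 * queue_profile) for each traffic class.
 */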
4366
4367static int bnxt_hwrm_ver_get(struct bnxt *bp)
4368{
4369 int rc;
4370 struct hwrm_ver_get_input req = {0};
4371 struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr;
4372
e6ef2699 4373 bp->hwrm_max_req_len = HWRM_MAX_REQ_LEN;
c0c050c5
MC
4374 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VER_GET, -1, -1);
4375 req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
4376 req.hwrm_intf_min = HWRM_VERSION_MINOR;
4377 req.hwrm_intf_upd = HWRM_VERSION_UPDATE;
4378 mutex_lock(&bp->hwrm_cmd_lock);
4379 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4380 if (rc)
4381 goto hwrm_ver_get_exit;
4382
4383 memcpy(&bp->ver_resp, resp, sizeof(struct hwrm_ver_get_output));
4384
11f15ed3
MC
4385 bp->hwrm_spec_code = resp->hwrm_intf_maj << 16 |
4386 resp->hwrm_intf_min << 8 | resp->hwrm_intf_upd;
c193554e
MC
4387 if (resp->hwrm_intf_maj < 1) {
4388 netdev_warn(bp->dev, "HWRM interface %d.%d.%d is older than 1.0.0.\n",
c0c050c5 4389 resp->hwrm_intf_maj, resp->hwrm_intf_min,
c193554e
MC
4390 resp->hwrm_intf_upd);
4391 netdev_warn(bp->dev, "Please update firmware with HWRM interface 1.0.0 or newer.\n");
c0c050c5 4392 }
3ebf6f0a 4393 snprintf(bp->fw_ver_str, BC_HWRM_STR_LEN, "%d.%d.%d/%d.%d.%d",
c0c050c5
MC
4394 resp->hwrm_fw_maj, resp->hwrm_fw_min, resp->hwrm_fw_bld,
4395 resp->hwrm_intf_maj, resp->hwrm_intf_min, resp->hwrm_intf_upd);
4396
ff4fe81d
MC
4397 bp->hwrm_cmd_timeout = le16_to_cpu(resp->def_req_timeout);
4398 if (!bp->hwrm_cmd_timeout)
4399 bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT;
4400
e6ef2699
MC
4401 if (resp->hwrm_intf_maj >= 1)
4402 bp->hwrm_max_req_len = le16_to_cpu(resp->max_req_win_len);
4403
659c805c 4404 bp->chip_num = le16_to_cpu(resp->chip_num);
3e8060fa
PS
4405 if (bp->chip_num == CHIP_NUM_58700 && !resp->chip_rev &&
4406 !resp->chip_metal)
4407 bp->flags |= BNXT_FLAG_CHIP_NITRO_A0;
659c805c 4408
c0c050c5
MC
4409hwrm_ver_get_exit:
4410 mutex_unlock(&bp->hwrm_cmd_lock);
4411 return rc;
4412}
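/* Example, derived from the packing above: HWRM interface 1.6.0 yields
 * hwrm_spec_code 0x10600, the form used by checks such as
 * "bp->hwrm_spec_code < 0x10600" elsewhere in this file.
 */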
4413
5ac67d8b
RS
4414int bnxt_hwrm_fw_set_time(struct bnxt *bp)
4415{
878786d9 4416#if IS_ENABLED(CONFIG_RTC_LIB)
5ac67d8b
RS
4417 struct hwrm_fw_set_time_input req = {0};
4418 struct rtc_time tm;
4419 struct timeval tv;
4420
4421 if (bp->hwrm_spec_code < 0x10400)
4422 return -EOPNOTSUPP;
4423
4424 do_gettimeofday(&tv);
4425 rtc_time_to_tm(tv.tv_sec, &tm);
4426 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FW_SET_TIME, -1, -1);
4427 req.year = cpu_to_le16(1900 + tm.tm_year);
4428 req.month = 1 + tm.tm_mon;
4429 req.day = tm.tm_mday;
4430 req.hour = tm.tm_hour;
4431 req.minute = tm.tm_min;
4432 req.second = tm.tm_sec;
4433 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
878786d9
RS
4434#else
4435 return -EOPNOTSUPP;
4436#endif
5ac67d8b
RS
4437}
4438
3bdf56c4
MC
4439static int bnxt_hwrm_port_qstats(struct bnxt *bp)
4440{
4441 int rc;
4442 struct bnxt_pf_info *pf = &bp->pf;
4443 struct hwrm_port_qstats_input req = {0};
4444
4445 if (!(bp->flags & BNXT_FLAG_PORT_STATS))
4446 return 0;
4447
4448 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_QSTATS, -1, -1);
4449 req.port_id = cpu_to_le16(pf->port_id);
4450 req.tx_stat_host_addr = cpu_to_le64(bp->hw_tx_port_stats_map);
4451 req.rx_stat_host_addr = cpu_to_le64(bp->hw_rx_port_stats_map);
4452 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4453 return rc;
4454}
4455
c0c050c5
MC
4456static void bnxt_hwrm_free_tunnel_ports(struct bnxt *bp)
4457{
4458 if (bp->vxlan_port_cnt) {
4459 bnxt_hwrm_tunnel_dst_port_free(
4460 bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
4461 }
4462 bp->vxlan_port_cnt = 0;
4463 if (bp->nge_port_cnt) {
4464 bnxt_hwrm_tunnel_dst_port_free(
4465 bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
4466 }
4467 bp->nge_port_cnt = 0;
4468}
4469
4470static int bnxt_set_tpa(struct bnxt *bp, bool set_tpa)
4471{
4472 int rc, i;
4473 u32 tpa_flags = 0;
4474
4475 if (set_tpa)
4476 tpa_flags = bp->flags & BNXT_FLAG_TPA;
4477 for (i = 0; i < bp->nr_vnics; i++) {
4478 rc = bnxt_hwrm_vnic_set_tpa(bp, i, tpa_flags);
4479 if (rc) {
4480 netdev_err(bp->dev, "hwrm vnic set tpa failure for vnic %d, rc: %x\n",
4481 i, rc);
4482 return rc;
4483 }
4484 }
4485 return 0;
4486}
4487
4488static void bnxt_hwrm_clear_vnic_rss(struct bnxt *bp)
4489{
4490 int i;
4491
4492 for (i = 0; i < bp->nr_vnics; i++)
4493 bnxt_hwrm_vnic_set_rss(bp, i, false);
4494}
4495
4496static void bnxt_hwrm_resource_free(struct bnxt *bp, bool close_path,
4497 bool irq_re_init)
4498{
4499 if (bp->vnic_info) {
4500 bnxt_hwrm_clear_vnic_filter(bp);
4501 /* clear all RSS settings before freeing the vnic ctx */
4502 bnxt_hwrm_clear_vnic_rss(bp);
4503 bnxt_hwrm_vnic_ctx_free(bp);
4504 /* before freeing the vnic, undo the vnic tpa settings */
4505 if (bp->flags & BNXT_FLAG_TPA)
4506 bnxt_set_tpa(bp, false);
4507 bnxt_hwrm_vnic_free(bp);
4508 }
4509 bnxt_hwrm_ring_free(bp, close_path);
4510 bnxt_hwrm_ring_grp_free(bp);
4511 if (irq_re_init) {
4512 bnxt_hwrm_stat_ctx_free(bp);
4513 bnxt_hwrm_free_tunnel_ports(bp);
4514 }
4515}
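/* Teardown order matters here: filters and RSS state are cleared while
 * the vnics still exist, TPA is undone before the vnics are freed, and
 * rings are freed before their ring groups; stats contexts and tunnel
 * ports go last and only on a full IRQ re-init.
 */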
4516
4517static int bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
4518{
ae10ae74 4519 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
c0c050c5
MC
4520 int rc;
4521
ae10ae74
MC
4522 if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG)
4523 goto skip_rss_ctx;
4524
c0c050c5 4525 /* allocate context for vnic */
94ce9caa 4526 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 0);
c0c050c5
MC
4527 if (rc) {
4528 netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
4529 vnic_id, rc);
4530 goto vnic_setup_err;
4531 }
4532 bp->rsscos_nr_ctxs++;
4533
94ce9caa
PS
4534 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
4535 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 1);
4536 if (rc) {
4537 netdev_err(bp->dev, "hwrm vnic %d cos ctx alloc failure rc: %x\n",
4538 vnic_id, rc);
4539 goto vnic_setup_err;
4540 }
4541 bp->rsscos_nr_ctxs++;
4542 }
4543
ae10ae74 4544skip_rss_ctx:
c0c050c5
MC
4545 /* configure default vnic, ring grp */
4546 rc = bnxt_hwrm_vnic_cfg(bp, vnic_id);
4547 if (rc) {
4548 netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n",
4549 vnic_id, rc);
4550 goto vnic_setup_err;
4551 }
4552
4553 /* Enable RSS hashing on vnic */
4554 rc = bnxt_hwrm_vnic_set_rss(bp, vnic_id, true);
4555 if (rc) {
4556 netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %x\n",
4557 vnic_id, rc);
4558 goto vnic_setup_err;
4559 }
4560
4561 if (bp->flags & BNXT_FLAG_AGG_RINGS) {
4562 rc = bnxt_hwrm_vnic_set_hds(bp, vnic_id);
4563 if (rc) {
4564 netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n",
4565 vnic_id, rc);
4566 }
4567 }
4568
4569vnic_setup_err:
4570 return rc;
4571}
4572
4573static int bnxt_alloc_rfs_vnics(struct bnxt *bp)
4574{
4575#ifdef CONFIG_RFS_ACCEL
4576 int i, rc = 0;
4577
4578 for (i = 0; i < bp->rx_nr_rings; i++) {
ae10ae74 4579 struct bnxt_vnic_info *vnic;
c0c050c5
MC
4580 u16 vnic_id = i + 1;
4581 u16 ring_id = i;
4582
4583 if (vnic_id >= bp->nr_vnics)
4584 break;
4585
ae10ae74
MC
4586 vnic = &bp->vnic_info[vnic_id];
4587 vnic->flags |= BNXT_VNIC_RFS_FLAG;
4588 if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
4589 vnic->flags |= BNXT_VNIC_RFS_NEW_RSS_FLAG;
b81a90d3 4590 rc = bnxt_hwrm_vnic_alloc(bp, vnic_id, ring_id, 1);
c0c050c5
MC
4591 if (rc) {
4592 netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
4593 vnic_id, rc);
4594 break;
4595 }
4596 rc = bnxt_setup_vnic(bp, vnic_id);
4597 if (rc)
4598 break;
4599 }
4600 return rc;
4601#else
4602 return 0;
4603#endif
4604}
4605
17c71ac3
MC
4606/* Allow PF and VF with default VLAN to be in promiscuous mode */
4607static bool bnxt_promisc_ok(struct bnxt *bp)
4608{
4609#ifdef CONFIG_BNXT_SRIOV
4610 if (BNXT_VF(bp) && !bp->vf.vlan)
4611 return false;
4612#endif
4613 return true;
4614}
4615
dc52c6c7
PS
4616static int bnxt_setup_nitroa0_vnic(struct bnxt *bp)
4617{
4618 int rc = 0;
4619
4620 rc = bnxt_hwrm_vnic_alloc(bp, 1, bp->rx_nr_rings - 1, 1);
4621 if (rc) {
4622 netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n",
4623 rc);
4624 return rc;
4625 }
4626
4627 rc = bnxt_hwrm_vnic_cfg(bp, 1);
4628 if (rc) {
4629 netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n",
4630 rc);
4631 return rc;
4632 }
4633 return rc;
4634}
4635
b664f008 4636static int bnxt_cfg_rx_mode(struct bnxt *);
7d2837dd 4637static bool bnxt_mc_list_updated(struct bnxt *, u32 *);
b664f008 4638
c0c050c5
MC
4639static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
4640{
7d2837dd 4641 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
c0c050c5 4642 int rc = 0;
76595193 4643 unsigned int rx_nr_rings = bp->rx_nr_rings;
c0c050c5
MC
4644
4645 if (irq_re_init) {
4646 rc = bnxt_hwrm_stat_ctx_alloc(bp);
4647 if (rc) {
4648 netdev_err(bp->dev, "hwrm stat ctx alloc failure rc: %x\n",
4649 rc);
4650 goto err_out;
4651 }
4652 }
4653
4654 rc = bnxt_hwrm_ring_alloc(bp);
4655 if (rc) {
4656 netdev_err(bp->dev, "hwrm ring alloc failure rc: %x\n", rc);
4657 goto err_out;
4658 }
4659
4660 rc = bnxt_hwrm_ring_grp_alloc(bp);
4661 if (rc) {
4662 netdev_err(bp->dev, "hwrm_ring_grp alloc failure: %x\n", rc);
4663 goto err_out;
4664 }
4665
76595193
PS
4666 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
4667 rx_nr_rings--;
4668
c0c050c5 4669 /* default vnic 0 */
76595193 4670 rc = bnxt_hwrm_vnic_alloc(bp, 0, 0, rx_nr_rings);
c0c050c5
MC
4671 if (rc) {
4672 netdev_err(bp->dev, "hwrm vnic alloc failure rc: %x\n", rc);
4673 goto err_out;
4674 }
4675
4676 rc = bnxt_setup_vnic(bp, 0);
4677 if (rc)
4678 goto err_out;
4679
4680 if (bp->flags & BNXT_FLAG_RFS) {
4681 rc = bnxt_alloc_rfs_vnics(bp);
4682 if (rc)
4683 goto err_out;
4684 }
4685
4686 if (bp->flags & BNXT_FLAG_TPA) {
4687 rc = bnxt_set_tpa(bp, true);
4688 if (rc)
4689 goto err_out;
4690 }
4691
4692 if (BNXT_VF(bp))
4693 bnxt_update_vf_mac(bp);
4694
4695 /* Filter for default vnic 0 */
4696 rc = bnxt_hwrm_set_vnic_filter(bp, 0, 0, bp->dev->dev_addr);
4697 if (rc) {
4698 netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc);
4699 goto err_out;
4700 }
7d2837dd 4701 vnic->uc_filter_count = 1;
c0c050c5 4702
7d2837dd 4703 vnic->rx_mask = CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
c0c050c5 4704
17c71ac3 4705 if ((bp->dev->flags & IFF_PROMISC) && bnxt_promisc_ok(bp))
7d2837dd
MC
4706 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
4707
4708 if (bp->dev->flags & IFF_ALLMULTI) {
4709 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
4710 vnic->mc_list_count = 0;
4711 } else {
4712 u32 mask = 0;
4713
4714 bnxt_mc_list_updated(bp, &mask);
4715 vnic->rx_mask |= mask;
4716 }
c0c050c5 4717
b664f008
MC
4718 rc = bnxt_cfg_rx_mode(bp);
4719 if (rc)
c0c050c5 4720 goto err_out;
c0c050c5
MC
4721
4722 rc = bnxt_hwrm_set_coal(bp);
4723 if (rc)
4724 netdev_warn(bp->dev, "HWRM set coalescing failure rc: %x\n",
dc52c6c7
PS
4725 rc);
4726
4727 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
4728 rc = bnxt_setup_nitroa0_vnic(bp);
4729 if (rc)
4730 netdev_err(bp->dev, "Special vnic setup failure for NS2 A0 rc: %x\n",
4731 rc);
4732 }
c0c050c5 4733
cf6645f8
MC
4734 if (BNXT_VF(bp)) {
4735 bnxt_hwrm_func_qcfg(bp);
4736 netdev_update_features(bp->dev);
4737 }
4738
c0c050c5
MC
4739 return 0;
4740
4741err_out:
4742 bnxt_hwrm_resource_free(bp, 0, true);
4743
4744 return rc;
4745}
4746
4747static int bnxt_shutdown_nic(struct bnxt *bp, bool irq_re_init)
4748{
4749 bnxt_hwrm_resource_free(bp, 1, irq_re_init);
4750 return 0;
4751}
4752
4753static int bnxt_init_nic(struct bnxt *bp, bool irq_re_init)
4754{
4755 bnxt_init_rx_rings(bp);
4756 bnxt_init_tx_rings(bp);
4757 bnxt_init_ring_grps(bp, irq_re_init);
4758 bnxt_init_vnics(bp);
4759
4760 return bnxt_init_chip(bp, irq_re_init);
4761}
4762
c0c050c5
MC
4763static int bnxt_set_real_num_queues(struct bnxt *bp)
4764{
4765 int rc;
4766 struct net_device *dev = bp->dev;
4767
4768 rc = netif_set_real_num_tx_queues(dev, bp->tx_nr_rings);
4769 if (rc)
4770 return rc;
4771
4772 rc = netif_set_real_num_rx_queues(dev, bp->rx_nr_rings);
4773 if (rc)
4774 return rc;
4775
4776#ifdef CONFIG_RFS_ACCEL
45019a18 4777 if (bp->flags & BNXT_FLAG_RFS)
c0c050c5 4778 dev->rx_cpu_rmap = alloc_irq_cpu_rmap(bp->rx_nr_rings);
c0c050c5
MC
4779#endif
4780
4781 return rc;
4782}
4783
6e6c5a57
MC
4784static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
4785 bool shared)
4786{
4787 int _rx = *rx, _tx = *tx;
4788
4789 if (shared) {
4790 *rx = min_t(int, _rx, max);
4791 *tx = min_t(int, _tx, max);
4792 } else {
4793 if (max < 2)
4794 return -ENOMEM;
4795
4796 while (_rx + _tx > max) {
4797 if (_rx > _tx && _rx > 1)
4798 _rx--;
4799 else if (_tx > 1)
4800 _tx--;
4801 }
4802 *rx = _rx;
4803 *tx = _tx;
4804 }
4805 return 0;
4806}
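/* Worked example (illustrative): in the non-shared case with max = 8 and
 * *rx = *tx = 6, the loop alternately decrements the larger count until
 * rx + tx <= max, returning *rx = *tx = 4; in the shared case each count
 * is merely capped at max, so both would stay 6.
 */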
4807
7809592d
MC
4808static void bnxt_setup_msix(struct bnxt *bp)
4809{
4810 const int len = sizeof(bp->irq_tbl[0].name);
4811 struct net_device *dev = bp->dev;
4812 int tcs, i;
4813
4814 tcs = netdev_get_num_tc(dev);
4815 if (tcs > 1) {
4816 bp->tx_nr_rings_per_tc = bp->tx_nr_rings / tcs;
4817 if (bp->tx_nr_rings_per_tc == 0) {
4818 netdev_reset_tc(dev);
4819 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
4820 } else {
4821 int i, off, count;
4822
4823 bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tcs;
4824 for (i = 0; i < tcs; i++) {
4825 count = bp->tx_nr_rings_per_tc;
4826 off = i * count;
4827 netdev_set_tc_queue(dev, i, count, off);
4828 }
4829 }
4830 }
4831
4832 for (i = 0; i < bp->cp_nr_rings; i++) {
4833 char *attr;
4834
4835 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
4836 attr = "TxRx";
4837 else if (i < bp->rx_nr_rings)
4838 attr = "rx";
4839 else
4840 attr = "tx";
4841
4842 snprintf(bp->irq_tbl[i].name, len, "%s-%s-%d", dev->name, attr,
4843 i);
4844 bp->irq_tbl[i].handler = bnxt_msix;
4845 }
4846}
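/* Resulting IRQ names (illustrative, assuming the netdev is "eth0"):
 * "eth0-TxRx-<n>" with shared rings, otherwise "eth0-rx-<n>" for the rx
 * vectors followed by "eth0-tx-<n>" for the tx vectors.
 */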
4847
4848static void bnxt_setup_inta(struct bnxt *bp)
4849{
4850 const int len = sizeof(bp->irq_tbl[0].name);
4851
4852 if (netdev_get_num_tc(bp->dev))
4853 netdev_reset_tc(bp->dev);
4854
4855 snprintf(bp->irq_tbl[0].name, len, "%s-%s-%d", bp->dev->name, "TxRx",
4856 0);
4857 bp->irq_tbl[0].handler = bnxt_inta;
4858}
4859
4860static int bnxt_setup_int_mode(struct bnxt *bp)
4861{
4862 int rc;
4863
4864 if (bp->flags & BNXT_FLAG_USING_MSIX)
4865 bnxt_setup_msix(bp);
4866 else
4867 bnxt_setup_inta(bp);
4868
4869 rc = bnxt_set_real_num_queues(bp);
4870 return rc;
4871}
4872
8079e8f1
MC
4873static unsigned int bnxt_get_max_func_rss_ctxs(struct bnxt *bp)
4874{
4875#if defined(CONFIG_BNXT_SRIOV)
4876 if (BNXT_VF(bp))
4877 return bp->vf.max_rsscos_ctxs;
4878#endif
4879 return bp->pf.max_rsscos_ctxs;
4880}
4881
4882static unsigned int bnxt_get_max_func_vnics(struct bnxt *bp)
4883{
4884#if defined(CONFIG_BNXT_SRIOV)
4885 if (BNXT_VF(bp))
4886 return bp->vf.max_vnics;
4887#endif
4888 return bp->pf.max_vnics;
4889}
4890
e4060d30
MC
4891unsigned int bnxt_get_max_func_stat_ctxs(struct bnxt *bp)
4892{
4893#if defined(CONFIG_BNXT_SRIOV)
4894 if (BNXT_VF(bp))
4895 return bp->vf.max_stat_ctxs;
4896#endif
4897 return bp->pf.max_stat_ctxs;
4898}
4899
a588e458
MC
4900void bnxt_set_max_func_stat_ctxs(struct bnxt *bp, unsigned int max)
4901{
4902#if defined(CONFIG_BNXT_SRIOV)
4903 if (BNXT_VF(bp))
4904 bp->vf.max_stat_ctxs = max;
4905 else
4906#endif
4907 bp->pf.max_stat_ctxs = max;
4908}
4909
e4060d30
MC
4910unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp)
4911{
4912#if defined(CONFIG_BNXT_SRIOV)
4913 if (BNXT_VF(bp))
4914 return bp->vf.max_cp_rings;
4915#endif
4916 return bp->pf.max_cp_rings;
4917}
4918
a588e458
MC
4919void bnxt_set_max_func_cp_rings(struct bnxt *bp, unsigned int max)
4920{
4921#if defined(CONFIG_BNXT_SRIOV)
4922 if (BNXT_VF(bp))
4923 bp->vf.max_cp_rings = max;
4924 else
4925#endif
4926 bp->pf.max_cp_rings = max;
4927}
4928
7809592d
MC
4929static unsigned int bnxt_get_max_func_irqs(struct bnxt *bp)
4930{
4931#if defined(CONFIG_BNXT_SRIOV)
4932 if (BNXT_VF(bp))
4933 return bp->vf.max_irqs;
4934#endif
4935 return bp->pf.max_irqs;
4936}
4937
33c2657e
MC
4938void bnxt_set_max_func_irqs(struct bnxt *bp, unsigned int max_irqs)
4939{
4940#if defined(CONFIG_BNXT_SRIOV)
4941 if (BNXT_VF(bp))
4942 bp->vf.max_irqs = max_irqs;
4943 else
4944#endif
4945 bp->pf.max_irqs = max_irqs;
4946}
4947
7809592d 4948static int bnxt_init_msix(struct bnxt *bp)
c0c050c5 4949{
01657bcd 4950 int i, total_vecs, rc = 0, min = 1;
7809592d 4951 struct msix_entry *msix_ent;
c0c050c5 4952
7809592d 4953 total_vecs = bnxt_get_max_func_irqs(bp);
c0c050c5
MC
4954 msix_ent = kcalloc(total_vecs, sizeof(struct msix_entry), GFP_KERNEL);
4955 if (!msix_ent)
4956 return -ENOMEM;
4957
4958 for (i = 0; i < total_vecs; i++) {
4959 msix_ent[i].entry = i;
4960 msix_ent[i].vector = 0;
4961 }
4962
01657bcd
MC
4963 if (!(bp->flags & BNXT_FLAG_SHARED_RINGS))
4964 min = 2;
4965
4966 total_vecs = pci_enable_msix_range(bp->pdev, msix_ent, min, total_vecs);
c0c050c5
MC
4967 if (total_vecs < 0) {
4968 rc = -ENODEV;
4969 goto msix_setup_exit;
4970 }
4971
4972 bp->irq_tbl = kcalloc(total_vecs, sizeof(struct bnxt_irq), GFP_KERNEL);
4973 if (bp->irq_tbl) {
7809592d
MC
4974 for (i = 0; i < total_vecs; i++)
4975 bp->irq_tbl[i].vector = msix_ent[i].vector;
c0c050c5 4976
7809592d 4977 bp->total_irqs = total_vecs;
c0c050c5 4978 /* Trim rings based upon num of vectors allocated */
6e6c5a57 4979 rc = bnxt_trim_rings(bp, &bp->rx_nr_rings, &bp->tx_nr_rings,
01657bcd 4980 total_vecs, min == 1);
6e6c5a57
MC
4981 if (rc)
4982 goto msix_setup_exit;
4983
c0c050c5 4984 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
7809592d
MC
4985 bp->cp_nr_rings = (min == 1) ?
4986 max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
4987 bp->tx_nr_rings + bp->rx_nr_rings;
c0c050c5 4988
c0c050c5
MC
4989 } else {
4990 rc = -ENOMEM;
4991 goto msix_setup_exit;
4992 }
4993 bp->flags |= BNXT_FLAG_USING_MSIX;
4994 kfree(msix_ent);
4995 return 0;
4996
4997msix_setup_exit:
7809592d
MC
4998 netdev_err(bp->dev, "bnxt_init_msix err: %x\n", rc);
4999 kfree(bp->irq_tbl);
5000 bp->irq_tbl = NULL;
c0c050c5
MC
5001 pci_disable_msix(bp->pdev);
5002 kfree(msix_ent);
5003 return rc;
5004}
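/* Note: with dedicated (non-shared) rings at least two MSI-X vectors are
 * needed, one completion ring for rx and one for tx, hence min = 2;
 * shared rings can run on a single vector.
 */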
5005
7809592d 5006static int bnxt_init_inta(struct bnxt *bp)
c0c050c5 5007{
c0c050c5 5008 bp->irq_tbl = kcalloc(1, sizeof(struct bnxt_irq), GFP_KERNEL);
7809592d
MC
5009 if (!bp->irq_tbl)
5010 return -ENOMEM;
5011
5012 bp->total_irqs = 1;
c0c050c5
MC
5013 bp->rx_nr_rings = 1;
5014 bp->tx_nr_rings = 1;
5015 bp->cp_nr_rings = 1;
5016 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
01657bcd 5017 bp->flags |= BNXT_FLAG_SHARED_RINGS;
c0c050c5 5018 bp->irq_tbl[0].vector = bp->pdev->irq;
7809592d 5019 return 0;
c0c050c5
MC
5020}
5021
7809592d 5022static int bnxt_init_int_mode(struct bnxt *bp)
c0c050c5
MC
5023{
5024 int rc = 0;
5025
5026 if (bp->flags & BNXT_FLAG_MSIX_CAP)
7809592d 5027 rc = bnxt_init_msix(bp);
c0c050c5 5028
1fa72e29 5029 if (!(bp->flags & BNXT_FLAG_USING_MSIX) && BNXT_PF(bp)) {
c0c050c5 5030 /* fallback to INTA */
7809592d 5031 rc = bnxt_init_inta(bp);
c0c050c5
MC
5032 }
5033 return rc;
5034}
5035
7809592d
MC
5036static void bnxt_clear_int_mode(struct bnxt *bp)
5037{
5038 if (bp->flags & BNXT_FLAG_USING_MSIX)
5039 pci_disable_msix(bp->pdev);
5040
5041 kfree(bp->irq_tbl);
5042 bp->irq_tbl = NULL;
5043 bp->flags &= ~BNXT_FLAG_USING_MSIX;
5044}
5045
c0c050c5
MC
5046static void bnxt_free_irq(struct bnxt *bp)
5047{
5048 struct bnxt_irq *irq;
5049 int i;
5050
5051#ifdef CONFIG_RFS_ACCEL
5052 free_irq_cpu_rmap(bp->dev->rx_cpu_rmap);
5053 bp->dev->rx_cpu_rmap = NULL;
5054#endif
5055 if (!bp->irq_tbl)
5056 return;
5057
5058 for (i = 0; i < bp->cp_nr_rings; i++) {
5059 irq = &bp->irq_tbl[i];
5060 if (irq->requested)
5061 free_irq(irq->vector, bp->bnapi[i]);
5062 irq->requested = 0;
5063 }
c0c050c5
MC
5064}
5065
5066static int bnxt_request_irq(struct bnxt *bp)
5067{
b81a90d3 5068 int i, j, rc = 0;
c0c050c5
MC
5069 unsigned long flags = 0;
5070#ifdef CONFIG_RFS_ACCEL
5071 struct cpu_rmap *rmap = bp->dev->rx_cpu_rmap;
5072#endif
5073
5074 if (!(bp->flags & BNXT_FLAG_USING_MSIX))
5075 flags = IRQF_SHARED;
5076
b81a90d3 5077 for (i = 0, j = 0; i < bp->cp_nr_rings; i++) {
c0c050c5
MC
5078 struct bnxt_irq *irq = &bp->irq_tbl[i];
5079#ifdef CONFIG_RFS_ACCEL
b81a90d3 5080 if (rmap && bp->bnapi[i]->rx_ring) {
c0c050c5
MC
5081 rc = irq_cpu_rmap_add(rmap, irq->vector);
5082 if (rc)
5083 netdev_warn(bp->dev, "failed adding irq rmap for ring %d\n",
b81a90d3
MC
5084 j);
5085 j++;
c0c050c5
MC
5086 }
5087#endif
5088 rc = request_irq(irq->vector, irq->handler, flags, irq->name,
5089 bp->bnapi[i]);
5090 if (rc)
5091 break;
5092
5093 irq->requested = 1;
5094 }
5095 return rc;
5096}
5097
5098static void bnxt_del_napi(struct bnxt *bp)
5099{
5100 int i;
5101
5102 if (!bp->bnapi)
5103 return;
5104
5105 for (i = 0; i < bp->cp_nr_rings; i++) {
5106 struct bnxt_napi *bnapi = bp->bnapi[i];
5107
5108 napi_hash_del(&bnapi->napi);
5109 netif_napi_del(&bnapi->napi);
5110 }
e5f6f564
ED
5111 /* We called napi_hash_del() before netif_napi_del(), we need
5112 * to respect an RCU grace period before freeing napi structures.
5113 */
5114 synchronize_net();
c0c050c5
MC
5115}
5116
5117static void bnxt_init_napi(struct bnxt *bp)
5118{
5119 int i;
10bbdaf5 5120 unsigned int cp_nr_rings = bp->cp_nr_rings;
c0c050c5
MC
5121 struct bnxt_napi *bnapi;
5122
5123 if (bp->flags & BNXT_FLAG_USING_MSIX) {
10bbdaf5
PS
5124 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
5125 cp_nr_rings--;
5126 for (i = 0; i < cp_nr_rings; i++) {
c0c050c5
MC
5127 bnapi = bp->bnapi[i];
5128 netif_napi_add(bp->dev, &bnapi->napi,
5129 bnxt_poll, 64);
c0c050c5 5130 }
10bbdaf5
PS
5131 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
5132 bnapi = bp->bnapi[cp_nr_rings];
5133 netif_napi_add(bp->dev, &bnapi->napi,
5134 bnxt_poll_nitroa0, 64);
10bbdaf5 5135 }
c0c050c5
MC
5136 } else {
5137 bnapi = bp->bnapi[0];
5138 netif_napi_add(bp->dev, &bnapi->napi, bnxt_poll, 64);
c0c050c5
MC
5139 }
5140}
5141
5142static void bnxt_disable_napi(struct bnxt *bp)
5143{
5144 int i;
5145
5146 if (!bp->bnapi)
5147 return;
5148
b356a2e7 5149 for (i = 0; i < bp->cp_nr_rings; i++)
c0c050c5 5150 napi_disable(&bp->bnapi[i]->napi);
c0c050c5
MC
5151}
5152
5153static void bnxt_enable_napi(struct bnxt *bp)
5154{
5155 int i;
5156
5157 for (i = 0; i < bp->cp_nr_rings; i++) {
fa7e2812 5158 bp->bnapi[i]->in_reset = false;
c0c050c5
MC
5159 napi_enable(&bp->bnapi[i]->napi);
5160 }
5161}
5162
7df4ae9f 5163void bnxt_tx_disable(struct bnxt *bp)
c0c050c5
MC
5164{
5165 int i;
c0c050c5
MC
5166 struct bnxt_tx_ring_info *txr;
5167 struct netdev_queue *txq;
5168
b6ab4b01 5169 if (bp->tx_ring) {
c0c050c5 5170 for (i = 0; i < bp->tx_nr_rings; i++) {
b6ab4b01 5171 txr = &bp->tx_ring[i];
c0c050c5 5172 txq = netdev_get_tx_queue(bp->dev, i);
c0c050c5 5173 txr->dev_state = BNXT_DEV_STATE_CLOSING;
c0c050c5
MC
5174 }
5175 }
5176 /* Stop all TX queues */
5177 netif_tx_disable(bp->dev);
5178 netif_carrier_off(bp->dev);
5179}
5180
7df4ae9f 5181void bnxt_tx_enable(struct bnxt *bp)
c0c050c5
MC
5182{
5183 int i;
c0c050c5
MC
5184 struct bnxt_tx_ring_info *txr;
5185 struct netdev_queue *txq;
5186
5187 for (i = 0; i < bp->tx_nr_rings; i++) {
b6ab4b01 5188 txr = &bp->tx_ring[i];
c0c050c5
MC
5189 txq = netdev_get_tx_queue(bp->dev, i);
5190 txr->dev_state = 0;
5191 }
5192 netif_tx_wake_all_queues(bp->dev);
5193 if (bp->link_info.link_up)
5194 netif_carrier_on(bp->dev);
5195}
5196
5197static void bnxt_report_link(struct bnxt *bp)
5198{
5199 if (bp->link_info.link_up) {
5200 const char *duplex;
5201 const char *flow_ctrl;
5202 u16 speed;
5203
5204 netif_carrier_on(bp->dev);
5205 if (bp->link_info.duplex == BNXT_LINK_DUPLEX_FULL)
5206 duplex = "full";
5207 else
5208 duplex = "half";
5209 if (bp->link_info.pause == BNXT_LINK_PAUSE_BOTH)
5210 flow_ctrl = "ON - receive & transmit";
5211 else if (bp->link_info.pause == BNXT_LINK_PAUSE_TX)
5212 flow_ctrl = "ON - transmit";
5213 else if (bp->link_info.pause == BNXT_LINK_PAUSE_RX)
5214 flow_ctrl = "ON - receive";
5215 else
5216 flow_ctrl = "none";
5217 speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed);
5218 netdev_info(bp->dev, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n",
5219 speed, duplex, flow_ctrl);
170ce013
MC
5220 if (bp->flags & BNXT_FLAG_EEE_CAP)
5221 netdev_info(bp->dev, "EEE is %s\n",
5222 bp->eee.eee_active ? "active" :
5223 "not active");
c0c050c5
MC
5224 } else {
5225 netif_carrier_off(bp->dev);
5226 netdev_err(bp->dev, "NIC Link is Down\n");
5227 }
5228}
5229
170ce013
MC
5230static int bnxt_hwrm_phy_qcaps(struct bnxt *bp)
5231{
5232 int rc = 0;
5233 struct hwrm_port_phy_qcaps_input req = {0};
5234 struct hwrm_port_phy_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
93ed8117 5235 struct bnxt_link_info *link_info = &bp->link_info;
170ce013
MC
5236
5237 if (bp->hwrm_spec_code < 0x10201)
5238 return 0;
5239
5240 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_QCAPS, -1, -1);
5241
5242 mutex_lock(&bp->hwrm_cmd_lock);
5243 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5244 if (rc)
5245 goto hwrm_phy_qcaps_exit;
5246
5247 if (resp->eee_supported & PORT_PHY_QCAPS_RESP_EEE_SUPPORTED) {
5248 struct ethtool_eee *eee = &bp->eee;
5249 u16 fw_speeds = le16_to_cpu(resp->supported_speeds_eee_mode);
5250
5251 bp->flags |= BNXT_FLAG_EEE_CAP;
5252 eee->supported = _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
5253 bp->lpi_tmr_lo = le32_to_cpu(resp->tx_lpi_timer_low) &
5254 PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_LOW_MASK;
5255 bp->lpi_tmr_hi = le32_to_cpu(resp->valid_tx_lpi_timer_high) &
5256 PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_MASK;
5257 }
93ed8117
MC
5258 link_info->support_auto_speeds =
5259 le16_to_cpu(resp->supported_speeds_auto_mode);
170ce013
MC
5260
5261hwrm_phy_qcaps_exit:
5262 mutex_unlock(&bp->hwrm_cmd_lock);
5263 return rc;
5264}
5265
c0c050c5
MC
5266static int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
5267{
5268 int rc = 0;
5269 struct bnxt_link_info *link_info = &bp->link_info;
5270 struct hwrm_port_phy_qcfg_input req = {0};
5271 struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
5272 u8 link_up = link_info->link_up;
286ef9d6 5273 u16 diff;
c0c050c5
MC
5274
5275 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_QCFG, -1, -1);
5276
5277 mutex_lock(&bp->hwrm_cmd_lock);
5278 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5279 if (rc) {
5280 mutex_unlock(&bp->hwrm_cmd_lock);
5281 return rc;
5282 }
5283
5284 memcpy(&link_info->phy_qcfg_resp, resp, sizeof(*resp));
5285 link_info->phy_link_status = resp->link;
5286 link_info->duplex = resp->duplex;
5287 link_info->pause = resp->pause;
5288 link_info->auto_mode = resp->auto_mode;
5289 link_info->auto_pause_setting = resp->auto_pause;
3277360e 5290 link_info->lp_pause = resp->link_partner_adv_pause;
c0c050c5 5291 link_info->force_pause_setting = resp->force_pause;
c193554e 5292 link_info->duplex_setting = resp->duplex;
c0c050c5
MC
5293 if (link_info->phy_link_status == BNXT_LINK_LINK)
5294 link_info->link_speed = le16_to_cpu(resp->link_speed);
5295 else
5296 link_info->link_speed = 0;
5297 link_info->force_link_speed = le16_to_cpu(resp->force_link_speed);
c0c050c5
MC
5298 link_info->support_speeds = le16_to_cpu(resp->support_speeds);
5299 link_info->auto_link_speeds = le16_to_cpu(resp->auto_link_speed_mask);
3277360e
MC
5300 link_info->lp_auto_link_speeds =
5301 le16_to_cpu(resp->link_partner_adv_speeds);
c0c050c5
MC
5302 link_info->preemphasis = le32_to_cpu(resp->preemphasis);
5303 link_info->phy_ver[0] = resp->phy_maj;
5304 link_info->phy_ver[1] = resp->phy_min;
5305 link_info->phy_ver[2] = resp->phy_bld;
5306 link_info->media_type = resp->media_type;
03efbec0 5307 link_info->phy_type = resp->phy_type;
11f15ed3 5308 link_info->transceiver = resp->xcvr_pkg_type;
170ce013
MC
5309 link_info->phy_addr = resp->eee_config_phy_addr &
5310 PORT_PHY_QCFG_RESP_PHY_ADDR_MASK;
42ee18fe 5311 link_info->module_status = resp->module_status;
170ce013
MC
5312
5313 if (bp->flags & BNXT_FLAG_EEE_CAP) {
5314 struct ethtool_eee *eee = &bp->eee;
5315 u16 fw_speeds;
5316
5317 eee->eee_active = 0;
5318 if (resp->eee_config_phy_addr &
5319 PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ACTIVE) {
5320 eee->eee_active = 1;
5321 fw_speeds = le16_to_cpu(
5322 resp->link_partner_adv_eee_link_speed_mask);
5323 eee->lp_advertised =
5324 _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
5325 }
5326
5327 /* Pull initial EEE config */
5328 if (!chng_link_state) {
5329 if (resp->eee_config_phy_addr &
5330 PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ENABLED)
5331 eee->eee_enabled = 1;
c0c050c5 5332
170ce013
MC
5333 fw_speeds = le16_to_cpu(resp->adv_eee_link_speed_mask);
5334 eee->advertised =
5335 _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
5336
5337 if (resp->eee_config_phy_addr &
5338 PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_TX_LPI) {
5339 __le32 tmr;
5340
5341 eee->tx_lpi_enabled = 1;
5342 tmr = resp->xcvr_identifier_type_tx_lpi_timer;
5343 eee->tx_lpi_timer = le32_to_cpu(tmr) &
5344 PORT_PHY_QCFG_RESP_TX_LPI_TIMER_MASK;
5345 }
5346 }
5347 }
c0c050c5
MC
5348 /* TODO: need to add more logic to report VF link */
5349 if (chng_link_state) {
5350 if (link_info->phy_link_status == BNXT_LINK_LINK)
5351 link_info->link_up = 1;
5352 else
5353 link_info->link_up = 0;
5354 if (link_up != link_info->link_up)
5355 bnxt_report_link(bp);
5356 } else {
5357 /* always link down if no link state update is required */
5358 link_info->link_up = 0;
5359 }
5360 mutex_unlock(&bp->hwrm_cmd_lock);
286ef9d6
MC
5361
5362 diff = link_info->support_auto_speeds ^ link_info->advertising;
5363 if ((link_info->support_auto_speeds | diff) !=
5364 link_info->support_auto_speeds) {
5365 /* An advertised speed is no longer supported, so we need to
5366 * update the advertisement settings. See bnxt_reset() for
5367 * comments about the rtnl_lock() sequence below.
5368 */
5369 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
5370 rtnl_lock();
5371 link_info->advertising = link_info->support_auto_speeds;
5372 if (test_bit(BNXT_STATE_OPEN, &bp->state) &&
5373 (link_info->autoneg & BNXT_AUTONEG_SPEED))
5374 bnxt_hwrm_set_link_setting(bp, true, false);
5375 set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
5376 rtnl_unlock();
5377 }
c0c050c5
MC
5378 return 0;
5379}
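/* Example (illustrative): support_auto_speeds = 0x6 with advertising =
 * 0xc gives diff = 0xa, and (0x6 | 0xa) != 0x6, i.e. a speed is being
 * advertised that is no longer supported, so the advertisement mask is
 * rewritten above.
 */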
5380
10289bec
MC
5381static void bnxt_get_port_module_status(struct bnxt *bp)
5382{
5383 struct bnxt_link_info *link_info = &bp->link_info;
5384 struct hwrm_port_phy_qcfg_output *resp = &link_info->phy_qcfg_resp;
5385 u8 module_status;
5386
5387 if (bnxt_update_link(bp, true))
5388 return;
5389
5390 module_status = link_info->module_status;
5391 switch (module_status) {
5392 case PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX:
5393 case PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN:
5394 case PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG:
5395 netdev_warn(bp->dev, "Unqualified SFP+ module detected on port %d\n",
5396 bp->pf.port_id);
5397 if (bp->hwrm_spec_code >= 0x10201) {
5398 netdev_warn(bp->dev, "Module part number %s\n",
5399 resp->phy_vendor_partnumber);
5400 }
5401 if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX)
5402 netdev_warn(bp->dev, "TX is disabled\n");
5403 if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN)
5404 netdev_warn(bp->dev, "SFP+ module is shutdown\n");
5405 }
5406}
5407
c0c050c5
MC
5408static void
5409bnxt_hwrm_set_pause_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req)
5410{
5411 if (bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) {
c9ee9516
MC
5412 if (bp->hwrm_spec_code >= 0x10201)
5413 req->auto_pause =
5414 PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE;
c0c050c5
MC
5415 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
5416 req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_RX;
5417 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
49b5c7a1 5418 req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_TX;
c0c050c5
MC
5419 req->enables |=
5420 cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE);
5421 } else {
5422 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
5423 req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_RX;
5424 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
5425 req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_TX;
5426 req->enables |=
5427 cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_PAUSE);
c9ee9516
MC
5428 if (bp->hwrm_spec_code >= 0x10201) {
5429 req->auto_pause = req->force_pause;
5430 req->enables |= cpu_to_le32(
5431 PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE);
5432 }
c0c050c5
MC
5433 }
5434}
5435
5436static void bnxt_hwrm_set_link_common(struct bnxt *bp,
5437 struct hwrm_port_phy_cfg_input *req)
5438{
5439 u8 autoneg = bp->link_info.autoneg;
5440 u16 fw_link_speed = bp->link_info.req_link_speed;
68515a18 5441 u16 advertising = bp->link_info.advertising;
c0c050c5
MC
5442
5443 if (autoneg & BNXT_AUTONEG_SPEED) {
5444 req->auto_mode |=
11f15ed3 5445 PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK;
c0c050c5
MC
5446
5447 req->enables |= cpu_to_le32(
5448 PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK);
5449 req->auto_link_speed_mask = cpu_to_le16(advertising);
5450
5451 req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE);
5452 req->flags |=
5453 cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG);
5454 } else {
5455 req->force_link_speed = cpu_to_le16(fw_link_speed);
5456 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE);
5457 }
5458
c0c050c5
MC
5459 /* tell the firmware (ChiMP) that the setting takes effect immediately */
5460 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESET_PHY);
5461}
5462
5463int bnxt_hwrm_set_pause(struct bnxt *bp)
5464{
5465 struct hwrm_port_phy_cfg_input req = {0};
5466 int rc;
5467
5468 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
5469 bnxt_hwrm_set_pause_common(bp, &req);
5470
5471 if ((bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) ||
5472 bp->link_info.force_link_chng)
5473 bnxt_hwrm_set_link_common(bp, &req);
5474
5475 mutex_lock(&bp->hwrm_cmd_lock);
5476 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5477 if (!rc && !(bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL)) {
5478 /* since changing the pause setting doesn't trigger any link
5479 * change event, the driver needs to update the current pause
5480 * result upon successful return of the phy_cfg command
5481 */
5482 bp->link_info.pause =
5483 bp->link_info.force_pause_setting = bp->link_info.req_flow_ctrl;
5484 bp->link_info.auto_pause_setting = 0;
5485 if (!bp->link_info.force_link_chng)
5486 bnxt_report_link(bp);
5487 }
5488 bp->link_info.force_link_chng = false;
5489 mutex_unlock(&bp->hwrm_cmd_lock);
5490 return rc;
5491}
5492
939f7f0c
MC
5493static void bnxt_hwrm_set_eee(struct bnxt *bp,
5494 struct hwrm_port_phy_cfg_input *req)
5495{
5496 struct ethtool_eee *eee = &bp->eee;
5497
5498 if (eee->eee_enabled) {
5499 u16 eee_speeds;
5500 u32 flags = PORT_PHY_CFG_REQ_FLAGS_EEE_ENABLE;
5501
5502 if (eee->tx_lpi_enabled)
5503 flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_ENABLE;
5504 else
5505 flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_DISABLE;
5506
5507 req->flags |= cpu_to_le32(flags);
5508 eee_speeds = bnxt_get_fw_auto_link_speeds(eee->advertised);
5509 req->eee_link_speed_mask = cpu_to_le16(eee_speeds);
5510 req->tx_lpi_timer = cpu_to_le32(eee->tx_lpi_timer);
5511 } else {
5512 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_EEE_DISABLE);
5513 }
5514}
5515
5516int bnxt_hwrm_set_link_setting(struct bnxt *bp, bool set_pause, bool set_eee)
c0c050c5
MC
5517{
5518 struct hwrm_port_phy_cfg_input req = {0};
5519
5520 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
5521 if (set_pause)
5522 bnxt_hwrm_set_pause_common(bp, &req);
5523
5524 bnxt_hwrm_set_link_common(bp, &req);
939f7f0c
MC
5525
5526 if (set_eee)
5527 bnxt_hwrm_set_eee(bp, &req);
c0c050c5
MC
5528 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5529}

static int bnxt_hwrm_shutdown_link(struct bnxt *bp)
{
	struct hwrm_port_phy_cfg_input req = {0};

	if (!BNXT_SINGLE_PF(bp))
		return 0;

	if (pci_num_vf(bp->pdev))
		return 0;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
	req.flags = cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DWN);
	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}

static bool bnxt_eee_config_ok(struct bnxt *bp)
{
	struct ethtool_eee *eee = &bp->eee;
	struct bnxt_link_info *link_info = &bp->link_info;

	if (!(bp->flags & BNXT_FLAG_EEE_CAP))
		return true;

	if (eee->eee_enabled) {
		u32 advertising =
			_bnxt_fw_to_ethtool_adv_spds(link_info->advertising, 0);

		if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
			eee->eee_enabled = 0;
			return false;
		}
		if (eee->advertised & ~advertising) {
			eee->advertised = advertising & eee->supported;
			return false;
		}
	}
	return true;
}
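
/* Behavior note (editorial): if the user enables EEE while the link speed
 * is forced (autoneg off), the check above clears eee_enabled and returns
 * false so that the caller re-issues PHY_CFG with EEE turned back off.
 * Similarly, EEE speeds that are no longer being autonegotiated are
 * trimmed from eee->advertised.
 */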

static int bnxt_update_phy_setting(struct bnxt *bp)
{
	int rc;
	bool update_link = false;
	bool update_pause = false;
	bool update_eee = false;
	struct bnxt_link_info *link_info = &bp->link_info;

	rc = bnxt_update_link(bp, true);
	if (rc) {
		netdev_err(bp->dev, "failed to update link (rc: %x)\n",
			   rc);
		return rc;
	}
	if ((link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
	    (link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH) !=
	    link_info->req_flow_ctrl)
		update_pause = true;
	if (!(link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
	    link_info->force_pause_setting != link_info->req_flow_ctrl)
		update_pause = true;
	if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
		if (BNXT_AUTO_MODE(link_info->auto_mode))
			update_link = true;
		if (link_info->req_link_speed != link_info->force_link_speed)
			update_link = true;
		if (link_info->req_duplex != link_info->duplex_setting)
			update_link = true;
	} else {
		if (link_info->auto_mode == BNXT_LINK_AUTO_NONE)
			update_link = true;
		if (link_info->advertising != link_info->auto_link_speeds)
			update_link = true;
	}

	/* The last close may have shut down the link, so need to call
	 * PHY_CFG to bring it back up.
	 */
	if (!netif_carrier_ok(bp->dev))
		update_link = true;

	if (!bnxt_eee_config_ok(bp))
		update_eee = true;

	if (update_link)
		rc = bnxt_hwrm_set_link_setting(bp, update_pause, update_eee);
	else if (update_pause)
		rc = bnxt_hwrm_set_pause(bp);
	if (rc) {
		netdev_err(bp->dev, "failed to update phy setting (rc: %x)\n",
			   rc);
		return rc;
	}

	return rc;
}

/* Common routine to pre-map certain register block to different GRC window.
 * A PF has 16 4K windows and a VF has 4 4K windows. However, only 15 windows
 * in the PF and 3 windows in the VF can be customized to map in different
 * register blocks.
 */
static void bnxt_preset_reg_win(struct bnxt *bp)
{
	if (BNXT_PF(bp)) {
		/* CAG registers map to GRC window #4 */
		writel(BNXT_CAG_REG_BASE,
		       bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 12);
	}
}
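
/* Illustrative sketch (editorial, hypothetical helper, not part of the
 * driver): generalizing the mapping above, each window select register is
 * 4 bytes wide, so GRC window N (1-based) would be programmed at byte
 * offset (N - 1) * 4 from the window base, which is how window #4 lands
 * at offset 12.
 */
static inline void bnxt_map_reg_win(struct bnxt *bp, int win, u32 reg_base)
{
	writel(reg_base, bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT +
	       (win - 1) * 4);
}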

static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
{
	int rc = 0;

	bnxt_preset_reg_win(bp);
	netif_carrier_off(bp->dev);
	if (irq_re_init) {
		rc = bnxt_setup_int_mode(bp);
		if (rc) {
			netdev_err(bp->dev, "bnxt_setup_int_mode err: %x\n",
				   rc);
			return rc;
		}
	}
	if ((bp->flags & BNXT_FLAG_RFS) &&
	    !(bp->flags & BNXT_FLAG_USING_MSIX)) {
		/* disable RFS if falling back to INTA */
		bp->dev->hw_features &= ~NETIF_F_NTUPLE;
		bp->flags &= ~BNXT_FLAG_RFS;
	}

	rc = bnxt_alloc_mem(bp, irq_re_init);
	if (rc) {
		netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
		goto open_err_free_mem;
	}

	if (irq_re_init) {
		bnxt_init_napi(bp);
		rc = bnxt_request_irq(bp);
		if (rc) {
			netdev_err(bp->dev, "bnxt_request_irq err: %x\n", rc);
			goto open_err;
		}
	}

	bnxt_enable_napi(bp);

	rc = bnxt_init_nic(bp, irq_re_init);
	if (rc) {
		netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
		goto open_err;
	}

	if (link_re_init) {
		rc = bnxt_update_phy_setting(bp);
		if (rc)
			netdev_warn(bp->dev, "failed to update phy settings\n");
	}

	if (irq_re_init)
		udp_tunnel_get_rx_info(bp->dev);

	set_bit(BNXT_STATE_OPEN, &bp->state);
	bnxt_enable_int(bp);
	/* Enable TX queues */
	bnxt_tx_enable(bp);
	mod_timer(&bp->timer, jiffies + bp->current_interval);
	/* Poll link status and check for SFP+ module status */
	bnxt_get_port_module_status(bp);

	return 0;

open_err:
	bnxt_disable_napi(bp);
	bnxt_del_napi(bp);

open_err_free_mem:
	bnxt_free_skbs(bp);
	bnxt_free_irq(bp);
	bnxt_free_mem(bp, true);
	return rc;
}

/* rtnl_lock held */
int bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
{
	int rc = 0;

	rc = __bnxt_open_nic(bp, irq_re_init, link_re_init);
	if (rc) {
		netdev_err(bp->dev, "nic open fail (rc: %x)\n", rc);
		dev_close(bp->dev);
	}
	return rc;
}

static int bnxt_open(struct net_device *dev)
{
	struct bnxt *bp = netdev_priv(dev);

	return __bnxt_open_nic(bp, true, true);
}

int bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
{
	int rc = 0;

#ifdef CONFIG_BNXT_SRIOV
	if (bp->sriov_cfg) {
		rc = wait_event_interruptible_timeout(bp->sriov_cfg_wait,
						      !bp->sriov_cfg,
						      BNXT_SRIOV_CFG_WAIT_TMO);
		if (rc)
			netdev_warn(bp->dev, "timeout waiting for SRIOV config operation to complete!\n");
	}
#endif
	/* Change device state to avoid TX queue wake-ups */
	bnxt_tx_disable(bp);

	clear_bit(BNXT_STATE_OPEN, &bp->state);
	smp_mb__after_atomic();
	while (test_bit(BNXT_STATE_IN_SP_TASK, &bp->state))
		msleep(20);

	/* Flush rings and disable interrupts */
	bnxt_shutdown_nic(bp, irq_re_init);

	/* TODO CHIMP_FW: Link/PHY related cleanup if (link_re_init) */

	bnxt_disable_napi(bp);
	del_timer_sync(&bp->timer);
	bnxt_free_skbs(bp);

	if (irq_re_init) {
		bnxt_free_irq(bp);
		bnxt_del_napi(bp);
	}
	bnxt_free_mem(bp, irq_re_init);
	return rc;
}

static int bnxt_close(struct net_device *dev)
{
	struct bnxt *bp = netdev_priv(dev);

	bnxt_close_nic(bp, true, true);
	bnxt_hwrm_shutdown_link(bp);
	return 0;
}

/* rtnl_lock held */
static int bnxt_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	switch (cmd) {
	case SIOCGMIIPHY:
		/* fallthru */
	case SIOCGMIIREG: {
		if (!netif_running(dev))
			return -EAGAIN;

		return 0;
	}

	case SIOCSMIIREG:
		if (!netif_running(dev))
			return -EAGAIN;

		return 0;

	default:
		/* do nothing */
		break;
	}
	return -EOPNOTSUPP;
}

static struct rtnl_link_stats64 *
bnxt_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
	u32 i;
	struct bnxt *bp = netdev_priv(dev);

	memset(stats, 0, sizeof(struct rtnl_link_stats64));

	if (!bp->bnapi)
		return stats;

	/* TODO check if we need to synchronize with bnxt_close path */
	for (i = 0; i < bp->cp_nr_rings; i++) {
		struct bnxt_napi *bnapi = bp->bnapi[i];
		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
		struct ctx_hw_stats *hw_stats = cpr->hw_stats;

		stats->rx_packets += le64_to_cpu(hw_stats->rx_ucast_pkts);
		stats->rx_packets += le64_to_cpu(hw_stats->rx_mcast_pkts);
		stats->rx_packets += le64_to_cpu(hw_stats->rx_bcast_pkts);

		stats->tx_packets += le64_to_cpu(hw_stats->tx_ucast_pkts);
		stats->tx_packets += le64_to_cpu(hw_stats->tx_mcast_pkts);
		stats->tx_packets += le64_to_cpu(hw_stats->tx_bcast_pkts);

		stats->rx_bytes += le64_to_cpu(hw_stats->rx_ucast_bytes);
		stats->rx_bytes += le64_to_cpu(hw_stats->rx_mcast_bytes);
		stats->rx_bytes += le64_to_cpu(hw_stats->rx_bcast_bytes);

		stats->tx_bytes += le64_to_cpu(hw_stats->tx_ucast_bytes);
		stats->tx_bytes += le64_to_cpu(hw_stats->tx_mcast_bytes);
		stats->tx_bytes += le64_to_cpu(hw_stats->tx_bcast_bytes);

		stats->rx_missed_errors +=
			le64_to_cpu(hw_stats->rx_discard_pkts);

		stats->multicast += le64_to_cpu(hw_stats->rx_mcast_pkts);

		stats->tx_dropped += le64_to_cpu(hw_stats->tx_drop_pkts);
	}

	if (bp->flags & BNXT_FLAG_PORT_STATS) {
		struct rx_port_stats *rx = bp->hw_rx_port_stats;
		struct tx_port_stats *tx = bp->hw_tx_port_stats;

		stats->rx_crc_errors = le64_to_cpu(rx->rx_fcs_err_frames);
		stats->rx_frame_errors = le64_to_cpu(rx->rx_align_err_frames);
		stats->rx_length_errors = le64_to_cpu(rx->rx_undrsz_frames) +
					  le64_to_cpu(rx->rx_ovrsz_frames) +
					  le64_to_cpu(rx->rx_runt_frames);
		stats->rx_errors = le64_to_cpu(rx->rx_false_carrier_frames) +
				   le64_to_cpu(rx->rx_jbr_frames);
		stats->collisions = le64_to_cpu(tx->tx_total_collisions);
		stats->tx_fifo_errors = le64_to_cpu(tx->tx_fifo_underruns);
		stats->tx_errors = le64_to_cpu(tx->tx_err);
	}

	return stats;
}
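
/* Note (editorial): the per-ring hardware counters above live in DMA
 * memory in little-endian form, so each field goes through le64_to_cpu()
 * before being summed; e.g. rx_packets is the total of the unicast,
 * multicast and broadcast packet counters across all completion rings.
 */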

static bool bnxt_mc_list_updated(struct bnxt *bp, u32 *rx_mask)
{
	struct net_device *dev = bp->dev;
	struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
	struct netdev_hw_addr *ha;
	u8 *haddr;
	int mc_count = 0;
	bool update = false;
	int off = 0;

	netdev_for_each_mc_addr(ha, dev) {
		if (mc_count >= BNXT_MAX_MC_ADDRS) {
			*rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
			vnic->mc_list_count = 0;
			return false;
		}
		haddr = ha->addr;
		if (!ether_addr_equal(haddr, vnic->mc_list + off)) {
			memcpy(vnic->mc_list + off, haddr, ETH_ALEN);
			update = true;
		}
		off += ETH_ALEN;
		mc_count++;
	}
	if (mc_count)
		*rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_MCAST;

	if (mc_count != vnic->mc_list_count) {
		vnic->mc_list_count = mc_count;
		update = true;
	}
	return update;
}

static bool bnxt_uc_list_updated(struct bnxt *bp)
{
	struct net_device *dev = bp->dev;
	struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
	struct netdev_hw_addr *ha;
	int off = 0;

	if (netdev_uc_count(dev) != (vnic->uc_filter_count - 1))
		return true;

	netdev_for_each_uc_addr(ha, dev) {
		if (!ether_addr_equal(ha->addr, vnic->uc_list + off))
			return true;

		off += ETH_ALEN;
	}
	return false;
}

static void bnxt_set_rx_mode(struct net_device *dev)
{
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
	u32 mask = vnic->rx_mask;
	bool mc_update = false;
	bool uc_update;

	if (!netif_running(dev))
		return;

	mask &= ~(CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS |
		  CFA_L2_SET_RX_MASK_REQ_MASK_MCAST |
		  CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST);

	if ((dev->flags & IFF_PROMISC) && bnxt_promisc_ok(bp))
		mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;

	uc_update = bnxt_uc_list_updated(bp);

	if (dev->flags & IFF_ALLMULTI) {
		mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
		vnic->mc_list_count = 0;
	} else {
		mc_update = bnxt_mc_list_updated(bp, &mask);
	}

	if (mask != vnic->rx_mask || uc_update || mc_update) {
		vnic->rx_mask = mask;

		set_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event);
		schedule_work(&bp->sp_task);
	}
}

static int bnxt_cfg_rx_mode(struct bnxt *bp)
{
	struct net_device *dev = bp->dev;
	struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
	struct netdev_hw_addr *ha;
	int i, off = 0, rc;
	bool uc_update;

	netif_addr_lock_bh(dev);
	uc_update = bnxt_uc_list_updated(bp);
	netif_addr_unlock_bh(dev);

	if (!uc_update)
		goto skip_uc;

	mutex_lock(&bp->hwrm_cmd_lock);
	for (i = 1; i < vnic->uc_filter_count; i++) {
		struct hwrm_cfa_l2_filter_free_input req = {0};

		bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_FILTER_FREE, -1,
				       -1);

		req.l2_filter_id = vnic->fw_l2_filter_id[i];

		rc = _hwrm_send_message(bp, &req, sizeof(req),
					HWRM_CMD_TIMEOUT);
	}
	mutex_unlock(&bp->hwrm_cmd_lock);

	vnic->uc_filter_count = 1;

	netif_addr_lock_bh(dev);
	if (netdev_uc_count(dev) > (BNXT_MAX_UC_ADDRS - 1)) {
		vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
	} else {
		netdev_for_each_uc_addr(ha, dev) {
			memcpy(vnic->uc_list + off, ha->addr, ETH_ALEN);
			off += ETH_ALEN;
			vnic->uc_filter_count++;
		}
	}
	netif_addr_unlock_bh(dev);

	for (i = 1, off = 0; i < vnic->uc_filter_count; i++, off += ETH_ALEN) {
		rc = bnxt_hwrm_set_vnic_filter(bp, 0, i, vnic->uc_list + off);
		if (rc) {
			netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n",
				   rc);
			vnic->uc_filter_count = i;
			return rc;
		}
	}

skip_uc:
	rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
	if (rc)
		netdev_err(bp->dev, "HWRM cfa l2 rx mask failure rc: %x\n",
			   rc);

	return rc;
}

/* If the chip and firmware support RFS */
static bool bnxt_rfs_supported(struct bnxt *bp)
{
	if (BNXT_PF(bp) && !BNXT_CHIP_TYPE_NITRO_A0(bp))
		return true;
	if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
		return true;
	return false;
}

/* If runtime conditions support RFS */
static bool bnxt_rfs_capable(struct bnxt *bp)
{
#ifdef CONFIG_RFS_ACCEL
	int vnics, max_vnics, max_rss_ctxs;

	if (BNXT_VF(bp) || !(bp->flags & BNXT_FLAG_MSIX_CAP))
		return false;

	vnics = 1 + bp->rx_nr_rings;
	max_vnics = bnxt_get_max_func_vnics(bp);
	max_rss_ctxs = bnxt_get_max_func_rss_ctxs(bp);

	/* RSS contexts not a limiting factor */
	if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
		max_rss_ctxs = max_vnics;
	if (vnics > max_vnics || vnics > max_rss_ctxs) {
		netdev_warn(bp->dev,
			    "Not enough resources to support NTUPLE filters, enough resources for up to %d rx rings\n",
			    min(max_rss_ctxs - 1, max_vnics - 1));
		return false;
	}

	return true;
#else
	return false;
#endif
}
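
/* Worked example (editorial): RFS needs one VNIC per RX ring plus the
 * default VNIC, so with bp->rx_nr_rings == 8 the check above requires at
 * least 9 VNICs and 9 RSS contexts from the firmware; otherwise NTUPLE
 * is dropped by bnxt_fix_features() below.
 */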

static netdev_features_t bnxt_fix_features(struct net_device *dev,
					   netdev_features_t features)
{
	struct bnxt *bp = netdev_priv(dev);

	if ((features & NETIF_F_NTUPLE) && !bnxt_rfs_capable(bp))
		features &= ~NETIF_F_NTUPLE;

	/* Both CTAG and STAG VLAN acceleration on the RX side have to be
	 * turned on or off together.
	 */
	if ((features & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX)) !=
	    (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX)) {
		if (dev->features & NETIF_F_HW_VLAN_CTAG_RX)
			features &= ~(NETIF_F_HW_VLAN_CTAG_RX |
				      NETIF_F_HW_VLAN_STAG_RX);
		else
			features |= NETIF_F_HW_VLAN_CTAG_RX |
				    NETIF_F_HW_VLAN_STAG_RX;
	}
#ifdef CONFIG_BNXT_SRIOV
	if (BNXT_VF(bp)) {
		if (bp->vf.vlan) {
			features &= ~(NETIF_F_HW_VLAN_CTAG_RX |
				      NETIF_F_HW_VLAN_STAG_RX);
		}
	}
#endif
	return features;
}

static int bnxt_set_features(struct net_device *dev, netdev_features_t features)
{
	struct bnxt *bp = netdev_priv(dev);
	u32 flags = bp->flags;
	u32 changes;
	int rc = 0;
	bool re_init = false;
	bool update_tpa = false;

	flags &= ~BNXT_FLAG_ALL_CONFIG_FEATS;
	if ((features & NETIF_F_GRO) && !BNXT_CHIP_TYPE_NITRO_A0(bp))
		flags |= BNXT_FLAG_GRO;
	if (features & NETIF_F_LRO)
		flags |= BNXT_FLAG_LRO;

	if (features & NETIF_F_HW_VLAN_CTAG_RX)
		flags |= BNXT_FLAG_STRIP_VLAN;

	if (features & NETIF_F_NTUPLE)
		flags |= BNXT_FLAG_RFS;

	changes = flags ^ bp->flags;
	if (changes & BNXT_FLAG_TPA) {
		update_tpa = true;
		if ((bp->flags & BNXT_FLAG_TPA) == 0 ||
		    (flags & BNXT_FLAG_TPA) == 0)
			re_init = true;
	}

	if (changes & ~BNXT_FLAG_TPA)
		re_init = true;

	if (flags != bp->flags) {
		u32 old_flags = bp->flags;

		bp->flags = flags;

		if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
			if (update_tpa)
				bnxt_set_ring_params(bp);
			return rc;
		}

		if (re_init) {
			bnxt_close_nic(bp, false, false);
			if (update_tpa)
				bnxt_set_ring_params(bp);

			return bnxt_open_nic(bp, false, false);
		}
		if (update_tpa) {
			rc = bnxt_set_tpa(bp,
					  (flags & BNXT_FLAG_TPA) ?
					  true : false);
			if (rc)
				bp->flags = old_flags;
		}
	}
	return rc;
}

static void bnxt_dump_tx_sw_state(struct bnxt_napi *bnapi)
{
	struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
	int i = bnapi->index;

	if (!txr)
		return;

	netdev_info(bnapi->bp->dev, "[%d]: tx{fw_ring: %d prod: %x cons: %x}\n",
		    i, txr->tx_ring_struct.fw_ring_id, txr->tx_prod,
		    txr->tx_cons);
}

static void bnxt_dump_rx_sw_state(struct bnxt_napi *bnapi)
{
	struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
	int i = bnapi->index;

	if (!rxr)
		return;

	netdev_info(bnapi->bp->dev, "[%d]: rx{fw_ring: %d prod: %x} rx_agg{fw_ring: %d agg_prod: %x sw_agg_prod: %x}\n",
		    i, rxr->rx_ring_struct.fw_ring_id, rxr->rx_prod,
		    rxr->rx_agg_ring_struct.fw_ring_id, rxr->rx_agg_prod,
		    rxr->rx_sw_agg_prod);
}

static void bnxt_dump_cp_sw_state(struct bnxt_napi *bnapi)
{
	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
	int i = bnapi->index;

	netdev_info(bnapi->bp->dev, "[%d]: cp{fw_ring: %d raw_cons: %x}\n",
		    i, cpr->cp_ring_struct.fw_ring_id, cpr->cp_raw_cons);
}

static void bnxt_dbg_dump_states(struct bnxt *bp)
{
	int i;
	struct bnxt_napi *bnapi;

	for (i = 0; i < bp->cp_nr_rings; i++) {
		bnapi = bp->bnapi[i];
		if (netif_msg_drv(bp)) {
			bnxt_dump_tx_sw_state(bnapi);
			bnxt_dump_rx_sw_state(bnapi);
			bnxt_dump_cp_sw_state(bnapi);
		}
	}
}

static void bnxt_reset_task(struct bnxt *bp, bool silent)
{
	if (!silent)
		bnxt_dbg_dump_states(bp);
	if (netif_running(bp->dev)) {
		bnxt_close_nic(bp, false, false);
		bnxt_open_nic(bp, false, false);
	}
}

static void bnxt_tx_timeout(struct net_device *dev)
{
	struct bnxt *bp = netdev_priv(dev);

	netdev_err(bp->dev, "TX timeout detected, starting reset task!\n");
	set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
	schedule_work(&bp->sp_task);
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void bnxt_poll_controller(struct net_device *dev)
{
	struct bnxt *bp = netdev_priv(dev);
	int i;

	for (i = 0; i < bp->cp_nr_rings; i++) {
		struct bnxt_irq *irq = &bp->irq_tbl[i];

		disable_irq(irq->vector);
		irq->handler(irq->vector, bp->bnapi[i]);
		enable_irq(irq->vector);
	}
}
#endif

static void bnxt_timer(unsigned long data)
{
	struct bnxt *bp = (struct bnxt *)data;
	struct net_device *dev = bp->dev;

	if (!netif_running(dev))
		return;

	if (atomic_read(&bp->intr_sem) != 0)
		goto bnxt_restart_timer;

	if (bp->link_info.link_up && (bp->flags & BNXT_FLAG_PORT_STATS)) {
		set_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event);
		schedule_work(&bp->sp_task);
	}
bnxt_restart_timer:
	mod_timer(&bp->timer, jiffies + bp->current_interval);
}

/* Only called from bnxt_sp_task() */
static void bnxt_reset(struct bnxt *bp, bool silent)
{
	/* bnxt_reset_task() calls bnxt_close_nic() which waits
	 * for BNXT_STATE_IN_SP_TASK to clear.
	 * If there is a parallel dev_close(), bnxt_close() may be holding
	 * the rtnl lock and waiting for BNXT_STATE_IN_SP_TASK to clear.
	 * So we must clear BNXT_STATE_IN_SP_TASK before taking the rtnl lock.
	 */
	clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
	rtnl_lock();
	if (test_bit(BNXT_STATE_OPEN, &bp->state))
		bnxt_reset_task(bp, silent);
	set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
	rtnl_unlock();
}

static void bnxt_cfg_ntp_filters(struct bnxt *);

static void bnxt_sp_task(struct work_struct *work)
{
	struct bnxt *bp = container_of(work, struct bnxt, sp_task);
	int rc;

	set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
	smp_mb__after_atomic();
	if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
		clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
		return;
	}

	if (test_and_clear_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event))
		bnxt_cfg_rx_mode(bp);

	if (test_and_clear_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event))
		bnxt_cfg_ntp_filters(bp);
	if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) {
		if (test_and_clear_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT,
				       &bp->sp_event))
			bnxt_hwrm_phy_qcaps(bp);

		rc = bnxt_update_link(bp, true);
		if (rc)
			netdev_err(bp->dev, "SP task can't update link (rc: %x)\n",
				   rc);
	}
	if (test_and_clear_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event))
		bnxt_hwrm_exec_fwd_req(bp);
	if (test_and_clear_bit(BNXT_VXLAN_ADD_PORT_SP_EVENT, &bp->sp_event)) {
		bnxt_hwrm_tunnel_dst_port_alloc(
			bp, bp->vxlan_port,
			TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
	}
	if (test_and_clear_bit(BNXT_VXLAN_DEL_PORT_SP_EVENT, &bp->sp_event)) {
		bnxt_hwrm_tunnel_dst_port_free(
			bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
	}
	if (test_and_clear_bit(BNXT_GENEVE_ADD_PORT_SP_EVENT, &bp->sp_event)) {
		bnxt_hwrm_tunnel_dst_port_alloc(
			bp, bp->nge_port,
			TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
	}
	if (test_and_clear_bit(BNXT_GENEVE_DEL_PORT_SP_EVENT, &bp->sp_event)) {
		bnxt_hwrm_tunnel_dst_port_free(
			bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
	}
	if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event))
		bnxt_reset(bp, false);

	if (test_and_clear_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event))
		bnxt_reset(bp, true);

	if (test_and_clear_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event))
		bnxt_get_port_module_status(bp);

	if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event))
		bnxt_hwrm_port_qstats(bp);

	smp_mb__before_atomic();
	clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
}

static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev)
{
	int rc;
	struct bnxt *bp = netdev_priv(dev);

	SET_NETDEV_DEV(dev, &pdev->dev);

	/* enable device (incl. PCI PM wakeup), and bus-mastering */
	rc = pci_enable_device(pdev);
	if (rc) {
		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
		goto init_err;
	}

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		dev_err(&pdev->dev,
			"Cannot find PCI device base address, aborting\n");
		rc = -ENODEV;
		goto init_err_disable;
	}

	rc = pci_request_regions(pdev, DRV_MODULE_NAME);
	if (rc) {
		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
		goto init_err_disable;
	}

	if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) != 0 &&
	    dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
		dev_err(&pdev->dev, "System does not support DMA, aborting\n");
		rc = -EIO;
		goto init_err_disable;
	}

	pci_set_master(pdev);

	bp->dev = dev;
	bp->pdev = pdev;

	bp->bar0 = pci_ioremap_bar(pdev, 0);
	if (!bp->bar0) {
		dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
		rc = -ENOMEM;
		goto init_err_release;
	}

	bp->bar1 = pci_ioremap_bar(pdev, 2);
	if (!bp->bar1) {
		dev_err(&pdev->dev, "Cannot map doorbell registers, aborting\n");
		rc = -ENOMEM;
		goto init_err_release;
	}

	bp->bar2 = pci_ioremap_bar(pdev, 4);
	if (!bp->bar2) {
		dev_err(&pdev->dev, "Cannot map bar4 registers, aborting\n");
		rc = -ENOMEM;
		goto init_err_release;
	}

	pci_enable_pcie_error_reporting(pdev);

	INIT_WORK(&bp->sp_task, bnxt_sp_task);

	spin_lock_init(&bp->ntp_fltr_lock);

	bp->rx_ring_size = BNXT_DEFAULT_RX_RING_SIZE;
	bp->tx_ring_size = BNXT_DEFAULT_TX_RING_SIZE;

	/* tick values in microseconds */
	bp->rx_coal_ticks = 12;
	bp->rx_coal_bufs = 30;
	bp->rx_coal_ticks_irq = 1;
	bp->rx_coal_bufs_irq = 2;

	bp->tx_coal_ticks = 25;
	bp->tx_coal_bufs = 30;
	bp->tx_coal_ticks_irq = 2;
	bp->tx_coal_bufs_irq = 2;

	bp->stats_coal_ticks = BNXT_DEF_STATS_COAL_TICKS;

	init_timer(&bp->timer);
	bp->timer.data = (unsigned long)bp;
	bp->timer.function = bnxt_timer;
	bp->current_interval = BNXT_TIMER_INTERVAL;

	clear_bit(BNXT_STATE_OPEN, &bp->state);

	return 0;

init_err_release:
	if (bp->bar2) {
		pci_iounmap(pdev, bp->bar2);
		bp->bar2 = NULL;
	}

	if (bp->bar1) {
		pci_iounmap(pdev, bp->bar1);
		bp->bar1 = NULL;
	}

	if (bp->bar0) {
		pci_iounmap(pdev, bp->bar0);
		bp->bar0 = NULL;
	}

	pci_release_regions(pdev);

init_err_disable:
	pci_disable_device(pdev);

init_err:
	return rc;
}

/* rtnl_lock held */
static int bnxt_change_mac_addr(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;
	struct bnxt *bp = netdev_priv(dev);
	int rc = 0;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	rc = bnxt_approve_mac(bp, addr->sa_data);
	if (rc)
		return rc;

	if (ether_addr_equal(addr->sa_data, dev->dev_addr))
		return 0;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	if (netif_running(dev)) {
		bnxt_close_nic(bp, false, false);
		rc = bnxt_open_nic(bp, false, false);
	}

	return rc;
}

/* rtnl_lock held */
static int bnxt_change_mtu(struct net_device *dev, int new_mtu)
{
	struct bnxt *bp = netdev_priv(dev);

	if (netif_running(dev))
		bnxt_close_nic(bp, false, false);

	dev->mtu = new_mtu;
	bnxt_set_ring_params(bp);

	if (netif_running(dev))
		return bnxt_open_nic(bp, false, false);

	return 0;
}

int bnxt_setup_mq_tc(struct net_device *dev, u8 tc)
{
	struct bnxt *bp = netdev_priv(dev);
	bool sh = false;

	if (tc > bp->max_tc) {
		netdev_err(dev, "too many traffic classes requested: %d Max supported is %d\n",
			   tc, bp->max_tc);
		return -EINVAL;
	}

	if (netdev_get_num_tc(dev) == tc)
		return 0;

	if (bp->flags & BNXT_FLAG_SHARED_RINGS)
		sh = true;

	if (tc) {
		int max_rx_rings, max_tx_rings, rc;

		rc = bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings, sh);
		if (rc || bp->tx_nr_rings_per_tc * tc > max_tx_rings)
			return -ENOMEM;
	}

	/* Need to close the device and do hw resource re-allocations */
	if (netif_running(bp->dev))
		bnxt_close_nic(bp, true, false);

	if (tc) {
		bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tc;
		netdev_set_num_tc(dev, tc);
	} else {
		bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
		netdev_reset_tc(dev);
	}
	bp->cp_nr_rings = sh ? max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
			       bp->tx_nr_rings + bp->rx_nr_rings;
	bp->num_stat_ctxs = bp->cp_nr_rings;

	if (netif_running(bp->dev))
		return bnxt_open_nic(bp, true, false);

	return 0;
}
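
/* Worked example (editorial): with tx_nr_rings_per_tc == 4 and tc == 2,
 * bp->tx_nr_rings becomes 8.  With shared rings the completion ring count
 * is max(tx, rx); otherwise it is tx + rx, and the stat context count
 * always follows the completion ring count.
 */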

static int bnxt_setup_tc(struct net_device *dev, u32 handle, __be16 proto,
			 struct tc_to_netdev *ntc)
{
	if (ntc->type != TC_SETUP_MQPRIO)
		return -EINVAL;

	return bnxt_setup_mq_tc(dev, ntc->tc);
}

#ifdef CONFIG_RFS_ACCEL
static bool bnxt_fltr_match(struct bnxt_ntuple_filter *f1,
			    struct bnxt_ntuple_filter *f2)
{
	struct flow_keys *keys1 = &f1->fkeys;
	struct flow_keys *keys2 = &f2->fkeys;

	if (keys1->addrs.v4addrs.src == keys2->addrs.v4addrs.src &&
	    keys1->addrs.v4addrs.dst == keys2->addrs.v4addrs.dst &&
	    keys1->ports.ports == keys2->ports.ports &&
	    keys1->basic.ip_proto == keys2->basic.ip_proto &&
	    keys1->basic.n_proto == keys2->basic.n_proto &&
	    ether_addr_equal(f1->src_mac_addr, f2->src_mac_addr) &&
	    ether_addr_equal(f1->dst_mac_addr, f2->dst_mac_addr))
		return true;

	return false;
}

static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
			      u16 rxq_index, u32 flow_id)
{
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_ntuple_filter *fltr, *new_fltr;
	struct flow_keys *fkeys;
	struct ethhdr *eth = (struct ethhdr *)skb_mac_header(skb);
	int rc = 0, idx, bit_id, l2_idx = 0;
	struct hlist_head *head;

	if (skb->encapsulation)
		return -EPROTONOSUPPORT;

	if (!ether_addr_equal(dev->dev_addr, eth->h_dest)) {
		struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
		int off = 0, j;

		netif_addr_lock_bh(dev);
		for (j = 0; j < vnic->uc_filter_count; j++, off += ETH_ALEN) {
			if (ether_addr_equal(eth->h_dest,
					     vnic->uc_list + off)) {
				l2_idx = j + 1;
				break;
			}
		}
		netif_addr_unlock_bh(dev);
		if (!l2_idx)
			return -EINVAL;
	}
	new_fltr = kzalloc(sizeof(*new_fltr), GFP_ATOMIC);
	if (!new_fltr)
		return -ENOMEM;

	fkeys = &new_fltr->fkeys;
	if (!skb_flow_dissect_flow_keys(skb, fkeys, 0)) {
		rc = -EPROTONOSUPPORT;
		goto err_free;
	}

	if ((fkeys->basic.n_proto != htons(ETH_P_IP) &&
	     fkeys->basic.n_proto != htons(ETH_P_IPV6)) ||
	    ((fkeys->basic.ip_proto != IPPROTO_TCP) &&
	     (fkeys->basic.ip_proto != IPPROTO_UDP))) {
		rc = -EPROTONOSUPPORT;
		goto err_free;
	}
	if (fkeys->basic.n_proto == htons(ETH_P_IPV6) &&
	    bp->hwrm_spec_code < 0x10601) {
		rc = -EPROTONOSUPPORT;
		goto err_free;
	}

	memcpy(new_fltr->dst_mac_addr, eth->h_dest, ETH_ALEN);
	memcpy(new_fltr->src_mac_addr, eth->h_source, ETH_ALEN);

	idx = skb_get_hash_raw(skb) & BNXT_NTP_FLTR_HASH_MASK;
	head = &bp->ntp_fltr_hash_tbl[idx];
	rcu_read_lock();
	hlist_for_each_entry_rcu(fltr, head, hash) {
		if (bnxt_fltr_match(fltr, new_fltr)) {
			rcu_read_unlock();
			rc = 0;
			goto err_free;
		}
	}
	rcu_read_unlock();

	spin_lock_bh(&bp->ntp_fltr_lock);
	bit_id = bitmap_find_free_region(bp->ntp_fltr_bmap,
					 BNXT_NTP_FLTR_MAX_FLTR, 0);
	if (bit_id < 0) {
		spin_unlock_bh(&bp->ntp_fltr_lock);
		rc = -ENOMEM;
		goto err_free;
	}

	new_fltr->sw_id = (u16)bit_id;
	new_fltr->flow_id = flow_id;
	new_fltr->l2_fltr_idx = l2_idx;
	new_fltr->rxq = rxq_index;
	hlist_add_head_rcu(&new_fltr->hash, head);
	bp->ntp_fltr_count++;
	spin_unlock_bh(&bp->ntp_fltr_lock);

	set_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event);
	schedule_work(&bp->sp_task);

	return new_fltr->sw_id;

err_free:
	kfree(new_fltr);
	return rc;
}
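
/* Flow summary (editorial): aRFS steering dissects the skb into a 5-tuple
 * plus MAC addresses, dedups it against the hash table above, reserves a
 * software filter ID from the bitmap, and defers the actual
 * HWRM_CFA_NTUPLE_FILTER_ALLOC to bnxt_sp_task() via
 * BNXT_RX_NTP_FLTR_SP_EVENT, since HWRM commands can sleep while this
 * hook runs in atomic context.
 */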

static void bnxt_cfg_ntp_filters(struct bnxt *bp)
{
	int i;

	for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
		struct hlist_head *head;
		struct hlist_node *tmp;
		struct bnxt_ntuple_filter *fltr;
		int rc;

		head = &bp->ntp_fltr_hash_tbl[i];
		hlist_for_each_entry_safe(fltr, tmp, head, hash) {
			bool del = false;

			if (test_bit(BNXT_FLTR_VALID, &fltr->state)) {
				if (rps_may_expire_flow(bp->dev, fltr->rxq,
							fltr->flow_id,
							fltr->sw_id)) {
					bnxt_hwrm_cfa_ntuple_filter_free(bp,
									 fltr);
					del = true;
				}
			} else {
				rc = bnxt_hwrm_cfa_ntuple_filter_alloc(bp,
								       fltr);
				if (rc)
					del = true;
				else
					set_bit(BNXT_FLTR_VALID, &fltr->state);
			}

			if (del) {
				spin_lock_bh(&bp->ntp_fltr_lock);
				hlist_del_rcu(&fltr->hash);
				bp->ntp_fltr_count--;
				spin_unlock_bh(&bp->ntp_fltr_lock);
				synchronize_rcu();
				clear_bit(fltr->sw_id, bp->ntp_fltr_bmap);
				kfree(fltr);
			}
		}
	}
	if (test_and_clear_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event))
		netdev_info(bp->dev, "Receive PF driver unload event!");
}

#else

static void bnxt_cfg_ntp_filters(struct bnxt *bp)
{
}

#endif /* CONFIG_RFS_ACCEL */

static void bnxt_udp_tunnel_add(struct net_device *dev,
				struct udp_tunnel_info *ti)
{
	struct bnxt *bp = netdev_priv(dev);

	if (ti->sa_family != AF_INET6 && ti->sa_family != AF_INET)
		return;

	if (!netif_running(dev))
		return;

	switch (ti->type) {
	case UDP_TUNNEL_TYPE_VXLAN:
		if (bp->vxlan_port_cnt && bp->vxlan_port != ti->port)
			return;

		bp->vxlan_port_cnt++;
		if (bp->vxlan_port_cnt == 1) {
			bp->vxlan_port = ti->port;
			set_bit(BNXT_VXLAN_ADD_PORT_SP_EVENT, &bp->sp_event);
			schedule_work(&bp->sp_task);
		}
		break;
	case UDP_TUNNEL_TYPE_GENEVE:
		if (bp->nge_port_cnt && bp->nge_port != ti->port)
			return;

		bp->nge_port_cnt++;
		if (bp->nge_port_cnt == 1) {
			bp->nge_port = ti->port;
			set_bit(BNXT_GENEVE_ADD_PORT_SP_EVENT, &bp->sp_event);
		}
		break;
	default:
		return;
	}

	schedule_work(&bp->sp_task);
}

static void bnxt_udp_tunnel_del(struct net_device *dev,
				struct udp_tunnel_info *ti)
{
	struct bnxt *bp = netdev_priv(dev);

	if (ti->sa_family != AF_INET6 && ti->sa_family != AF_INET)
		return;

	if (!netif_running(dev))
		return;

	switch (ti->type) {
	case UDP_TUNNEL_TYPE_VXLAN:
		if (!bp->vxlan_port_cnt || bp->vxlan_port != ti->port)
			return;
		bp->vxlan_port_cnt--;

		if (bp->vxlan_port_cnt != 0)
			return;

		set_bit(BNXT_VXLAN_DEL_PORT_SP_EVENT, &bp->sp_event);
		break;
	case UDP_TUNNEL_TYPE_GENEVE:
		if (!bp->nge_port_cnt || bp->nge_port != ti->port)
			return;
		bp->nge_port_cnt--;

		if (bp->nge_port_cnt != 0)
			return;

		set_bit(BNXT_GENEVE_DEL_PORT_SP_EVENT, &bp->sp_event);
		break;
	default:
		return;
	}

	schedule_work(&bp->sp_task);
}

static const struct net_device_ops bnxt_netdev_ops = {
	.ndo_open		= bnxt_open,
	.ndo_start_xmit		= bnxt_start_xmit,
	.ndo_stop		= bnxt_close,
	.ndo_get_stats64	= bnxt_get_stats64,
	.ndo_set_rx_mode	= bnxt_set_rx_mode,
	.ndo_do_ioctl		= bnxt_ioctl,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= bnxt_change_mac_addr,
	.ndo_change_mtu		= bnxt_change_mtu,
	.ndo_fix_features	= bnxt_fix_features,
	.ndo_set_features	= bnxt_set_features,
	.ndo_tx_timeout		= bnxt_tx_timeout,
#ifdef CONFIG_BNXT_SRIOV
	.ndo_get_vf_config	= bnxt_get_vf_config,
	.ndo_set_vf_mac		= bnxt_set_vf_mac,
	.ndo_set_vf_vlan	= bnxt_set_vf_vlan,
	.ndo_set_vf_rate	= bnxt_set_vf_bw,
	.ndo_set_vf_link_state	= bnxt_set_vf_link_state,
	.ndo_set_vf_spoofchk	= bnxt_set_vf_spoofchk,
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= bnxt_poll_controller,
#endif
	.ndo_setup_tc		= bnxt_setup_tc,
#ifdef CONFIG_RFS_ACCEL
	.ndo_rx_flow_steer	= bnxt_rx_flow_steer,
#endif
	.ndo_udp_tunnel_add	= bnxt_udp_tunnel_add,
	.ndo_udp_tunnel_del	= bnxt_udp_tunnel_del,
};

static void bnxt_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnxt *bp = netdev_priv(dev);

	if (BNXT_PF(bp))
		bnxt_sriov_disable(bp);

	pci_disable_pcie_error_reporting(pdev);
	unregister_netdev(dev);
	cancel_work_sync(&bp->sp_task);
	bp->sp_event = 0;

	bnxt_clear_int_mode(bp);
	bnxt_hwrm_func_drv_unrgtr(bp);
	bnxt_free_hwrm_resources(bp);
	bnxt_dcb_free(bp);
	pci_iounmap(pdev, bp->bar2);
	pci_iounmap(pdev, bp->bar1);
	pci_iounmap(pdev, bp->bar0);
	kfree(bp->edev);
	bp->edev = NULL;
	free_netdev(dev);

	pci_release_regions(pdev);
	pci_disable_device(pdev);
}

static int bnxt_probe_phy(struct bnxt *bp)
{
	int rc = 0;
	struct bnxt_link_info *link_info = &bp->link_info;

	rc = bnxt_hwrm_phy_qcaps(bp);
	if (rc) {
		netdev_err(bp->dev, "Probe phy can't get phy capabilities (rc: %x)\n",
			   rc);
		return rc;
	}

	rc = bnxt_update_link(bp, false);
	if (rc) {
		netdev_err(bp->dev, "Probe phy can't update link (rc: %x)\n",
			   rc);
		return rc;
	}

	/* Older firmware does not have supported_auto_speeds, so assume
	 * that all supported speeds can be autonegotiated.
	 */
	if (link_info->auto_link_speeds && !link_info->support_auto_speeds)
		link_info->support_auto_speeds = link_info->support_speeds;

	/* initialize the ethtool setting copy with NVM settings */
	if (BNXT_AUTO_MODE(link_info->auto_mode)) {
		link_info->autoneg = BNXT_AUTONEG_SPEED;
		if (bp->hwrm_spec_code >= 0x10201) {
			if (link_info->auto_pause_setting &
			    PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE)
				link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
		} else {
			link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
		}
		link_info->advertising = link_info->auto_link_speeds;
	} else {
		link_info->req_link_speed = link_info->force_link_speed;
		link_info->req_duplex = link_info->duplex_setting;
	}
	if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL)
		link_info->req_flow_ctrl =
			link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH;
	else
		link_info->req_flow_ctrl = link_info->force_pause_setting;
	return rc;
}

static int bnxt_get_max_irq(struct pci_dev *pdev)
{
	u16 ctrl;

	if (!pdev->msix_cap)
		return 1;

	pci_read_config_word(pdev, pdev->msix_cap + PCI_MSIX_FLAGS, &ctrl);
	return (ctrl & PCI_MSIX_FLAGS_QSIZE) + 1;
}
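
/* Worked example (editorial): PCI_MSIX_FLAGS_QSIZE encodes the MSI-X
 * table size minus one, so a control word of 0x007f yields 128 vectors;
 * a function without an MSI-X capability is treated as single-interrupt.
 */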

static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx,
				int *max_cp)
{
	int max_ring_grps = 0;

#ifdef CONFIG_BNXT_SRIOV
	if (!BNXT_PF(bp)) {
		*max_tx = bp->vf.max_tx_rings;
		*max_rx = bp->vf.max_rx_rings;
		*max_cp = min_t(int, bp->vf.max_irqs, bp->vf.max_cp_rings);
		*max_cp = min_t(int, *max_cp, bp->vf.max_stat_ctxs);
		max_ring_grps = bp->vf.max_hw_ring_grps;
	} else
#endif
	{
		*max_tx = bp->pf.max_tx_rings;
		*max_rx = bp->pf.max_rx_rings;
		*max_cp = min_t(int, bp->pf.max_irqs, bp->pf.max_cp_rings);
		*max_cp = min_t(int, *max_cp, bp->pf.max_stat_ctxs);
		max_ring_grps = bp->pf.max_hw_ring_grps;
	}
	if (BNXT_CHIP_TYPE_NITRO_A0(bp) && BNXT_PF(bp)) {
		*max_cp -= 1;
		*max_rx -= 2;
	}
	if (bp->flags & BNXT_FLAG_AGG_RINGS)
		*max_rx >>= 1;
	*max_rx = min_t(int, *max_rx, max_ring_grps);
}

int bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx, bool shared)
{
	int rx, tx, cp;

	_bnxt_get_max_rings(bp, &rx, &tx, &cp);
	if (!rx || !tx || !cp)
		return -ENOMEM;

	*max_rx = rx;
	*max_tx = tx;
	return bnxt_trim_rings(bp, max_rx, max_tx, cp, shared);
}

static int bnxt_get_dflt_rings(struct bnxt *bp, int *max_rx, int *max_tx,
			       bool shared)
{
	int rc;

	rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared);
	if (rc)
		return rc;

	if (bp->flags & BNXT_FLAG_ROCE_CAP) {
		int max_cp, max_stat, max_irq;

		/* Reserve minimum resources for RoCE */
		max_cp = bnxt_get_max_func_cp_rings(bp);
		max_stat = bnxt_get_max_func_stat_ctxs(bp);
		max_irq = bnxt_get_max_func_irqs(bp);
		if (max_cp <= BNXT_MIN_ROCE_CP_RINGS ||
		    max_irq <= BNXT_MIN_ROCE_CP_RINGS ||
		    max_stat <= BNXT_MIN_ROCE_STAT_CTXS)
			return 0;

		max_cp -= BNXT_MIN_ROCE_CP_RINGS;
		max_irq -= BNXT_MIN_ROCE_CP_RINGS;
		max_stat -= BNXT_MIN_ROCE_STAT_CTXS;
		max_cp = min_t(int, max_cp, max_irq);
		max_cp = min_t(int, max_cp, max_stat);
		rc = bnxt_trim_rings(bp, max_rx, max_tx, max_cp, shared);
		if (rc)
			rc = 0;
	}
	return rc;
}

static int bnxt_set_dflt_rings(struct bnxt *bp)
{
	int dflt_rings, max_rx_rings, max_tx_rings, rc;
	bool sh = true;

	if (sh)
		bp->flags |= BNXT_FLAG_SHARED_RINGS;
	dflt_rings = netif_get_num_default_rss_queues();
	rc = bnxt_get_dflt_rings(bp, &max_rx_rings, &max_tx_rings, sh);
	if (rc)
		return rc;
	bp->rx_nr_rings = min_t(int, dflt_rings, max_rx_rings);
	bp->tx_nr_rings_per_tc = min_t(int, dflt_rings, max_tx_rings);
	bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
	bp->cp_nr_rings = sh ? max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
			       bp->tx_nr_rings + bp->rx_nr_rings;
	bp->num_stat_ctxs = bp->cp_nr_rings;
	if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
		bp->rx_nr_rings++;
		bp->cp_nr_rings++;
	}
	return rc;
}
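
/* Worked example (editorial, assumes an 8-CPU host): with shared rings
 * and ample hardware resources, netif_get_num_default_rss_queues() caps
 * the defaults at 8, giving 8 RX rings, 8 TX rings per TC and
 * max(8, 8) == 8 completion rings / stat contexts.
 */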

void bnxt_restore_pf_fw_resources(struct bnxt *bp)
{
	ASSERT_RTNL();
	bnxt_hwrm_func_qcaps(bp);
	bnxt_subtract_ulp_resources(bp, BNXT_ROCE_ULP);
}

static void bnxt_parse_log_pcie_link(struct bnxt *bp)
{
	enum pcie_link_width width = PCIE_LNK_WIDTH_UNKNOWN;
	enum pci_bus_speed speed = PCI_SPEED_UNKNOWN;

	if (pcie_get_minimum_link(bp->pdev, &speed, &width) ||
	    speed == PCI_SPEED_UNKNOWN || width == PCIE_LNK_WIDTH_UNKNOWN)
		netdev_info(bp->dev, "Failed to determine PCIe Link Info\n");
	else
		netdev_info(bp->dev, "PCIe: Speed %s Width x%d\n",
			    speed == PCIE_SPEED_2_5GT ? "2.5GT/s" :
			    speed == PCIE_SPEED_5_0GT ? "5.0GT/s" :
			    speed == PCIE_SPEED_8_0GT ? "8.0GT/s" :
			    "Unknown", width);
}

static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int version_printed;
	struct net_device *dev;
	struct bnxt *bp;
	int rc, max_irqs;

	if (pdev->device == 0x16cd && pci_is_bridge(pdev))
		return -ENODEV;

	if (version_printed++ == 0)
		pr_info("%s", version);

	max_irqs = bnxt_get_max_irq(pdev);
	dev = alloc_etherdev_mq(sizeof(*bp), max_irqs);
	if (!dev)
		return -ENOMEM;

	bp = netdev_priv(dev);

	if (bnxt_vf_pciid(ent->driver_data))
		bp->flags |= BNXT_FLAG_VF;

	if (pdev->msix_cap)
		bp->flags |= BNXT_FLAG_MSIX_CAP;

	rc = bnxt_init_board(pdev, dev);
	if (rc < 0)
		goto init_err_free;

	dev->netdev_ops = &bnxt_netdev_ops;
	dev->watchdog_timeo = BNXT_TX_TIMEOUT;
	dev->ethtool_ops = &bnxt_ethtool_ops;

	pci_set_drvdata(pdev, dev);

	rc = bnxt_alloc_hwrm_resources(bp);
	if (rc)
		goto init_err;

	mutex_init(&bp->hwrm_cmd_lock);
	rc = bnxt_hwrm_ver_get(bp);
	if (rc)
		goto init_err;

	bnxt_hwrm_fw_set_time(bp);

	dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
			   NETIF_F_TSO | NETIF_F_TSO6 |
			   NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
			   NETIF_F_GSO_IPXIP4 |
			   NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
			   NETIF_F_GSO_PARTIAL | NETIF_F_RXHASH |
			   NETIF_F_RXCSUM | NETIF_F_GRO;

	if (!BNXT_CHIP_TYPE_NITRO_A0(bp))
		dev->hw_features |= NETIF_F_LRO;

	dev->hw_enc_features =
			NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
			NETIF_F_TSO | NETIF_F_TSO6 |
			NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
			NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
			NETIF_F_GSO_IPXIP4 | NETIF_F_GSO_PARTIAL;
	dev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM |
				    NETIF_F_GSO_GRE_CSUM;
	dev->vlan_features = dev->hw_features | NETIF_F_HIGHDMA;
	dev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX |
			    NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX;
	dev->features |= dev->hw_features | NETIF_F_HIGHDMA;
	dev->priv_flags |= IFF_UNICAST_FLT;

	/* MTU range: 60 - 9500 */
	dev->min_mtu = ETH_ZLEN;
	dev->max_mtu = 9500;

	bnxt_dcb_init(bp);

#ifdef CONFIG_BNXT_SRIOV
	init_waitqueue_head(&bp->sriov_cfg_wait);
#endif
	bp->gro_func = bnxt_gro_func_5730x;
	if (BNXT_CHIP_NUM_57X1X(bp->chip_num))
		bp->gro_func = bnxt_gro_func_5731x;

	rc = bnxt_hwrm_func_drv_rgtr(bp);
	if (rc)
		goto init_err;

	rc = bnxt_hwrm_func_rgtr_async_events(bp, NULL, 0);
	if (rc)
		goto init_err;

	bp->ulp_probe = bnxt_ulp_probe;

	/* Get the MAX capabilities for this function */
	rc = bnxt_hwrm_func_qcaps(bp);
	if (rc) {
		netdev_err(bp->dev, "hwrm query capability failure rc: %x\n",
			   rc);
		rc = -1;
		goto init_err;
	}

	rc = bnxt_hwrm_queue_qportcfg(bp);
	if (rc) {
		netdev_err(bp->dev, "hwrm query qportcfg failure rc: %x\n",
			   rc);
		rc = -1;
		goto init_err;
	}

	bnxt_hwrm_func_qcfg(bp);

	bnxt_set_tpa_flags(bp);
	bnxt_set_ring_params(bp);
	bnxt_set_max_func_irqs(bp, max_irqs);
	bnxt_set_dflt_rings(bp);

	/* Default RSS hash cfg. */
	bp->rss_hash_cfg = VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4 |
			   VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4 |
			   VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 |
			   VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6;
	if (!BNXT_CHIP_NUM_57X0X(bp->chip_num) &&
	    !BNXT_CHIP_TYPE_NITRO_A0(bp) &&
	    bp->hwrm_spec_code >= 0x10501) {
		bp->flags |= BNXT_FLAG_UDP_RSS_CAP;
		bp->rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4 |
				    VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6;
	}

	bnxt_hwrm_vnic_qcaps(bp);
	if (bnxt_rfs_supported(bp)) {
		dev->hw_features |= NETIF_F_NTUPLE;
		if (bnxt_rfs_capable(bp)) {
			bp->flags |= BNXT_FLAG_RFS;
			dev->features |= NETIF_F_NTUPLE;
		}
	}

	if (dev->hw_features & NETIF_F_HW_VLAN_CTAG_RX)
		bp->flags |= BNXT_FLAG_STRIP_VLAN;

	rc = bnxt_probe_phy(bp);
	if (rc)
		goto init_err;

	rc = bnxt_hwrm_func_reset(bp);
	if (rc)
		goto init_err;

	rc = bnxt_init_int_mode(bp);
	if (rc)
		goto init_err;

	rc = register_netdev(dev);
	if (rc)
		goto init_err_clr_int;

	netdev_info(dev, "%s found at mem %lx, node addr %pM\n",
		    board_info[ent->driver_data].name,
		    (long)pci_resource_start(pdev, 0), dev->dev_addr);

	bnxt_parse_log_pcie_link(bp);

	return 0;

init_err_clr_int:
	bnxt_clear_int_mode(bp);

init_err:
	pci_iounmap(pdev, bp->bar0);
	pci_release_regions(pdev);
	pci_disable_device(pdev);

init_err_free:
	free_netdev(dev);
	return rc;
}

/**
 * bnxt_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev,
					       pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct bnxt *bp = netdev_priv(netdev);

	netdev_info(netdev, "PCI I/O error detected\n");

	rtnl_lock();
	netif_device_detach(netdev);

	bnxt_ulp_stop(bp);

	if (state == pci_channel_io_perm_failure) {
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	if (netif_running(netdev))
		bnxt_close(netdev);

	pci_disable_device(pdev);
	rtnl_unlock();

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * bnxt_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 * At this point, the card has experienced a hard reset,
 * followed by fixups by BIOS, and has its config space
 * set up identically to what it was at cold boot.
 */
static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct bnxt *bp = netdev_priv(netdev);
	int err = 0;
	pci_ers_result_t result = PCI_ERS_RESULT_DISCONNECT;

	netdev_info(bp->dev, "PCI Slot Reset\n");

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset.\n");
	} else {
		pci_set_master(pdev);

		err = bnxt_hwrm_func_reset(bp);
		if (!err && netif_running(netdev))
			err = bnxt_open(netdev);

		if (!err) {
			result = PCI_ERS_RESULT_RECOVERED;
			bnxt_ulp_start(bp);
		}
	}

	if (result != PCI_ERS_RESULT_RECOVERED && netif_running(netdev))
		dev_close(netdev);

	rtnl_unlock();

	err = pci_cleanup_aer_uncorrect_error_status(pdev);
	if (err) {
		dev_err(&pdev->dev,
			"pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n",
			err); /* non-fatal, continue */
	}

	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * bnxt_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells
 * us that it's OK to resume normal operation.
 */
static void bnxt_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);

	rtnl_lock();

	netif_device_attach(netdev);

	rtnl_unlock();
}

static const struct pci_error_handlers bnxt_err_handler = {
	.error_detected	= bnxt_io_error_detected,
	.slot_reset	= bnxt_io_slot_reset,
	.resume		= bnxt_io_resume
};

static struct pci_driver bnxt_pci_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= bnxt_pci_tbl,
	.probe		= bnxt_init_one,
	.remove		= bnxt_remove_one,
	.err_handler	= &bnxt_err_handler,
#if defined(CONFIG_BNXT_SRIOV)
	.sriov_configure = bnxt_sriov_configure,
#endif
};

module_pci_driver(bnxt_pci_driver);