drivers/net/ethernet/broadcom/bnxt/bnxt.c
1 /* Broadcom NetXtreme-C/E network driver.
2  *
3  * Copyright (c) 2014-2016 Broadcom Corporation
4  * Copyright (c) 2016-2019 Broadcom Limited
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License as published by
8  * the Free Software Foundation.
9  */
10
11 #include <linux/module.h>
12
13 #include <linux/stringify.h>
14 #include <linux/kernel.h>
15 #include <linux/timer.h>
16 #include <linux/errno.h>
17 #include <linux/ioport.h>
18 #include <linux/slab.h>
19 #include <linux/vmalloc.h>
20 #include <linux/interrupt.h>
21 #include <linux/pci.h>
22 #include <linux/netdevice.h>
23 #include <linux/etherdevice.h>
24 #include <linux/skbuff.h>
25 #include <linux/dma-mapping.h>
26 #include <linux/bitops.h>
27 #include <linux/io.h>
28 #include <linux/irq.h>
29 #include <linux/delay.h>
30 #include <asm/byteorder.h>
31 #include <asm/page.h>
32 #include <linux/time.h>
33 #include <linux/mii.h>
34 #include <linux/mdio.h>
35 #include <linux/if.h>
36 #include <linux/if_vlan.h>
37 #include <linux/if_bridge.h>
38 #include <linux/rtc.h>
39 #include <linux/bpf.h>
40 #include <net/gro.h>
41 #include <net/ip.h>
42 #include <net/tcp.h>
43 #include <net/udp.h>
44 #include <net/checksum.h>
45 #include <net/ip6_checksum.h>
46 #include <net/udp_tunnel.h>
47 #include <linux/workqueue.h>
48 #include <linux/prefetch.h>
49 #include <linux/cache.h>
50 #include <linux/log2.h>
51 #include <linux/bitmap.h>
52 #include <linux/cpu_rmap.h>
53 #include <linux/cpumask.h>
54 #include <net/pkt_cls.h>
55 #include <linux/hwmon.h>
56 #include <linux/hwmon-sysfs.h>
57 #include <net/page_pool/helpers.h>
58 #include <linux/align.h>
59 #include <net/netdev_queues.h>
60
61 #include "bnxt_hsi.h"
62 #include "bnxt.h"
63 #include "bnxt_hwrm.h"
64 #include "bnxt_ulp.h"
65 #include "bnxt_sriov.h"
66 #include "bnxt_ethtool.h"
67 #include "bnxt_dcb.h"
68 #include "bnxt_xdp.h"
69 #include "bnxt_ptp.h"
70 #include "bnxt_vfr.h"
71 #include "bnxt_tc.h"
72 #include "bnxt_devlink.h"
73 #include "bnxt_debugfs.h"
74
75 #define BNXT_TX_TIMEOUT         (5 * HZ)
76 #define BNXT_DEF_MSG_ENABLE     (NETIF_MSG_DRV | NETIF_MSG_HW | \
77                                  NETIF_MSG_TX_ERR)
78
79 MODULE_LICENSE("GPL");
80 MODULE_DESCRIPTION("Broadcom BCM573xx network driver");
81
82 #define BNXT_RX_OFFSET (NET_SKB_PAD + NET_IP_ALIGN)
83 #define BNXT_RX_DMA_OFFSET NET_SKB_PAD
84 #define BNXT_RX_COPY_THRESH 256
85
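/* Length threshold for the TX "push" fast path (see bnxt_start_xmit()):
 * packets no longer than this may be copied directly into the doorbell BAR.
 */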
86 #define BNXT_TX_PUSH_THRESH 164
87
88 /* indexed by enum board_idx */
89 static const struct {
90         char *name;
91 } board_info[] = {
92         [BCM57301] = { "Broadcom BCM57301 NetXtreme-C 10Gb Ethernet" },
93         [BCM57302] = { "Broadcom BCM57302 NetXtreme-C 10Gb/25Gb Ethernet" },
94         [BCM57304] = { "Broadcom BCM57304 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
95         [BCM57417_NPAR] = { "Broadcom BCM57417 NetXtreme-E Ethernet Partition" },
96         [BCM58700] = { "Broadcom BCM58700 Nitro 1Gb/2.5Gb/10Gb Ethernet" },
97         [BCM57311] = { "Broadcom BCM57311 NetXtreme-C 10Gb Ethernet" },
98         [BCM57312] = { "Broadcom BCM57312 NetXtreme-C 10Gb/25Gb Ethernet" },
99         [BCM57402] = { "Broadcom BCM57402 NetXtreme-E 10Gb Ethernet" },
100         [BCM57404] = { "Broadcom BCM57404 NetXtreme-E 10Gb/25Gb Ethernet" },
101         [BCM57406] = { "Broadcom BCM57406 NetXtreme-E 10GBase-T Ethernet" },
102         [BCM57402_NPAR] = { "Broadcom BCM57402 NetXtreme-E Ethernet Partition" },
103         [BCM57407] = { "Broadcom BCM57407 NetXtreme-E 10GBase-T Ethernet" },
104         [BCM57412] = { "Broadcom BCM57412 NetXtreme-E 10Gb Ethernet" },
105         [BCM57414] = { "Broadcom BCM57414 NetXtreme-E 10Gb/25Gb Ethernet" },
106         [BCM57416] = { "Broadcom BCM57416 NetXtreme-E 10GBase-T Ethernet" },
107         [BCM57417] = { "Broadcom BCM57417 NetXtreme-E 10GBase-T Ethernet" },
108         [BCM57412_NPAR] = { "Broadcom BCM57412 NetXtreme-E Ethernet Partition" },
109         [BCM57314] = { "Broadcom BCM57314 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
110         [BCM57417_SFP] = { "Broadcom BCM57417 NetXtreme-E 10Gb/25Gb Ethernet" },
111         [BCM57416_SFP] = { "Broadcom BCM57416 NetXtreme-E 10Gb Ethernet" },
112         [BCM57404_NPAR] = { "Broadcom BCM57404 NetXtreme-E Ethernet Partition" },
113         [BCM57406_NPAR] = { "Broadcom BCM57406 NetXtreme-E Ethernet Partition" },
114         [BCM57407_SFP] = { "Broadcom BCM57407 NetXtreme-E 25Gb Ethernet" },
115         [BCM57407_NPAR] = { "Broadcom BCM57407 NetXtreme-E Ethernet Partition" },
116         [BCM57414_NPAR] = { "Broadcom BCM57414 NetXtreme-E Ethernet Partition" },
117         [BCM57416_NPAR] = { "Broadcom BCM57416 NetXtreme-E Ethernet Partition" },
118         [BCM57452] = { "Broadcom BCM57452 NetXtreme-E 10Gb/25Gb/40Gb/50Gb Ethernet" },
119         [BCM57454] = { "Broadcom BCM57454 NetXtreme-E 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
120         [BCM5745x_NPAR] = { "Broadcom BCM5745x NetXtreme-E Ethernet Partition" },
121         [BCM57508] = { "Broadcom BCM57508 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" },
122         [BCM57504] = { "Broadcom BCM57504 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" },
123         [BCM57502] = { "Broadcom BCM57502 NetXtreme-E 10Gb/25Gb/50Gb Ethernet" },
124         [BCM57508_NPAR] = { "Broadcom BCM57508 NetXtreme-E Ethernet Partition" },
125         [BCM57504_NPAR] = { "Broadcom BCM57504 NetXtreme-E Ethernet Partition" },
126         [BCM57502_NPAR] = { "Broadcom BCM57502 NetXtreme-E Ethernet Partition" },
127         [BCM58802] = { "Broadcom BCM58802 NetXtreme-S 10Gb/25Gb/40Gb/50Gb Ethernet" },
128         [BCM58804] = { "Broadcom BCM58804 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
129         [BCM58808] = { "Broadcom BCM58808 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
130         [NETXTREME_E_VF] = { "Broadcom NetXtreme-E Ethernet Virtual Function" },
131         [NETXTREME_C_VF] = { "Broadcom NetXtreme-C Ethernet Virtual Function" },
132         [NETXTREME_S_VF] = { "Broadcom NetXtreme-S Ethernet Virtual Function" },
133         [NETXTREME_C_VF_HV] = { "Broadcom NetXtreme-C Virtual Function for Hyper-V" },
134         [NETXTREME_E_VF_HV] = { "Broadcom NetXtreme-E Virtual Function for Hyper-V" },
135         [NETXTREME_E_P5_VF] = { "Broadcom BCM5750X NetXtreme-E Ethernet Virtual Function" },
136         [NETXTREME_E_P5_VF_HV] = { "Broadcom BCM5750X NetXtreme-E Virtual Function for Hyper-V" },
137 };
138
139 static const struct pci_device_id bnxt_pci_tbl[] = {
140         { PCI_VDEVICE(BROADCOM, 0x1604), .driver_data = BCM5745x_NPAR },
141         { PCI_VDEVICE(BROADCOM, 0x1605), .driver_data = BCM5745x_NPAR },
142         { PCI_VDEVICE(BROADCOM, 0x1614), .driver_data = BCM57454 },
143         { PCI_VDEVICE(BROADCOM, 0x16c0), .driver_data = BCM57417_NPAR },
144         { PCI_VDEVICE(BROADCOM, 0x16c8), .driver_data = BCM57301 },
145         { PCI_VDEVICE(BROADCOM, 0x16c9), .driver_data = BCM57302 },
146         { PCI_VDEVICE(BROADCOM, 0x16ca), .driver_data = BCM57304 },
147         { PCI_VDEVICE(BROADCOM, 0x16cc), .driver_data = BCM57417_NPAR },
148         { PCI_VDEVICE(BROADCOM, 0x16cd), .driver_data = BCM58700 },
149         { PCI_VDEVICE(BROADCOM, 0x16ce), .driver_data = BCM57311 },
150         { PCI_VDEVICE(BROADCOM, 0x16cf), .driver_data = BCM57312 },
151         { PCI_VDEVICE(BROADCOM, 0x16d0), .driver_data = BCM57402 },
152         { PCI_VDEVICE(BROADCOM, 0x16d1), .driver_data = BCM57404 },
153         { PCI_VDEVICE(BROADCOM, 0x16d2), .driver_data = BCM57406 },
154         { PCI_VDEVICE(BROADCOM, 0x16d4), .driver_data = BCM57402_NPAR },
155         { PCI_VDEVICE(BROADCOM, 0x16d5), .driver_data = BCM57407 },
156         { PCI_VDEVICE(BROADCOM, 0x16d6), .driver_data = BCM57412 },
157         { PCI_VDEVICE(BROADCOM, 0x16d7), .driver_data = BCM57414 },
158         { PCI_VDEVICE(BROADCOM, 0x16d8), .driver_data = BCM57416 },
159         { PCI_VDEVICE(BROADCOM, 0x16d9), .driver_data = BCM57417 },
160         { PCI_VDEVICE(BROADCOM, 0x16de), .driver_data = BCM57412_NPAR },
161         { PCI_VDEVICE(BROADCOM, 0x16df), .driver_data = BCM57314 },
162         { PCI_VDEVICE(BROADCOM, 0x16e2), .driver_data = BCM57417_SFP },
163         { PCI_VDEVICE(BROADCOM, 0x16e3), .driver_data = BCM57416_SFP },
164         { PCI_VDEVICE(BROADCOM, 0x16e7), .driver_data = BCM57404_NPAR },
165         { PCI_VDEVICE(BROADCOM, 0x16e8), .driver_data = BCM57406_NPAR },
166         { PCI_VDEVICE(BROADCOM, 0x16e9), .driver_data = BCM57407_SFP },
167         { PCI_VDEVICE(BROADCOM, 0x16ea), .driver_data = BCM57407_NPAR },
168         { PCI_VDEVICE(BROADCOM, 0x16eb), .driver_data = BCM57412_NPAR },
169         { PCI_VDEVICE(BROADCOM, 0x16ec), .driver_data = BCM57414_NPAR },
170         { PCI_VDEVICE(BROADCOM, 0x16ed), .driver_data = BCM57414_NPAR },
171         { PCI_VDEVICE(BROADCOM, 0x16ee), .driver_data = BCM57416_NPAR },
172         { PCI_VDEVICE(BROADCOM, 0x16ef), .driver_data = BCM57416_NPAR },
173         { PCI_VDEVICE(BROADCOM, 0x16f0), .driver_data = BCM58808 },
174         { PCI_VDEVICE(BROADCOM, 0x16f1), .driver_data = BCM57452 },
175         { PCI_VDEVICE(BROADCOM, 0x1750), .driver_data = BCM57508 },
176         { PCI_VDEVICE(BROADCOM, 0x1751), .driver_data = BCM57504 },
177         { PCI_VDEVICE(BROADCOM, 0x1752), .driver_data = BCM57502 },
178         { PCI_VDEVICE(BROADCOM, 0x1800), .driver_data = BCM57502_NPAR },
179         { PCI_VDEVICE(BROADCOM, 0x1801), .driver_data = BCM57504_NPAR },
180         { PCI_VDEVICE(BROADCOM, 0x1802), .driver_data = BCM57508_NPAR },
181         { PCI_VDEVICE(BROADCOM, 0x1803), .driver_data = BCM57502_NPAR },
182         { PCI_VDEVICE(BROADCOM, 0x1804), .driver_data = BCM57504_NPAR },
183         { PCI_VDEVICE(BROADCOM, 0x1805), .driver_data = BCM57508_NPAR },
184         { PCI_VDEVICE(BROADCOM, 0xd802), .driver_data = BCM58802 },
185         { PCI_VDEVICE(BROADCOM, 0xd804), .driver_data = BCM58804 },
186 #ifdef CONFIG_BNXT_SRIOV
187         { PCI_VDEVICE(BROADCOM, 0x1606), .driver_data = NETXTREME_E_VF },
188         { PCI_VDEVICE(BROADCOM, 0x1607), .driver_data = NETXTREME_E_VF_HV },
189         { PCI_VDEVICE(BROADCOM, 0x1608), .driver_data = NETXTREME_E_VF_HV },
190         { PCI_VDEVICE(BROADCOM, 0x1609), .driver_data = NETXTREME_E_VF },
191         { PCI_VDEVICE(BROADCOM, 0x16bd), .driver_data = NETXTREME_E_VF_HV },
192         { PCI_VDEVICE(BROADCOM, 0x16c1), .driver_data = NETXTREME_E_VF },
193         { PCI_VDEVICE(BROADCOM, 0x16c2), .driver_data = NETXTREME_C_VF_HV },
194         { PCI_VDEVICE(BROADCOM, 0x16c3), .driver_data = NETXTREME_C_VF_HV },
195         { PCI_VDEVICE(BROADCOM, 0x16c4), .driver_data = NETXTREME_E_VF_HV },
196         { PCI_VDEVICE(BROADCOM, 0x16c5), .driver_data = NETXTREME_E_VF_HV },
197         { PCI_VDEVICE(BROADCOM, 0x16cb), .driver_data = NETXTREME_C_VF },
198         { PCI_VDEVICE(BROADCOM, 0x16d3), .driver_data = NETXTREME_E_VF },
199         { PCI_VDEVICE(BROADCOM, 0x16dc), .driver_data = NETXTREME_E_VF },
200         { PCI_VDEVICE(BROADCOM, 0x16e1), .driver_data = NETXTREME_C_VF },
201         { PCI_VDEVICE(BROADCOM, 0x16e5), .driver_data = NETXTREME_C_VF },
202         { PCI_VDEVICE(BROADCOM, 0x16e6), .driver_data = NETXTREME_C_VF_HV },
203         { PCI_VDEVICE(BROADCOM, 0x1806), .driver_data = NETXTREME_E_P5_VF },
204         { PCI_VDEVICE(BROADCOM, 0x1807), .driver_data = NETXTREME_E_P5_VF },
205         { PCI_VDEVICE(BROADCOM, 0x1808), .driver_data = NETXTREME_E_P5_VF_HV },
206         { PCI_VDEVICE(BROADCOM, 0x1809), .driver_data = NETXTREME_E_P5_VF_HV },
207         { PCI_VDEVICE(BROADCOM, 0xd800), .driver_data = NETXTREME_S_VF },
208 #endif
209         { 0 }
210 };
211
212 MODULE_DEVICE_TABLE(pci, bnxt_pci_tbl);
213
214 static const u16 bnxt_vf_req_snif[] = {
215         HWRM_FUNC_CFG,
216         HWRM_FUNC_VF_CFG,
217         HWRM_PORT_PHY_QCFG,
218         HWRM_CFA_L2_FILTER_ALLOC,
219 };
220
221 static const u16 bnxt_async_events_arr[] = {
222         ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE,
223         ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE,
224         ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD,
225         ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED,
226         ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE,
227         ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE,
228         ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE,
229         ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY,
230         ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY,
231         ASYNC_EVENT_CMPL_EVENT_ID_DEBUG_NOTIFICATION,
232         ASYNC_EVENT_CMPL_EVENT_ID_DEFERRED_RESPONSE,
233         ASYNC_EVENT_CMPL_EVENT_ID_RING_MONITOR_MSG,
234         ASYNC_EVENT_CMPL_EVENT_ID_ECHO_REQUEST,
235         ASYNC_EVENT_CMPL_EVENT_ID_PPS_TIMESTAMP,
236         ASYNC_EVENT_CMPL_EVENT_ID_ERROR_REPORT,
237         ASYNC_EVENT_CMPL_EVENT_ID_PHC_UPDATE,
238 };
239
240 static struct workqueue_struct *bnxt_pf_wq;
241
242 static bool bnxt_vf_pciid(enum board_idx idx)
243 {
244         return (idx == NETXTREME_C_VF || idx == NETXTREME_E_VF ||
245                 idx == NETXTREME_S_VF || idx == NETXTREME_C_VF_HV ||
246                 idx == NETXTREME_E_VF_HV || idx == NETXTREME_E_P5_VF ||
247                 idx == NETXTREME_E_P5_VF_HV);
248 }
249
250 #define DB_CP_REARM_FLAGS       (DB_KEY_CP | DB_IDX_VALID)
251 #define DB_CP_FLAGS             (DB_KEY_CP | DB_IDX_VALID | DB_IRQ_DIS)
252 #define DB_CP_IRQ_DIS_FLAGS     (DB_KEY_CP | DB_IRQ_DIS)
253
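/* The doorbell helpers below select the format by chip generation: legacy
 * chips take a 32-bit completion-ring doorbell (DB_KEY_CP), while P5
 * (57500-series) chips take a 64-bit NQ/CQ doorbell written with
 * bnxt_writeq().
 */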
254 #define BNXT_CP_DB_IRQ_DIS(db)                                          \
255                 writel(DB_CP_IRQ_DIS_FLAGS, db)
256
257 #define BNXT_DB_CQ(db, idx)                                             \
258         writel(DB_CP_FLAGS | RING_CMP(idx), (db)->doorbell)
259
260 #define BNXT_DB_NQ_P5(db, idx)                                          \
261         bnxt_writeq(bp, (db)->db_key64 | DBR_TYPE_NQ | RING_CMP(idx),   \
262                     (db)->doorbell)
263
264 #define BNXT_DB_CQ_ARM(db, idx)                                         \
265         writel(DB_CP_REARM_FLAGS | RING_CMP(idx), (db)->doorbell)
266
267 #define BNXT_DB_NQ_ARM_P5(db, idx)                                      \
268         bnxt_writeq(bp, (db)->db_key64 | DBR_TYPE_NQ_ARM | RING_CMP(idx),\
269                     (db)->doorbell)
270
271 static void bnxt_db_nq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
272 {
273         if (bp->flags & BNXT_FLAG_CHIP_P5)
274                 BNXT_DB_NQ_P5(db, idx);
275         else
276                 BNXT_DB_CQ(db, idx);
277 }
278
279 static void bnxt_db_nq_arm(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
280 {
281         if (bp->flags & BNXT_FLAG_CHIP_P5)
282                 BNXT_DB_NQ_ARM_P5(db, idx);
283         else
284                 BNXT_DB_CQ_ARM(db, idx);
285 }
286
287 static void bnxt_db_cq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
288 {
289         if (bp->flags & BNXT_FLAG_CHIP_P5)
290                 bnxt_writeq(bp, db->db_key64 | DBR_TYPE_CQ_ARMALL |
291                             RING_CMP(idx), db->doorbell);
292         else
293                 BNXT_DB_CQ(db, idx);
294 }
295
296 static void bnxt_queue_fw_reset_work(struct bnxt *bp, unsigned long delay)
297 {
298         if (!(test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)))
299                 return;
300
301         if (BNXT_PF(bp))
302                 queue_delayed_work(bnxt_pf_wq, &bp->fw_reset_task, delay);
303         else
304                 schedule_delayed_work(&bp->fw_reset_task, delay);
305 }
306
307 static void __bnxt_queue_sp_work(struct bnxt *bp)
308 {
309         if (BNXT_PF(bp))
310                 queue_work(bnxt_pf_wq, &bp->sp_task);
311         else
312                 schedule_work(&bp->sp_task);
313 }
314
315 static void bnxt_queue_sp_work(struct bnxt *bp, unsigned int event)
316 {
317         set_bit(event, &bp->sp_event);
318         __bnxt_queue_sp_work(bp);
319 }
320
321 static void bnxt_sched_reset_rxr(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
322 {
323         if (!rxr->bnapi->in_reset) {
324                 rxr->bnapi->in_reset = true;
325                 if (bp->flags & BNXT_FLAG_CHIP_P5)
326                         set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
327                 else
328                         set_bit(BNXT_RST_RING_SP_EVENT, &bp->sp_event);
329                 __bnxt_queue_sp_work(bp);
330         }
331         rxr->rx_next_cons = 0xffff;
332 }
333
334 void bnxt_sched_reset_txr(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
335                           int idx)
336 {
337         struct bnxt_napi *bnapi = txr->bnapi;
338
339         if (bnapi->tx_fault)
340                 return;
341
342         netdev_err(bp->dev, "Invalid Tx completion (ring:%d tx_pkts:%d cons:%u prod:%u i:%d)",
343                    txr->txq_index, bnapi->tx_pkts,
344                    txr->tx_cons, txr->tx_prod, idx);
345         WARN_ON_ONCE(1);
346         bnapi->tx_fault = 1;
347         bnxt_queue_sp_work(bp, BNXT_RESET_TASK_SP_EVENT);
348 }
349
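/* TX BD length hint, indexed by packet length in 512-byte units
 * (see "length >>= 9" in bnxt_start_xmit()).
 */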
350 const u16 bnxt_lhint_arr[] = {
351         TX_BD_FLAGS_LHINT_512_AND_SMALLER,
352         TX_BD_FLAGS_LHINT_512_TO_1023,
353         TX_BD_FLAGS_LHINT_1024_TO_2047,
354         TX_BD_FLAGS_LHINT_1024_TO_2047,
355         TX_BD_FLAGS_LHINT_2048_AND_LARGER,
356         TX_BD_FLAGS_LHINT_2048_AND_LARGER,
357         TX_BD_FLAGS_LHINT_2048_AND_LARGER,
358         TX_BD_FLAGS_LHINT_2048_AND_LARGER,
359         TX_BD_FLAGS_LHINT_2048_AND_LARGER,
360         TX_BD_FLAGS_LHINT_2048_AND_LARGER,
361         TX_BD_FLAGS_LHINT_2048_AND_LARGER,
362         TX_BD_FLAGS_LHINT_2048_AND_LARGER,
363         TX_BD_FLAGS_LHINT_2048_AND_LARGER,
364         TX_BD_FLAGS_LHINT_2048_AND_LARGER,
365         TX_BD_FLAGS_LHINT_2048_AND_LARGER,
366         TX_BD_FLAGS_LHINT_2048_AND_LARGER,
367         TX_BD_FLAGS_LHINT_2048_AND_LARGER,
368         TX_BD_FLAGS_LHINT_2048_AND_LARGER,
369         TX_BD_FLAGS_LHINT_2048_AND_LARGER,
370 };
371
372 static u16 bnxt_xmit_get_cfa_action(struct sk_buff *skb)
373 {
374         struct metadata_dst *md_dst = skb_metadata_dst(skb);
375
376         if (!md_dst || md_dst->type != METADATA_HW_PORT_MUX)
377                 return 0;
378
379         return md_dst->u.port_info.port_id;
380 }
381
382 static void bnxt_txr_db_kick(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
383                              u16 prod)
384 {
385         bnxt_db_write(bp, &txr->tx_db, prod);
386         txr->kick_pending = 0;
387 }
388
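/* Main transmit routine.  A small packet that fits within the push
 * threshold on an otherwise empty ring is copied straight into the
 * doorbell BAR; everything else is DMA-mapped onto long TX BDs and the
 * doorbell is rung, or deferred via kick_pending when xmit_more is set.
 */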
389 static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
390 {
391         struct bnxt *bp = netdev_priv(dev);
392         struct tx_bd *txbd;
393         struct tx_bd_ext *txbd1;
394         struct netdev_queue *txq;
395         int i;
396         dma_addr_t mapping;
397         unsigned int length, pad = 0;
398         u32 len, free_size, vlan_tag_flags, cfa_action, flags;
399         u16 prod, last_frag;
400         struct pci_dev *pdev = bp->pdev;
401         struct bnxt_tx_ring_info *txr;
402         struct bnxt_sw_tx_bd *tx_buf;
403         __le32 lflags = 0;
404
405         i = skb_get_queue_mapping(skb);
406         if (unlikely(i >= bp->tx_nr_rings)) {
407                 dev_kfree_skb_any(skb);
408                 dev_core_stats_tx_dropped_inc(dev);
409                 return NETDEV_TX_OK;
410         }
411
412         txq = netdev_get_tx_queue(dev, i);
413         txr = &bp->tx_ring[bp->tx_ring_map[i]];
414         prod = txr->tx_prod;
415
416         free_size = bnxt_tx_avail(bp, txr);
417         if (unlikely(free_size < skb_shinfo(skb)->nr_frags + 2)) {
418                 /* We must have raced with NAPI cleanup */
419                 if (net_ratelimit() && txr->kick_pending)
420                         netif_warn(bp, tx_err, dev,
421                                    "bnxt: ring busy w/ flush pending!\n");
422                 if (!netif_txq_try_stop(txq, bnxt_tx_avail(bp, txr),
423                                         bp->tx_wake_thresh))
424                         return NETDEV_TX_BUSY;
425         }
426
427         if (unlikely(ipv6_hopopt_jumbo_remove(skb)))
428                 goto tx_free;
429
430         length = skb->len;
431         len = skb_headlen(skb);
432         last_frag = skb_shinfo(skb)->nr_frags;
433
434         txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
435
436         txbd->tx_bd_opaque = prod;
437
438         tx_buf = &txr->tx_buf_ring[prod];
439         tx_buf->skb = skb;
440         tx_buf->nr_frags = last_frag;
441
442         vlan_tag_flags = 0;
443         cfa_action = bnxt_xmit_get_cfa_action(skb);
444         if (skb_vlan_tag_present(skb)) {
445                 vlan_tag_flags = TX_BD_CFA_META_KEY_VLAN |
446                                  skb_vlan_tag_get(skb);
447                 /* Currently supports 8021Q, 8021AD vlan offloads
448                  * QINQ1, QINQ2, QINQ3 vlan headers are deprecated
449                  */
450                 if (skb->vlan_proto == htons(ETH_P_8021Q))
451                         vlan_tag_flags |= 1 << TX_BD_CFA_META_TPID_SHIFT;
452         }
453
454         if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
455                 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
456
457                 if (ptp && ptp->tx_tstamp_en && !skb_is_gso(skb) &&
458                     atomic_dec_if_positive(&ptp->tx_avail) >= 0) {
459                         if (!bnxt_ptp_parse(skb, &ptp->tx_seqid,
460                                             &ptp->tx_hdr_off)) {
461                                 if (vlan_tag_flags)
462                                         ptp->tx_hdr_off += VLAN_HLEN;
463                                 lflags |= cpu_to_le32(TX_BD_FLAGS_STAMP);
464                                 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
465                         } else {
466                                 atomic_inc(&bp->ptp_cfg->tx_avail);
467                         }
468                 }
469         }
470
471         if (unlikely(skb->no_fcs))
472                 lflags |= cpu_to_le32(TX_BD_FLAGS_NO_CRC);
473
474         if (free_size == bp->tx_ring_size && length <= bp->tx_push_thresh &&
475             !lflags) {
476                 struct tx_push_buffer *tx_push_buf = txr->tx_push;
477                 struct tx_push_bd *tx_push = &tx_push_buf->push_bd;
478                 struct tx_bd_ext *tx_push1 = &tx_push->txbd2;
479                 void __iomem *db = txr->tx_db.doorbell;
480                 void *pdata = tx_push_buf->data;
481                 u64 *end;
482                 int j, push_len;
483
484                 /* Set COAL_NOW to be ready quickly for the next push */
485                 tx_push->tx_bd_len_flags_type =
486                         cpu_to_le32((length << TX_BD_LEN_SHIFT) |
487                                         TX_BD_TYPE_LONG_TX_BD |
488                                         TX_BD_FLAGS_LHINT_512_AND_SMALLER |
489                                         TX_BD_FLAGS_COAL_NOW |
490                                         TX_BD_FLAGS_PACKET_END |
491                                         (2 << TX_BD_FLAGS_BD_CNT_SHIFT));
492
493                 if (skb->ip_summed == CHECKSUM_PARTIAL)
494                         tx_push1->tx_bd_hsize_lflags =
495                                         cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
496                 else
497                         tx_push1->tx_bd_hsize_lflags = 0;
498
499                 tx_push1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
500                 tx_push1->tx_bd_cfa_action =
501                         cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT);
502
503                 end = pdata + length;
504                 end = PTR_ALIGN(end, 8) - 1;
505                 *end = 0;
506
507                 skb_copy_from_linear_data(skb, pdata, len);
508                 pdata += len;
509                 for (j = 0; j < last_frag; j++) {
510                         skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
511                         void *fptr;
512
513                         fptr = skb_frag_address_safe(frag);
514                         if (!fptr)
515                                 goto normal_tx;
516
517                         memcpy(pdata, fptr, skb_frag_size(frag));
518                         pdata += skb_frag_size(frag);
519                 }
520
521                 txbd->tx_bd_len_flags_type = tx_push->tx_bd_len_flags_type;
522                 txbd->tx_bd_haddr = txr->data_mapping;
523                 prod = NEXT_TX(prod);
524                 txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
525                 memcpy(txbd, tx_push1, sizeof(*txbd));
526                 prod = NEXT_TX(prod);
527                 tx_push->doorbell =
528                         cpu_to_le32(DB_KEY_TX_PUSH | DB_LONG_TX_PUSH | prod);
529                 WRITE_ONCE(txr->tx_prod, prod);
530
531                 tx_buf->is_push = 1;
532                 netdev_tx_sent_queue(txq, skb->len);
533                 wmb();  /* Sync is_push and byte queue before pushing data */
534
535                 push_len = (length + sizeof(*tx_push) + 7) / 8;
536                 if (push_len > 16) {
537                         __iowrite64_copy(db, tx_push_buf, 16);
538                         __iowrite32_copy(db + 4, tx_push_buf + 1,
539                                          (push_len - 16) << 1);
540                 } else {
541                         __iowrite64_copy(db, tx_push_buf, push_len);
542                 }
543
544                 goto tx_done;
545         }
546
547 normal_tx:
548         if (length < BNXT_MIN_PKT_SIZE) {
549                 pad = BNXT_MIN_PKT_SIZE - length;
550                 if (skb_pad(skb, pad))
551                         /* SKB already freed. */
552                         goto tx_kick_pending;
553                 length = BNXT_MIN_PKT_SIZE;
554         }
555
556         mapping = dma_map_single(&pdev->dev, skb->data, len, DMA_TO_DEVICE);
557
558         if (unlikely(dma_mapping_error(&pdev->dev, mapping)))
559                 goto tx_free;
560
561         dma_unmap_addr_set(tx_buf, mapping, mapping);
562         flags = (len << TX_BD_LEN_SHIFT) | TX_BD_TYPE_LONG_TX_BD |
563                 ((last_frag + 2) << TX_BD_FLAGS_BD_CNT_SHIFT);
564
565         txbd->tx_bd_haddr = cpu_to_le64(mapping);
566
567         prod = NEXT_TX(prod);
568         txbd1 = (struct tx_bd_ext *)
569                 &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
570
571         txbd1->tx_bd_hsize_lflags = lflags;
572         if (skb_is_gso(skb)) {
573                 u32 hdr_len;
574
575                 if (skb->encapsulation)
576                         hdr_len = skb_inner_tcp_all_headers(skb);
577                 else
578                         hdr_len = skb_tcp_all_headers(skb);
579
580                 txbd1->tx_bd_hsize_lflags |= cpu_to_le32(TX_BD_FLAGS_LSO |
581                                         TX_BD_FLAGS_T_IPID |
582                                         (hdr_len << (TX_BD_HSIZE_SHIFT - 1)));
583                 length = skb_shinfo(skb)->gso_size;
584                 txbd1->tx_bd_mss = cpu_to_le32(length);
585                 length += hdr_len;
586         } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
587                 txbd1->tx_bd_hsize_lflags |=
588                         cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
589                 txbd1->tx_bd_mss = 0;
590         }
591
592         length >>= 9;
593         if (unlikely(length >= ARRAY_SIZE(bnxt_lhint_arr))) {
594                 dev_warn_ratelimited(&pdev->dev, "Dropped oversize %d bytes TX packet.\n",
595                                      skb->len);
596                 i = 0;
597                 goto tx_dma_error;
598         }
599         flags |= bnxt_lhint_arr[length];
600         txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
601
602         txbd1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
603         txbd1->tx_bd_cfa_action =
604                         cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT);
605         for (i = 0; i < last_frag; i++) {
606                 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
607
608                 prod = NEXT_TX(prod);
609                 txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
610
611                 len = skb_frag_size(frag);
612                 mapping = skb_frag_dma_map(&pdev->dev, frag, 0, len,
613                                            DMA_TO_DEVICE);
614
615                 if (unlikely(dma_mapping_error(&pdev->dev, mapping)))
616                         goto tx_dma_error;
617
618                 tx_buf = &txr->tx_buf_ring[prod];
619                 dma_unmap_addr_set(tx_buf, mapping, mapping);
620
621                 txbd->tx_bd_haddr = cpu_to_le64(mapping);
622
623                 flags = len << TX_BD_LEN_SHIFT;
624                 txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
625         }
626
627         flags &= ~TX_BD_LEN;
628         txbd->tx_bd_len_flags_type =
629                 cpu_to_le32(((len + pad) << TX_BD_LEN_SHIFT) | flags |
630                             TX_BD_FLAGS_PACKET_END);
631
632         netdev_tx_sent_queue(txq, skb->len);
633
634         skb_tx_timestamp(skb);
635
636         /* Sync BD data before updating doorbell */
637         wmb();
638
639         prod = NEXT_TX(prod);
640         WRITE_ONCE(txr->tx_prod, prod);
641
642         if (!netdev_xmit_more() || netif_xmit_stopped(txq))
643                 bnxt_txr_db_kick(bp, txr, prod);
644         else
645                 txr->kick_pending = 1;
646
647 tx_done:
648
649         if (unlikely(bnxt_tx_avail(bp, txr) <= MAX_SKB_FRAGS + 1)) {
650                 if (netdev_xmit_more() && !tx_buf->is_push)
651                         bnxt_txr_db_kick(bp, txr, prod);
652
653                 netif_txq_try_stop(txq, bnxt_tx_avail(bp, txr),
654                                    bp->tx_wake_thresh);
655         }
656         return NETDEV_TX_OK;
657
658 tx_dma_error:
659         if (BNXT_TX_PTP_IS_SET(lflags))
660                 atomic_inc(&bp->ptp_cfg->tx_avail);
661
662         last_frag = i;
663
664         /* start back at beginning and unmap skb */
665         prod = txr->tx_prod;
666         tx_buf = &txr->tx_buf_ring[prod];
667         dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
668                          skb_headlen(skb), DMA_TO_DEVICE);
669         prod = NEXT_TX(prod);
670
671         /* unmap remaining mapped pages */
672         for (i = 0; i < last_frag; i++) {
673                 prod = NEXT_TX(prod);
674                 tx_buf = &txr->tx_buf_ring[prod];
675                 dma_unmap_page(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
676                                skb_frag_size(&skb_shinfo(skb)->frags[i]),
677                                DMA_TO_DEVICE);
678         }
679
680 tx_free:
681         dev_kfree_skb_any(skb);
682 tx_kick_pending:
683         if (txr->kick_pending)
684                 bnxt_txr_db_kick(bp, txr, txr->tx_prod);
685         txr->tx_buf_ring[txr->tx_prod].skb = NULL;
686         dev_core_stats_tx_dropped_inc(dev);
687         return NETDEV_TX_OK;
688 }
689
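/* Reclaim descriptors for packets the hardware has completed: unmap DMA,
 * hand PTP-stamped skbs to the PTP handler on P5 chips, and wake the TX
 * queue once enough space is available again.
 */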
690 static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
691 {
692         struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
693         struct netdev_queue *txq = netdev_get_tx_queue(bp->dev, txr->txq_index);
694         u16 cons = txr->tx_cons;
695         struct pci_dev *pdev = bp->pdev;
696         int nr_pkts = bnapi->tx_pkts;
697         int i;
698         unsigned int tx_bytes = 0;
699
700         for (i = 0; i < nr_pkts; i++) {
701                 struct bnxt_sw_tx_bd *tx_buf;
702                 struct sk_buff *skb;
703                 int j, last;
704
705                 tx_buf = &txr->tx_buf_ring[cons];
706                 cons = NEXT_TX(cons);
707                 skb = tx_buf->skb;
708                 tx_buf->skb = NULL;
709
710                 if (unlikely(!skb)) {
711                         bnxt_sched_reset_txr(bp, txr, i);
712                         return;
713                 }
714
715                 tx_bytes += skb->len;
716
717                 if (tx_buf->is_push) {
718                         tx_buf->is_push = 0;
719                         goto next_tx_int;
720                 }
721
722                 dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
723                                  skb_headlen(skb), DMA_TO_DEVICE);
724                 last = tx_buf->nr_frags;
725
726                 for (j = 0; j < last; j++) {
727                         cons = NEXT_TX(cons);
728                         tx_buf = &txr->tx_buf_ring[cons];
729                         dma_unmap_page(
730                                 &pdev->dev,
731                                 dma_unmap_addr(tx_buf, mapping),
732                                 skb_frag_size(&skb_shinfo(skb)->frags[j]),
733                                 DMA_TO_DEVICE);
734                 }
735                 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
736                         if (bp->flags & BNXT_FLAG_CHIP_P5) {
737                                 /* PTP worker takes ownership of the skb */
738                                 if (!bnxt_get_tx_ts_p5(bp, skb))
739                                         skb = NULL;
740                                 else
741                                         atomic_inc(&bp->ptp_cfg->tx_avail);
742                         }
743                 }
744
745 next_tx_int:
746                 cons = NEXT_TX(cons);
747
748                 dev_consume_skb_any(skb);
749         }
750
751         bnapi->tx_pkts = 0;
752         WRITE_ONCE(txr->tx_cons, cons);
753
754         __netif_txq_completed_wake(txq, nr_pkts, tx_bytes,
755                                    bnxt_tx_avail(bp, txr), bp->tx_wake_thresh,
756                                    READ_ONCE(txr->dev_state) == BNXT_DEV_STATE_CLOSING);
757 }
758
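/* RX buffer allocation: pages come from the per-ring page_pool.  When the
 * system page size exceeds BNXT_RX_PAGE_SIZE, a page fragment is used so
 * one page can back multiple RX buffers.
 */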
759 static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping,
760                                          struct bnxt_rx_ring_info *rxr,
761                                          unsigned int *offset,
762                                          gfp_t gfp)
763 {
764         struct page *page;
765
766         if (PAGE_SIZE > BNXT_RX_PAGE_SIZE) {
767                 page = page_pool_dev_alloc_frag(rxr->page_pool, offset,
768                                                 BNXT_RX_PAGE_SIZE);
769         } else {
770                 page = page_pool_dev_alloc_pages(rxr->page_pool);
771                 *offset = 0;
772         }
773         if (!page)
774                 return NULL;
775
776         *mapping = page_pool_get_dma_addr(page) + *offset;
777         return page;
778 }
779
780 static inline u8 *__bnxt_alloc_rx_frag(struct bnxt *bp, dma_addr_t *mapping,
781                                        gfp_t gfp)
782 {
783         u8 *data;
784         struct pci_dev *pdev = bp->pdev;
785
786         if (gfp == GFP_ATOMIC)
787                 data = napi_alloc_frag(bp->rx_buf_size);
788         else
789                 data = netdev_alloc_frag(bp->rx_buf_size);
790         if (!data)
791                 return NULL;
792
793         *mapping = dma_map_single_attrs(&pdev->dev, data + bp->rx_dma_offset,
794                                         bp->rx_buf_use_size, bp->rx_dir,
795                                         DMA_ATTR_WEAK_ORDERING);
796
797         if (dma_mapping_error(&pdev->dev, *mapping)) {
798                 skb_free_frag(data);
799                 data = NULL;
800         }
801         return data;
802 }
803
804 int bnxt_alloc_rx_data(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
805                        u16 prod, gfp_t gfp)
806 {
807         struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
808         struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[prod];
809         dma_addr_t mapping;
810
811         if (BNXT_RX_PAGE_MODE(bp)) {
812                 unsigned int offset;
813                 struct page *page =
814                         __bnxt_alloc_rx_page(bp, &mapping, rxr, &offset, gfp);
815
816                 if (!page)
817                         return -ENOMEM;
818
819                 mapping += bp->rx_dma_offset;
820                 rx_buf->data = page;
821                 rx_buf->data_ptr = page_address(page) + offset + bp->rx_offset;
822         } else {
823                 u8 *data = __bnxt_alloc_rx_frag(bp, &mapping, gfp);
824
825                 if (!data)
826                         return -ENOMEM;
827
828                 rx_buf->data = data;
829                 rx_buf->data_ptr = data + bp->rx_offset;
830         }
831         rx_buf->mapping = mapping;
832
833         rxbd->rx_bd_haddr = cpu_to_le64(mapping);
834         return 0;
835 }
836
837 void bnxt_reuse_rx_data(struct bnxt_rx_ring_info *rxr, u16 cons, void *data)
838 {
839         u16 prod = rxr->rx_prod;
840         struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
841         struct rx_bd *cons_bd, *prod_bd;
842
843         prod_rx_buf = &rxr->rx_buf_ring[prod];
844         cons_rx_buf = &rxr->rx_buf_ring[cons];
845
846         prod_rx_buf->data = data;
847         prod_rx_buf->data_ptr = cons_rx_buf->data_ptr;
848
849         prod_rx_buf->mapping = cons_rx_buf->mapping;
850
851         prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
852         cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
853
854         prod_bd->rx_bd_haddr = cons_bd->rx_bd_haddr;
855 }
856
857 static inline u16 bnxt_find_next_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx)
858 {
859         u16 next, max = rxr->rx_agg_bmap_size;
860
861         next = find_next_zero_bit(rxr->rx_agg_bmap, max, idx);
862         if (next >= max)
863                 next = find_first_zero_bit(rxr->rx_agg_bmap, max);
864         return next;
865 }
866
867 static inline int bnxt_alloc_rx_page(struct bnxt *bp,
868                                      struct bnxt_rx_ring_info *rxr,
869                                      u16 prod, gfp_t gfp)
870 {
871         struct rx_bd *rxbd =
872                 &rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
873         struct bnxt_sw_rx_agg_bd *rx_agg_buf;
874         struct page *page;
875         dma_addr_t mapping;
876         u16 sw_prod = rxr->rx_sw_agg_prod;
877         unsigned int offset = 0;
878
879         page = __bnxt_alloc_rx_page(bp, &mapping, rxr, &offset, gfp);
880
881         if (!page)
882                 return -ENOMEM;
883
884         if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
885                 sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);
886
887         __set_bit(sw_prod, rxr->rx_agg_bmap);
888         rx_agg_buf = &rxr->rx_agg_ring[sw_prod];
889         rxr->rx_sw_agg_prod = NEXT_RX_AGG(sw_prod);
890
891         rx_agg_buf->page = page;
892         rx_agg_buf->offset = offset;
893         rx_agg_buf->mapping = mapping;
894         rxbd->rx_bd_haddr = cpu_to_le64(mapping);
895         rxbd->rx_bd_opaque = sw_prod;
896         return 0;
897 }
898
899 static struct rx_agg_cmp *bnxt_get_agg(struct bnxt *bp,
900                                        struct bnxt_cp_ring_info *cpr,
901                                        u16 cp_cons, u16 curr)
902 {
903         struct rx_agg_cmp *agg;
904
905         cp_cons = RING_CMP(ADV_RAW_CMP(cp_cons, curr));
906         agg = (struct rx_agg_cmp *)
907                 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
908         return agg;
909 }
910
911 static struct rx_agg_cmp *bnxt_get_tpa_agg_p5(struct bnxt *bp,
912                                               struct bnxt_rx_ring_info *rxr,
913                                               u16 agg_id, u16 curr)
914 {
915         struct bnxt_tpa_info *tpa_info = &rxr->rx_tpa[agg_id];
916
917         return &tpa_info->agg_arr[curr];
918 }
919
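/* Repost the aggregation buffers of a completion back onto the
 * aggregation ring, e.g. when a packet is aborted or a replacement
 * allocation fails, so no pages are lost.
 */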
920 static void bnxt_reuse_rx_agg_bufs(struct bnxt_cp_ring_info *cpr, u16 idx,
921                                    u16 start, u32 agg_bufs, bool tpa)
922 {
923         struct bnxt_napi *bnapi = cpr->bnapi;
924         struct bnxt *bp = bnapi->bp;
925         struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
926         u16 prod = rxr->rx_agg_prod;
927         u16 sw_prod = rxr->rx_sw_agg_prod;
928         bool p5_tpa = false;
929         u32 i;
930
931         if ((bp->flags & BNXT_FLAG_CHIP_P5) && tpa)
932                 p5_tpa = true;
933
934         for (i = 0; i < agg_bufs; i++) {
935                 u16 cons;
936                 struct rx_agg_cmp *agg;
937                 struct bnxt_sw_rx_agg_bd *cons_rx_buf, *prod_rx_buf;
938                 struct rx_bd *prod_bd;
939                 struct page *page;
940
941                 if (p5_tpa)
942                         agg = bnxt_get_tpa_agg_p5(bp, rxr, idx, start + i);
943                 else
944                         agg = bnxt_get_agg(bp, cpr, idx, start + i);
945                 cons = agg->rx_agg_cmp_opaque;
946                 __clear_bit(cons, rxr->rx_agg_bmap);
947
948                 if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
949                         sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);
950
951                 __set_bit(sw_prod, rxr->rx_agg_bmap);
952                 prod_rx_buf = &rxr->rx_agg_ring[sw_prod];
953                 cons_rx_buf = &rxr->rx_agg_ring[cons];
954
955                 /* It is possible for sw_prod to be equal to cons, so
956                  * set cons_rx_buf->page to NULL first.
957                  */
958                 page = cons_rx_buf->page;
959                 cons_rx_buf->page = NULL;
960                 prod_rx_buf->page = page;
961                 prod_rx_buf->offset = cons_rx_buf->offset;
962
963                 prod_rx_buf->mapping = cons_rx_buf->mapping;
964
965                 prod_bd = &rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
966
967                 prod_bd->rx_bd_haddr = cpu_to_le64(cons_rx_buf->mapping);
968                 prod_bd->rx_bd_opaque = sw_prod;
969
970                 prod = NEXT_RX_AGG(prod);
971                 sw_prod = NEXT_RX_AGG(sw_prod);
972         }
973         rxr->rx_agg_prod = prod;
974         rxr->rx_sw_agg_prod = sw_prod;
975 }
976
977 static struct sk_buff *bnxt_rx_multi_page_skb(struct bnxt *bp,
978                                               struct bnxt_rx_ring_info *rxr,
979                                               u16 cons, void *data, u8 *data_ptr,
980                                               dma_addr_t dma_addr,
981                                               unsigned int offset_and_len)
982 {
983         unsigned int len = offset_and_len & 0xffff;
984         struct page *page = data;
985         u16 prod = rxr->rx_prod;
986         struct sk_buff *skb;
987         int err;
988
989         err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
990         if (unlikely(err)) {
991                 bnxt_reuse_rx_data(rxr, cons, data);
992                 return NULL;
993         }
994         dma_addr -= bp->rx_dma_offset;
995         dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr, BNXT_RX_PAGE_SIZE,
996                                 bp->rx_dir);
997         skb = napi_build_skb(data_ptr - bp->rx_offset, BNXT_RX_PAGE_SIZE);
998         if (!skb) {
999                 page_pool_recycle_direct(rxr->page_pool, page);
1000                 return NULL;
1001         }
1002         skb_mark_for_recycle(skb);
1003         skb_reserve(skb, bp->rx_offset);
1004         __skb_put(skb, len);
1005
1006         return skb;
1007 }
1008
1009 static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp,
1010                                         struct bnxt_rx_ring_info *rxr,
1011                                         u16 cons, void *data, u8 *data_ptr,
1012                                         dma_addr_t dma_addr,
1013                                         unsigned int offset_and_len)
1014 {
1015         unsigned int payload = offset_and_len >> 16;
1016         unsigned int len = offset_and_len & 0xffff;
1017         skb_frag_t *frag;
1018         struct page *page = data;
1019         u16 prod = rxr->rx_prod;
1020         struct sk_buff *skb;
1021         int off, err;
1022
1023         err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
1024         if (unlikely(err)) {
1025                 bnxt_reuse_rx_data(rxr, cons, data);
1026                 return NULL;
1027         }
1028         dma_addr -= bp->rx_dma_offset;
1029         dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr, BNXT_RX_PAGE_SIZE,
1030                                 bp->rx_dir);
1031
1032         if (unlikely(!payload))
1033                 payload = eth_get_headlen(bp->dev, data_ptr, len);
1034
1035         skb = napi_alloc_skb(&rxr->bnapi->napi, payload);
1036         if (!skb) {
1037                 page_pool_recycle_direct(rxr->page_pool, page);
1038                 return NULL;
1039         }
1040
1041         skb_mark_for_recycle(skb);
1042         off = (void *)data_ptr - page_address(page);
1043         skb_add_rx_frag(skb, 0, page, off, len, BNXT_RX_PAGE_SIZE);
1044         memcpy(skb->data - NET_IP_ALIGN, data_ptr - NET_IP_ALIGN,
1045                payload + NET_IP_ALIGN);
1046
1047         frag = &skb_shinfo(skb)->frags[0];
1048         skb_frag_size_sub(frag, payload);
1049         skb_frag_off_add(frag, payload);
1050         skb->data_len -= payload;
1051         skb->tail += payload;
1052
1053         return skb;
1054 }
1055
1056 static struct sk_buff *bnxt_rx_skb(struct bnxt *bp,
1057                                    struct bnxt_rx_ring_info *rxr, u16 cons,
1058                                    void *data, u8 *data_ptr,
1059                                    dma_addr_t dma_addr,
1060                                    unsigned int offset_and_len)
1061 {
1062         u16 prod = rxr->rx_prod;
1063         struct sk_buff *skb;
1064         int err;
1065
1066         err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
1067         if (unlikely(err)) {
1068                 bnxt_reuse_rx_data(rxr, cons, data);
1069                 return NULL;
1070         }
1071
1072         skb = napi_build_skb(data, bp->rx_buf_size);
1073         dma_unmap_single_attrs(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
1074                                bp->rx_dir, DMA_ATTR_WEAK_ORDERING);
1075         if (!skb) {
1076                 skb_free_frag(data);
1077                 return NULL;
1078         }
1079
1080         skb_reserve(skb, bp->rx_offset);
1081         skb_put(skb, offset_and_len & 0xffff);
1082         return skb;
1083 }
1084
1085 static u32 __bnxt_rx_agg_pages(struct bnxt *bp,
1086                                struct bnxt_cp_ring_info *cpr,
1087                                struct skb_shared_info *shinfo,
1088                                u16 idx, u32 agg_bufs, bool tpa,
1089                                struct xdp_buff *xdp)
1090 {
1091         struct bnxt_napi *bnapi = cpr->bnapi;
1092         struct pci_dev *pdev = bp->pdev;
1093         struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
1094         u16 prod = rxr->rx_agg_prod;
1095         u32 i, total_frag_len = 0;
1096         bool p5_tpa = false;
1097
1098         if ((bp->flags & BNXT_FLAG_CHIP_P5) && tpa)
1099                 p5_tpa = true;
1100
1101         for (i = 0; i < agg_bufs; i++) {
1102                 skb_frag_t *frag = &shinfo->frags[i];
1103                 u16 cons, frag_len;
1104                 struct rx_agg_cmp *agg;
1105                 struct bnxt_sw_rx_agg_bd *cons_rx_buf;
1106                 struct page *page;
1107                 dma_addr_t mapping;
1108
1109                 if (p5_tpa)
1110                         agg = bnxt_get_tpa_agg_p5(bp, rxr, idx, i);
1111                 else
1112                         agg = bnxt_get_agg(bp, cpr, idx, i);
1113                 cons = agg->rx_agg_cmp_opaque;
1114                 frag_len = (le32_to_cpu(agg->rx_agg_cmp_len_flags_type) &
1115                             RX_AGG_CMP_LEN) >> RX_AGG_CMP_LEN_SHIFT;
1116
1117                 cons_rx_buf = &rxr->rx_agg_ring[cons];
1118                 skb_frag_fill_page_desc(frag, cons_rx_buf->page,
1119                                         cons_rx_buf->offset, frag_len);
1120                 shinfo->nr_frags = i + 1;
1121                 __clear_bit(cons, rxr->rx_agg_bmap);
1122
1123                 /* It is possible for bnxt_alloc_rx_page() to allocate
1124                  * a sw_prod index that equals the cons index, so we
1125                  * need to clear the cons entry now.
1126                  */
1127                 mapping = cons_rx_buf->mapping;
1128                 page = cons_rx_buf->page;
1129                 cons_rx_buf->page = NULL;
1130
1131                 if (xdp && page_is_pfmemalloc(page))
1132                         xdp_buff_set_frag_pfmemalloc(xdp);
1133
1134                 if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_ATOMIC) != 0) {
1135                         --shinfo->nr_frags;
1136                         cons_rx_buf->page = page;
1137
1138                         /* Update prod since possibly some pages have been
1139                          * allocated already.
1140                          */
1141                         rxr->rx_agg_prod = prod;
1142                         bnxt_reuse_rx_agg_bufs(cpr, idx, i, agg_bufs - i, tpa);
1143                         return 0;
1144                 }
1145
1146                 dma_sync_single_for_cpu(&pdev->dev, mapping, BNXT_RX_PAGE_SIZE,
1147                                         bp->rx_dir);
1148
1149                 total_frag_len += frag_len;
1150                 prod = NEXT_RX_AGG(prod);
1151         }
1152         rxr->rx_agg_prod = prod;
1153         return total_frag_len;
1154 }
1155
1156 static struct sk_buff *bnxt_rx_agg_pages_skb(struct bnxt *bp,
1157                                              struct bnxt_cp_ring_info *cpr,
1158                                              struct sk_buff *skb, u16 idx,
1159                                              u32 agg_bufs, bool tpa)
1160 {
1161         struct skb_shared_info *shinfo = skb_shinfo(skb);
1162         u32 total_frag_len = 0;
1163
1164         total_frag_len = __bnxt_rx_agg_pages(bp, cpr, shinfo, idx,
1165                                              agg_bufs, tpa, NULL);
1166         if (!total_frag_len) {
1167                 skb_mark_for_recycle(skb);
1168                 dev_kfree_skb(skb);
1169                 return NULL;
1170         }
1171
1172         skb->data_len += total_frag_len;
1173         skb->len += total_frag_len;
1174         skb->truesize += BNXT_RX_PAGE_SIZE * agg_bufs;
1175         return skb;
1176 }
1177
1178 static u32 bnxt_rx_agg_pages_xdp(struct bnxt *bp,
1179                                  struct bnxt_cp_ring_info *cpr,
1180                                  struct xdp_buff *xdp, u16 idx,
1181                                  u32 agg_bufs, bool tpa)
1182 {
1183         struct skb_shared_info *shinfo = xdp_get_shared_info_from_buff(xdp);
1184         u32 total_frag_len = 0;
1185
1186         if (!xdp_buff_has_frags(xdp))
1187                 shinfo->nr_frags = 0;
1188
1189         total_frag_len = __bnxt_rx_agg_pages(bp, cpr, shinfo,
1190                                              idx, agg_bufs, tpa, xdp);
1191         if (total_frag_len) {
1192                 xdp_buff_set_frags_flag(xdp);
1193                 shinfo->nr_frags = agg_bufs;
1194                 shinfo->xdp_frags_size = total_frag_len;
1195         }
1196         return total_frag_len;
1197 }
1198
1199 static int bnxt_agg_bufs_valid(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1200                                u8 agg_bufs, u32 *raw_cons)
1201 {
1202         u16 last;
1203         struct rx_agg_cmp *agg;
1204
1205         *raw_cons = ADV_RAW_CMP(*raw_cons, agg_bufs);
1206         last = RING_CMP(*raw_cons);
1207         agg = (struct rx_agg_cmp *)
1208                 &cpr->cp_desc_ring[CP_RING(last)][CP_IDX(last)];
1209         return RX_AGG_CMP_VALID(agg, *raw_cons);
1210 }
1211
1212 static inline struct sk_buff *bnxt_copy_skb(struct bnxt_napi *bnapi, u8 *data,
1213                                             unsigned int len,
1214                                             dma_addr_t mapping)
1215 {
1216         struct bnxt *bp = bnapi->bp;
1217         struct pci_dev *pdev = bp->pdev;
1218         struct sk_buff *skb;
1219
1220         skb = napi_alloc_skb(&bnapi->napi, len);
1221         if (!skb)
1222                 return NULL;
1223
1224         dma_sync_single_for_cpu(&pdev->dev, mapping, bp->rx_copy_thresh,
1225                                 bp->rx_dir);
1226
1227         memcpy(skb->data - NET_IP_ALIGN, data - NET_IP_ALIGN,
1228                len + NET_IP_ALIGN);
1229
1230         dma_sync_single_for_device(&pdev->dev, mapping, bp->rx_copy_thresh,
1231                                    bp->rx_dir);
1232
1233         skb_put(skb, len);
1234         return skb;
1235 }
1236
1237 static int bnxt_discard_rx(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1238                            u32 *raw_cons, void *cmp)
1239 {
1240         struct rx_cmp *rxcmp = cmp;
1241         u32 tmp_raw_cons = *raw_cons;
1242         u8 cmp_type, agg_bufs = 0;
1243
1244         cmp_type = RX_CMP_TYPE(rxcmp);
1245
1246         if (cmp_type == CMP_TYPE_RX_L2_CMP) {
1247                 agg_bufs = (le32_to_cpu(rxcmp->rx_cmp_misc_v1) &
1248                             RX_CMP_AGG_BUFS) >>
1249                            RX_CMP_AGG_BUFS_SHIFT;
1250         } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
1251                 struct rx_tpa_end_cmp *tpa_end = cmp;
1252
1253                 if (bp->flags & BNXT_FLAG_CHIP_P5)
1254                         return 0;
1255
1256                 agg_bufs = TPA_END_AGG_BUFS(tpa_end);
1257         }
1258
1259         if (agg_bufs) {
1260                 if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
1261                         return -EBUSY;
1262         }
1263         *raw_cons = tmp_raw_cons;
1264         return 0;
1265 }
1266
1267 static u16 bnxt_alloc_agg_idx(struct bnxt_rx_ring_info *rxr, u16 agg_id)
1268 {
1269         struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;
1270         u16 idx = agg_id & MAX_TPA_P5_MASK;
1271
1272         if (test_bit(idx, map->agg_idx_bmap))
1273                 idx = find_first_zero_bit(map->agg_idx_bmap,
1274                                           BNXT_AGG_IDX_BMAP_SIZE);
1275         __set_bit(idx, map->agg_idx_bmap);
1276         map->agg_id_tbl[agg_id] = idx;
1277         return idx;
1278 }
1279
1280 static void bnxt_free_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx)
1281 {
1282         struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;
1283
1284         __clear_bit(idx, map->agg_idx_bmap);
1285 }
1286
1287 static u16 bnxt_lookup_agg_idx(struct bnxt_rx_ring_info *rxr, u16 agg_id)
1288 {
1289         struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;
1290
1291         return map->agg_id_tbl[agg_id];
1292 }
1293
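/* Start of a hardware TPA (aggregation) session: stash the current RX
 * buffer in tpa_info for use at TPA end, give the ring a fresh buffer,
 * and record the hash, GSO type and metadata from the start completion.
 */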
1294 static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
1295                            struct rx_tpa_start_cmp *tpa_start,
1296                            struct rx_tpa_start_cmp_ext *tpa_start1)
1297 {
1298         struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
1299         struct bnxt_tpa_info *tpa_info;
1300         u16 cons, prod, agg_id;
1301         struct rx_bd *prod_bd;
1302         dma_addr_t mapping;
1303
1304         if (bp->flags & BNXT_FLAG_CHIP_P5) {
1305                 agg_id = TPA_START_AGG_ID_P5(tpa_start);
1306                 agg_id = bnxt_alloc_agg_idx(rxr, agg_id);
1307         } else {
1308                 agg_id = TPA_START_AGG_ID(tpa_start);
1309         }
1310         cons = tpa_start->rx_tpa_start_cmp_opaque;
1311         prod = rxr->rx_prod;
1312         cons_rx_buf = &rxr->rx_buf_ring[cons];
1313         prod_rx_buf = &rxr->rx_buf_ring[prod];
1314         tpa_info = &rxr->rx_tpa[agg_id];
1315
1316         if (unlikely(cons != rxr->rx_next_cons ||
1317                      TPA_START_ERROR(tpa_start))) {
1318                 netdev_warn(bp->dev, "TPA cons %x, expected cons %x, error code %x\n",
1319                             cons, rxr->rx_next_cons,
1320                             TPA_START_ERROR_CODE(tpa_start1));
1321                 bnxt_sched_reset_rxr(bp, rxr);
1322                 return;
1323         }
1324         /* Store cfa_code in tpa_info to use in tpa_end
1325          * completion processing.
1326          */
1327         tpa_info->cfa_code = TPA_START_CFA_CODE(tpa_start1);
1328         prod_rx_buf->data = tpa_info->data;
1329         prod_rx_buf->data_ptr = tpa_info->data_ptr;
1330
1331         mapping = tpa_info->mapping;
1332         prod_rx_buf->mapping = mapping;
1333
1334         prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
1335
1336         prod_bd->rx_bd_haddr = cpu_to_le64(mapping);
1337
1338         tpa_info->data = cons_rx_buf->data;
1339         tpa_info->data_ptr = cons_rx_buf->data_ptr;
1340         cons_rx_buf->data = NULL;
1341         tpa_info->mapping = cons_rx_buf->mapping;
1342
1343         tpa_info->len =
1344                 le32_to_cpu(tpa_start->rx_tpa_start_cmp_len_flags_type) >>
1345                                 RX_TPA_START_CMP_LEN_SHIFT;
1346         if (likely(TPA_START_HASH_VALID(tpa_start))) {
1347                 u32 hash_type = TPA_START_HASH_TYPE(tpa_start);
1348
1349                 tpa_info->hash_type = PKT_HASH_TYPE_L4;
1350                 tpa_info->gso_type = SKB_GSO_TCPV4;
1351                 /* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
1352                 if (hash_type == 3 || TPA_START_IS_IPV6(tpa_start1))
1353                         tpa_info->gso_type = SKB_GSO_TCPV6;
1354                 tpa_info->rss_hash =
1355                         le32_to_cpu(tpa_start->rx_tpa_start_cmp_rss_hash);
1356         } else {
1357                 tpa_info->hash_type = PKT_HASH_TYPE_NONE;
1358                 tpa_info->gso_type = 0;
1359                 netif_warn(bp, rx_err, bp->dev, "TPA packet without valid hash\n");
1360         }
1361         tpa_info->flags2 = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_flags2);
1362         tpa_info->metadata = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_metadata);
1363         tpa_info->hdr_info = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_hdr_info);
1364         tpa_info->agg_count = 0;
1365
1366         rxr->rx_prod = NEXT_RX(prod);
1367         cons = NEXT_RX(cons);
1368         rxr->rx_next_cons = NEXT_RX(cons);
1369         cons_rx_buf = &rxr->rx_buf_ring[cons];
1370
1371         bnxt_reuse_rx_data(rxr, cons, cons_rx_buf->data);
1372         rxr->rx_prod = NEXT_RX(rxr->rx_prod);
1373         cons_rx_buf->data = NULL;
1374 }
1375
1376 static void bnxt_abort_tpa(struct bnxt_cp_ring_info *cpr, u16 idx, u32 agg_bufs)
1377 {
1378         if (agg_bufs)
1379                 bnxt_reuse_rx_agg_bufs(cpr, idx, 0, agg_bufs, true);
1380 }
1381
1382 #ifdef CONFIG_INET
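/* For a tunneled GRO packet, look at the outer IPv4/IPv6 header and, if the
 * tunnel is UDP based, set the matching SKB_GSO_UDP_TUNNEL[_CSUM] gso_type.
 */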
1383 static void bnxt_gro_tunnel(struct sk_buff *skb, __be16 ip_proto)
1384 {
1385         struct udphdr *uh = NULL;
1386
1387         if (ip_proto == htons(ETH_P_IP)) {
1388                 struct iphdr *iph = (struct iphdr *)skb->data;
1389
1390                 if (iph->protocol == IPPROTO_UDP)
1391                         uh = (struct udphdr *)(iph + 1);
1392         } else {
1393                 struct ipv6hdr *iph = (struct ipv6hdr *)skb->data;
1394
1395                 if (iph->nexthdr == IPPROTO_UDP)
1396                         uh = (struct udphdr *)(iph + 1);
1397         }
1398         if (uh) {
1399                 if (uh->check)
1400                         skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL_CSUM;
1401                 else
1402                         skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
1403         }
1404 }
1405 #endif
1406
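/* Fix up the network/transport header offsets and the TCP pseudo checksum of
 * a coalesced TPA packet on 5731x chips, using the inner/outer header offsets
 * reported by the hardware in hdr_info (adjusted by 4 bytes for internal
 * loopback packets).
 */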
1407 static struct sk_buff *bnxt_gro_func_5731x(struct bnxt_tpa_info *tpa_info,
1408                                            int payload_off, int tcp_ts,
1409                                            struct sk_buff *skb)
1410 {
1411 #ifdef CONFIG_INET
1412         struct tcphdr *th;
1413         int len, nw_off;
1414         u16 outer_ip_off, inner_ip_off, inner_mac_off;
1415         u32 hdr_info = tpa_info->hdr_info;
1416         bool loopback = false;
1417
1418         inner_ip_off = BNXT_TPA_INNER_L3_OFF(hdr_info);
1419         inner_mac_off = BNXT_TPA_INNER_L2_OFF(hdr_info);
1420         outer_ip_off = BNXT_TPA_OUTER_L3_OFF(hdr_info);
1421
1422         /* If the packet is an internal loopback packet, the offsets will
1423          * have an extra 4 bytes.
1424          */
1425         if (inner_mac_off == 4) {
1426                 loopback = true;
1427         } else if (inner_mac_off > 4) {
1428                 __be16 proto = *((__be16 *)(skb->data + inner_ip_off -
1429                                             ETH_HLEN - 2));
1430
1431                 /* We only support inner IPv4/IPv6.  If we don't see the
1432                  * correct protocol ID, it must be a loopback packet where
1433                  * the offsets are off by 4.
1434                  */
1435                 if (proto != htons(ETH_P_IP) && proto != htons(ETH_P_IPV6))
1436                         loopback = true;
1437         }
1438         if (loopback) {
1439                 /* internal loopback packet, subtract 4 from all offsets */
1440                 inner_ip_off -= 4;
1441                 inner_mac_off -= 4;
1442                 outer_ip_off -= 4;
1443         }
1444
1445         nw_off = inner_ip_off - ETH_HLEN;
1446         skb_set_network_header(skb, nw_off);
1447         if (tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_IP_TYPE) {
1448                 struct ipv6hdr *iph = ipv6_hdr(skb);
1449
1450                 skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
1451                 len = skb->len - skb_transport_offset(skb);
1452                 th = tcp_hdr(skb);
1453                 th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
1454         } else {
1455                 struct iphdr *iph = ip_hdr(skb);
1456
1457                 skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
1458                 len = skb->len - skb_transport_offset(skb);
1459                 th = tcp_hdr(skb);
1460                 th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
1461         }
1462
1463         if (inner_mac_off) { /* tunnel */
1464                 __be16 proto = *((__be16 *)(skb->data + outer_ip_off -
1465                                             ETH_HLEN - 2));
1466
1467                 bnxt_gro_tunnel(skb, proto);
1468         }
1469 #endif
1470         return skb;
1471 }
1472
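/* Header fixup for 5750x (P5) chips: the header offsets also come from
 * hdr_info, but only the network and transport headers are set here; the
 * pseudo checksum is not recomputed in this path.
 */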
1473 static struct sk_buff *bnxt_gro_func_5750x(struct bnxt_tpa_info *tpa_info,
1474                                            int payload_off, int tcp_ts,
1475                                            struct sk_buff *skb)
1476 {
1477 #ifdef CONFIG_INET
1478         u16 outer_ip_off, inner_ip_off, inner_mac_off;
1479         u32 hdr_info = tpa_info->hdr_info;
1480         int iphdr_len, nw_off;
1481
1482         inner_ip_off = BNXT_TPA_INNER_L3_OFF(hdr_info);
1483         inner_mac_off = BNXT_TPA_INNER_L2_OFF(hdr_info);
1484         outer_ip_off = BNXT_TPA_OUTER_L3_OFF(hdr_info);
1485
1486         nw_off = inner_ip_off - ETH_HLEN;
1487         skb_set_network_header(skb, nw_off);
1488         iphdr_len = (tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_IP_TYPE) ?
1489                      sizeof(struct ipv6hdr) : sizeof(struct iphdr);
1490         skb_set_transport_header(skb, nw_off + iphdr_len);
1491
1492         if (inner_mac_off) { /* tunnel */
1493                 __be16 proto = *((__be16 *)(skb->data + outer_ip_off -
1494                                             ETH_HLEN - 2));
1495
1496                 bnxt_gro_tunnel(skb, proto);
1497         }
1498 #endif
1499         return skb;
1500 }
1501
1502 #define BNXT_IPV4_HDR_SIZE      (sizeof(struct iphdr) + sizeof(struct tcphdr))
1503 #define BNXT_IPV6_HDR_SIZE      (sizeof(struct ipv6hdr) + sizeof(struct tcphdr))
1504
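/* Header fixup for 5730x chips: hdr_info is not available here, so the inner
 * header offsets are derived from the hardware payload offset and the fixed
 * IPv4/IPv6 + TCP header sizes (plus 12 bytes of TCP options when timestamps
 * are present).
 */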
1505 static struct sk_buff *bnxt_gro_func_5730x(struct bnxt_tpa_info *tpa_info,
1506                                            int payload_off, int tcp_ts,
1507                                            struct sk_buff *skb)
1508 {
1509 #ifdef CONFIG_INET
1510         struct tcphdr *th;
1511         int len, nw_off, tcp_opt_len = 0;
1512
1513         if (tcp_ts)
1514                 tcp_opt_len = 12;
1515
1516         if (tpa_info->gso_type == SKB_GSO_TCPV4) {
1517                 struct iphdr *iph;
1518
1519                 nw_off = payload_off - BNXT_IPV4_HDR_SIZE - tcp_opt_len -
1520                          ETH_HLEN;
1521                 skb_set_network_header(skb, nw_off);
1522                 iph = ip_hdr(skb);
1523                 skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
1524                 len = skb->len - skb_transport_offset(skb);
1525                 th = tcp_hdr(skb);
1526                 th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
1527         } else if (tpa_info->gso_type == SKB_GSO_TCPV6) {
1528                 struct ipv6hdr *iph;
1529
1530                 nw_off = payload_off - BNXT_IPV6_HDR_SIZE - tcp_opt_len -
1531                          ETH_HLEN;
1532                 skb_set_network_header(skb, nw_off);
1533                 iph = ipv6_hdr(skb);
1534                 skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
1535                 len = skb->len - skb_transport_offset(skb);
1536                 th = tcp_hdr(skb);
1537                 th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
1538         } else {
1539                 dev_kfree_skb_any(skb);
1540                 return NULL;
1541         }
1542
1543         if (nw_off) /* tunnel */
1544                 bnxt_gro_tunnel(skb, skb->protocol);
1545 #endif
1546         return skb;
1547 }
1548
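/* Finish GRO for a TPA packet: fill in gso_size/gso_type and the segment
 * count from the TPA end completion (nothing to do for a single segment),
 * call the chip-specific header fixup, then complete the GRO state with
 * tcp_gro_complete().
 */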
1549 static inline struct sk_buff *bnxt_gro_skb(struct bnxt *bp,
1550                                            struct bnxt_tpa_info *tpa_info,
1551                                            struct rx_tpa_end_cmp *tpa_end,
1552                                            struct rx_tpa_end_cmp_ext *tpa_end1,
1553                                            struct sk_buff *skb)
1554 {
1555 #ifdef CONFIG_INET
1556         int payload_off;
1557         u16 segs;
1558
1559         segs = TPA_END_TPA_SEGS(tpa_end);
1560         if (segs == 1)
1561                 return skb;
1562
1563         NAPI_GRO_CB(skb)->count = segs;
1564         skb_shinfo(skb)->gso_size =
1565                 le32_to_cpu(tpa_end1->rx_tpa_end_cmp_seg_len);
1566         skb_shinfo(skb)->gso_type = tpa_info->gso_type;
1567         if (bp->flags & BNXT_FLAG_CHIP_P5)
1568                 payload_off = TPA_END_PAYLOAD_OFF_P5(tpa_end1);
1569         else
1570                 payload_off = TPA_END_PAYLOAD_OFF(tpa_end);
1571         skb = bp->gro_func(tpa_info, payload_off, TPA_END_GRO_TS(tpa_end), skb);
1572         if (likely(skb))
1573                 tcp_gro_complete(skb);
1574 #endif
1575         return skb;
1576 }
1577
1578 /* Given the cfa_code of a received packet, determine which
1579  * netdev (vf-rep or PF) the packet is destined to.
1580  */
1581 static struct net_device *bnxt_get_pkt_dev(struct bnxt *bp, u16 cfa_code)
1582 {
1583         struct net_device *dev = bnxt_get_vf_rep(bp, cfa_code);
1584
1585         /* if vf-rep dev is NULL, the packet must belong to the PF */
1586         return dev ? dev : bp->dev;
1587 }
1588
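/* Handle a TPA end completion: build an skb for the coalesced packet
 * (copying small packets, otherwise handing the TPA buffer to the skb and
 * attaching any aggregation pages), then apply the RSS hash, VLAN tag,
 * checksum status and optional GRO before returning it for delivery.
 */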
1589 static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
1590                                            struct bnxt_cp_ring_info *cpr,
1591                                            u32 *raw_cons,
1592                                            struct rx_tpa_end_cmp *tpa_end,
1593                                            struct rx_tpa_end_cmp_ext *tpa_end1,
1594                                            u8 *event)
1595 {
1596         struct bnxt_napi *bnapi = cpr->bnapi;
1597         struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
1598         u8 *data_ptr, agg_bufs;
1599         unsigned int len;
1600         struct bnxt_tpa_info *tpa_info;
1601         dma_addr_t mapping;
1602         struct sk_buff *skb;
1603         u16 idx = 0, agg_id;
1604         void *data;
1605         bool gro;
1606
1607         if (unlikely(bnapi->in_reset)) {
1608                 int rc = bnxt_discard_rx(bp, cpr, raw_cons, tpa_end);
1609
1610                 if (rc < 0)
1611                         return ERR_PTR(-EBUSY);
1612                 return NULL;
1613         }
1614
1615         if (bp->flags & BNXT_FLAG_CHIP_P5) {
1616                 agg_id = TPA_END_AGG_ID_P5(tpa_end);
1617                 agg_id = bnxt_lookup_agg_idx(rxr, agg_id);
1618                 agg_bufs = TPA_END_AGG_BUFS_P5(tpa_end1);
1619                 tpa_info = &rxr->rx_tpa[agg_id];
1620                 if (unlikely(agg_bufs != tpa_info->agg_count)) {
1621                         netdev_warn(bp->dev, "TPA end agg_buf %d != expected agg_bufs %d\n",
1622                                     agg_bufs, tpa_info->agg_count);
1623                         agg_bufs = tpa_info->agg_count;
1624                 }
1625                 tpa_info->agg_count = 0;
1626                 *event |= BNXT_AGG_EVENT;
1627                 bnxt_free_agg_idx(rxr, agg_id);
1628                 idx = agg_id;
1629                 gro = !!(bp->flags & BNXT_FLAG_GRO);
1630         } else {
1631                 agg_id = TPA_END_AGG_ID(tpa_end);
1632                 agg_bufs = TPA_END_AGG_BUFS(tpa_end);
1633                 tpa_info = &rxr->rx_tpa[agg_id];
1634                 idx = RING_CMP(*raw_cons);
1635                 if (agg_bufs) {
1636                         if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, raw_cons))
1637                                 return ERR_PTR(-EBUSY);
1638
1639                         *event |= BNXT_AGG_EVENT;
1640                         idx = NEXT_CMP(idx);
1641                 }
1642                 gro = !!TPA_END_GRO(tpa_end);
1643         }
1644         data = tpa_info->data;
1645         data_ptr = tpa_info->data_ptr;
1646         prefetch(data_ptr);
1647         len = tpa_info->len;
1648         mapping = tpa_info->mapping;
1649
1650         if (unlikely(agg_bufs > MAX_SKB_FRAGS || TPA_END_ERRORS(tpa_end1))) {
1651                 bnxt_abort_tpa(cpr, idx, agg_bufs);
1652                 if (agg_bufs > MAX_SKB_FRAGS)
1653                         netdev_warn(bp->dev, "TPA frags %d exceeded MAX_SKB_FRAGS %d\n",
1654                                     agg_bufs, (int)MAX_SKB_FRAGS);
1655                 return NULL;
1656         }
1657
1658         if (len <= bp->rx_copy_thresh) {
1659                 skb = bnxt_copy_skb(bnapi, data_ptr, len, mapping);
1660                 if (!skb) {
1661                         bnxt_abort_tpa(cpr, idx, agg_bufs);
1662                         cpr->sw_stats.rx.rx_oom_discards += 1;
1663                         return NULL;
1664                 }
1665         } else {
1666                 u8 *new_data;
1667                 dma_addr_t new_mapping;
1668
1669                 new_data = __bnxt_alloc_rx_frag(bp, &new_mapping, GFP_ATOMIC);
1670                 if (!new_data) {
1671                         bnxt_abort_tpa(cpr, idx, agg_bufs);
1672                         cpr->sw_stats.rx.rx_oom_discards += 1;
1673                         return NULL;
1674                 }
1675
1676                 tpa_info->data = new_data;
1677                 tpa_info->data_ptr = new_data + bp->rx_offset;
1678                 tpa_info->mapping = new_mapping;
1679
1680                 skb = napi_build_skb(data, bp->rx_buf_size);
1681                 dma_unmap_single_attrs(&bp->pdev->dev, mapping,
1682                                        bp->rx_buf_use_size, bp->rx_dir,
1683                                        DMA_ATTR_WEAK_ORDERING);
1684
1685                 if (!skb) {
1686                         skb_free_frag(data);
1687                         bnxt_abort_tpa(cpr, idx, agg_bufs);
1688                         cpr->sw_stats.rx.rx_oom_discards += 1;
1689                         return NULL;
1690                 }
1691                 skb_reserve(skb, bp->rx_offset);
1692                 skb_put(skb, len);
1693         }
1694
1695         if (agg_bufs) {
1696                 skb = bnxt_rx_agg_pages_skb(bp, cpr, skb, idx, agg_bufs, true);
1697                 if (!skb) {
1698                         /* Page reuse already handled by bnxt_rx_agg_pages_skb(). */
1699                         cpr->sw_stats.rx.rx_oom_discards += 1;
1700                         return NULL;
1701                 }
1702         }
1703
1704         skb->protocol =
1705                 eth_type_trans(skb, bnxt_get_pkt_dev(bp, tpa_info->cfa_code));
1706
1707         if (tpa_info->hash_type != PKT_HASH_TYPE_NONE)
1708                 skb_set_hash(skb, tpa_info->rss_hash, tpa_info->hash_type);
1709
1710         if ((tpa_info->flags2 & RX_CMP_FLAGS2_META_FORMAT_VLAN) &&
1711             (skb->dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX)) {
1712                 __be16 vlan_proto = htons(tpa_info->metadata >>
1713                                           RX_CMP_FLAGS2_METADATA_TPID_SFT);
1714                 u16 vtag = tpa_info->metadata & RX_CMP_FLAGS2_METADATA_TCI_MASK;
1715
1716                 if (eth_type_vlan(vlan_proto)) {
1717                         __vlan_hwaccel_put_tag(skb, vlan_proto, vtag);
1718                 } else {
1719                         dev_kfree_skb(skb);
1720                         return NULL;
1721                 }
1722         }
1723
1724         skb_checksum_none_assert(skb);
1725         if (likely(tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_L4_CS_CALC)) {
1726                 skb->ip_summed = CHECKSUM_UNNECESSARY;
1727                 skb->csum_level =
1728                         (tpa_info->flags2 & RX_CMP_FLAGS2_T_L4_CS_CALC) >> 3;
1729         }
1730
1731         if (gro)
1732                 skb = bnxt_gro_skb(bp, tpa_info, tpa_end, tpa_end1, skb);
1733
1734         return skb;
1735 }
1736
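/* Stash a P5 TPA aggregation buffer completion in the per-aggregation array
 * until the matching TPA end completion arrives.
 */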
1737 static void bnxt_tpa_agg(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
1738                          struct rx_agg_cmp *rx_agg)
1739 {
1740         u16 agg_id = TPA_AGG_AGG_ID(rx_agg);
1741         struct bnxt_tpa_info *tpa_info;
1742
1743         agg_id = bnxt_lookup_agg_idx(rxr, agg_id);
1744         tpa_info = &rxr->rx_tpa[agg_id];
1745         BUG_ON(tpa_info->agg_count >= MAX_SKB_FRAGS);
1746         tpa_info->agg_arr[tpa_info->agg_count++] = *rx_agg;
1747 }
1748
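/* Deliver a completed skb either to the vf-rep path or to the normal GRO
 * receive path on the PF netdev.
 */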
1749 static void bnxt_deliver_skb(struct bnxt *bp, struct bnxt_napi *bnapi,
1750                              struct sk_buff *skb)
1751 {
1752         if (skb->dev != bp->dev) {
1753                 /* this packet belongs to a vf-rep */
1754                 bnxt_vf_rep_rx(bp, skb);
1755                 return;
1756         }
1757         skb_record_rx_queue(skb, bnapi->index);
1758         skb_mark_for_recycle(skb);
1759         napi_gro_receive(&bnapi->napi, skb);
1760 }
1761
1762 /* returns the following:
1763  * 1       - 1 packet successfully received
1764  * 0       - successful TPA_START, packet not completed yet
1765  * -EBUSY  - completion ring does not have all the agg buffers yet
1766  * -ENOMEM - packet aborted due to out of memory
1767  * -EIO    - packet aborted due to hw error indicated in BD
1768  */
1769 static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1770                        u32 *raw_cons, u8 *event)
1771 {
1772         struct bnxt_napi *bnapi = cpr->bnapi;
1773         struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
1774         struct net_device *dev = bp->dev;
1775         struct rx_cmp *rxcmp;
1776         struct rx_cmp_ext *rxcmp1;
1777         u32 tmp_raw_cons = *raw_cons;
1778         u16 cfa_code, cons, prod, cp_cons = RING_CMP(tmp_raw_cons);
1779         struct bnxt_sw_rx_bd *rx_buf;
1780         unsigned int len;
1781         u8 *data_ptr, agg_bufs, cmp_type;
1782         bool xdp_active = false;
1783         dma_addr_t dma_addr;
1784         struct sk_buff *skb;
1785         struct xdp_buff xdp;
1786         u32 flags, misc;
1787         void *data;
1788         int rc = 0;
1789
1790         rxcmp = (struct rx_cmp *)
1791                         &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
1792
1793         cmp_type = RX_CMP_TYPE(rxcmp);
1794
1795         if (cmp_type == CMP_TYPE_RX_TPA_AGG_CMP) {
1796                 bnxt_tpa_agg(bp, rxr, (struct rx_agg_cmp *)rxcmp);
1797                 goto next_rx_no_prod_no_len;
1798         }
1799
1800         tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
1801         cp_cons = RING_CMP(tmp_raw_cons);
1802         rxcmp1 = (struct rx_cmp_ext *)
1803                         &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
1804
1805         if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
1806                 return -EBUSY;
1807
1808         /* The valid test of the entry must be done first before
1809          * reading any further.
1810          */
1811         dma_rmb();
1812         prod = rxr->rx_prod;
1813
1814         if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP) {
1815                 bnxt_tpa_start(bp, rxr, (struct rx_tpa_start_cmp *)rxcmp,
1816                                (struct rx_tpa_start_cmp_ext *)rxcmp1);
1817
1818                 *event |= BNXT_RX_EVENT;
1819                 goto next_rx_no_prod_no_len;
1820
1821         } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
1822                 skb = bnxt_tpa_end(bp, cpr, &tmp_raw_cons,
1823                                    (struct rx_tpa_end_cmp *)rxcmp,
1824                                    (struct rx_tpa_end_cmp_ext *)rxcmp1, event);
1825
1826                 if (IS_ERR(skb))
1827                         return -EBUSY;
1828
1829                 rc = -ENOMEM;
1830                 if (likely(skb)) {
1831                         bnxt_deliver_skb(bp, bnapi, skb);
1832                         rc = 1;
1833                 }
1834                 *event |= BNXT_RX_EVENT;
1835                 goto next_rx_no_prod_no_len;
1836         }
1837
1838         cons = rxcmp->rx_cmp_opaque;
1839         if (unlikely(cons != rxr->rx_next_cons)) {
1840                 int rc1 = bnxt_discard_rx(bp, cpr, &tmp_raw_cons, rxcmp);
1841
1842                 /* 0xffff is forced error, don't print it */
1843                 if (rxr->rx_next_cons != 0xffff)
1844                         netdev_warn(bp->dev, "RX cons %x != expected cons %x\n",
1845                                     cons, rxr->rx_next_cons);
1846                 bnxt_sched_reset_rxr(bp, rxr);
1847                 if (rc1)
1848                         return rc1;
1849                 goto next_rx_no_prod_no_len;
1850         }
1851         rx_buf = &rxr->rx_buf_ring[cons];
1852         data = rx_buf->data;
1853         data_ptr = rx_buf->data_ptr;
1854         prefetch(data_ptr);
1855
1856         misc = le32_to_cpu(rxcmp->rx_cmp_misc_v1);
1857         agg_bufs = (misc & RX_CMP_AGG_BUFS) >> RX_CMP_AGG_BUFS_SHIFT;
1858
1859         if (agg_bufs) {
1860                 if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
1861                         return -EBUSY;
1862
1863                 cp_cons = NEXT_CMP(cp_cons);
1864                 *event |= BNXT_AGG_EVENT;
1865         }
1866         *event |= BNXT_RX_EVENT;
1867
1868         rx_buf->data = NULL;
1869         if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L2_ERRORS) {
1870                 u32 rx_err = le32_to_cpu(rxcmp1->rx_cmp_cfa_code_errors_v2);
1871
1872                 bnxt_reuse_rx_data(rxr, cons, data);
1873                 if (agg_bufs)
1874                         bnxt_reuse_rx_agg_bufs(cpr, cp_cons, 0, agg_bufs,
1875                                                false);
1876
1877                 rc = -EIO;
1878                 if (rx_err & RX_CMPL_ERRORS_BUFFER_ERROR_MASK) {
1879                         bnapi->cp_ring.sw_stats.rx.rx_buf_errors++;
1880                         if (!(bp->flags & BNXT_FLAG_CHIP_P5) &&
1881                             !(bp->fw_cap & BNXT_FW_CAP_RING_MONITOR)) {
1882                                 netdev_warn_once(bp->dev, "RX buffer error %x\n",
1883                                                  rx_err);
1884                                 bnxt_sched_reset_rxr(bp, rxr);
1885                         }
1886                 }
1887                 goto next_rx_no_len;
1888         }
1889
1890         flags = le32_to_cpu(rxcmp->rx_cmp_len_flags_type);
1891         len = flags >> RX_CMP_LEN_SHIFT;
1892         dma_addr = rx_buf->mapping;
1893
1894         if (bnxt_xdp_attached(bp, rxr)) {
1895                 bnxt_xdp_buff_init(bp, rxr, cons, data_ptr, len, &xdp);
1896                 if (agg_bufs) {
1897                         u32 frag_len = bnxt_rx_agg_pages_xdp(bp, cpr, &xdp,
1898                                                              cp_cons, agg_bufs,
1899                                                              false);
1900                         if (!frag_len) {
1901                                 cpr->sw_stats.rx.rx_oom_discards += 1;
1902                                 rc = -ENOMEM;
1903                                 goto next_rx;
1904                         }
1905                 }
1906                 xdp_active = true;
1907         }
1908
1909         if (xdp_active) {
1910                 if (bnxt_rx_xdp(bp, rxr, cons, xdp, data, &data_ptr, &len, event)) {
1911                         rc = 1;
1912                         goto next_rx;
1913                 }
1914         }
1915
1916         if (len <= bp->rx_copy_thresh) {
1917                 skb = bnxt_copy_skb(bnapi, data_ptr, len, dma_addr);
1918                 bnxt_reuse_rx_data(rxr, cons, data);
1919                 if (!skb) {
1920                         if (agg_bufs) {
1921                                 if (!xdp_active)
1922                                         bnxt_reuse_rx_agg_bufs(cpr, cp_cons, 0,
1923                                                                agg_bufs, false);
1924                                 else
1925                                         bnxt_xdp_buff_frags_free(rxr, &xdp);
1926                         }
1927                         cpr->sw_stats.rx.rx_oom_discards += 1;
1928                         rc = -ENOMEM;
1929                         goto next_rx;
1930                 }
1931         } else {
1932                 u32 payload;
1933
1934                 if (rx_buf->data_ptr == data_ptr)
1935                         payload = misc & RX_CMP_PAYLOAD_OFFSET;
1936                 else
1937                         payload = 0;
1938                 skb = bp->rx_skb_func(bp, rxr, cons, data, data_ptr, dma_addr,
1939                                       payload | len);
1940                 if (!skb) {
1941                         cpr->sw_stats.rx.rx_oom_discards += 1;
1942                         rc = -ENOMEM;
1943                         goto next_rx;
1944                 }
1945         }
1946
1947         if (agg_bufs) {
1948                 if (!xdp_active) {
1949                         skb = bnxt_rx_agg_pages_skb(bp, cpr, skb, cp_cons, agg_bufs, false);
1950                         if (!skb) {
1951                                 cpr->sw_stats.rx.rx_oom_discards += 1;
1952                                 rc = -ENOMEM;
1953                                 goto next_rx;
1954                         }
1955                 } else {
1956                         skb = bnxt_xdp_build_skb(bp, skb, agg_bufs, rxr->page_pool, &xdp, rxcmp1);
1957                         if (!skb) {
1958                                 /* we should be able to free the old skb here */
1959                                 bnxt_xdp_buff_frags_free(rxr, &xdp);
1960                                 cpr->sw_stats.rx.rx_oom_discards += 1;
1961                                 rc = -ENOMEM;
1962                                 goto next_rx;
1963                         }
1964                 }
1965         }
1966
1967         if (RX_CMP_HASH_VALID(rxcmp)) {
1968                 u32 hash_type = RX_CMP_HASH_TYPE(rxcmp);
1969                 enum pkt_hash_types type = PKT_HASH_TYPE_L4;
1970
1971                 /* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
1972                 if (hash_type != 1 && hash_type != 3)
1973                         type = PKT_HASH_TYPE_L3;
1974                 skb_set_hash(skb, le32_to_cpu(rxcmp->rx_cmp_rss_hash), type);
1975         }
1976
1977         cfa_code = RX_CMP_CFA_CODE(rxcmp1);
1978         skb->protocol = eth_type_trans(skb, bnxt_get_pkt_dev(bp, cfa_code));
1979
1980         if ((rxcmp1->rx_cmp_flags2 &
1981              cpu_to_le32(RX_CMP_FLAGS2_META_FORMAT_VLAN)) &&
1982             (skb->dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX)) {
1983                 u32 meta_data = le32_to_cpu(rxcmp1->rx_cmp_meta_data);
1984                 u16 vtag = meta_data & RX_CMP_FLAGS2_METADATA_TCI_MASK;
1985                 __be16 vlan_proto = htons(meta_data >>
1986                                           RX_CMP_FLAGS2_METADATA_TPID_SFT);
1987
1988                 if (eth_type_vlan(vlan_proto)) {
1989                         __vlan_hwaccel_put_tag(skb, vlan_proto, vtag);
1990                 } else {
1991                         dev_kfree_skb(skb);
1992                         goto next_rx;
1993                 }
1994         }
1995
1996         skb_checksum_none_assert(skb);
1997         if (RX_CMP_L4_CS_OK(rxcmp1)) {
1998                 if (dev->features & NETIF_F_RXCSUM) {
1999                         skb->ip_summed = CHECKSUM_UNNECESSARY;
2000                         skb->csum_level = RX_CMP_ENCAP(rxcmp1);
2001                 }
2002         } else {
2003                 if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L4_CS_ERR_BITS) {
2004                         if (dev->features & NETIF_F_RXCSUM)
2005                                 bnapi->cp_ring.sw_stats.rx.rx_l4_csum_errors++;
2006                 }
2007         }
2008
2009         if (unlikely((flags & RX_CMP_FLAGS_ITYPES_MASK) ==
2010                      RX_CMP_FLAGS_ITYPE_PTP_W_TS) || bp->ptp_all_rx_tstamp) {
2011                 if (bp->flags & BNXT_FLAG_CHIP_P5) {
2012                         u32 cmpl_ts = le32_to_cpu(rxcmp1->rx_cmp_timestamp);
2013                         u64 ns, ts;
2014
2015                         if (!bnxt_get_rx_ts_p5(bp, &ts, cmpl_ts)) {
2016                                 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
2017
2018                                 spin_lock_bh(&ptp->ptp_lock);
2019                                 ns = timecounter_cyc2time(&ptp->tc, ts);
2020                                 spin_unlock_bh(&ptp->ptp_lock);
2021                                 memset(skb_hwtstamps(skb), 0,
2022                                        sizeof(*skb_hwtstamps(skb)));
2023                                 skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(ns);
2024                         }
2025                 }
2026         }
2027         bnxt_deliver_skb(bp, bnapi, skb);
2028         rc = 1;
2029
2030 next_rx:
2031         cpr->rx_packets += 1;
2032         cpr->rx_bytes += len;
2033
2034 next_rx_no_len:
2035         rxr->rx_prod = NEXT_RX(prod);
2036         rxr->rx_next_cons = NEXT_RX(cons);
2037
2038 next_rx_no_prod_no_len:
2039         *raw_cons = tmp_raw_cons;
2040
2041         return rc;
2042 }
2043
2044 /* In netpoll mode, if we are using a combined completion ring, we need to
2045  * discard the rx packets and recycle the buffers.
2046  */
2047 static int bnxt_force_rx_discard(struct bnxt *bp,
2048                                  struct bnxt_cp_ring_info *cpr,
2049                                  u32 *raw_cons, u8 *event)
2050 {
2051         u32 tmp_raw_cons = *raw_cons;
2052         struct rx_cmp_ext *rxcmp1;
2053         struct rx_cmp *rxcmp;
2054         u16 cp_cons;
2055         u8 cmp_type;
2056         int rc;
2057
2058         cp_cons = RING_CMP(tmp_raw_cons);
2059         rxcmp = (struct rx_cmp *)
2060                         &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
2061
2062         tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
2063         cp_cons = RING_CMP(tmp_raw_cons);
2064         rxcmp1 = (struct rx_cmp_ext *)
2065                         &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
2066
2067         if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
2068                 return -EBUSY;
2069
2070         /* The valid test of the entry must be done first before
2071          * reading any further.
2072          */
2073         dma_rmb();
2074         cmp_type = RX_CMP_TYPE(rxcmp);
2075         if (cmp_type == CMP_TYPE_RX_L2_CMP) {
2076                 rxcmp1->rx_cmp_cfa_code_errors_v2 |=
2077                         cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR);
2078         } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
2079                 struct rx_tpa_end_cmp_ext *tpa_end1;
2080
2081                 tpa_end1 = (struct rx_tpa_end_cmp_ext *)rxcmp1;
2082                 tpa_end1->rx_tpa_end_cmp_errors_v2 |=
2083                         cpu_to_le32(RX_TPA_END_CMP_ERRORS);
2084         }
2085         rc = bnxt_rx_pkt(bp, cpr, raw_cons, event);
2086         if (rc && rc != -EBUSY)
2087                 cpr->sw_stats.rx.rx_netpoll_discards += 1;
2088         return rc;
2089 }
2090
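/* Read one of the firmware health registers; depending on the encoded
 * register type it may live in PCI config space, in a GRC window mapped into
 * BAR0, or directly in BAR0/BAR1.
 */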
2091 u32 bnxt_fw_health_readl(struct bnxt *bp, int reg_idx)
2092 {
2093         struct bnxt_fw_health *fw_health = bp->fw_health;
2094         u32 reg = fw_health->regs[reg_idx];
2095         u32 reg_type, reg_off, val = 0;
2096
2097         reg_type = BNXT_FW_HEALTH_REG_TYPE(reg);
2098         reg_off = BNXT_FW_HEALTH_REG_OFF(reg);
2099         switch (reg_type) {
2100         case BNXT_FW_HEALTH_REG_TYPE_CFG:
2101                 pci_read_config_dword(bp->pdev, reg_off, &val);
2102                 break;
2103         case BNXT_FW_HEALTH_REG_TYPE_GRC:
2104                 reg_off = fw_health->mapped_regs[reg_idx];
2105                 fallthrough;
2106         case BNXT_FW_HEALTH_REG_TYPE_BAR0:
2107                 val = readl(bp->bar0 + reg_off);
2108                 break;
2109         case BNXT_FW_HEALTH_REG_TYPE_BAR1:
2110                 val = readl(bp->bar1 + reg_off);
2111                 break;
2112         }
2113         if (reg_idx == BNXT_FW_RESET_INPROG_REG)
2114                 val &= fw_health->fw_reset_inprog_reg_mask;
2115         return val;
2116 }
2117
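/* Translate a firmware aggregation ring id back to the rx ring group index it
 * belongs to, or INVALID_HW_RING_ID if no group matches.
 */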
2118 static u16 bnxt_agg_ring_id_to_grp_idx(struct bnxt *bp, u16 ring_id)
2119 {
2120         int i;
2121
2122         for (i = 0; i < bp->rx_nr_rings; i++) {
2123                 u16 grp_idx = bp->rx_ring[i].bnapi->index;
2124                 struct bnxt_ring_grp_info *grp_info;
2125
2126                 grp_info = &bp->grp_info[grp_idx];
2127                 if (grp_info->agg_fw_ring_id == ring_id)
2128                         return grp_idx;
2129         }
2130         return INVALID_HW_RING_ID;
2131 }
2132
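/* Log firmware error_report async events (invalid 1PPS signal, pause storm,
 * dropped doorbells) at an appropriate severity.
 */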
2133 static void bnxt_event_error_report(struct bnxt *bp, u32 data1, u32 data2)
2134 {
2135         u32 err_type = BNXT_EVENT_ERROR_REPORT_TYPE(data1);
2136
2137         switch (err_type) {
2138         case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_INVALID_SIGNAL:
2139                 netdev_err(bp->dev, "1PPS: Received invalid signal on pin%lu from the external source. Please fix the signal and reconfigure the pin\n",
2140                            BNXT_EVENT_INVALID_SIGNAL_DATA(data2));
2141                 break;
2142         case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_PAUSE_STORM:
2143                 netdev_warn(bp->dev, "Pause Storm detected!\n");
2144                 break;
2145         case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DOORBELL_DROP_THRESHOLD:
2146                 netdev_warn(bp->dev, "One or more MMIO doorbells dropped by the device!\n");
2147                 break;
2148         default:
2149                 netdev_err(bp->dev, "FW reported unknown error type %u\n",
2150                            err_type);
2151                 break;
2152         }
2153 }
2154
2155 #define BNXT_GET_EVENT_PORT(data)       \
2156         ((data) &                       \
2157          ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK)
2158
2159 #define BNXT_EVENT_RING_TYPE(data2)     \
2160         ((data2) &                      \
2161          ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_MASK)
2162
2163 #define BNXT_EVENT_RING_TYPE_RX(data2)  \
2164         (BNXT_EVENT_RING_TYPE(data2) == \
2165          ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_RX)
2166
2167 #define BNXT_EVENT_PHC_EVENT_TYPE(data1)        \
2168         (((data1) & ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_MASK) >>\
2169          ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_SFT)
2170
2171 #define BNXT_EVENT_PHC_RTC_UPDATE(data1)        \
2172         (((data1) & ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_PHC_TIME_MSB_MASK) >>\
2173          ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_PHC_TIME_MSB_SFT)
2174
2175 #define BNXT_PHC_BITS   48
2176
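/* Decode a firmware async event completion and either handle it inline or set
 * the matching sp_event bit so the slow-path work can pick it up.
 */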
2177 static int bnxt_async_event_process(struct bnxt *bp,
2178                                     struct hwrm_async_event_cmpl *cmpl)
2179 {
2180         u16 event_id = le16_to_cpu(cmpl->event_id);
2181         u32 data1 = le32_to_cpu(cmpl->event_data1);
2182         u32 data2 = le32_to_cpu(cmpl->event_data2);
2183
2184         netdev_dbg(bp->dev, "hwrm event 0x%x {0x%x, 0x%x}\n",
2185                    event_id, data1, data2);
2186
2187         /* TODO CHIMP_FW: Define event IDs for link change, error etc */
2188         switch (event_id) {
2189         case ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE: {
2190                 struct bnxt_link_info *link_info = &bp->link_info;
2191
2192                 if (BNXT_VF(bp))
2193                         goto async_event_process_exit;
2194
2195                 /* print unsupported speed warning in forced speed mode only */
2196                 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED) &&
2197                     (data1 & 0x20000)) {
2198                         u16 fw_speed = link_info->force_link_speed;
2199                         u32 speed = bnxt_fw_to_ethtool_speed(fw_speed);
2200
2201                         if (speed != SPEED_UNKNOWN)
2202                                 netdev_warn(bp->dev, "Link speed %d no longer supported\n",
2203                                             speed);
2204                 }
2205                 set_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT, &bp->sp_event);
2206         }
2207                 fallthrough;
2208         case ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE:
2209         case ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE:
2210                 set_bit(BNXT_LINK_CFG_CHANGE_SP_EVENT, &bp->sp_event);
2211                 fallthrough;
2212         case ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE:
2213                 set_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event);
2214                 break;
2215         case ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD:
2216                 set_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event);
2217                 break;
2218         case ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED: {
2219                 u16 port_id = BNXT_GET_EVENT_PORT(data1);
2220
2221                 if (BNXT_VF(bp))
2222                         break;
2223
2224                 if (bp->pf.port_id != port_id)
2225                         break;
2226
2227                 set_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event);
2228                 break;
2229         }
2230         case ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE:
2231                 if (BNXT_PF(bp))
2232                         goto async_event_process_exit;
2233                 set_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event);
2234                 break;
2235         case ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY: {
2236                 char *type_str = "Solicited";
2237
2238                 if (!bp->fw_health)
2239                         goto async_event_process_exit;
2240
2241                 bp->fw_reset_timestamp = jiffies;
2242                 bp->fw_reset_min_dsecs = cmpl->timestamp_lo;
2243                 if (!bp->fw_reset_min_dsecs)
2244                         bp->fw_reset_min_dsecs = BNXT_DFLT_FW_RST_MIN_DSECS;
2245                 bp->fw_reset_max_dsecs = le16_to_cpu(cmpl->timestamp_hi);
2246                 if (!bp->fw_reset_max_dsecs)
2247                         bp->fw_reset_max_dsecs = BNXT_DFLT_FW_RST_MAX_DSECS;
2248                 if (EVENT_DATA1_RESET_NOTIFY_FW_ACTIVATION(data1)) {
2249                         set_bit(BNXT_STATE_FW_ACTIVATE_RESET, &bp->state);
2250                 } else if (EVENT_DATA1_RESET_NOTIFY_FATAL(data1)) {
2251                         type_str = "Fatal";
2252                         bp->fw_health->fatalities++;
2253                         set_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
2254                 } else if (data2 && BNXT_FW_STATUS_HEALTHY !=
2255                            EVENT_DATA2_RESET_NOTIFY_FW_STATUS_CODE(data2)) {
2256                         type_str = "Non-fatal";
2257                         bp->fw_health->survivals++;
2258                         set_bit(BNXT_STATE_FW_NON_FATAL_COND, &bp->state);
2259                 }
2260                 netif_warn(bp, hw, bp->dev,
2261                            "%s firmware reset event, data1: 0x%x, data2: 0x%x, min wait %u ms, max wait %u ms\n",
2262                            type_str, data1, data2,
2263                            bp->fw_reset_min_dsecs * 100,
2264                            bp->fw_reset_max_dsecs * 100);
2265                 set_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &bp->sp_event);
2266                 break;
2267         }
2268         case ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY: {
2269                 struct bnxt_fw_health *fw_health = bp->fw_health;
2270                 char *status_desc = "healthy";
2271                 u32 status;
2272
2273                 if (!fw_health)
2274                         goto async_event_process_exit;
2275
2276                 if (!EVENT_DATA1_RECOVERY_ENABLED(data1)) {
2277                         fw_health->enabled = false;
2278                         netif_info(bp, drv, bp->dev, "Driver recovery watchdog is disabled\n");
2279                         break;
2280                 }
2281                 fw_health->primary = EVENT_DATA1_RECOVERY_MASTER_FUNC(data1);
2282                 fw_health->tmr_multiplier =
2283                         DIV_ROUND_UP(fw_health->polling_dsecs * HZ,
2284                                      bp->current_interval * 10);
2285                 fw_health->tmr_counter = fw_health->tmr_multiplier;
2286                 if (!fw_health->enabled)
2287                         fw_health->last_fw_heartbeat =
2288                                 bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
2289                 fw_health->last_fw_reset_cnt =
2290                         bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
2291                 status = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
2292                 if (status != BNXT_FW_STATUS_HEALTHY)
2293                         status_desc = "unhealthy";
2294                 netif_info(bp, drv, bp->dev,
2295                            "Driver recovery watchdog, role: %s, firmware status: 0x%x (%s), resets: %u\n",
2296                            fw_health->primary ? "primary" : "backup", status,
2297                            status_desc, fw_health->last_fw_reset_cnt);
2298                 if (!fw_health->enabled) {
2299                         /* Make sure tmr_counter is set and visible to
2300                          * bnxt_health_check() before setting enabled to true.
2301                          */
2302                         smp_wmb();
2303                         fw_health->enabled = true;
2304                 }
2305                 goto async_event_process_exit;
2306         }
2307         case ASYNC_EVENT_CMPL_EVENT_ID_DEBUG_NOTIFICATION:
2308                 netif_notice(bp, hw, bp->dev,
2309                              "Received firmware debug notification, data1: 0x%x, data2: 0x%x\n",
2310                              data1, data2);
2311                 goto async_event_process_exit;
2312         case ASYNC_EVENT_CMPL_EVENT_ID_RING_MONITOR_MSG: {
2313                 struct bnxt_rx_ring_info *rxr;
2314                 u16 grp_idx;
2315
2316                 if (bp->flags & BNXT_FLAG_CHIP_P5)
2317                         goto async_event_process_exit;
2318
2319                 netdev_warn(bp->dev, "Ring monitor event, ring type %lu id 0x%x\n",
2320                             BNXT_EVENT_RING_TYPE(data2), data1);
2321                 if (!BNXT_EVENT_RING_TYPE_RX(data2))
2322                         goto async_event_process_exit;
2323
2324                 grp_idx = bnxt_agg_ring_id_to_grp_idx(bp, data1);
2325                 if (grp_idx == INVALID_HW_RING_ID) {
2326                         netdev_warn(bp->dev, "Unknown RX agg ring id 0x%x\n",
2327                                     data1);
2328                         goto async_event_process_exit;
2329                 }
2330                 rxr = bp->bnapi[grp_idx]->rx_ring;
2331                 bnxt_sched_reset_rxr(bp, rxr);
2332                 goto async_event_process_exit;
2333         }
2334         case ASYNC_EVENT_CMPL_EVENT_ID_ECHO_REQUEST: {
2335                 struct bnxt_fw_health *fw_health = bp->fw_health;
2336
2337                 netif_notice(bp, hw, bp->dev,
2338                              "Received firmware echo request, data1: 0x%x, data2: 0x%x\n",
2339                              data1, data2);
2340                 if (fw_health) {
2341                         fw_health->echo_req_data1 = data1;
2342                         fw_health->echo_req_data2 = data2;
2343                         set_bit(BNXT_FW_ECHO_REQUEST_SP_EVENT, &bp->sp_event);
2344                         break;
2345                 }
2346                 goto async_event_process_exit;
2347         }
2348         case ASYNC_EVENT_CMPL_EVENT_ID_PPS_TIMESTAMP: {
2349                 bnxt_ptp_pps_event(bp, data1, data2);
2350                 goto async_event_process_exit;
2351         }
2352         case ASYNC_EVENT_CMPL_EVENT_ID_ERROR_REPORT: {
2353                 bnxt_event_error_report(bp, data1, data2);
2354                 goto async_event_process_exit;
2355         }
2356         case ASYNC_EVENT_CMPL_EVENT_ID_PHC_UPDATE: {
2357                 switch (BNXT_EVENT_PHC_EVENT_TYPE(data1)) {
2358                 case ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_PHC_RTC_UPDATE:
2359                         if (BNXT_PTP_USE_RTC(bp)) {
2360                                 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
2361                                 u64 ns;
2362
2363                                 if (!ptp)
2364                                         goto async_event_process_exit;
2365
2366                                 spin_lock_bh(&ptp->ptp_lock);
2367                                 bnxt_ptp_update_current_time(bp);
2368                                 ns = (((u64)BNXT_EVENT_PHC_RTC_UPDATE(data1) <<
2369                                        BNXT_PHC_BITS) | ptp->current_time);
2370                                 bnxt_ptp_rtc_timecounter_init(ptp, ns);
2371                                 spin_unlock_bh(&ptp->ptp_lock);
2372                         }
2373                         break;
2374                 }
2375                 goto async_event_process_exit;
2376         }
2377         case ASYNC_EVENT_CMPL_EVENT_ID_DEFERRED_RESPONSE: {
2378                 u16 seq_id = le32_to_cpu(cmpl->event_data2) & 0xffff;
2379
2380                 hwrm_update_token(bp, seq_id, BNXT_HWRM_DEFERRED);
2381                 goto async_event_process_exit;
2382         }
2383         default:
2384                 goto async_event_process_exit;
2385         }
2386         __bnxt_queue_sp_work(bp);
2387 async_event_process_exit:
2388         return 0;
2389 }
2390
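/* Dispatch HWRM completions found on the completion ring: command done,
 * forwarded VF requests and asynchronous firmware events.
 */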
2391 static int bnxt_hwrm_handler(struct bnxt *bp, struct tx_cmp *txcmp)
2392 {
2393         u16 cmpl_type = TX_CMP_TYPE(txcmp), vf_id, seq_id;
2394         struct hwrm_cmpl *h_cmpl = (struct hwrm_cmpl *)txcmp;
2395         struct hwrm_fwd_req_cmpl *fwd_req_cmpl =
2396                                 (struct hwrm_fwd_req_cmpl *)txcmp;
2397
2398         switch (cmpl_type) {
2399         case CMPL_BASE_TYPE_HWRM_DONE:
2400                 seq_id = le16_to_cpu(h_cmpl->sequence_id);
2401                 hwrm_update_token(bp, seq_id, BNXT_HWRM_COMPLETE);
2402                 break;
2403
2404         case CMPL_BASE_TYPE_HWRM_FWD_REQ:
2405                 vf_id = le16_to_cpu(fwd_req_cmpl->source_id);
2406
2407                 if ((vf_id < bp->pf.first_vf_id) ||
2408                     (vf_id >= bp->pf.first_vf_id + bp->pf.active_vfs)) {
2409                         netdev_err(bp->dev, "Msg contains invalid VF id %x\n",
2410                                    vf_id);
2411                         return -EINVAL;
2412                 }
2413
2414                 set_bit(vf_id - bp->pf.first_vf_id, bp->pf.vf_event_bmap);
2415                 bnxt_queue_sp_work(bp, BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT);
2416                 break;
2417
2418         case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT:
2419                 bnxt_async_event_process(bp,
2420                                          (struct hwrm_async_event_cmpl *)txcmp);
2421                 break;
2422
2423         default:
2424                 break;
2425         }
2426
2427         return 0;
2428 }
2429
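/* MSI-X interrupt handler: each vector maps to one NAPI instance, so simply
 * schedule NAPI for the completion ring that raised the interrupt.
 */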
2430 static irqreturn_t bnxt_msix(int irq, void *dev_instance)
2431 {
2432         struct bnxt_napi *bnapi = dev_instance;
2433         struct bnxt *bp = bnapi->bp;
2434         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2435         u32 cons = RING_CMP(cpr->cp_raw_cons);
2436
2437         cpr->event_ctr++;
2438         prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]);
2439         napi_schedule(&bnapi->napi);
2440         return IRQ_HANDLED;
2441 }
2442
2443 static inline int bnxt_has_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
2444 {
2445         u32 raw_cons = cpr->cp_raw_cons;
2446         u16 cons = RING_CMP(raw_cons);
2447         struct tx_cmp *txcmp;
2448
2449         txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];
2450
2451         return TX_CMP_VALID(txcmp, raw_cons);
2452 }
2453
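/* Legacy INTx handler: verify that the interrupt really belongs to this
 * completion ring, disable the ring IRQ and schedule NAPI unless interrupts
 * are globally masked via intr_sem.
 */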
2454 static irqreturn_t bnxt_inta(int irq, void *dev_instance)
2455 {
2456         struct bnxt_napi *bnapi = dev_instance;
2457         struct bnxt *bp = bnapi->bp;
2458         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2459         u32 cons = RING_CMP(cpr->cp_raw_cons);
2460         u32 int_status;
2461
2462         prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]);
2463
2464         if (!bnxt_has_work(bp, cpr)) {
2465                 int_status = readl(bp->bar0 + BNXT_CAG_REG_LEGACY_INT_STATUS);
2466                 /* return if erroneous interrupt */
2467                 if (!(int_status & (0x10000 << cpr->cp_ring_struct.fw_ring_id)))
2468                         return IRQ_NONE;
2469         }
2470
2471         /* disable ring IRQ */
2472         BNXT_CP_DB_IRQ_DIS(cpr->cp_db.doorbell);
2473
2474         /* Return here if interrupt is shared and is disabled. */
2475         if (unlikely(atomic_read(&bp->intr_sem) != 0))
2476                 return IRQ_HANDLED;
2477
2478         napi_schedule(&bnapi->napi);
2479         return IRQ_HANDLED;
2480 }
2481
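/* Core NAPI poll loop: walk the completion ring, consuming tx completions,
 * rx/TPA completions and HWRM events until the ring is empty or the budget is
 * exhausted, then record how much tx/rx work was done for the caller.
 */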
2482 static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
2483                             int budget)
2484 {
2485         struct bnxt_napi *bnapi = cpr->bnapi;
2486         u32 raw_cons = cpr->cp_raw_cons;
2487         u32 cons;
2488         int tx_pkts = 0;
2489         int rx_pkts = 0;
2490         u8 event = 0;
2491         struct tx_cmp *txcmp;
2492
2493         cpr->has_more_work = 0;
2494         cpr->had_work_done = 1;
2495         while (1) {
2496                 int rc;
2497
2498                 cons = RING_CMP(raw_cons);
2499                 txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];
2500
2501                 if (!TX_CMP_VALID(txcmp, raw_cons))
2502                         break;
2503
2504                 /* The valid test of the entry must be done first before
2505                  * reading any further.
2506                  */
2507                 dma_rmb();
2508                 if (TX_CMP_TYPE(txcmp) == CMP_TYPE_TX_L2_CMP) {
2509                         tx_pkts++;
2510                         /* return full budget so NAPI will complete. */
2511                         if (unlikely(tx_pkts >= bp->tx_wake_thresh)) {
2512                                 rx_pkts = budget;
2513                                 raw_cons = NEXT_RAW_CMP(raw_cons);
2514                                 if (budget)
2515                                         cpr->has_more_work = 1;
2516                                 break;
2517                         }
2518                 } else if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
2519                         if (likely(budget))
2520                                 rc = bnxt_rx_pkt(bp, cpr, &raw_cons, &event);
2521                         else
2522                                 rc = bnxt_force_rx_discard(bp, cpr, &raw_cons,
2523                                                            &event);
2524                         if (likely(rc >= 0))
2525                                 rx_pkts += rc;
2526                         /* Increment rx_pkts when rc is -ENOMEM to count towards
2527                          * the NAPI budget.  Otherwise, we may potentially loop
2528                          * here forever if we consistently cannot allocate
2529                          * buffers.
2530                          */
2531                         else if (rc == -ENOMEM && budget)
2532                                 rx_pkts++;
2533                         else if (rc == -EBUSY)  /* partial completion */
2534                                 break;
2535                 } else if (unlikely((TX_CMP_TYPE(txcmp) ==
2536                                      CMPL_BASE_TYPE_HWRM_DONE) ||
2537                                     (TX_CMP_TYPE(txcmp) ==
2538                                      CMPL_BASE_TYPE_HWRM_FWD_REQ) ||
2539                                     (TX_CMP_TYPE(txcmp) ==
2540                                      CMPL_BASE_TYPE_HWRM_ASYNC_EVENT))) {
2541                         bnxt_hwrm_handler(bp, txcmp);
2542                 }
2543                 raw_cons = NEXT_RAW_CMP(raw_cons);
2544
2545                 if (rx_pkts && rx_pkts == budget) {
2546                         cpr->has_more_work = 1;
2547                         break;
2548                 }
2549         }
2550
2551         if (event & BNXT_REDIRECT_EVENT)
2552                 xdp_do_flush();
2553
2554         if (event & BNXT_TX_EVENT) {
2555                 struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
2556                 u16 prod = txr->tx_prod;
2557
2558                 /* Sync BD data before updating doorbell */
2559                 wmb();
2560
2561                 bnxt_db_write_relaxed(bp, &txr->tx_db, prod);
2562         }
2563
2564         cpr->cp_raw_cons = raw_cons;
2565         bnapi->tx_pkts += tx_pkts;
2566         bnapi->events |= event;
2567         return rx_pkts;
2568 }
2569
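/* After a poll pass, reclaim completed tx buffers and ring the rx/agg
 * doorbells for any events recorded by __bnxt_poll_work().
 */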
2570 static void __bnxt_poll_work_done(struct bnxt *bp, struct bnxt_napi *bnapi,
2571                                   int budget)
2572 {
2573         if (bnapi->tx_pkts && !bnapi->tx_fault)
2574                 bnapi->tx_int(bp, bnapi, budget);
2575
2576         if ((bnapi->events & BNXT_RX_EVENT) && !(bnapi->in_reset)) {
2577                 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
2578
2579                 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
2580         }
2581         if (bnapi->events & BNXT_AGG_EVENT) {
2582                 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
2583
2584                 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
2585         }
2586         bnapi->events = 0;
2587 }
2588
2589 static int bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
2590                           int budget)
2591 {
2592         struct bnxt_napi *bnapi = cpr->bnapi;
2593         int rx_pkts;
2594
2595         rx_pkts = __bnxt_poll_work(bp, cpr, budget);
2596
2597         /* ACK completion ring before freeing tx ring and producing new
2598          * buffers in rx/agg rings to prevent overflowing the completion
2599          * ring.
2600          */
2601         bnxt_db_cq(bp, &cpr->cp_db, cpr->cp_raw_cons);
2602
2603         __bnxt_poll_work_done(bp, bnapi, budget);
2604         return rx_pkts;
2605 }
2606
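/* NAPI poll for the special Nitro A0 ring: rx completions here are only used
 * to recycle buffers, so each one is forced into the error path of
 * bnxt_rx_pkt() instead of being delivered.
 */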
2607 static int bnxt_poll_nitroa0(struct napi_struct *napi, int budget)
2608 {
2609         struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
2610         struct bnxt *bp = bnapi->bp;
2611         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2612         struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
2613         struct tx_cmp *txcmp;
2614         struct rx_cmp_ext *rxcmp1;
2615         u32 cp_cons, tmp_raw_cons;
2616         u32 raw_cons = cpr->cp_raw_cons;
2617         u32 rx_pkts = 0;
2618         u8 event = 0;
2619
2620         while (1) {
2621                 int rc;
2622
2623                 cp_cons = RING_CMP(raw_cons);
2624                 txcmp = &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
2625
2626                 if (!TX_CMP_VALID(txcmp, raw_cons))
2627                         break;
2628
2629                 /* The validity test of the entry must be done before
2630                  * reading any further.
2631                  */
2632                 dma_rmb();
2633                 if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
2634                         tmp_raw_cons = NEXT_RAW_CMP(raw_cons);
2635                         cp_cons = RING_CMP(tmp_raw_cons);
2636                         rxcmp1 = (struct rx_cmp_ext *)
2637                           &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
2638
2639                         if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
2640                                 break;
2641
2642                         /* force an error to recycle the buffer */
2643                         rxcmp1->rx_cmp_cfa_code_errors_v2 |=
2644                                 cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR);
2645
2646                         rc = bnxt_rx_pkt(bp, cpr, &raw_cons, &event);
2647                         if (likely(rc == -EIO) && budget)
2648                                 rx_pkts++;
2649                         else if (rc == -EBUSY)  /* partial completion */
2650                                 break;
2651                 } else if (unlikely(TX_CMP_TYPE(txcmp) ==
2652                                     CMPL_BASE_TYPE_HWRM_DONE)) {
2653                         bnxt_hwrm_handler(bp, txcmp);
2654                 } else {
2655                         netdev_err(bp->dev,
2656                                    "Invalid completion received on special ring\n");
2657                 }
2658                 raw_cons = NEXT_RAW_CMP(raw_cons);
2659
2660                 if (rx_pkts == budget)
2661                         break;
2662         }
2663
2664         cpr->cp_raw_cons = raw_cons;
2665         BNXT_DB_CQ(&cpr->cp_db, cpr->cp_raw_cons);
2666         bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
2667
2668         if (event & BNXT_AGG_EVENT)
2669                 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
2670
2671         if (!bnxt_has_work(bp, cpr) && rx_pkts < budget) {
2672                 napi_complete_done(napi, rx_pkts);
2673                 BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
2674         }
2675         return rx_pkts;
2676 }
2677
2678 static int bnxt_poll(struct napi_struct *napi, int budget)
2679 {
2680         struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
2681         struct bnxt *bp = bnapi->bp;
2682         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2683         int work_done = 0;
2684
2685         if (unlikely(test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))) {
2686                 napi_complete(napi);
2687                 return 0;
2688         }
2689         while (1) {
2690                 work_done += bnxt_poll_work(bp, cpr, budget - work_done);
2691
2692                 if (work_done >= budget) {
2693                         if (!budget)
2694                                 BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
2695                         break;
2696                 }
2697
2698                 if (!bnxt_has_work(bp, cpr)) {
2699                         if (napi_complete_done(napi, work_done))
2700                                 BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
2701                         break;
2702                 }
2703         }
2704         if (bp->flags & BNXT_FLAG_DIM) {
2705                 struct dim_sample dim_sample = {};
2706
2707                 dim_update_sample(cpr->event_ctr,
2708                                   cpr->rx_packets,
2709                                   cpr->rx_bytes,
2710                                   &dim_sample);
2711                 net_dim(&cpr->dim, dim_sample);
2712         }
2713         return work_done;
2714 }
2715
2716 static int __bnxt_poll_cqs(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
2717 {
2718         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2719         int i, work_done = 0;
2720
2721         for (i = 0; i < 2; i++) {
2722                 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[i];
2723
2724                 if (cpr2) {
2725                         work_done += __bnxt_poll_work(bp, cpr2,
2726                                                       budget - work_done);
2727                         cpr->has_more_work |= cpr2->has_more_work;
2728                 }
2729         }
2730         return work_done;
2731 }
2732
2733 static void __bnxt_poll_cqs_done(struct bnxt *bp, struct bnxt_napi *bnapi,
2734                                  u64 dbr_type, int budget)
2735 {
2736         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2737         int i;
2738
2739         for (i = 0; i < 2; i++) {
2740                 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[i];
2741                 struct bnxt_db_info *db;
2742
2743                 if (cpr2 && cpr2->had_work_done) {
2744                         db = &cpr2->cp_db;
2745                         bnxt_writeq(bp, db->db_key64 | dbr_type |
2746                                     RING_CMP(cpr2->cp_raw_cons), db->doorbell);
2747                         cpr2->had_work_done = 0;
2748                 }
2749         }
2750         __bnxt_poll_work_done(bp, bnapi, budget);
2751 }
2752
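/* NAPI poll handler for P5 chips.  Any completion sub-ring that still had
 * work from the previous poll is serviced first, then the notification queue
 * (NQ) is walked: each CQ_NOTIFICATION entry names the RX or TX completion
 * sub-ring to poll.  Completion rings are acknowledged with DBR_TYPE_CQ while
 * work remains and re-armed with DBR_TYPE_CQ_ARMALL when NAPI completes.
 */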
2753 static int bnxt_poll_p5(struct napi_struct *napi, int budget)
2754 {
2755         struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
2756         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2757         struct bnxt_cp_ring_info *cpr_rx;
2758         u32 raw_cons = cpr->cp_raw_cons;
2759         struct bnxt *bp = bnapi->bp;
2760         struct nqe_cn *nqcmp;
2761         int work_done = 0;
2762         u32 cons;
2763
2764         if (unlikely(test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))) {
2765                 napi_complete(napi);
2766                 return 0;
2767         }
2768         if (cpr->has_more_work) {
2769                 cpr->has_more_work = 0;
2770                 work_done = __bnxt_poll_cqs(bp, bnapi, budget);
2771         }
2772         while (1) {
2773                 cons = RING_CMP(raw_cons);
2774                 nqcmp = &cpr->nq_desc_ring[CP_RING(cons)][CP_IDX(cons)];
2775
2776                 if (!NQ_CMP_VALID(nqcmp, raw_cons)) {
2777                         if (cpr->has_more_work)
2778                                 break;
2779
2780                         __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ_ARMALL,
2781                                              budget);
2782                         cpr->cp_raw_cons = raw_cons;
2783                         if (napi_complete_done(napi, work_done))
2784                                 BNXT_DB_NQ_ARM_P5(&cpr->cp_db,
2785                                                   cpr->cp_raw_cons);
2786                         goto poll_done;
2787                 }
2788
2789                 /* The validity test of the entry must be done before
2790                  * reading any further.
2791                  */
2792                 dma_rmb();
2793
2794                 if (nqcmp->type == cpu_to_le16(NQ_CN_TYPE_CQ_NOTIFICATION)) {
2795                         u32 idx = le32_to_cpu(nqcmp->cq_handle_low);
2796                         struct bnxt_cp_ring_info *cpr2;
2797
2798                         /* No more budget for RX work */
2799                         if (budget && work_done >= budget && idx == BNXT_RX_HDL)
2800                                 break;
2801
2802                         cpr2 = cpr->cp_ring_arr[idx];
2803                         work_done += __bnxt_poll_work(bp, cpr2,
2804                                                       budget - work_done);
2805                         cpr->has_more_work |= cpr2->has_more_work;
2806                 } else {
2807                         bnxt_hwrm_handler(bp, (struct tx_cmp *)nqcmp);
2808                 }
2809                 raw_cons = NEXT_RAW_CMP(raw_cons);
2810         }
2811         __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ, budget);
2812         if (raw_cons != cpr->cp_raw_cons) {
2813                 cpr->cp_raw_cons = raw_cons;
2814                 BNXT_DB_NQ_P5(&cpr->cp_db, raw_cons);
2815         }
2816 poll_done:
2817         cpr_rx = cpr->cp_ring_arr[BNXT_RX_HDL];
2818         if (cpr_rx && (bp->flags & BNXT_FLAG_DIM)) {
2819                 struct dim_sample dim_sample = {};
2820
2821                 dim_update_sample(cpr->event_ctr,
2822                                   cpr_rx->rx_packets,
2823                                   cpr_rx->rx_bytes,
2824                                   &dim_sample);
2825                 net_dim(&cpr->dim, dim_sample);
2826         }
2827         return work_done;
2828 }
2829
2830 static void bnxt_free_tx_skbs(struct bnxt *bp)
2831 {
2832         int i, max_idx;
2833         struct pci_dev *pdev = bp->pdev;
2834
2835         if (!bp->tx_ring)
2836                 return;
2837
2838         max_idx = bp->tx_nr_pages * TX_DESC_CNT;
2839         for (i = 0; i < bp->tx_nr_rings; i++) {
2840                 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
2841                 int j;
2842
2843                 if (!txr->tx_buf_ring)
2844                         continue;
2845
2846                 for (j = 0; j < max_idx;) {
2847                         struct bnxt_sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
2848                         struct sk_buff *skb;
2849                         int k, last;
2850
2851                         if (i < bp->tx_nr_rings_xdp &&
2852                             tx_buf->action == XDP_REDIRECT) {
2853                                 dma_unmap_single(&pdev->dev,
2854                                         dma_unmap_addr(tx_buf, mapping),
2855                                         dma_unmap_len(tx_buf, len),
2856                                         DMA_TO_DEVICE);
2857                                 xdp_return_frame(tx_buf->xdpf);
2858                                 tx_buf->action = 0;
2859                                 tx_buf->xdpf = NULL;
2860                                 j++;
2861                                 continue;
2862                         }
2863
2864                         skb = tx_buf->skb;
2865                         if (!skb) {
2866                                 j++;
2867                                 continue;
2868                         }
2869
2870                         tx_buf->skb = NULL;
2871
2872                         if (tx_buf->is_push) {
2873                                 dev_kfree_skb(skb);
2874                                 j += 2;
2875                                 continue;
2876                         }
2877
2878                         dma_unmap_single(&pdev->dev,
2879                                          dma_unmap_addr(tx_buf, mapping),
2880                                          skb_headlen(skb),
2881                                          DMA_TO_DEVICE);
2882
2883                         last = tx_buf->nr_frags;
2884                         j += 2;
2885                         for (k = 0; k < last; k++, j++) {
2886                                 int ring_idx = j & bp->tx_ring_mask;
2887                                 skb_frag_t *frag = &skb_shinfo(skb)->frags[k];
2888
2889                                 tx_buf = &txr->tx_buf_ring[ring_idx];
2890                                 dma_unmap_page(
2891                                         &pdev->dev,
2892                                         dma_unmap_addr(tx_buf, mapping),
2893                                         skb_frag_size(frag), DMA_TO_DEVICE);
2894                         }
2895                         dev_kfree_skb(skb);
2896                 }
2897                 netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, i));
2898         }
2899 }
2900
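/* Release every buffer still owned by one RX ring: in-flight TPA buffers,
 * normal RX buffers (returned to the page pool in page mode, otherwise
 * unmapped and freed) and aggregation pages; the TPA aggregation-index
 * bitmap is cleared last.
 */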
2901 static void bnxt_free_one_rx_ring_skbs(struct bnxt *bp, int ring_nr)
2902 {
2903         struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr];
2904         struct pci_dev *pdev = bp->pdev;
2905         struct bnxt_tpa_idx_map *map;
2906         int i, max_idx, max_agg_idx;
2907
2908         max_idx = bp->rx_nr_pages * RX_DESC_CNT;
2909         max_agg_idx = bp->rx_agg_nr_pages * RX_DESC_CNT;
2910         if (!rxr->rx_tpa)
2911                 goto skip_rx_tpa_free;
2912
2913         for (i = 0; i < bp->max_tpa; i++) {
2914                 struct bnxt_tpa_info *tpa_info = &rxr->rx_tpa[i];
2915                 u8 *data = tpa_info->data;
2916
2917                 if (!data)
2918                         continue;
2919
2920                 dma_unmap_single_attrs(&pdev->dev, tpa_info->mapping,
2921                                        bp->rx_buf_use_size, bp->rx_dir,
2922                                        DMA_ATTR_WEAK_ORDERING);
2923
2924                 tpa_info->data = NULL;
2925
2926                 skb_free_frag(data);
2927         }
2928
2929 skip_rx_tpa_free:
2930         if (!rxr->rx_buf_ring)
2931                 goto skip_rx_buf_free;
2932
2933         for (i = 0; i < max_idx; i++) {
2934                 struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[i];
2935                 dma_addr_t mapping = rx_buf->mapping;
2936                 void *data = rx_buf->data;
2937
2938                 if (!data)
2939                         continue;
2940
2941                 rx_buf->data = NULL;
2942                 if (BNXT_RX_PAGE_MODE(bp)) {
2943                         page_pool_recycle_direct(rxr->page_pool, data);
2944                 } else {
2945                         dma_unmap_single_attrs(&pdev->dev, mapping,
2946                                                bp->rx_buf_use_size, bp->rx_dir,
2947                                                DMA_ATTR_WEAK_ORDERING);
2948                         skb_free_frag(data);
2949                 }
2950         }
2951
2952 skip_rx_buf_free:
2953         if (!rxr->rx_agg_ring)
2954                 goto skip_rx_agg_free;
2955
2956         for (i = 0; i < max_agg_idx; i++) {
2957                 struct bnxt_sw_rx_agg_bd *rx_agg_buf = &rxr->rx_agg_ring[i];
2958                 struct page *page = rx_agg_buf->page;
2959
2960                 if (!page)
2961                         continue;
2962
2963                 rx_agg_buf->page = NULL;
2964                 __clear_bit(i, rxr->rx_agg_bmap);
2965
2966                 page_pool_recycle_direct(rxr->page_pool, page);
2967         }
2968
2969 skip_rx_agg_free:
2970         map = rxr->rx_tpa_idx_map;
2971         if (map)
2972                 memset(map->agg_idx_bmap, 0, sizeof(map->agg_idx_bmap));
2973 }
2974
2975 static void bnxt_free_rx_skbs(struct bnxt *bp)
2976 {
2977         int i;
2978
2979         if (!bp->rx_ring)
2980                 return;
2981
2982         for (i = 0; i < bp->rx_nr_rings; i++)
2983                 bnxt_free_one_rx_ring_skbs(bp, i);
2984 }
2985
2986 static void bnxt_free_skbs(struct bnxt *bp)
2987 {
2988         bnxt_free_tx_skbs(bp);
2989         bnxt_free_rx_skbs(bp);
2990 }
2991
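/* Pre-initialize a block of firmware context memory.  Without a specific
 * offset the whole block is filled with init_val; otherwise init_val is
 * written at that offset within every mem_init->size stride of the block.
 */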
2992 static void bnxt_init_ctx_mem(struct bnxt_mem_init *mem_init, void *p, int len)
2993 {
2994         u8 init_val = mem_init->init_val;
2995         u16 offset = mem_init->offset;
2996         u8 *p2 = p;
2997         int i;
2998
2999         if (!init_val)
3000                 return;
3001         if (offset == BNXT_MEM_INVALID_OFFSET) {
3002                 memset(p, init_val, len);
3003                 return;
3004         }
3005         for (i = 0; i < len; i += mem_init->size)
3006                 *(p2 + i + offset) = init_val;
3007 }
3008
3009 static void bnxt_free_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
3010 {
3011         struct pci_dev *pdev = bp->pdev;
3012         int i;
3013
3014         if (!rmem->pg_arr)
3015                 goto skip_pages;
3016
3017         for (i = 0; i < rmem->nr_pages; i++) {
3018                 if (!rmem->pg_arr[i])
3019                         continue;
3020
3021                 dma_free_coherent(&pdev->dev, rmem->page_size,
3022                                   rmem->pg_arr[i], rmem->dma_arr[i]);
3023
3024                 rmem->pg_arr[i] = NULL;
3025         }
3026 skip_pages:
3027         if (rmem->pg_tbl) {
3028                 size_t pg_tbl_size = rmem->nr_pages * 8;
3029
3030                 if (rmem->flags & BNXT_RMEM_USE_FULL_PAGE_FLAG)
3031                         pg_tbl_size = rmem->page_size;
3032                 dma_free_coherent(&pdev->dev, pg_tbl_size,
3033                                   rmem->pg_tbl, rmem->pg_tbl_map);
3034                 rmem->pg_tbl = NULL;
3035         }
3036         if (rmem->vmem_size && *rmem->vmem) {
3037                 vfree(*rmem->vmem);
3038                 *rmem->vmem = NULL;
3039         }
3040 }
3041
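/* Allocate the DMA-coherent pages backing a ring described by @rmem.  Rings
 * that span multiple pages (or use an indirect level) also get a page table
 * whose entries carry the PTE valid/next-to-last/last bits the hardware
 * expects, depending on rmem->flags.  An optional vzalloc'ed area (vmem)
 * holds the software state that shadows the ring.
 */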
3042 static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
3043 {
3044         struct pci_dev *pdev = bp->pdev;
3045         u64 valid_bit = 0;
3046         int i;
3047
3048         if (rmem->flags & (BNXT_RMEM_VALID_PTE_FLAG | BNXT_RMEM_RING_PTE_FLAG))
3049                 valid_bit = PTU_PTE_VALID;
3050         if ((rmem->nr_pages > 1 || rmem->depth > 0) && !rmem->pg_tbl) {
3051                 size_t pg_tbl_size = rmem->nr_pages * 8;
3052
3053                 if (rmem->flags & BNXT_RMEM_USE_FULL_PAGE_FLAG)
3054                         pg_tbl_size = rmem->page_size;
3055                 rmem->pg_tbl = dma_alloc_coherent(&pdev->dev, pg_tbl_size,
3056                                                   &rmem->pg_tbl_map,
3057                                                   GFP_KERNEL);
3058                 if (!rmem->pg_tbl)
3059                         return -ENOMEM;
3060         }
3061
3062         for (i = 0; i < rmem->nr_pages; i++) {
3063                 u64 extra_bits = valid_bit;
3064
3065                 rmem->pg_arr[i] = dma_alloc_coherent(&pdev->dev,
3066                                                      rmem->page_size,
3067                                                      &rmem->dma_arr[i],
3068                                                      GFP_KERNEL);
3069                 if (!rmem->pg_arr[i])
3070                         return -ENOMEM;
3071
3072                 if (rmem->mem_init)
3073                         bnxt_init_ctx_mem(rmem->mem_init, rmem->pg_arr[i],
3074                                           rmem->page_size);
3075                 if (rmem->nr_pages > 1 || rmem->depth > 0) {
3076                         if (i == rmem->nr_pages - 2 &&
3077                             (rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
3078                                 extra_bits |= PTU_PTE_NEXT_TO_LAST;
3079                         else if (i == rmem->nr_pages - 1 &&
3080                                  (rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
3081                                 extra_bits |= PTU_PTE_LAST;
3082                         rmem->pg_tbl[i] =
3083                                 cpu_to_le64(rmem->dma_arr[i] | extra_bits);
3084                 }
3085         }
3086
3087         if (rmem->vmem_size) {
3088                 *rmem->vmem = vzalloc(rmem->vmem_size);
3089                 if (!(*rmem->vmem))
3090                         return -ENOMEM;
3091         }
3092         return 0;
3093 }
3094
3095 static void bnxt_free_tpa_info(struct bnxt *bp)
3096 {
3097         int i, j;
3098
3099         for (i = 0; i < bp->rx_nr_rings; i++) {
3100                 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
3101
3102                 kfree(rxr->rx_tpa_idx_map);
3103                 rxr->rx_tpa_idx_map = NULL;
3104                 if (rxr->rx_tpa) {
3105                         for (j = 0; j < bp->max_tpa; j++) {
3106                                 kfree(rxr->rx_tpa[j].agg_arr);
3107                                 rxr->rx_tpa[j].agg_arr = NULL;
3108                         }
3109                 }
3110                 kfree(rxr->rx_tpa);
3111                 rxr->rx_tpa = NULL;
3112         }
3113 }
3114
3115 static int bnxt_alloc_tpa_info(struct bnxt *bp)
3116 {
3117         int i, j;
3118
3119         bp->max_tpa = MAX_TPA;
3120         if (bp->flags & BNXT_FLAG_CHIP_P5) {
3121                 if (!bp->max_tpa_v2)
3122                         return 0;
3123                 bp->max_tpa = max_t(u16, bp->max_tpa_v2, MAX_TPA_P5);
3124         }
3125
3126         for (i = 0; i < bp->rx_nr_rings; i++) {
3127                 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
3128                 struct rx_agg_cmp *agg;
3129
3130                 rxr->rx_tpa = kcalloc(bp->max_tpa, sizeof(struct bnxt_tpa_info),
3131                                       GFP_KERNEL);
3132                 if (!rxr->rx_tpa)
3133                         return -ENOMEM;
3134
3135                 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
3136                         continue;
3137                 for (j = 0; j < bp->max_tpa; j++) {
3138                         agg = kcalloc(MAX_SKB_FRAGS, sizeof(*agg), GFP_KERNEL);
3139                         if (!agg)
3140                                 return -ENOMEM;
3141                         rxr->rx_tpa[j].agg_arr = agg;
3142                 }
3143                 rxr->rx_tpa_idx_map = kzalloc(sizeof(*rxr->rx_tpa_idx_map),
3144                                               GFP_KERNEL);
3145                 if (!rxr->rx_tpa_idx_map)
3146                         return -ENOMEM;
3147         }
3148         return 0;
3149 }
3150
3151 static void bnxt_free_rx_rings(struct bnxt *bp)
3152 {
3153         int i;
3154
3155         if (!bp->rx_ring)
3156                 return;
3157
3158         bnxt_free_tpa_info(bp);
3159         for (i = 0; i < bp->rx_nr_rings; i++) {
3160                 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
3161                 struct bnxt_ring_struct *ring;
3162
3163                 if (rxr->xdp_prog)
3164                         bpf_prog_put(rxr->xdp_prog);
3165
3166                 if (xdp_rxq_info_is_reg(&rxr->xdp_rxq))
3167                         xdp_rxq_info_unreg(&rxr->xdp_rxq);
3168
3169                 page_pool_destroy(rxr->page_pool);
3170                 rxr->page_pool = NULL;
3171
3172                 kfree(rxr->rx_agg_bmap);
3173                 rxr->rx_agg_bmap = NULL;
3174
3175                 ring = &rxr->rx_ring_struct;
3176                 bnxt_free_ring(bp, &ring->ring_mem);
3177
3178                 ring = &rxr->rx_agg_ring_struct;
3179                 bnxt_free_ring(bp, &ring->ring_mem);
3180         }
3181 }
3182
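/* Create a page_pool for one RX ring, sized for the aggregation ring (plus
 * the RX ring itself in page mode).  The pool takes care of DMA mapping and
 * device sync; page fragments are allowed when the system page size is
 * larger than BNXT_RX_PAGE_SIZE.
 */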
3183 static int bnxt_alloc_rx_page_pool(struct bnxt *bp,
3184                                    struct bnxt_rx_ring_info *rxr)
3185 {
3186         struct page_pool_params pp = { 0 };
3187
3188         pp.pool_size = bp->rx_agg_ring_size;
3189         if (BNXT_RX_PAGE_MODE(bp))
3190                 pp.pool_size += bp->rx_ring_size;
3191         pp.nid = dev_to_node(&bp->pdev->dev);
3192         pp.napi = &rxr->bnapi->napi;
3193         pp.dev = &bp->pdev->dev;
3194         pp.dma_dir = bp->rx_dir;
3195         pp.max_len = PAGE_SIZE;
3196         pp.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
3197         if (PAGE_SIZE > BNXT_RX_PAGE_SIZE)
3198                 pp.flags |= PP_FLAG_PAGE_FRAG;
3199
3200         rxr->page_pool = page_pool_create(&pp);
3201         if (IS_ERR(rxr->page_pool)) {
3202                 int err = PTR_ERR(rxr->page_pool);
3203
3204                 rxr->page_pool = NULL;
3205                 return err;
3206         }
3207         return 0;
3208 }
3209
3210 static int bnxt_alloc_rx_rings(struct bnxt *bp)
3211 {
3212         int i, rc = 0, agg_rings = 0;
3213
3214         if (!bp->rx_ring)
3215                 return -ENOMEM;
3216
3217         if (bp->flags & BNXT_FLAG_AGG_RINGS)
3218                 agg_rings = 1;
3219
3220         for (i = 0; i < bp->rx_nr_rings; i++) {
3221                 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
3222                 struct bnxt_ring_struct *ring;
3223
3224                 ring = &rxr->rx_ring_struct;
3225
3226                 rc = bnxt_alloc_rx_page_pool(bp, rxr);
3227                 if (rc)
3228                         return rc;
3229
3230                 rc = xdp_rxq_info_reg(&rxr->xdp_rxq, bp->dev, i, 0);
3231                 if (rc < 0)
3232                         return rc;
3233
3234                 rc = xdp_rxq_info_reg_mem_model(&rxr->xdp_rxq,
3235                                                 MEM_TYPE_PAGE_POOL,
3236                                                 rxr->page_pool);
3237                 if (rc) {
3238                         xdp_rxq_info_unreg(&rxr->xdp_rxq);
3239                         return rc;
3240                 }
3241
3242                 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
3243                 if (rc)
3244                         return rc;
3245
3246                 ring->grp_idx = i;
3247                 if (agg_rings) {
3248                         u16 mem_size;
3249
3250                         ring = &rxr->rx_agg_ring_struct;
3251                         rc = bnxt_alloc_ring(bp, &ring->ring_mem);
3252                         if (rc)
3253                                 return rc;
3254
3255                         ring->grp_idx = i;
3256                         rxr->rx_agg_bmap_size = bp->rx_agg_ring_mask + 1;
3257                         mem_size = rxr->rx_agg_bmap_size / 8;
3258                         rxr->rx_agg_bmap = kzalloc(mem_size, GFP_KERNEL);
3259                         if (!rxr->rx_agg_bmap)
3260                                 return -ENOMEM;
3261                 }
3262         }
3263         if (bp->flags & BNXT_FLAG_TPA)
3264                 rc = bnxt_alloc_tpa_info(bp);
3265         return rc;
3266 }
3267
3268 static void bnxt_free_tx_rings(struct bnxt *bp)
3269 {
3270         int i;
3271         struct pci_dev *pdev = bp->pdev;
3272
3273         if (!bp->tx_ring)
3274                 return;
3275
3276         for (i = 0; i < bp->tx_nr_rings; i++) {
3277                 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
3278                 struct bnxt_ring_struct *ring;
3279
3280                 if (txr->tx_push) {
3281                         dma_free_coherent(&pdev->dev, bp->tx_push_size,
3282                                           txr->tx_push, txr->tx_push_mapping);
3283                         txr->tx_push = NULL;
3284                 }
3285
3286                 ring = &txr->tx_ring_struct;
3287
3288                 bnxt_free_ring(bp, &ring->ring_mem);
3289         }
3290 }
3291
3292 static int bnxt_alloc_tx_rings(struct bnxt *bp)
3293 {
3294         int i, j, rc;
3295         struct pci_dev *pdev = bp->pdev;
3296
3297         bp->tx_push_size = 0;
3298         if (bp->tx_push_thresh) {
3299                 int push_size;
3300
3301                 push_size  = L1_CACHE_ALIGN(sizeof(struct tx_push_bd) +
3302                                         bp->tx_push_thresh);
3303
3304                 if (push_size > 256) {
3305                         push_size = 0;
3306                         bp->tx_push_thresh = 0;
3307                 }
3308
3309                 bp->tx_push_size = push_size;
3310         }
3311
3312         for (i = 0, j = 0; i < bp->tx_nr_rings; i++) {
3313                 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
3314                 struct bnxt_ring_struct *ring;
3315                 u8 qidx;
3316
3317                 ring = &txr->tx_ring_struct;
3318
3319                 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
3320                 if (rc)
3321                         return rc;
3322
3323                 ring->grp_idx = txr->bnapi->index;
3324                 if (bp->tx_push_size) {
3325                         dma_addr_t mapping;
3326
3327                         /* One pre-allocated DMA buffer to back up
3328                          * the TX push operation
3329                          */
3330                         txr->tx_push = dma_alloc_coherent(&pdev->dev,
3331                                                 bp->tx_push_size,
3332                                                 &txr->tx_push_mapping,
3333                                                 GFP_KERNEL);
3334
3335                         if (!txr->tx_push)
3336                                 return -ENOMEM;
3337
3338                         mapping = txr->tx_push_mapping +
3339                                 sizeof(struct tx_push_bd);
3340                         txr->data_mapping = cpu_to_le64(mapping);
3341                 }
3342                 qidx = bp->tc_to_qidx[j];
3343                 ring->queue_id = bp->q_info[qidx].queue_id;
3344                 spin_lock_init(&txr->xdp_tx_lock);
3345                 if (i < bp->tx_nr_rings_xdp)
3346                         continue;
3347                 if (i % bp->tx_nr_rings_per_tc == (bp->tx_nr_rings_per_tc - 1))
3348                         j++;
3349         }
3350         return 0;
3351 }
3352
3353 static void bnxt_free_cp_arrays(struct bnxt_cp_ring_info *cpr)
3354 {
3355         struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
3356
3357         kfree(cpr->cp_desc_ring);
3358         cpr->cp_desc_ring = NULL;
3359         ring->ring_mem.pg_arr = NULL;
3360         kfree(cpr->cp_desc_mapping);
3361         cpr->cp_desc_mapping = NULL;
3362         ring->ring_mem.dma_arr = NULL;
3363 }
3364
3365 static int bnxt_alloc_cp_arrays(struct bnxt_cp_ring_info *cpr, int n)
3366 {
3367         cpr->cp_desc_ring = kcalloc(n, sizeof(*cpr->cp_desc_ring), GFP_KERNEL);
3368         if (!cpr->cp_desc_ring)
3369                 return -ENOMEM;
3370         cpr->cp_desc_mapping = kcalloc(n, sizeof(*cpr->cp_desc_mapping),
3371                                        GFP_KERNEL);
3372         if (!cpr->cp_desc_mapping)
3373                 return -ENOMEM;
3374         return 0;
3375 }
3376
3377 static void bnxt_free_all_cp_arrays(struct bnxt *bp)
3378 {
3379         int i;
3380
3381         if (!bp->bnapi)
3382                 return;
3383         for (i = 0; i < bp->cp_nr_rings; i++) {
3384                 struct bnxt_napi *bnapi = bp->bnapi[i];
3385
3386                 if (!bnapi)
3387                         continue;
3388                 bnxt_free_cp_arrays(&bnapi->cp_ring);
3389         }
3390 }
3391
3392 static int bnxt_alloc_all_cp_arrays(struct bnxt *bp)
3393 {
3394         int i, n = bp->cp_nr_pages;
3395
3396         for (i = 0; i < bp->cp_nr_rings; i++) {
3397                 struct bnxt_napi *bnapi = bp->bnapi[i];
3398                 int rc;
3399
3400                 if (!bnapi)
3401                         continue;
3402                 rc = bnxt_alloc_cp_arrays(&bnapi->cp_ring, n);
3403                 if (rc)
3404                         return rc;
3405         }
3406         return 0;
3407 }
3408
3409 static void bnxt_free_cp_rings(struct bnxt *bp)
3410 {
3411         int i;
3412
3413         if (!bp->bnapi)
3414                 return;
3415
3416         for (i = 0; i < bp->cp_nr_rings; i++) {
3417                 struct bnxt_napi *bnapi = bp->bnapi[i];
3418                 struct bnxt_cp_ring_info *cpr;
3419                 struct bnxt_ring_struct *ring;
3420                 int j;
3421
3422                 if (!bnapi)
3423                         continue;
3424
3425                 cpr = &bnapi->cp_ring;
3426                 ring = &cpr->cp_ring_struct;
3427
3428                 bnxt_free_ring(bp, &ring->ring_mem);
3429
3430                 for (j = 0; j < 2; j++) {
3431                         struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];
3432
3433                         if (cpr2) {
3434                                 ring = &cpr2->cp_ring_struct;
3435                                 bnxt_free_ring(bp, &ring->ring_mem);
3436                                 bnxt_free_cp_arrays(cpr2);
3437                                 kfree(cpr2);
3438                                 cpr->cp_ring_arr[j] = NULL;
3439                         }
3440                 }
3441         }
3442 }
3443
3444 static struct bnxt_cp_ring_info *bnxt_alloc_cp_sub_ring(struct bnxt *bp)
3445 {
3446         struct bnxt_ring_mem_info *rmem;
3447         struct bnxt_ring_struct *ring;
3448         struct bnxt_cp_ring_info *cpr;
3449         int rc;
3450
3451         cpr = kzalloc(sizeof(*cpr), GFP_KERNEL);
3452         if (!cpr)
3453                 return NULL;
3454
3455         rc = bnxt_alloc_cp_arrays(cpr, bp->cp_nr_pages);
3456         if (rc) {
3457                 bnxt_free_cp_arrays(cpr);
3458                 kfree(cpr);
3459                 return NULL;
3460         }
3461         ring = &cpr->cp_ring_struct;
3462         rmem = &ring->ring_mem;
3463         rmem->nr_pages = bp->cp_nr_pages;
3464         rmem->page_size = HW_CMPD_RING_SIZE;
3465         rmem->pg_arr = (void **)cpr->cp_desc_ring;
3466         rmem->dma_arr = cpr->cp_desc_mapping;
3467         rmem->flags = BNXT_RMEM_RING_PTE_FLAG;
3468         rc = bnxt_alloc_ring(bp, rmem);
3469         if (rc) {
3470                 bnxt_free_ring(bp, rmem);
3471                 bnxt_free_cp_arrays(cpr);
3472                 kfree(cpr);
3473                 cpr = NULL;
3474         }
3475         return cpr;
3476 }
3477
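/* Allocate the per-NAPI completion ring(s).  On P5 chips each NAPI
 * additionally gets up to two completion sub-rings: one for RX (BNXT_RX_HDL)
 * and one for TX (BNXT_TX_HDL); with shared rings a single NAPI can carry
 * both.  map_idx is shifted past the MSI-X vectors reserved for the ULP
 * (RDMA) driver.
 */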
3478 static int bnxt_alloc_cp_rings(struct bnxt *bp)
3479 {
3480         bool sh = !!(bp->flags & BNXT_FLAG_SHARED_RINGS);
3481         int i, rc, ulp_base_vec, ulp_msix;
3482
3483         ulp_msix = bnxt_get_ulp_msix_num(bp);
3484         ulp_base_vec = bnxt_get_ulp_msix_base(bp);
3485         for (i = 0; i < bp->cp_nr_rings; i++) {
3486                 struct bnxt_napi *bnapi = bp->bnapi[i];
3487                 struct bnxt_cp_ring_info *cpr;
3488                 struct bnxt_ring_struct *ring;
3489
3490                 if (!bnapi)
3491                         continue;
3492
3493                 cpr = &bnapi->cp_ring;
3494                 cpr->bnapi = bnapi;
3495                 ring = &cpr->cp_ring_struct;
3496
3497                 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
3498                 if (rc)
3499                         return rc;
3500
3501                 if (ulp_msix && i >= ulp_base_vec)
3502                         ring->map_idx = i + ulp_msix;
3503                 else
3504                         ring->map_idx = i;
3505
3506                 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
3507                         continue;
3508
3509                 if (i < bp->rx_nr_rings) {
3510                         struct bnxt_cp_ring_info *cpr2 =
3511                                 bnxt_alloc_cp_sub_ring(bp);
3512
3513                         cpr->cp_ring_arr[BNXT_RX_HDL] = cpr2;
3514                         if (!cpr2)
3515                                 return -ENOMEM;
3516                         cpr2->bnapi = bnapi;
3517                 }
3518                 if ((sh && i < bp->tx_nr_rings) ||
3519                     (!sh && i >= bp->rx_nr_rings)) {
3520                         struct bnxt_cp_ring_info *cpr2 =
3521                                 bnxt_alloc_cp_sub_ring(bp);
3522
3523                         cpr->cp_ring_arr[BNXT_TX_HDL] = cpr2;
3524                         if (!cpr2)
3525                                 return -ENOMEM;
3526                         cpr2->bnapi = bnapi;
3527                 }
3528         }
3529         return 0;
3530 }
3531
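/* Point each ring's generic ring_mem descriptor at its page count, page
 * size, descriptor/DMA arrays and (for RX/TX rings) the vmalloc'ed software
 * buffer ring, so that bnxt_alloc_ring() and bnxt_free_ring() can manage
 * every ring type uniformly.
 */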
3532 static void bnxt_init_ring_struct(struct bnxt *bp)
3533 {
3534         int i;
3535
3536         for (i = 0; i < bp->cp_nr_rings; i++) {
3537                 struct bnxt_napi *bnapi = bp->bnapi[i];
3538                 struct bnxt_ring_mem_info *rmem;
3539                 struct bnxt_cp_ring_info *cpr;
3540                 struct bnxt_rx_ring_info *rxr;
3541                 struct bnxt_tx_ring_info *txr;
3542                 struct bnxt_ring_struct *ring;
3543
3544                 if (!bnapi)
3545                         continue;
3546
3547                 cpr = &bnapi->cp_ring;
3548                 ring = &cpr->cp_ring_struct;
3549                 rmem = &ring->ring_mem;
3550                 rmem->nr_pages = bp->cp_nr_pages;
3551                 rmem->page_size = HW_CMPD_RING_SIZE;
3552                 rmem->pg_arr = (void **)cpr->cp_desc_ring;
3553                 rmem->dma_arr = cpr->cp_desc_mapping;
3554                 rmem->vmem_size = 0;
3555
3556                 rxr = bnapi->rx_ring;
3557                 if (!rxr)
3558                         goto skip_rx;
3559
3560                 ring = &rxr->rx_ring_struct;
3561                 rmem = &ring->ring_mem;
3562                 rmem->nr_pages = bp->rx_nr_pages;
3563                 rmem->page_size = HW_RXBD_RING_SIZE;
3564                 rmem->pg_arr = (void **)rxr->rx_desc_ring;
3565                 rmem->dma_arr = rxr->rx_desc_mapping;
3566                 rmem->vmem_size = SW_RXBD_RING_SIZE * bp->rx_nr_pages;
3567                 rmem->vmem = (void **)&rxr->rx_buf_ring;
3568
3569                 ring = &rxr->rx_agg_ring_struct;
3570                 rmem = &ring->ring_mem;
3571                 rmem->nr_pages = bp->rx_agg_nr_pages;
3572                 rmem->page_size = HW_RXBD_RING_SIZE;
3573                 rmem->pg_arr = (void **)rxr->rx_agg_desc_ring;
3574                 rmem->dma_arr = rxr->rx_agg_desc_mapping;
3575                 rmem->vmem_size = SW_RXBD_AGG_RING_SIZE * bp->rx_agg_nr_pages;
3576                 rmem->vmem = (void **)&rxr->rx_agg_ring;
3577
3578 skip_rx:
3579                 txr = bnapi->tx_ring;
3580                 if (!txr)
3581                         continue;
3582
3583                 ring = &txr->tx_ring_struct;
3584                 rmem = &ring->ring_mem;
3585                 rmem->nr_pages = bp->tx_nr_pages;
3586                 rmem->page_size = HW_RXBD_RING_SIZE;
3587                 rmem->pg_arr = (void **)txr->tx_desc_ring;
3588                 rmem->dma_arr = txr->tx_desc_mapping;
3589                 rmem->vmem_size = SW_TXBD_RING_SIZE * bp->tx_nr_pages;
3590                 rmem->vmem = (void **)&txr->tx_buf_ring;
3591         }
3592 }
3593
3594 static void bnxt_init_rxbd_pages(struct bnxt_ring_struct *ring, u32 type)
3595 {
3596         int i;
3597         u32 prod;
3598         struct rx_bd **rx_buf_ring;
3599
3600         rx_buf_ring = (struct rx_bd **)ring->ring_mem.pg_arr;
3601         for (i = 0, prod = 0; i < ring->ring_mem.nr_pages; i++) {
3602                 int j;
3603                 struct rx_bd *rxbd;
3604
3605                 rxbd = rx_buf_ring[i];
3606                 if (!rxbd)
3607                         continue;
3608
3609                 for (j = 0; j < RX_DESC_CNT; j++, rxbd++, prod++) {
3610                         rxbd->rx_bd_len_flags_type = cpu_to_le32(type);
3611                         rxbd->rx_bd_opaque = prod;
3612                 }
3613         }
3614 }
3615
3616 static int bnxt_alloc_one_rx_ring(struct bnxt *bp, int ring_nr)
3617 {
3618         struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr];
3619         struct net_device *dev = bp->dev;
3620         u32 prod;
3621         int i;
3622
3623         prod = rxr->rx_prod;
3624         for (i = 0; i < bp->rx_ring_size; i++) {
3625                 if (bnxt_alloc_rx_data(bp, rxr, prod, GFP_KERNEL)) {
3626                         netdev_warn(dev, "init'ed rx ring %d with %d/%d skbs only\n",
3627                                     ring_nr, i, bp->rx_ring_size);
3628                         break;
3629                 }
3630                 prod = NEXT_RX(prod);
3631         }
3632         rxr->rx_prod = prod;
3633
3634         if (!(bp->flags & BNXT_FLAG_AGG_RINGS))
3635                 return 0;
3636
3637         prod = rxr->rx_agg_prod;
3638         for (i = 0; i < bp->rx_agg_ring_size; i++) {
3639                 if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_KERNEL)) {
3640                         netdev_warn(dev, "init'ed rx ring %d with %d/%d pages only\n",
3641                                     ring_nr, i, bp->rx_agg_ring_size);
3642                         break;
3643                 }
3644                 prod = NEXT_RX_AGG(prod);
3645         }
3646         rxr->rx_agg_prod = prod;
3647
3648         if (rxr->rx_tpa) {
3649                 dma_addr_t mapping;
3650                 u8 *data;
3651
3652                 for (i = 0; i < bp->max_tpa; i++) {
3653                         data = __bnxt_alloc_rx_frag(bp, &mapping, GFP_KERNEL);
3654                         if (!data)
3655                                 return -ENOMEM;
3656
3657                         rxr->rx_tpa[i].data = data;
3658                         rxr->rx_tpa[i].data_ptr = data + bp->rx_offset;
3659                         rxr->rx_tpa[i].mapping = mapping;
3660                 }
3661         }
3662         return 0;
3663 }
3664
3665 static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr)
3666 {
3667         struct bnxt_rx_ring_info *rxr;
3668         struct bnxt_ring_struct *ring;
3669         u32 type;
3670
3671         type = (bp->rx_buf_use_size << RX_BD_LEN_SHIFT) |
3672                 RX_BD_TYPE_RX_PACKET_BD | RX_BD_FLAGS_EOP;
3673
3674         if (NET_IP_ALIGN == 2)
3675                 type |= RX_BD_FLAGS_SOP;
3676
3677         rxr = &bp->rx_ring[ring_nr];
3678         ring = &rxr->rx_ring_struct;
3679         bnxt_init_rxbd_pages(ring, type);
3680
3681         if (BNXT_RX_PAGE_MODE(bp) && bp->xdp_prog) {
3682                 bpf_prog_add(bp->xdp_prog, 1);
3683                 rxr->xdp_prog = bp->xdp_prog;
3684         }
3685         ring->fw_ring_id = INVALID_HW_RING_ID;
3686
3687         ring = &rxr->rx_agg_ring_struct;
3688         ring->fw_ring_id = INVALID_HW_RING_ID;
3689
3690         if ((bp->flags & BNXT_FLAG_AGG_RINGS)) {
3691                 type = ((u32)BNXT_RX_PAGE_SIZE << RX_BD_LEN_SHIFT) |
3692                         RX_BD_TYPE_RX_AGG_BD | RX_BD_FLAGS_SOP;
3693
3694                 bnxt_init_rxbd_pages(ring, type);
3695         }
3696
3697         return bnxt_alloc_one_rx_ring(bp, ring_nr);
3698 }
3699
3700 static void bnxt_init_cp_rings(struct bnxt *bp)
3701 {
3702         int i, j;
3703
3704         for (i = 0; i < bp->cp_nr_rings; i++) {
3705                 struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
3706                 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
3707
3708                 ring->fw_ring_id = INVALID_HW_RING_ID;
3709                 cpr->rx_ring_coal.coal_ticks = bp->rx_coal.coal_ticks;
3710                 cpr->rx_ring_coal.coal_bufs = bp->rx_coal.coal_bufs;
3711                 for (j = 0; j < 2; j++) {
3712                         struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];
3713
3714                         if (!cpr2)
3715                                 continue;
3716
3717                         ring = &cpr2->cp_ring_struct;
3718                         ring->fw_ring_id = INVALID_HW_RING_ID;
3719                         cpr2->rx_ring_coal.coal_ticks = bp->rx_coal.coal_ticks;
3720                         cpr2->rx_ring_coal.coal_bufs = bp->rx_coal.coal_bufs;
3721                 }
3722         }
3723 }
3724
3725 static int bnxt_init_rx_rings(struct bnxt *bp)
3726 {
3727         int i, rc = 0;
3728
3729         if (BNXT_RX_PAGE_MODE(bp)) {
3730                 bp->rx_offset = NET_IP_ALIGN + XDP_PACKET_HEADROOM;
3731                 bp->rx_dma_offset = XDP_PACKET_HEADROOM;
3732         } else {
3733                 bp->rx_offset = BNXT_RX_OFFSET;
3734                 bp->rx_dma_offset = BNXT_RX_DMA_OFFSET;
3735         }
3736
3737         for (i = 0; i < bp->rx_nr_rings; i++) {
3738                 rc = bnxt_init_one_rx_ring(bp, i);
3739                 if (rc)
3740                         break;
3741         }
3742
3743         return rc;
3744 }
3745
3746 static int bnxt_init_tx_rings(struct bnxt *bp)
3747 {
3748         u16 i;
3749
3750         bp->tx_wake_thresh = max_t(int, bp->tx_ring_size / 2,
3751                                    BNXT_MIN_TX_DESC_CNT);
3752
3753         for (i = 0; i < bp->tx_nr_rings; i++) {
3754                 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
3755                 struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
3756
3757                 ring->fw_ring_id = INVALID_HW_RING_ID;
3758         }
3759
3760         return 0;
3761 }
3762
3763 static void bnxt_free_ring_grps(struct bnxt *bp)
3764 {
3765         kfree(bp->grp_info);
3766         bp->grp_info = NULL;
3767 }
3768
3769 static int bnxt_init_ring_grps(struct bnxt *bp, bool irq_re_init)
3770 {
3771         int i;
3772
3773         if (irq_re_init) {
3774                 bp->grp_info = kcalloc(bp->cp_nr_rings,
3775                                        sizeof(struct bnxt_ring_grp_info),
3776                                        GFP_KERNEL);
3777                 if (!bp->grp_info)
3778                         return -ENOMEM;
3779         }
3780         for (i = 0; i < bp->cp_nr_rings; i++) {
3781                 if (irq_re_init)
3782                         bp->grp_info[i].fw_stats_ctx = INVALID_HW_RING_ID;
3783                 bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
3784                 bp->grp_info[i].rx_fw_ring_id = INVALID_HW_RING_ID;
3785                 bp->grp_info[i].agg_fw_ring_id = INVALID_HW_RING_ID;
3786                 bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
3787         }
3788         return 0;
3789 }
3790
3791 static void bnxt_free_vnics(struct bnxt *bp)
3792 {
3793         kfree(bp->vnic_info);
3794         bp->vnic_info = NULL;
3795         bp->nr_vnics = 0;
3796 }
3797
3798 static int bnxt_alloc_vnics(struct bnxt *bp)
3799 {
3800         int num_vnics = 1;
3801
3802 #ifdef CONFIG_RFS_ACCEL
3803         if ((bp->flags & (BNXT_FLAG_RFS | BNXT_FLAG_CHIP_P5)) == BNXT_FLAG_RFS)
3804                 num_vnics += bp->rx_nr_rings;
3805 #endif
3806
3807         if (BNXT_CHIP_TYPE_NITRO_A0(bp))
3808                 num_vnics++;
3809
3810         bp->vnic_info = kcalloc(num_vnics, sizeof(struct bnxt_vnic_info),
3811                                 GFP_KERNEL);
3812         if (!bp->vnic_info)
3813                 return -ENOMEM;
3814
3815         bp->nr_vnics = num_vnics;
3816         return 0;
3817 }
3818
3819 static void bnxt_init_vnics(struct bnxt *bp)
3820 {
3821         int i;
3822
3823         for (i = 0; i < bp->nr_vnics; i++) {
3824                 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
3825                 int j;
3826
3827                 vnic->fw_vnic_id = INVALID_HW_RING_ID;
3828                 for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++)
3829                         vnic->fw_rss_cos_lb_ctx[j] = INVALID_HW_RING_ID;
3830
3831                 vnic->fw_l2_ctx_id = INVALID_HW_RING_ID;
3832
3833                 if (bp->vnic_info[i].rss_hash_key) {
3834                         if (i == 0)
3835                                 get_random_bytes(vnic->rss_hash_key,
3836                                               HW_HASH_KEY_SIZE);
3837                         else
3838                                 memcpy(vnic->rss_hash_key,
3839                                        bp->vnic_info[0].rss_hash_key,
3840                                        HW_HASH_KEY_SIZE);
3841                 }
3842         }
3843 }
3844
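/* Convert a ring size in descriptors into a number of descriptor pages: at
 * least ring_size / desc_per_pg + 1 pages, rounded up to the next power of
 * two so the derived masks (nr_pages * DESC_CNT - 1) remain valid.  With
 * illustrative values, ring_size = 200 and desc_per_pg = 64 gives
 * 200 / 64 + 1 = 4 pages, already a power of two; 300 / 64 + 1 = 5 rounds
 * up to 8.
 */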
3845 static int bnxt_calc_nr_ring_pages(u32 ring_size, int desc_per_pg)
3846 {
3847         int pages;
3848
3849         pages = ring_size / desc_per_pg;
3850
3851         if (!pages)
3852                 return 1;
3853
3854         pages++;
3855
3856         while (pages & (pages - 1))
3857                 pages++;
3858
3859         return pages;
3860 }
3861
3862 void bnxt_set_tpa_flags(struct bnxt *bp)
3863 {
3864         bp->flags &= ~BNXT_FLAG_TPA;
3865         if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
3866                 return;
3867         if (bp->dev->features & NETIF_F_LRO)
3868                 bp->flags |= BNXT_FLAG_LRO;
3869         else if (bp->dev->features & NETIF_F_GRO_HW)
3870                 bp->flags |= BNXT_FLAG_GRO;
3871 }
3872
3873 /* bp->rx_ring_size, bp->tx_ring_size, dev->mtu, BNXT_FLAG_{G|L}RO flags must
3874  * be set on entry.
3875  */
3876 void bnxt_set_ring_params(struct bnxt *bp)
3877 {
3878         u32 ring_size, rx_size, rx_space, max_rx_cmpl;
3879         u32 agg_factor = 0, agg_ring_size = 0;
3880
3881         /* 8 for CRC and VLAN */
3882         rx_size = SKB_DATA_ALIGN(bp->dev->mtu + ETH_HLEN + NET_IP_ALIGN + 8);
3883
3884         rx_space = rx_size + ALIGN(max(NET_SKB_PAD, XDP_PACKET_HEADROOM), 8) +
3885                 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
3886
3887         bp->rx_copy_thresh = BNXT_RX_COPY_THRESH;
3888         ring_size = bp->rx_ring_size;
3889         bp->rx_agg_ring_size = 0;
3890         bp->rx_agg_nr_pages = 0;
3891
3892         if (bp->flags & BNXT_FLAG_TPA)
3893                 agg_factor = min_t(u32, 4, 65536 / BNXT_RX_PAGE_SIZE);
3894
3895         bp->flags &= ~BNXT_FLAG_JUMBO;
3896         if (rx_space > PAGE_SIZE && !(bp->flags & BNXT_FLAG_NO_AGG_RINGS)) {
3897                 u32 jumbo_factor;
3898
3899                 bp->flags |= BNXT_FLAG_JUMBO;
3900                 jumbo_factor = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
3901                 if (jumbo_factor > agg_factor)
3902                         agg_factor = jumbo_factor;
3903         }
3904         if (agg_factor) {
3905                 if (ring_size > BNXT_MAX_RX_DESC_CNT_JUM_ENA) {
3906                         ring_size = BNXT_MAX_RX_DESC_CNT_JUM_ENA;
3907                         netdev_warn(bp->dev, "RX ring size reduced from %d to %d because the jumbo ring is now enabled\n",
3908                                     bp->rx_ring_size, ring_size);
3909                         bp->rx_ring_size = ring_size;
3910                 }
3911                 agg_ring_size = ring_size * agg_factor;
3912
3913                 bp->rx_agg_nr_pages = bnxt_calc_nr_ring_pages(agg_ring_size,
3914                                                         RX_DESC_CNT);
3915                 if (bp->rx_agg_nr_pages > MAX_RX_AGG_PAGES) {
3916                         u32 tmp = agg_ring_size;
3917
3918                         bp->rx_agg_nr_pages = MAX_RX_AGG_PAGES;
3919                         agg_ring_size = MAX_RX_AGG_PAGES * RX_DESC_CNT - 1;
3920                         netdev_warn(bp->dev, "rx agg ring size %d reduced to %d.\n",
3921                                     tmp, agg_ring_size);
3922                 }
3923                 bp->rx_agg_ring_size = agg_ring_size;
3924                 bp->rx_agg_ring_mask = (bp->rx_agg_nr_pages * RX_DESC_CNT) - 1;
3925
3926                 if (BNXT_RX_PAGE_MODE(bp)) {
3927                         rx_space = PAGE_SIZE;
3928                         rx_size = PAGE_SIZE -
3929                                   ALIGN(max(NET_SKB_PAD, XDP_PACKET_HEADROOM), 8) -
3930                                   SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
3931                 } else {
3932                         rx_size = SKB_DATA_ALIGN(BNXT_RX_COPY_THRESH + NET_IP_ALIGN);
3933                         rx_space = rx_size + NET_SKB_PAD +
3934                                 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
3935                 }
3936         }
3937
3938         bp->rx_buf_use_size = rx_size;
3939         bp->rx_buf_size = rx_space;
3940
3941         bp->rx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, RX_DESC_CNT);
3942         bp->rx_ring_mask = (bp->rx_nr_pages * RX_DESC_CNT) - 1;
3943
3944         ring_size = bp->tx_ring_size;
3945         bp->tx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, TX_DESC_CNT);
3946         bp->tx_ring_mask = (bp->tx_nr_pages * TX_DESC_CNT) - 1;
3947
3948         max_rx_cmpl = bp->rx_ring_size;
3949         /* MAX TPA needs to be added because TPA_START completions are
3950          * immediately recycled, so the TPA completions are not bound by
3951          * the RX ring size.
3952          */
3953         if (bp->flags & BNXT_FLAG_TPA)
3954                 max_rx_cmpl += bp->max_tpa;
3955         /* RX and TPA completions are 32-byte, all others are 16-byte */
3956         ring_size = max_rx_cmpl * 2 + agg_ring_size + bp->tx_ring_size;
3957         bp->cp_ring_size = ring_size;
3958
3959         bp->cp_nr_pages = bnxt_calc_nr_ring_pages(ring_size, CP_DESC_CNT);
3960         if (bp->cp_nr_pages > MAX_CP_PAGES) {
3961                 bp->cp_nr_pages = MAX_CP_PAGES;
3962                 bp->cp_ring_size = MAX_CP_PAGES * CP_DESC_CNT - 1;
3963                 netdev_warn(bp->dev, "completion ring size %d reduced to %d.\n",
3964                             ring_size, bp->cp_ring_size);
3965         }
3966         bp->cp_bit = bp->cp_nr_pages * CP_DESC_CNT;
3967         bp->cp_ring_mask = bp->cp_bit - 1;
3968 }
3969
3970 /* Changing allocation mode of RX rings.
3971  * TODO: Update when extending xdp_rxq_info to support allocation modes.
3972  */
3973 int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode)
3974 {
3975         struct net_device *dev = bp->dev;
3976
3977         if (page_mode) {
3978                 bp->flags &= ~BNXT_FLAG_AGG_RINGS;
3979                 bp->flags |= BNXT_FLAG_RX_PAGE_MODE;
3980
3981                 if (bp->xdp_prog->aux->xdp_has_frags)
3982                         dev->max_mtu = min_t(u16, bp->max_mtu, BNXT_MAX_MTU);
3983                 else
3984                         dev->max_mtu =
3985                                 min_t(u16, bp->max_mtu, BNXT_MAX_PAGE_MODE_MTU);
3986                 if (dev->mtu > BNXT_MAX_PAGE_MODE_MTU) {
3987                         bp->flags |= BNXT_FLAG_JUMBO;
3988                         bp->rx_skb_func = bnxt_rx_multi_page_skb;
3989                 } else {
3990                         bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
3991                         bp->rx_skb_func = bnxt_rx_page_skb;
3992                 }
3993                 bp->rx_dir = DMA_BIDIRECTIONAL;
3994                 /* Disable LRO or GRO_HW */
3995                 netdev_update_features(dev);
3996         } else {
3997                 dev->max_mtu = bp->max_mtu;
3998                 bp->flags &= ~BNXT_FLAG_RX_PAGE_MODE;
3999                 bp->rx_dir = DMA_FROM_DEVICE;
4000                 bp->rx_skb_func = bnxt_rx_skb;
4001         }
4002         return 0;
4003 }
4004
4005 static void bnxt_free_vnic_attributes(struct bnxt *bp)
4006 {
4007         int i;
4008         struct bnxt_vnic_info *vnic;
4009         struct pci_dev *pdev = bp->pdev;
4010
4011         if (!bp->vnic_info)
4012                 return;
4013
4014         for (i = 0; i < bp->nr_vnics; i++) {
4015                 vnic = &bp->vnic_info[i];
4016
4017                 kfree(vnic->fw_grp_ids);
4018                 vnic->fw_grp_ids = NULL;
4019
4020                 kfree(vnic->uc_list);
4021                 vnic->uc_list = NULL;
4022
4023                 if (vnic->mc_list) {
4024                         dma_free_coherent(&pdev->dev, vnic->mc_list_size,
4025                                           vnic->mc_list, vnic->mc_list_mapping);
4026                         vnic->mc_list = NULL;
4027                 }
4028
4029                 if (vnic->rss_table) {
4030                         dma_free_coherent(&pdev->dev, vnic->rss_table_size,
4031                                           vnic->rss_table,
4032                                           vnic->rss_table_dma_addr);
4033                         vnic->rss_table = NULL;
4034                 }
4035
4036                 vnic->rss_hash_key = NULL;
4037                 vnic->flags = 0;
4038         }
4039 }
4040
4041 static int bnxt_alloc_vnic_attributes(struct bnxt *bp)
4042 {
4043         int i, rc = 0, size;
4044         struct bnxt_vnic_info *vnic;
4045         struct pci_dev *pdev = bp->pdev;
4046         int max_rings;
4047
4048         for (i = 0; i < bp->nr_vnics; i++) {
4049                 vnic = &bp->vnic_info[i];
4050
4051                 if (vnic->flags & BNXT_VNIC_UCAST_FLAG) {
4052                         int mem_size = (BNXT_MAX_UC_ADDRS - 1) * ETH_ALEN;
4053
4054                         if (mem_size > 0) {
4055                                 vnic->uc_list = kmalloc(mem_size, GFP_KERNEL);
4056                                 if (!vnic->uc_list) {
4057                                         rc = -ENOMEM;
4058                                         goto out;
4059                                 }
4060                         }
4061                 }
4062
4063                 if (vnic->flags & BNXT_VNIC_MCAST_FLAG) {
4064                         vnic->mc_list_size = BNXT_MAX_MC_ADDRS * ETH_ALEN;
4065                         vnic->mc_list =
4066                                 dma_alloc_coherent(&pdev->dev,
4067                                                    vnic->mc_list_size,
4068                                                    &vnic->mc_list_mapping,
4069                                                    GFP_KERNEL);
4070                         if (!vnic->mc_list) {
4071                                 rc = -ENOMEM;
4072                                 goto out;
4073                         }
4074                 }
4075
4076                 if (bp->flags & BNXT_FLAG_CHIP_P5)
4077                         goto vnic_skip_grps;
4078
4079                 if (vnic->flags & BNXT_VNIC_RSS_FLAG)
4080                         max_rings = bp->rx_nr_rings;
4081                 else
4082                         max_rings = 1;
4083
4084                 vnic->fw_grp_ids = kcalloc(max_rings, sizeof(u16), GFP_KERNEL);
4085                 if (!vnic->fw_grp_ids) {
4086                         rc = -ENOMEM;
4087                         goto out;
4088                 }
4089 vnic_skip_grps:
4090                 if ((bp->flags & BNXT_FLAG_NEW_RSS_CAP) &&
4091                     !(vnic->flags & BNXT_VNIC_RSS_FLAG))
4092                         continue;
4093
4094                 /* Allocate rss table and hash key */
4095                 size = L1_CACHE_ALIGN(HW_HASH_INDEX_SIZE * sizeof(u16));
4096                 if (bp->flags & BNXT_FLAG_CHIP_P5)
4097                         size = L1_CACHE_ALIGN(BNXT_MAX_RSS_TABLE_SIZE_P5);
4098
4099                 vnic->rss_table_size = size + HW_HASH_KEY_SIZE;
4100                 vnic->rss_table = dma_alloc_coherent(&pdev->dev,
4101                                                      vnic->rss_table_size,
4102                                                      &vnic->rss_table_dma_addr,
4103                                                      GFP_KERNEL);
4104                 if (!vnic->rss_table) {
4105                         rc = -ENOMEM;
4106                         goto out;
4107                 }
4108
4109                 vnic->rss_hash_key = ((void *)vnic->rss_table) + size;
4110                 vnic->rss_hash_key_dma_addr = vnic->rss_table_dma_addr + size;
4111         }
4112         return 0;
4113
4114 out:
4115         return rc;
4116 }
4117
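/* Destroy the HWRM DMA pool and mark any requests still on the pending
 * list as cancelled so that their waiters can bail out.
 */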
4118 static void bnxt_free_hwrm_resources(struct bnxt *bp)
4119 {
4120         struct bnxt_hwrm_wait_token *token;
4121
4122         dma_pool_destroy(bp->hwrm_dma_pool);
4123         bp->hwrm_dma_pool = NULL;
4124
4125         rcu_read_lock();
4126         hlist_for_each_entry_rcu(token, &bp->hwrm_pending_list, node)
4127                 WRITE_ONCE(token->state, BNXT_HWRM_CANCELLED);
4128         rcu_read_unlock();
4129 }
4130
4131 static int bnxt_alloc_hwrm_resources(struct bnxt *bp)
4132 {
4133         bp->hwrm_dma_pool = dma_pool_create("bnxt_hwrm", &bp->pdev->dev,
4134                                             BNXT_HWRM_DMA_SIZE,
4135                                             BNXT_HWRM_DMA_ALIGN, 0);
4136         if (!bp->hwrm_dma_pool)
4137                 return -ENOMEM;
4138
4139         INIT_HLIST_HEAD(&bp->hwrm_pending_list);
4140
4141         return 0;
4142 }
4143
4144 static void bnxt_free_stats_mem(struct bnxt *bp, struct bnxt_stats_mem *stats)
4145 {
4146         kfree(stats->hw_masks);
4147         stats->hw_masks = NULL;
4148         kfree(stats->sw_stats);
4149         stats->sw_stats = NULL;
4150         if (stats->hw_stats) {
4151                 dma_free_coherent(&bp->pdev->dev, stats->len, stats->hw_stats,
4152                                   stats->hw_stats_map);
4153                 stats->hw_stats = NULL;
4154         }
4155 }
4156
4157 static int bnxt_alloc_stats_mem(struct bnxt *bp, struct bnxt_stats_mem *stats,
4158                                 bool alloc_masks)
4159 {
4160         stats->hw_stats = dma_alloc_coherent(&bp->pdev->dev, stats->len,
4161                                              &stats->hw_stats_map, GFP_KERNEL);
4162         if (!stats->hw_stats)
4163                 return -ENOMEM;
4164
4165         stats->sw_stats = kzalloc(stats->len, GFP_KERNEL);
4166         if (!stats->sw_stats)
4167                 goto stats_mem_err;
4168
4169         if (alloc_masks) {
4170                 stats->hw_masks = kzalloc(stats->len, GFP_KERNEL);
4171                 if (!stats->hw_masks)
4172                         goto stats_mem_err;
4173         }
4174         return 0;
4175
4176 stats_mem_err:
4177         bnxt_free_stats_mem(bp, stats);
4178         return -ENOMEM;
4179 }
4180
4181 static void bnxt_fill_masks(u64 *mask_arr, u64 mask, int count)
4182 {
4183         int i;
4184
4185         for (i = 0; i < count; i++)
4186                 mask_arr[i] = mask;
4187 }
4188
4189 static void bnxt_copy_hw_masks(u64 *mask_arr, __le64 *hw_mask_arr, int count)
4190 {
4191         int i;
4192
4193         for (i = 0; i < count; i++)
4194                 mask_arr[i] = le64_to_cpu(hw_mask_arr[i]);
4195 }
4196
4197 static int bnxt_hwrm_func_qstat_ext(struct bnxt *bp,
4198                                     struct bnxt_stats_mem *stats)
4199 {
4200         struct hwrm_func_qstats_ext_output *resp;
4201         struct hwrm_func_qstats_ext_input *req;
4202         __le64 *hw_masks;
4203         int rc;
4204
4205         if (!(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED) ||
4206             !(bp->flags & BNXT_FLAG_CHIP_P5))
4207                 return -EOPNOTSUPP;
4208
4209         rc = hwrm_req_init(bp, req, HWRM_FUNC_QSTATS_EXT);
4210         if (rc)
4211                 return rc;
4212
4213         req->fid = cpu_to_le16(0xffff);
4214         req->flags = FUNC_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK;
4215
4216         resp = hwrm_req_hold(bp, req);
4217         rc = hwrm_req_send(bp, req);
4218         if (!rc) {
4219                 hw_masks = &resp->rx_ucast_pkts;
4220                 bnxt_copy_hw_masks(stats->hw_masks, hw_masks, stats->len / 8);
4221         }
4222         hwrm_req_drop(bp, req);
4223         return rc;
4224 }
4225
4226 static int bnxt_hwrm_port_qstats(struct bnxt *bp, u8 flags);
4227 static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp, u8 flags);
4228
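/* Query the hardware counter masks (counter widths) for the ring, port and
 * extended port statistics.  If a query is not supported or fails, fall
 * back to fixed masks: 48 bits for ring counters on P5 chips, 40 bits for
 * port counters, and full 64 bits otherwise.
 */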
4229 static void bnxt_init_stats(struct bnxt *bp)
4230 {
4231         struct bnxt_napi *bnapi = bp->bnapi[0];
4232         struct bnxt_cp_ring_info *cpr;
4233         struct bnxt_stats_mem *stats;
4234         __le64 *rx_stats, *tx_stats;
4235         int rc, rx_count, tx_count;
4236         u64 *rx_masks, *tx_masks;
4237         u64 mask;
4238         u8 flags;
4239
4240         cpr = &bnapi->cp_ring;
4241         stats = &cpr->stats;
4242         rc = bnxt_hwrm_func_qstat_ext(bp, stats);
4243         if (rc) {
4244                 if (bp->flags & BNXT_FLAG_CHIP_P5)
4245                         mask = (1ULL << 48) - 1;
4246                 else
4247                         mask = -1ULL;
4248                 bnxt_fill_masks(stats->hw_masks, mask, stats->len / 8);
4249         }
4250         if (bp->flags & BNXT_FLAG_PORT_STATS) {
4251                 stats = &bp->port_stats;
4252                 rx_stats = stats->hw_stats;
4253                 rx_masks = stats->hw_masks;
4254                 rx_count = sizeof(struct rx_port_stats) / 8;
4255                 tx_stats = rx_stats + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
4256                 tx_masks = rx_masks + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
4257                 tx_count = sizeof(struct tx_port_stats) / 8;
4258
4259                 flags = PORT_QSTATS_REQ_FLAGS_COUNTER_MASK;
4260                 rc = bnxt_hwrm_port_qstats(bp, flags);
4261                 if (rc) {
4262                         mask = (1ULL << 40) - 1;
4263
4264                         bnxt_fill_masks(rx_masks, mask, rx_count);
4265                         bnxt_fill_masks(tx_masks, mask, tx_count);
4266                 } else {
4267                         bnxt_copy_hw_masks(rx_masks, rx_stats, rx_count);
4268                         bnxt_copy_hw_masks(tx_masks, tx_stats, tx_count);
4269                         bnxt_hwrm_port_qstats(bp, 0);
4270                 }
4271         }
4272         if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
4273                 stats = &bp->rx_port_stats_ext;
4274                 rx_stats = stats->hw_stats;
4275                 rx_masks = stats->hw_masks;
4276                 rx_count = sizeof(struct rx_port_stats_ext) / 8;
4277                 stats = &bp->tx_port_stats_ext;
4278                 tx_stats = stats->hw_stats;
4279                 tx_masks = stats->hw_masks;
4280                 tx_count = sizeof(struct tx_port_stats_ext) / 8;
4281
4282                 flags = PORT_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK;
4283                 rc = bnxt_hwrm_port_qstats_ext(bp, flags);
4284                 if (rc) {
4285                         mask = (1ULL << 40) - 1;
4286
4287                         bnxt_fill_masks(rx_masks, mask, rx_count);
4288                         if (tx_stats)
4289                                 bnxt_fill_masks(tx_masks, mask, tx_count);
4290                 } else {
4291                         bnxt_copy_hw_masks(rx_masks, rx_stats, rx_count);
4292                         if (tx_stats)
4293                                 bnxt_copy_hw_masks(tx_masks, tx_stats,
4294                                                    tx_count);
4295                         bnxt_hwrm_port_qstats_ext(bp, 0);
4296                 }
4297         }
4298 }
4299
4300 static void bnxt_free_port_stats(struct bnxt *bp)
4301 {
4302         bp->flags &= ~BNXT_FLAG_PORT_STATS;
4303         bp->flags &= ~BNXT_FLAG_PORT_STATS_EXT;
4304
4305         bnxt_free_stats_mem(bp, &bp->port_stats);
4306         bnxt_free_stats_mem(bp, &bp->rx_port_stats_ext);
4307         bnxt_free_stats_mem(bp, &bp->tx_port_stats_ext);
4308 }
4309
4310 static void bnxt_free_ring_stats(struct bnxt *bp)
4311 {
4312         int i;
4313
4314         if (!bp->bnapi)
4315                 return;
4316
4317         for (i = 0; i < bp->cp_nr_rings; i++) {
4318                 struct bnxt_napi *bnapi = bp->bnapi[i];
4319                 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
4320
4321                 bnxt_free_stats_mem(bp, &cpr->stats);
4322         }
4323 }
4324
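/* Allocate the statistics buffers: one hardware stats block per completion
 * ring, plus the port and extended port stats blocks where supported.
 * Extended port stats are optional, so failure to allocate them is not
 * treated as an error.
 */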
4325 static int bnxt_alloc_stats(struct bnxt *bp)
4326 {
4327         u32 size, i;
4328         int rc;
4329
4330         size = bp->hw_ring_stats_size;
4331
4332         for (i = 0; i < bp->cp_nr_rings; i++) {
4333                 struct bnxt_napi *bnapi = bp->bnapi[i];
4334                 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
4335
4336                 cpr->stats.len = size;
4337                 rc = bnxt_alloc_stats_mem(bp, &cpr->stats, !i);
4338                 if (rc)
4339                         return rc;
4340
4341                 cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
4342         }
4343
4344         if (BNXT_VF(bp) || bp->chip_num == CHIP_NUM_58700)
4345                 return 0;
4346
4347         if (bp->port_stats.hw_stats)
4348                 goto alloc_ext_stats;
4349
4350         bp->port_stats.len = BNXT_PORT_STATS_SIZE;
4351         rc = bnxt_alloc_stats_mem(bp, &bp->port_stats, true);
4352         if (rc)
4353                 return rc;
4354
4355         bp->flags |= BNXT_FLAG_PORT_STATS;
4356
4357 alloc_ext_stats:
4358         /* Display extended statistics only if the FW supports them */
4359         if (bp->hwrm_spec_code < 0x10804 || bp->hwrm_spec_code == 0x10900)
4360                 if (!(bp->fw_cap & BNXT_FW_CAP_EXT_STATS_SUPPORTED))
4361                         return 0;
4362
4363         if (bp->rx_port_stats_ext.hw_stats)
4364                 goto alloc_tx_ext_stats;
4365
4366         bp->rx_port_stats_ext.len = sizeof(struct rx_port_stats_ext);
4367         rc = bnxt_alloc_stats_mem(bp, &bp->rx_port_stats_ext, true);
4368         /* Extended stats are optional */
4369         if (rc)
4370                 return 0;
4371
4372 alloc_tx_ext_stats:
4373         if (bp->tx_port_stats_ext.hw_stats)
4374                 return 0;
4375
4376         if (bp->hwrm_spec_code >= 0x10902 ||
4377             (bp->fw_cap & BNXT_FW_CAP_EXT_STATS_SUPPORTED)) {
4378                 bp->tx_port_stats_ext.len = sizeof(struct tx_port_stats_ext);
4379                 rc = bnxt_alloc_stats_mem(bp, &bp->tx_port_stats_ext, true);
4380                 /* Extended stats are optional */
4381                 if (rc)
4382                         return 0;
4383         }
4384         bp->flags |= BNXT_FLAG_PORT_STATS_EXT;
4385         return 0;
4386 }
4387
4388 static void bnxt_clear_ring_indices(struct bnxt *bp)
4389 {
4390         int i;
4391
4392         if (!bp->bnapi)
4393                 return;
4394
4395         for (i = 0; i < bp->cp_nr_rings; i++) {
4396                 struct bnxt_napi *bnapi = bp->bnapi[i];
4397                 struct bnxt_cp_ring_info *cpr;
4398                 struct bnxt_rx_ring_info *rxr;
4399                 struct bnxt_tx_ring_info *txr;
4400
4401                 if (!bnapi)
4402                         continue;
4403
4404                 cpr = &bnapi->cp_ring;
4405                 cpr->cp_raw_cons = 0;
4406
4407                 txr = bnapi->tx_ring;
4408                 if (txr) {
4409                         txr->tx_prod = 0;
4410                         txr->tx_cons = 0;
4411                 }
4412
4413                 rxr = bnapi->rx_ring;
4414                 if (rxr) {
4415                         rxr->rx_prod = 0;
4416                         rxr->rx_agg_prod = 0;
4417                         rxr->rx_sw_agg_prod = 0;
4418                         rxr->rx_next_cons = 0;
4419                 }
4420         }
4421 }
4422
4423 static void bnxt_free_ntp_fltrs(struct bnxt *bp, bool irq_reinit)
4424 {
4425 #ifdef CONFIG_RFS_ACCEL
4426         int i;
4427
4428         /* Called under rtnl_lock with all of our NAPIs disabled, so it
4429          * is safe to delete the hash table.
4430          */
4431         for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
4432                 struct hlist_head *head;
4433                 struct hlist_node *tmp;
4434                 struct bnxt_ntuple_filter *fltr;
4435
4436                 head = &bp->ntp_fltr_hash_tbl[i];
4437                 hlist_for_each_entry_safe(fltr, tmp, head, hash) {
4438                         hlist_del(&fltr->hash);
4439                         kfree(fltr);
4440                 }
4441         }
4442         if (irq_reinit) {
4443                 bitmap_free(bp->ntp_fltr_bmap);
4444                 bp->ntp_fltr_bmap = NULL;
4445         }
4446         bp->ntp_fltr_count = 0;
4447 #endif
4448 }
4449
4450 static int bnxt_alloc_ntp_fltrs(struct bnxt *bp)
4451 {
4452 #ifdef CONFIG_RFS_ACCEL
4453         int i, rc = 0;
4454
4455         if (!(bp->flags & BNXT_FLAG_RFS))
4456                 return 0;
4457
4458         for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++)
4459                 INIT_HLIST_HEAD(&bp->ntp_fltr_hash_tbl[i]);
4460
4461         bp->ntp_fltr_count = 0;
4462         bp->ntp_fltr_bmap = bitmap_zalloc(BNXT_NTP_FLTR_MAX_FLTR, GFP_KERNEL);
4463
4464         if (!bp->ntp_fltr_bmap)
4465                 rc = -ENOMEM;
4466
4467         return rc;
4468 #else
4469         return 0;
4470 #endif
4471 }
4472
4473 static void bnxt_free_mem(struct bnxt *bp, bool irq_re_init)
4474 {
4475         bnxt_free_vnic_attributes(bp);
4476         bnxt_free_tx_rings(bp);
4477         bnxt_free_rx_rings(bp);
4478         bnxt_free_cp_rings(bp);
4479         bnxt_free_all_cp_arrays(bp);
4480         bnxt_free_ntp_fltrs(bp, irq_re_init);
4481         if (irq_re_init) {
4482                 bnxt_free_ring_stats(bp);
4483                 if (!(bp->phy_flags & BNXT_PHY_FL_PORT_STATS_NO_RESET) ||
4484                     test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
4485                         bnxt_free_port_stats(bp);
4486                 bnxt_free_ring_grps(bp);
4487                 bnxt_free_vnics(bp);
4488                 kfree(bp->tx_ring_map);
4489                 bp->tx_ring_map = NULL;
4490                 kfree(bp->tx_ring);
4491                 bp->tx_ring = NULL;
4492                 kfree(bp->rx_ring);
4493                 bp->rx_ring = NULL;
4494                 kfree(bp->bnapi);
4495                 bp->bnapi = NULL;
4496         } else {
4497                 bnxt_clear_ring_indices(bp);
4498         }
4499 }
4500
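/* Allocate all driver memory for the current ring configuration.  When
 * irq_re_init is true this includes the bnxt_napi array, the RX/TX ring
 * info arrays, statistics, NTP filter state and VNICs; otherwise only the
 * completion/RX/TX ring memory and the VNIC attributes are (re)allocated.
 */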
4501 static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init)
4502 {
4503         int i, j, rc, size, arr_size;
4504         void *bnapi;
4505
4506         if (irq_re_init) {
4507                 /* Allocate bnapi mem pointer array and mem block for
4508                  * all queues
4509                  */
4510                 arr_size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi *) *
4511                                 bp->cp_nr_rings);
4512                 size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi));
4513                 bnapi = kzalloc(arr_size + size * bp->cp_nr_rings, GFP_KERNEL);
4514                 if (!bnapi)
4515                         return -ENOMEM;
4516
4517                 bp->bnapi = bnapi;
4518                 bnapi += arr_size;
4519                 for (i = 0; i < bp->cp_nr_rings; i++, bnapi += size) {
4520                         bp->bnapi[i] = bnapi;
4521                         bp->bnapi[i]->index = i;
4522                         bp->bnapi[i]->bp = bp;
4523                         if (bp->flags & BNXT_FLAG_CHIP_P5) {
4524                                 struct bnxt_cp_ring_info *cpr =
4525                                         &bp->bnapi[i]->cp_ring;
4526
4527                                 cpr->cp_ring_struct.ring_mem.flags =
4528                                         BNXT_RMEM_RING_PTE_FLAG;
4529                         }
4530                 }
4531
4532                 bp->rx_ring = kcalloc(bp->rx_nr_rings,
4533                                       sizeof(struct bnxt_rx_ring_info),
4534                                       GFP_KERNEL);
4535                 if (!bp->rx_ring)
4536                         return -ENOMEM;
4537
4538                 for (i = 0; i < bp->rx_nr_rings; i++) {
4539                         struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
4540
4541                         if (bp->flags & BNXT_FLAG_CHIP_P5) {
4542                                 rxr->rx_ring_struct.ring_mem.flags =
4543                                         BNXT_RMEM_RING_PTE_FLAG;
4544                                 rxr->rx_agg_ring_struct.ring_mem.flags =
4545                                         BNXT_RMEM_RING_PTE_FLAG;
4546                         }
4547                         rxr->bnapi = bp->bnapi[i];
4548                         bp->bnapi[i]->rx_ring = &bp->rx_ring[i];
4549                 }
4550
4551                 bp->tx_ring = kcalloc(bp->tx_nr_rings,
4552                                       sizeof(struct bnxt_tx_ring_info),
4553                                       GFP_KERNEL);
4554                 if (!bp->tx_ring)
4555                         return -ENOMEM;
4556
4557                 bp->tx_ring_map = kcalloc(bp->tx_nr_rings, sizeof(u16),
4558                                           GFP_KERNEL);
4559
4560                 if (!bp->tx_ring_map)
4561                         return -ENOMEM;
4562
4563                 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
4564                         j = 0;
4565                 else
4566                         j = bp->rx_nr_rings;
4567
4568                 for (i = 0; i < bp->tx_nr_rings; i++, j++) {
4569                         struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
4570
4571                         if (bp->flags & BNXT_FLAG_CHIP_P5)
4572                                 txr->tx_ring_struct.ring_mem.flags =
4573                                         BNXT_RMEM_RING_PTE_FLAG;
4574                         txr->bnapi = bp->bnapi[j];
4575                         bp->bnapi[j]->tx_ring = txr;
4576                         bp->tx_ring_map[i] = bp->tx_nr_rings_xdp + i;
4577                         if (i >= bp->tx_nr_rings_xdp) {
4578                                 txr->txq_index = i - bp->tx_nr_rings_xdp;
4579                                 bp->bnapi[j]->tx_int = bnxt_tx_int;
4580                         } else {
4581                                 bp->bnapi[j]->flags |= BNXT_NAPI_FLAG_XDP;
4582                                 bp->bnapi[j]->tx_int = bnxt_tx_int_xdp;
4583                         }
4584                 }
4585
4586                 rc = bnxt_alloc_stats(bp);
4587                 if (rc)
4588                         goto alloc_mem_err;
4589                 bnxt_init_stats(bp);
4590
4591                 rc = bnxt_alloc_ntp_fltrs(bp);
4592                 if (rc)
4593                         goto alloc_mem_err;
4594
4595                 rc = bnxt_alloc_vnics(bp);
4596                 if (rc)
4597                         goto alloc_mem_err;
4598         }
4599
4600         rc = bnxt_alloc_all_cp_arrays(bp);
4601         if (rc)
4602                 goto alloc_mem_err;
4603
4604         bnxt_init_ring_struct(bp);
4605
4606         rc = bnxt_alloc_rx_rings(bp);
4607         if (rc)
4608                 goto alloc_mem_err;
4609
4610         rc = bnxt_alloc_tx_rings(bp);
4611         if (rc)
4612                 goto alloc_mem_err;
4613
4614         rc = bnxt_alloc_cp_rings(bp);
4615         if (rc)
4616                 goto alloc_mem_err;
4617
4618         bp->vnic_info[0].flags |= BNXT_VNIC_RSS_FLAG | BNXT_VNIC_MCAST_FLAG |
4619                                   BNXT_VNIC_UCAST_FLAG;
4620         rc = bnxt_alloc_vnic_attributes(bp);
4621         if (rc)
4622                 goto alloc_mem_err;
4623         return 0;
4624
4625 alloc_mem_err:
4626         bnxt_free_mem(bp, true);
4627         return rc;
4628 }
4629
4630 static void bnxt_disable_int(struct bnxt *bp)
4631 {
4632         int i;
4633
4634         if (!bp->bnapi)
4635                 return;
4636
4637         for (i = 0; i < bp->cp_nr_rings; i++) {
4638                 struct bnxt_napi *bnapi = bp->bnapi[i];
4639                 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
4640                 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
4641
4642                 if (ring->fw_ring_id != INVALID_HW_RING_ID)
4643                         bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons);
4644         }
4645 }
4646
4647 static int bnxt_cp_num_to_irq_num(struct bnxt *bp, int n)
4648 {
4649         struct bnxt_napi *bnapi = bp->bnapi[n];
4650         struct bnxt_cp_ring_info *cpr;
4651
4652         cpr = &bnapi->cp_ring;
4653         return cpr->cp_ring_struct.map_idx;
4654 }
4655
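/* Disable NQ/completion ring interrupts and synchronize with any IRQ
 * handlers that are still running.  bp->intr_sem is incremented first to
 * signal that interrupts are being disabled.
 */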
4656 static void bnxt_disable_int_sync(struct bnxt *bp)
4657 {
4658         int i;
4659
4660         if (!bp->irq_tbl)
4661                 return;
4662
4663         atomic_inc(&bp->intr_sem);
4664
4665         bnxt_disable_int(bp);
4666         for (i = 0; i < bp->cp_nr_rings; i++) {
4667                 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
4668
4669                 synchronize_irq(bp->irq_tbl[map_idx].vector);
4670         }
4671 }
4672
4673 static void bnxt_enable_int(struct bnxt *bp)
4674 {
4675         int i;
4676
4677         atomic_set(&bp->intr_sem, 0);
4678         for (i = 0; i < bp->cp_nr_rings; i++) {
4679                 struct bnxt_napi *bnapi = bp->bnapi[i];
4680                 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
4681
4682                 bnxt_db_nq_arm(bp, &cpr->cp_db, cpr->cp_raw_cons);
4683         }
4684 }
4685
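/* Register the driver with the firmware: advertise the OS type, driver
 * version and capabilities (hot reset, error recovery), set up the bitmap
 * of VF commands the PF wants forwarded to it, and the bitmap of async
 * events the driver wants to receive.  With async_only set, only the async
 * event forwarding bitmap is updated.
 */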
4686 int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp, unsigned long *bmap, int bmap_size,
4687                             bool async_only)
4688 {
4689         DECLARE_BITMAP(async_events_bmap, 256);
4690         u32 *events = (u32 *)async_events_bmap;
4691         struct hwrm_func_drv_rgtr_output *resp;
4692         struct hwrm_func_drv_rgtr_input *req;
4693         u32 flags;
4694         int rc, i;
4695
4696         rc = hwrm_req_init(bp, req, HWRM_FUNC_DRV_RGTR);
4697         if (rc)
4698                 return rc;
4699
4700         req->enables = cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE |
4701                                    FUNC_DRV_RGTR_REQ_ENABLES_VER |
4702                                    FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD);
4703
4704         req->os_type = cpu_to_le16(FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX);
4705         flags = FUNC_DRV_RGTR_REQ_FLAGS_16BIT_VER_MODE;
4706         if (bp->fw_cap & BNXT_FW_CAP_HOT_RESET)
4707                 flags |= FUNC_DRV_RGTR_REQ_FLAGS_HOT_RESET_SUPPORT;
4708         if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)
4709                 flags |= FUNC_DRV_RGTR_REQ_FLAGS_ERROR_RECOVERY_SUPPORT |
4710                          FUNC_DRV_RGTR_REQ_FLAGS_MASTER_SUPPORT;
4711         req->flags = cpu_to_le32(flags);
4712         req->ver_maj_8b = DRV_VER_MAJ;
4713         req->ver_min_8b = DRV_VER_MIN;
4714         req->ver_upd_8b = DRV_VER_UPD;
4715         req->ver_maj = cpu_to_le16(DRV_VER_MAJ);
4716         req->ver_min = cpu_to_le16(DRV_VER_MIN);
4717         req->ver_upd = cpu_to_le16(DRV_VER_UPD);
4718
4719         if (BNXT_PF(bp)) {
4720                 u32 data[8];
4721                 int i;
4722
4723                 memset(data, 0, sizeof(data));
4724                 for (i = 0; i < ARRAY_SIZE(bnxt_vf_req_snif); i++) {
4725                         u16 cmd = bnxt_vf_req_snif[i];
4726                         unsigned int bit, idx;
4727
4728                         idx = cmd / 32;
4729                         bit = cmd % 32;
4730                         data[idx] |= 1 << bit;
4731                 }
4732
4733                 for (i = 0; i < 8; i++)
4734                         req->vf_req_fwd[i] = cpu_to_le32(data[i]);
4735
4736                 req->enables |=
4737                         cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_VF_REQ_FWD);
4738         }
4739
4740         if (bp->fw_cap & BNXT_FW_CAP_OVS_64BIT_HANDLE)
4741                 req->flags |= cpu_to_le32(
4742                         FUNC_DRV_RGTR_REQ_FLAGS_FLOW_HANDLE_64BIT_MODE);
4743
4744         memset(async_events_bmap, 0, sizeof(async_events_bmap));
4745         for (i = 0; i < ARRAY_SIZE(bnxt_async_events_arr); i++) {
4746                 u16 event_id = bnxt_async_events_arr[i];
4747
4748                 if (event_id == ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY &&
4749                     !(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
4750                         continue;
4751                 if (event_id == ASYNC_EVENT_CMPL_EVENT_ID_PHC_UPDATE &&
4752                     !bp->ptp_cfg)
4753                         continue;
4754                 __set_bit(bnxt_async_events_arr[i], async_events_bmap);
4755         }
4756         if (bmap && bmap_size) {
4757                 for (i = 0; i < bmap_size; i++) {
4758                         if (test_bit(i, bmap))
4759                                 __set_bit(i, async_events_bmap);
4760                 }
4761         }
4762         for (i = 0; i < 8; i++)
4763                 req->async_event_fwd[i] |= cpu_to_le32(events[i]);
4764
4765         if (async_only)
4766                 req->enables =
4767                         cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD);
4768
4769         resp = hwrm_req_hold(bp, req);
4770         rc = hwrm_req_send(bp, req);
4771         if (!rc) {
4772                 set_bit(BNXT_STATE_DRV_REGISTERED, &bp->state);
4773                 if (resp->flags &
4774                     cpu_to_le32(FUNC_DRV_RGTR_RESP_FLAGS_IF_CHANGE_SUPPORTED))
4775                         bp->fw_cap |= BNXT_FW_CAP_IF_CHANGE;
4776         }
4777         hwrm_req_drop(bp, req);
4778         return rc;
4779 }
4780
4781 int bnxt_hwrm_func_drv_unrgtr(struct bnxt *bp)
4782 {
4783         struct hwrm_func_drv_unrgtr_input *req;
4784         int rc;
4785
4786         if (!test_and_clear_bit(BNXT_STATE_DRV_REGISTERED, &bp->state))
4787                 return 0;
4788
4789         rc = hwrm_req_init(bp, req, HWRM_FUNC_DRV_UNRGTR);
4790         if (rc)
4791                 return rc;
4792         return hwrm_req_send(bp, req);
4793 }
4794
4795 static int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, u8 tunnel_type)
4796 {
4797         struct hwrm_tunnel_dst_port_free_input *req;
4798         int rc;
4799
4800         if (tunnel_type == TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN &&
4801             bp->vxlan_fw_dst_port_id == INVALID_HW_RING_ID)
4802                 return 0;
4803         if (tunnel_type == TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE &&
4804             bp->nge_fw_dst_port_id == INVALID_HW_RING_ID)
4805                 return 0;
4806
4807         rc = hwrm_req_init(bp, req, HWRM_TUNNEL_DST_PORT_FREE);
4808         if (rc)
4809                 return rc;
4810
4811         req->tunnel_type = tunnel_type;
4812
4813         switch (tunnel_type) {
4814         case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN:
4815                 req->tunnel_dst_port_id = cpu_to_le16(bp->vxlan_fw_dst_port_id);
4816                 bp->vxlan_port = 0;
4817                 bp->vxlan_fw_dst_port_id = INVALID_HW_RING_ID;
4818                 break;
4819         case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE:
4820                 req->tunnel_dst_port_id = cpu_to_le16(bp->nge_fw_dst_port_id);
4821                 bp->nge_port = 0;
4822                 bp->nge_fw_dst_port_id = INVALID_HW_RING_ID;
4823                 break;
4824         default:
4825                 break;
4826         }
4827
4828         rc = hwrm_req_send(bp, req);
4829         if (rc)
4830                 netdev_err(bp->dev, "hwrm_tunnel_dst_port_free failed. rc:%d\n",
4831                            rc);
4832         return rc;
4833 }
4834
4835 static int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, __be16 port,
4836                                            u8 tunnel_type)
4837 {
4838         struct hwrm_tunnel_dst_port_alloc_output *resp;
4839         struct hwrm_tunnel_dst_port_alloc_input *req;
4840         int rc;
4841
4842         rc = hwrm_req_init(bp, req, HWRM_TUNNEL_DST_PORT_ALLOC);
4843         if (rc)
4844                 return rc;
4845
4846         req->tunnel_type = tunnel_type;
4847         req->tunnel_dst_port_val = port;
4848
4849         resp = hwrm_req_hold(bp, req);
4850         rc = hwrm_req_send(bp, req);
4851         if (rc) {
4852                 netdev_err(bp->dev, "hwrm_tunnel_dst_port_alloc failed. rc:%d\n",
4853                            rc);
4854                 goto err_out;
4855         }
4856
4857         switch (tunnel_type) {
4858         case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN:
4859                 bp->vxlan_port = port;
4860                 bp->vxlan_fw_dst_port_id =
4861                         le16_to_cpu(resp->tunnel_dst_port_id);
4862                 break;
4863         case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE:
4864                 bp->nge_port = port;
4865                 bp->nge_fw_dst_port_id = le16_to_cpu(resp->tunnel_dst_port_id);
4866                 break;
4867         default:
4868                 break;
4869         }
4870
4871 err_out:
4872         hwrm_req_drop(bp, req);
4873         return rc;
4874 }
4875
4876 static int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, u16 vnic_id)
4877 {
4878         struct hwrm_cfa_l2_set_rx_mask_input *req;
4879         struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
4880         int rc;
4881
4882         rc = hwrm_req_init(bp, req, HWRM_CFA_L2_SET_RX_MASK);
4883         if (rc)
4884                 return rc;
4885
4886         req->vnic_id = cpu_to_le32(vnic->fw_vnic_id);
4887         if (vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_MCAST) {
4888                 req->num_mc_entries = cpu_to_le32(vnic->mc_list_count);
4889                 req->mc_tbl_addr = cpu_to_le64(vnic->mc_list_mapping);
4890         }
4891         req->mask = cpu_to_le32(vnic->rx_mask);
4892         return hwrm_req_send_silent(bp, req);
4893 }
4894
4895 #ifdef CONFIG_RFS_ACCEL
4896 static int bnxt_hwrm_cfa_ntuple_filter_free(struct bnxt *bp,
4897                                             struct bnxt_ntuple_filter *fltr)
4898 {
4899         struct hwrm_cfa_ntuple_filter_free_input *req;
4900         int rc;
4901
4902         rc = hwrm_req_init(bp, req, HWRM_CFA_NTUPLE_FILTER_FREE);
4903         if (rc)
4904                 return rc;
4905
4906         req->ntuple_filter_id = fltr->filter_id;
4907         return hwrm_req_send(bp, req);
4908 }
4909
4910 #define BNXT_NTP_FLTR_FLAGS                                     \
4911         (CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID |     \
4912          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE |        \
4913          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_MACADDR |      \
4914          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE |      \
4915          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR |       \
4916          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR_MASK |  \
4917          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR |       \
4918          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR_MASK |  \
4919          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL |      \
4920          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT |         \
4921          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT_MASK |    \
4922          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT |         \
4923          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT_MASK |    \
4924          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_ID)
4925
4926 #define BNXT_NTP_TUNNEL_FLTR_FLAG                               \
4927                 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE
4928
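/* Program an aRFS n-tuple filter.  The match fields come from fltr->fkeys;
 * the destination is either the RX ring index directly (when the firmware
 * supports RFS ring table index mode) or the per-ring VNIC.  The returned
 * firmware handle is saved in fltr->filter_id for a later free.
 */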
4929 static int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
4930                                              struct bnxt_ntuple_filter *fltr)
4931 {
4932         struct hwrm_cfa_ntuple_filter_alloc_output *resp;
4933         struct hwrm_cfa_ntuple_filter_alloc_input *req;
4934         struct flow_keys *keys = &fltr->fkeys;
4935         struct bnxt_vnic_info *vnic;
4936         u32 flags = 0;
4937         int rc;
4938
4939         rc = hwrm_req_init(bp, req, HWRM_CFA_NTUPLE_FILTER_ALLOC);
4940         if (rc)
4941                 return rc;
4942
4943         req->l2_filter_id = bp->vnic_info[0].fw_l2_filter_id[fltr->l2_fltr_idx];
4944
4945         if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2) {
4946                 flags = CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DEST_RFS_RING_IDX;
4947                 req->dst_id = cpu_to_le16(fltr->rxq);
4948         } else {
4949                 vnic = &bp->vnic_info[fltr->rxq + 1];
4950                 req->dst_id = cpu_to_le16(vnic->fw_vnic_id);
4951         }
4952         req->flags = cpu_to_le32(flags);
4953         req->enables = cpu_to_le32(BNXT_NTP_FLTR_FLAGS);
4954
4955         req->ethertype = htons(ETH_P_IP);
4956         memcpy(req->src_macaddr, fltr->src_mac_addr, ETH_ALEN);
4957         req->ip_addr_type = CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4;
4958         req->ip_protocol = keys->basic.ip_proto;
4959
4960         if (keys->basic.n_proto == htons(ETH_P_IPV6)) {
4961                 int i;
4962
4963                 req->ethertype = htons(ETH_P_IPV6);
4964                 req->ip_addr_type =
4965                         CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6;
4966                 *(struct in6_addr *)&req->src_ipaddr[0] =
4967                         keys->addrs.v6addrs.src;
4968                 *(struct in6_addr *)&req->dst_ipaddr[0] =
4969                         keys->addrs.v6addrs.dst;
4970                 for (i = 0; i < 4; i++) {
4971                         req->src_ipaddr_mask[i] = cpu_to_be32(0xffffffff);
4972                         req->dst_ipaddr_mask[i] = cpu_to_be32(0xffffffff);
4973                 }
4974         } else {
4975                 req->src_ipaddr[0] = keys->addrs.v4addrs.src;
4976                 req->src_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
4977                 req->dst_ipaddr[0] = keys->addrs.v4addrs.dst;
4978                 req->dst_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
4979         }
4980         if (keys->control.flags & FLOW_DIS_ENCAPSULATION) {
4981                 req->enables |= cpu_to_le32(BNXT_NTP_TUNNEL_FLTR_FLAG);
4982                 req->tunnel_type =
4983                         CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL;
4984         }
4985
4986         req->src_port = keys->ports.src;
4987         req->src_port_mask = cpu_to_be16(0xffff);
4988         req->dst_port = keys->ports.dst;
4989         req->dst_port_mask = cpu_to_be16(0xffff);
4990
4991         resp = hwrm_req_hold(bp, req);
4992         rc = hwrm_req_send(bp, req);
4993         if (!rc)
4994                 fltr->filter_id = resp->ntuple_filter_id;
4995         hwrm_req_drop(bp, req);
4996         return rc;
4997 }
4998 #endif
4999
5000 static int bnxt_hwrm_set_vnic_filter(struct bnxt *bp, u16 vnic_id, u16 idx,
5001                                      const u8 *mac_addr)
5002 {
5003         struct hwrm_cfa_l2_filter_alloc_output *resp;
5004         struct hwrm_cfa_l2_filter_alloc_input *req;
5005         int rc;
5006
5007         rc = hwrm_req_init(bp, req, HWRM_CFA_L2_FILTER_ALLOC);
5008         if (rc)
5009                 return rc;
5010
5011         req->flags = cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX);
5012         if (!BNXT_CHIP_TYPE_NITRO_A0(bp))
5013                 req->flags |=
5014                         cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST);
5015         req->dst_id = cpu_to_le16(bp->vnic_info[vnic_id].fw_vnic_id);
5016         req->enables =
5017                 cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR |
5018                             CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_ID |
5019                             CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK);
5020         memcpy(req->l2_addr, mac_addr, ETH_ALEN);
5021         req->l2_addr_mask[0] = 0xff;
5022         req->l2_addr_mask[1] = 0xff;
5023         req->l2_addr_mask[2] = 0xff;
5024         req->l2_addr_mask[3] = 0xff;
5025         req->l2_addr_mask[4] = 0xff;
5026         req->l2_addr_mask[5] = 0xff;
5027
5028         resp = hwrm_req_hold(bp, req);
5029         rc = hwrm_req_send(bp, req);
5030         if (!rc)
5031                 bp->vnic_info[vnic_id].fw_l2_filter_id[idx] =
5032                                                         resp->l2_filter_id;
5033         hwrm_req_drop(bp, req);
5034         return rc;
5035 }
5036
5037 static int bnxt_hwrm_clear_vnic_filter(struct bnxt *bp)
5038 {
5039         struct hwrm_cfa_l2_filter_free_input *req;
5040         u16 i, j, num_of_vnics = 1; /* only vnic 0 supported */
5041         int rc;
5042
5043         /* Any associated ntuple filters will also be cleared by firmware. */
5044         rc = hwrm_req_init(bp, req, HWRM_CFA_L2_FILTER_FREE);
5045         if (rc)
5046                 return rc;
5047         hwrm_req_hold(bp, req);
5048         for (i = 0; i < num_of_vnics; i++) {
5049                 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
5050
5051                 for (j = 0; j < vnic->uc_filter_count; j++) {
5052                         req->l2_filter_id = vnic->fw_l2_filter_id[j];
5053
5054                         rc = hwrm_req_send(bp, req);
5055                 }
5056                 vnic->uc_filter_count = 0;
5057         }
5058         hwrm_req_drop(bp, req);
5059         return rc;
5060 }
5061
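/* Configure TPA (hardware GRO/LRO aggregation) for a VNIC.  A zero
 * tpa_flags disables aggregation; otherwise the maximum aggregation
 * segment count is derived from the MTU and the RX page size below.
 */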
5062 static int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, u16 vnic_id, u32 tpa_flags)
5063 {
5064         struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
5065         u16 max_aggs = VNIC_TPA_CFG_REQ_MAX_AGGS_MAX;
5066         struct hwrm_vnic_tpa_cfg_input *req;
5067         int rc;
5068
5069         if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
5070                 return 0;
5071
5072         rc = hwrm_req_init(bp, req, HWRM_VNIC_TPA_CFG);
5073         if (rc)
5074                 return rc;
5075
5076         if (tpa_flags) {
5077                 u16 mss = bp->dev->mtu - 40;
5078                 u32 nsegs, n, segs = 0, flags;
5079
5080                 flags = VNIC_TPA_CFG_REQ_FLAGS_TPA |
5081                         VNIC_TPA_CFG_REQ_FLAGS_ENCAP_TPA |
5082                         VNIC_TPA_CFG_REQ_FLAGS_RSC_WND_UPDATE |
5083                         VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_ECN |
5084                         VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_SAME_GRE_SEQ;
5085                 if (tpa_flags & BNXT_FLAG_GRO)
5086                         flags |= VNIC_TPA_CFG_REQ_FLAGS_GRO;
5087
5088                 req->flags = cpu_to_le32(flags);
5089
5090                 req->enables =
5091                         cpu_to_le32(VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_SEGS |
5092                                     VNIC_TPA_CFG_REQ_ENABLES_MAX_AGGS |
5093                                     VNIC_TPA_CFG_REQ_ENABLES_MIN_AGG_LEN);
5094
5095                 /* The number of segs is in log2 units, and the first
5096                  * packet is not counted as part of these units.
5097                  */
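                /* Illustrative example (the constants are build-time values
                 * and may differ): with a 1500-byte MTU (mss = 1460), a 4K
                 * BNXT_RX_PAGE_SIZE and MAX_SKB_FRAGS of 17, n = 4096 / 1460
                 * = 2 and nsegs = (17 - 1) * 2 = 32, giving max_agg_segs =
                 * ilog2(32) = 5 on pre-P5 chips.
                 */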
5098                 if (mss <= BNXT_RX_PAGE_SIZE) {
5099                         n = BNXT_RX_PAGE_SIZE / mss;
5100                         nsegs = (MAX_SKB_FRAGS - 1) * n;
5101                 } else {
5102                         n = mss / BNXT_RX_PAGE_SIZE;
5103                         if (mss & (BNXT_RX_PAGE_SIZE - 1))
5104                                 n++;
5105                         nsegs = (MAX_SKB_FRAGS - n) / n;
5106                 }
5107
5108                 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5109                         segs = MAX_TPA_SEGS_P5;
5110                         max_aggs = bp->max_tpa;
5111                 } else {
5112                         segs = ilog2(nsegs);
5113                 }
5114                 req->max_agg_segs = cpu_to_le16(segs);
5115                 req->max_aggs = cpu_to_le16(max_aggs);
5116
5117                 req->min_agg_len = cpu_to_le32(512);
5118         }
5119         req->vnic_id = cpu_to_le16(vnic->fw_vnic_id);
5120
5121         return hwrm_req_send(bp, req);
5122 }
5123
5124 static u16 bnxt_cp_ring_from_grp(struct bnxt *bp, struct bnxt_ring_struct *ring)
5125 {
5126         struct bnxt_ring_grp_info *grp_info;
5127
5128         grp_info = &bp->grp_info[ring->grp_idx];
5129         return grp_info->cp_fw_ring_id;
5130 }
5131
5132 static u16 bnxt_cp_ring_for_rx(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
5133 {
5134         if (bp->flags & BNXT_FLAG_CHIP_P5) {
5135                 struct bnxt_napi *bnapi = rxr->bnapi;
5136                 struct bnxt_cp_ring_info *cpr;
5137
5138                 cpr = bnapi->cp_ring.cp_ring_arr[BNXT_RX_HDL];
5139                 return cpr->cp_ring_struct.fw_ring_id;
5140         } else {
5141                 return bnxt_cp_ring_from_grp(bp, &rxr->rx_ring_struct);
5142         }
5143 }
5144
5145 static u16 bnxt_cp_ring_for_tx(struct bnxt *bp, struct bnxt_tx_ring_info *txr)
5146 {
5147         if (bp->flags & BNXT_FLAG_CHIP_P5) {
5148                 struct bnxt_napi *bnapi = txr->bnapi;
5149                 struct bnxt_cp_ring_info *cpr;
5150
5151                 cpr = bnapi->cp_ring.cp_ring_arr[BNXT_TX_HDL];
5152                 return cpr->cp_ring_struct.fw_ring_id;
5153         } else {
5154                 return bnxt_cp_ring_from_grp(bp, &txr->tx_ring_struct);
5155         }
5156 }
5157
5158 static int bnxt_alloc_rss_indir_tbl(struct bnxt *bp)
5159 {
5160         int entries;
5161
5162         if (bp->flags & BNXT_FLAG_CHIP_P5)
5163                 entries = BNXT_MAX_RSS_TABLE_ENTRIES_P5;
5164         else
5165                 entries = HW_HASH_INDEX_SIZE;
5166
5167         bp->rss_indir_tbl_entries = entries;
5168         bp->rss_indir_tbl = kmalloc_array(entries, sizeof(*bp->rss_indir_tbl),
5169                                           GFP_KERNEL);
5170         if (!bp->rss_indir_tbl)
5171                 return -ENOMEM;
5172         return 0;
5173 }
5174
5175 static void bnxt_set_dflt_rss_indir_tbl(struct bnxt *bp)
5176 {
5177         u16 max_rings, max_entries, pad, i;
5178
5179         if (!bp->rx_nr_rings)
5180                 return;
5181
5182         if (BNXT_CHIP_TYPE_NITRO_A0(bp))
5183                 max_rings = bp->rx_nr_rings - 1;
5184         else
5185                 max_rings = bp->rx_nr_rings;
5186
5187         max_entries = bnxt_get_rxfh_indir_size(bp->dev);
5188
5189         for (i = 0; i < max_entries; i++)
5190                 bp->rss_indir_tbl[i] = ethtool_rxfh_indir_default(i, max_rings);
5191
5192         pad = bp->rss_indir_tbl_entries - max_entries;
5193         if (pad)
5194                 memset(&bp->rss_indir_tbl[i], 0, pad * sizeof(u16));
5195 }
5196
5197 static u16 bnxt_get_max_rss_ring(struct bnxt *bp)
5198 {
5199         u16 i, tbl_size, max_ring = 0;
5200
5201         if (!bp->rss_indir_tbl)
5202                 return 0;
5203
5204         tbl_size = bnxt_get_rxfh_indir_size(bp->dev);
5205         for (i = 0; i < tbl_size; i++)
5206                 max_ring = max(max_ring, bp->rss_indir_tbl[i]);
5207         return max_ring;
5208 }
5209
5210 int bnxt_get_nr_rss_ctxs(struct bnxt *bp, int rx_rings)
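/* Return the number of RSS contexts needed for the given number of RX
 * rings.  P5 chips need one context per BNXT_RSS_TABLE_ENTRIES_P5 rings
 * (e.g., assuming that constant is 64, 1-64 rings need one context and
 * 65-128 need two); Nitro A0 needs two contexts, all other chips one.
 */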
5211 {
5212         if (bp->flags & BNXT_FLAG_CHIP_P5)
5213                 return DIV_ROUND_UP(rx_rings, BNXT_RSS_TABLE_ENTRIES_P5);
5214         if (BNXT_CHIP_TYPE_NITRO_A0(bp))
5215                 return 2;
5216         return 1;
5217 }
5218
5219 static void bnxt_fill_hw_rss_tbl(struct bnxt *bp, struct bnxt_vnic_info *vnic)
5220 {
5221         bool no_rss = !(vnic->flags & BNXT_VNIC_RSS_FLAG);
5222         u16 i, j;
5223
5224         /* Fill the RSS indirection table with ring group ids */
5225         for (i = 0, j = 0; i < HW_HASH_INDEX_SIZE; i++) {
5226                 if (!no_rss)
5227                         j = bp->rss_indir_tbl[i];
5228                 vnic->rss_table[i] = cpu_to_le16(vnic->fw_grp_ids[j]);
5229         }
5230 }
5231
5232 static void bnxt_fill_hw_rss_tbl_p5(struct bnxt *bp,
5233                                     struct bnxt_vnic_info *vnic)
5234 {
5235         __le16 *ring_tbl = vnic->rss_table;
5236         struct bnxt_rx_ring_info *rxr;
5237         u16 tbl_size, i;
5238
5239         tbl_size = bnxt_get_rxfh_indir_size(bp->dev);
5240
5241         for (i = 0; i < tbl_size; i++) {
5242                 u16 ring_id, j;
5243
5244                 j = bp->rss_indir_tbl[i];
5245                 rxr = &bp->rx_ring[j];
5246
5247                 ring_id = rxr->rx_ring_struct.fw_ring_id;
5248                 *ring_tbl++ = cpu_to_le16(ring_id);
5249                 ring_id = bnxt_cp_ring_for_rx(bp, rxr);
5250                 *ring_tbl++ = cpu_to_le16(ring_id);
5251         }
5252 }
5253
5254 static void
5255 __bnxt_hwrm_vnic_set_rss(struct bnxt *bp, struct hwrm_vnic_rss_cfg_input *req,
5256                          struct bnxt_vnic_info *vnic)
5257 {
5258         if (bp->flags & BNXT_FLAG_CHIP_P5)
5259                 bnxt_fill_hw_rss_tbl_p5(bp, vnic);
5260         else
5261                 bnxt_fill_hw_rss_tbl(bp, vnic);
5262
5263         if (bp->rss_hash_delta) {
5264                 req->hash_type = cpu_to_le32(bp->rss_hash_delta);
5265                 if (bp->rss_hash_cfg & bp->rss_hash_delta)
5266                         req->flags |= VNIC_RSS_CFG_REQ_FLAGS_HASH_TYPE_INCLUDE;
5267                 else
5268                         req->flags |= VNIC_RSS_CFG_REQ_FLAGS_HASH_TYPE_EXCLUDE;
5269         } else {
5270                 req->hash_type = cpu_to_le32(bp->rss_hash_cfg);
5271         }
5272         req->hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT;
5273         req->ring_grp_tbl_addr = cpu_to_le64(vnic->rss_table_dma_addr);
5274         req->hash_key_tbl_addr = cpu_to_le64(vnic->rss_hash_key_dma_addr);
5275 }
5276
5277 static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, u16 vnic_id, bool set_rss)
5278 {
5279         struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
5280         struct hwrm_vnic_rss_cfg_input *req;
5281         int rc;
5282
5283         if ((bp->flags & BNXT_FLAG_CHIP_P5) ||
5284             vnic->fw_rss_cos_lb_ctx[0] == INVALID_HW_RING_ID)
5285                 return 0;
5286
5287         rc = hwrm_req_init(bp, req, HWRM_VNIC_RSS_CFG);
5288         if (rc)
5289                 return rc;
5290
5291         if (set_rss)
5292                 __bnxt_hwrm_vnic_set_rss(bp, req, vnic);
5293         req->rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
5294         return hwrm_req_send(bp, req);
5295 }
5296
5297 static int bnxt_hwrm_vnic_set_rss_p5(struct bnxt *bp, u16 vnic_id, bool set_rss)
5298 {
5299         struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
5300         struct hwrm_vnic_rss_cfg_input *req;
5301         dma_addr_t ring_tbl_map;
5302         u32 i, nr_ctxs;
5303         int rc;
5304
5305         rc = hwrm_req_init(bp, req, HWRM_VNIC_RSS_CFG);
5306         if (rc)
5307                 return rc;
5308
5309         req->vnic_id = cpu_to_le16(vnic->fw_vnic_id);
5310         if (!set_rss)
5311                 return hwrm_req_send(bp, req);
5312
5313         __bnxt_hwrm_vnic_set_rss(bp, req, vnic);
5314         ring_tbl_map = vnic->rss_table_dma_addr;
5315         nr_ctxs = bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings);
5316
5317         hwrm_req_hold(bp, req);
5318         for (i = 0; i < nr_ctxs; ring_tbl_map += BNXT_RSS_TABLE_SIZE_P5, i++) {
5319                 req->ring_grp_tbl_addr = cpu_to_le64(ring_tbl_map);
5320                 req->ring_table_pair_index = i;
5321                 req->rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[i]);
5322                 rc = hwrm_req_send(bp, req);
5323                 if (rc)
5324                         goto exit;
5325         }
5326
5327 exit:
5328         hwrm_req_drop(bp, req);
5329         return rc;
5330 }
5331
5332 static void bnxt_hwrm_update_rss_hash_cfg(struct bnxt *bp)
5333 {
5334         struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
5335         struct hwrm_vnic_rss_qcfg_output *resp;
5336         struct hwrm_vnic_rss_qcfg_input *req;
5337
5338         if (hwrm_req_init(bp, req, HWRM_VNIC_RSS_QCFG))
5339                 return;
5340
5341         req->vnic_id = cpu_to_le16(vnic->fw_vnic_id);
5342         /* All contexts are configured with the same hash_type; context 0 always exists */
5343         req->rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
5344         resp = hwrm_req_hold(bp, req);
5345         if (!hwrm_req_send(bp, req)) {
5346                 bp->rss_hash_cfg = le32_to_cpu(resp->hash_type) ?: bp->rss_hash_cfg;
5347                 bp->rss_hash_delta = 0;
5348         }
5349         hwrm_req_drop(bp, req);
5350 }
5351
5352 static int bnxt_hwrm_vnic_set_hds(struct bnxt *bp, u16 vnic_id)
5353 {
5354         struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
5355         struct hwrm_vnic_plcmodes_cfg_input *req;
5356         int rc;
5357
5358         rc = hwrm_req_init(bp, req, HWRM_VNIC_PLCMODES_CFG);
5359         if (rc)
5360                 return rc;
5361
5362         req->flags = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT);
5363         req->enables = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID);
5364
5365         if (BNXT_RX_PAGE_MODE(bp)) {
5366                 req->jumbo_thresh = cpu_to_le16(bp->rx_buf_use_size);
5367         } else {
5368                 req->flags |= cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 |
5369                                           VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6);
5370                 req->enables |=
5371                         cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID);
5372                 req->jumbo_thresh = cpu_to_le16(bp->rx_copy_thresh);
5373                 req->hds_threshold = cpu_to_le16(bp->rx_copy_thresh);
5374         }
5375         req->vnic_id = cpu_to_le32(vnic->fw_vnic_id);
5376         return hwrm_req_send(bp, req);
5377 }
5378
5379 static void bnxt_hwrm_vnic_ctx_free_one(struct bnxt *bp, u16 vnic_id,
5380                                         u16 ctx_idx)
5381 {
5382         struct hwrm_vnic_rss_cos_lb_ctx_free_input *req;
5383
5384         if (hwrm_req_init(bp, req, HWRM_VNIC_RSS_COS_LB_CTX_FREE))
5385                 return;
5386
5387         req->rss_cos_lb_ctx_id =
5388                 cpu_to_le16(bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx]);
5389
5390         hwrm_req_send(bp, req);
5391         bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] = INVALID_HW_RING_ID;
5392 }
5393
5394 static void bnxt_hwrm_vnic_ctx_free(struct bnxt *bp)
5395 {
5396         int i, j;
5397
5398         for (i = 0; i < bp->nr_vnics; i++) {
5399                 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
5400
5401                 for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++) {
5402                         if (vnic->fw_rss_cos_lb_ctx[j] != INVALID_HW_RING_ID)
5403                                 bnxt_hwrm_vnic_ctx_free_one(bp, i, j);
5404                 }
5405         }
5406         bp->rsscos_nr_ctxs = 0;
5407 }
5408
5409 static int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, u16 vnic_id, u16 ctx_idx)
5410 {
5411         struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp;
5412         struct hwrm_vnic_rss_cos_lb_ctx_alloc_input *req;
5413         int rc;
5414
5415         rc = hwrm_req_init(bp, req, HWRM_VNIC_RSS_COS_LB_CTX_ALLOC);
5416         if (rc)
5417                 return rc;
5418
5419         resp = hwrm_req_hold(bp, req);
5420         rc = hwrm_req_send(bp, req);
5421         if (!rc)
5422                 bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] =
5423                         le16_to_cpu(resp->rss_cos_lb_ctx_id);
5424         hwrm_req_drop(bp, req);
5425
5426         return rc;
5427 }
5428
5429 static u32 bnxt_get_roce_vnic_mode(struct bnxt *bp)
5430 {
5431         if (bp->flags & BNXT_FLAG_ROCE_MIRROR_CAP)
5432                 return VNIC_CFG_REQ_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_MODE;
5433         return VNIC_CFG_REQ_FLAGS_ROCE_DUAL_VNIC_MODE;
5434 }
5435
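/* Configure a VNIC in firmware.  On P5 chips the VNIC is pointed at its
 * default RX and completion rings; on older chips it is given a default
 * ring group and RSS/COS rules.  In both cases the MRU, VLAN-stripping
 * mode and (for VNIC 0 with a RoCE ULP registered) the RoCE VNIC mode are
 * set.
 */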
5436 int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id)
5437 {
5438         struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
5439         struct hwrm_vnic_cfg_input *req;
5440         unsigned int ring = 0, grp_idx;
5441         u16 def_vlan = 0;
5442         int rc;
5443
5444         rc = hwrm_req_init(bp, req, HWRM_VNIC_CFG);
5445         if (rc)
5446                 return rc;
5447
5448         if (bp->flags & BNXT_FLAG_CHIP_P5) {
5449                 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[0];
5450
5451                 req->default_rx_ring_id =
5452                         cpu_to_le16(rxr->rx_ring_struct.fw_ring_id);
5453                 req->default_cmpl_ring_id =
5454                         cpu_to_le16(bnxt_cp_ring_for_rx(bp, rxr));
5455                 req->enables =
5456                         cpu_to_le32(VNIC_CFG_REQ_ENABLES_DEFAULT_RX_RING_ID |
5457                                     VNIC_CFG_REQ_ENABLES_DEFAULT_CMPL_RING_ID);
5458                 goto vnic_mru;
5459         }
5460         req->enables = cpu_to_le32(VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP);
5461         /* Only RSS is supported for now; COS & LB are TBD */
5462         if (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID) {
5463                 req->rss_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
5464                 req->enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE |
5465                                            VNIC_CFG_REQ_ENABLES_MRU);
5466         } else if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG) {
5467                 req->rss_rule =
5468                         cpu_to_le16(bp->vnic_info[0].fw_rss_cos_lb_ctx[0]);
5469                 req->enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE |
5470                                            VNIC_CFG_REQ_ENABLES_MRU);
5471                 req->flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_RSS_DFLT_CR_MODE);
5472         } else {
5473                 req->rss_rule = cpu_to_le16(0xffff);
5474         }
5475
5476         if (BNXT_CHIP_TYPE_NITRO_A0(bp) &&
5477             (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID)) {
5478                 req->cos_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[1]);
5479                 req->enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_COS_RULE);
5480         } else {
5481                 req->cos_rule = cpu_to_le16(0xffff);
5482         }
5483
5484         if (vnic->flags & BNXT_VNIC_RSS_FLAG)
5485                 ring = 0;
5486         else if (vnic->flags & BNXT_VNIC_RFS_FLAG)
5487                 ring = vnic_id - 1;
5488         else if ((vnic_id == 1) && BNXT_CHIP_TYPE_NITRO_A0(bp))
5489                 ring = bp->rx_nr_rings - 1;
5490
5491         grp_idx = bp->rx_ring[ring].bnapi->index;
5492         req->dflt_ring_grp = cpu_to_le16(bp->grp_info[grp_idx].fw_grp_id);
5493         req->lb_rule = cpu_to_le16(0xffff);
5494 vnic_mru:
5495         req->mru = cpu_to_le16(bp->dev->mtu + ETH_HLEN + VLAN_HLEN);
5496
5497         req->vnic_id = cpu_to_le16(vnic->fw_vnic_id);
5498 #ifdef CONFIG_BNXT_SRIOV
5499         if (BNXT_VF(bp))
5500                 def_vlan = bp->vf.vlan;
5501 #endif
5502         if ((bp->flags & BNXT_FLAG_STRIP_VLAN) || def_vlan)
5503                 req->flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE);
5504         if (!vnic_id && bnxt_ulp_registered(bp->edev))
5505                 req->flags |= cpu_to_le32(bnxt_get_roce_vnic_mode(bp));
5506
5507         return hwrm_req_send(bp, req);
5508 }
5509
5510 static void bnxt_hwrm_vnic_free_one(struct bnxt *bp, u16 vnic_id)
5511 {
5512         if (bp->vnic_info[vnic_id].fw_vnic_id != INVALID_HW_RING_ID) {
5513                 struct hwrm_vnic_free_input *req;
5514
5515                 if (hwrm_req_init(bp, req, HWRM_VNIC_FREE))
5516                         return;
5517
5518                 req->vnic_id =
5519                         cpu_to_le32(bp->vnic_info[vnic_id].fw_vnic_id);
5520
5521                 hwrm_req_send(bp, req);
5522                 bp->vnic_info[vnic_id].fw_vnic_id = INVALID_HW_RING_ID;
5523         }
5524 }
5525
5526 static void bnxt_hwrm_vnic_free(struct bnxt *bp)
5527 {
5528         u16 i;
5529
5530         for (i = 0; i < bp->nr_vnics; i++)
5531                 bnxt_hwrm_vnic_free_one(bp, i);
5532 }
5533
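/* Allocate a VNIC in firmware and, on pre-P5 chips, map @nr_rings ring
 * groups starting at @start_rx_ring_idx to it.  The firmware VNIC ID is
 * stored in the vnic_info entry on success.
 */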
5534 static int bnxt_hwrm_vnic_alloc(struct bnxt *bp, u16 vnic_id,
5535                                 unsigned int start_rx_ring_idx,
5536                                 unsigned int nr_rings)
5537 {
5538         unsigned int i, j, grp_idx, end_idx = start_rx_ring_idx + nr_rings;
5539         struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
5540         struct hwrm_vnic_alloc_output *resp;
5541         struct hwrm_vnic_alloc_input *req;
5542         int rc;
5543
5544         rc = hwrm_req_init(bp, req, HWRM_VNIC_ALLOC);
5545         if (rc)
5546                 return rc;
5547
5548         if (bp->flags & BNXT_FLAG_CHIP_P5)
5549                 goto vnic_no_ring_grps;
5550
5551         /* map ring groups to this vnic */
5552         for (i = start_rx_ring_idx, j = 0; i < end_idx; i++, j++) {
5553                 grp_idx = bp->rx_ring[i].bnapi->index;
5554                 if (bp->grp_info[grp_idx].fw_grp_id == INVALID_HW_RING_ID) {
5555                         netdev_err(bp->dev, "Not enough ring groups avail:%x req:%x\n",
5556                                    j, nr_rings);
5557                         break;
5558                 }
5559                 vnic->fw_grp_ids[j] = bp->grp_info[grp_idx].fw_grp_id;
5560         }
5561
5562 vnic_no_ring_grps:
5563         for (i = 0; i < BNXT_MAX_CTX_PER_VNIC; i++)
5564                 vnic->fw_rss_cos_lb_ctx[i] = INVALID_HW_RING_ID;
5565         if (vnic_id == 0)
5566                 req->flags = cpu_to_le32(VNIC_ALLOC_REQ_FLAGS_DEFAULT);
5567
5568         resp = hwrm_req_hold(bp, req);
5569         rc = hwrm_req_send(bp, req);
5570         if (!rc)
5571                 vnic->fw_vnic_id = le32_to_cpu(resp->vnic_id);
5572         hwrm_req_drop(bp, req);
5573         return rc;
5574 }
5575
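/* Query VNIC capabilities and cache them in bp->flags / bp->fw_cap
 * (new RSS mode, RoCE mirroring, VLAN stripping, RSS hash type delta),
 * along with the maximum TPA aggregations and the per-ring stats size.
 */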
5576 static int bnxt_hwrm_vnic_qcaps(struct bnxt *bp)
5577 {
5578         struct hwrm_vnic_qcaps_output *resp;
5579         struct hwrm_vnic_qcaps_input *req;
5580         int rc;
5581
5582         bp->hw_ring_stats_size = sizeof(struct ctx_hw_stats);
5583         bp->flags &= ~(BNXT_FLAG_NEW_RSS_CAP | BNXT_FLAG_ROCE_MIRROR_CAP);
5584         if (bp->hwrm_spec_code < 0x10600)
5585                 return 0;
5586
5587         rc = hwrm_req_init(bp, req, HWRM_VNIC_QCAPS);
5588         if (rc)
5589                 return rc;
5590
5591         resp = hwrm_req_hold(bp, req);
5592         rc = hwrm_req_send(bp, req);
5593         if (!rc) {
5594                 u32 flags = le32_to_cpu(resp->flags);
5595
5596                 if (!(bp->flags & BNXT_FLAG_CHIP_P5) &&
5597                     (flags & VNIC_QCAPS_RESP_FLAGS_RSS_DFLT_CR_CAP))
5598                         bp->flags |= BNXT_FLAG_NEW_RSS_CAP;
5599                 if (flags &
5600                     VNIC_QCAPS_RESP_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_CAP)
5601                         bp->flags |= BNXT_FLAG_ROCE_MIRROR_CAP;
5602
5603                 /* Older P5 fw before EXT_HW_STATS support did not set
5604                  * VLAN_STRIP_CAP properly.
5605                  */
5606                 if ((flags & VNIC_QCAPS_RESP_FLAGS_VLAN_STRIP_CAP) ||
5607                     (BNXT_CHIP_P5_THOR(bp) &&
5608                      !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED)))
5609                         bp->fw_cap |= BNXT_FW_CAP_VLAN_RX_STRIP;
5610                 if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_HASH_TYPE_DELTA_CAP)
5611                         bp->fw_cap |= BNXT_FW_CAP_RSS_HASH_TYPE_DELTA;
5612                 bp->max_tpa_v2 = le16_to_cpu(resp->max_aggs_supported);
5613                 if (bp->max_tpa_v2) {
5614                         if (BNXT_CHIP_P5_THOR(bp))
5615                                 bp->hw_ring_stats_size = BNXT_RING_STATS_SIZE_P5;
5616                         else
5617                                 bp->hw_ring_stats_size = BNXT_RING_STATS_SIZE_P5_SR2;
5618                 }
5619         }
5620         hwrm_req_drop(bp, req);
5621         return rc;
5622 }
5623
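/* Allocate one firmware ring group per RX ring (pre-P5 chips only),
 * tying together the completion, RX, aggregation and stats contexts.
 */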
5624 static int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp)
5625 {
5626         struct hwrm_ring_grp_alloc_output *resp;
5627         struct hwrm_ring_grp_alloc_input *req;
5628         int rc;
5629         u16 i;
5630
5631         if (bp->flags & BNXT_FLAG_CHIP_P5)
5632                 return 0;
5633
5634         rc = hwrm_req_init(bp, req, HWRM_RING_GRP_ALLOC);
5635         if (rc)
5636                 return rc;
5637
5638         resp = hwrm_req_hold(bp, req);
5639         for (i = 0; i < bp->rx_nr_rings; i++) {
5640                 unsigned int grp_idx = bp->rx_ring[i].bnapi->index;
5641
5642                 req->cr = cpu_to_le16(bp->grp_info[grp_idx].cp_fw_ring_id);
5643                 req->rr = cpu_to_le16(bp->grp_info[grp_idx].rx_fw_ring_id);
5644                 req->ar = cpu_to_le16(bp->grp_info[grp_idx].agg_fw_ring_id);
5645                 req->sc = cpu_to_le16(bp->grp_info[grp_idx].fw_stats_ctx);
5646
5647                 rc = hwrm_req_send(bp, req);
5648
5649                 if (rc)
5650                         break;
5651
5652                 bp->grp_info[grp_idx].fw_grp_id =
5653                         le32_to_cpu(resp->ring_group_id);
5654         }
5655         hwrm_req_drop(bp, req);
5656         return rc;
5657 }
5658
5659 static void bnxt_hwrm_ring_grp_free(struct bnxt *bp)
5660 {
5661         struct hwrm_ring_grp_free_input *req;
5662         u16 i;
5663
5664         if (!bp->grp_info || (bp->flags & BNXT_FLAG_CHIP_P5))
5665                 return;
5666
5667         if (hwrm_req_init(bp, req, HWRM_RING_GRP_FREE))
5668                 return;
5669
5670         hwrm_req_hold(bp, req);
5671         for (i = 0; i < bp->cp_nr_rings; i++) {
5672                 if (bp->grp_info[i].fw_grp_id == INVALID_HW_RING_ID)
5673                         continue;
5674                 req->ring_group_id =
5675                         cpu_to_le32(bp->grp_info[i].fw_grp_id);
5676
5677                 hwrm_req_send(bp, req);
5678                 bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
5679         }
5680         hwrm_req_drop(bp, req);
5681 }
5682
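/* Send HWRM_RING_ALLOC for one TX/RX/AGG/CMPL/NQ ring and record the
 * firmware ring ID in @ring on success.
 */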
5683 static int hwrm_ring_alloc_send_msg(struct bnxt *bp,
5684                                     struct bnxt_ring_struct *ring,
5685                                     u32 ring_type, u32 map_index)
5686 {
5687         struct hwrm_ring_alloc_output *resp;
5688         struct hwrm_ring_alloc_input *req;
5689         struct bnxt_ring_mem_info *rmem = &ring->ring_mem;
5690         struct bnxt_ring_grp_info *grp_info;
5691         int rc, err = 0;
5692         u16 ring_id;
5693
5694         rc = hwrm_req_init(bp, req, HWRM_RING_ALLOC);
5695         if (rc)
5696                 goto exit;
5697
5698         req->enables = 0;
5699         if (rmem->nr_pages > 1) {
5700                 req->page_tbl_addr = cpu_to_le64(rmem->pg_tbl_map);
5701                 /* Page size is in log2 units */
5702                 req->page_size = BNXT_PAGE_SHIFT;
5703                 req->page_tbl_depth = 1;
5704         } else {
5705                 req->page_tbl_addr = cpu_to_le64(rmem->dma_arr[0]);
5706         }
5707         req->fbo = 0;
5708         /* Association of ring index with doorbell index and MSIX number */
5709         req->logical_id = cpu_to_le16(map_index);
5710
5711         switch (ring_type) {
5712         case HWRM_RING_ALLOC_TX: {
5713                 struct bnxt_tx_ring_info *txr;
5714
5715                 txr = container_of(ring, struct bnxt_tx_ring_info,
5716                                    tx_ring_struct);
5717                 req->ring_type = RING_ALLOC_REQ_RING_TYPE_TX;
5718                 /* Association of transmit ring with completion ring */
5719                 grp_info = &bp->grp_info[ring->grp_idx];
5720                 req->cmpl_ring_id = cpu_to_le16(bnxt_cp_ring_for_tx(bp, txr));
5721                 req->length = cpu_to_le32(bp->tx_ring_mask + 1);
5722                 req->stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
5723                 req->queue_id = cpu_to_le16(ring->queue_id);
5724                 break;
5725         }
5726         case HWRM_RING_ALLOC_RX:
5727                 req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
5728                 req->length = cpu_to_le32(bp->rx_ring_mask + 1);
5729                 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5730                         u16 flags = 0;
5731
5732                         /* Association of rx ring with stats context */
5733                         grp_info = &bp->grp_info[ring->grp_idx];
5734                         req->rx_buf_size = cpu_to_le16(bp->rx_buf_use_size);
5735                         req->stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
5736                         req->enables |= cpu_to_le32(
5737                                 RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID);
5738                         if (NET_IP_ALIGN == 2)
5739                                 flags = RING_ALLOC_REQ_FLAGS_RX_SOP_PAD;
5740                         req->flags = cpu_to_le16(flags);
5741                 }
5742                 break;
5743         case HWRM_RING_ALLOC_AGG:
5744                 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5745                         req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX_AGG;
5746                         /* Association of agg ring with rx ring */
5747                         grp_info = &bp->grp_info[ring->grp_idx];
5748                         req->rx_ring_id = cpu_to_le16(grp_info->rx_fw_ring_id);
5749                         req->rx_buf_size = cpu_to_le16(BNXT_RX_PAGE_SIZE);
5750                         req->stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
5751                         req->enables |= cpu_to_le32(
5752                                 RING_ALLOC_REQ_ENABLES_RX_RING_ID_VALID |
5753                                 RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID);
5754                 } else {
5755                         req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
5756                 }
5757                 req->length = cpu_to_le32(bp->rx_agg_ring_mask + 1);
5758                 break;
5759         case HWRM_RING_ALLOC_CMPL:
5760                 req->ring_type = RING_ALLOC_REQ_RING_TYPE_L2_CMPL;
5761                 req->length = cpu_to_le32(bp->cp_ring_mask + 1);
5762                 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5763                         /* Association of cp ring with nq */
5764                         grp_info = &bp->grp_info[map_index];
5765                         req->nq_ring_id = cpu_to_le16(grp_info->cp_fw_ring_id);
5766                         req->cq_handle = cpu_to_le64(ring->handle);
5767                         req->enables |= cpu_to_le32(
5768                                 RING_ALLOC_REQ_ENABLES_NQ_RING_ID_VALID);
5769                 } else if (bp->flags & BNXT_FLAG_USING_MSIX) {
5770                         req->int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
5771                 }
5772                 break;
5773         case HWRM_RING_ALLOC_NQ:
5774                 req->ring_type = RING_ALLOC_REQ_RING_TYPE_NQ;
5775                 req->length = cpu_to_le32(bp->cp_ring_mask + 1);
5776                 if (bp->flags & BNXT_FLAG_USING_MSIX)
5777                         req->int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
5778                 break;
5779         default:
5780                 netdev_err(bp->dev, "hwrm alloc invalid ring type %d\n",
5781                            ring_type);
5782                 return -1;
5783         }
5784
5785         resp = hwrm_req_hold(bp, req);
5786         rc = hwrm_req_send(bp, req);
5787         err = le16_to_cpu(resp->error_code);
5788         ring_id = le16_to_cpu(resp->ring_id);
5789         hwrm_req_drop(bp, req);
5790
5791 exit:
5792         if (rc || err) {
5793                 netdev_err(bp->dev, "hwrm_ring_alloc type %d failed. rc:%x err:%x\n",
5794                            ring_type, rc, err);
5795                 return -EIO;
5796         }
5797         ring->fw_ring_id = ring_id;
5798         return rc;
5799 }
5800
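/* Tell firmware which completion ring should receive async events, using
 * HWRM_FUNC_CFG on the PF or HWRM_FUNC_VF_CFG on a VF.
 */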
5801 static int bnxt_hwrm_set_async_event_cr(struct bnxt *bp, int idx)
5802 {
5803         int rc;
5804
5805         if (BNXT_PF(bp)) {
5806                 struct hwrm_func_cfg_input *req;
5807
5808                 rc = hwrm_req_init(bp, req, HWRM_FUNC_CFG);
5809                 if (rc)
5810                         return rc;
5811
5812                 req->fid = cpu_to_le16(0xffff);
5813                 req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_ASYNC_EVENT_CR);
5814                 req->async_event_cr = cpu_to_le16(idx);
5815                 return hwrm_req_send(bp, req);
5816         } else {
5817                 struct hwrm_func_vf_cfg_input *req;
5818
5819                 rc = hwrm_req_init(bp, req, HWRM_FUNC_VF_CFG);
5820                 if (rc)
5821                         return rc;
5822
5823                 req->enables =
5824                         cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_ASYNC_EVENT_CR);
5825                 req->async_event_cr = cpu_to_le16(idx);
5826                 return hwrm_req_send(bp, req);
5827         }
5828 }
5829
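/* Initialize the doorbell address and key for a ring: 64-bit doorbells at
 * the PF/VF offsets on P5 chips, legacy 32-bit doorbells at
 * map_idx * 0x80 otherwise.
 */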
5830 static void bnxt_set_db(struct bnxt *bp, struct bnxt_db_info *db, u32 ring_type,
5831                         u32 map_idx, u32 xid)
5832 {
5833         if (bp->flags & BNXT_FLAG_CHIP_P5) {
5834                 if (BNXT_PF(bp))
5835                         db->doorbell = bp->bar1 + DB_PF_OFFSET_P5;
5836                 else
5837                         db->doorbell = bp->bar1 + DB_VF_OFFSET_P5;
5838                 switch (ring_type) {
5839                 case HWRM_RING_ALLOC_TX:
5840                         db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SQ;
5841                         break;
5842                 case HWRM_RING_ALLOC_RX:
5843                 case HWRM_RING_ALLOC_AGG:
5844                         db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SRQ;
5845                         break;
5846                 case HWRM_RING_ALLOC_CMPL:
5847                         db->db_key64 = DBR_PATH_L2;
5848                         break;
5849                 case HWRM_RING_ALLOC_NQ:
5850                         db->db_key64 = DBR_PATH_L2;
5851                         break;
5852                 }
5853                 db->db_key64 |= (u64)xid << DBR_XID_SFT;
5854         } else {
5855                 db->doorbell = bp->bar1 + map_idx * 0x80;
5856                 switch (ring_type) {
5857                 case HWRM_RING_ALLOC_TX:
5858                         db->db_key32 = DB_KEY_TX;
5859                         break;
5860                 case HWRM_RING_ALLOC_RX:
5861                 case HWRM_RING_ALLOC_AGG:
5862                         db->db_key32 = DB_KEY_RX;
5863                         break;
5864                 case HWRM_RING_ALLOC_CMPL:
5865                         db->db_key32 = DB_KEY_CP;
5866                         break;
5867                 }
5868         }
5869 }
5870
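/* Allocate all rings in firmware in dependency order: NQ/CMPL rings first
 * (the per-ring IRQ is briefly disabled while each is set up), then TX,
 * RX and aggregation rings, programming the doorbells as we go.
 */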
5871 static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
5872 {
5873         bool agg_rings = !!(bp->flags & BNXT_FLAG_AGG_RINGS);
5874         int i, rc = 0;
5875         u32 type;
5876
5877         if (bp->flags & BNXT_FLAG_CHIP_P5)
5878                 type = HWRM_RING_ALLOC_NQ;
5879         else
5880                 type = HWRM_RING_ALLOC_CMPL;
5881         for (i = 0; i < bp->cp_nr_rings; i++) {
5882                 struct bnxt_napi *bnapi = bp->bnapi[i];
5883                 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
5884                 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
5885                 u32 map_idx = ring->map_idx;
5886                 unsigned int vector;
5887
5888                 vector = bp->irq_tbl[map_idx].vector;
5889                 disable_irq_nosync(vector);
5890                 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
5891                 if (rc) {
5892                         enable_irq(vector);
5893                         goto err_out;
5894                 }
5895                 bnxt_set_db(bp, &cpr->cp_db, type, map_idx, ring->fw_ring_id);
5896                 bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons);
5897                 enable_irq(vector);
5898                 bp->grp_info[i].cp_fw_ring_id = ring->fw_ring_id;
5899
5900                 if (!i) {
5901                         rc = bnxt_hwrm_set_async_event_cr(bp, ring->fw_ring_id);
5902                         if (rc)
5903                                 netdev_warn(bp->dev, "Failed to set async event completion ring.\n");
5904                 }
5905         }
5906
5907         type = HWRM_RING_ALLOC_TX;
5908         for (i = 0; i < bp->tx_nr_rings; i++) {
5909                 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
5910                 struct bnxt_ring_struct *ring;
5911                 u32 map_idx;
5912
5913                 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5914                         struct bnxt_napi *bnapi = txr->bnapi;
5915                         struct bnxt_cp_ring_info *cpr, *cpr2;
5916                         u32 type2 = HWRM_RING_ALLOC_CMPL;
5917
5918                         cpr = &bnapi->cp_ring;
5919                         cpr2 = cpr->cp_ring_arr[BNXT_TX_HDL];
5920                         ring = &cpr2->cp_ring_struct;
5921                         ring->handle = BNXT_TX_HDL;
5922                         map_idx = bnapi->index;
5923                         rc = hwrm_ring_alloc_send_msg(bp, ring, type2, map_idx);
5924                         if (rc)
5925                                 goto err_out;
5926                         bnxt_set_db(bp, &cpr2->cp_db, type2, map_idx,
5927                                     ring->fw_ring_id);
5928                         bnxt_db_cq(bp, &cpr2->cp_db, cpr2->cp_raw_cons);
5929                 }
5930                 ring = &txr->tx_ring_struct;
5931                 map_idx = i;
5932                 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
5933                 if (rc)
5934                         goto err_out;
5935                 bnxt_set_db(bp, &txr->tx_db, type, map_idx, ring->fw_ring_id);
5936         }
5937
5938         type = HWRM_RING_ALLOC_RX;
5939         for (i = 0; i < bp->rx_nr_rings; i++) {
5940                 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
5941                 struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
5942                 struct bnxt_napi *bnapi = rxr->bnapi;
5943                 u32 map_idx = bnapi->index;
5944
5945                 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
5946                 if (rc)
5947                         goto err_out;
5948                 bnxt_set_db(bp, &rxr->rx_db, type, map_idx, ring->fw_ring_id);
5949                 /* If we have agg rings, post agg buffers first. */
5950                 if (!agg_rings)
5951                         bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
5952                 bp->grp_info[map_idx].rx_fw_ring_id = ring->fw_ring_id;
5953                 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5954                         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
5955                         u32 type2 = HWRM_RING_ALLOC_CMPL;
5956                         struct bnxt_cp_ring_info *cpr2;
5957
5958                         cpr2 = cpr->cp_ring_arr[BNXT_RX_HDL];
5959                         ring = &cpr2->cp_ring_struct;
5960                         ring->handle = BNXT_RX_HDL;
5961                         rc = hwrm_ring_alloc_send_msg(bp, ring, type2, map_idx);
5962                         if (rc)
5963                                 goto err_out;
5964                         bnxt_set_db(bp, &cpr2->cp_db, type2, map_idx,
5965                                     ring->fw_ring_id);
5966                         bnxt_db_cq(bp, &cpr2->cp_db, cpr2->cp_raw_cons);
5967                 }
5968         }
5969
5970         if (agg_rings) {
5971                 type = HWRM_RING_ALLOC_AGG;
5972                 for (i = 0; i < bp->rx_nr_rings; i++) {
5973                         struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
5974                         struct bnxt_ring_struct *ring =
5975                                                 &rxr->rx_agg_ring_struct;
5976                         u32 grp_idx = ring->grp_idx;
5977                         u32 map_idx = grp_idx + bp->rx_nr_rings;
5978
5979                         rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
5980                         if (rc)
5981                                 goto err_out;
5982
5983                         bnxt_set_db(bp, &rxr->rx_agg_db, type, map_idx,
5984                                     ring->fw_ring_id);
5985                         bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
5986                         bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
5987                         bp->grp_info[grp_idx].agg_fw_ring_id = ring->fw_ring_id;
5988                 }
5989         }
5990 err_out:
5991         return rc;
5992 }
5993
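/* Send HWRM_RING_FREE for one ring; skipped entirely when firmware access
 * is not possible.
 */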
5994 static int hwrm_ring_free_send_msg(struct bnxt *bp,
5995                                    struct bnxt_ring_struct *ring,
5996                                    u32 ring_type, int cmpl_ring_id)
5997 {
5998         struct hwrm_ring_free_output *resp;
5999         struct hwrm_ring_free_input *req;
6000         u16 error_code = 0;
6001         int rc;
6002
6003         if (BNXT_NO_FW_ACCESS(bp))
6004                 return 0;
6005
6006         rc = hwrm_req_init(bp, req, HWRM_RING_FREE);
6007         if (rc)
6008                 goto exit;
6009
6010         req->cmpl_ring = cpu_to_le16(cmpl_ring_id);
6011         req->ring_type = ring_type;
6012         req->ring_id = cpu_to_le16(ring->fw_ring_id);
6013
6014         resp = hwrm_req_hold(bp, req);
6015         rc = hwrm_req_send(bp, req);
6016         error_code = le16_to_cpu(resp->error_code);
6017         hwrm_req_drop(bp, req);
6018 exit:
6019         if (rc || error_code) {
6020                 netdev_err(bp->dev, "hwrm_ring_free type %d failed. rc:%x err:%x\n",
6021                            ring_type, rc, error_code);
6022                 return -EIO;
6023         }
6024         return 0;
6025 }
6026
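/* Free all firmware rings in the reverse of the allocation order: TX, RX,
 * aggregation, then (after disabling interrupts) the completion/NQ rings.
 */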
6027 static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
6028 {
6029         u32 type;
6030         int i;
6031
6032         if (!bp->bnapi)
6033                 return;
6034
6035         for (i = 0; i < bp->tx_nr_rings; i++) {
6036                 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
6037                 struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
6038
6039                 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
6040                         u32 cmpl_ring_id = bnxt_cp_ring_for_tx(bp, txr);
6041
6042                         hwrm_ring_free_send_msg(bp, ring,
6043                                                 RING_FREE_REQ_RING_TYPE_TX,
6044                                                 close_path ? cmpl_ring_id :
6045                                                 INVALID_HW_RING_ID);
6046                         ring->fw_ring_id = INVALID_HW_RING_ID;
6047                 }
6048         }
6049
6050         for (i = 0; i < bp->rx_nr_rings; i++) {
6051                 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
6052                 struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
6053                 u32 grp_idx = rxr->bnapi->index;
6054
6055                 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
6056                         u32 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
6057
6058                         hwrm_ring_free_send_msg(bp, ring,
6059                                                 RING_FREE_REQ_RING_TYPE_RX,
6060                                                 close_path ? cmpl_ring_id :
6061                                                 INVALID_HW_RING_ID);
6062                         ring->fw_ring_id = INVALID_HW_RING_ID;
6063                         bp->grp_info[grp_idx].rx_fw_ring_id =
6064                                 INVALID_HW_RING_ID;
6065                 }
6066         }
6067
6068         if (bp->flags & BNXT_FLAG_CHIP_P5)
6069                 type = RING_FREE_REQ_RING_TYPE_RX_AGG;
6070         else
6071                 type = RING_FREE_REQ_RING_TYPE_RX;
6072         for (i = 0; i < bp->rx_nr_rings; i++) {
6073                 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
6074                 struct bnxt_ring_struct *ring = &rxr->rx_agg_ring_struct;
6075                 u32 grp_idx = rxr->bnapi->index;
6076
6077                 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
6078                         u32 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
6079
6080                         hwrm_ring_free_send_msg(bp, ring, type,
6081                                                 close_path ? cmpl_ring_id :
6082                                                 INVALID_HW_RING_ID);
6083                         ring->fw_ring_id = INVALID_HW_RING_ID;
6084                         bp->grp_info[grp_idx].agg_fw_ring_id =
6085                                 INVALID_HW_RING_ID;
6086                 }
6087         }
6088
6089         /* The completion rings are about to be freed.  After that the
6090          * IRQ doorbells will no longer work, so we need to disable
6091          * interrupts here.
6092          */
6093         bnxt_disable_int_sync(bp);
6094
6095         if (bp->flags & BNXT_FLAG_CHIP_P5)
6096                 type = RING_FREE_REQ_RING_TYPE_NQ;
6097         else
6098                 type = RING_FREE_REQ_RING_TYPE_L2_CMPL;
6099         for (i = 0; i < bp->cp_nr_rings; i++) {
6100                 struct bnxt_napi *bnapi = bp->bnapi[i];
6101                 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
6102                 struct bnxt_ring_struct *ring;
6103                 int j;
6104
6105                 for (j = 0; j < 2; j++) {
6106                         struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];
6107
6108                         if (cpr2) {
6109                                 ring = &cpr2->cp_ring_struct;
6110                                 if (ring->fw_ring_id == INVALID_HW_RING_ID)
6111                                         continue;
6112                                 hwrm_ring_free_send_msg(bp, ring,
6113                                         RING_FREE_REQ_RING_TYPE_L2_CMPL,
6114                                         INVALID_HW_RING_ID);
6115                                 ring->fw_ring_id = INVALID_HW_RING_ID;
6116                         }
6117                 }
6118                 ring = &cpr->cp_ring_struct;
6119                 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
6120                         hwrm_ring_free_send_msg(bp, ring, type,
6121                                                 INVALID_HW_RING_ID);
6122                         ring->fw_ring_id = INVALID_HW_RING_ID;
6123                         bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
6124                 }
6125         }
6126 }
6127
6128 static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
6129                            bool shared);
6130
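/* Read back the resources firmware has actually reserved for this
 * function and cache them in bp->hw_resc; on P5 chips the RX/TX counts
 * are trimmed to fit the reserved completion rings.
 */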
6131 static int bnxt_hwrm_get_rings(struct bnxt *bp)
6132 {
6133         struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
6134         struct hwrm_func_qcfg_output *resp;
6135         struct hwrm_func_qcfg_input *req;
6136         int rc;
6137
6138         if (bp->hwrm_spec_code < 0x10601)
6139                 return 0;
6140
6141         rc = hwrm_req_init(bp, req, HWRM_FUNC_QCFG);
6142         if (rc)
6143                 return rc;
6144
6145         req->fid = cpu_to_le16(0xffff);
6146         resp = hwrm_req_hold(bp, req);
6147         rc = hwrm_req_send(bp, req);
6148         if (rc) {
6149                 hwrm_req_drop(bp, req);
6150                 return rc;
6151         }
6152
6153         hw_resc->resv_tx_rings = le16_to_cpu(resp->alloc_tx_rings);
6154         if (BNXT_NEW_RM(bp)) {
6155                 u16 cp, stats;
6156
6157                 hw_resc->resv_rx_rings = le16_to_cpu(resp->alloc_rx_rings);
6158                 hw_resc->resv_hw_ring_grps =
6159                         le32_to_cpu(resp->alloc_hw_ring_grps);
6160                 hw_resc->resv_vnics = le16_to_cpu(resp->alloc_vnics);
6161                 cp = le16_to_cpu(resp->alloc_cmpl_rings);
6162                 stats = le16_to_cpu(resp->alloc_stat_ctx);
6163                 hw_resc->resv_irqs = cp;
6164                 if (bp->flags & BNXT_FLAG_CHIP_P5) {
6165                         int rx = hw_resc->resv_rx_rings;
6166                         int tx = hw_resc->resv_tx_rings;
6167
6168                         if (bp->flags & BNXT_FLAG_AGG_RINGS)
6169                                 rx >>= 1;
6170                         if (cp < (rx + tx)) {
6171                                 bnxt_trim_rings(bp, &rx, &tx, cp, false);
6172                                 if (bp->flags & BNXT_FLAG_AGG_RINGS)
6173                                         rx <<= 1;
6174                                 hw_resc->resv_rx_rings = rx;
6175                                 hw_resc->resv_tx_rings = tx;
6176                         }
6177                         hw_resc->resv_irqs = le16_to_cpu(resp->alloc_msix);
6178                         hw_resc->resv_hw_ring_grps = rx;
6179                 }
6180                 hw_resc->resv_cp_rings = cp;
6181                 hw_resc->resv_stat_ctxs = stats;
6182         }
6183         hwrm_req_drop(bp, req);
6184         return 0;
6185 }
6186
6187 int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings)
6188 {
6189         struct hwrm_func_qcfg_output *resp;
6190         struct hwrm_func_qcfg_input *req;
6191         int rc;
6192
6193         if (bp->hwrm_spec_code < 0x10601)
6194                 return 0;
6195
6196         rc = hwrm_req_init(bp, req, HWRM_FUNC_QCFG);
6197         if (rc)
6198                 return rc;
6199
6200         req->fid = cpu_to_le16(fid);
6201         resp = hwrm_req_hold(bp, req);
6202         rc = hwrm_req_send(bp, req);
6203         if (!rc)
6204                 *tx_rings = le16_to_cpu(resp->alloc_tx_rings);
6205
6206         hwrm_req_drop(bp, req);
6207         return rc;
6208 }
6209
6210 static bool bnxt_rfs_supported(struct bnxt *bp);
6211
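/* Build (but do not send) a HWRM_FUNC_CFG request reserving the given
 * ring/stat/VNIC counts for the PF; the caller sends or drops it.
 */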
6212 static struct hwrm_func_cfg_input *
6213 __bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
6214                              int ring_grps, int cp_rings, int stats, int vnics)
6215 {
6216         struct hwrm_func_cfg_input *req;
6217         u32 enables = 0;
6218
6219         if (hwrm_req_init(bp, req, HWRM_FUNC_CFG))
6220                 return NULL;
6221
6222         req->fid = cpu_to_le16(0xffff);
6223         enables |= tx_rings ? FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
6224         req->num_tx_rings = cpu_to_le16(tx_rings);
6225         if (BNXT_NEW_RM(bp)) {
6226                 enables |= rx_rings ? FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS : 0;
6227                 enables |= stats ? FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
6228                 if (bp->flags & BNXT_FLAG_CHIP_P5) {
6229                         enables |= cp_rings ? FUNC_CFG_REQ_ENABLES_NUM_MSIX : 0;
6230                         enables |= tx_rings + ring_grps ?
6231                                    FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
6232                         enables |= rx_rings ?
6233                                 FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
6234                 } else {
6235                         enables |= cp_rings ?
6236                                    FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
6237                         enables |= ring_grps ?
6238                                    FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS |
6239                                    FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
6240                 }
6241                 enables |= vnics ? FUNC_CFG_REQ_ENABLES_NUM_VNICS : 0;
6242
6243                 req->num_rx_rings = cpu_to_le16(rx_rings);
6244                 if (bp->flags & BNXT_FLAG_CHIP_P5) {
6245                         req->num_cmpl_rings = cpu_to_le16(tx_rings + ring_grps);
6246                         req->num_msix = cpu_to_le16(cp_rings);
6247                         req->num_rsscos_ctxs =
6248                                 cpu_to_le16(DIV_ROUND_UP(ring_grps, 64));
6249                 } else {
6250                         req->num_cmpl_rings = cpu_to_le16(cp_rings);
6251                         req->num_hw_ring_grps = cpu_to_le16(ring_grps);
6252                         req->num_rsscos_ctxs = cpu_to_le16(1);
6253                         if (!(bp->flags & BNXT_FLAG_NEW_RSS_CAP) &&
6254                             bnxt_rfs_supported(bp))
6255                                 req->num_rsscos_ctxs =
6256                                         cpu_to_le16(ring_grps + 1);
6257                 }
6258                 req->num_stat_ctxs = cpu_to_le16(stats);
6259                 req->num_vnics = cpu_to_le16(vnics);
6260         }
6261         req->enables = cpu_to_le32(enables);
6262         return req;
6263 }
6264
6265 static struct hwrm_func_vf_cfg_input *
6266 __bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
6267                              int ring_grps, int cp_rings, int stats, int vnics)
6268 {
6269         struct hwrm_func_vf_cfg_input *req;
6270         u32 enables = 0;
6271
6272         if (hwrm_req_init(bp, req, HWRM_FUNC_VF_CFG))
6273                 return NULL;
6274
6275         enables |= tx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
6276         enables |= rx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS |
6277                               FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
6278         enables |= stats ? FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
6279         if (bp->flags & BNXT_FLAG_CHIP_P5) {
6280                 enables |= tx_rings + ring_grps ?
6281                            FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
6282         } else {
6283                 enables |= cp_rings ?
6284                            FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
6285                 enables |= ring_grps ?
6286                            FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0;
6287         }
6288         enables |= vnics ? FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS : 0;
6289         enables |= FUNC_VF_CFG_REQ_ENABLES_NUM_L2_CTXS;
6290
6291         req->num_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX);
6292         req->num_tx_rings = cpu_to_le16(tx_rings);
6293         req->num_rx_rings = cpu_to_le16(rx_rings);
6294         if (bp->flags & BNXT_FLAG_CHIP_P5) {
6295                 req->num_cmpl_rings = cpu_to_le16(tx_rings + ring_grps);
6296                 req->num_rsscos_ctxs = cpu_to_le16(DIV_ROUND_UP(ring_grps, 64));
6297         } else {
6298                 req->num_cmpl_rings = cpu_to_le16(cp_rings);
6299                 req->num_hw_ring_grps = cpu_to_le16(ring_grps);
6300                 req->num_rsscos_ctxs = cpu_to_le16(BNXT_VF_MAX_RSS_CTX);
6301         }
6302         req->num_stat_ctxs = cpu_to_le16(stats);
6303         req->num_vnics = cpu_to_le16(vnics);
6304
6305         req->enables = cpu_to_le32(enables);
6306         return req;
6307 }
6308
6309 static int
6310 bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
6311                            int ring_grps, int cp_rings, int stats, int vnics)
6312 {
6313         struct hwrm_func_cfg_input *req;
6314         int rc;
6315
6316         req = __bnxt_hwrm_reserve_pf_rings(bp, tx_rings, rx_rings, ring_grps,
6317                                            cp_rings, stats, vnics);
6318         if (!req)
6319                 return -ENOMEM;
6320
6321         if (!req->enables) {
6322                 hwrm_req_drop(bp, req);
6323                 return 0;
6324         }
6325
6326         rc = hwrm_req_send(bp, req);
6327         if (rc)
6328                 return rc;
6329
6330         if (bp->hwrm_spec_code < 0x10601)
6331                 bp->hw_resc.resv_tx_rings = tx_rings;
6332
6333         return bnxt_hwrm_get_rings(bp);
6334 }
6335
6336 static int
6337 bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
6338                            int ring_grps, int cp_rings, int stats, int vnics)
6339 {
6340         struct hwrm_func_vf_cfg_input *req;
6341         int rc;
6342
6343         if (!BNXT_NEW_RM(bp)) {
6344                 bp->hw_resc.resv_tx_rings = tx_rings;
6345                 return 0;
6346         }
6347
6348         req = __bnxt_hwrm_reserve_vf_rings(bp, tx_rings, rx_rings, ring_grps,
6349                                            cp_rings, stats, vnics);
6350         if (!req)
6351                 return -ENOMEM;
6352
6353         rc = hwrm_req_send(bp, req);
6354         if (rc)
6355                 return rc;
6356
6357         return bnxt_hwrm_get_rings(bp);
6358 }
6359
6360 static int bnxt_hwrm_reserve_rings(struct bnxt *bp, int tx, int rx, int grp,
6361                                    int cp, int stat, int vnic)
6362 {
6363         if (BNXT_PF(bp))
6364                 return bnxt_hwrm_reserve_pf_rings(bp, tx, rx, grp, cp, stat,
6365                                                   vnic);
6366         else
6367                 return bnxt_hwrm_reserve_vf_rings(bp, tx, rx, grp, cp, stat,
6368                                                   vnic);
6369 }
6370
6371 int bnxt_nq_rings_in_use(struct bnxt *bp)
6372 {
6373         int cp = bp->cp_nr_rings;
6374         int ulp_msix, ulp_base;
6375
6376         ulp_msix = bnxt_get_ulp_msix_num(bp);
6377         if (ulp_msix) {
6378                 ulp_base = bnxt_get_ulp_msix_base(bp);
6379                 cp += ulp_msix;
6380                 if ((ulp_base + ulp_msix) > cp)
6381                         cp = ulp_base + ulp_msix;
6382         }
6383         return cp;
6384 }
6385
6386 static int bnxt_cp_rings_in_use(struct bnxt *bp)
6387 {
6388         int cp;
6389
6390         if (!(bp->flags & BNXT_FLAG_CHIP_P5))
6391                 return bnxt_nq_rings_in_use(bp);
6392
6393         cp = bp->tx_nr_rings + bp->rx_nr_rings;
6394         return cp;
6395 }
6396
6397 static int bnxt_get_func_stat_ctxs(struct bnxt *bp)
6398 {
6399         int ulp_stat = bnxt_get_ulp_stat_ctxs(bp);
6400         int cp = bp->cp_nr_rings;
6401
6402         if (!ulp_stat)
6403                 return cp;
6404
6405         if (bnxt_nq_rings_in_use(bp) > cp + bnxt_get_ulp_msix_num(bp))
6406                 return bnxt_get_ulp_msix_base(bp) + ulp_stat;
6407
6408         return cp + ulp_stat;
6409 }
6410
6411 /* Check if a default RSS map needs to be set up.  This function is only
6412  * used on older firmware that does not require reserving RX rings.
6413  */
6414 static void bnxt_check_rss_tbl_no_rmgr(struct bnxt *bp)
6415 {
6416         struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
6417
6418         /* The RSS map is only valid while rx_nr_rings matches resv_rx_rings */
6419         if (hw_resc->resv_rx_rings != bp->rx_nr_rings) {
6420                 hw_resc->resv_rx_rings = bp->rx_nr_rings;
6421                 if (!netif_is_rxfh_configured(bp->dev))
6422                         bnxt_set_dflt_rss_indir_tbl(bp);
6423         }
6424 }
6425
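/* Return true if the currently reserved resources no longer match what
 * the driver needs, i.e. a new reservation round is required.
 */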
6426 static bool bnxt_need_reserve_rings(struct bnxt *bp)
6427 {
6428         struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
6429         int cp = bnxt_cp_rings_in_use(bp);
6430         int nq = bnxt_nq_rings_in_use(bp);
6431         int rx = bp->rx_nr_rings, stat;
6432         int vnic = 1, grp = rx;
6433
6434         if (hw_resc->resv_tx_rings != bp->tx_nr_rings &&
6435             bp->hwrm_spec_code >= 0x10601)
6436                 return true;
6437
6438         /* Old firmware does not need RX ring reservations but we still
6439          * need to set up a default RSS map when needed.  With new firmware
6440          * we go through RX ring reservations first and then set up the
6441          * RSS map for the successfully reserved RX rings when needed.
6442          */
6443         if (!BNXT_NEW_RM(bp)) {
6444                 bnxt_check_rss_tbl_no_rmgr(bp);
6445                 return false;
6446         }
6447         if ((bp->flags & BNXT_FLAG_RFS) && !(bp->flags & BNXT_FLAG_CHIP_P5))
6448                 vnic = rx + 1;
6449         if (bp->flags & BNXT_FLAG_AGG_RINGS)
6450                 rx <<= 1;
6451         stat = bnxt_get_func_stat_ctxs(bp);
6452         if (hw_resc->resv_rx_rings != rx || hw_resc->resv_cp_rings != cp ||
6453             hw_resc->resv_vnics != vnic || hw_resc->resv_stat_ctxs != stat ||
6454             (hw_resc->resv_hw_ring_grps != grp &&
6455              !(bp->flags & BNXT_FLAG_CHIP_P5)))
6456                 return true;
6457         if ((bp->flags & BNXT_FLAG_CHIP_P5) && BNXT_PF(bp) &&
6458             hw_resc->resv_irqs != nq)
6459                 return true;
6460         return false;
6461 }
6462
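/* Reserve rings with firmware and adjust the driver's ring counts to what
 * was actually granted, disabling aggregation rings and resetting the RSS
 * map if necessary.
 */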
6463 static int __bnxt_reserve_rings(struct bnxt *bp)
6464 {
6465         struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
6466         int cp = bnxt_nq_rings_in_use(bp);
6467         int tx = bp->tx_nr_rings;
6468         int rx = bp->rx_nr_rings;
6469         int grp, rx_rings, rc;
6470         int vnic = 1, stat;
6471         bool sh = false;
6472
6473         if (!bnxt_need_reserve_rings(bp))
6474                 return 0;
6475
6476         if (bp->flags & BNXT_FLAG_SHARED_RINGS)
6477                 sh = true;
6478         if ((bp->flags & BNXT_FLAG_RFS) && !(bp->flags & BNXT_FLAG_CHIP_P5))
6479                 vnic = rx + 1;
6480         if (bp->flags & BNXT_FLAG_AGG_RINGS)
6481                 rx <<= 1;
6482         grp = bp->rx_nr_rings;
6483         stat = bnxt_get_func_stat_ctxs(bp);
6484
6485         rc = bnxt_hwrm_reserve_rings(bp, tx, rx, grp, cp, stat, vnic);
6486         if (rc)
6487                 return rc;
6488
6489         tx = hw_resc->resv_tx_rings;
6490         if (BNXT_NEW_RM(bp)) {
6491                 rx = hw_resc->resv_rx_rings;
6492                 cp = hw_resc->resv_irqs;
6493                 grp = hw_resc->resv_hw_ring_grps;
6494                 vnic = hw_resc->resv_vnics;
6495                 stat = hw_resc->resv_stat_ctxs;
6496         }
6497
6498         rx_rings = rx;
6499         if (bp->flags & BNXT_FLAG_AGG_RINGS) {
6500                 if (rx >= 2) {
6501                         rx_rings = rx >> 1;
6502                 } else {
6503                         if (netif_running(bp->dev))
6504                                 return -ENOMEM;
6505
6506                         bp->flags &= ~BNXT_FLAG_AGG_RINGS;
6507                         bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
6508                         bp->dev->hw_features &= ~NETIF_F_LRO;
6509                         bp->dev->features &= ~NETIF_F_LRO;
6510                         bnxt_set_ring_params(bp);
6511                 }
6512         }
6513         rx_rings = min_t(int, rx_rings, grp);
6514         cp = min_t(int, cp, bp->cp_nr_rings);
6515         if (stat > bnxt_get_ulp_stat_ctxs(bp))
6516                 stat -= bnxt_get_ulp_stat_ctxs(bp);
6517         cp = min_t(int, cp, stat);
6518         rc = bnxt_trim_rings(bp, &rx_rings, &tx, cp, sh);
6519         if (bp->flags & BNXT_FLAG_AGG_RINGS)
6520                 rx = rx_rings << 1;
6521         cp = sh ? max_t(int, tx, rx_rings) : tx + rx_rings;
6522         bp->tx_nr_rings = tx;
6523
6524         /* If we cannot reserve all the RX rings, reset the RSS map only
6525          * if absolutely necessary
6526          */
6527         if (rx_rings != bp->rx_nr_rings) {
6528                 netdev_warn(bp->dev, "Able to reserve only %d out of %d requested RX rings\n",
6529                             rx_rings, bp->rx_nr_rings);
6530                 if (netif_is_rxfh_configured(bp->dev) &&
6531                     (bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings) !=
6532                      bnxt_get_nr_rss_ctxs(bp, rx_rings) ||
6533                      bnxt_get_max_rss_ring(bp) >= rx_rings)) {
6534                         netdev_warn(bp->dev, "RSS table entries reverting to default\n");
6535                         bp->dev->priv_flags &= ~IFF_RXFH_CONFIGURED;
6536                 }
6537         }
6538         bp->rx_nr_rings = rx_rings;
6539         bp->cp_nr_rings = cp;
6540
6541         if (!tx || !rx || !cp || !grp || !vnic || !stat)
6542                 return -ENOMEM;
6543
6544         if (!netif_is_rxfh_configured(bp->dev))
6545                 bnxt_set_dflt_rss_indir_tbl(bp);
6546
6547         return rc;
6548 }
6549
6550 static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
6551                                     int ring_grps, int cp_rings, int stats,
6552                                     int vnics)
6553 {
6554         struct hwrm_func_vf_cfg_input *req;
6555         u32 flags;
6556
6557         if (!BNXT_NEW_RM(bp))
6558                 return 0;
6559
6560         req = __bnxt_hwrm_reserve_vf_rings(bp, tx_rings, rx_rings, ring_grps,
6561                                            cp_rings, stats, vnics);
6562         flags = FUNC_VF_CFG_REQ_FLAGS_TX_ASSETS_TEST |
6563                 FUNC_VF_CFG_REQ_FLAGS_RX_ASSETS_TEST |
6564                 FUNC_VF_CFG_REQ_FLAGS_CMPL_ASSETS_TEST |
6565                 FUNC_VF_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST |
6566                 FUNC_VF_CFG_REQ_FLAGS_VNIC_ASSETS_TEST |
6567                 FUNC_VF_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST;
6568         if (!(bp->flags & BNXT_FLAG_CHIP_P5))
6569                 flags |= FUNC_VF_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST;
6570
6571         req->flags = cpu_to_le32(flags);
6572         return hwrm_req_send_silent(bp, req);
6573 }
6574
6575 static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
6576                                     int ring_grps, int cp_rings, int stats,
6577                                     int vnics)
6578 {
6579         struct hwrm_func_cfg_input *req;
6580         u32 flags;
6581
6582         req = __bnxt_hwrm_reserve_pf_rings(bp, tx_rings, rx_rings, ring_grps,
6583                                            cp_rings, stats, vnics);
6584         flags = FUNC_CFG_REQ_FLAGS_TX_ASSETS_TEST;
6585         if (BNXT_NEW_RM(bp)) {
6586                 flags |= FUNC_CFG_REQ_FLAGS_RX_ASSETS_TEST |
6587                          FUNC_CFG_REQ_FLAGS_CMPL_ASSETS_TEST |
6588                          FUNC_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST |
6589                          FUNC_CFG_REQ_FLAGS_VNIC_ASSETS_TEST;
6590                 if (bp->flags & BNXT_FLAG_CHIP_P5)
6591                         flags |= FUNC_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST |
6592                                  FUNC_CFG_REQ_FLAGS_NQ_ASSETS_TEST;
6593                 else
6594                         flags |= FUNC_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST;
6595         }
6596
6597         req->flags = cpu_to_le32(flags);
6598         return hwrm_req_send_silent(bp, req);
6599 }
6600
6601 static int bnxt_hwrm_check_rings(struct bnxt *bp, int tx_rings, int rx_rings,
6602                                  int ring_grps, int cp_rings, int stats,
6603                                  int vnics)
6604 {
6605         if (bp->hwrm_spec_code < 0x10801)
6606                 return 0;
6607
6608         if (BNXT_PF(bp))
6609                 return bnxt_hwrm_check_pf_rings(bp, tx_rings, rx_rings,
6610                                                 ring_grps, cp_rings, stats,
6611                                                 vnics);
6612
6613         return bnxt_hwrm_check_vf_rings(bp, tx_rings, rx_rings, ring_grps,
6614                                         cp_rings, stats, vnics);
6615 }
6616
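/* Query the firmware's interrupt coalescing capabilities; the legacy
 * defaults set below are kept when the firmware is too old to support
 * the query.
 */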
6617 static void bnxt_hwrm_coal_params_qcaps(struct bnxt *bp)
6618 {
6619         struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
6620         struct hwrm_ring_aggint_qcaps_output *resp;
6621         struct hwrm_ring_aggint_qcaps_input *req;
6622         int rc;
6623
6624         coal_cap->cmpl_params = BNXT_LEGACY_COAL_CMPL_PARAMS;
6625         coal_cap->num_cmpl_dma_aggr_max = 63;
6626         coal_cap->num_cmpl_dma_aggr_during_int_max = 63;
6627         coal_cap->cmpl_aggr_dma_tmr_max = 65535;
6628         coal_cap->cmpl_aggr_dma_tmr_during_int_max = 65535;
6629         coal_cap->int_lat_tmr_min_max = 65535;
6630         coal_cap->int_lat_tmr_max_max = 65535;
6631         coal_cap->num_cmpl_aggr_int_max = 65535;
6632         coal_cap->timer_units = 80;
6633
6634         if (bp->hwrm_spec_code < 0x10902)
6635                 return;
6636
6637         if (hwrm_req_init(bp, req, HWRM_RING_AGGINT_QCAPS))
6638                 return;
6639
6640         resp = hwrm_req_hold(bp, req);
6641         rc = hwrm_req_send_silent(bp, req);
6642         if (!rc) {
6643                 coal_cap->cmpl_params = le32_to_cpu(resp->cmpl_params);
6644                 coal_cap->nq_params = le32_to_cpu(resp->nq_params);
6645                 coal_cap->num_cmpl_dma_aggr_max =
6646                         le16_to_cpu(resp->num_cmpl_dma_aggr_max);
6647                 coal_cap->num_cmpl_dma_aggr_during_int_max =
6648                         le16_to_cpu(resp->num_cmpl_dma_aggr_during_int_max);
6649                 coal_cap->cmpl_aggr_dma_tmr_max =
6650                         le16_to_cpu(resp->cmpl_aggr_dma_tmr_max);
6651                 coal_cap->cmpl_aggr_dma_tmr_during_int_max =
6652                         le16_to_cpu(resp->cmpl_aggr_dma_tmr_during_int_max);
6653                 coal_cap->int_lat_tmr_min_max =
6654                         le16_to_cpu(resp->int_lat_tmr_min_max);
6655                 coal_cap->int_lat_tmr_max_max =
6656                         le16_to_cpu(resp->int_lat_tmr_max_max);
6657                 coal_cap->num_cmpl_aggr_int_max =
6658                         le16_to_cpu(resp->num_cmpl_aggr_int_max);
6659                 coal_cap->timer_units = le16_to_cpu(resp->timer_units);
6660         }
6661         hwrm_req_drop(bp, req);
6662 }
6663
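/* Convert a microsecond value into coalescing timer units:
 * usec * 1000 / timer_units, where timer_units comes from the aggint
 * capabilities query.
 */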
6664 static u16 bnxt_usec_to_coal_tmr(struct bnxt *bp, u16 usec)
6665 {
6666         struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
6667
6668         return usec * 1000 / coal_cap->timer_units;
6669 }
6670
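/* Translate a bnxt_coal configuration into the HWRM aggint parameters,
 * clamping each value to the queried capability limits.
 */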
6671 static void bnxt_hwrm_set_coal_params(struct bnxt *bp,
6672         struct bnxt_coal *hw_coal,
6673         struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
6674 {
6675         struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
6676         u16 val, tmr, max, flags = hw_coal->flags;
6677         u32 cmpl_params = coal_cap->cmpl_params;
6678
6679         max = hw_coal->bufs_per_record * 128;
6680         if (hw_coal->budget)
6681                 max = hw_coal->bufs_per_record * hw_coal->budget;
6682         max = min_t(u16, max, coal_cap->num_cmpl_aggr_int_max);
6683
6684         val = clamp_t(u16, hw_coal->coal_bufs, 1, max);
6685         req->num_cmpl_aggr_int = cpu_to_le16(val);
6686
6687         val = min_t(u16, val, coal_cap->num_cmpl_dma_aggr_max);
6688         req->num_cmpl_dma_aggr = cpu_to_le16(val);
6689
6690         val = clamp_t(u16, hw_coal->coal_bufs_irq, 1,
6691                       coal_cap->num_cmpl_dma_aggr_during_int_max);
6692         req->num_cmpl_dma_aggr_during_int = cpu_to_le16(val);
6693
6694         tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks);
6695         tmr = clamp_t(u16, tmr, 1, coal_cap->int_lat_tmr_max_max);
6696         req->int_lat_tmr_max = cpu_to_le16(tmr);
6697
6698         /* min timer set to 1/2 of interrupt timer */
6699         if (cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_INT_LAT_TMR_MIN) {
6700                 val = tmr / 2;
6701                 val = clamp_t(u16, val, 1, coal_cap->int_lat_tmr_min_max);
6702                 req->int_lat_tmr_min = cpu_to_le16(val);
6703                 req->enables |= cpu_to_le16(BNXT_COAL_CMPL_MIN_TMR_ENABLE);
6704         }
6705
6706         /* buf timer set to 1/4 of interrupt timer */
6707         val = clamp_t(u16, tmr / 4, 1, coal_cap->cmpl_aggr_dma_tmr_max);
6708         req->cmpl_aggr_dma_tmr = cpu_to_le16(val);
6709
6710         if (cmpl_params &
6711             RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_NUM_CMPL_DMA_AGGR_DURING_INT) {
6712                 tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks_irq);
6713                 val = clamp_t(u16, tmr, 1,
6714                               coal_cap->cmpl_aggr_dma_tmr_during_int_max);
6715                 req->cmpl_aggr_dma_tmr_during_int = cpu_to_le16(val);
6716                 req->enables |=
6717                         cpu_to_le16(BNXT_COAL_CMPL_AGGR_TMR_DURING_INT_ENABLE);
6718         }
6719
6720         if ((cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_RING_IDLE) &&
6721             hw_coal->idle_thresh && hw_coal->coal_ticks < hw_coal->idle_thresh)
6722                 flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_RING_IDLE;
6723         req->flags = cpu_to_le16(flags);
6724         req->enables |= cpu_to_le16(BNXT_COAL_CMPL_ENABLES);
6725 }
6726
6727 static int __bnxt_hwrm_set_coal_nq(struct bnxt *bp, struct bnxt_napi *bnapi,
6728                                    struct bnxt_coal *hw_coal)
6729 {
6730         struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req;
6731         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
6732         struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
6733         u32 nq_params = coal_cap->nq_params;
6734         u16 tmr;
6735         int rc;
6736
6737         if (!(nq_params & RING_AGGINT_QCAPS_RESP_NQ_PARAMS_INT_LAT_TMR_MIN))
6738                 return 0;
6739
6740         rc = hwrm_req_init(bp, req, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS);
6741         if (rc)
6742                 return rc;
6743
6744         req->ring_id = cpu_to_le16(cpr->cp_ring_struct.fw_ring_id);
6745         req->flags =
6746                 cpu_to_le16(RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_IS_NQ);
6747
6748         tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks) / 2;
6749         tmr = clamp_t(u16, tmr, 1, coal_cap->int_lat_tmr_min_max);
6750         req->int_lat_tmr_min = cpu_to_le16(tmr);
6751         req->enables |= cpu_to_le16(BNXT_COAL_CMPL_MIN_TMR_ENABLE);
6752         return hwrm_req_send(bp, req);
6753 }
6754
6755 int bnxt_hwrm_set_ring_coal(struct bnxt *bp, struct bnxt_napi *bnapi)
6756 {
6757         struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req_rx;
6758         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
6759         struct bnxt_coal coal;
6760         int rc;
6761
6762         /* Tick values are in microseconds.
6763          * 1 coal_buf x bufs_per_record = 1 completion record.
6764          */
6765         memcpy(&coal, &bp->rx_coal, sizeof(struct bnxt_coal));
6766
6767         coal.coal_ticks = cpr->rx_ring_coal.coal_ticks;
6768         coal.coal_bufs = cpr->rx_ring_coal.coal_bufs;
6769
6770         if (!bnapi->rx_ring)
6771                 return -ENODEV;
6772
6773         rc = hwrm_req_init(bp, req_rx, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS);
6774         if (rc)
6775                 return rc;
6776
6777         bnxt_hwrm_set_coal_params(bp, &coal, req_rx);
6778
6779         req_rx->ring_id = cpu_to_le16(bnxt_cp_ring_for_rx(bp, bnapi->rx_ring));
6780
6781         return hwrm_req_send(bp, req_rx);
6782 }
6783
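/* Program RX and TX coalescing parameters on every completion ring and,
 * on P5 chips, also set the NQ minimum timer for each NAPI instance.
 */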
6784 int bnxt_hwrm_set_coal(struct bnxt *bp)
6785 {
6786         struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req_rx, *req_tx,
6787                                                            *req;
6788         int i, rc;
6789
6790         rc = hwrm_req_init(bp, req_rx, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS);
6791         if (rc)
6792                 return rc;
6793
6794         rc = hwrm_req_init(bp, req_tx, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS);
6795         if (rc) {
6796                 hwrm_req_drop(bp, req_rx);
6797                 return rc;
6798         }
6799
6800         bnxt_hwrm_set_coal_params(bp, &bp->rx_coal, req_rx);
6801         bnxt_hwrm_set_coal_params(bp, &bp->tx_coal, req_tx);
6802
6803         hwrm_req_hold(bp, req_rx);
6804         hwrm_req_hold(bp, req_tx);
6805         for (i = 0; i < bp->cp_nr_rings; i++) {
6806                 struct bnxt_napi *bnapi = bp->bnapi[i];
6807                 struct bnxt_coal *hw_coal;
6808                 u16 ring_id;
6809
6810                 req = req_rx;
6811                 if (!bnapi->rx_ring) {
6812                         ring_id = bnxt_cp_ring_for_tx(bp, bnapi->tx_ring);
6813                         req = req_tx;
6814                 } else {
6815                         ring_id = bnxt_cp_ring_for_rx(bp, bnapi->rx_ring);
6816                 }
6817                 req->ring_id = cpu_to_le16(ring_id);
6818
6819                 rc = hwrm_req_send(bp, req);
6820                 if (rc)
6821                         break;
6822
6823                 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
6824                         continue;
6825
6826                 if (bnapi->rx_ring && bnapi->tx_ring) {
6827                         req = req_tx;
6828                         ring_id = bnxt_cp_ring_for_tx(bp, bnapi->tx_ring);
6829                         req->ring_id = cpu_to_le16(ring_id);
6830                         rc = hwrm_req_send(bp, req);
6831                         if (rc)
6832                                 break;
6833                 }
6834                 if (bnapi->rx_ring)
6835                         hw_coal = &bp->rx_coal;
6836                 else
6837                         hw_coal = &bp->tx_coal;
6838                 __bnxt_hwrm_set_coal_nq(bp, bnapi, hw_coal);
6839         }
6840         hwrm_req_drop(bp, req_rx);
6841         hwrm_req_drop(bp, req_tx);
6842         return rc;
6843 }
6844
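/* Free every allocated firmware statistics context.  Firmware with major
 * version 20 or older also gets a STAT_CTX_CLR_STATS request for each
 * context before it is freed.
 */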
6845 static void bnxt_hwrm_stat_ctx_free(struct bnxt *bp)
6846 {
6847         struct hwrm_stat_ctx_clr_stats_input *req0 = NULL;
6848         struct hwrm_stat_ctx_free_input *req;
6849         int i;
6850
6851         if (!bp->bnapi)
6852                 return;
6853
6854         if (BNXT_CHIP_TYPE_NITRO_A0(bp))
6855                 return;
6856
6857         if (hwrm_req_init(bp, req, HWRM_STAT_CTX_FREE))
6858                 return;
6859         if (BNXT_FW_MAJ(bp) <= 20) {
6860                 if (hwrm_req_init(bp, req0, HWRM_STAT_CTX_CLR_STATS)) {
6861                         hwrm_req_drop(bp, req);
6862                         return;
6863                 }
6864                 hwrm_req_hold(bp, req0);
6865         }
6866         hwrm_req_hold(bp, req);
6867         for (i = 0; i < bp->cp_nr_rings; i++) {
6868                 struct bnxt_napi *bnapi = bp->bnapi[i];
6869                 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
6870
6871                 if (cpr->hw_stats_ctx_id != INVALID_STATS_CTX_ID) {
6872                         req->stat_ctx_id = cpu_to_le32(cpr->hw_stats_ctx_id);
6873                         if (req0) {
6874                                 req0->stat_ctx_id = req->stat_ctx_id;
6875                                 hwrm_req_send(bp, req0);
6876                         }
6877                         hwrm_req_send(bp, req);
6878
6879                         cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
6880                 }
6881         }
6882         hwrm_req_drop(bp, req);
6883         if (req0)
6884                 hwrm_req_drop(bp, req0);
6885 }
6886
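/* Allocate one firmware statistics context per completion ring, pointing it
 * at the ring's stats DMA buffer, and record the returned context ID in the
 * ring and ring group structures.
 */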
6887 static int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp)
6888 {
6889         struct hwrm_stat_ctx_alloc_output *resp;
6890         struct hwrm_stat_ctx_alloc_input *req;
6891         int rc, i;
6892
6893         if (BNXT_CHIP_TYPE_NITRO_A0(bp))
6894                 return 0;
6895
6896         rc = hwrm_req_init(bp, req, HWRM_STAT_CTX_ALLOC);
6897         if (rc)
6898                 return rc;
6899
6900         req->stats_dma_length = cpu_to_le16(bp->hw_ring_stats_size);
6901         req->update_period_ms = cpu_to_le32(bp->stats_coal_ticks / 1000);
6902
6903         resp = hwrm_req_hold(bp, req);
6904         for (i = 0; i < bp->cp_nr_rings; i++) {
6905                 struct bnxt_napi *bnapi = bp->bnapi[i];
6906                 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
6907
6908                 req->stats_dma_addr = cpu_to_le64(cpr->stats.hw_stats_map);
6909
6910                 rc = hwrm_req_send(bp, req);
6911                 if (rc)
6912                         break;
6913
6914                 cpr->hw_stats_ctx_id = le32_to_cpu(resp->stat_ctx_id);
6915
6916                 bp->grp_info[i].fw_stats_ctx = cpr->hw_stats_ctx_id;
6917         }
6918         hwrm_req_drop(bp, req);
6919         return rc;
6920 }
6921
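/* Query the current function configuration: VF VLAN / registered VF count,
 * LLDP/DCBX agent and ring monitor capabilities, NPAR partition type,
 * bridge (EVB) mode, maximum configured MTU and the L2 doorbell BAR size.
 */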
6922 static int bnxt_hwrm_func_qcfg(struct bnxt *bp)
6923 {
6924         struct hwrm_func_qcfg_output *resp;
6925         struct hwrm_func_qcfg_input *req;
6926         u32 min_db_offset = 0;
6927         u16 flags;
6928         int rc;
6929
6930         rc = hwrm_req_init(bp, req, HWRM_FUNC_QCFG);
6931         if (rc)
6932                 return rc;
6933
6934         req->fid = cpu_to_le16(0xffff);
6935         resp = hwrm_req_hold(bp, req);
6936         rc = hwrm_req_send(bp, req);
6937         if (rc)
6938                 goto func_qcfg_exit;
6939
6940 #ifdef CONFIG_BNXT_SRIOV
6941         if (BNXT_VF(bp)) {
6942                 struct bnxt_vf_info *vf = &bp->vf;
6943
6944                 vf->vlan = le16_to_cpu(resp->vlan) & VLAN_VID_MASK;
6945         } else {
6946                 bp->pf.registered_vfs = le16_to_cpu(resp->registered_vfs);
6947         }
6948 #endif
6949         flags = le16_to_cpu(resp->flags);
6950         if (flags & (FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED |
6951                      FUNC_QCFG_RESP_FLAGS_FW_LLDP_AGENT_ENABLED)) {
6952                 bp->fw_cap |= BNXT_FW_CAP_LLDP_AGENT;
6953                 if (flags & FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED)
6954                         bp->fw_cap |= BNXT_FW_CAP_DCBX_AGENT;
6955         }
6956         if (BNXT_PF(bp) && (flags & FUNC_QCFG_RESP_FLAGS_MULTI_HOST))
6957                 bp->flags |= BNXT_FLAG_MULTI_HOST;
6958
6959         if (flags & FUNC_QCFG_RESP_FLAGS_RING_MONITOR_ENABLED)
6960                 bp->fw_cap |= BNXT_FW_CAP_RING_MONITOR;
6961
6962         switch (resp->port_partition_type) {
6963         case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0:
6964         case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_5:
6965         case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR2_0:
6966                 bp->port_partition_type = resp->port_partition_type;
6967                 break;
6968         }
6969         if (bp->hwrm_spec_code < 0x10707 ||
6970             resp->evb_mode == FUNC_QCFG_RESP_EVB_MODE_VEB)
6971                 bp->br_mode = BRIDGE_MODE_VEB;
6972         else if (resp->evb_mode == FUNC_QCFG_RESP_EVB_MODE_VEPA)
6973                 bp->br_mode = BRIDGE_MODE_VEPA;
6974         else
6975                 bp->br_mode = BRIDGE_MODE_UNDEF;
6976
6977         bp->max_mtu = le16_to_cpu(resp->max_mtu_configured);
6978         if (!bp->max_mtu)
6979                 bp->max_mtu = BNXT_MAX_MTU;
6980
6981         if (bp->db_size)
6982                 goto func_qcfg_exit;
6983
6984         if (bp->flags & BNXT_FLAG_CHIP_P5) {
6985                 if (BNXT_PF(bp))
6986                         min_db_offset = DB_PF_OFFSET_P5;
6987                 else
6988                         min_db_offset = DB_VF_OFFSET_P5;
6989         }
6990         bp->db_size = PAGE_ALIGN(le16_to_cpu(resp->l2_doorbell_bar_size_kb) *
6991                                  1024);
6992         if (!bp->db_size || bp->db_size > pci_resource_len(bp->pdev, 2) ||
6993             bp->db_size <= min_db_offset)
6994                 bp->db_size = pci_resource_len(bp->pdev, 2);
6995
6996 func_qcfg_exit:
6997         hwrm_req_drop(bp, req);
6998         return rc;
6999 }
7000
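/* Capture the context memory initialization value and the per-type init
 * offsets (reported in 4-byte units) so that backing store pages can be
 * pre-initialized before being handed to firmware.
 */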
7001 static void bnxt_init_ctx_initializer(struct bnxt_ctx_mem_info *ctx,
7002                         struct hwrm_func_backing_store_qcaps_output *resp)
7003 {
7004         struct bnxt_mem_init *mem_init;
7005         u16 init_mask;
7006         u8 init_val;
7007         u8 *offset;
7008         int i;
7009
7010         init_val = resp->ctx_kind_initializer;
7011         init_mask = le16_to_cpu(resp->ctx_init_mask);
7012         offset = &resp->qp_init_offset;
7013         mem_init = &ctx->mem_init[BNXT_CTX_MEM_INIT_QP];
7014         for (i = 0; i < BNXT_CTX_MEM_INIT_MAX; i++, mem_init++, offset++) {
7015                 mem_init->init_val = init_val;
7016                 mem_init->offset = BNXT_MEM_INVALID_OFFSET;
7017                 if (!init_mask)
7018                         continue;
7019                 if (i == BNXT_CTX_MEM_INIT_STAT)
7020                         offset = &resp->stat_init_offset;
7021                 if (init_mask & (1 << i))
7022                         mem_init->offset = *offset * 4;
7023                 else
7024                         mem_init->init_val = 0;
7025         }
7026         ctx->mem_init[BNXT_CTX_MEM_INIT_QP].size = ctx->qp_entry_size;
7027         ctx->mem_init[BNXT_CTX_MEM_INIT_SRQ].size = ctx->srq_entry_size;
7028         ctx->mem_init[BNXT_CTX_MEM_INIT_CQ].size = ctx->cq_entry_size;
7029         ctx->mem_init[BNXT_CTX_MEM_INIT_VNIC].size = ctx->vnic_entry_size;
7030         ctx->mem_init[BNXT_CTX_MEM_INIT_STAT].size = ctx->stat_entry_size;
7031         ctx->mem_init[BNXT_CTX_MEM_INIT_MRAV].size = ctx->mrav_entry_size;
7032 }
7033
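/* Query the firmware's backing store requirements (QP, SRQ, CQ, VNIC, STAT,
 * TQM, MRAV and TIM entry sizes and limits) and allocate bp->ctx to hold
 * them.  A failed query is not fatal; bp->ctx simply stays unset.
 */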
7034 static int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp)
7035 {
7036         struct hwrm_func_backing_store_qcaps_output *resp;
7037         struct hwrm_func_backing_store_qcaps_input *req;
7038         int rc;
7039
7040         if (bp->hwrm_spec_code < 0x10902 || BNXT_VF(bp) || bp->ctx)
7041                 return 0;
7042
7043         rc = hwrm_req_init(bp, req, HWRM_FUNC_BACKING_STORE_QCAPS);
7044         if (rc)
7045                 return rc;
7046
7047         resp = hwrm_req_hold(bp, req);
7048         rc = hwrm_req_send_silent(bp, req);
7049         if (!rc) {
7050                 struct bnxt_ctx_pg_info *ctx_pg;
7051                 struct bnxt_ctx_mem_info *ctx;
7052                 int i, tqm_rings;
7053
7054                 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
7055                 if (!ctx) {
7056                         rc = -ENOMEM;
7057                         goto ctx_err;
7058                 }
7059                 ctx->qp_max_entries = le32_to_cpu(resp->qp_max_entries);
7060                 ctx->qp_min_qp1_entries = le16_to_cpu(resp->qp_min_qp1_entries);
7061                 ctx->qp_max_l2_entries = le16_to_cpu(resp->qp_max_l2_entries);
7062                 ctx->qp_entry_size = le16_to_cpu(resp->qp_entry_size);
7063                 ctx->srq_max_l2_entries = le16_to_cpu(resp->srq_max_l2_entries);
7064                 ctx->srq_max_entries = le32_to_cpu(resp->srq_max_entries);
7065                 ctx->srq_entry_size = le16_to_cpu(resp->srq_entry_size);
7066                 ctx->cq_max_l2_entries = le16_to_cpu(resp->cq_max_l2_entries);
7067                 ctx->cq_max_entries = le32_to_cpu(resp->cq_max_entries);
7068                 ctx->cq_entry_size = le16_to_cpu(resp->cq_entry_size);
7069                 ctx->vnic_max_vnic_entries =
7070                         le16_to_cpu(resp->vnic_max_vnic_entries);
7071                 ctx->vnic_max_ring_table_entries =
7072                         le16_to_cpu(resp->vnic_max_ring_table_entries);
7073                 ctx->vnic_entry_size = le16_to_cpu(resp->vnic_entry_size);
7074                 ctx->stat_max_entries = le32_to_cpu(resp->stat_max_entries);
7075                 ctx->stat_entry_size = le16_to_cpu(resp->stat_entry_size);
7076                 ctx->tqm_entry_size = le16_to_cpu(resp->tqm_entry_size);
7077                 ctx->tqm_min_entries_per_ring =
7078                         le32_to_cpu(resp->tqm_min_entries_per_ring);
7079                 ctx->tqm_max_entries_per_ring =
7080                         le32_to_cpu(resp->tqm_max_entries_per_ring);
7081                 ctx->tqm_entries_multiple = resp->tqm_entries_multiple;
7082                 if (!ctx->tqm_entries_multiple)
7083                         ctx->tqm_entries_multiple = 1;
7084                 ctx->mrav_max_entries = le32_to_cpu(resp->mrav_max_entries);
7085                 ctx->mrav_entry_size = le16_to_cpu(resp->mrav_entry_size);
7086                 ctx->mrav_num_entries_units =
7087                         le16_to_cpu(resp->mrav_num_entries_units);
7088                 ctx->tim_entry_size = le16_to_cpu(resp->tim_entry_size);
7089                 ctx->tim_max_entries = le32_to_cpu(resp->tim_max_entries);
7090
7091                 bnxt_init_ctx_initializer(ctx, resp);
7092
7093                 ctx->tqm_fp_rings_count = resp->tqm_fp_rings_count;
7094                 if (!ctx->tqm_fp_rings_count)
7095                         ctx->tqm_fp_rings_count = bp->max_q;
7096                 else if (ctx->tqm_fp_rings_count > BNXT_MAX_TQM_FP_RINGS)
7097                         ctx->tqm_fp_rings_count = BNXT_MAX_TQM_FP_RINGS;
7098
7099                 tqm_rings = ctx->tqm_fp_rings_count + BNXT_MAX_TQM_SP_RINGS;
7100                 ctx_pg = kcalloc(tqm_rings, sizeof(*ctx_pg), GFP_KERNEL);
7101                 if (!ctx_pg) {
7102                         kfree(ctx);
7103                         rc = -ENOMEM;
7104                         goto ctx_err;
7105                 }
7106                 for (i = 0; i < tqm_rings; i++, ctx_pg++)
7107                         ctx->tqm_mem[i] = ctx_pg;
7108                 bp->ctx = ctx;
7109         } else {
7110                 rc = 0;
7111         }
7112 ctx_err:
7113         hwrm_req_drop(bp, req);
7114         return rc;
7115 }
7116
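/* Encode the page size and indirection level of a context memory region
 * into the request's page attribute byte and point the page directory at
 * either the page table or the single data page.
 */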
7117 static void bnxt_hwrm_set_pg_attr(struct bnxt_ring_mem_info *rmem, u8 *pg_attr,
7118                                   __le64 *pg_dir)
7119 {
7120         if (!rmem->nr_pages)
7121                 return;
7122
7123         BNXT_SET_CTX_PAGE_ATTR(*pg_attr);
7124         if (rmem->depth >= 1) {
7125                 if (rmem->depth == 2)
7126                         *pg_attr |= 2;
7127                 else
7128                         *pg_attr |= 1;
7129                 *pg_dir = cpu_to_le64(rmem->pg_tbl_map);
7130         } else {
7131                 *pg_dir = cpu_to_le64(rmem->dma_arr[0]);
7132         }
7133 }
7134
7135 #define FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES                 \
7136         (FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP |                \
7137          FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ |               \
7138          FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ |                \
7139          FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC |              \
7140          FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT)
7141
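/* Pass the host memory allocated for each enabled backing store context
 * type (entry counts, entry sizes, page attributes and page directories)
 * to the firmware.
 */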
7142 static int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, u32 enables)
7143 {
7144         struct hwrm_func_backing_store_cfg_input *req;
7145         struct bnxt_ctx_mem_info *ctx = bp->ctx;
7146         struct bnxt_ctx_pg_info *ctx_pg;
7147         void **__req = (void **)&req;
7148         u32 req_len = sizeof(*req);
7149         __le32 *num_entries;
7150         __le64 *pg_dir;
7151         u32 flags = 0;
7152         u8 *pg_attr;
7153         u32 ena;
7154         int rc;
7155         int i;
7156
7157         if (!ctx)
7158                 return 0;
7159
7160         if (req_len > bp->hwrm_max_ext_req_len)
7161                 req_len = BNXT_BACKING_STORE_CFG_LEGACY_LEN;
7162         rc = __hwrm_req_init(bp, __req, HWRM_FUNC_BACKING_STORE_CFG, req_len);
7163         if (rc)
7164                 return rc;
7165
7166         req->enables = cpu_to_le32(enables);
7167         if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP) {
7168                 ctx_pg = &ctx->qp_mem;
7169                 req->qp_num_entries = cpu_to_le32(ctx_pg->entries);
7170                 req->qp_num_qp1_entries = cpu_to_le16(ctx->qp_min_qp1_entries);
7171                 req->qp_num_l2_entries = cpu_to_le16(ctx->qp_max_l2_entries);
7172                 req->qp_entry_size = cpu_to_le16(ctx->qp_entry_size);
7173                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
7174                                       &req->qpc_pg_size_qpc_lvl,
7175                                       &req->qpc_page_dir);
7176         }
7177         if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ) {
7178                 ctx_pg = &ctx->srq_mem;
7179                 req->srq_num_entries = cpu_to_le32(ctx_pg->entries);
7180                 req->srq_num_l2_entries = cpu_to_le16(ctx->srq_max_l2_entries);
7181                 req->srq_entry_size = cpu_to_le16(ctx->srq_entry_size);
7182                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
7183                                       &req->srq_pg_size_srq_lvl,
7184                                       &req->srq_page_dir);
7185         }
7186         if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ) {
7187                 ctx_pg = &ctx->cq_mem;
7188                 req->cq_num_entries = cpu_to_le32(ctx_pg->entries);
7189                 req->cq_num_l2_entries = cpu_to_le16(ctx->cq_max_l2_entries);
7190                 req->cq_entry_size = cpu_to_le16(ctx->cq_entry_size);
7191                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
7192                                       &req->cq_pg_size_cq_lvl,
7193                                       &req->cq_page_dir);
7194         }
7195         if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC) {
7196                 ctx_pg = &ctx->vnic_mem;
7197                 req->vnic_num_vnic_entries =
7198                         cpu_to_le16(ctx->vnic_max_vnic_entries);
7199                 req->vnic_num_ring_table_entries =
7200                         cpu_to_le16(ctx->vnic_max_ring_table_entries);
7201                 req->vnic_entry_size = cpu_to_le16(ctx->vnic_entry_size);
7202                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
7203                                       &req->vnic_pg_size_vnic_lvl,
7204                                       &req->vnic_page_dir);
7205         }
7206         if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT) {
7207                 ctx_pg = &ctx->stat_mem;
7208                 req->stat_num_entries = cpu_to_le32(ctx->stat_max_entries);
7209                 req->stat_entry_size = cpu_to_le16(ctx->stat_entry_size);
7210                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
7211                                       &req->stat_pg_size_stat_lvl,
7212                                       &req->stat_page_dir);
7213         }
7214         if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV) {
7215                 ctx_pg = &ctx->mrav_mem;
7216                 req->mrav_num_entries = cpu_to_le32(ctx_pg->entries);
7217                 if (ctx->mrav_num_entries_units)
7218                         flags |=
7219                         FUNC_BACKING_STORE_CFG_REQ_FLAGS_MRAV_RESERVATION_SPLIT;
7220                 req->mrav_entry_size = cpu_to_le16(ctx->mrav_entry_size);
7221                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
7222                                       &req->mrav_pg_size_mrav_lvl,
7223                                       &req->mrav_page_dir);
7224         }
7225         if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM) {
7226                 ctx_pg = &ctx->tim_mem;
7227                 req->tim_num_entries = cpu_to_le32(ctx_pg->entries);
7228                 req->tim_entry_size = cpu_to_le16(ctx->tim_entry_size);
7229                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
7230                                       &req->tim_pg_size_tim_lvl,
7231                                       &req->tim_page_dir);
7232         }
7233         for (i = 0, num_entries = &req->tqm_sp_num_entries,
7234              pg_attr = &req->tqm_sp_pg_size_tqm_sp_lvl,
7235              pg_dir = &req->tqm_sp_page_dir,
7236              ena = FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP;
7237              i < BNXT_MAX_TQM_RINGS;
7238              i++, num_entries++, pg_attr++, pg_dir++, ena <<= 1) {
7239                 if (!(enables & ena))
7240                         continue;
7241
7242                 req->tqm_entry_size = cpu_to_le16(ctx->tqm_entry_size);
7243                 ctx_pg = ctx->tqm_mem[i];
7244                 *num_entries = cpu_to_le32(ctx_pg->entries);
7245                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, pg_attr, pg_dir);
7246         }
7247         req->flags = cpu_to_le32(flags);
7248         return hwrm_req_send(bp, req);
7249 }
7250
7251 static int bnxt_alloc_ctx_mem_blk(struct bnxt *bp,
7252                                   struct bnxt_ctx_pg_info *ctx_pg)
7253 {
7254         struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
7255
7256         rmem->page_size = BNXT_PAGE_SIZE;
7257         rmem->pg_arr = ctx_pg->ctx_pg_arr;
7258         rmem->dma_arr = ctx_pg->ctx_dma_arr;
7259         rmem->flags = BNXT_RMEM_VALID_PTE_FLAG;
7260         if (rmem->depth >= 1)
7261                 rmem->flags |= BNXT_RMEM_USE_FULL_PAGE_FLAG;
7262         return bnxt_alloc_ring(bp, rmem);
7263 }
7264
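/* Allocate host pages for one context memory region.  Regions larger than
 * MAX_CTX_PAGES, or callers requesting depth > 1, use a two-level page
 * table; smaller regions use a single level.
 */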
7265 static int bnxt_alloc_ctx_pg_tbls(struct bnxt *bp,
7266                                   struct bnxt_ctx_pg_info *ctx_pg, u32 mem_size,
7267                                   u8 depth, struct bnxt_mem_init *mem_init)
7268 {
7269         struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
7270         int rc;
7271
7272         if (!mem_size)
7273                 return -EINVAL;
7274
7275         ctx_pg->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE);
7276         if (ctx_pg->nr_pages > MAX_CTX_TOTAL_PAGES) {
7277                 ctx_pg->nr_pages = 0;
7278                 return -EINVAL;
7279         }
7280         if (ctx_pg->nr_pages > MAX_CTX_PAGES || depth > 1) {
7281                 int nr_tbls, i;
7282
7283                 rmem->depth = 2;
7284                 ctx_pg->ctx_pg_tbl = kcalloc(MAX_CTX_PAGES, sizeof(ctx_pg),
7285                                              GFP_KERNEL);
7286                 if (!ctx_pg->ctx_pg_tbl)
7287                         return -ENOMEM;
7288                 nr_tbls = DIV_ROUND_UP(ctx_pg->nr_pages, MAX_CTX_PAGES);
7289                 rmem->nr_pages = nr_tbls;
7290                 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg);
7291                 if (rc)
7292                         return rc;
7293                 for (i = 0; i < nr_tbls; i++) {
7294                         struct bnxt_ctx_pg_info *pg_tbl;
7295
7296                         pg_tbl = kzalloc(sizeof(*pg_tbl), GFP_KERNEL);
7297                         if (!pg_tbl)
7298                                 return -ENOMEM;
7299                         ctx_pg->ctx_pg_tbl[i] = pg_tbl;
7300                         rmem = &pg_tbl->ring_mem;
7301                         rmem->pg_tbl = ctx_pg->ctx_pg_arr[i];
7302                         rmem->pg_tbl_map = ctx_pg->ctx_dma_arr[i];
7303                         rmem->depth = 1;
7304                         rmem->nr_pages = MAX_CTX_PAGES;
7305                         rmem->mem_init = mem_init;
7306                         if (i == (nr_tbls - 1)) {
7307                                 int rem = ctx_pg->nr_pages % MAX_CTX_PAGES;
7308
7309                                 if (rem)
7310                                         rmem->nr_pages = rem;
7311                         }
7312                         rc = bnxt_alloc_ctx_mem_blk(bp, pg_tbl);
7313                         if (rc)
7314                                 break;
7315                 }
7316         } else {
7317                 rmem->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE);
7318                 if (rmem->nr_pages > 1 || depth)
7319                         rmem->depth = 1;
7320                 rmem->mem_init = mem_init;
7321                 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg);
7322         }
7323         return rc;
7324 }
7325
7326 static void bnxt_free_ctx_pg_tbls(struct bnxt *bp,
7327                                   struct bnxt_ctx_pg_info *ctx_pg)
7328 {
7329         struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
7330
7331         if (rmem->depth > 1 || ctx_pg->nr_pages > MAX_CTX_PAGES ||
7332             ctx_pg->ctx_pg_tbl) {
7333                 int i, nr_tbls = rmem->nr_pages;
7334
7335                 for (i = 0; i < nr_tbls; i++) {
7336                         struct bnxt_ctx_pg_info *pg_tbl;
7337                         struct bnxt_ring_mem_info *rmem2;
7338
7339                         pg_tbl = ctx_pg->ctx_pg_tbl[i];
7340                         if (!pg_tbl)
7341                                 continue;
7342                         rmem2 = &pg_tbl->ring_mem;
7343                         bnxt_free_ring(bp, rmem2);
7344                         ctx_pg->ctx_pg_arr[i] = NULL;
7345                         kfree(pg_tbl);
7346                         ctx_pg->ctx_pg_tbl[i] = NULL;
7347                 }
7348                 kfree(ctx_pg->ctx_pg_tbl);
7349                 ctx_pg->ctx_pg_tbl = NULL;
7350         }
7351         bnxt_free_ring(bp, rmem);
7352         ctx_pg->nr_pages = 0;
7353 }
7354
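/* Free all backing store memory given to the firmware, including the TQM
 * ring contexts, and clear the initialized flag.  bp->ctx itself is not
 * freed here.
 */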
7355 void bnxt_free_ctx_mem(struct bnxt *bp)
7356 {
7357         struct bnxt_ctx_mem_info *ctx = bp->ctx;
7358         int i;
7359
7360         if (!ctx)
7361                 return;
7362
7363         if (ctx->tqm_mem[0]) {
7364                 for (i = 0; i < ctx->tqm_fp_rings_count + 1; i++)
7365                         bnxt_free_ctx_pg_tbls(bp, ctx->tqm_mem[i]);
7366                 kfree(ctx->tqm_mem[0]);
7367                 ctx->tqm_mem[0] = NULL;
7368         }
7369
7370         bnxt_free_ctx_pg_tbls(bp, &ctx->tim_mem);
7371         bnxt_free_ctx_pg_tbls(bp, &ctx->mrav_mem);
7372         bnxt_free_ctx_pg_tbls(bp, &ctx->stat_mem);
7373         bnxt_free_ctx_pg_tbls(bp, &ctx->vnic_mem);
7374         bnxt_free_ctx_pg_tbls(bp, &ctx->cq_mem);
7375         bnxt_free_ctx_pg_tbls(bp, &ctx->srq_mem);
7376         bnxt_free_ctx_pg_tbls(bp, &ctx->qp_mem);
7377         ctx->flags &= ~BNXT_CTX_FLAG_INITED;
7378 }
7379
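/* Size and allocate every backing store region based on the queried
 * capabilities, adding extra QP/SRQ/CQ/MRAV/TIM entries when RoCE is
 * supported, then hand the memory to the firmware.
 */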
7380 static int bnxt_alloc_ctx_mem(struct bnxt *bp)
7381 {
7382         struct bnxt_ctx_pg_info *ctx_pg;
7383         struct bnxt_ctx_mem_info *ctx;
7384         struct bnxt_mem_init *init;
7385         u32 mem_size, ena, entries;
7386         u32 entries_sp, min;
7387         u32 num_mr, num_ah;
7388         u32 extra_srqs = 0;
7389         u32 extra_qps = 0;
7390         u8 pg_lvl = 1;
7391         int i, rc;
7392
7393         rc = bnxt_hwrm_func_backing_store_qcaps(bp);
7394         if (rc) {
7395                 netdev_err(bp->dev, "Failed querying context mem capability, rc = %d.\n",
7396                            rc);
7397                 return rc;
7398         }
7399         ctx = bp->ctx;
7400         if (!ctx || (ctx->flags & BNXT_CTX_FLAG_INITED))
7401                 return 0;
7402
7403         if ((bp->flags & BNXT_FLAG_ROCE_CAP) && !is_kdump_kernel()) {
7404                 pg_lvl = 2;
7405                 extra_qps = 65536;
7406                 extra_srqs = 8192;
7407         }
7408
7409         ctx_pg = &ctx->qp_mem;
7410         ctx_pg->entries = ctx->qp_min_qp1_entries + ctx->qp_max_l2_entries +
7411                           extra_qps;
7412         if (ctx->qp_entry_size) {
7413                 mem_size = ctx->qp_entry_size * ctx_pg->entries;
7414                 init = &ctx->mem_init[BNXT_CTX_MEM_INIT_QP];
7415                 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl, init);
7416                 if (rc)
7417                         return rc;
7418         }
7419
7420         ctx_pg = &ctx->srq_mem;
7421         ctx_pg->entries = ctx->srq_max_l2_entries + extra_srqs;
7422         if (ctx->srq_entry_size) {
7423                 mem_size = ctx->srq_entry_size * ctx_pg->entries;
7424                 init = &ctx->mem_init[BNXT_CTX_MEM_INIT_SRQ];
7425                 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl, init);
7426                 if (rc)
7427                         return rc;
7428         }
7429
7430         ctx_pg = &ctx->cq_mem;
7431         ctx_pg->entries = ctx->cq_max_l2_entries + extra_qps * 2;
7432         if (ctx->cq_entry_size) {
7433                 mem_size = ctx->cq_entry_size * ctx_pg->entries;
7434                 init = &ctx->mem_init[BNXT_CTX_MEM_INIT_CQ];
7435                 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl, init);
7436                 if (rc)
7437                         return rc;
7438         }
7439
7440         ctx_pg = &ctx->vnic_mem;
7441         ctx_pg->entries = ctx->vnic_max_vnic_entries +
7442                           ctx->vnic_max_ring_table_entries;
7443         if (ctx->vnic_entry_size) {
7444                 mem_size = ctx->vnic_entry_size * ctx_pg->entries;
7445                 init = &ctx->mem_init[BNXT_CTX_MEM_INIT_VNIC];
7446                 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, init);
7447                 if (rc)
7448                         return rc;
7449         }
7450
7451         ctx_pg = &ctx->stat_mem;
7452         ctx_pg->entries = ctx->stat_max_entries;
7453         if (ctx->stat_entry_size) {
7454                 mem_size = ctx->stat_entry_size * ctx_pg->entries;
7455                 init = &ctx->mem_init[BNXT_CTX_MEM_INIT_STAT];
7456                 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, init);
7457                 if (rc)
7458                         return rc;
7459         }
7460
7461         ena = 0;
7462         if (!(bp->flags & BNXT_FLAG_ROCE_CAP))
7463                 goto skip_rdma;
7464
7465         ctx_pg = &ctx->mrav_mem;
7466         /* 128K extra is needed to accommodate static AH context
7467          * allocation by f/w.
7468          */
7469         num_mr = 1024 * 256;
7470         num_ah = 1024 * 128;
7471         ctx_pg->entries = num_mr + num_ah;
7472         if (ctx->mrav_entry_size) {
7473                 mem_size = ctx->mrav_entry_size * ctx_pg->entries;
7474                 init = &ctx->mem_init[BNXT_CTX_MEM_INIT_MRAV];
7475                 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 2, init);
7476                 if (rc)
7477                         return rc;
7478         }
7479         ena = FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV;
7480         if (ctx->mrav_num_entries_units)
7481                 ctx_pg->entries =
7482                         ((num_mr / ctx->mrav_num_entries_units) << 16) |
7483                          (num_ah / ctx->mrav_num_entries_units);
7484
7485         ctx_pg = &ctx->tim_mem;
7486         ctx_pg->entries = ctx->qp_mem.entries;
7487         if (ctx->tim_entry_size) {
7488                 mem_size = ctx->tim_entry_size * ctx_pg->entries;
7489                 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, NULL);
7490                 if (rc)
7491                         return rc;
7492         }
7493         ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM;
7494
7495 skip_rdma:
7496         min = ctx->tqm_min_entries_per_ring;
7497         entries_sp = ctx->vnic_max_vnic_entries + ctx->qp_max_l2_entries +
7498                      2 * (extra_qps + ctx->qp_min_qp1_entries) + min;
7499         entries_sp = roundup(entries_sp, ctx->tqm_entries_multiple);
7500         entries = ctx->qp_max_l2_entries + 2 * (extra_qps + ctx->qp_min_qp1_entries);
7501         entries = roundup(entries, ctx->tqm_entries_multiple);
7502         entries = clamp_t(u32, entries, min, ctx->tqm_max_entries_per_ring);
7503         for (i = 0; i < ctx->tqm_fp_rings_count + 1; i++) {
7504                 ctx_pg = ctx->tqm_mem[i];
7505                 ctx_pg->entries = i ? entries : entries_sp;
7506                 if (ctx->tqm_entry_size) {
7507                         mem_size = ctx->tqm_entry_size * ctx_pg->entries;
7508                         rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1,
7509                                                     NULL);
7510                         if (rc)
7511                                 return rc;
7512                 }
7513                 ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP << i;
7514         }
7515         ena |= FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES;
7516         rc = bnxt_hwrm_func_backing_store_cfg(bp, ena);
7517         if (rc) {
7518                 netdev_err(bp->dev, "Failed configuring context mem, rc = %d.\n",
7519                            rc);
7520                 return rc;
7521         }
7522         ctx->flags |= BNXT_CTX_FLAG_INITED;
7523         return 0;
7524 }
7525
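/* Query the function's min/max resource limits (rings, ring groups, stats
 * contexts, VNICs, etc.).  When @all is false, only the TX scheduler input
 * limit is refreshed.
 */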
7526 int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp, bool all)
7527 {
7528         struct hwrm_func_resource_qcaps_output *resp;
7529         struct hwrm_func_resource_qcaps_input *req;
7530         struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
7531         int rc;
7532
7533         rc = hwrm_req_init(bp, req, HWRM_FUNC_RESOURCE_QCAPS);
7534         if (rc)
7535                 return rc;
7536
7537         req->fid = cpu_to_le16(0xffff);
7538         resp = hwrm_req_hold(bp, req);
7539         rc = hwrm_req_send_silent(bp, req);
7540         if (rc)
7541                 goto hwrm_func_resc_qcaps_exit;
7542
7543         hw_resc->max_tx_sch_inputs = le16_to_cpu(resp->max_tx_scheduler_inputs);
7544         if (!all)
7545                 goto hwrm_func_resc_qcaps_exit;
7546
7547         hw_resc->min_rsscos_ctxs = le16_to_cpu(resp->min_rsscos_ctx);
7548         hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
7549         hw_resc->min_cp_rings = le16_to_cpu(resp->min_cmpl_rings);
7550         hw_resc->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
7551         hw_resc->min_tx_rings = le16_to_cpu(resp->min_tx_rings);
7552         hw_resc->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
7553         hw_resc->min_rx_rings = le16_to_cpu(resp->min_rx_rings);
7554         hw_resc->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
7555         hw_resc->min_hw_ring_grps = le16_to_cpu(resp->min_hw_ring_grps);
7556         hw_resc->max_hw_ring_grps = le16_to_cpu(resp->max_hw_ring_grps);
7557         hw_resc->min_l2_ctxs = le16_to_cpu(resp->min_l2_ctxs);
7558         hw_resc->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
7559         hw_resc->min_vnics = le16_to_cpu(resp->min_vnics);
7560         hw_resc->max_vnics = le16_to_cpu(resp->max_vnics);
7561         hw_resc->min_stat_ctxs = le16_to_cpu(resp->min_stat_ctx);
7562         hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
7563
7564         if (bp->flags & BNXT_FLAG_CHIP_P5) {
7565                 u16 max_msix = le16_to_cpu(resp->max_msix);
7566
7567                 hw_resc->max_nqs = max_msix;
7568                 hw_resc->max_hw_ring_grps = hw_resc->max_rx_rings;
7569         }
7570
7571         if (BNXT_PF(bp)) {
7572                 struct bnxt_pf_info *pf = &bp->pf;
7573
7574                 pf->vf_resv_strategy =
7575                         le16_to_cpu(resp->vf_reservation_strategy);
7576                 if (pf->vf_resv_strategy > BNXT_VF_RESV_STRATEGY_MINIMAL_STATIC)
7577                         pf->vf_resv_strategy = BNXT_VF_RESV_STRATEGY_MAXIMAL;
7578         }
7579 hwrm_func_resc_qcaps_exit:
7580         hwrm_req_drop(bp, req);
7581         return rc;
7582 }
7583
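/* Query the port's PTP capability.  If HWRM access to the PHC is supported,
 * allocate bp->ptp_cfg, record the reference clock registers and initialize
 * PTP; otherwise tear down any existing PTP state.
 */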
7584 static int __bnxt_hwrm_ptp_qcfg(struct bnxt *bp)
7585 {
7586         struct hwrm_port_mac_ptp_qcfg_output *resp;
7587         struct hwrm_port_mac_ptp_qcfg_input *req;
7588         struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
7589         bool phc_cfg;
7590         u8 flags;
7591         int rc;
7592
7593         if (bp->hwrm_spec_code < 0x10801 || !BNXT_CHIP_P5_THOR(bp)) {
7594                 rc = -ENODEV;
7595                 goto no_ptp;
7596         }
7597
7598         rc = hwrm_req_init(bp, req, HWRM_PORT_MAC_PTP_QCFG);
7599         if (rc)
7600                 goto no_ptp;
7601
7602         req->port_id = cpu_to_le16(bp->pf.port_id);
7603         resp = hwrm_req_hold(bp, req);
7604         rc = hwrm_req_send(bp, req);
7605         if (rc)
7606                 goto exit;
7607
7608         flags = resp->flags;
7609         if (!(flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_HWRM_ACCESS)) {
7610                 rc = -ENODEV;
7611                 goto exit;
7612         }
7613         if (!ptp) {
7614                 ptp = kzalloc(sizeof(*ptp), GFP_KERNEL);
7615                 if (!ptp) {
7616                         rc = -ENOMEM;
7617                         goto exit;
7618                 }
7619                 ptp->bp = bp;
7620                 bp->ptp_cfg = ptp;
7621         }
7622         if (flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_PARTIAL_DIRECT_ACCESS_REF_CLOCK) {
7623                 ptp->refclk_regs[0] = le32_to_cpu(resp->ts_ref_clock_reg_lower);
7624                 ptp->refclk_regs[1] = le32_to_cpu(resp->ts_ref_clock_reg_upper);
7625         } else if (bp->flags & BNXT_FLAG_CHIP_P5) {
7626                 ptp->refclk_regs[0] = BNXT_TS_REG_TIMESYNC_TS0_LOWER;
7627                 ptp->refclk_regs[1] = BNXT_TS_REG_TIMESYNC_TS0_UPPER;
7628         } else {
7629                 rc = -ENODEV;
7630                 goto exit;
7631         }
7632         phc_cfg = (flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_RTC_CONFIGURED) != 0;
7633         rc = bnxt_ptp_init(bp, phc_cfg);
7634         if (rc)
7635                 netdev_warn(bp->dev, "PTP initialization failed.\n");
7636 exit:
7637         hwrm_req_drop(bp, req);
7638         if (!rc)
7639                 return 0;
7640
7641 no_ptp:
7642         bnxt_ptp_clear(bp);
7643         kfree(ptp);
7644         bp->ptp_cfg = NULL;
7645         return rc;
7646 }
7647
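/* Query function capabilities and translate the firmware flags into driver
 * capability bits, resource maxima, and PF/VF identity (MAC address, port
 * ID, VF ID range).
 */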
7648 static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
7649 {
7650         struct hwrm_func_qcaps_output *resp;
7651         struct hwrm_func_qcaps_input *req;
7652         struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
7653         u32 flags, flags_ext, flags_ext2;
7654         int rc;
7655
7656         rc = hwrm_req_init(bp, req, HWRM_FUNC_QCAPS);
7657         if (rc)
7658                 return rc;
7659
7660         req->fid = cpu_to_le16(0xffff);
7661         resp = hwrm_req_hold(bp, req);
7662         rc = hwrm_req_send(bp, req);
7663         if (rc)
7664                 goto hwrm_func_qcaps_exit;
7665
7666         flags = le32_to_cpu(resp->flags);
7667         if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V1_SUPPORTED)
7668                 bp->flags |= BNXT_FLAG_ROCEV1_CAP;
7669         if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V2_SUPPORTED)
7670                 bp->flags |= BNXT_FLAG_ROCEV2_CAP;
7671         if (flags & FUNC_QCAPS_RESP_FLAGS_PCIE_STATS_SUPPORTED)
7672                 bp->fw_cap |= BNXT_FW_CAP_PCIE_STATS_SUPPORTED;
7673         if (flags & FUNC_QCAPS_RESP_FLAGS_HOT_RESET_CAPABLE)
7674                 bp->fw_cap |= BNXT_FW_CAP_HOT_RESET;
7675         if (flags & FUNC_QCAPS_RESP_FLAGS_EXT_STATS_SUPPORTED)
7676                 bp->fw_cap |= BNXT_FW_CAP_EXT_STATS_SUPPORTED;
7677         if (flags & FUNC_QCAPS_RESP_FLAGS_ERROR_RECOVERY_CAPABLE)
7678                 bp->fw_cap |= BNXT_FW_CAP_ERROR_RECOVERY;
7679         if (flags & FUNC_QCAPS_RESP_FLAGS_ERR_RECOVER_RELOAD)
7680                 bp->fw_cap |= BNXT_FW_CAP_ERR_RECOVER_RELOAD;
7681         if (!(flags & FUNC_QCAPS_RESP_FLAGS_VLAN_ACCELERATION_TX_DISABLED))
7682                 bp->fw_cap |= BNXT_FW_CAP_VLAN_TX_INSERT;
7683         if (flags & FUNC_QCAPS_RESP_FLAGS_DBG_QCAPS_CMD_SUPPORTED)
7684                 bp->fw_cap |= BNXT_FW_CAP_DBG_QCAPS;
7685
7686         flags_ext = le32_to_cpu(resp->flags_ext);
7687         if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_EXT_HW_STATS_SUPPORTED)
7688                 bp->fw_cap |= BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED;
7689         if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_PTP_PPS_SUPPORTED))
7690                 bp->fw_cap |= BNXT_FW_CAP_PTP_PPS;
7691         if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_PTP_64BIT_RTC_SUPPORTED)
7692                 bp->fw_cap |= BNXT_FW_CAP_PTP_RTC;
7693         if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_HOT_RESET_IF_SUPPORT))
7694                 bp->fw_cap |= BNXT_FW_CAP_HOT_RESET_IF;
7695         if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_FW_LIVEPATCH_SUPPORTED))
7696                 bp->fw_cap |= BNXT_FW_CAP_LIVEPATCH;
7697
7698         flags_ext2 = le32_to_cpu(resp->flags_ext2);
7699         if (flags_ext2 & FUNC_QCAPS_RESP_FLAGS_EXT2_RX_ALL_PKTS_TIMESTAMPS_SUPPORTED)
7700                 bp->fw_cap |= BNXT_FW_CAP_RX_ALL_PKT_TS;
7701
7702         bp->tx_push_thresh = 0;
7703         if ((flags & FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED) &&
7704             BNXT_FW_MAJ(bp) > 217)
7705                 bp->tx_push_thresh = BNXT_TX_PUSH_THRESH;
7706
7707         hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
7708         hw_resc->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
7709         hw_resc->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
7710         hw_resc->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
7711         hw_resc->max_hw_ring_grps = le32_to_cpu(resp->max_hw_ring_grps);
7712         if (!hw_resc->max_hw_ring_grps)
7713                 hw_resc->max_hw_ring_grps = hw_resc->max_tx_rings;
7714         hw_resc->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
7715         hw_resc->max_vnics = le16_to_cpu(resp->max_vnics);
7716         hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
7717
7718         if (BNXT_PF(bp)) {
7719                 struct bnxt_pf_info *pf = &bp->pf;
7720
7721                 pf->fw_fid = le16_to_cpu(resp->fid);
7722                 pf->port_id = le16_to_cpu(resp->port_id);
7723                 memcpy(pf->mac_addr, resp->mac_address, ETH_ALEN);
7724                 pf->first_vf_id = le16_to_cpu(resp->first_vf_id);
7725                 pf->max_vfs = le16_to_cpu(resp->max_vfs);
7726                 pf->max_encap_records = le32_to_cpu(resp->max_encap_records);
7727                 pf->max_decap_records = le32_to_cpu(resp->max_decap_records);
7728                 pf->max_tx_em_flows = le32_to_cpu(resp->max_tx_em_flows);
7729                 pf->max_tx_wm_flows = le32_to_cpu(resp->max_tx_wm_flows);
7730                 pf->max_rx_em_flows = le32_to_cpu(resp->max_rx_em_flows);
7731                 pf->max_rx_wm_flows = le32_to_cpu(resp->max_rx_wm_flows);
7732                 bp->flags &= ~BNXT_FLAG_WOL_CAP;
7733                 if (flags & FUNC_QCAPS_RESP_FLAGS_WOL_MAGICPKT_SUPPORTED)
7734                         bp->flags |= BNXT_FLAG_WOL_CAP;
7735                 if (flags & FUNC_QCAPS_RESP_FLAGS_PTP_SUPPORTED) {
7736                         bp->fw_cap |= BNXT_FW_CAP_PTP;
7737                 } else {
7738                         bnxt_ptp_clear(bp);
7739                         kfree(bp->ptp_cfg);
7740                         bp->ptp_cfg = NULL;
7741                 }
7742         } else {
7743 #ifdef CONFIG_BNXT_SRIOV
7744                 struct bnxt_vf_info *vf = &bp->vf;
7745
7746                 vf->fw_fid = le16_to_cpu(resp->fid);
7747                 memcpy(vf->mac_addr, resp->mac_address, ETH_ALEN);
7748 #endif
7749         }
7750
7751 hwrm_func_qcaps_exit:
7752         hwrm_req_drop(bp, req);
7753         return rc;
7754 }
7755
7756 static void bnxt_hwrm_dbg_qcaps(struct bnxt *bp)
7757 {
7758         struct hwrm_dbg_qcaps_output *resp;
7759         struct hwrm_dbg_qcaps_input *req;
7760         int rc;
7761
7762         bp->fw_dbg_cap = 0;
7763         if (!(bp->fw_cap & BNXT_FW_CAP_DBG_QCAPS))
7764                 return;
7765
7766         rc = hwrm_req_init(bp, req, HWRM_DBG_QCAPS);
7767         if (rc)
7768                 return;
7769
7770         req->fid = cpu_to_le16(0xffff);
7771         resp = hwrm_req_hold(bp, req);
7772         rc = hwrm_req_send(bp, req);
7773         if (rc)
7774                 goto hwrm_dbg_qcaps_exit;
7775
7776         bp->fw_dbg_cap = le32_to_cpu(resp->flags);
7777
7778 hwrm_dbg_qcaps_exit:
7779         hwrm_req_drop(bp, req);
7780 }
7781
7782 static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp);
7783
7784 int bnxt_hwrm_func_qcaps(struct bnxt *bp)
7785 {
7786         int rc;
7787
7788         rc = __bnxt_hwrm_func_qcaps(bp);
7789         if (rc)
7790                 return rc;
7791
7792         bnxt_hwrm_dbg_qcaps(bp);
7793
7794         rc = bnxt_hwrm_queue_qportcfg(bp);
7795         if (rc) {
7796                 netdev_err(bp->dev, "hwrm query qportcfg failure rc: %d\n", rc);
7797                 return rc;
7798         }
7799         if (bp->hwrm_spec_code >= 0x10803) {
7800                 rc = bnxt_alloc_ctx_mem(bp);
7801                 if (rc)
7802                         return rc;
7803                 rc = bnxt_hwrm_func_resc_qcaps(bp, true);
7804                 if (!rc)
7805                         bp->fw_cap |= BNXT_FW_CAP_NEW_RM;
7806         }
7807         return 0;
7808 }
7809
7810 static int bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(struct bnxt *bp)
7811 {
7812         struct hwrm_cfa_adv_flow_mgnt_qcaps_output *resp;
7813         struct hwrm_cfa_adv_flow_mgnt_qcaps_input *req;
7814         u32 flags;
7815         int rc;
7816
7817         if (!(bp->fw_cap & BNXT_FW_CAP_CFA_ADV_FLOW))
7818                 return 0;
7819
7820         rc = hwrm_req_init(bp, req, HWRM_CFA_ADV_FLOW_MGNT_QCAPS);
7821         if (rc)
7822                 return rc;
7823
7824         resp = hwrm_req_hold(bp, req);
7825         rc = hwrm_req_send(bp, req);
7826         if (rc)
7827                 goto hwrm_cfa_adv_qcaps_exit;
7828
7829         flags = le32_to_cpu(resp->flags);
7830         if (flags &
7831             CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RFS_RING_TBL_IDX_V2_SUPPORTED)
7832                 bp->fw_cap |= BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2;
7833
7834 hwrm_cfa_adv_qcaps_exit:
7835         hwrm_req_drop(bp, req);
7836         return rc;
7837 }
7838
7839 static int __bnxt_alloc_fw_health(struct bnxt *bp)
7840 {
7841         if (bp->fw_health)
7842                 return 0;
7843
7844         bp->fw_health = kzalloc(sizeof(*bp->fw_health), GFP_KERNEL);
7845         if (!bp->fw_health)
7846                 return -ENOMEM;
7847
7848         mutex_init(&bp->fw_health->lock);
7849         return 0;
7850 }
7851
7852 static int bnxt_alloc_fw_health(struct bnxt *bp)
7853 {
7854         int rc;
7855
7856         if (!(bp->fw_cap & BNXT_FW_CAP_HOT_RESET) &&
7857             !(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
7858                 return 0;
7859
7860         rc = __bnxt_alloc_fw_health(bp);
7861         if (rc) {
7862                 bp->fw_cap &= ~BNXT_FW_CAP_HOT_RESET;
7863                 bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;
7864                 return rc;
7865         }
7866
7867         return 0;
7868 }
7869
7870 static void __bnxt_map_fw_health_reg(struct bnxt *bp, u32 reg)
7871 {
7872         writel(reg & BNXT_GRC_BASE_MASK, bp->bar0 +
7873                                          BNXT_GRCPF_REG_WINDOW_BASE_OUT +
7874                                          BNXT_FW_HEALTH_WIN_MAP_OFF);
7875 }
7876
7877 static void bnxt_inv_fw_health_reg(struct bnxt *bp)
7878 {
7879         struct bnxt_fw_health *fw_health = bp->fw_health;
7880         u32 reg_type;
7881
7882         if (!fw_health)
7883                 return;
7884
7885         reg_type = BNXT_FW_HEALTH_REG_TYPE(fw_health->regs[BNXT_FW_HEALTH_REG]);
7886         if (reg_type == BNXT_FW_HEALTH_REG_TYPE_GRC)
7887                 fw_health->status_reliable = false;
7888
7889         reg_type = BNXT_FW_HEALTH_REG_TYPE(fw_health->regs[BNXT_FW_RESET_CNT_REG]);
7890         if (reg_type == BNXT_FW_HEALTH_REG_TYPE_GRC)
7891                 fw_health->resets_reliable = false;
7892 }
7893
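/* Locate the firmware health status register, either from the hcomm status
 * structure or, failing that, from the fixed P5 GRC location, then allocate
 * bp->fw_health and map the register through the health window when it is a
 * GRC address.
 */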
7894 static void bnxt_try_map_fw_health_reg(struct bnxt *bp)
7895 {
7896         void __iomem *hs;
7897         u32 status_loc;
7898         u32 reg_type;
7899         u32 sig;
7900
7901         if (bp->fw_health)
7902                 bp->fw_health->status_reliable = false;
7903
7904         __bnxt_map_fw_health_reg(bp, HCOMM_STATUS_STRUCT_LOC);
7905         hs = bp->bar0 + BNXT_FW_HEALTH_WIN_OFF(HCOMM_STATUS_STRUCT_LOC);
7906
7907         sig = readl(hs + offsetof(struct hcomm_status, sig_ver));
7908         if ((sig & HCOMM_STATUS_SIGNATURE_MASK) != HCOMM_STATUS_SIGNATURE_VAL) {
7909                 if (!bp->chip_num) {
7910                         __bnxt_map_fw_health_reg(bp, BNXT_GRC_REG_BASE);
7911                         bp->chip_num = readl(bp->bar0 +
7912                                              BNXT_FW_HEALTH_WIN_BASE +
7913                                              BNXT_GRC_REG_CHIP_NUM);
7914                 }
7915                 if (!BNXT_CHIP_P5(bp))
7916                         return;
7917
7918                 status_loc = BNXT_GRC_REG_STATUS_P5 |
7919                              BNXT_FW_HEALTH_REG_TYPE_BAR0;
7920         } else {
7921                 status_loc = readl(hs + offsetof(struct hcomm_status,
7922                                                  fw_status_loc));
7923         }
7924
7925         if (__bnxt_alloc_fw_health(bp)) {
7926                 netdev_warn(bp->dev, "no memory for firmware status checks\n");
7927                 return;
7928         }
7929
7930         bp->fw_health->regs[BNXT_FW_HEALTH_REG] = status_loc;
7931         reg_type = BNXT_FW_HEALTH_REG_TYPE(status_loc);
7932         if (reg_type == BNXT_FW_HEALTH_REG_TYPE_GRC) {
7933                 __bnxt_map_fw_health_reg(bp, status_loc);
7934                 bp->fw_health->mapped_regs[BNXT_FW_HEALTH_REG] =
7935                         BNXT_FW_HEALTH_WIN_OFF(status_loc);
7936         }
7937
7938         bp->fw_health->status_reliable = true;
7939 }
7940
7941 static int bnxt_map_fw_health_regs(struct bnxt *bp)
7942 {
7943         struct bnxt_fw_health *fw_health = bp->fw_health;
7944         u32 reg_base = 0xffffffff;
7945         int i;
7946
7947         bp->fw_health->status_reliable = false;
7948         bp->fw_health->resets_reliable = false;
7949         /* Only pre-map the monitoring GRC registers using window 3 */
7950         for (i = 0; i < 4; i++) {
7951                 u32 reg = fw_health->regs[i];
7952
7953                 if (BNXT_FW_HEALTH_REG_TYPE(reg) != BNXT_FW_HEALTH_REG_TYPE_GRC)
7954                         continue;
7955                 if (reg_base == 0xffffffff)
7956                         reg_base = reg & BNXT_GRC_BASE_MASK;
7957                 if ((reg & BNXT_GRC_BASE_MASK) != reg_base)
7958                         return -ERANGE;
7959                 fw_health->mapped_regs[i] = BNXT_FW_HEALTH_WIN_OFF(reg);
7960         }
7961         bp->fw_health->status_reliable = true;
7962         bp->fw_health->resets_reliable = true;
7963         if (reg_base == 0xffffffff)
7964                 return 0;
7965
7966         __bnxt_map_fw_health_reg(bp, reg_base);
7967         return 0;
7968 }
7969
7970 static void bnxt_remap_fw_health_regs(struct bnxt *bp)
7971 {
7972         if (!bp->fw_health)
7973                 return;
7974
7975         if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) {
7976                 bp->fw_health->status_reliable = true;
7977                 bp->fw_health->resets_reliable = true;
7978         } else {
7979                 bnxt_try_map_fw_health_reg(bp);
7980         }
7981 }
7982
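/* Retrieve the error recovery parameters: polling and wait intervals, the
 * health/heartbeat/reset-count registers and the reset register sequence.
 * The ERROR_RECOVERY capability is cleared if anything fails.
 */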
7983 static int bnxt_hwrm_error_recovery_qcfg(struct bnxt *bp)
7984 {
7985         struct bnxt_fw_health *fw_health = bp->fw_health;
7986         struct hwrm_error_recovery_qcfg_output *resp;
7987         struct hwrm_error_recovery_qcfg_input *req;
7988         int rc, i;
7989
7990         if (!(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
7991                 return 0;
7992
7993         rc = hwrm_req_init(bp, req, HWRM_ERROR_RECOVERY_QCFG);
7994         if (rc)
7995                 return rc;
7996
7997         resp = hwrm_req_hold(bp, req);
7998         rc = hwrm_req_send(bp, req);
7999         if (rc)
8000                 goto err_recovery_out;
8001         fw_health->flags = le32_to_cpu(resp->flags);
8002         if ((fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU) &&
8003             !(bp->fw_cap & BNXT_FW_CAP_KONG_MB_CHNL)) {
8004                 rc = -EINVAL;
8005                 goto err_recovery_out;
8006         }
8007         fw_health->polling_dsecs = le32_to_cpu(resp->driver_polling_freq);
8008         fw_health->master_func_wait_dsecs =
8009                 le32_to_cpu(resp->master_func_wait_period);
8010         fw_health->normal_func_wait_dsecs =
8011                 le32_to_cpu(resp->normal_func_wait_period);
8012         fw_health->post_reset_wait_dsecs =
8013                 le32_to_cpu(resp->master_func_wait_period_after_reset);
8014         fw_health->post_reset_max_wait_dsecs =
8015                 le32_to_cpu(resp->max_bailout_time_after_reset);
8016         fw_health->regs[BNXT_FW_HEALTH_REG] =
8017                 le32_to_cpu(resp->fw_health_status_reg);
8018         fw_health->regs[BNXT_FW_HEARTBEAT_REG] =
8019                 le32_to_cpu(resp->fw_heartbeat_reg);
8020         fw_health->regs[BNXT_FW_RESET_CNT_REG] =
8021                 le32_to_cpu(resp->fw_reset_cnt_reg);
8022         fw_health->regs[BNXT_FW_RESET_INPROG_REG] =
8023                 le32_to_cpu(resp->reset_inprogress_reg);
8024         fw_health->fw_reset_inprog_reg_mask =
8025                 le32_to_cpu(resp->reset_inprogress_reg_mask);
8026         fw_health->fw_reset_seq_cnt = resp->reg_array_cnt;
8027         if (fw_health->fw_reset_seq_cnt >= 16) {
8028                 rc = -EINVAL;
8029                 goto err_recovery_out;
8030         }
8031         for (i = 0; i < fw_health->fw_reset_seq_cnt; i++) {
8032                 fw_health->fw_reset_seq_regs[i] =
8033                         le32_to_cpu(resp->reset_reg[i]);
8034                 fw_health->fw_reset_seq_vals[i] =
8035                         le32_to_cpu(resp->reset_reg_val[i]);
8036                 fw_health->fw_reset_seq_delay_msec[i] =
8037                         resp->delay_after_reset[i];
8038         }
8039 err_recovery_out:
8040         hwrm_req_drop(bp, req);
8041         if (!rc)
8042                 rc = bnxt_map_fw_health_regs(bp);
8043         if (rc)
8044                 bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;
8045         return rc;
8046 }
8047
8048 static int bnxt_hwrm_func_reset(struct bnxt *bp)
8049 {
8050         struct hwrm_func_reset_input *req;
8051         int rc;
8052
8053         rc = hwrm_req_init(bp, req, HWRM_FUNC_RESET);
8054         if (rc)
8055                 return rc;
8056
8057         req->enables = 0;
8058         hwrm_req_timeout(bp, req, HWRM_RESET_TIMEOUT);
8059         return hwrm_req_send(bp, req);
8060 }
8061
8062 static void bnxt_nvm_cfg_ver_get(struct bnxt *bp)
8063 {
8064         struct hwrm_nvm_get_dev_info_output nvm_info;
8065
8066         if (!bnxt_hwrm_nvm_get_dev_info(bp, &nvm_info))
8067                 snprintf(bp->nvm_cfg_ver, FW_VER_STR_LEN, "%d.%d.%d",
8068                          nvm_info.nvm_cfg_ver_maj, nvm_info.nvm_cfg_ver_min,
8069                          nvm_info.nvm_cfg_ver_upd);
8070 }
8071
8072 static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
8073 {
8074         struct hwrm_queue_qportcfg_output *resp;
8075         struct hwrm_queue_qportcfg_input *req;
8076         u8 i, j, *qptr;
8077         bool no_rdma;
8078         int rc = 0;
8079
8080         rc = hwrm_req_init(bp, req, HWRM_QUEUE_QPORTCFG);
8081         if (rc)
8082                 return rc;
8083
8084         resp = hwrm_req_hold(bp, req);
8085         rc = hwrm_req_send(bp, req);
8086         if (rc)
8087                 goto qportcfg_exit;
8088
8089         if (!resp->max_configurable_queues) {
8090                 rc = -EINVAL;
8091                 goto qportcfg_exit;
8092         }
8093         bp->max_tc = resp->max_configurable_queues;
8094         bp->max_lltc = resp->max_configurable_lossless_queues;
8095         if (bp->max_tc > BNXT_MAX_QUEUE)
8096                 bp->max_tc = BNXT_MAX_QUEUE;
8097
8098         no_rdma = !(bp->flags & BNXT_FLAG_ROCE_CAP);
8099         qptr = &resp->queue_id0;
8100         for (i = 0, j = 0; i < bp->max_tc; i++) {
8101                 bp->q_info[j].queue_id = *qptr;
8102                 bp->q_ids[i] = *qptr++;
8103                 bp->q_info[j].queue_profile = *qptr++;
8104                 bp->tc_to_qidx[j] = j;
8105                 if (!BNXT_CNPQ(bp->q_info[j].queue_profile) ||
8106                     (no_rdma && BNXT_PF(bp)))
8107                         j++;
8108         }
8109         bp->max_q = bp->max_tc;
8110         bp->max_tc = max_t(u8, j, 1);
8111
8112         if (resp->queue_cfg_info & QUEUE_QPORTCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG)
8113                 bp->max_tc = 1;
8114
8115         if (bp->max_lltc > bp->max_tc)
8116                 bp->max_lltc = bp->max_tc;
8117
8118 qportcfg_exit:
8119         hwrm_req_drop(bp, req);
8120         return rc;
8121 }
8122
8123 static int bnxt_hwrm_poll(struct bnxt *bp)
8124 {
8125         struct hwrm_ver_get_input *req;
8126         int rc;
8127
8128         rc = hwrm_req_init(bp, req, HWRM_VER_GET);
8129         if (rc)
8130                 return rc;
8131
8132         req->hwrm_intf_maj = HWRM_VERSION_MAJOR;
8133         req->hwrm_intf_min = HWRM_VERSION_MINOR;
8134         req->hwrm_intf_upd = HWRM_VERSION_UPDATE;
8135
8136         hwrm_req_flags(bp, req, BNXT_HWRM_CTX_SILENT | BNXT_HWRM_FULL_WAIT);
8137         rc = hwrm_req_send(bp, req);
8138         return rc;
8139 }
8140
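/* Query the HWRM interface and firmware versions, build the driver's
 * version strings, and record the firmware-advertised default and maximum
 * command timeouts.
 */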
8141 static int bnxt_hwrm_ver_get(struct bnxt *bp)
8142 {
8143         struct hwrm_ver_get_output *resp;
8144         struct hwrm_ver_get_input *req;
8145         u16 fw_maj, fw_min, fw_bld, fw_rsv;
8146         u32 dev_caps_cfg, hwrm_ver;
8147         int rc, len;
8148
8149         rc = hwrm_req_init(bp, req, HWRM_VER_GET);
8150         if (rc)
8151                 return rc;
8152
8153         hwrm_req_flags(bp, req, BNXT_HWRM_FULL_WAIT);
8154         bp->hwrm_max_req_len = HWRM_MAX_REQ_LEN;
8155         req->hwrm_intf_maj = HWRM_VERSION_MAJOR;
8156         req->hwrm_intf_min = HWRM_VERSION_MINOR;
8157         req->hwrm_intf_upd = HWRM_VERSION_UPDATE;
8158
8159         resp = hwrm_req_hold(bp, req);
8160         rc = hwrm_req_send(bp, req);
8161         if (rc)
8162                 goto hwrm_ver_get_exit;
8163
8164         memcpy(&bp->ver_resp, resp, sizeof(struct hwrm_ver_get_output));
8165
8166         bp->hwrm_spec_code = resp->hwrm_intf_maj_8b << 16 |
8167                              resp->hwrm_intf_min_8b << 8 |
8168                              resp->hwrm_intf_upd_8b;
8169         if (resp->hwrm_intf_maj_8b < 1) {
8170                 netdev_warn(bp->dev, "HWRM interface %d.%d.%d is older than 1.0.0.\n",
8171                             resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b,
8172                             resp->hwrm_intf_upd_8b);
8173                 netdev_warn(bp->dev, "Please update firmware with HWRM interface 1.0.0 or newer.\n");
8174         }
8175
8176         hwrm_ver = HWRM_VERSION_MAJOR << 16 | HWRM_VERSION_MINOR << 8 |
8177                         HWRM_VERSION_UPDATE;
8178
8179         if (bp->hwrm_spec_code > hwrm_ver)
8180                 snprintf(bp->hwrm_ver_supp, FW_VER_STR_LEN, "%d.%d.%d",
8181                          HWRM_VERSION_MAJOR, HWRM_VERSION_MINOR,
8182                          HWRM_VERSION_UPDATE);
8183         else
8184                 snprintf(bp->hwrm_ver_supp, FW_VER_STR_LEN, "%d.%d.%d",
8185                          resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b,
8186                          resp->hwrm_intf_upd_8b);
8187
8188         fw_maj = le16_to_cpu(resp->hwrm_fw_major);
8189         if (bp->hwrm_spec_code > 0x10803 && fw_maj) {
8190                 fw_min = le16_to_cpu(resp->hwrm_fw_minor);
8191                 fw_bld = le16_to_cpu(resp->hwrm_fw_build);
8192                 fw_rsv = le16_to_cpu(resp->hwrm_fw_patch);
8193                 len = FW_VER_STR_LEN;
8194         } else {
8195                 fw_maj = resp->hwrm_fw_maj_8b;
8196                 fw_min = resp->hwrm_fw_min_8b;
8197                 fw_bld = resp->hwrm_fw_bld_8b;
8198                 fw_rsv = resp->hwrm_fw_rsvd_8b;
8199                 len = BC_HWRM_STR_LEN;
8200         }
8201         bp->fw_ver_code = BNXT_FW_VER_CODE(fw_maj, fw_min, fw_bld, fw_rsv);
8202         snprintf(bp->fw_ver_str, len, "%d.%d.%d.%d", fw_maj, fw_min, fw_bld,
8203                  fw_rsv);
8204
8205         if (strlen(resp->active_pkg_name)) {
8206                 int fw_ver_len = strlen(bp->fw_ver_str);
8207
8208                 snprintf(bp->fw_ver_str + fw_ver_len,
8209                          FW_VER_STR_LEN - fw_ver_len - 1, "/pkg %s",
8210                          resp->active_pkg_name);
8211                 bp->fw_cap |= BNXT_FW_CAP_PKG_VER;
8212         }
8213
8214         bp->hwrm_cmd_timeout = le16_to_cpu(resp->def_req_timeout);
8215         if (!bp->hwrm_cmd_timeout)
8216                 bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT;
8217         bp->hwrm_cmd_max_timeout = le16_to_cpu(resp->max_req_timeout) * 1000;
8218         if (!bp->hwrm_cmd_max_timeout)
8219                 bp->hwrm_cmd_max_timeout = HWRM_CMD_MAX_TIMEOUT;
8220         else if (bp->hwrm_cmd_max_timeout > HWRM_CMD_MAX_TIMEOUT)
8221                 netdev_warn(bp->dev, "Device requests max timeout of %d seconds, may trigger hung task watchdog\n",
8222                             bp->hwrm_cmd_max_timeout / 1000);
8223
8224         if (resp->hwrm_intf_maj_8b >= 1) {
8225                 bp->hwrm_max_req_len = le16_to_cpu(resp->max_req_win_len);
8226                 bp->hwrm_max_ext_req_len = le16_to_cpu(resp->max_ext_req_len);
8227         }
8228         if (bp->hwrm_max_ext_req_len < HWRM_MAX_REQ_LEN)
8229                 bp->hwrm_max_ext_req_len = HWRM_MAX_REQ_LEN;
8230
8231         bp->chip_num = le16_to_cpu(resp->chip_num);
8232         bp->chip_rev = resp->chip_rev;
8233         if (bp->chip_num == CHIP_NUM_58700 && !resp->chip_rev &&
8234             !resp->chip_metal)
8235                 bp->flags |= BNXT_FLAG_CHIP_NITRO_A0;
8236
8237         dev_caps_cfg = le32_to_cpu(resp->dev_caps_cfg);
8238         if ((dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
8239             (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_REQUIRED))
8240                 bp->fw_cap |= BNXT_FW_CAP_SHORT_CMD;
8241
8242         if (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_KONG_MB_CHNL_SUPPORTED)
8243                 bp->fw_cap |= BNXT_FW_CAP_KONG_MB_CHNL;
8244
8245         if (dev_caps_cfg &
8246             VER_GET_RESP_DEV_CAPS_CFG_FLOW_HANDLE_64BIT_SUPPORTED)
8247                 bp->fw_cap |= BNXT_FW_CAP_OVS_64BIT_HANDLE;
8248
8249         if (dev_caps_cfg &
8250             VER_GET_RESP_DEV_CAPS_CFG_TRUSTED_VF_SUPPORTED)
8251                 bp->fw_cap |= BNXT_FW_CAP_TRUSTED_VF;
8252
8253         if (dev_caps_cfg &
8254             VER_GET_RESP_DEV_CAPS_CFG_CFA_ADV_FLOW_MGNT_SUPPORTED)
8255                 bp->fw_cap |= BNXT_FW_CAP_CFA_ADV_FLOW;
8256
8257 hwrm_ver_get_exit:
8258         hwrm_req_drop(bp, req);
8259         return rc;
8260 }
8261
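/* Program the firmware real-time clock with the current wall-clock time.
 * Returns -EOPNOTSUPP on firmware older than HWRM 1.4.0 (or, for VFs,
 * older than 1.9.1).
 */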
8262 int bnxt_hwrm_fw_set_time(struct bnxt *bp)
8263 {
8264         struct hwrm_fw_set_time_input *req;
8265         struct tm tm;
8266         time64_t now = ktime_get_real_seconds();
8267         int rc;
8268
8269         if ((BNXT_VF(bp) && bp->hwrm_spec_code < 0x10901) ||
8270             bp->hwrm_spec_code < 0x10400)
8271                 return -EOPNOTSUPP;
8272
8273         time64_to_tm(now, 0, &tm);
8274         rc = hwrm_req_init(bp, req, HWRM_FW_SET_TIME);
8275         if (rc)
8276                 return rc;
8277
8278         req->year = cpu_to_le16(1900 + tm.tm_year);
8279         req->month = 1 + tm.tm_mon;
8280         req->day = tm.tm_mday;
8281         req->hour = tm.tm_hour;
8282         req->minute = tm.tm_min;
8283         req->second = tm.tm_sec;
8284         return hwrm_req_send(bp, req);
8285 }
8286
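/* Fold one hardware counter into its 64-bit software mirror.  @mask is
 * the width of the hardware counter; if the new value is lower than the
 * previous one the counter has wrapped, so add one full period.
 */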
8287 static void bnxt_add_one_ctr(u64 hw, u64 *sw, u64 mask)
8288 {
8289         u64 sw_tmp;
8290
8291         hw &= mask;
8292         sw_tmp = (*sw & ~mask) | hw;
8293         if (hw < (*sw & mask))
8294                 sw_tmp += mask + 1;
8295         WRITE_ONCE(*sw, sw_tmp);
8296 }
8297
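/* Accumulate @count hardware counters into the software copies.  Counters
 * with an all-ones mask are full 64-bit and copied directly; narrower
 * counters go through bnxt_add_one_ctr() to handle wrap-around.
 * @ignore_zero skips zero samples (see the P5 workaround below).
 */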
8298 static void __bnxt_accumulate_stats(__le64 *hw_stats, u64 *sw_stats, u64 *masks,
8299                                     int count, bool ignore_zero)
8300 {
8301         int i;
8302
8303         for (i = 0; i < count; i++) {
8304                 u64 hw = le64_to_cpu(READ_ONCE(hw_stats[i]));
8305
8306                 if (ignore_zero && !hw)
8307                         continue;
8308
8309                 if (masks[i] == -1ULL)
8310                         sw_stats[i] = hw;
8311                 else
8312                         bnxt_add_one_ctr(hw, &sw_stats[i], masks[i]);
8313         }
8314 }
8315
8316 static void bnxt_accumulate_stats(struct bnxt_stats_mem *stats)
8317 {
8318         if (!stats->hw_stats)
8319                 return;
8320
8321         __bnxt_accumulate_stats(stats->hw_stats, stats->sw_stats,
8322                                 stats->hw_masks, stats->len / 8, false);
8323 }
8324
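/* Walk all completion rings and the port statistics areas and fold the
 * latest hardware counters into their software mirrors.  Ring 0's masks
 * and length are used for every ring.
 */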
8325 static void bnxt_accumulate_all_stats(struct bnxt *bp)
8326 {
8327         struct bnxt_stats_mem *ring0_stats;
8328         bool ignore_zero = false;
8329         int i;
8330
8331         /* Chip bug.  Counter intermittently becomes 0. */
8332         if (bp->flags & BNXT_FLAG_CHIP_P5)
8333                 ignore_zero = true;
8334
8335         for (i = 0; i < bp->cp_nr_rings; i++) {
8336                 struct bnxt_napi *bnapi = bp->bnapi[i];
8337                 struct bnxt_cp_ring_info *cpr;
8338                 struct bnxt_stats_mem *stats;
8339
8340                 cpr = &bnapi->cp_ring;
8341                 stats = &cpr->stats;
8342                 if (!i)
8343                         ring0_stats = stats;
8344                 __bnxt_accumulate_stats(stats->hw_stats, stats->sw_stats,
8345                                         ring0_stats->hw_masks,
8346                                         ring0_stats->len / 8, ignore_zero);
8347         }
8348         if (bp->flags & BNXT_FLAG_PORT_STATS) {
8349                 struct bnxt_stats_mem *stats = &bp->port_stats;
8350                 __le64 *hw_stats = stats->hw_stats;
8351                 u64 *sw_stats = stats->sw_stats;
8352                 u64 *masks = stats->hw_masks;
8353                 int cnt;
8354
8355                 cnt = sizeof(struct rx_port_stats) / 8;
8356                 __bnxt_accumulate_stats(hw_stats, sw_stats, masks, cnt, false);
8357
8358                 hw_stats += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
8359                 sw_stats += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
8360                 masks += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
8361                 cnt = sizeof(struct tx_port_stats) / 8;
8362                 __bnxt_accumulate_stats(hw_stats, sw_stats, masks, cnt, false);
8363         }
8364         if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
8365                 bnxt_accumulate_stats(&bp->rx_port_stats_ext);
8366                 bnxt_accumulate_stats(&bp->tx_port_stats_ext);
8367         }
8368 }
8369
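/* Ask firmware to DMA the basic port statistics into the host buffer
 * mapped at bp->port_stats.  A non-zero @flags value requires firmware
 * that advertises BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED.
 */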
8370 static int bnxt_hwrm_port_qstats(struct bnxt *bp, u8 flags)
8371 {
8372         struct hwrm_port_qstats_input *req;
8373         struct bnxt_pf_info *pf = &bp->pf;
8374         int rc;
8375
8376         if (!(bp->flags & BNXT_FLAG_PORT_STATS))
8377                 return 0;
8378
8379         if (flags && !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED))
8380                 return -EOPNOTSUPP;
8381
8382         rc = hwrm_req_init(bp, req, HWRM_PORT_QSTATS);
8383         if (rc)
8384                 return rc;
8385
8386         req->flags = flags;
8387         req->port_id = cpu_to_le16(pf->port_id);
8388         req->tx_stat_host_addr = cpu_to_le64(bp->port_stats.hw_stats_map +
8389                                             BNXT_TX_PORT_STATS_BYTE_OFFSET);
8390         req->rx_stat_host_addr = cpu_to_le64(bp->port_stats.hw_stats_map);
8391         return hwrm_req_send(bp, req);
8392 }
8393
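/* DMA the extended rx/tx port statistics and record how many counters the
 * firmware actually filled in.  When the tx statistics are large enough to
 * include per-priority counters, also query the priority-to-CoS-queue
 * mapping and set bp->pri2cos_valid if a usable mapping was found.
 */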
8394 static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp, u8 flags)
8395 {
8396         struct hwrm_queue_pri2cos_qcfg_output *resp_qc;
8397         struct hwrm_queue_pri2cos_qcfg_input *req_qc;
8398         struct hwrm_port_qstats_ext_output *resp_qs;
8399         struct hwrm_port_qstats_ext_input *req_qs;
8400         struct bnxt_pf_info *pf = &bp->pf;
8401         u32 tx_stat_size;
8402         int rc;
8403
8404         if (!(bp->flags & BNXT_FLAG_PORT_STATS_EXT))
8405                 return 0;
8406
8407         if (flags && !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED))
8408                 return -EOPNOTSUPP;
8409
8410         rc = hwrm_req_init(bp, req_qs, HWRM_PORT_QSTATS_EXT);
8411         if (rc)
8412                 return rc;
8413
8414         req_qs->flags = flags;
8415         req_qs->port_id = cpu_to_le16(pf->port_id);
8416         req_qs->rx_stat_size = cpu_to_le16(sizeof(struct rx_port_stats_ext));
8417         req_qs->rx_stat_host_addr = cpu_to_le64(bp->rx_port_stats_ext.hw_stats_map);
8418         tx_stat_size = bp->tx_port_stats_ext.hw_stats ?
8419                        sizeof(struct tx_port_stats_ext) : 0;
8420         req_qs->tx_stat_size = cpu_to_le16(tx_stat_size);
8421         req_qs->tx_stat_host_addr = cpu_to_le64(bp->tx_port_stats_ext.hw_stats_map);
8422         resp_qs = hwrm_req_hold(bp, req_qs);
8423         rc = hwrm_req_send(bp, req_qs);
8424         if (!rc) {
8425                 bp->fw_rx_stats_ext_size =
8426                         le16_to_cpu(resp_qs->rx_stat_size) / 8;
8427                 if (BNXT_FW_MAJ(bp) < 220 &&
8428                     bp->fw_rx_stats_ext_size > BNXT_RX_STATS_EXT_NUM_LEGACY)
8429                         bp->fw_rx_stats_ext_size = BNXT_RX_STATS_EXT_NUM_LEGACY;
8430
8431                 bp->fw_tx_stats_ext_size = tx_stat_size ?
8432                         le16_to_cpu(resp_qs->tx_stat_size) / 8 : 0;
8433         } else {
8434                 bp->fw_rx_stats_ext_size = 0;
8435                 bp->fw_tx_stats_ext_size = 0;
8436         }
8437         hwrm_req_drop(bp, req_qs);
8438
8439         if (flags)
8440                 return rc;
8441
8442         if (bp->fw_tx_stats_ext_size <=
8443             offsetof(struct tx_port_stats_ext, pfc_pri0_tx_duration_us) / 8) {
8444                 bp->pri2cos_valid = 0;
8445                 return rc;
8446         }
8447
8448         rc = hwrm_req_init(bp, req_qc, HWRM_QUEUE_PRI2COS_QCFG);
8449         if (rc)
8450                 return rc;
8451
8452         req_qc->flags = cpu_to_le32(QUEUE_PRI2COS_QCFG_REQ_FLAGS_IVLAN);
8453
8454         resp_qc = hwrm_req_hold(bp, req_qc);
8455         rc = hwrm_req_send(bp, req_qc);
8456         if (!rc) {
8457                 u8 *pri2cos;
8458                 int i, j;
8459
8460                 pri2cos = &resp_qc->pri0_cos_queue_id;
8461                 for (i = 0; i < 8; i++) {
8462                         u8 queue_id = pri2cos[i];
8463                         u8 queue_idx;
8464
8465                         /* Per port queue IDs start from 0, 10, 20, etc */
8466                         queue_idx = queue_id % 10;
8467                         if (queue_idx > BNXT_MAX_QUEUE) {
8468                                 bp->pri2cos_valid = false;
8469                                 hwrm_req_drop(bp, req_qc);
8470                                 return rc;
8471                         }
8472                         for (j = 0; j < bp->max_q; j++) {
8473                                 if (bp->q_ids[j] == queue_id)
8474                                         bp->pri2cos_idx[i] = queue_idx;
8475                         }
8476                 }
8477                 bp->pri2cos_valid = true;
8478         }
8479         hwrm_req_drop(bp, req_qc);
8480
8481         return rc;
8482 }
8483
8484 static void bnxt_hwrm_free_tunnel_ports(struct bnxt *bp)
8485 {
8486         bnxt_hwrm_tunnel_dst_port_free(bp,
8487                 TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
8488         bnxt_hwrm_tunnel_dst_port_free(bp,
8489                 TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
8490 }
8491
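/* Enable or disable TPA (hardware GRO/LRO aggregation) on all VNICs.
 * The disable path is skipped when firmware access is not possible.
 */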
8492 static int bnxt_set_tpa(struct bnxt *bp, bool set_tpa)
8493 {
8494         int rc, i;
8495         u32 tpa_flags = 0;
8496
8497         if (set_tpa)
8498                 tpa_flags = bp->flags & BNXT_FLAG_TPA;
8499         else if (BNXT_NO_FW_ACCESS(bp))
8500                 return 0;
8501         for (i = 0; i < bp->nr_vnics; i++) {
8502                 rc = bnxt_hwrm_vnic_set_tpa(bp, i, tpa_flags);
8503                 if (rc) {
8504                         netdev_err(bp->dev, "hwrm vnic set tpa failure rc for vnic %d: %x\n",
8505                                    i, rc);
8506                         return rc;
8507                 }
8508         }
8509         return 0;
8510 }
8511
8512 static void bnxt_hwrm_clear_vnic_rss(struct bnxt *bp)
8513 {
8514         int i;
8515
8516         for (i = 0; i < bp->nr_vnics; i++)
8517                 bnxt_hwrm_vnic_set_rss(bp, i, false);
8518 }
8519
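/* Tear down VNIC state: L2 filters first, then RSS contexts and TPA
 * settings, then the VNICs themselves.  On P5 chips the RSS contexts are
 * freed after the VNICs instead of before.
 */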
8520 static void bnxt_clear_vnic(struct bnxt *bp)
8521 {
8522         if (!bp->vnic_info)
8523                 return;
8524
8525         bnxt_hwrm_clear_vnic_filter(bp);
8526         if (!(bp->flags & BNXT_FLAG_CHIP_P5)) {
8527                 /* clear all RSS settings before freeing the vnic ctx */
8528                 bnxt_hwrm_clear_vnic_rss(bp);
8529                 bnxt_hwrm_vnic_ctx_free(bp);
8530         }
8531         /* before freeing the vnic, undo the vnic TPA settings */
8532         if (bp->flags & BNXT_FLAG_TPA)
8533                 bnxt_set_tpa(bp, false);
8534         bnxt_hwrm_vnic_free(bp);
8535         if (bp->flags & BNXT_FLAG_CHIP_P5)
8536                 bnxt_hwrm_vnic_ctx_free(bp);
8537 }
8538
8539 static void bnxt_hwrm_resource_free(struct bnxt *bp, bool close_path,
8540                                     bool irq_re_init)
8541 {
8542         bnxt_clear_vnic(bp);
8543         bnxt_hwrm_ring_free(bp, close_path);
8544         bnxt_hwrm_ring_grp_free(bp);
8545         if (irq_re_init) {
8546                 bnxt_hwrm_stat_ctx_free(bp);
8547                 bnxt_hwrm_free_tunnel_ports(bp);
8548         }
8549 }
8550
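/* Map the requested bridge mode (VEB or VEPA) to the corresponding EVB
 * mode and program it via HWRM_FUNC_CFG.  Other modes are rejected with
 * -EINVAL.
 */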
8551 static int bnxt_hwrm_set_br_mode(struct bnxt *bp, u16 br_mode)
8552 {
8553         struct hwrm_func_cfg_input *req;
8554         u8 evb_mode;
8555         int rc;
8556
8557         if (br_mode == BRIDGE_MODE_VEB)
8558                 evb_mode = FUNC_CFG_REQ_EVB_MODE_VEB;
8559         else if (br_mode == BRIDGE_MODE_VEPA)
8560                 evb_mode = FUNC_CFG_REQ_EVB_MODE_VEPA;
8561         else
8562                 return -EINVAL;
8563
8564         rc = hwrm_req_init(bp, req, HWRM_FUNC_CFG);
8565         if (rc)
8566                 return rc;
8567
8568         req->fid = cpu_to_le16(0xffff);
8569         req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_EVB_MODE);
8570         req->evb_mode = evb_mode;
8571         return hwrm_req_send(bp, req);
8572 }
8573
8574 static int bnxt_hwrm_set_cache_line_size(struct bnxt *bp, int size)
8575 {
8576         struct hwrm_func_cfg_input *req;
8577         int rc;
8578
8579         if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10803)
8580                 return 0;
8581
8582         rc = hwrm_req_init(bp, req, HWRM_FUNC_CFG);
8583         if (rc)
8584                 return rc;
8585
8586         req->fid = cpu_to_le16(0xffff);
8587         req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_CACHE_LINESIZE);
8588         req->options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_64;
8589         if (size == 128)
8590                 req->options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_128;
8591
8592         return hwrm_req_send(bp, req);
8593 }
8594
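/* Set up one VNIC on pre-P5 chips: allocate its RSS/CoS contexts (skipped
 * for new-style RFS VNICs), apply the VNIC configuration, enable RSS, and
 * enable header-data split when aggregation rings are in use.
 */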
8595 static int __bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
8596 {
8597         struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
8598         int rc;
8599
8600         if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG)
8601                 goto skip_rss_ctx;
8602
8603         /* allocate context for vnic */
8604         rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 0);
8605         if (rc) {
8606                 netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
8607                            vnic_id, rc);
8608                 goto vnic_setup_err;
8609         }
8610         bp->rsscos_nr_ctxs++;
8611
8612         if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
8613                 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 1);
8614                 if (rc) {
8615                         netdev_err(bp->dev, "hwrm vnic %d cos ctx alloc failure rc: %x\n",
8616                                    vnic_id, rc);
8617                         goto vnic_setup_err;
8618                 }
8619                 bp->rsscos_nr_ctxs++;
8620         }
8621
8622 skip_rss_ctx:
8623         /* configure default vnic, ring grp */
8624         rc = bnxt_hwrm_vnic_cfg(bp, vnic_id);
8625         if (rc) {
8626                 netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n",
8627                            vnic_id, rc);
8628                 goto vnic_setup_err;
8629         }
8630
8631         /* Enable RSS hashing on vnic */
8632         rc = bnxt_hwrm_vnic_set_rss(bp, vnic_id, true);
8633         if (rc) {
8634                 netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %x\n",
8635                            vnic_id, rc);
8636                 goto vnic_setup_err;
8637         }
8638
8639         if (bp->flags & BNXT_FLAG_AGG_RINGS) {
8640                 rc = bnxt_hwrm_vnic_set_hds(bp, vnic_id);
8641                 if (rc) {
8642                         netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n",
8643                                    vnic_id, rc);
8644                 }
8645         }
8646
8647 vnic_setup_err:
8648         return rc;
8649 }
8650
8651 static int __bnxt_setup_vnic_p5(struct bnxt *bp, u16 vnic_id)
8652 {
8653         int rc, i, nr_ctxs;
8654
8655         nr_ctxs = bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings);
8656         for (i = 0; i < nr_ctxs; i++) {
8657                 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, i);
8658                 if (rc) {
8659                         netdev_err(bp->dev, "hwrm vnic %d ctx %d alloc failure rc: %x\n",
8660                                    vnic_id, i, rc);
8661                         break;
8662                 }
8663                 bp->rsscos_nr_ctxs++;
8664         }
8665         if (i < nr_ctxs)
8666                 return -ENOMEM;
8667
8668         rc = bnxt_hwrm_vnic_set_rss_p5(bp, vnic_id, true);
8669         if (rc) {
8670                 netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %d\n",
8671                            vnic_id, rc);
8672                 return rc;
8673         }
8674         rc = bnxt_hwrm_vnic_cfg(bp, vnic_id);
8675         if (rc) {
8676                 netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n",
8677                            vnic_id, rc);
8678                 return rc;
8679         }
8680         if (bp->flags & BNXT_FLAG_AGG_RINGS) {
8681                 rc = bnxt_hwrm_vnic_set_hds(bp, vnic_id);
8682                 if (rc) {
8683                         netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n",
8684                                    vnic_id, rc);
8685                 }
8686         }
8687         return rc;
8688 }
8689
8690 static int bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
8691 {
8692         if (bp->flags & BNXT_FLAG_CHIP_P5)
8693                 return __bnxt_setup_vnic_p5(bp, vnic_id);
8694         else
8695                 return __bnxt_setup_vnic(bp, vnic_id);
8696 }
8697
8698 static int bnxt_alloc_rfs_vnics(struct bnxt *bp)
8699 {
8700 #ifdef CONFIG_RFS_ACCEL
8701         int i, rc = 0;
8702
8703         if (bp->flags & BNXT_FLAG_CHIP_P5)
8704                 return 0;
8705
8706         for (i = 0; i < bp->rx_nr_rings; i++) {
8707                 struct bnxt_vnic_info *vnic;
8708                 u16 vnic_id = i + 1;
8709                 u16 ring_id = i;
8710
8711                 if (vnic_id >= bp->nr_vnics)
8712                         break;
8713
8714                 vnic = &bp->vnic_info[vnic_id];
8715                 vnic->flags |= BNXT_VNIC_RFS_FLAG;
8716                 if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
8717                         vnic->flags |= BNXT_VNIC_RFS_NEW_RSS_FLAG;
8718                 rc = bnxt_hwrm_vnic_alloc(bp, vnic_id, ring_id, 1);
8719                 if (rc) {
8720                         netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
8721                                    vnic_id, rc);
8722                         break;
8723                 }
8724                 rc = bnxt_setup_vnic(bp, vnic_id);
8725                 if (rc)
8726                         break;
8727         }
8728         return rc;
8729 #else
8730         return 0;
8731 #endif
8732 }
8733
8734 /* Allow PF, trusted VFs and VFs with default VLAN to be in promiscuous mode */
8735 static bool bnxt_promisc_ok(struct bnxt *bp)
8736 {
8737 #ifdef CONFIG_BNXT_SRIOV
8738         if (BNXT_VF(bp) && !bp->vf.vlan && !bnxt_is_trusted_vf(bp, &bp->vf))
8739                 return false;
8740 #endif
8741         return true;
8742 }
8743
8744 static int bnxt_setup_nitroa0_vnic(struct bnxt *bp)
8745 {
8746         int rc = 0;
8747
8748         rc = bnxt_hwrm_vnic_alloc(bp, 1, bp->rx_nr_rings - 1, 1);
8749         if (rc) {
8750                 netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n",
8751                            rc);
8752                 return rc;
8753         }
8754
8755         rc = bnxt_hwrm_vnic_cfg(bp, 1);
8756         if (rc) {
8757                 netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n",
8758                            rc);
8759                 return rc;
8760         }
8761         return rc;
8762 }
8763
8764 static int bnxt_cfg_rx_mode(struct bnxt *);
8765 static bool bnxt_mc_list_updated(struct bnxt *, u32 *);
8766
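/* Program the device after host resources have been allocated: stat
 * contexts, rings and ring groups, the default VNIC with its L2 filter
 * and RX mask, optional RFS VNICs and TPA, and interrupt coalescing.
 * On failure, every firmware resource allocated here is freed again.
 */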
8767 static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
8768 {
8769         struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
8770         int rc = 0;
8771         unsigned int rx_nr_rings = bp->rx_nr_rings;
8772
8773         if (irq_re_init) {
8774                 rc = bnxt_hwrm_stat_ctx_alloc(bp);
8775                 if (rc) {
8776                         netdev_err(bp->dev, "hwrm stat ctx alloc failure rc: %x\n",
8777                                    rc);
8778                         goto err_out;
8779                 }
8780         }
8781
8782         rc = bnxt_hwrm_ring_alloc(bp);
8783         if (rc) {
8784                 netdev_err(bp->dev, "hwrm ring alloc failure rc: %x\n", rc);
8785                 goto err_out;
8786         }
8787
8788         rc = bnxt_hwrm_ring_grp_alloc(bp);
8789         if (rc) {
8790                 netdev_err(bp->dev, "hwrm_ring_grp alloc failure: %x\n", rc);
8791                 goto err_out;
8792         }
8793
8794         if (BNXT_CHIP_TYPE_NITRO_A0(bp))
8795                 rx_nr_rings--;
8796
8797         /* default vnic 0 */
8798         rc = bnxt_hwrm_vnic_alloc(bp, 0, 0, rx_nr_rings);
8799         if (rc) {
8800                 netdev_err(bp->dev, "hwrm vnic alloc failure rc: %x\n", rc);
8801                 goto err_out;
8802         }
8803
8804         if (BNXT_VF(bp))
8805                 bnxt_hwrm_func_qcfg(bp);
8806
8807         rc = bnxt_setup_vnic(bp, 0);
8808         if (rc)
8809                 goto err_out;
8810         if (bp->fw_cap & BNXT_FW_CAP_RSS_HASH_TYPE_DELTA)
8811                 bnxt_hwrm_update_rss_hash_cfg(bp);
8812
8813         if (bp->flags & BNXT_FLAG_RFS) {
8814                 rc = bnxt_alloc_rfs_vnics(bp);
8815                 if (rc)
8816                         goto err_out;
8817         }
8818
8819         if (bp->flags & BNXT_FLAG_TPA) {
8820                 rc = bnxt_set_tpa(bp, true);
8821                 if (rc)
8822                         goto err_out;
8823         }
8824
8825         if (BNXT_VF(bp))
8826                 bnxt_update_vf_mac(bp);
8827
8828         /* Filter for default vnic 0 */
8829         rc = bnxt_hwrm_set_vnic_filter(bp, 0, 0, bp->dev->dev_addr);
8830         if (rc) {
8831                 if (BNXT_VF(bp) && rc == -ENODEV)
8832                         netdev_err(bp->dev, "Cannot configure L2 filter while PF is unavailable\n");
8833                 else
8834                         netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc);
8835                 goto err_out;
8836         }
8837         vnic->uc_filter_count = 1;
8838
8839         vnic->rx_mask = 0;
8840         if (test_bit(BNXT_STATE_HALF_OPEN, &bp->state))
8841                 goto skip_rx_mask;
8842
8843         if (bp->dev->flags & IFF_BROADCAST)
8844                 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
8845
8846         if (bp->dev->flags & IFF_PROMISC)
8847                 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
8848
8849         if (bp->dev->flags & IFF_ALLMULTI) {
8850                 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
8851                 vnic->mc_list_count = 0;
8852         } else if (bp->dev->flags & IFF_MULTICAST) {
8853                 u32 mask = 0;
8854
8855                 bnxt_mc_list_updated(bp, &mask);
8856                 vnic->rx_mask |= mask;
8857         }
8858
8859         rc = bnxt_cfg_rx_mode(bp);
8860         if (rc)
8861                 goto err_out;
8862
8863 skip_rx_mask:
8864         rc = bnxt_hwrm_set_coal(bp);
8865         if (rc)
8866                 netdev_warn(bp->dev, "HWRM set coalescing failure rc: %x\n",
8867                                 rc);
8868
8869         if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
8870                 rc = bnxt_setup_nitroa0_vnic(bp);
8871                 if (rc)
8872                         netdev_err(bp->dev, "Special vnic setup failure for NS2 A0 rc: %x\n",
8873                                    rc);
8874         }
8875
8876         if (BNXT_VF(bp)) {
8877                 bnxt_hwrm_func_qcfg(bp);
8878                 netdev_update_features(bp->dev);
8879         }
8880
8881         return 0;
8882
8883 err_out:
8884         bnxt_hwrm_resource_free(bp, 0, true);
8885
8886         return rc;
8887 }
8888
8889 static int bnxt_shutdown_nic(struct bnxt *bp, bool irq_re_init)
8890 {
8891         bnxt_hwrm_resource_free(bp, 1, irq_re_init);
8892         return 0;
8893 }
8894
8895 static int bnxt_init_nic(struct bnxt *bp, bool irq_re_init)
8896 {
8897         bnxt_init_cp_rings(bp);
8898         bnxt_init_rx_rings(bp);
8899         bnxt_init_tx_rings(bp);
8900         bnxt_init_ring_grps(bp, irq_re_init);
8901         bnxt_init_vnics(bp);
8902
8903         return bnxt_init_chip(bp, irq_re_init);
8904 }
8905
8906 static int bnxt_set_real_num_queues(struct bnxt *bp)
8907 {
8908         int rc;
8909         struct net_device *dev = bp->dev;
8910
8911         rc = netif_set_real_num_tx_queues(dev, bp->tx_nr_rings -
8912                                           bp->tx_nr_rings_xdp);
8913         if (rc)
8914                 return rc;
8915
8916         rc = netif_set_real_num_rx_queues(dev, bp->rx_nr_rings);
8917         if (rc)
8918                 return rc;
8919
8920 #ifdef CONFIG_RFS_ACCEL
8921         if (bp->flags & BNXT_FLAG_RFS)
8922                 dev->rx_cpu_rmap = alloc_irq_cpu_rmap(bp->rx_nr_rings);
8923 #endif
8924
8925         return rc;
8926 }
8927
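/* Fit the requested RX and TX ring counts into @max.  With shared
 * completion rings both counts are simply capped at @max; otherwise rings
 * are dropped alternately until rx + tx fits.
 */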
8928 static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
8929                            bool shared)
8930 {
8931         int _rx = *rx, _tx = *tx;
8932
8933         if (shared) {
8934                 *rx = min_t(int, _rx, max);
8935                 *tx = min_t(int, _tx, max);
8936         } else {
8937                 if (max < 2)
8938                         return -ENOMEM;
8939
8940                 while (_rx + _tx > max) {
8941                         if (_rx > _tx && _rx > 1)
8942                                 _rx--;
8943                         else if (_tx > 1)
8944                                 _tx--;
8945                 }
8946                 *rx = _rx;
8947                 *tx = _tx;
8948         }
8949         return 0;
8950 }
8951
8952 static void bnxt_setup_msix(struct bnxt *bp)
8953 {
8954         const int len = sizeof(bp->irq_tbl[0].name);
8955         struct net_device *dev = bp->dev;
8956         int tcs, i;
8957
8958         tcs = netdev_get_num_tc(dev);
8959         if (tcs) {
8960                 int i, off, count;
8961
8962                 for (i = 0; i < tcs; i++) {
8963                         count = bp->tx_nr_rings_per_tc;
8964                         off = i * count;
8965                         netdev_set_tc_queue(dev, i, count, off);
8966                 }
8967         }
8968
8969         for (i = 0; i < bp->cp_nr_rings; i++) {
8970                 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
8971                 char *attr;
8972
8973                 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
8974                         attr = "TxRx";
8975                 else if (i < bp->rx_nr_rings)
8976                         attr = "rx";
8977                 else
8978                         attr = "tx";
8979
8980                 snprintf(bp->irq_tbl[map_idx].name, len, "%s-%s-%d", dev->name,
8981                          attr, i);
8982                 bp->irq_tbl[map_idx].handler = bnxt_msix;
8983         }
8984 }
8985
8986 static void bnxt_setup_inta(struct bnxt *bp)
8987 {
8988         const int len = sizeof(bp->irq_tbl[0].name);
8989
8990         if (netdev_get_num_tc(bp->dev))
8991                 netdev_reset_tc(bp->dev);
8992
8993         snprintf(bp->irq_tbl[0].name, len, "%s-%s-%d", bp->dev->name, "TxRx",
8994                  0);
8995         bp->irq_tbl[0].handler = bnxt_inta;
8996 }
8997
8998 static int bnxt_init_int_mode(struct bnxt *bp);
8999
9000 static int bnxt_setup_int_mode(struct bnxt *bp)
9001 {
9002         int rc;
9003
9004         if (!bp->irq_tbl) {
9005                 rc = bnxt_init_int_mode(bp);
9006                 if (rc || !bp->irq_tbl)
9007                         return rc ?: -ENODEV;
9008         }
9009
9010         if (bp->flags & BNXT_FLAG_USING_MSIX)
9011                 bnxt_setup_msix(bp);
9012         else
9013                 bnxt_setup_inta(bp);
9014
9015         rc = bnxt_set_real_num_queues(bp);
9016         return rc;
9017 }
9018
9019 #ifdef CONFIG_RFS_ACCEL
9020 static unsigned int bnxt_get_max_func_rss_ctxs(struct bnxt *bp)
9021 {
9022         return bp->hw_resc.max_rsscos_ctxs;
9023 }
9024
9025 static unsigned int bnxt_get_max_func_vnics(struct bnxt *bp)
9026 {
9027         return bp->hw_resc.max_vnics;
9028 }
9029 #endif
9030
9031 unsigned int bnxt_get_max_func_stat_ctxs(struct bnxt *bp)
9032 {
9033         return bp->hw_resc.max_stat_ctxs;
9034 }
9035
9036 unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp)
9037 {
9038         return bp->hw_resc.max_cp_rings;
9039 }
9040
9041 static unsigned int bnxt_get_max_func_cp_rings_for_en(struct bnxt *bp)
9042 {
9043         unsigned int cp = bp->hw_resc.max_cp_rings;
9044
9045         if (!(bp->flags & BNXT_FLAG_CHIP_P5))
9046                 cp -= bnxt_get_ulp_msix_num(bp);
9047
9048         return cp;
9049 }
9050
9051 static unsigned int bnxt_get_max_func_irqs(struct bnxt *bp)
9052 {
9053         struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
9054
9055         if (bp->flags & BNXT_FLAG_CHIP_P5)
9056                 return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_nqs);
9057
9058         return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_cp_rings);
9059 }
9060
9061 static void bnxt_set_max_func_irqs(struct bnxt *bp, unsigned int max_irqs)
9062 {
9063         bp->hw_resc.max_irqs = max_irqs;
9064 }
9065
9066 unsigned int bnxt_get_avail_cp_rings_for_en(struct bnxt *bp)
9067 {
9068         unsigned int cp;
9069
9070         cp = bnxt_get_max_func_cp_rings_for_en(bp);
9071         if (bp->flags & BNXT_FLAG_CHIP_P5)
9072                 return cp - bp->rx_nr_rings - bp->tx_nr_rings;
9073         else
9074                 return cp - bp->cp_nr_rings;
9075 }
9076
9077 unsigned int bnxt_get_avail_stat_ctxs_for_en(struct bnxt *bp)
9078 {
9079         return bnxt_get_max_func_stat_ctxs(bp) - bnxt_get_func_stat_ctxs(bp);
9080 }
9081
9082 int bnxt_get_avail_msix(struct bnxt *bp, int num)
9083 {
9084         int max_cp = bnxt_get_max_func_cp_rings(bp);
9085         int max_irq = bnxt_get_max_func_irqs(bp);
9086         int total_req = bp->cp_nr_rings + num;
9087         int max_idx, avail_msix;
9088
9089         max_idx = bp->total_irqs;
9090         if (!(bp->flags & BNXT_FLAG_CHIP_P5))
9091                 max_idx = min_t(int, bp->total_irqs, max_cp);
9092         avail_msix = max_idx - bp->cp_nr_rings;
9093         if (!BNXT_NEW_RM(bp) || avail_msix >= num)
9094                 return avail_msix;
9095
9096         if (max_irq < total_req) {
9097                 num = max_irq - bp->cp_nr_rings;
9098                 if (num <= 0)
9099                         return 0;
9100         }
9101         return num;
9102 }
9103
9104 static int bnxt_get_num_msix(struct bnxt *bp)
9105 {
9106         if (!BNXT_NEW_RM(bp))
9107                 return bnxt_get_max_func_irqs(bp);
9108
9109         return bnxt_nq_rings_in_use(bp);
9110 }
9111
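/* Enable MSI-X: request up to the number of vectors the device and
 * firmware allow, then trim the RX/TX ring counts to match the vectors
 * actually granted (keeping aside any vectors claimed by the ULP).
 */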
9112 static int bnxt_init_msix(struct bnxt *bp)
9113 {
9114         int i, total_vecs, max, rc = 0, min = 1, ulp_msix;
9115         struct msix_entry *msix_ent;
9116
9117         total_vecs = bnxt_get_num_msix(bp);
9118         max = bnxt_get_max_func_irqs(bp);
9119         if (total_vecs > max)
9120                 total_vecs = max;
9121
9122         if (!total_vecs)
9123                 return 0;
9124
9125         msix_ent = kcalloc(total_vecs, sizeof(struct msix_entry), GFP_KERNEL);
9126         if (!msix_ent)
9127                 return -ENOMEM;
9128
9129         for (i = 0; i < total_vecs; i++) {
9130                 msix_ent[i].entry = i;
9131                 msix_ent[i].vector = 0;
9132         }
9133
9134         if (!(bp->flags & BNXT_FLAG_SHARED_RINGS))
9135                 min = 2;
9136
9137         total_vecs = pci_enable_msix_range(bp->pdev, msix_ent, min, total_vecs);
9138         ulp_msix = bnxt_get_ulp_msix_num(bp);
9139         if (total_vecs < 0 || total_vecs < ulp_msix) {
9140                 rc = -ENODEV;
9141                 goto msix_setup_exit;
9142         }
9143
9144         bp->irq_tbl = kcalloc(total_vecs, sizeof(struct bnxt_irq), GFP_KERNEL);
9145         if (bp->irq_tbl) {
9146                 for (i = 0; i < total_vecs; i++)
9147                         bp->irq_tbl[i].vector = msix_ent[i].vector;
9148
9149                 bp->total_irqs = total_vecs;
9150                 /* Trim rings based upon num of vectors allocated */
9151                 rc = bnxt_trim_rings(bp, &bp->rx_nr_rings, &bp->tx_nr_rings,
9152                                      total_vecs - ulp_msix, min == 1);
9153                 if (rc)
9154                         goto msix_setup_exit;
9155
9156                 bp->cp_nr_rings = (min == 1) ?
9157                                   max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
9158                                   bp->tx_nr_rings + bp->rx_nr_rings;
9159
9160         } else {
9161                 rc = -ENOMEM;
9162                 goto msix_setup_exit;
9163         }
9164         bp->flags |= BNXT_FLAG_USING_MSIX;
9165         kfree(msix_ent);
9166         return 0;
9167
9168 msix_setup_exit:
9169         netdev_err(bp->dev, "bnxt_init_msix err: %x\n", rc);
9170         kfree(bp->irq_tbl);
9171         bp->irq_tbl = NULL;
9172         pci_disable_msix(bp->pdev);
9173         kfree(msix_ent);
9174         return rc;
9175 }
9176
9177 static int bnxt_init_inta(struct bnxt *bp)
9178 {
9179         bp->irq_tbl = kzalloc(sizeof(struct bnxt_irq), GFP_KERNEL);
9180         if (!bp->irq_tbl)
9181                 return -ENOMEM;
9182
9183         bp->total_irqs = 1;
9184         bp->rx_nr_rings = 1;
9185         bp->tx_nr_rings = 1;
9186         bp->cp_nr_rings = 1;
9187         bp->flags |= BNXT_FLAG_SHARED_RINGS;
9188         bp->irq_tbl[0].vector = bp->pdev->irq;
9189         return 0;
9190 }
9191
9192 static int bnxt_init_int_mode(struct bnxt *bp)
9193 {
9194         int rc = -ENODEV;
9195
9196         if (bp->flags & BNXT_FLAG_MSIX_CAP)
9197                 rc = bnxt_init_msix(bp);
9198
9199         if (!(bp->flags & BNXT_FLAG_USING_MSIX) && BNXT_PF(bp)) {
9200                 /* fallback to INTA */
9201                 rc = bnxt_init_inta(bp);
9202         }
9203         return rc;
9204 }
9205
9206 static void bnxt_clear_int_mode(struct bnxt *bp)
9207 {
9208         if (bp->flags & BNXT_FLAG_USING_MSIX)
9209                 pci_disable_msix(bp->pdev);
9210
9211         kfree(bp->irq_tbl);
9212         bp->irq_tbl = NULL;
9213         bp->flags &= ~BNXT_FLAG_USING_MSIX;
9214 }
9215
9216 int bnxt_reserve_rings(struct bnxt *bp, bool irq_re_init)
9217 {
9218         int tcs = netdev_get_num_tc(bp->dev);
9219         bool irq_cleared = false;
9220         int rc;
9221
9222         if (!bnxt_need_reserve_rings(bp))
9223                 return 0;
9224
9225         if (irq_re_init && BNXT_NEW_RM(bp) &&
9226             bnxt_get_num_msix(bp) != bp->total_irqs) {
9227                 bnxt_ulp_irq_stop(bp);
9228                 bnxt_clear_int_mode(bp);
9229                 irq_cleared = true;
9230         }
9231         rc = __bnxt_reserve_rings(bp);
9232         if (irq_cleared) {
9233                 if (!rc)
9234                         rc = bnxt_init_int_mode(bp);
9235                 bnxt_ulp_irq_restart(bp, rc);
9236         }
9237         if (rc) {
9238                 netdev_err(bp->dev, "ring reservation/IRQ init failure rc: %d\n", rc);
9239                 return rc;
9240         }
9241         if (tcs && (bp->tx_nr_rings_per_tc * tcs !=
9242                     bp->tx_nr_rings - bp->tx_nr_rings_xdp)) {
9243                 netdev_err(bp->dev, "tx ring reservation failure\n");
9244                 netdev_reset_tc(bp->dev);
9245                 if (bp->tx_nr_rings_xdp)
9246                         bp->tx_nr_rings_per_tc = bp->tx_nr_rings_xdp;
9247                 else
9248                         bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
9249                 return -ENOMEM;
9250         }
9251         return 0;
9252 }
9253
9254 static void bnxt_free_irq(struct bnxt *bp)
9255 {
9256         struct bnxt_irq *irq;
9257         int i;
9258
9259 #ifdef CONFIG_RFS_ACCEL
9260         free_irq_cpu_rmap(bp->dev->rx_cpu_rmap);
9261         bp->dev->rx_cpu_rmap = NULL;
9262 #endif
9263         if (!bp->irq_tbl || !bp->bnapi)
9264                 return;
9265
9266         for (i = 0; i < bp->cp_nr_rings; i++) {
9267                 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
9268
9269                 irq = &bp->irq_tbl[map_idx];
9270                 if (irq->requested) {
9271                         if (irq->have_cpumask) {
9272                                 irq_set_affinity_hint(irq->vector, NULL);
9273                                 free_cpumask_var(irq->cpu_mask);
9274                                 irq->have_cpumask = 0;
9275                         }
9276                         free_irq(irq->vector, bp->bnapi[i]);
9277                 }
9278
9279                 irq->requested = 0;
9280         }
9281 }
9282
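/* Request one IRQ per completion ring, add RX ring IRQs to the aRFS CPU
 * rmap, and set affinity hints that spread the vectors across CPUs,
 * preferring the device's NUMA node.
 */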
9283 static int bnxt_request_irq(struct bnxt *bp)
9284 {
9285         int i, j, rc = 0;
9286         unsigned long flags = 0;
9287 #ifdef CONFIG_RFS_ACCEL
9288         struct cpu_rmap *rmap;
9289 #endif
9290
9291         rc = bnxt_setup_int_mode(bp);
9292         if (rc) {
9293                 netdev_err(bp->dev, "bnxt_setup_int_mode err: %x\n",
9294                            rc);
9295                 return rc;
9296         }
9297 #ifdef CONFIG_RFS_ACCEL
9298         rmap = bp->dev->rx_cpu_rmap;
9299 #endif
9300         if (!(bp->flags & BNXT_FLAG_USING_MSIX))
9301                 flags = IRQF_SHARED;
9302
9303         for (i = 0, j = 0; i < bp->cp_nr_rings; i++) {
9304                 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
9305                 struct bnxt_irq *irq = &bp->irq_tbl[map_idx];
9306
9307 #ifdef CONFIG_RFS_ACCEL
9308                 if (rmap && bp->bnapi[i]->rx_ring) {
9309                         rc = irq_cpu_rmap_add(rmap, irq->vector);
9310                         if (rc)
9311                                 netdev_warn(bp->dev, "failed adding irq rmap for ring %d\n",
9312                                             j);
9313                         j++;
9314                 }
9315 #endif
9316                 rc = request_irq(irq->vector, irq->handler, flags, irq->name,
9317                                  bp->bnapi[i]);
9318                 if (rc)
9319                         break;
9320
9321                 irq->requested = 1;
9322
9323                 if (zalloc_cpumask_var(&irq->cpu_mask, GFP_KERNEL)) {
9324                         int numa_node = dev_to_node(&bp->pdev->dev);
9325
9326                         irq->have_cpumask = 1;
9327                         cpumask_set_cpu(cpumask_local_spread(i, numa_node),
9328                                         irq->cpu_mask);
9329                         rc = irq_set_affinity_hint(irq->vector, irq->cpu_mask);
9330                         if (rc) {
9331                                 netdev_warn(bp->dev,
9332                                             "Set affinity failed, IRQ = %d\n",
9333                                             irq->vector);
9334                                 break;
9335                         }
9336                 }
9337         }
9338         return rc;
9339 }
9340
9341 static void bnxt_del_napi(struct bnxt *bp)
9342 {
9343         int i;
9344
9345         if (!bp->bnapi)
9346                 return;
9347
9348         for (i = 0; i < bp->cp_nr_rings; i++) {
9349                 struct bnxt_napi *bnapi = bp->bnapi[i];
9350
9351                 __netif_napi_del(&bnapi->napi);
9352         }
9353         /* We called __netif_napi_del(); we need to respect an RCU grace
9354          * period before freeing the napi structures.
9355          */
9356         synchronize_net();
9357 }
9358
9359 static void bnxt_init_napi(struct bnxt *bp)
9360 {
9361         int i;
9362         unsigned int cp_nr_rings = bp->cp_nr_rings;
9363         struct bnxt_napi *bnapi;
9364
9365         if (bp->flags & BNXT_FLAG_USING_MSIX) {
9366                 int (*poll_fn)(struct napi_struct *, int) = bnxt_poll;
9367
9368                 if (bp->flags & BNXT_FLAG_CHIP_P5)
9369                         poll_fn = bnxt_poll_p5;
9370                 else if (BNXT_CHIP_TYPE_NITRO_A0(bp))
9371                         cp_nr_rings--;
9372                 for (i = 0; i < cp_nr_rings; i++) {
9373                         bnapi = bp->bnapi[i];
9374                         netif_napi_add(bp->dev, &bnapi->napi, poll_fn);
9375                 }
9376                 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
9377                         bnapi = bp->bnapi[cp_nr_rings];
9378                         netif_napi_add(bp->dev, &bnapi->napi,
9379                                        bnxt_poll_nitroa0);
9380                 }
9381         } else {
9382                 bnapi = bp->bnapi[0];
9383                 netif_napi_add(bp->dev, &bnapi->napi, bnxt_poll);
9384         }
9385 }
9386
9387 static void bnxt_disable_napi(struct bnxt *bp)
9388 {
9389         int i;
9390
9391         if (!bp->bnapi ||
9392             test_and_set_bit(BNXT_STATE_NAPI_DISABLED, &bp->state))
9393                 return;
9394
9395         for (i = 0; i < bp->cp_nr_rings; i++) {
9396                 struct bnxt_napi *bnapi = bp->bnapi[i];
9397                 struct bnxt_cp_ring_info *cpr;
9398
9399                 cpr = &bnapi->cp_ring;
9400                 if (bnapi->tx_fault)
9401                         cpr->sw_stats.tx.tx_resets++;
9402                 if (bnapi->in_reset)
9403                         cpr->sw_stats.rx.rx_resets++;
9404                 napi_disable(&bnapi->napi);
9405                 if (bnapi->rx_ring)
9406                         cancel_work_sync(&cpr->dim.work);
9407         }
9408 }
9409
9410 static void bnxt_enable_napi(struct bnxt *bp)
9411 {
9412         int i;
9413
9414         clear_bit(BNXT_STATE_NAPI_DISABLED, &bp->state);
9415         for (i = 0; i < bp->cp_nr_rings; i++) {
9416                 struct bnxt_napi *bnapi = bp->bnapi[i];
9417                 struct bnxt_cp_ring_info *cpr;
9418
9419                 bnapi->tx_fault = 0;
9420
9421                 cpr = &bnapi->cp_ring;
9422                 bnapi->in_reset = false;
9423
9424                 bnapi->tx_pkts = 0;
9425
9426                 if (bnapi->rx_ring) {
9427                         INIT_WORK(&cpr->dim.work, bnxt_dim_work);
9428                         cpr->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
9429                 }
9430                 napi_enable(&bnapi->napi);
9431         }
9432 }
9433
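/* Mark every TX ring as closing, wait for in-flight NAPI polls to see the
 * new state, then drop the carrier and stop all TX queues.
 */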
9434 void bnxt_tx_disable(struct bnxt *bp)
9435 {
9436         int i;
9437         struct bnxt_tx_ring_info *txr;
9438
9439         if (bp->tx_ring) {
9440                 for (i = 0; i < bp->tx_nr_rings; i++) {
9441                         txr = &bp->tx_ring[i];
9442                         WRITE_ONCE(txr->dev_state, BNXT_DEV_STATE_CLOSING);
9443                 }
9444         }
9445         /* Make sure napi polls see @dev_state change */
9446         synchronize_net();
9447         /* Drop carrier first to prevent TX timeout */
9448         netif_carrier_off(bp->dev);
9449         /* Stop all TX queues */
9450         netif_tx_disable(bp->dev);
9451 }
9452
9453 void bnxt_tx_enable(struct bnxt *bp)
9454 {
9455         int i;
9456         struct bnxt_tx_ring_info *txr;
9457
9458         for (i = 0; i < bp->tx_nr_rings; i++) {
9459                 txr = &bp->tx_ring[i];
9460                 WRITE_ONCE(txr->dev_state, 0);
9461         }
9462         /* Make sure napi polls see @dev_state change */
9463         synchronize_net();
9464         netif_tx_wake_all_queues(bp->dev);
9465         if (BNXT_LINK_IS_UP(bp))
9466                 netif_carrier_on(bp->dev);
9467 }
9468
9469 static char *bnxt_report_fec(struct bnxt_link_info *link_info)
9470 {
9471         u8 active_fec = link_info->active_fec_sig_mode &
9472                         PORT_PHY_QCFG_RESP_ACTIVE_FEC_MASK;
9473
9474         switch (active_fec) {
9475         default:
9476         case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_NONE_ACTIVE:
9477                 return "None";
9478         case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE74_ACTIVE:
9479                 return "Clause 74 BaseR";
9480         case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE91_ACTIVE:
9481                 return "Clause 91 RS(528,514)";
9482         case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_1XN_ACTIVE:
9483                 return "Clause 91 RS544_1XN";
9484         case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_IEEE_ACTIVE:
9485                 return "Clause 91 RS(544,514)";
9486         case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_1XN_ACTIVE:
9487                 return "Clause 91 RS272_1XN";
9488         case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_IEEE_ACTIVE:
9489                 return "Clause 91 RS(272,257)";
9490         }
9491 }
9492
9493 void bnxt_report_link(struct bnxt *bp)
9494 {
9495         if (BNXT_LINK_IS_UP(bp)) {
9496                 const char *signal = "";
9497                 const char *flow_ctrl;
9498                 const char *duplex;
9499                 u32 speed;
9500                 u16 fec;
9501
9502                 netif_carrier_on(bp->dev);
9503                 speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed);
9504                 if (speed == SPEED_UNKNOWN) {
9505                         netdev_info(bp->dev, "NIC Link is Up, speed unknown\n");
9506                         return;
9507                 }
9508                 if (bp->link_info.duplex == BNXT_LINK_DUPLEX_FULL)
9509                         duplex = "full";
9510                 else
9511                         duplex = "half";
9512                 if (bp->link_info.pause == BNXT_LINK_PAUSE_BOTH)
9513                         flow_ctrl = "ON - receive & transmit";
9514                 else if (bp->link_info.pause == BNXT_LINK_PAUSE_TX)
9515                         flow_ctrl = "ON - transmit";
9516                 else if (bp->link_info.pause == BNXT_LINK_PAUSE_RX)
9517                         flow_ctrl = "ON - receive";
9518                 else
9519                         flow_ctrl = "none";
9520                 if (bp->link_info.phy_qcfg_resp.option_flags &
9521                     PORT_PHY_QCFG_RESP_OPTION_FLAGS_SIGNAL_MODE_KNOWN) {
9522                         u8 sig_mode = bp->link_info.active_fec_sig_mode &
9523                                       PORT_PHY_QCFG_RESP_SIGNAL_MODE_MASK;
9524                         switch (sig_mode) {
9525                         case PORT_PHY_QCFG_RESP_SIGNAL_MODE_NRZ:
9526                                 signal = "(NRZ) ";
9527                                 break;
9528                         case PORT_PHY_QCFG_RESP_SIGNAL_MODE_PAM4:
9529                                 signal = "(PAM4) ";
9530                                 break;
9531                         default:
9532                                 break;
9533                         }
9534                 }
9535                 netdev_info(bp->dev, "NIC Link is Up, %u Mbps %s%s duplex, Flow control: %s\n",
9536                             speed, signal, duplex, flow_ctrl);
9537                 if (bp->phy_flags & BNXT_PHY_FL_EEE_CAP)
9538                         netdev_info(bp->dev, "EEE is %s\n",
9539                                     bp->eee.eee_active ? "active" :
9540                                                          "not active");
9541                 fec = bp->link_info.fec_cfg;
9542                 if (!(fec & PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED))
9543                         netdev_info(bp->dev, "FEC autoneg %s encoding: %s\n",
9544                                     (fec & BNXT_FEC_AUTONEG) ? "on" : "off",
9545                                     bnxt_report_fec(&bp->link_info));
9546         } else {
9547                 netif_carrier_off(bp->dev);
9548                 netdev_err(bp->dev, "NIC Link is Down\n");
9549         }
9550 }
9551
9552 static bool bnxt_phy_qcaps_no_speed(struct hwrm_port_phy_qcaps_output *resp)
9553 {
9554         if (!resp->supported_speeds_auto_mode &&
9555             !resp->supported_speeds_force_mode &&
9556             !resp->supported_pam4_speeds_auto_mode &&
9557             !resp->supported_pam4_speeds_force_mode)
9558                 return true;
9559         return false;
9560 }
9561
9562 static int bnxt_hwrm_phy_qcaps(struct bnxt *bp)
9563 {
9564         struct bnxt_link_info *link_info = &bp->link_info;
9565         struct hwrm_port_phy_qcaps_output *resp;
9566         struct hwrm_port_phy_qcaps_input *req;
9567         int rc = 0;
9568
9569         if (bp->hwrm_spec_code < 0x10201)
9570                 return 0;
9571
9572         rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_QCAPS);
9573         if (rc)
9574                 return rc;
9575
9576         resp = hwrm_req_hold(bp, req);
9577         rc = hwrm_req_send(bp, req);
9578         if (rc)
9579                 goto hwrm_phy_qcaps_exit;
9580
9581         bp->phy_flags = resp->flags | (le16_to_cpu(resp->flags2) << 8);
9582         if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_EEE_SUPPORTED) {
9583                 struct ethtool_eee *eee = &bp->eee;
9584                 u16 fw_speeds = le16_to_cpu(resp->supported_speeds_eee_mode);
9585
9586                 eee->supported = _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
9587                 bp->lpi_tmr_lo = le32_to_cpu(resp->tx_lpi_timer_low) &
9588                                  PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_LOW_MASK;
9589                 bp->lpi_tmr_hi = le32_to_cpu(resp->valid_tx_lpi_timer_high) &
9590                                  PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_MASK;
9591         }
9592
9593         if (bp->hwrm_spec_code >= 0x10a01) {
9594                 if (bnxt_phy_qcaps_no_speed(resp)) {
9595                         link_info->phy_state = BNXT_PHY_STATE_DISABLED;
9596                         netdev_warn(bp->dev, "Ethernet link disabled\n");
9597                 } else if (link_info->phy_state == BNXT_PHY_STATE_DISABLED) {
9598                         link_info->phy_state = BNXT_PHY_STATE_ENABLED;
9599                         netdev_info(bp->dev, "Ethernet link enabled\n");
9600                         /* Phy re-enabled, reprobe the speeds */
9601                         link_info->support_auto_speeds = 0;
9602                         link_info->support_pam4_auto_speeds = 0;
9603                 }
9604         }
9605         if (resp->supported_speeds_auto_mode)
9606                 link_info->support_auto_speeds =
9607                         le16_to_cpu(resp->supported_speeds_auto_mode);
9608         if (resp->supported_pam4_speeds_auto_mode)
9609                 link_info->support_pam4_auto_speeds =
9610                         le16_to_cpu(resp->supported_pam4_speeds_auto_mode);
9611
9612         bp->port_count = resp->port_cnt;
9613
9614 hwrm_phy_qcaps_exit:
9615         hwrm_req_drop(bp, req);
9616         return rc;
9617 }
9618
9619 static bool bnxt_support_dropped(u16 advertising, u16 supported)
9620 {
9621         u16 diff = advertising ^ supported;
9622
9623         return ((supported | diff) != supported);
9624 }
9625
9626 int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
9627 {
9628         struct bnxt_link_info *link_info = &bp->link_info;
9629         struct hwrm_port_phy_qcfg_output *resp;
9630         struct hwrm_port_phy_qcfg_input *req;
9631         u8 link_state = link_info->link_state;
9632         bool support_changed = false;
9633         int rc;
9634
9635         rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_QCFG);
9636         if (rc)
9637                 return rc;
9638
9639         resp = hwrm_req_hold(bp, req);
9640         rc = hwrm_req_send(bp, req);
9641         if (rc) {
9642                 hwrm_req_drop(bp, req);
9643                 if (BNXT_VF(bp) && rc == -ENODEV) {
9644                         netdev_warn(bp->dev, "Cannot obtain link state while PF unavailable.\n");
9645                         rc = 0;
9646                 }
9647                 return rc;
9648         }
9649
9650         memcpy(&link_info->phy_qcfg_resp, resp, sizeof(*resp));
9651         link_info->phy_link_status = resp->link;
9652         link_info->duplex = resp->duplex_cfg;
9653         if (bp->hwrm_spec_code >= 0x10800)
9654                 link_info->duplex = resp->duplex_state;
9655         link_info->pause = resp->pause;
9656         link_info->auto_mode = resp->auto_mode;
9657         link_info->auto_pause_setting = resp->auto_pause;
9658         link_info->lp_pause = resp->link_partner_adv_pause;
9659         link_info->force_pause_setting = resp->force_pause;
9660         link_info->duplex_setting = resp->duplex_cfg;
9661         if (link_info->phy_link_status == BNXT_LINK_LINK)
9662                 link_info->link_speed = le16_to_cpu(resp->link_speed);
9663         else
9664                 link_info->link_speed = 0;
9665         link_info->force_link_speed = le16_to_cpu(resp->force_link_speed);
9666         link_info->force_pam4_link_speed =
9667                 le16_to_cpu(resp->force_pam4_link_speed);
9668         link_info->support_speeds = le16_to_cpu(resp->support_speeds);
9669         link_info->support_pam4_speeds = le16_to_cpu(resp->support_pam4_speeds);
9670         link_info->auto_link_speeds = le16_to_cpu(resp->auto_link_speed_mask);
9671         link_info->auto_pam4_link_speeds =
9672                 le16_to_cpu(resp->auto_pam4_link_speed_mask);
9673         link_info->lp_auto_link_speeds =
9674                 le16_to_cpu(resp->link_partner_adv_speeds);
9675         link_info->lp_auto_pam4_link_speeds =
9676                 resp->link_partner_pam4_adv_speeds;
9677         link_info->preemphasis = le32_to_cpu(resp->preemphasis);
9678         link_info->phy_ver[0] = resp->phy_maj;
9679         link_info->phy_ver[1] = resp->phy_min;
9680         link_info->phy_ver[2] = resp->phy_bld;
9681         link_info->media_type = resp->media_type;
9682         link_info->phy_type = resp->phy_type;
9683         link_info->transceiver = resp->xcvr_pkg_type;
9684         link_info->phy_addr = resp->eee_config_phy_addr &
9685                               PORT_PHY_QCFG_RESP_PHY_ADDR_MASK;
9686         link_info->module_status = resp->module_status;
9687
9688         if (bp->phy_flags & BNXT_PHY_FL_EEE_CAP) {
9689                 struct ethtool_eee *eee = &bp->eee;
9690                 u16 fw_speeds;
9691
9692                 eee->eee_active = 0;
9693                 if (resp->eee_config_phy_addr &
9694                     PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ACTIVE) {
9695                         eee->eee_active = 1;
9696                         fw_speeds = le16_to_cpu(
9697                                 resp->link_partner_adv_eee_link_speed_mask);
9698                         eee->lp_advertised =
9699                                 _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
9700                 }
9701
9702                 /* Pull initial EEE config */
9703                 if (!chng_link_state) {
9704                         if (resp->eee_config_phy_addr &
9705                             PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ENABLED)
9706                                 eee->eee_enabled = 1;
9707
9708                         fw_speeds = le16_to_cpu(resp->adv_eee_link_speed_mask);
9709                         eee->advertised =
9710                                 _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
9711
9712                         if (resp->eee_config_phy_addr &
9713                             PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_TX_LPI) {
9714                                 __le32 tmr;
9715
9716                                 eee->tx_lpi_enabled = 1;
9717                                 tmr = resp->xcvr_identifier_type_tx_lpi_timer;
9718                                 eee->tx_lpi_timer = le32_to_cpu(tmr) &
9719                                         PORT_PHY_QCFG_RESP_TX_LPI_TIMER_MASK;
9720                         }
9721                 }
9722         }
9723
9724         link_info->fec_cfg = PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED;
9725         if (bp->hwrm_spec_code >= 0x10504) {
9726                 link_info->fec_cfg = le16_to_cpu(resp->fec_cfg);
9727                 link_info->active_fec_sig_mode = resp->active_fec_signal_mode;
9728         }
9729         /* TODO: need to add more logic to report VF link */
9730         if (chng_link_state) {
9731                 if (link_info->phy_link_status == BNXT_LINK_LINK)
9732                         link_info->link_state = BNXT_LINK_STATE_UP;
9733                 else
9734                         link_info->link_state = BNXT_LINK_STATE_DOWN;
9735                 if (link_state != link_info->link_state)
9736                         bnxt_report_link(bp);
9737         } else {
9738                 /* always report link down if not required to update link state */
9739                 link_info->link_state = BNXT_LINK_STATE_DOWN;
9740         }
9741         hwrm_req_drop(bp, req);
9742
9743         if (!BNXT_PHY_CFG_ABLE(bp))
9744                 return 0;
9745
9746         /* Check if any advertised speeds are no longer supported. The caller
9747          * holds the link_lock mutex, so we can modify link_info settings.
9748          */
9749         if (bnxt_support_dropped(link_info->advertising,
9750                                  link_info->support_auto_speeds)) {
9751                 link_info->advertising = link_info->support_auto_speeds;
9752                 support_changed = true;
9753         }
9754         if (bnxt_support_dropped(link_info->advertising_pam4,
9755                                  link_info->support_pam4_auto_speeds)) {
9756                 link_info->advertising_pam4 = link_info->support_pam4_auto_speeds;
9757                 support_changed = true;
9758         }
9759         if (support_changed && (link_info->autoneg & BNXT_AUTONEG_SPEED))
9760                 bnxt_hwrm_set_link_setting(bp, true, false);
9761         return 0;
9762 }
9763
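/* Refresh the link state and warn if firmware reports an unqualified or
 * powered-down SFP+ module on the port.
 */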
9764 static void bnxt_get_port_module_status(struct bnxt *bp)
9765 {
9766         struct bnxt_link_info *link_info = &bp->link_info;
9767         struct hwrm_port_phy_qcfg_output *resp = &link_info->phy_qcfg_resp;
9768         u8 module_status;
9769
9770         if (bnxt_update_link(bp, true))
9771                 return;
9772
9773         module_status = link_info->module_status;
9774         switch (module_status) {
9775         case PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX:
9776         case PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN:
9777         case PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG:
9778                 netdev_warn(bp->dev, "Unqualified SFP+ module detected on port %d\n",
9779                             bp->pf.port_id);
9780                 if (bp->hwrm_spec_code >= 0x10201) {
9781                         netdev_warn(bp->dev, "Module part number %s\n",
9782                                     resp->phy_vendor_partnumber);
9783                 }
9784                 if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX)
9785                         netdev_warn(bp->dev, "TX is disabled\n");
9786                 if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN)
9787                         netdev_warn(bp->dev, "SFP+ module is shutdown\n");
9788         }
9789 }
9790
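/* Fill in the pause/flow-control fields of a PORT_PHY_CFG request based on
 * the requested flow control and whether pause autonegotiation is enabled.
 */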
9791 static void
9792 bnxt_hwrm_set_pause_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req)
9793 {
9794         if (bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) {
9795                 if (bp->hwrm_spec_code >= 0x10201)
9796                         req->auto_pause =
9797                                 PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE;
9798                 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
9799                         req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_RX;
9800                 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
9801                         req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_TX;
9802                 req->enables |=
9803                         cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE);
9804         } else {
9805                 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
9806                         req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_RX;
9807                 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
9808                         req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_TX;
9809                 req->enables |=
9810                         cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_PAUSE);
9811                 if (bp->hwrm_spec_code >= 0x10201) {
9812                         req->auto_pause = req->force_pause;
9813                         req->enables |= cpu_to_le32(
9814                                 PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE);
9815                 }
9816         }
9817 }
9818
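/* Fill in the speed fields of a PORT_PHY_CFG request: advertise the NRZ and
 * PAM4 speed masks when speed autonegotiation is enabled, otherwise force
 * the requested link speed.
 */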
9819 static void bnxt_hwrm_set_link_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req)
9820 {
9821         if (bp->link_info.autoneg & BNXT_AUTONEG_SPEED) {
9822                 req->auto_mode |= PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK;
9823                 if (bp->link_info.advertising) {
9824                         req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK);
9825                         req->auto_link_speed_mask = cpu_to_le16(bp->link_info.advertising);
9826                 }
9827                 if (bp->link_info.advertising_pam4) {
9828                         req->enables |=
9829                                 cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_PAM4_LINK_SPEED_MASK);
9830                         req->auto_link_pam4_speed_mask =
9831                                 cpu_to_le16(bp->link_info.advertising_pam4);
9832                 }
9833                 req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE);
9834                 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG);
9835         } else {
9836                 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE);
9837                 if (bp->link_info.req_signal_mode == BNXT_SIG_MODE_PAM4) {
9838                         req->force_pam4_link_speed = cpu_to_le16(bp->link_info.req_link_speed);
9839                         req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_PAM4_LINK_SPEED);
9840                 } else {
9841                         req->force_link_speed = cpu_to_le16(bp->link_info.req_link_speed);
9842                 }
9843         }
9844
9845         /* tell chimp that the setting takes effect immediately */
9846         req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESET_PHY);
9847 }
9848
9849 int bnxt_hwrm_set_pause(struct bnxt *bp)
9850 {
9851         struct hwrm_port_phy_cfg_input *req;
9852         int rc;
9853
9854         rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG);
9855         if (rc)
9856                 return rc;
9857
9858         bnxt_hwrm_set_pause_common(bp, req);
9859
9860         if ((bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) ||
9861             bp->link_info.force_link_chng)
9862                 bnxt_hwrm_set_link_common(bp, req);
9863
9864         rc = hwrm_req_send(bp, req);
9865         if (!rc && !(bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL)) {
9866                 /* since changing the pause setting doesn't trigger any link
9867                  * change event, the driver needs to update the current pause
9868                  * result upon successful return of the phy_cfg command
9869                  */
9870                 bp->link_info.pause =
9871                 bp->link_info.force_pause_setting = bp->link_info.req_flow_ctrl;
9872                 bp->link_info.auto_pause_setting = 0;
9873                 if (!bp->link_info.force_link_chng)
9874                         bnxt_report_link(bp);
9875         }
9876         bp->link_info.force_link_chng = false;
9877         return rc;
9878 }
9879
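/* Translate the cached ethtool EEE settings into PORT_PHY_CFG flags, the
 * EEE link speed mask and the TX LPI timer.
 */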
9880 static void bnxt_hwrm_set_eee(struct bnxt *bp,
9881                               struct hwrm_port_phy_cfg_input *req)
9882 {
9883         struct ethtool_eee *eee = &bp->eee;
9884
9885         if (eee->eee_enabled) {
9886                 u16 eee_speeds;
9887                 u32 flags = PORT_PHY_CFG_REQ_FLAGS_EEE_ENABLE;
9888
9889                 if (eee->tx_lpi_enabled)
9890                         flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_ENABLE;
9891                 else
9892                         flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_DISABLE;
9893
9894                 req->flags |= cpu_to_le32(flags);
9895                 eee_speeds = bnxt_get_fw_auto_link_speeds(eee->advertised);
9896                 req->eee_link_speed_mask = cpu_to_le16(eee_speeds);
9897                 req->tx_lpi_timer = cpu_to_le32(eee->tx_lpi_timer);
9898         } else {
9899                 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_EEE_DISABLE);
9900         }
9901 }
9902
9903 int bnxt_hwrm_set_link_setting(struct bnxt *bp, bool set_pause, bool set_eee)
9904 {
9905         struct hwrm_port_phy_cfg_input *req;
9906         int rc;
9907
9908         rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG);
9909         if (rc)
9910                 return rc;
9911
9912         if (set_pause)
9913                 bnxt_hwrm_set_pause_common(bp, req);
9914
9915         bnxt_hwrm_set_link_common(bp, req);
9916
9917         if (set_eee)
9918                 bnxt_hwrm_set_eee(bp, req);
9919         return hwrm_req_send(bp, req);
9920 }
9921
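/* Force the link down on the single PF, unless VFs are active and firmware
 * does not manage link-down on our behalf.
 */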
9922 static int bnxt_hwrm_shutdown_link(struct bnxt *bp)
9923 {
9924         struct hwrm_port_phy_cfg_input *req;
9925         int rc;
9926
9927         if (!BNXT_SINGLE_PF(bp))
9928                 return 0;
9929
9930         if (pci_num_vf(bp->pdev) &&
9931             !(bp->phy_flags & BNXT_PHY_FL_FW_MANAGED_LKDN))
9932                 return 0;
9933
9934         rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG);
9935         if (rc)
9936                 return rc;
9937
9938         req->flags = cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DWN);
9939         rc = hwrm_req_send(bp, req);
9940         if (!rc) {
9941                 mutex_lock(&bp->link_lock);
9942                 /* The device is not obliged to bring the link down in certain
9943                  * scenarios, even when forced. Setting the state to unknown is
9944                  * consistent with driver startup and will force the link state
9945                  * to be reported during a subsequent open based on PORT_PHY_QCFG.
9946                  */
9947                 bp->link_info.link_state = BNXT_LINK_STATE_UNKNOWN;
9948                 mutex_unlock(&bp->link_lock);
9949         }
9950         return rc;
9951 }
9952
9953 static int bnxt_fw_reset_via_optee(struct bnxt *bp)
9954 {
9955 #ifdef CONFIG_TEE_BNXT_FW
9956         int rc = tee_bnxt_fw_load();
9957
9958         if (rc)
9959                 netdev_err(bp->dev, "Failed FW reset via OP-TEE, rc=%d\n", rc);
9960
9961         return rc;
9962 #else
9963         netdev_err(bp->dev, "OP-TEE not supported\n");
9964         return -ENODEV;
9965 #endif
9966 }
9967
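/* Poll the firmware health register until firmware leaves the booting or
 * recovering state.  If firmware crashed with no master function, request a
 * reset through OP-TEE instead.
 */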
9968 static int bnxt_try_recover_fw(struct bnxt *bp)
9969 {
9970         if (bp->fw_health && bp->fw_health->status_reliable) {
9971                 int retry = 0, rc;
9972                 u32 sts;
9973
9974                 do {
9975                         sts = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
9976                         rc = bnxt_hwrm_poll(bp);
9977                         if (!BNXT_FW_IS_BOOTING(sts) &&
9978                             !BNXT_FW_IS_RECOVERING(sts))
9979                                 break;
9980                         retry++;
9981                 } while (rc == -EBUSY && retry < BNXT_FW_RETRY);
9982
9983                 if (!BNXT_FW_IS_HEALTHY(sts)) {
9984                         netdev_err(bp->dev,
9985                                    "Firmware not responding, status: 0x%x\n",
9986                                    sts);
9987                         rc = -ENODEV;
9988                 }
9989                 if (sts & FW_STATUS_REG_CRASHED_NO_MASTER) {
9990                         netdev_warn(bp->dev, "Firmware recover via OP-TEE requested\n");
9991                         return bnxt_fw_reset_via_optee(bp);
9992                 }
9993                 return rc;
9994         }
9995
9996         return -ENODEV;
9997 }
9998
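/* Forget all cached resource reservations.  Ring counts are also cleared
 * unless this is part of a firmware reset.
 */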
9999 static void bnxt_clear_reservations(struct bnxt *bp, bool fw_reset)
10000 {
10001         struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
10002
10003         if (!BNXT_NEW_RM(bp))
10004                 return; /* no resource reservations required */
10005
10006         hw_resc->resv_cp_rings = 0;
10007         hw_resc->resv_stat_ctxs = 0;
10008         hw_resc->resv_irqs = 0;
10009         hw_resc->resv_tx_rings = 0;
10010         hw_resc->resv_rx_rings = 0;
10011         hw_resc->resv_hw_ring_grps = 0;
10012         hw_resc->resv_vnics = 0;
10013         if (!fw_reset) {
10014                 bp->tx_nr_rings = 0;
10015                 bp->rx_nr_rings = 0;
10016         }
10017 }
10018
10019 int bnxt_cancel_reservations(struct bnxt *bp, bool fw_reset)
10020 {
10021         int rc;
10022
10023         if (!BNXT_NEW_RM(bp))
10024                 return 0; /* no resource reservations required */
10025
10026         rc = bnxt_hwrm_func_resc_qcaps(bp, true);
10027         if (rc)
10028                 netdev_err(bp->dev, "resc_qcaps failed\n");
10029
10030         bnxt_clear_reservations(bp, fw_reset);
10031
10032         return rc;
10033 }
10034
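/* Notify firmware that the interface is going up or down.  On the up path,
 * detect a firmware reset or a resource change and reinitialize firmware
 * state, interrupts and reservations as needed.
 */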
10035 static int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
10036 {
10037         struct hwrm_func_drv_if_change_output *resp;
10038         struct hwrm_func_drv_if_change_input *req;
10039         bool fw_reset = !bp->irq_tbl;
10040         bool resc_reinit = false;
10041         int rc, retry = 0;
10042         u32 flags = 0;
10043
10044         if (!(bp->fw_cap & BNXT_FW_CAP_IF_CHANGE))
10045                 return 0;
10046
10047         rc = hwrm_req_init(bp, req, HWRM_FUNC_DRV_IF_CHANGE);
10048         if (rc)
10049                 return rc;
10050
10051         if (up)
10052                 req->flags = cpu_to_le32(FUNC_DRV_IF_CHANGE_REQ_FLAGS_UP);
10053         resp = hwrm_req_hold(bp, req);
10054
10055         hwrm_req_flags(bp, req, BNXT_HWRM_FULL_WAIT);
10056         while (retry < BNXT_FW_IF_RETRY) {
10057                 rc = hwrm_req_send(bp, req);
10058                 if (rc != -EAGAIN)
10059                         break;
10060
10061                 msleep(50);
10062                 retry++;
10063         }
10064
10065         if (rc == -EAGAIN) {
10066                 hwrm_req_drop(bp, req);
10067                 return rc;
10068         } else if (!rc) {
10069                 flags = le32_to_cpu(resp->flags);
10070         } else if (up) {
10071                 rc = bnxt_try_recover_fw(bp);
10072                 fw_reset = true;
10073         }
10074         hwrm_req_drop(bp, req);
10075         if (rc)
10076                 return rc;
10077
10078         if (!up) {
10079                 bnxt_inv_fw_health_reg(bp);
10080                 return 0;
10081         }
10082
10083         if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_RESC_CHANGE)
10084                 resc_reinit = true;
10085         if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_HOT_FW_RESET_DONE ||
10086             test_bit(BNXT_STATE_FW_RESET_DET, &bp->state))
10087                 fw_reset = true;
10088         else
10089                 bnxt_remap_fw_health_regs(bp);
10090
10091         if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state) && !fw_reset) {
10092                 netdev_err(bp->dev, "RESET_DONE not set during FW reset.\n");
10093                 set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
10094                 return -ENODEV;
10095         }
10096         if (resc_reinit || fw_reset) {
10097                 if (fw_reset) {
10098                         set_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
10099                         if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
10100                                 bnxt_ulp_stop(bp);
10101                         bnxt_free_ctx_mem(bp);
10102                         kfree(bp->ctx);
10103                         bp->ctx = NULL;
10104                         bnxt_dcb_free(bp);
10105                         rc = bnxt_fw_init_one(bp);
10106                         if (rc) {
10107                                 clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
10108                                 set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
10109                                 return rc;
10110                         }
10111                         bnxt_clear_int_mode(bp);
10112                         rc = bnxt_init_int_mode(bp);
10113                         if (rc) {
10114                                 clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
10115                                 netdev_err(bp->dev, "init int mode failed\n");
10116                                 return rc;
10117                         }
10118                 }
10119                 rc = bnxt_cancel_reservations(bp, fw_reset);
10120         }
10121         return rc;
10122 }
10123
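/* Query and cache the port LED capabilities.  LED control is disabled if any
 * LED lacks a group ID or alternate-blink support.
 */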
10124 static int bnxt_hwrm_port_led_qcaps(struct bnxt *bp)
10125 {
10126         struct hwrm_port_led_qcaps_output *resp;
10127         struct hwrm_port_led_qcaps_input *req;
10128         struct bnxt_pf_info *pf = &bp->pf;
10129         int rc;
10130
10131         bp->num_leds = 0;
10132         if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10601)
10133                 return 0;
10134
10135         rc = hwrm_req_init(bp, req, HWRM_PORT_LED_QCAPS);
10136         if (rc)
10137                 return rc;
10138
10139         req->port_id = cpu_to_le16(pf->port_id);
10140         resp = hwrm_req_hold(bp, req);
10141         rc = hwrm_req_send(bp, req);
10142         if (rc) {
10143                 hwrm_req_drop(bp, req);
10144                 return rc;
10145         }
10146         if (resp->num_leds > 0 && resp->num_leds < BNXT_MAX_LED) {
10147                 int i;
10148
10149                 bp->num_leds = resp->num_leds;
10150                 memcpy(bp->leds, &resp->led0_id, sizeof(bp->leds[0]) *
10151                                                  bp->num_leds);
10152                 for (i = 0; i < bp->num_leds; i++) {
10153                         struct bnxt_led_info *led = &bp->leds[i];
10154                         __le16 caps = led->led_state_caps;
10155
10156                         if (!led->led_group_id ||
10157                             !BNXT_LED_ALT_BLINK_CAP(caps)) {
10158                                 bp->num_leds = 0;
10159                                 break;
10160                         }
10161                 }
10162         }
10163         hwrm_req_drop(bp, req);
10164         return 0;
10165 }
10166
10167 int bnxt_hwrm_alloc_wol_fltr(struct bnxt *bp)
10168 {
10169         struct hwrm_wol_filter_alloc_output *resp;
10170         struct hwrm_wol_filter_alloc_input *req;
10171         int rc;
10172
10173         rc = hwrm_req_init(bp, req, HWRM_WOL_FILTER_ALLOC);
10174         if (rc)
10175                 return rc;
10176
10177         req->port_id = cpu_to_le16(bp->pf.port_id);
10178         req->wol_type = WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT;
10179         req->enables = cpu_to_le32(WOL_FILTER_ALLOC_REQ_ENABLES_MAC_ADDRESS);
10180         memcpy(req->mac_address, bp->dev->dev_addr, ETH_ALEN);
10181
10182         resp = hwrm_req_hold(bp, req);
10183         rc = hwrm_req_send(bp, req);
10184         if (!rc)
10185                 bp->wol_filter_id = resp->wol_filter_id;
10186         hwrm_req_drop(bp, req);
10187         return rc;
10188 }
10189
10190 int bnxt_hwrm_free_wol_fltr(struct bnxt *bp)
10191 {
10192         struct hwrm_wol_filter_free_input *req;
10193         int rc;
10194
10195         rc = hwrm_req_init(bp, req, HWRM_WOL_FILTER_FREE);
10196         if (rc)
10197                 return rc;
10198
10199         req->port_id = cpu_to_le16(bp->pf.port_id);
10200         req->enables = cpu_to_le32(WOL_FILTER_FREE_REQ_ENABLES_WOL_FILTER_ID);
10201         req->wol_filter_id = bp->wol_filter_id;
10202
10203         return hwrm_req_send(bp, req);
10204 }
10205
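/* Query the WoL filter at @handle, remember it if it is a magic-packet
 * filter, and return the next handle in the list.
 */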
10206 static u16 bnxt_hwrm_get_wol_fltrs(struct bnxt *bp, u16 handle)
10207 {
10208         struct hwrm_wol_filter_qcfg_output *resp;
10209         struct hwrm_wol_filter_qcfg_input *req;
10210         u16 next_handle = 0;
10211         int rc;
10212
10213         rc = hwrm_req_init(bp, req, HWRM_WOL_FILTER_QCFG);
10214         if (rc)
10215                 return rc;
10216
10217         req->port_id = cpu_to_le16(bp->pf.port_id);
10218         req->handle = cpu_to_le16(handle);
10219         resp = hwrm_req_hold(bp, req);
10220         rc = hwrm_req_send(bp, req);
10221         if (!rc) {
10222                 next_handle = le16_to_cpu(resp->next_handle);
10223                 if (next_handle != 0) {
10224                         if (resp->wol_type ==
10225                             WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT) {
10226                                 bp->wol = 1;
10227                                 bp->wol_filter_id = resp->wol_filter_id;
10228                         }
10229                 }
10230         }
10231         hwrm_req_drop(bp, req);
10232         return next_handle;
10233 }
10234
10235 static void bnxt_get_wol_settings(struct bnxt *bp)
10236 {
10237         u16 handle = 0;
10238
10239         bp->wol = 0;
10240         if (!BNXT_PF(bp) || !(bp->flags & BNXT_FLAG_WOL_CAP))
10241                 return;
10242
10243         do {
10244                 handle = bnxt_hwrm_get_wol_fltrs(bp, handle);
10245         } while (handle && handle != 0xffff);
10246 }
10247
10248 #ifdef CONFIG_BNXT_HWMON
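/* hwmon temp1_input handler: query the chip temperature via HWRM and report
 * it in millidegrees Celsius.
 */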
10249 static ssize_t bnxt_show_temp(struct device *dev,
10250                               struct device_attribute *devattr, char *buf)
10251 {
10252         struct hwrm_temp_monitor_query_output *resp;
10253         struct hwrm_temp_monitor_query_input *req;
10254         struct bnxt *bp = dev_get_drvdata(dev);
10255         u32 len = 0;
10256         int rc;
10257
10258         rc = hwrm_req_init(bp, req, HWRM_TEMP_MONITOR_QUERY);
10259         if (rc)
10260                 return rc;
10261         resp = hwrm_req_hold(bp, req);
10262         rc = hwrm_req_send(bp, req);
10263         if (!rc)
10264                 len = sprintf(buf, "%u\n", resp->temp * 1000); /* display millidegree */
10265         hwrm_req_drop(bp, req);
10266         if (rc)
10267                 return rc;
10268         return len;
10269 }
10270 static SENSOR_DEVICE_ATTR(temp1_input, 0444, bnxt_show_temp, NULL, 0);
10271
10272 static struct attribute *bnxt_attrs[] = {
10273         &sensor_dev_attr_temp1_input.dev_attr.attr,
10274         NULL
10275 };
10276 ATTRIBUTE_GROUPS(bnxt);
10277
10278 static void bnxt_hwmon_close(struct bnxt *bp)
10279 {
10280         if (bp->hwmon_dev) {
10281                 hwmon_device_unregister(bp->hwmon_dev);
10282                 bp->hwmon_dev = NULL;
10283         }
10284 }
10285
10286 static void bnxt_hwmon_open(struct bnxt *bp)
10287 {
10288         struct hwrm_temp_monitor_query_input *req;
10289         struct pci_dev *pdev = bp->pdev;
10290         int rc;
10291
10292         rc = hwrm_req_init(bp, req, HWRM_TEMP_MONITOR_QUERY);
10293         if (!rc)
10294                 rc = hwrm_req_send_silent(bp, req);
10295         if (rc == -EACCES || rc == -EOPNOTSUPP) {
10296                 bnxt_hwmon_close(bp);
10297                 return;
10298         }
10299
10300         if (bp->hwmon_dev)
10301                 return;
10302
10303         bp->hwmon_dev = hwmon_device_register_with_groups(&pdev->dev,
10304                                                           DRV_MODULE_NAME, bp,
10305                                                           bnxt_groups);
10306         if (IS_ERR(bp->hwmon_dev)) {
10307                 bp->hwmon_dev = NULL;
10308                 dev_warn(&pdev->dev, "Cannot register hwmon device\n");
10309         }
10310 }
10311 #else
10312 static void bnxt_hwmon_close(struct bnxt *bp)
10313 {
10314 }
10315
10316 static void bnxt_hwmon_open(struct bnxt *bp)
10317 {
10318 }
10319 #endif
10320
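/* Validate the cached EEE settings against the current autoneg and
 * advertised speeds; returns false if they had to be adjusted.
 */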
10321 static bool bnxt_eee_config_ok(struct bnxt *bp)
10322 {
10323         struct ethtool_eee *eee = &bp->eee;
10324         struct bnxt_link_info *link_info = &bp->link_info;
10325
10326         if (!(bp->phy_flags & BNXT_PHY_FL_EEE_CAP))
10327                 return true;
10328
10329         if (eee->eee_enabled) {
10330                 u32 advertising =
10331                         _bnxt_fw_to_ethtool_adv_spds(link_info->advertising, 0);
10332
10333                 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
10334                         eee->eee_enabled = 0;
10335                         return false;
10336                 }
10337                 if (eee->advertised & ~advertising) {
10338                         eee->advertised = advertising & eee->supported;
10339                         return false;
10340                 }
10341         }
10342         return true;
10343 }
10344
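/* Refresh the link state and reprogram the PHY only if the requested link,
 * pause or EEE settings differ from what firmware currently reports.
 */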
10345 static int bnxt_update_phy_setting(struct bnxt *bp)
10346 {
10347         int rc;
10348         bool update_link = false;
10349         bool update_pause = false;
10350         bool update_eee = false;
10351         struct bnxt_link_info *link_info = &bp->link_info;
10352
10353         rc = bnxt_update_link(bp, true);
10354         if (rc) {
10355                 netdev_err(bp->dev, "failed to update link (rc: %x)\n",
10356                            rc);
10357                 return rc;
10358         }
10359         if (!BNXT_SINGLE_PF(bp))
10360                 return 0;
10361
10362         if ((link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
10363             (link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH) !=
10364             link_info->req_flow_ctrl)
10365                 update_pause = true;
10366         if (!(link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
10367             link_info->force_pause_setting != link_info->req_flow_ctrl)
10368                 update_pause = true;
10369         if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
10370                 if (BNXT_AUTO_MODE(link_info->auto_mode))
10371                         update_link = true;
10372                 if (link_info->req_signal_mode == BNXT_SIG_MODE_NRZ &&
10373                     link_info->req_link_speed != link_info->force_link_speed)
10374                         update_link = true;
10375                 else if (link_info->req_signal_mode == BNXT_SIG_MODE_PAM4 &&
10376                          link_info->req_link_speed != link_info->force_pam4_link_speed)
10377                         update_link = true;
10378                 if (link_info->req_duplex != link_info->duplex_setting)
10379                         update_link = true;
10380         } else {
10381                 if (link_info->auto_mode == BNXT_LINK_AUTO_NONE)
10382                         update_link = true;
10383                 if (link_info->advertising != link_info->auto_link_speeds ||
10384                     link_info->advertising_pam4 != link_info->auto_pam4_link_speeds)
10385                         update_link = true;
10386         }
10387
10388         /* The last close may have shut down the link, so we need to call
10389          * PHY_CFG to bring it back up.
10390          */
10391         if (!BNXT_LINK_IS_UP(bp))
10392                 update_link = true;
10393
10394         if (!bnxt_eee_config_ok(bp))
10395                 update_eee = true;
10396
10397         if (update_link)
10398                 rc = bnxt_hwrm_set_link_setting(bp, update_pause, update_eee);
10399         else if (update_pause)
10400                 rc = bnxt_hwrm_set_pause(bp);
10401         if (rc) {
10402                 netdev_err(bp->dev, "failed to update phy setting (rc: %x)\n",
10403                            rc);
10404                 return rc;
10405         }
10406
10407         return rc;
10408 }
10409
10410 /* Common routine to pre-map certain register blocks to different GRC windows.
10411  * A PF has 16 4K windows and a VF has 4 4K windows. However, only 15 windows
10412  * in the PF and 3 windows in the VF can be customized to map in different
10413  * register blocks.
10414  */
10415 static void bnxt_preset_reg_win(struct bnxt *bp)
10416 {
10417         if (BNXT_PF(bp)) {
10418                 /* CAG registers map to GRC window #4 */
10419                 writel(BNXT_CAG_REG_BASE,
10420                        bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 12);
10421         }
10422 }
10423
10424 static int bnxt_init_dflt_ring_mode(struct bnxt *bp);
10425
10426 static int bnxt_reinit_after_abort(struct bnxt *bp)
10427 {
10428         int rc;
10429
10430         if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
10431                 return -EBUSY;
10432
10433         if (bp->dev->reg_state == NETREG_UNREGISTERED)
10434                 return -ENODEV;
10435
10436         rc = bnxt_fw_init_one(bp);
10437         if (!rc) {
10438                 bnxt_clear_int_mode(bp);
10439                 rc = bnxt_init_int_mode(bp);
10440                 if (!rc) {
10441                         clear_bit(BNXT_STATE_ABORT_ERR, &bp->state);
10442                         set_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
10443                 }
10444         }
10445         return rc;
10446 }
10447
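/* Core open path: reserve rings, allocate memory and IRQs, initialize the
 * NIC, enable NAPI/interrupts/TX and re-apply PHY settings.
 */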
10448 static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
10449 {
10450         int rc = 0;
10451
10452         bnxt_preset_reg_win(bp);
10453         netif_carrier_off(bp->dev);
10454         if (irq_re_init) {
10455                 /* Reserve rings now if none were reserved at driver probe. */
10456                 rc = bnxt_init_dflt_ring_mode(bp);
10457                 if (rc) {
10458                         netdev_err(bp->dev, "Failed to reserve default rings at open\n");
10459                         return rc;
10460                 }
10461         }
10462         rc = bnxt_reserve_rings(bp, irq_re_init);
10463         if (rc)
10464                 return rc;
10465         if ((bp->flags & BNXT_FLAG_RFS) &&
10466             !(bp->flags & BNXT_FLAG_USING_MSIX)) {
10467                 /* disable RFS if falling back to INTA */
10468                 bp->dev->hw_features &= ~NETIF_F_NTUPLE;
10469                 bp->flags &= ~BNXT_FLAG_RFS;
10470         }
10471
10472         rc = bnxt_alloc_mem(bp, irq_re_init);
10473         if (rc) {
10474                 netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
10475                 goto open_err_free_mem;
10476         }
10477
10478         if (irq_re_init) {
10479                 bnxt_init_napi(bp);
10480                 rc = bnxt_request_irq(bp);
10481                 if (rc) {
10482                         netdev_err(bp->dev, "bnxt_request_irq err: %x\n", rc);
10483                         goto open_err_irq;
10484                 }
10485         }
10486
10487         rc = bnxt_init_nic(bp, irq_re_init);
10488         if (rc) {
10489                 netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
10490                 goto open_err_irq;
10491         }
10492
10493         bnxt_enable_napi(bp);
10494         bnxt_debug_dev_init(bp);
10495
10496         if (link_re_init) {
10497                 mutex_lock(&bp->link_lock);
10498                 rc = bnxt_update_phy_setting(bp);
10499                 mutex_unlock(&bp->link_lock);
10500                 if (rc) {
10501                         netdev_warn(bp->dev, "failed to update phy settings\n");
10502                         if (BNXT_SINGLE_PF(bp)) {
10503                                 bp->link_info.phy_retry = true;
10504                                 bp->link_info.phy_retry_expires =
10505                                         jiffies + 5 * HZ;
10506                         }
10507                 }
10508         }
10509
10510         if (irq_re_init)
10511                 udp_tunnel_nic_reset_ntf(bp->dev);
10512
10513         if (bp->tx_nr_rings_xdp < num_possible_cpus()) {
10514                 if (!static_key_enabled(&bnxt_xdp_locking_key))
10515                         static_branch_enable(&bnxt_xdp_locking_key);
10516         } else if (static_key_enabled(&bnxt_xdp_locking_key)) {
10517                 static_branch_disable(&bnxt_xdp_locking_key);
10518         }
10519         set_bit(BNXT_STATE_OPEN, &bp->state);
10520         bnxt_enable_int(bp);
10521         /* Enable TX queues */
10522         bnxt_tx_enable(bp);
10523         mod_timer(&bp->timer, jiffies + bp->current_interval);
10524         /* Poll the link status and check the SFP+ module status */
10525         mutex_lock(&bp->link_lock);
10526         bnxt_get_port_module_status(bp);
10527         mutex_unlock(&bp->link_lock);
10528
10529         /* VF-reps may need to be re-opened after the PF is re-opened */
10530         if (BNXT_PF(bp))
10531                 bnxt_vf_reps_open(bp);
10532         bnxt_ptp_init_rtc(bp, true);
10533         bnxt_ptp_cfg_tstamp_filters(bp);
10534         return 0;
10535
10536 open_err_irq:
10537         bnxt_del_napi(bp);
10538
10539 open_err_free_mem:
10540         bnxt_free_skbs(bp);
10541         bnxt_free_irq(bp);
10542         bnxt_free_mem(bp, true);
10543         return rc;
10544 }
10545
10546 /* rtnl_lock held */
10547 int bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
10548 {
10549         int rc = 0;
10550
10551         if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state))
10552                 rc = -EIO;
10553         if (!rc)
10554                 rc = __bnxt_open_nic(bp, irq_re_init, link_re_init);
10555         if (rc) {
10556                 netdev_err(bp->dev, "nic open fail (rc: %x)\n", rc);
10557                 dev_close(bp->dev);
10558         }
10559         return rc;
10560 }
10561
10562 /* rtnl_lock held, open the NIC halfway by allocating all resources, but
10563  * NAPI, IRQ, and TX are not enabled.  This is mainly used for offline
10564  * self tests.
10565  */
10566 int bnxt_half_open_nic(struct bnxt *bp)
10567 {
10568         int rc = 0;
10569
10570         if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) {
10571                 netdev_err(bp->dev, "A previous firmware reset has not completed, aborting half open\n");
10572                 rc = -ENODEV;
10573                 goto half_open_err;
10574         }
10575
10576         rc = bnxt_alloc_mem(bp, true);
10577         if (rc) {
10578                 netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
10579                 goto half_open_err;
10580         }
10581         set_bit(BNXT_STATE_HALF_OPEN, &bp->state);
10582         rc = bnxt_init_nic(bp, true);
10583         if (rc) {
10584                 clear_bit(BNXT_STATE_HALF_OPEN, &bp->state);
10585                 netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
10586                 goto half_open_err;
10587         }
10588         return 0;
10589
10590 half_open_err:
10591         bnxt_free_skbs(bp);
10592         bnxt_free_mem(bp, true);
10593         dev_close(bp->dev);
10594         return rc;
10595 }
10596
10597 /* rtnl_lock held, this call can only be made after a previous successful
10598  * call to bnxt_half_open_nic().
10599  */
10600 void bnxt_half_close_nic(struct bnxt *bp)
10601 {
10602         bnxt_hwrm_resource_free(bp, false, true);
10603         bnxt_free_skbs(bp);
10604         bnxt_free_mem(bp, true);
10605         clear_bit(BNXT_STATE_HALF_OPEN, &bp->state);
10606 }
10607
10608 void bnxt_reenable_sriov(struct bnxt *bp)
10609 {
10610         if (BNXT_PF(bp)) {
10611                 struct bnxt_pf_info *pf = &bp->pf;
10612                 int n = pf->active_vfs;
10613
10614                 if (n)
10615                         bnxt_cfg_hw_sriov(bp, &n, true);
10616         }
10617 }
10618
10619 static int bnxt_open(struct net_device *dev)
10620 {
10621         struct bnxt *bp = netdev_priv(dev);
10622         int rc;
10623
10624         if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) {
10625                 rc = bnxt_reinit_after_abort(bp);
10626                 if (rc) {
10627                         if (rc == -EBUSY)
10628                                 netdev_err(bp->dev, "A previous firmware reset has not completed, aborting\n");
10629                         else
10630                                 netdev_err(bp->dev, "Failed to reinitialize after aborted firmware reset\n");
10631                         return -ENODEV;
10632                 }
10633         }
10634
10635         rc = bnxt_hwrm_if_change(bp, true);
10636         if (rc)
10637                 return rc;
10638
10639         rc = __bnxt_open_nic(bp, true, true);
10640         if (rc) {
10641                 bnxt_hwrm_if_change(bp, false);
10642         } else {
10643                 if (test_and_clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state)) {
10644                         if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
10645                                 bnxt_ulp_start(bp, 0);
10646                                 bnxt_reenable_sriov(bp);
10647                         }
10648                 }
10649                 bnxt_hwmon_open(bp);
10650         }
10651
10652         return rc;
10653 }
10654
10655 static bool bnxt_drv_busy(struct bnxt *bp)
10656 {
10657         return (test_bit(BNXT_STATE_IN_SP_TASK, &bp->state) ||
10658                 test_bit(BNXT_STATE_READ_STATS, &bp->state));
10659 }
10660
10661 static void bnxt_get_ring_stats(struct bnxt *bp,
10662                                 struct rtnl_link_stats64 *stats);
10663
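/* Core close path: stop TX and NAPI, flush the rings, save ring stats and
 * free IRQs and memory.
 */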
10664 static void __bnxt_close_nic(struct bnxt *bp, bool irq_re_init,
10665                              bool link_re_init)
10666 {
10667         /* Close the VF-reps before closing PF */
10668         if (BNXT_PF(bp))
10669                 bnxt_vf_reps_close(bp);
10670
10671         /* Change device state to avoid TX queue wake-ups */
10672         bnxt_tx_disable(bp);
10673
10674         clear_bit(BNXT_STATE_OPEN, &bp->state);
10675         smp_mb__after_atomic();
10676         while (bnxt_drv_busy(bp))
10677                 msleep(20);
10678
10679         /* Flush rings and disable interrupts */
10680         bnxt_shutdown_nic(bp, irq_re_init);
10681
10682         /* TODO CHIMP_FW: Link/PHY related cleanup if (link_re_init) */
10683
10684         bnxt_debug_dev_exit(bp);
10685         bnxt_disable_napi(bp);
10686         del_timer_sync(&bp->timer);
10687         bnxt_free_skbs(bp);
10688
10689         /* Save ring stats before shutdown */
10690         if (bp->bnapi && irq_re_init) {
10691                 bnxt_get_ring_stats(bp, &bp->net_stats_prev);
10692                 bnxt_get_ring_err_stats(bp, &bp->ring_err_stats_prev);
10693         }
10694         if (irq_re_init) {
10695                 bnxt_free_irq(bp);
10696                 bnxt_del_napi(bp);
10697         }
10698         bnxt_free_mem(bp, irq_re_init);
10699 }
10700
10701 int bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
10702 {
10703         int rc = 0;
10704
10705         if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
10706                 /* If we get here, it means firmware reset is in progress
10707                  * while we are trying to close.  We can safely proceed with
10708                  * the close because we are holding rtnl_lock().  Some firmware
10709                  * messages may fail as we proceed to close.  We set the
10710                  * ABORT_ERR flag here so that the FW reset thread will later
10711                  * abort when it gets the rtnl_lock() and sees the flag.
10712                  */
10713                 netdev_warn(bp->dev, "FW reset in progress during close, FW reset will be aborted\n");
10714                 set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
10715         }
10716
10717 #ifdef CONFIG_BNXT_SRIOV
10718         if (bp->sriov_cfg) {
10719                 rc = wait_event_interruptible_timeout(bp->sriov_cfg_wait,
10720                                                       !bp->sriov_cfg,
10721                                                       BNXT_SRIOV_CFG_WAIT_TMO);
10722                 if (rc)
10723                         netdev_warn(bp->dev, "timeout waiting for SRIOV config operation to complete!\n");
10724         }
10725 #endif
10726         __bnxt_close_nic(bp, irq_re_init, link_re_init);
10727         return rc;
10728 }
10729
10730 static int bnxt_close(struct net_device *dev)
10731 {
10732         struct bnxt *bp = netdev_priv(dev);
10733
10734         bnxt_hwmon_close(bp);
10735         bnxt_close_nic(bp, true, true);
10736         bnxt_hwrm_shutdown_link(bp);
10737         bnxt_hwrm_if_change(bp, false);
10738         return 0;
10739 }
10740
10741 static int bnxt_hwrm_port_phy_read(struct bnxt *bp, u16 phy_addr, u16 reg,
10742                                    u16 *val)
10743 {
10744         struct hwrm_port_phy_mdio_read_output *resp;
10745         struct hwrm_port_phy_mdio_read_input *req;
10746         int rc;
10747
10748         if (bp->hwrm_spec_code < 0x10a00)
10749                 return -EOPNOTSUPP;
10750
10751         rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_MDIO_READ);
10752         if (rc)
10753                 return rc;
10754
10755         req->port_id = cpu_to_le16(bp->pf.port_id);
10756         req->phy_addr = phy_addr;
10757         req->reg_addr = cpu_to_le16(reg & 0x1f);
10758         if (mdio_phy_id_is_c45(phy_addr)) {
10759                 req->cl45_mdio = 1;
10760                 req->phy_addr = mdio_phy_id_prtad(phy_addr);
10761                 req->dev_addr = mdio_phy_id_devad(phy_addr);
10762                 req->reg_addr = cpu_to_le16(reg);
10763         }
10764
10765         resp = hwrm_req_hold(bp, req);
10766         rc = hwrm_req_send(bp, req);
10767         if (!rc)
10768                 *val = le16_to_cpu(resp->reg_data);
10769         hwrm_req_drop(bp, req);
10770         return rc;
10771 }
10772
10773 static int bnxt_hwrm_port_phy_write(struct bnxt *bp, u16 phy_addr, u16 reg,
10774                                     u16 val)
10775 {
10776         struct hwrm_port_phy_mdio_write_input *req;
10777         int rc;
10778
10779         if (bp->hwrm_spec_code < 0x10a00)
10780                 return -EOPNOTSUPP;
10781
10782         rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_MDIO_WRITE);
10783         if (rc)
10784                 return rc;
10785
10786         req->port_id = cpu_to_le16(bp->pf.port_id);
10787         req->phy_addr = phy_addr;
10788         req->reg_addr = cpu_to_le16(reg & 0x1f);
10789         if (mdio_phy_id_is_c45(phy_addr)) {
10790                 req->cl45_mdio = 1;
10791                 req->phy_addr = mdio_phy_id_prtad(phy_addr);
10792                 req->dev_addr = mdio_phy_id_devad(phy_addr);
10793                 req->reg_addr = cpu_to_le16(reg);
10794         }
10795         req->reg_data = cpu_to_le16(val);
10796
10797         return hwrm_req_send(bp, req);
10798 }
10799
10800 /* rtnl_lock held */
10801 static int bnxt_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
10802 {
10803         struct mii_ioctl_data *mdio = if_mii(ifr);
10804         struct bnxt *bp = netdev_priv(dev);
10805         int rc;
10806
10807         switch (cmd) {
10808         case SIOCGMIIPHY:
10809                 mdio->phy_id = bp->link_info.phy_addr;
10810
10811                 fallthrough;
10812         case SIOCGMIIREG: {
10813                 u16 mii_regval = 0;
10814
10815                 if (!netif_running(dev))
10816                         return -EAGAIN;
10817
10818                 rc = bnxt_hwrm_port_phy_read(bp, mdio->phy_id, mdio->reg_num,
10819                                              &mii_regval);
10820                 mdio->val_out = mii_regval;
10821                 return rc;
10822         }
10823
10824         case SIOCSMIIREG:
10825                 if (!netif_running(dev))
10826                         return -EAGAIN;
10827
10828                 return bnxt_hwrm_port_phy_write(bp, mdio->phy_id, mdio->reg_num,
10829                                                 mdio->val_in);
10830
10831         case SIOCSHWTSTAMP:
10832                 return bnxt_hwtstamp_set(dev, ifr);
10833
10834         case SIOCGHWTSTAMP:
10835                 return bnxt_hwtstamp_get(dev, ifr);
10836
10837         default:
10838                 /* do nothing */
10839                 break;
10840         }
10841         return -EOPNOTSUPP;
10842 }
10843
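/* Accumulate the per-completion-ring hardware counters into the
 * rtnl_link_stats64 structure.
 */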
10844 static void bnxt_get_ring_stats(struct bnxt *bp,
10845                                 struct rtnl_link_stats64 *stats)
10846 {
10847         int i;
10848
10849         for (i = 0; i < bp->cp_nr_rings; i++) {
10850                 struct bnxt_napi *bnapi = bp->bnapi[i];
10851                 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
10852                 u64 *sw = cpr->stats.sw_stats;
10853
10854                 stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_ucast_pkts);
10855                 stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_mcast_pkts);
10856                 stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_bcast_pkts);
10857
10858                 stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_ucast_pkts);
10859                 stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_mcast_pkts);
10860                 stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_bcast_pkts);
10861
10862                 stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_ucast_bytes);
10863                 stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_mcast_bytes);
10864                 stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_bcast_bytes);
10865
10866                 stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_ucast_bytes);
10867                 stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_mcast_bytes);
10868                 stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_bcast_bytes);
10869
10870                 stats->rx_missed_errors +=
10871                         BNXT_GET_RING_STATS64(sw, rx_discard_pkts);
10872
10873                 stats->multicast += BNXT_GET_RING_STATS64(sw, rx_mcast_pkts);
10874
10875                 stats->tx_dropped += BNXT_GET_RING_STATS64(sw, tx_error_pkts);
10876
10877                 stats->rx_dropped +=
10878                         cpr->sw_stats.rx.rx_netpoll_discards +
10879                         cpr->sw_stats.rx.rx_oom_discards;
10880         }
10881 }
10882
10883 static void bnxt_add_prev_stats(struct bnxt *bp,
10884                                 struct rtnl_link_stats64 *stats)
10885 {
10886         struct rtnl_link_stats64 *prev_stats = &bp->net_stats_prev;
10887
10888         stats->rx_packets += prev_stats->rx_packets;
10889         stats->tx_packets += prev_stats->tx_packets;
10890         stats->rx_bytes += prev_stats->rx_bytes;
10891         stats->tx_bytes += prev_stats->tx_bytes;
10892         stats->rx_missed_errors += prev_stats->rx_missed_errors;
10893         stats->multicast += prev_stats->multicast;
10894         stats->rx_dropped += prev_stats->rx_dropped;
10895         stats->tx_dropped += prev_stats->tx_dropped;
10896 }
10897
10898 static void
10899 bnxt_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
10900 {
10901         struct bnxt *bp = netdev_priv(dev);
10902
10903         set_bit(BNXT_STATE_READ_STATS, &bp->state);
10904         /* Make sure bnxt_close_nic() sees that we are reading stats before
10905          * we check the BNXT_STATE_OPEN flag.
10906          */
10907         smp_mb__after_atomic();
10908         if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
10909                 clear_bit(BNXT_STATE_READ_STATS, &bp->state);
10910                 *stats = bp->net_stats_prev;
10911                 return;
10912         }
10913
10914         bnxt_get_ring_stats(bp, stats);
10915         bnxt_add_prev_stats(bp, stats);
10916
10917         if (bp->flags & BNXT_FLAG_PORT_STATS) {
10918                 u64 *rx = bp->port_stats.sw_stats;
10919                 u64 *tx = bp->port_stats.sw_stats +
10920                           BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
10921
10922                 stats->rx_crc_errors =
10923                         BNXT_GET_RX_PORT_STATS64(rx, rx_fcs_err_frames);
10924                 stats->rx_frame_errors =
10925                         BNXT_GET_RX_PORT_STATS64(rx, rx_align_err_frames);
10926                 stats->rx_length_errors =
10927                         BNXT_GET_RX_PORT_STATS64(rx, rx_undrsz_frames) +
10928                         BNXT_GET_RX_PORT_STATS64(rx, rx_ovrsz_frames) +
10929                         BNXT_GET_RX_PORT_STATS64(rx, rx_runt_frames);
10930                 stats->rx_errors =
10931                         BNXT_GET_RX_PORT_STATS64(rx, rx_false_carrier_frames) +
10932                         BNXT_GET_RX_PORT_STATS64(rx, rx_jbr_frames);
10933                 stats->collisions =
10934                         BNXT_GET_TX_PORT_STATS64(tx, tx_total_collisions);
10935                 stats->tx_fifo_errors =
10936                         BNXT_GET_TX_PORT_STATS64(tx, tx_fifo_underruns);
10937                 stats->tx_errors = BNXT_GET_TX_PORT_STATS64(tx, tx_err);
10938         }
10939         clear_bit(BNXT_STATE_READ_STATS, &bp->state);
10940 }
10941
10942 static void bnxt_get_one_ring_err_stats(struct bnxt *bp,
10943                                         struct bnxt_total_ring_err_stats *stats,
10944                                         struct bnxt_cp_ring_info *cpr)
10945 {
10946         struct bnxt_sw_stats *sw_stats = &cpr->sw_stats;
10947         u64 *hw_stats = cpr->stats.sw_stats;
10948
10949         stats->rx_total_l4_csum_errors += sw_stats->rx.rx_l4_csum_errors;
10950         stats->rx_total_resets += sw_stats->rx.rx_resets;
10951         stats->rx_total_buf_errors += sw_stats->rx.rx_buf_errors;
10952         stats->rx_total_oom_discards += sw_stats->rx.rx_oom_discards;
10953         stats->rx_total_netpoll_discards += sw_stats->rx.rx_netpoll_discards;
10954         stats->rx_total_ring_discards +=
10955                 BNXT_GET_RING_STATS64(hw_stats, rx_discard_pkts);
10956         stats->tx_total_resets += sw_stats->tx.tx_resets;
10957         stats->tx_total_ring_discards +=
10958                 BNXT_GET_RING_STATS64(hw_stats, tx_discard_pkts);
10959         stats->total_missed_irqs += sw_stats->cmn.missed_irqs;
10960 }
10961
10962 void bnxt_get_ring_err_stats(struct bnxt *bp,
10963                              struct bnxt_total_ring_err_stats *stats)
10964 {
10965         int i;
10966
10967         for (i = 0; i < bp->cp_nr_rings; i++)
10968                 bnxt_get_one_ring_err_stats(bp, stats, &bp->bnapi[i]->cp_ring);
10969 }
10970
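/* Copy the netdev multicast list into the default VNIC.  Returns true if the
 * list changed; falls back to all-multicast if the list is too long.
 */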
10971 static bool bnxt_mc_list_updated(struct bnxt *bp, u32 *rx_mask)
10972 {
10973         struct net_device *dev = bp->dev;
10974         struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
10975         struct netdev_hw_addr *ha;
10976         u8 *haddr;
10977         int mc_count = 0;
10978         bool update = false;
10979         int off = 0;
10980
10981         netdev_for_each_mc_addr(ha, dev) {
10982                 if (mc_count >= BNXT_MAX_MC_ADDRS) {
10983                         *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
10984                         vnic->mc_list_count = 0;
10985                         return false;
10986                 }
10987                 haddr = ha->addr;
10988                 if (!ether_addr_equal(haddr, vnic->mc_list + off)) {
10989                         memcpy(vnic->mc_list + off, haddr, ETH_ALEN);
10990                         update = true;
10991                 }
10992                 off += ETH_ALEN;
10993                 mc_count++;
10994         }
10995         if (mc_count)
10996                 *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_MCAST;
10997
10998         if (mc_count != vnic->mc_list_count) {
10999                 vnic->mc_list_count = mc_count;
11000                 update = true;
11001         }
11002         return update;
11003 }
11004
11005 static bool bnxt_uc_list_updated(struct bnxt *bp)
11006 {
11007         struct net_device *dev = bp->dev;
11008         struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
11009         struct netdev_hw_addr *ha;
11010         int off = 0;
11011
11012         if (netdev_uc_count(dev) != (vnic->uc_filter_count - 1))
11013                 return true;
11014
11015         netdev_for_each_uc_addr(ha, dev) {
11016                 if (!ether_addr_equal(ha->addr, vnic->uc_list + off))
11017                         return true;
11018
11019                 off += ETH_ALEN;
11020         }
11021         return false;
11022 }
11023
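/* ndo_set_rx_mode handler: recompute the VNIC RX mask and schedule the
 * sp_task to reprogram the filters if anything changed.
 */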
11024 static void bnxt_set_rx_mode(struct net_device *dev)
11025 {
11026         struct bnxt *bp = netdev_priv(dev);
11027         struct bnxt_vnic_info *vnic;
11028         bool mc_update = false;
11029         bool uc_update;
11030         u32 mask;
11031
11032         if (!test_bit(BNXT_STATE_OPEN, &bp->state))
11033                 return;
11034
11035         vnic = &bp->vnic_info[0];
11036         mask = vnic->rx_mask;
11037         mask &= ~(CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS |
11038                   CFA_L2_SET_RX_MASK_REQ_MASK_MCAST |
11039                   CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST |
11040                   CFA_L2_SET_RX_MASK_REQ_MASK_BCAST);
11041
11042         if (dev->flags & IFF_PROMISC)
11043                 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
11044
11045         uc_update = bnxt_uc_list_updated(bp);
11046
11047         if (dev->flags & IFF_BROADCAST)
11048                 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
11049         if (dev->flags & IFF_ALLMULTI) {
11050                 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
11051                 vnic->mc_list_count = 0;
11052         } else if (dev->flags & IFF_MULTICAST) {
11053                 mc_update = bnxt_mc_list_updated(bp, &mask);
11054         }
11055
11056         if (mask != vnic->rx_mask || uc_update || mc_update) {
11057                 vnic->rx_mask = mask;
11058
11059                 bnxt_queue_sp_work(bp, BNXT_RX_MASK_SP_EVENT);
11060         }
11061 }
11062
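/* Apply the rx mode computed in bnxt_set_rx_mode(): free and re-program the
 * unicast L2 filters if the list changed, then send the updated rx mask to
 * firmware, falling back to ALL_MCAST if the multicast filters are rejected.
 */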
11063 static int bnxt_cfg_rx_mode(struct bnxt *bp)
11064 {
11065         struct net_device *dev = bp->dev;
11066         struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
11067         struct hwrm_cfa_l2_filter_free_input *req;
11068         struct netdev_hw_addr *ha;
11069         int i, off = 0, rc;
11070         bool uc_update;
11071
11072         netif_addr_lock_bh(dev);
11073         uc_update = bnxt_uc_list_updated(bp);
11074         netif_addr_unlock_bh(dev);
11075
11076         if (!uc_update)
11077                 goto skip_uc;
11078
11079         rc = hwrm_req_init(bp, req, HWRM_CFA_L2_FILTER_FREE);
11080         if (rc)
11081                 return rc;
11082         hwrm_req_hold(bp, req);
11083         for (i = 1; i < vnic->uc_filter_count; i++) {
11084                 req->l2_filter_id = vnic->fw_l2_filter_id[i];
11085
11086                 rc = hwrm_req_send(bp, req);
11087         }
11088         hwrm_req_drop(bp, req);
11089
11090         vnic->uc_filter_count = 1;
11091
11092         netif_addr_lock_bh(dev);
11093         if (netdev_uc_count(dev) > (BNXT_MAX_UC_ADDRS - 1)) {
11094                 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
11095         } else {
11096                 netdev_for_each_uc_addr(ha, dev) {
11097                         memcpy(vnic->uc_list + off, ha->addr, ETH_ALEN);
11098                         off += ETH_ALEN;
11099                         vnic->uc_filter_count++;
11100                 }
11101         }
11102         netif_addr_unlock_bh(dev);
11103
11104         for (i = 1, off = 0; i < vnic->uc_filter_count; i++, off += ETH_ALEN) {
11105                 rc = bnxt_hwrm_set_vnic_filter(bp, 0, i, vnic->uc_list + off);
11106                 if (rc) {
11107                         if (BNXT_VF(bp) && rc == -ENODEV) {
11108                                 if (!test_and_set_bit(BNXT_STATE_L2_FILTER_RETRY, &bp->state))
11109                                         netdev_warn(bp->dev, "Cannot configure L2 filters while PF is unavailable, will retry\n");
11110                                 else
11111                                         netdev_dbg(bp->dev, "PF still unavailable while configuring L2 filters.\n");
11112                                 rc = 0;
11113                         } else {
11114                                 netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc);
11115                         }
11116                         vnic->uc_filter_count = i;
11117                         return rc;
11118                 }
11119         }
11120         if (test_and_clear_bit(BNXT_STATE_L2_FILTER_RETRY, &bp->state))
11121                 netdev_notice(bp->dev, "Retry of L2 filter configuration successful.\n");
11122
11123 skip_uc:
11124         if ((vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS) &&
11125             !bnxt_promisc_ok(bp))
11126                 vnic->rx_mask &= ~CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
11127         rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
11128         if (rc && (vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_MCAST)) {
11129                 netdev_info(bp->dev, "Failed setting MC filters rc: %d, turning on ALL_MCAST mode\n",
11130                             rc);
11131                 vnic->rx_mask &= ~CFA_L2_SET_RX_MASK_REQ_MASK_MCAST;
11132                 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
11133                 vnic->mc_list_count = 0;
11134                 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
11135         }
11136         if (rc)
11137                 netdev_err(bp->dev, "HWRM cfa l2 rx mask failure rc: %d\n",
11138                            rc);
11139
11140         return rc;
11141 }
11142
11143 static bool bnxt_can_reserve_rings(struct bnxt *bp)
11144 {
11145 #ifdef CONFIG_BNXT_SRIOV
11146         if (BNXT_NEW_RM(bp) && BNXT_VF(bp)) {
11147                 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
11148
11149                 /* If no minimum rings were provisioned by the PF, don't
11150                  * reserve rings by default while the device is down.
11151                  */
11152                 if (hw_resc->min_tx_rings || hw_resc->resv_tx_rings)
11153                         return true;
11154
11155                 if (!netif_running(bp->dev))
11156                         return false;
11157         }
11158 #endif
11159         return true;
11160 }
11161
11162 /* If the chip and firmware support RFS */
11163 static bool bnxt_rfs_supported(struct bnxt *bp)
11164 {
11165         if (bp->flags & BNXT_FLAG_CHIP_P5) {
11166                 if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2)
11167                         return true;
11168                 return false;
11169         }
11170         /* 212 firmware is broken for aRFS */
11171         if (BNXT_FW_MAJ(bp) == 212)
11172                 return false;
11173         if (BNXT_PF(bp) && !BNXT_CHIP_TYPE_NITRO_A0(bp))
11174                 return true;
11175         if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
11176                 return true;
11177         return false;
11178 }
11179
11180 /* If runtime conditions support RFS */
11181 static bool bnxt_rfs_capable(struct bnxt *bp)
11182 {
11183 #ifdef CONFIG_RFS_ACCEL
11184         int vnics, max_vnics, max_rss_ctxs;
11185
11186         if (bp->flags & BNXT_FLAG_CHIP_P5)
11187                 return bnxt_rfs_supported(bp);
11188         if (!(bp->flags & BNXT_FLAG_MSIX_CAP) || !bnxt_can_reserve_rings(bp) || !bp->rx_nr_rings)
11189                 return false;
11190
11191         vnics = 1 + bp->rx_nr_rings;
11192         max_vnics = bnxt_get_max_func_vnics(bp);
11193         max_rss_ctxs = bnxt_get_max_func_rss_ctxs(bp);
11194
11195         /* RSS contexts not a limiting factor */
11196         if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
11197                 max_rss_ctxs = max_vnics;
11198         if (vnics > max_vnics || vnics > max_rss_ctxs) {
11199                 if (bp->rx_nr_rings > 1)
11200                         netdev_warn(bp->dev,
11201                                     "Not enough resources to support NTUPLE filters, enough resources for up to %d rx rings\n",
11202                                     min(max_rss_ctxs - 1, max_vnics - 1));
11203                 return false;
11204         }
11205
11206         if (!BNXT_NEW_RM(bp))
11207                 return true;
11208
11209         if (vnics == bp->hw_resc.resv_vnics)
11210                 return true;
11211
11212         bnxt_hwrm_reserve_rings(bp, 0, 0, 0, 0, 0, vnics);
11213         if (vnics <= bp->hw_resc.resv_vnics)
11214                 return true;
11215
11216         netdev_warn(bp->dev, "Unable to reserve resources to support NTUPLE filters.\n");
11217         bnxt_hwrm_reserve_rings(bp, 0, 0, 0, 0, 0, 1);
11218         return false;
11219 #else
11220         return false;
11221 #endif
11222 }
11223
11224 static netdev_features_t bnxt_fix_features(struct net_device *dev,
11225                                            netdev_features_t features)
11226 {
11227         struct bnxt *bp = netdev_priv(dev);
11228         netdev_features_t vlan_features;
11229
11230         if ((features & NETIF_F_NTUPLE) && !bnxt_rfs_capable(bp))
11231                 features &= ~NETIF_F_NTUPLE;
11232
11233         if ((bp->flags & BNXT_FLAG_NO_AGG_RINGS) || bp->xdp_prog)
11234                 features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
11235
11236         if (!(features & NETIF_F_GRO))
11237                 features &= ~NETIF_F_GRO_HW;
11238
11239         if (features & NETIF_F_GRO_HW)
11240                 features &= ~NETIF_F_LRO;
11241
11242         /* Both CTAG and STAG VLAN acceleration on the RX side have to be
11243          * turned on or off together.
11244          */
11245         vlan_features = features & BNXT_HW_FEATURE_VLAN_ALL_RX;
11246         if (vlan_features != BNXT_HW_FEATURE_VLAN_ALL_RX) {
11247                 if (dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX)
11248                         features &= ~BNXT_HW_FEATURE_VLAN_ALL_RX;
11249                 else if (vlan_features)
11250                         features |= BNXT_HW_FEATURE_VLAN_ALL_RX;
11251         }
11252 #ifdef CONFIG_BNXT_SRIOV
11253         if (BNXT_VF(bp) && bp->vf.vlan)
11254                 features &= ~BNXT_HW_FEATURE_VLAN_ALL_RX;
11255 #endif
11256         return features;
11257 }
11258
11259 static int bnxt_set_features(struct net_device *dev, netdev_features_t features)
11260 {
11261         struct bnxt *bp = netdev_priv(dev);
11262         u32 flags = bp->flags;
11263         u32 changes;
11264         int rc = 0;
11265         bool re_init = false;
11266         bool update_tpa = false;
11267
11268         flags &= ~BNXT_FLAG_ALL_CONFIG_FEATS;
11269         if (features & NETIF_F_GRO_HW)
11270                 flags |= BNXT_FLAG_GRO;
11271         else if (features & NETIF_F_LRO)
11272                 flags |= BNXT_FLAG_LRO;
11273
11274         if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
11275                 flags &= ~BNXT_FLAG_TPA;
11276
11277         if (features & BNXT_HW_FEATURE_VLAN_ALL_RX)
11278                 flags |= BNXT_FLAG_STRIP_VLAN;
11279
11280         if (features & NETIF_F_NTUPLE)
11281                 flags |= BNXT_FLAG_RFS;
11282
11283         changes = flags ^ bp->flags;
11284         if (changes & BNXT_FLAG_TPA) {
11285                 update_tpa = true;
11286                 if ((bp->flags & BNXT_FLAG_TPA) == 0 ||
11287                     (flags & BNXT_FLAG_TPA) == 0 ||
11288                     (bp->flags & BNXT_FLAG_CHIP_P5))
11289                         re_init = true;
11290         }
11291
11292         if (changes & ~BNXT_FLAG_TPA)
11293                 re_init = true;
11294
11295         if (flags != bp->flags) {
11296                 u32 old_flags = bp->flags;
11297
11298                 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
11299                         bp->flags = flags;
11300                         if (update_tpa)
11301                                 bnxt_set_ring_params(bp);
11302                         return rc;
11303                 }
11304
11305                 if (re_init) {
11306                         bnxt_close_nic(bp, false, false);
11307                         bp->flags = flags;
11308                         if (update_tpa)
11309                                 bnxt_set_ring_params(bp);
11310
11311                         return bnxt_open_nic(bp, false, false);
11312                 }
11313                 if (update_tpa) {
11314                         bp->flags = flags;
11315                         rc = bnxt_set_tpa(bp,
11316                                           (flags & BNXT_FLAG_TPA) ?
11317                                           true : false);
11318                         if (rc)
11319                                 bp->flags = old_flags;
11320                 }
11321         }
11322         return rc;
11323 }
11324
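/* Walk the IPv6 extension headers starting at @nw_off and decide whether the
 * packet can still be offloaded.  For encapsulated packets, *nextp is set to
 * the next header so the caller can validate the inner protocol.
 */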
11325 static bool bnxt_exthdr_check(struct bnxt *bp, struct sk_buff *skb, int nw_off,
11326                               u8 **nextp)
11327 {
11328         struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + nw_off);
11329         struct hop_jumbo_hdr *jhdr;
11330         int hdr_count = 0;
11331         u8 *nexthdr;
11332         int start;
11333
11334         /* Check that there are at most 2 IPv6 extension headers, no
11335          * fragment header, and each is <= 64 bytes.
11336          */
11337         start = nw_off + sizeof(*ip6h);
11338         nexthdr = &ip6h->nexthdr;
11339         while (ipv6_ext_hdr(*nexthdr)) {
11340                 struct ipv6_opt_hdr *hp;
11341                 int hdrlen;
11342
11343                 if (hdr_count >= 3 || *nexthdr == NEXTHDR_NONE ||
11344                     *nexthdr == NEXTHDR_FRAGMENT)
11345                         return false;
11346                 hp = __skb_header_pointer(NULL, start, sizeof(*hp), skb->data,
11347                                           skb_headlen(skb), NULL);
11348                 if (!hp)
11349                         return false;
11350                 if (*nexthdr == NEXTHDR_AUTH)
11351                         hdrlen = ipv6_authlen(hp);
11352                 else
11353                         hdrlen = ipv6_optlen(hp);
11354
11355                 if (hdrlen > 64)
11356                         return false;
11357
11358                 /* The ext header may be a hop-by-hop header inserted for
11359                  * big TCP purposes.  It will be removed before the packet
11360                  * leaves the NIC, so do not count it.
11361                  */
11362                 if (*nexthdr == NEXTHDR_HOP) {
11363                         if (likely(skb->len <= GRO_LEGACY_MAX_SIZE))
11364                                 goto increment_hdr;
11365
11366                         jhdr = (struct hop_jumbo_hdr *)hp;
11367                         if (jhdr->tlv_type != IPV6_TLV_JUMBO || jhdr->hdrlen != 0 ||
11368                             jhdr->nexthdr != IPPROTO_TCP)
11369                                 goto increment_hdr;
11370
11371                         goto next_hdr;
11372                 }
11373 increment_hdr:
11374                 hdr_count++;
11375 next_hdr:
11376                 nexthdr = &hp->nexthdr;
11377                 start += hdrlen;
11378         }
11379         if (nextp) {
11380                 /* Caller will check inner protocol */
11381                 if (skb->encapsulation) {
11382                         *nextp = nexthdr;
11383                         return true;
11384                 }
11385                 *nextp = NULL;
11386         }
11387         /* Only support TCP/UDP for non-tunneled ipv6 and inner ipv6 */
11388         return *nexthdr == IPPROTO_TCP || *nexthdr == IPPROTO_UDP;
11389 }
11390
11391 /* For UDP, we can only handle 1 VXLAN port and 1 Geneve port. */
11392 static bool bnxt_udp_tunl_check(struct bnxt *bp, struct sk_buff *skb)
11393 {
11394         struct udphdr *uh = udp_hdr(skb);
11395         __be16 udp_port = uh->dest;
11396
11397         if (udp_port != bp->vxlan_port && udp_port != bp->nge_port)
11398                 return false;
11399         if (skb->inner_protocol_type == ENCAP_TYPE_ETHER) {
11400                 struct ethhdr *eh = inner_eth_hdr(skb);
11401
11402                 switch (eh->h_proto) {
11403                 case htons(ETH_P_IP):
11404                         return true;
11405                 case htons(ETH_P_IPV6):
11406                         return bnxt_exthdr_check(bp, skb,
11407                                                  skb_inner_network_offset(skb),
11408                                                  NULL);
11409                 }
11410         }
11411         return false;
11412 }
11413
11414 static bool bnxt_tunl_check(struct bnxt *bp, struct sk_buff *skb, u8 l4_proto)
11415 {
11416         switch (l4_proto) {
11417         case IPPROTO_UDP:
11418                 return bnxt_udp_tunl_check(bp, skb);
11419         case IPPROTO_IPIP:
11420                 return true;
11421         case IPPROTO_GRE: {
11422                 switch (skb->inner_protocol) {
11423                 default:
11424                         return false;
11425                 case htons(ETH_P_IP):
11426                         return true;
11427                 case htons(ETH_P_IPV6):
11428                         fallthrough;
11429                 }
11430         }
11431         case IPPROTO_IPV6:
11432                 /* Check ext headers of inner ipv6 */
11433                 return bnxt_exthdr_check(bp, skb, skb_inner_network_offset(skb),
11434                                          NULL);
11435         }
11436         return false;
11437 }
11438
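/* .ndo_features_check handler: drop checksum and GSO offloads for tunneled
 * or IPv6 packets whose encapsulation or extension headers the hardware
 * cannot handle.
 */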
11439 static netdev_features_t bnxt_features_check(struct sk_buff *skb,
11440                                              struct net_device *dev,
11441                                              netdev_features_t features)
11442 {
11443         struct bnxt *bp = netdev_priv(dev);
11444         u8 *l4_proto;
11445
11446         features = vlan_features_check(skb, features);
11447         switch (vlan_get_protocol(skb)) {
11448         case htons(ETH_P_IP):
11449                 if (!skb->encapsulation)
11450                         return features;
11451                 l4_proto = &ip_hdr(skb)->protocol;
11452                 if (bnxt_tunl_check(bp, skb, *l4_proto))
11453                         return features;
11454                 break;
11455         case htons(ETH_P_IPV6):
11456                 if (!bnxt_exthdr_check(bp, skb, skb_network_offset(skb),
11457                                        &l4_proto))
11458                         break;
11459                 if (!l4_proto || bnxt_tunl_check(bp, skb, *l4_proto))
11460                         return features;
11461                 break;
11462         }
11463         return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
11464 }
11465
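/* Read @num_words 32-bit registers starting at @reg_off (accessed through the
 * CHIMP register view) via the HWRM_DBG_READ_DIRECT firmware command and
 * return them CPU-endian in @reg_buf.  Returns 0 on success or a negative
 * errno.
 *
 * Illustrative call (not from this file):
 *	u32 regs[4];
 *	int rc = bnxt_dbg_hwrm_rd_reg(bp, 0, 4, regs);
 */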
11466 int bnxt_dbg_hwrm_rd_reg(struct bnxt *bp, u32 reg_off, u16 num_words,
11467                          u32 *reg_buf)
11468 {
11469         struct hwrm_dbg_read_direct_output *resp;
11470         struct hwrm_dbg_read_direct_input *req;
11471         __le32 *dbg_reg_buf;
11472         dma_addr_t mapping;
11473         int rc, i;
11474
11475         rc = hwrm_req_init(bp, req, HWRM_DBG_READ_DIRECT);
11476         if (rc)
11477                 return rc;
11478
11479         dbg_reg_buf = hwrm_req_dma_slice(bp, req, num_words * 4,
11480                                          &mapping);
11481         if (!dbg_reg_buf) {
11482                 rc = -ENOMEM;
11483                 goto dbg_rd_reg_exit;
11484         }
11485
11486         req->host_dest_addr = cpu_to_le64(mapping);
11487
11488         resp = hwrm_req_hold(bp, req);
11489         req->read_addr = cpu_to_le32(reg_off + CHIMP_REG_VIEW_ADDR);
11490         req->read_len32 = cpu_to_le32(num_words);
11491
11492         rc = hwrm_req_send(bp, req);
11493         if (rc || resp->error_code) {
11494                 rc = -EIO;
11495                 goto dbg_rd_reg_exit;
11496         }
11497         for (i = 0; i < num_words; i++)
11498                 reg_buf[i] = le32_to_cpu(dbg_reg_buf[i]);
11499
11500 dbg_rd_reg_exit:
11501         hwrm_req_drop(bp, req);
11502         return rc;
11503 }
11504
11505 static int bnxt_dbg_hwrm_ring_info_get(struct bnxt *bp, u8 ring_type,
11506                                        u32 ring_id, u32 *prod, u32 *cons)
11507 {
11508         struct hwrm_dbg_ring_info_get_output *resp;
11509         struct hwrm_dbg_ring_info_get_input *req;
11510         int rc;
11511
11512         rc = hwrm_req_init(bp, req, HWRM_DBG_RING_INFO_GET);
11513         if (rc)
11514                 return rc;
11515
11516         req->ring_type = ring_type;
11517         req->fw_ring_id = cpu_to_le32(ring_id);
11518         resp = hwrm_req_hold(bp, req);
11519         rc = hwrm_req_send(bp, req);
11520         if (!rc) {
11521                 *prod = le32_to_cpu(resp->producer_index);
11522                 *cons = le32_to_cpu(resp->consumer_index);
11523         }
11524         hwrm_req_drop(bp, req);
11525         return rc;
11526 }
11527
11528 static void bnxt_dump_tx_sw_state(struct bnxt_napi *bnapi)
11529 {
11530         struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
11531         int i = bnapi->index;
11532
11533         if (!txr)
11534                 return;
11535
11536         netdev_info(bnapi->bp->dev, "[%d]: tx{fw_ring: %d prod: %x cons: %x}\n",
11537                     i, txr->tx_ring_struct.fw_ring_id, txr->tx_prod,
11538                     txr->tx_cons);
11539 }
11540
11541 static void bnxt_dump_rx_sw_state(struct bnxt_napi *bnapi)
11542 {
11543         struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
11544         int i = bnapi->index;
11545
11546         if (!rxr)
11547                 return;
11548
11549         netdev_info(bnapi->bp->dev, "[%d]: rx{fw_ring: %d prod: %x} rx_agg{fw_ring: %d agg_prod: %x sw_agg_prod: %x}\n",
11550                     i, rxr->rx_ring_struct.fw_ring_id, rxr->rx_prod,
11551                     rxr->rx_agg_ring_struct.fw_ring_id, rxr->rx_agg_prod,
11552                     rxr->rx_sw_agg_prod);
11553 }
11554
11555 static void bnxt_dump_cp_sw_state(struct bnxt_napi *bnapi)
11556 {
11557         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
11558         int i = bnapi->index;
11559
11560         netdev_info(bnapi->bp->dev, "[%d]: cp{fw_ring: %d raw_cons: %x}\n",
11561                     i, cpr->cp_ring_struct.fw_ring_id, cpr->cp_raw_cons);
11562 }
11563
11564 static void bnxt_dbg_dump_states(struct bnxt *bp)
11565 {
11566         int i;
11567         struct bnxt_napi *bnapi;
11568
11569         for (i = 0; i < bp->cp_nr_rings; i++) {
11570                 bnapi = bp->bnapi[i];
11571                 if (netif_msg_drv(bp)) {
11572                         bnxt_dump_tx_sw_state(bnapi);
11573                         bnxt_dump_rx_sw_state(bnapi);
11574                         bnxt_dump_cp_sw_state(bnapi);
11575                 }
11576         }
11577 }
11578
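/* Ask firmware to reset a single RX ring group (HWRM_RING_RESET with type
 * RX_RING_GRP).  Sent silently; the caller falls back to a full reset if the
 * command is not supported.
 */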
11579 static int bnxt_hwrm_rx_ring_reset(struct bnxt *bp, int ring_nr)
11580 {
11581         struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr];
11582         struct hwrm_ring_reset_input *req;
11583         struct bnxt_napi *bnapi = rxr->bnapi;
11584         struct bnxt_cp_ring_info *cpr;
11585         u16 cp_ring_id;
11586         int rc;
11587
11588         rc = hwrm_req_init(bp, req, HWRM_RING_RESET);
11589         if (rc)
11590                 return rc;
11591
11592         cpr = &bnapi->cp_ring;
11593         cp_ring_id = cpr->cp_ring_struct.fw_ring_id;
11594         req->cmpl_ring = cpu_to_le16(cp_ring_id);
11595         req->ring_type = RING_RESET_REQ_RING_TYPE_RX_RING_GRP;
11596         req->ring_id = cpu_to_le16(bp->grp_info[bnapi->index].fw_grp_id);
11597         return hwrm_req_send_silent(bp, req);
11598 }
11599
11600 static void bnxt_reset_task(struct bnxt *bp, bool silent)
11601 {
11602         if (!silent)
11603                 bnxt_dbg_dump_states(bp);
11604         if (netif_running(bp->dev)) {
11605                 int rc;
11606
11607                 if (silent) {
11608                         bnxt_close_nic(bp, false, false);
11609                         bnxt_open_nic(bp, false, false);
11610                 } else {
11611                         bnxt_ulp_stop(bp);
11612                         bnxt_close_nic(bp, true, false);
11613                         rc = bnxt_open_nic(bp, true, false);
11614                         bnxt_ulp_start(bp, rc);
11615                 }
11616         }
11617 }
11618
11619 static void bnxt_tx_timeout(struct net_device *dev, unsigned int txqueue)
11620 {
11621         struct bnxt *bp = netdev_priv(dev);
11622
11623         netdev_err(bp->dev, "TX timeout detected, starting reset task!\n");
11624         bnxt_queue_sp_work(bp, BNXT_RESET_TASK_SP_EVENT);
11625 }
11626
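/* Called from bnxt_timer() when error recovery is enabled.  Samples the
 * firmware heartbeat and reset counters every tmr_multiplier ticks; a stalled
 * heartbeat or an unexpected reset count change queues
 * BNXT_FW_EXCEPTION_SP_EVENT to start recovery.
 */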
11627 static void bnxt_fw_health_check(struct bnxt *bp)
11628 {
11629         struct bnxt_fw_health *fw_health = bp->fw_health;
11630         struct pci_dev *pdev = bp->pdev;
11631         u32 val;
11632
11633         if (!fw_health->enabled || test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
11634                 return;
11635
11636         /* Make sure it is enabled before checking the tmr_counter. */
11637         smp_rmb();
11638         if (fw_health->tmr_counter) {
11639                 fw_health->tmr_counter--;
11640                 return;
11641         }
11642
11643         val = bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
11644         if (val == fw_health->last_fw_heartbeat && pci_device_is_present(pdev)) {
11645                 fw_health->arrests++;
11646                 goto fw_reset;
11647         }
11648
11649         fw_health->last_fw_heartbeat = val;
11650
11651         val = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
11652         if (val != fw_health->last_fw_reset_cnt && pci_device_is_present(pdev)) {
11653                 fw_health->discoveries++;
11654                 goto fw_reset;
11655         }
11656
11657         fw_health->tmr_counter = fw_health->tmr_multiplier;
11658         return;
11659
11660 fw_reset:
11661         bnxt_queue_sp_work(bp, BNXT_FW_EXCEPTION_SP_EVENT);
11662 }
11663
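/* Periodic driver timer.  Runs the firmware health check and queues deferred
 * work (stats collection, PHY retries, flow stats, ntuple filters) to the sp
 * task, then re-arms itself with bp->current_interval.
 */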
11664 static void bnxt_timer(struct timer_list *t)
11665 {
11666         struct bnxt *bp = from_timer(bp, t, timer);
11667         struct net_device *dev = bp->dev;
11668
11669         if (!netif_running(dev) || !test_bit(BNXT_STATE_OPEN, &bp->state))
11670                 return;
11671
11672         if (atomic_read(&bp->intr_sem) != 0)
11673                 goto bnxt_restart_timer;
11674
11675         if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)
11676                 bnxt_fw_health_check(bp);
11677
11678         if (BNXT_LINK_IS_UP(bp) && bp->stats_coal_ticks)
11679                 bnxt_queue_sp_work(bp, BNXT_PERIODIC_STATS_SP_EVENT);
11680
11681         if (bnxt_tc_flower_enabled(bp))
11682                 bnxt_queue_sp_work(bp, BNXT_FLOW_STATS_SP_EVENT);
11683
11684 #ifdef CONFIG_RFS_ACCEL
11685         if ((bp->flags & BNXT_FLAG_RFS) && bp->ntp_fltr_count)
11686                 bnxt_queue_sp_work(bp, BNXT_RX_NTP_FLTR_SP_EVENT);
11687 #endif /*CONFIG_RFS_ACCEL*/
11688
11689         if (bp->link_info.phy_retry) {
11690                 if (time_after(jiffies, bp->link_info.phy_retry_expires)) {
11691                         bp->link_info.phy_retry = false;
11692                         netdev_warn(bp->dev, "failed to update phy settings after maximum retries.\n");
11693                 } else {
11694                         bnxt_queue_sp_work(bp, BNXT_UPDATE_PHY_SP_EVENT);
11695                 }
11696         }
11697
11698         if (test_bit(BNXT_STATE_L2_FILTER_RETRY, &bp->state))
11699                 bnxt_queue_sp_work(bp, BNXT_RX_MASK_SP_EVENT);
11700
11701         if ((bp->flags & BNXT_FLAG_CHIP_P5) && !bp->chip_rev &&
11702             netif_carrier_ok(dev))
11703                 bnxt_queue_sp_work(bp, BNXT_RING_COAL_NOW_SP_EVENT);
11704
11705 bnxt_restart_timer:
11706         mod_timer(&bp->timer, jiffies + bp->current_interval);
11707 }
11708
11709 static void bnxt_rtnl_lock_sp(struct bnxt *bp)
11710 {
11711         /* We are called from bnxt_sp_task which has BNXT_STATE_IN_SP_TASK
11712          * set.  If the device is being closed, bnxt_close() may be holding
11713          * the rtnl lock and waiting for BNXT_STATE_IN_SP_TASK to clear.  So
11714          * we must clear BNXT_STATE_IN_SP_TASK before taking the rtnl lock.
11715          */
11716         clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
11717         rtnl_lock();
11718 }
11719
11720 static void bnxt_rtnl_unlock_sp(struct bnxt *bp)
11721 {
11722         set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
11723         rtnl_unlock();
11724 }
11725
11726 /* Only called from bnxt_sp_task() */
11727 static void bnxt_reset(struct bnxt *bp, bool silent)
11728 {
11729         bnxt_rtnl_lock_sp(bp);
11730         if (test_bit(BNXT_STATE_OPEN, &bp->state))
11731                 bnxt_reset_task(bp, silent);
11732         bnxt_rtnl_unlock_sp(bp);
11733 }
11734
11735 /* Only called from bnxt_sp_task() */
11736 static void bnxt_rx_ring_reset(struct bnxt *bp)
11737 {
11738         int i;
11739
11740         bnxt_rtnl_lock_sp(bp);
11741         if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
11742                 bnxt_rtnl_unlock_sp(bp);
11743                 return;
11744         }
11745         /* Disable and flush TPA before resetting the RX ring */
11746         if (bp->flags & BNXT_FLAG_TPA)
11747                 bnxt_set_tpa(bp, false);
11748         for (i = 0; i < bp->rx_nr_rings; i++) {
11749                 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
11750                 struct bnxt_cp_ring_info *cpr;
11751                 int rc;
11752
11753                 if (!rxr->bnapi->in_reset)
11754                         continue;
11755
11756                 rc = bnxt_hwrm_rx_ring_reset(bp, i);
11757                 if (rc) {
11758                         if (rc == -EINVAL || rc == -EOPNOTSUPP)
11759                                 netdev_info_once(bp->dev, "RX ring reset not supported by firmware, falling back to global reset\n");
11760                         else
11761                                 netdev_warn(bp->dev, "RX ring reset failed, rc = %d, falling back to global reset\n",
11762                                             rc);
11763                         bnxt_reset_task(bp, true);
11764                         break;
11765                 }
11766                 bnxt_free_one_rx_ring_skbs(bp, i);
11767                 rxr->rx_prod = 0;
11768                 rxr->rx_agg_prod = 0;
11769                 rxr->rx_sw_agg_prod = 0;
11770                 rxr->rx_next_cons = 0;
11771                 rxr->bnapi->in_reset = false;
11772                 bnxt_alloc_one_rx_ring(bp, i);
11773                 cpr = &rxr->bnapi->cp_ring;
11774                 cpr->sw_stats.rx.rx_resets++;
11775                 if (bp->flags & BNXT_FLAG_AGG_RINGS)
11776                         bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
11777                 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
11778         }
11779         if (bp->flags & BNXT_FLAG_TPA)
11780                 bnxt_set_tpa(bp, true);
11781         bnxt_rtnl_unlock_sp(bp);
11782 }
11783
11784 static void bnxt_fw_reset_close(struct bnxt *bp)
11785 {
11786         bnxt_ulp_stop(bp);
11787         /* When firmware is in fatal state, quiesce device and disable
11788          * bus master to prevent any potential bad DMAs before freeing
11789          * kernel memory.
11790          */
11791         if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) {
11792                 u16 val = 0;
11793
11794                 pci_read_config_word(bp->pdev, PCI_SUBSYSTEM_ID, &val);
11795                 if (val == 0xffff)
11796                         bp->fw_reset_min_dsecs = 0;
11797                 bnxt_tx_disable(bp);
11798                 bnxt_disable_napi(bp);
11799                 bnxt_disable_int_sync(bp);
11800                 bnxt_free_irq(bp);
11801                 bnxt_clear_int_mode(bp);
11802                 pci_disable_device(bp->pdev);
11803         }
11804         __bnxt_close_nic(bp, true, false);
11805         bnxt_vf_reps_free(bp);
11806         bnxt_clear_int_mode(bp);
11807         bnxt_hwrm_func_drv_unrgtr(bp);
11808         if (pci_is_enabled(bp->pdev))
11809                 pci_disable_device(bp->pdev);
11810         bnxt_free_ctx_mem(bp);
11811         kfree(bp->ctx);
11812         bp->ctx = NULL;
11813 }
11814
11815 static bool is_bnxt_fw_ok(struct bnxt *bp)
11816 {
11817         struct bnxt_fw_health *fw_health = bp->fw_health;
11818         bool no_heartbeat = false, has_reset = false;
11819         u32 val;
11820
11821         val = bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
11822         if (val == fw_health->last_fw_heartbeat)
11823                 no_heartbeat = true;
11824
11825         val = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
11826         if (val != fw_health->last_fw_reset_cnt)
11827                 has_reset = true;
11828
11829         if (!no_heartbeat && has_reset)
11830                 return true;
11831
11832         return false;
11833 }
11834
11835 /* rtnl_lock is acquired before calling this function */
11836 static void bnxt_force_fw_reset(struct bnxt *bp)
11837 {
11838         struct bnxt_fw_health *fw_health = bp->fw_health;
11839         struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
11840         u32 wait_dsecs;
11841
11842         if (!test_bit(BNXT_STATE_OPEN, &bp->state) ||
11843             test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
11844                 return;
11845
11846         if (ptp) {
11847                 spin_lock_bh(&ptp->ptp_lock);
11848                 set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
11849                 spin_unlock_bh(&ptp->ptp_lock);
11850         } else {
11851                 set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
11852         }
11853         bnxt_fw_reset_close(bp);
11854         wait_dsecs = fw_health->master_func_wait_dsecs;
11855         if (fw_health->primary) {
11856                 if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU)
11857                         wait_dsecs = 0;
11858                 bp->fw_reset_state = BNXT_FW_RESET_STATE_RESET_FW;
11859         } else {
11860                 bp->fw_reset_timestamp = jiffies + wait_dsecs * HZ / 10;
11861                 wait_dsecs = fw_health->normal_func_wait_dsecs;
11862                 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
11863         }
11864
11865         bp->fw_reset_min_dsecs = fw_health->post_reset_wait_dsecs;
11866         bp->fw_reset_max_dsecs = fw_health->post_reset_max_wait_dsecs;
11867         bnxt_queue_fw_reset_work(bp, wait_dsecs * HZ / 10);
11868 }
11869
11870 void bnxt_fw_exception(struct bnxt *bp)
11871 {
11872         netdev_warn(bp->dev, "Detected firmware fatal condition, initiating reset\n");
11873         set_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
11874         bnxt_rtnl_lock_sp(bp);
11875         bnxt_force_fw_reset(bp);
11876         bnxt_rtnl_unlock_sp(bp);
11877 }
11878
11879 /* Returns the number of registered VFs, or 1 if VF configuration is pending, or
11880  * < 0 on error.
11881  */
11882 static int bnxt_get_registered_vfs(struct bnxt *bp)
11883 {
11884 #ifdef CONFIG_BNXT_SRIOV
11885         int rc;
11886
11887         if (!BNXT_PF(bp))
11888                 return 0;
11889
11890         rc = bnxt_hwrm_func_qcfg(bp);
11891         if (rc) {
11892                 netdev_err(bp->dev, "func_qcfg cmd failed, rc = %d\n", rc);
11893                 return rc;
11894         }
11895         if (bp->pf.registered_vfs)
11896                 return bp->pf.registered_vfs;
11897         if (bp->sriov_cfg)
11898                 return 1;
11899 #endif
11900         return 0;
11901 }
11902
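/* Begin a coordinated firmware reset.  If VFs are still registered, poll for
 * them to unregister first via BNXT_FW_RESET_STATE_POLL_VF; otherwise close
 * the device and let bnxt_fw_reset_task() drive the remaining reset states.
 */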
11903 void bnxt_fw_reset(struct bnxt *bp)
11904 {
11905         bnxt_rtnl_lock_sp(bp);
11906         if (test_bit(BNXT_STATE_OPEN, &bp->state) &&
11907             !test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
11908                 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
11909                 int n = 0, tmo;
11910
11911                 if (ptp) {
11912                         spin_lock_bh(&ptp->ptp_lock);
11913                         set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
11914                         spin_unlock_bh(&ptp->ptp_lock);
11915                 } else {
11916                         set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
11917                 }
11918                 if (bp->pf.active_vfs &&
11919                     !test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
11920                         n = bnxt_get_registered_vfs(bp);
11921                 if (n < 0) {
11922                         netdev_err(bp->dev, "Firmware reset aborted, rc = %d\n",
11923                                    n);
11924                         clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
11925                         dev_close(bp->dev);
11926                         goto fw_reset_exit;
11927                 } else if (n > 0) {
11928                         u16 vf_tmo_dsecs = n * 10;
11929
11930                         if (bp->fw_reset_max_dsecs < vf_tmo_dsecs)
11931                                 bp->fw_reset_max_dsecs = vf_tmo_dsecs;
11932                         bp->fw_reset_state =
11933                                 BNXT_FW_RESET_STATE_POLL_VF;
11934                         bnxt_queue_fw_reset_work(bp, HZ / 10);
11935                         goto fw_reset_exit;
11936                 }
11937                 bnxt_fw_reset_close(bp);
11938                 if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) {
11939                         bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW_DOWN;
11940                         tmo = HZ / 10;
11941                 } else {
11942                         bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
11943                         tmo = bp->fw_reset_min_dsecs * HZ / 10;
11944                 }
11945                 bnxt_queue_fw_reset_work(bp, tmo);
11946         }
11947 fw_reset_exit:
11948         bnxt_rtnl_unlock_sp(bp);
11949 }
11950
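/* P5 chips only: look for completion rings that have work pending but whose
 * consumer index has not moved since the last check, which may indicate a
 * missed interrupt.  Ring state is queried from firmware and the missed_irqs
 * counter is incremented.
 */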
11951 static void bnxt_chk_missed_irq(struct bnxt *bp)
11952 {
11953         int i;
11954
11955         if (!(bp->flags & BNXT_FLAG_CHIP_P5))
11956                 return;
11957
11958         for (i = 0; i < bp->cp_nr_rings; i++) {
11959                 struct bnxt_napi *bnapi = bp->bnapi[i];
11960                 struct bnxt_cp_ring_info *cpr;
11961                 u32 fw_ring_id;
11962                 int j;
11963
11964                 if (!bnapi)
11965                         continue;
11966
11967                 cpr = &bnapi->cp_ring;
11968                 for (j = 0; j < 2; j++) {
11969                         struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];
11970                         u32 val[2];
11971
11972                         if (!cpr2 || cpr2->has_more_work ||
11973                             !bnxt_has_work(bp, cpr2))
11974                                 continue;
11975
11976                         if (cpr2->cp_raw_cons != cpr2->last_cp_raw_cons) {
11977                                 cpr2->last_cp_raw_cons = cpr2->cp_raw_cons;
11978                                 continue;
11979                         }
11980                         fw_ring_id = cpr2->cp_ring_struct.fw_ring_id;
11981                         bnxt_dbg_hwrm_ring_info_get(bp,
11982                                 DBG_RING_INFO_GET_REQ_RING_TYPE_L2_CMPL,
11983                                 fw_ring_id, &val[0], &val[1]);
11984                         cpr->sw_stats.cmn.missed_irqs++;
11985                 }
11986         }
11987 }
11988
11989 static void bnxt_cfg_ntp_filters(struct bnxt *);
11990
11991 static void bnxt_init_ethtool_link_settings(struct bnxt *bp)
11992 {
11993         struct bnxt_link_info *link_info = &bp->link_info;
11994
11995         if (BNXT_AUTO_MODE(link_info->auto_mode)) {
11996                 link_info->autoneg = BNXT_AUTONEG_SPEED;
11997                 if (bp->hwrm_spec_code >= 0x10201) {
11998                         if (link_info->auto_pause_setting &
11999                             PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE)
12000                                 link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
12001                 } else {
12002                         link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
12003                 }
12004                 link_info->advertising = link_info->auto_link_speeds;
12005                 link_info->advertising_pam4 = link_info->auto_pam4_link_speeds;
12006         } else {
12007                 link_info->req_link_speed = link_info->force_link_speed;
12008                 link_info->req_signal_mode = BNXT_SIG_MODE_NRZ;
12009                 if (link_info->force_pam4_link_speed) {
12010                         link_info->req_link_speed =
12011                                 link_info->force_pam4_link_speed;
12012                         link_info->req_signal_mode = BNXT_SIG_MODE_PAM4;
12013                 }
12014                 link_info->req_duplex = link_info->duplex_setting;
12015         }
12016         if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL)
12017                 link_info->req_flow_ctrl =
12018                         link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH;
12019         else
12020                 link_info->req_flow_ctrl = link_info->force_pause_setting;
12021 }
12022
12023 static void bnxt_fw_echo_reply(struct bnxt *bp)
12024 {
12025         struct bnxt_fw_health *fw_health = bp->fw_health;
12026         struct hwrm_func_echo_response_input *req;
12027         int rc;
12028
12029         rc = hwrm_req_init(bp, req, HWRM_FUNC_ECHO_RESPONSE);
12030         if (rc)
12031                 return;
12032         req->event_data1 = cpu_to_le32(fw_health->echo_req_data1);
12033         req->event_data2 = cpu_to_le32(fw_health->echo_req_data2);
12034         hwrm_req_send(bp, req);
12035 }
12036
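/* Slow path work handler.  Each BNXT_*_SP_EVENT bit queued via
 * bnxt_queue_sp_work() is tested and cleared here.  Reset handlers run last
 * because they temporarily drop BNXT_STATE_IN_SP_TASK to take the rtnl lock.
 */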
12037 static void bnxt_sp_task(struct work_struct *work)
12038 {
12039         struct bnxt *bp = container_of(work, struct bnxt, sp_task);
12040
12041         set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
12042         smp_mb__after_atomic();
12043         if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
12044                 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
12045                 return;
12046         }
12047
12048         if (test_and_clear_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event))
12049                 bnxt_cfg_rx_mode(bp);
12050
12051         if (test_and_clear_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event))
12052                 bnxt_cfg_ntp_filters(bp);
12053         if (test_and_clear_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event))
12054                 bnxt_hwrm_exec_fwd_req(bp);
12055         if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event)) {
12056                 bnxt_hwrm_port_qstats(bp, 0);
12057                 bnxt_hwrm_port_qstats_ext(bp, 0);
12058                 bnxt_accumulate_all_stats(bp);
12059         }
12060
12061         if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) {
12062                 int rc;
12063
12064                 mutex_lock(&bp->link_lock);
12065                 if (test_and_clear_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT,
12066                                        &bp->sp_event))
12067                         bnxt_hwrm_phy_qcaps(bp);
12068
12069                 rc = bnxt_update_link(bp, true);
12070                 if (rc)
12071                         netdev_err(bp->dev, "SP task can't update link (rc: %x)\n",
12072                                    rc);
12073
12074                 if (test_and_clear_bit(BNXT_LINK_CFG_CHANGE_SP_EVENT,
12075                                        &bp->sp_event))
12076                         bnxt_init_ethtool_link_settings(bp);
12077                 mutex_unlock(&bp->link_lock);
12078         }
12079         if (test_and_clear_bit(BNXT_UPDATE_PHY_SP_EVENT, &bp->sp_event)) {
12080                 int rc;
12081
12082                 mutex_lock(&bp->link_lock);
12083                 rc = bnxt_update_phy_setting(bp);
12084                 mutex_unlock(&bp->link_lock);
12085                 if (rc) {
12086                         netdev_warn(bp->dev, "update phy settings retry failed\n");
12087                 } else {
12088                         bp->link_info.phy_retry = false;
12089                         netdev_info(bp->dev, "update phy settings retry succeeded\n");
12090                 }
12091         }
12092         if (test_and_clear_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event)) {
12093                 mutex_lock(&bp->link_lock);
12094                 bnxt_get_port_module_status(bp);
12095                 mutex_unlock(&bp->link_lock);
12096         }
12097
12098         if (test_and_clear_bit(BNXT_FLOW_STATS_SP_EVENT, &bp->sp_event))
12099                 bnxt_tc_flow_stats_work(bp);
12100
12101         if (test_and_clear_bit(BNXT_RING_COAL_NOW_SP_EVENT, &bp->sp_event))
12102                 bnxt_chk_missed_irq(bp);
12103
12104         if (test_and_clear_bit(BNXT_FW_ECHO_REQUEST_SP_EVENT, &bp->sp_event))
12105                 bnxt_fw_echo_reply(bp);
12106
12107         /* The functions below will clear BNXT_STATE_IN_SP_TASK.  They
12108          * must be the last ones called before exiting.
12109          */
12110         if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event))
12111                 bnxt_reset(bp, false);
12112
12113         if (test_and_clear_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event))
12114                 bnxt_reset(bp, true);
12115
12116         if (test_and_clear_bit(BNXT_RST_RING_SP_EVENT, &bp->sp_event))
12117                 bnxt_rx_ring_reset(bp);
12118
12119         if (test_and_clear_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &bp->sp_event)) {
12120                 if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state) ||
12121                     test_bit(BNXT_STATE_FW_NON_FATAL_COND, &bp->state))
12122                         bnxt_devlink_health_fw_report(bp);
12123                 else
12124                         bnxt_fw_reset(bp);
12125         }
12126
12127         if (test_and_clear_bit(BNXT_FW_EXCEPTION_SP_EVENT, &bp->sp_event)) {
12128                 if (!is_bnxt_fw_ok(bp))
12129                         bnxt_devlink_health_fw_report(bp);
12130         }
12131
12132         smp_mb__before_atomic();
12133         clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
12134 }
12135
12136 /* Under rtnl_lock */
12137 int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs,
12138                      int tx_xdp)
12139 {
12140         int max_rx, max_tx, tx_sets = 1;
12141         int tx_rings_needed, stats;
12142         int rx_rings = rx;
12143         int cp, vnics, rc;
12144
12145         if (tcs)
12146                 tx_sets = tcs;
12147
12148         rc = bnxt_get_max_rings(bp, &max_rx, &max_tx, sh);
12149         if (rc)
12150                 return rc;
12151
12152         if (max_rx < rx)
12153                 return -ENOMEM;
12154
12155         tx_rings_needed = tx * tx_sets + tx_xdp;
12156         if (max_tx < tx_rings_needed)
12157                 return -ENOMEM;
12158
12159         vnics = 1;
12160         if ((bp->flags & (BNXT_FLAG_RFS | BNXT_FLAG_CHIP_P5)) == BNXT_FLAG_RFS)
12161                 vnics += rx_rings;
12162
12163         if (bp->flags & BNXT_FLAG_AGG_RINGS)
12164                 rx_rings <<= 1;
12165         cp = sh ? max_t(int, tx_rings_needed, rx) : tx_rings_needed + rx;
12166         stats = cp;
12167         if (BNXT_NEW_RM(bp)) {
12168                 cp += bnxt_get_ulp_msix_num(bp);
12169                 stats += bnxt_get_ulp_stat_ctxs(bp);
12170         }
12171         return bnxt_hwrm_check_rings(bp, tx_rings_needed, rx_rings, rx, cp,
12172                                      stats, vnics);
12173 }
12174
12175 static void bnxt_unmap_bars(struct bnxt *bp, struct pci_dev *pdev)
12176 {
12177         if (bp->bar2) {
12178                 pci_iounmap(pdev, bp->bar2);
12179                 bp->bar2 = NULL;
12180         }
12181
12182         if (bp->bar1) {
12183                 pci_iounmap(pdev, bp->bar1);
12184                 bp->bar1 = NULL;
12185         }
12186
12187         if (bp->bar0) {
12188                 pci_iounmap(pdev, bp->bar0);
12189                 bp->bar0 = NULL;
12190         }
12191 }
12192
12193 static void bnxt_cleanup_pci(struct bnxt *bp)
12194 {
12195         bnxt_unmap_bars(bp, bp->pdev);
12196         pci_release_regions(bp->pdev);
12197         if (pci_is_enabled(bp->pdev))
12198                 pci_disable_device(bp->pdev);
12199 }
12200
12201 static void bnxt_init_dflt_coal(struct bnxt *bp)
12202 {
12203         struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
12204         struct bnxt_coal *coal;
12205         u16 flags = 0;
12206
12207         if (coal_cap->cmpl_params &
12208             RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_TIMER_RESET)
12209                 flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;
12210
12211         /* Tick values are in microseconds.
12212          * 1 coal_buf x bufs_per_record = 1 completion record.
12213          */
12214         coal = &bp->rx_coal;
12215         coal->coal_ticks = 10;
12216         coal->coal_bufs = 30;
12217         coal->coal_ticks_irq = 1;
12218         coal->coal_bufs_irq = 2;
12219         coal->idle_thresh = 50;
12220         coal->bufs_per_record = 2;
12221         coal->budget = 64;              /* NAPI budget */
12222         coal->flags = flags;
12223
12224         coal = &bp->tx_coal;
12225         coal->coal_ticks = 28;
12226         coal->coal_bufs = 30;
12227         coal->coal_ticks_irq = 2;
12228         coal->coal_bufs_irq = 2;
12229         coal->bufs_per_record = 1;
12230         coal->flags = flags;
12231
12232         bp->stats_coal_ticks = BNXT_DEF_STATS_COAL_TICKS;
12233 }
12234
12235 static int bnxt_fw_init_one_p1(struct bnxt *bp)
12236 {
12237         int rc;
12238
12239         bp->fw_cap = 0;
12240         rc = bnxt_hwrm_ver_get(bp);
12241         bnxt_try_map_fw_health_reg(bp);
12242         if (rc) {
12243                 rc = bnxt_try_recover_fw(bp);
12244                 if (rc)
12245                         return rc;
12246                 rc = bnxt_hwrm_ver_get(bp);
12247                 if (rc)
12248                         return rc;
12249         }
12250
12251         bnxt_nvm_cfg_ver_get(bp);
12252
12253         rc = bnxt_hwrm_func_reset(bp);
12254         if (rc)
12255                 return -ENODEV;
12256
12257         bnxt_hwrm_fw_set_time(bp);
12258         return 0;
12259 }
12260
12261 static int bnxt_fw_init_one_p2(struct bnxt *bp)
12262 {
12263         int rc;
12264
12265         /* Get the MAX capabilities for this function */
12266         rc = bnxt_hwrm_func_qcaps(bp);
12267         if (rc) {
12268                 netdev_err(bp->dev, "hwrm query capability failure rc: %x\n",
12269                            rc);
12270                 return -ENODEV;
12271         }
12272
12273         rc = bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(bp);
12274         if (rc)
12275                 netdev_warn(bp->dev, "hwrm query adv flow mgnt failure rc: %d\n",
12276                             rc);
12277
12278         if (bnxt_alloc_fw_health(bp)) {
12279                 netdev_warn(bp->dev, "no memory for firmware error recovery\n");
12280         } else {
12281                 rc = bnxt_hwrm_error_recovery_qcfg(bp);
12282                 if (rc)
12283                         netdev_warn(bp->dev, "hwrm query error recovery failure rc: %d\n",
12284                                     rc);
12285         }
12286
12287         rc = bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, false);
12288         if (rc)
12289                 return -ENODEV;
12290
12291         bnxt_hwrm_func_qcfg(bp);
12292         bnxt_hwrm_vnic_qcaps(bp);
12293         bnxt_hwrm_port_led_qcaps(bp);
12294         bnxt_ethtool_init(bp);
12295         if (bp->fw_cap & BNXT_FW_CAP_PTP)
12296                 __bnxt_hwrm_ptp_qcfg(bp);
12297         bnxt_dcb_init(bp);
12298         return 0;
12299 }
12300
12301 static void bnxt_set_dflt_rss_hash_type(struct bnxt *bp)
12302 {
12303         bp->flags &= ~BNXT_FLAG_UDP_RSS_CAP;
12304         bp->rss_hash_cfg = VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4 |
12305                            VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4 |
12306                            VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 |
12307                            VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6;
12308         if (bp->fw_cap & BNXT_FW_CAP_RSS_HASH_TYPE_DELTA)
12309                 bp->rss_hash_delta = bp->rss_hash_cfg;
12310         if (BNXT_CHIP_P4_PLUS(bp) && bp->hwrm_spec_code >= 0x10501) {
12311                 bp->flags |= BNXT_FLAG_UDP_RSS_CAP;
12312                 bp->rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4 |
12313                                     VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6;
12314         }
12315 }
12316
12317 static void bnxt_set_dflt_rfs(struct bnxt *bp)
12318 {
12319         struct net_device *dev = bp->dev;
12320
12321         dev->hw_features &= ~NETIF_F_NTUPLE;
12322         dev->features &= ~NETIF_F_NTUPLE;
12323         bp->flags &= ~BNXT_FLAG_RFS;
12324         if (bnxt_rfs_supported(bp)) {
12325                 dev->hw_features |= NETIF_F_NTUPLE;
12326                 if (bnxt_rfs_capable(bp)) {
12327                         bp->flags |= BNXT_FLAG_RFS;
12328                         dev->features |= NETIF_F_NTUPLE;
12329                 }
12330         }
12331 }
12332
12333 static void bnxt_fw_init_one_p3(struct bnxt *bp)
12334 {
12335         struct pci_dev *pdev = bp->pdev;
12336
12337         bnxt_set_dflt_rss_hash_type(bp);
12338         bnxt_set_dflt_rfs(bp);
12339
12340         bnxt_get_wol_settings(bp);
12341         if (bp->flags & BNXT_FLAG_WOL_CAP)
12342                 device_set_wakeup_enable(&pdev->dev, bp->wol);
12343         else
12344                 device_set_wakeup_capable(&pdev->dev, false);
12345
12346         bnxt_hwrm_set_cache_line_size(bp, cache_line_size());
12347         bnxt_hwrm_coal_params_qcaps(bp);
12348 }
12349
12350 static int bnxt_probe_phy(struct bnxt *bp, bool fw_dflt);
12351
12352 int bnxt_fw_init_one(struct bnxt *bp)
12353 {
12354         int rc;
12355
12356         rc = bnxt_fw_init_one_p1(bp);
12357         if (rc) {
12358                 netdev_err(bp->dev, "Firmware init phase 1 failed\n");
12359                 return rc;
12360         }
12361         rc = bnxt_fw_init_one_p2(bp);
12362         if (rc) {
12363                 netdev_err(bp->dev, "Firmware init phase 2 failed\n");
12364                 return rc;
12365         }
12366         rc = bnxt_probe_phy(bp, false);
12367         if (rc)
12368                 return rc;
12369         rc = bnxt_approve_mac(bp, bp->dev->dev_addr, false);
12370         if (rc)
12371                 return rc;
12372
12373         bnxt_fw_init_one_p3(bp);
12374         return 0;
12375 }
12376
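/* Execute one step of the firmware-described reset sequence: write the value
 * for @reg_idx to a config space, GRC window, or BAR register and honor the
 * per-step delay.
 */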
12377 static void bnxt_fw_reset_writel(struct bnxt *bp, int reg_idx)
12378 {
12379         struct bnxt_fw_health *fw_health = bp->fw_health;
12380         u32 reg = fw_health->fw_reset_seq_regs[reg_idx];
12381         u32 val = fw_health->fw_reset_seq_vals[reg_idx];
12382         u32 reg_type, reg_off, delay_msecs;
12383
12384         delay_msecs = fw_health->fw_reset_seq_delay_msec[reg_idx];
12385         reg_type = BNXT_FW_HEALTH_REG_TYPE(reg);
12386         reg_off = BNXT_FW_HEALTH_REG_OFF(reg);
12387         switch (reg_type) {
12388         case BNXT_FW_HEALTH_REG_TYPE_CFG:
12389                 pci_write_config_dword(bp->pdev, reg_off, val);
12390                 break;
12391         case BNXT_FW_HEALTH_REG_TYPE_GRC:
12392                 writel(reg_off & BNXT_GRC_BASE_MASK,
12393                        bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 4);
12394                 reg_off = (reg_off & BNXT_GRC_OFFSET_MASK) + 0x2000;
12395                 fallthrough;
12396         case BNXT_FW_HEALTH_REG_TYPE_BAR0:
12397                 writel(val, bp->bar0 + reg_off);
12398                 break;
12399         case BNXT_FW_HEALTH_REG_TYPE_BAR1:
12400                 writel(val, bp->bar1 + reg_off);
12401                 break;
12402         }
12403         if (delay_msecs) {
12404                 pci_read_config_dword(bp->pdev, 0, &val);
12405                 msleep(delay_msecs);
12406         }
12407 }
12408
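/* Query firmware (HWRM_FUNC_QCFG) to check whether a hot reset is currently
 * allowed for this function.  Defaults to true when the capability or the
 * query is unavailable, since firmware will enforce the restriction anyway.
 */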
12409 bool bnxt_hwrm_reset_permitted(struct bnxt *bp)
12410 {
12411         struct hwrm_func_qcfg_output *resp;
12412         struct hwrm_func_qcfg_input *req;
12413         bool result = true; /* firmware will enforce if unknown */
12414
12415         if (~bp->fw_cap & BNXT_FW_CAP_HOT_RESET_IF)
12416                 return result;
12417
12418         if (hwrm_req_init(bp, req, HWRM_FUNC_QCFG))
12419                 return result;
12420
12421         req->fid = cpu_to_le16(0xffff);
12422         resp = hwrm_req_hold(bp, req);
12423         if (!hwrm_req_send(bp, req))
12424                 result = !!(le16_to_cpu(resp->flags) &
12425                             FUNC_QCFG_RESP_FLAGS_HOT_RESET_ALLOWED);
12426         hwrm_req_drop(bp, req);
12427         return result;
12428 }
12429
12430 static void bnxt_reset_all(struct bnxt *bp)
12431 {
12432         struct bnxt_fw_health *fw_health = bp->fw_health;
12433         int i, rc;
12434
12435         if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) {
12436                 bnxt_fw_reset_via_optee(bp);
12437                 bp->fw_reset_timestamp = jiffies;
12438                 return;
12439         }
12440
12441         if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_HOST) {
12442                 for (i = 0; i < fw_health->fw_reset_seq_cnt; i++)
12443                         bnxt_fw_reset_writel(bp, i);
12444         } else if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU) {
12445                 struct hwrm_fw_reset_input *req;
12446
12447                 rc = hwrm_req_init(bp, req, HWRM_FW_RESET);
12448                 if (!rc) {
12449                         req->target_id = cpu_to_le16(HWRM_TARGET_ID_KONG);
12450                         req->embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP;
12451                         req->selfrst_status = FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP;
12452                         req->flags = FW_RESET_REQ_FLAGS_RESET_GRACEFUL;
12453                         rc = hwrm_req_send(bp, req);
12454                 }
12455                 if (rc != -ENODEV)
12456                         netdev_warn(bp->dev, "Unable to reset FW rc=%d\n", rc);
12457         }
12458         bp->fw_reset_timestamp = jiffies;
12459 }
12460
12461 static bool bnxt_fw_reset_timeout(struct bnxt *bp)
12462 {
12463         return time_after(jiffies, bp->fw_reset_timestamp +
12464                           (bp->fw_reset_max_dsecs * HZ / 10));
12465 }
12466
12467 static void bnxt_fw_reset_abort(struct bnxt *bp, int rc)
12468 {
12469         clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
12470         if (bp->fw_reset_state != BNXT_FW_RESET_STATE_POLL_VF) {
12471                 bnxt_ulp_start(bp, rc);
12472                 bnxt_dl_health_fw_status_update(bp, false);
12473         }
12474         bp->fw_reset_state = 0;
12475         dev_close(bp->dev);
12476 }
12477
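/* Delayed work that drives the firmware reset state machine: wait for
 * VFs to unregister, close the device, reset or wait for the firmware,
 * re-enable the PCI device, poll until firmware responds and finally
 * reopen the NIC.  Each state re-queues the work until it completes,
 * times out or aborts.
 */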
12478 static void bnxt_fw_reset_task(struct work_struct *work)
12479 {
12480         struct bnxt *bp = container_of(work, struct bnxt, fw_reset_task.work);
12481         int rc = 0;
12482
12483         if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
12484                 netdev_err(bp->dev, "bnxt_fw_reset_task() called when not in fw reset mode!\n");
12485                 return;
12486         }
12487
12488         switch (bp->fw_reset_state) {
12489         case BNXT_FW_RESET_STATE_POLL_VF: {
12490                 int n = bnxt_get_registered_vfs(bp);
12491                 int tmo;
12492
12493                 if (n < 0) {
12494                         netdev_err(bp->dev, "Firmware reset aborted, subsequent func_qcfg cmd failed, rc = %d, %d msecs since reset timestamp\n",
12495                                    n, jiffies_to_msecs(jiffies -
12496                                    bp->fw_reset_timestamp));
12497                         goto fw_reset_abort;
12498                 } else if (n > 0) {
12499                         if (bnxt_fw_reset_timeout(bp)) {
12500                                 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
12501                                 bp->fw_reset_state = 0;
12502                                 netdev_err(bp->dev, "Firmware reset aborted, bnxt_get_registered_vfs() returns %d\n",
12503                                            n);
12504                                 return;
12505                         }
12506                         bnxt_queue_fw_reset_work(bp, HZ / 10);
12507                         return;
12508                 }
12509                 bp->fw_reset_timestamp = jiffies;
12510                 rtnl_lock();
12511                 if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) {
12512                         bnxt_fw_reset_abort(bp, rc);
12513                         rtnl_unlock();
12514                         return;
12515                 }
12516                 bnxt_fw_reset_close(bp);
12517                 if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) {
12518                         bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW_DOWN;
12519                         tmo = HZ / 10;
12520                 } else {
12521                         bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
12522                         tmo = bp->fw_reset_min_dsecs * HZ / 10;
12523                 }
12524                 rtnl_unlock();
12525                 bnxt_queue_fw_reset_work(bp, tmo);
12526                 return;
12527         }
12528         case BNXT_FW_RESET_STATE_POLL_FW_DOWN: {
12529                 u32 val;
12530
12531                 val = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
12532                 if (!(val & BNXT_FW_STATUS_SHUTDOWN) &&
12533                     !bnxt_fw_reset_timeout(bp)) {
12534                         bnxt_queue_fw_reset_work(bp, HZ / 5);
12535                         return;
12536                 }
12537
12538                 if (!bp->fw_health->primary) {
12539                         u32 wait_dsecs = bp->fw_health->normal_func_wait_dsecs;
12540
12541                         bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
12542                         bnxt_queue_fw_reset_work(bp, wait_dsecs * HZ / 10);
12543                         return;
12544                 }
12545                 bp->fw_reset_state = BNXT_FW_RESET_STATE_RESET_FW;
12546         }
12547                 fallthrough;
12548         case BNXT_FW_RESET_STATE_RESET_FW:
12549                 bnxt_reset_all(bp);
12550                 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
12551                 bnxt_queue_fw_reset_work(bp, bp->fw_reset_min_dsecs * HZ / 10);
12552                 return;
12553         case BNXT_FW_RESET_STATE_ENABLE_DEV:
12554                 bnxt_inv_fw_health_reg(bp);
12555                 if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state) &&
12556                     !bp->fw_reset_min_dsecs) {
12557                         u16 val;
12558
12559                         pci_read_config_word(bp->pdev, PCI_SUBSYSTEM_ID, &val);
12560                         if (val == 0xffff) {
12561                                 if (bnxt_fw_reset_timeout(bp)) {
12562                                         netdev_err(bp->dev, "Firmware reset aborted, PCI config space invalid\n");
12563                                         rc = -ETIMEDOUT;
12564                                         goto fw_reset_abort;
12565                                 }
12566                                 bnxt_queue_fw_reset_work(bp, HZ / 1000);
12567                                 return;
12568                         }
12569                 }
12570                 clear_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
12571                 clear_bit(BNXT_STATE_FW_NON_FATAL_COND, &bp->state);
12572                 if (test_and_clear_bit(BNXT_STATE_FW_ACTIVATE_RESET, &bp->state) &&
12573                     !test_bit(BNXT_STATE_FW_ACTIVATE, &bp->state))
12574                         bnxt_dl_remote_reload(bp);
12575                 if (pci_enable_device(bp->pdev)) {
12576                         netdev_err(bp->dev, "Cannot re-enable PCI device\n");
12577                         rc = -ENODEV;
12578                         goto fw_reset_abort;
12579                 }
12580                 pci_set_master(bp->pdev);
12581                 bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW;
12582                 fallthrough;
12583         case BNXT_FW_RESET_STATE_POLL_FW:
12584                 bp->hwrm_cmd_timeout = SHORT_HWRM_CMD_TIMEOUT;
12585                 rc = bnxt_hwrm_poll(bp);
12586                 if (rc) {
12587                         if (bnxt_fw_reset_timeout(bp)) {
12588                                 netdev_err(bp->dev, "Firmware reset aborted\n");
12589                                 goto fw_reset_abort_status;
12590                         }
12591                         bnxt_queue_fw_reset_work(bp, HZ / 5);
12592                         return;
12593                 }
12594                 bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT;
12595                 bp->fw_reset_state = BNXT_FW_RESET_STATE_OPENING;
12596                 fallthrough;
12597         case BNXT_FW_RESET_STATE_OPENING:
12598                 while (!rtnl_trylock()) {
12599                         bnxt_queue_fw_reset_work(bp, HZ / 10);
12600                         return;
12601                 }
12602                 rc = bnxt_open(bp->dev);
12603                 if (rc) {
12604                         netdev_err(bp->dev, "bnxt_open() failed during FW reset\n");
12605                         bnxt_fw_reset_abort(bp, rc);
12606                         rtnl_unlock();
12607                         return;
12608                 }
12609
12610                 if ((bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) &&
12611                     bp->fw_health->enabled) {
12612                         bp->fw_health->last_fw_reset_cnt =
12613                                 bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
12614                 }
12615                 bp->fw_reset_state = 0;
12616                 /* Make sure fw_reset_state is 0 before clearing the flag */
12617                 smp_mb__before_atomic();
12618                 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
12619                 bnxt_ulp_start(bp, 0);
12620                 bnxt_reenable_sriov(bp);
12621                 bnxt_vf_reps_alloc(bp);
12622                 bnxt_vf_reps_open(bp);
12623                 bnxt_ptp_reapply_pps(bp);
12624                 clear_bit(BNXT_STATE_FW_ACTIVATE, &bp->state);
12625                 if (test_and_clear_bit(BNXT_STATE_RECOVER, &bp->state)) {
12626                         bnxt_dl_health_fw_recovery_done(bp);
12627                         bnxt_dl_health_fw_status_update(bp, true);
12628                 }
12629                 rtnl_unlock();
12630                 break;
12631         }
12632         return;
12633
12634 fw_reset_abort_status:
12635         if (bp->fw_health->status_reliable ||
12636             (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)) {
12637                 u32 sts = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
12638
12639                 netdev_err(bp->dev, "fw_health_status 0x%x\n", sts);
12640         }
12641 fw_reset_abort:
12642         rtnl_lock();
12643         bnxt_fw_reset_abort(bp, rc);
12644         rtnl_unlock();
12645 }
12646
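/* One-time PCI/board setup during probe: enable the device, claim its
 * regions, set the DMA mask, map BAR 0 and BAR 4, and initialize the
 * work items, locks, timer and default ring sizes.
 */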
12647 static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev)
12648 {
12649         int rc;
12650         struct bnxt *bp = netdev_priv(dev);
12651
12652         SET_NETDEV_DEV(dev, &pdev->dev);
12653
12654         /* enable device (incl. PCI PM wakeup), and bus-mastering */
12655         rc = pci_enable_device(pdev);
12656         if (rc) {
12657                 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
12658                 goto init_err;
12659         }
12660
12661         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
12662                 dev_err(&pdev->dev,
12663                         "Cannot find PCI device base address, aborting\n");
12664                 rc = -ENODEV;
12665                 goto init_err_disable;
12666         }
12667
12668         rc = pci_request_regions(pdev, DRV_MODULE_NAME);
12669         if (rc) {
12670                 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
12671                 goto init_err_disable;
12672         }
12673
12674         if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) != 0 &&
12675             dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
12676                 dev_err(&pdev->dev, "System does not support DMA, aborting\n");
12677                 rc = -EIO;
12678                 goto init_err_release;
12679         }
12680
12681         pci_set_master(pdev);
12682
12683         bp->dev = dev;
12684         bp->pdev = pdev;
12685
12686         /* Doorbell BAR bp->bar1 is mapped after bnxt_fw_init_one_p2()
12687          * determines the BAR size.
12688          */
12689         bp->bar0 = pci_ioremap_bar(pdev, 0);
12690         if (!bp->bar0) {
12691                 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
12692                 rc = -ENOMEM;
12693                 goto init_err_release;
12694         }
12695
12696         bp->bar2 = pci_ioremap_bar(pdev, 4);
12697         if (!bp->bar2) {
12698                 dev_err(&pdev->dev, "Cannot map bar4 registers, aborting\n");
12699                 rc = -ENOMEM;
12700                 goto init_err_release;
12701         }
12702
12703         INIT_WORK(&bp->sp_task, bnxt_sp_task);
12704         INIT_DELAYED_WORK(&bp->fw_reset_task, bnxt_fw_reset_task);
12705
12706         spin_lock_init(&bp->ntp_fltr_lock);
12707 #if BITS_PER_LONG == 32
12708         spin_lock_init(&bp->db_lock);
12709 #endif
12710
12711         bp->rx_ring_size = BNXT_DEFAULT_RX_RING_SIZE;
12712         bp->tx_ring_size = BNXT_DEFAULT_TX_RING_SIZE;
12713
12714         timer_setup(&bp->timer, bnxt_timer, 0);
12715         bp->current_interval = BNXT_TIMER_INTERVAL;
12716
12717         bp->vxlan_fw_dst_port_id = INVALID_HW_RING_ID;
12718         bp->nge_fw_dst_port_id = INVALID_HW_RING_ID;
12719
12720         clear_bit(BNXT_STATE_OPEN, &bp->state);
12721         return 0;
12722
12723 init_err_release:
12724         bnxt_unmap_bars(bp, pdev);
12725         pci_release_regions(pdev);
12726
12727 init_err_disable:
12728         pci_disable_device(pdev);
12729
12730 init_err:
12731         return rc;
12732 }
12733
12734 /* rtnl_lock held */
12735 static int bnxt_change_mac_addr(struct net_device *dev, void *p)
12736 {
12737         struct sockaddr *addr = p;
12738         struct bnxt *bp = netdev_priv(dev);
12739         int rc = 0;
12740
12741         if (!is_valid_ether_addr(addr->sa_data))
12742                 return -EADDRNOTAVAIL;
12743
12744         if (ether_addr_equal(addr->sa_data, dev->dev_addr))
12745                 return 0;
12746
12747         rc = bnxt_approve_mac(bp, addr->sa_data, true);
12748         if (rc)
12749                 return rc;
12750
12751         eth_hw_addr_set(dev, addr->sa_data);
12752         if (netif_running(dev)) {
12753                 bnxt_close_nic(bp, false, false);
12754                 rc = bnxt_open_nic(bp, false, false);
12755         }
12756
12757         return rc;
12758 }
12759
12760 /* rtnl_lock held */
12761 static int bnxt_change_mtu(struct net_device *dev, int new_mtu)
12762 {
12763         struct bnxt *bp = netdev_priv(dev);
12764
12765         if (netif_running(dev))
12766                 bnxt_close_nic(bp, true, false);
12767
12768         dev->mtu = new_mtu;
12769         bnxt_set_ring_params(bp);
12770
12771         if (netif_running(dev))
12772                 return bnxt_open_nic(bp, true, false);
12773
12774         return 0;
12775 }
12776
12777 int bnxt_setup_mq_tc(struct net_device *dev, u8 tc)
12778 {
12779         struct bnxt *bp = netdev_priv(dev);
12780         bool sh = false;
12781         int rc;
12782
12783         if (tc > bp->max_tc) {
12784                 netdev_err(dev, "Too many traffic classes requested: %d. Max supported is %d.\n",
12785                            tc, bp->max_tc);
12786                 return -EINVAL;
12787         }
12788
12789         if (netdev_get_num_tc(dev) == tc)
12790                 return 0;
12791
12792         if (bp->flags & BNXT_FLAG_SHARED_RINGS)
12793                 sh = true;
12794
12795         rc = bnxt_check_rings(bp, bp->tx_nr_rings_per_tc, bp->rx_nr_rings,
12796                               sh, tc, bp->tx_nr_rings_xdp);
12797         if (rc)
12798                 return rc;
12799
12800         /* Need to close the device and re-allocate hw resources */
12801         if (netif_running(bp->dev))
12802                 bnxt_close_nic(bp, true, false);
12803
12804         if (tc) {
12805                 bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tc;
12806                 netdev_set_num_tc(dev, tc);
12807         } else {
12808                 bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
12809                 netdev_reset_tc(dev);
12810         }
12811         bp->tx_nr_rings += bp->tx_nr_rings_xdp;
12812         bp->cp_nr_rings = sh ? max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
12813                                bp->tx_nr_rings + bp->rx_nr_rings;
12814
12815         if (netif_running(bp->dev))
12816                 return bnxt_open_nic(bp, true, false);
12817
12818         return 0;
12819 }
12820
12821 static int bnxt_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
12822                                   void *cb_priv)
12823 {
12824         struct bnxt *bp = cb_priv;
12825
12826         if (!bnxt_tc_flower_enabled(bp) ||
12827             !tc_cls_can_offload_and_chain0(bp->dev, type_data))
12828                 return -EOPNOTSUPP;
12829
12830         switch (type) {
12831         case TC_SETUP_CLSFLOWER:
12832                 return bnxt_tc_setup_flower(bp, bp->pf.fw_fid, type_data);
12833         default:
12834                 return -EOPNOTSUPP;
12835         }
12836 }
12837
12838 LIST_HEAD(bnxt_block_cb_list);
12839
12840 static int bnxt_setup_tc(struct net_device *dev, enum tc_setup_type type,
12841                          void *type_data)
12842 {
12843         struct bnxt *bp = netdev_priv(dev);
12844
12845         switch (type) {
12846         case TC_SETUP_BLOCK:
12847                 return flow_block_cb_setup_simple(type_data,
12848                                                   &bnxt_block_cb_list,
12849                                                   bnxt_setup_tc_block_cb,
12850                                                   bp, bp, true);
12851         case TC_SETUP_QDISC_MQPRIO: {
12852                 struct tc_mqprio_qopt *mqprio = type_data;
12853
12854                 mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
12855
12856                 return bnxt_setup_mq_tc(dev, mqprio->num_tc);
12857         }
12858         default:
12859                 return -EOPNOTSUPP;
12860         }
12861 }
12862
12863 #ifdef CONFIG_RFS_ACCEL
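/* Two ntuple filters match when the L3/L4 protocols, IP addresses,
 * ports, flow dissector control flags and MAC addresses are all equal.
 */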
12864 static bool bnxt_fltr_match(struct bnxt_ntuple_filter *f1,
12865                             struct bnxt_ntuple_filter *f2)
12866 {
12867         struct flow_keys *keys1 = &f1->fkeys;
12868         struct flow_keys *keys2 = &f2->fkeys;
12869
12870         if (keys1->basic.n_proto != keys2->basic.n_proto ||
12871             keys1->basic.ip_proto != keys2->basic.ip_proto)
12872                 return false;
12873
12874         if (keys1->basic.n_proto == htons(ETH_P_IP)) {
12875                 if (keys1->addrs.v4addrs.src != keys2->addrs.v4addrs.src ||
12876                     keys1->addrs.v4addrs.dst != keys2->addrs.v4addrs.dst)
12877                         return false;
12878         } else {
12879                 if (memcmp(&keys1->addrs.v6addrs.src, &keys2->addrs.v6addrs.src,
12880                            sizeof(keys1->addrs.v6addrs.src)) ||
12881                     memcmp(&keys1->addrs.v6addrs.dst, &keys2->addrs.v6addrs.dst,
12882                            sizeof(keys1->addrs.v6addrs.dst)))
12883                         return false;
12884         }
12885
12886         if (keys1->ports.ports == keys2->ports.ports &&
12887             keys1->control.flags == keys2->control.flags &&
12888             ether_addr_equal(f1->src_mac_addr, f2->src_mac_addr) &&
12889             ether_addr_equal(f1->dst_mac_addr, f2->dst_mac_addr))
12890                 return true;
12891
12892         return false;
12893 }
12894
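/* aRFS ndo_rx_flow_steer handler: dissect the flow, reject flow types
 * that cannot be steered (non-IPv4/IPv6, non-TCP/UDP, fragments,
 * unsupported encapsulations), return the existing filter ID if a
 * matching filter is already present, otherwise allocate a new ntuple
 * filter and queue sp_task to program it via HWRM.
 */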
12895 static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
12896                               u16 rxq_index, u32 flow_id)
12897 {
12898         struct bnxt *bp = netdev_priv(dev);
12899         struct bnxt_ntuple_filter *fltr, *new_fltr;
12900         struct flow_keys *fkeys;
12901         struct ethhdr *eth = (struct ethhdr *)skb_mac_header(skb);
12902         int rc = 0, idx, bit_id, l2_idx = 0;
12903         struct hlist_head *head;
12904         u32 flags;
12905
12906         if (!ether_addr_equal(dev->dev_addr, eth->h_dest)) {
12907                 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
12908                 int off = 0, j;
12909
12910                 netif_addr_lock_bh(dev);
12911                 for (j = 0; j < vnic->uc_filter_count; j++, off += ETH_ALEN) {
12912                         if (ether_addr_equal(eth->h_dest,
12913                                              vnic->uc_list + off)) {
12914                                 l2_idx = j + 1;
12915                                 break;
12916                         }
12917                 }
12918                 netif_addr_unlock_bh(dev);
12919                 if (!l2_idx)
12920                         return -EINVAL;
12921         }
12922         new_fltr = kzalloc(sizeof(*new_fltr), GFP_ATOMIC);
12923         if (!new_fltr)
12924                 return -ENOMEM;
12925
12926         fkeys = &new_fltr->fkeys;
12927         if (!skb_flow_dissect_flow_keys(skb, fkeys, 0)) {
12928                 rc = -EPROTONOSUPPORT;
12929                 goto err_free;
12930         }
12931
12932         if ((fkeys->basic.n_proto != htons(ETH_P_IP) &&
12933              fkeys->basic.n_proto != htons(ETH_P_IPV6)) ||
12934             ((fkeys->basic.ip_proto != IPPROTO_TCP) &&
12935              (fkeys->basic.ip_proto != IPPROTO_UDP))) {
12936                 rc = -EPROTONOSUPPORT;
12937                 goto err_free;
12938         }
12939         if (fkeys->basic.n_proto == htons(ETH_P_IPV6) &&
12940             bp->hwrm_spec_code < 0x10601) {
12941                 rc = -EPROTONOSUPPORT;
12942                 goto err_free;
12943         }
12944         flags = fkeys->control.flags;
12945         if (((flags & FLOW_DIS_ENCAPSULATION) &&
12946              bp->hwrm_spec_code < 0x10601) || (flags & FLOW_DIS_IS_FRAGMENT)) {
12947                 rc = -EPROTONOSUPPORT;
12948                 goto err_free;
12949         }
12950
12951         memcpy(new_fltr->dst_mac_addr, eth->h_dest, ETH_ALEN);
12952         memcpy(new_fltr->src_mac_addr, eth->h_source, ETH_ALEN);
12953
12954         idx = skb_get_hash_raw(skb) & BNXT_NTP_FLTR_HASH_MASK;
12955         head = &bp->ntp_fltr_hash_tbl[idx];
12956         rcu_read_lock();
12957         hlist_for_each_entry_rcu(fltr, head, hash) {
12958                 if (bnxt_fltr_match(fltr, new_fltr)) {
12959                         rc = fltr->sw_id;
12960                         rcu_read_unlock();
12961                         goto err_free;
12962                 }
12963         }
12964         rcu_read_unlock();
12965
12966         spin_lock_bh(&bp->ntp_fltr_lock);
12967         bit_id = bitmap_find_free_region(bp->ntp_fltr_bmap,
12968                                          BNXT_NTP_FLTR_MAX_FLTR, 0);
12969         if (bit_id < 0) {
12970                 spin_unlock_bh(&bp->ntp_fltr_lock);
12971                 rc = -ENOMEM;
12972                 goto err_free;
12973         }
12974
12975         new_fltr->sw_id = (u16)bit_id;
12976         new_fltr->flow_id = flow_id;
12977         new_fltr->l2_fltr_idx = l2_idx;
12978         new_fltr->rxq = rxq_index;
12979         hlist_add_head_rcu(&new_fltr->hash, head);
12980         bp->ntp_fltr_count++;
12981         spin_unlock_bh(&bp->ntp_fltr_lock);
12982
12983         bnxt_queue_sp_work(bp, BNXT_RX_NTP_FLTR_SP_EVENT);
12984
12985         return new_fltr->sw_id;
12986
12987 err_free:
12988         kfree(new_fltr);
12989         return rc;
12990 }
12991
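/* Called from sp_task to walk the ntuple filter table: program filters
 * that are not yet valid and free filters that failed or that RPS
 * reports as expired.
 */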
12992 static void bnxt_cfg_ntp_filters(struct bnxt *bp)
12993 {
12994         int i;
12995
12996         for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
12997                 struct hlist_head *head;
12998                 struct hlist_node *tmp;
12999                 struct bnxt_ntuple_filter *fltr;
13000                 int rc;
13001
13002                 head = &bp->ntp_fltr_hash_tbl[i];
13003                 hlist_for_each_entry_safe(fltr, tmp, head, hash) {
13004                         bool del = false;
13005
13006                         if (test_bit(BNXT_FLTR_VALID, &fltr->state)) {
13007                                 if (rps_may_expire_flow(bp->dev, fltr->rxq,
13008                                                         fltr->flow_id,
13009                                                         fltr->sw_id)) {
13010                                         bnxt_hwrm_cfa_ntuple_filter_free(bp,
13011                                                                          fltr);
13012                                         del = true;
13013                                 }
13014                         } else {
13015                                 rc = bnxt_hwrm_cfa_ntuple_filter_alloc(bp,
13016                                                                        fltr);
13017                                 if (rc)
13018                                         del = true;
13019                                 else
13020                                         set_bit(BNXT_FLTR_VALID, &fltr->state);
13021                         }
13022
13023                         if (del) {
13024                                 spin_lock_bh(&bp->ntp_fltr_lock);
13025                                 hlist_del_rcu(&fltr->hash);
13026                                 bp->ntp_fltr_count--;
13027                                 spin_unlock_bh(&bp->ntp_fltr_lock);
13028                                 synchronize_rcu();
13029                                 clear_bit(fltr->sw_id, bp->ntp_fltr_bmap);
13030                                 kfree(fltr);
13031                         }
13032                 }
13033         }
13034         if (test_and_clear_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event))
13035                 netdev_info(bp->dev, "Received PF driver unload event!\n");
13036 }
13037
13038 #else
13039
13040 static void bnxt_cfg_ntp_filters(struct bnxt *bp)
13041 {
13042 }
13043
13044 #endif /* CONFIG_RFS_ACCEL */
13045
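/* udp_tunnel_nic callbacks.  Note that the *_FREE_REQ_TUNNEL_TYPE_*
 * values are used for both the alloc and free requests; the tunnel
 * type encoding appears to be common to the two HWRM commands.
 */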
13046 static int bnxt_udp_tunnel_set_port(struct net_device *netdev, unsigned int table,
13047                                     unsigned int entry, struct udp_tunnel_info *ti)
13048 {
13049         struct bnxt *bp = netdev_priv(netdev);
13050         unsigned int cmd;
13051
13052         if (ti->type == UDP_TUNNEL_TYPE_VXLAN)
13053                 cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN;
13054         else
13055                 cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE;
13056
13057         return bnxt_hwrm_tunnel_dst_port_alloc(bp, ti->port, cmd);
13058 }
13059
13060 static int bnxt_udp_tunnel_unset_port(struct net_device *netdev, unsigned int table,
13061                                       unsigned int entry, struct udp_tunnel_info *ti)
13062 {
13063         struct bnxt *bp = netdev_priv(netdev);
13064         unsigned int cmd;
13065
13066         if (ti->type == UDP_TUNNEL_TYPE_VXLAN)
13067                 cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN;
13068         else
13069                 cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE;
13070
13071         return bnxt_hwrm_tunnel_dst_port_free(bp, cmd);
13072 }
13073
13074 static const struct udp_tunnel_nic_info bnxt_udp_tunnels = {
13075         .set_port       = bnxt_udp_tunnel_set_port,
13076         .unset_port     = bnxt_udp_tunnel_unset_port,
13077         .flags          = UDP_TUNNEL_NIC_INFO_MAY_SLEEP |
13078                           UDP_TUNNEL_NIC_INFO_OPEN_ONLY,
13079         .tables         = {
13080                 { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN,  },
13081                 { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, },
13082         },
13083 };
13084
13085 static int bnxt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
13086                                struct net_device *dev, u32 filter_mask,
13087                                int nlflags)
13088 {
13089         struct bnxt *bp = netdev_priv(dev);
13090
13091         return ndo_dflt_bridge_getlink(skb, pid, seq, dev, bp->br_mode, 0, 0,
13092                                        nlflags, filter_mask, NULL);
13093 }
13094
13095 static int bnxt_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
13096                                u16 flags, struct netlink_ext_ack *extack)
13097 {
13098         struct bnxt *bp = netdev_priv(dev);
13099         struct nlattr *attr, *br_spec;
13100         int rem, rc = 0;
13101
13102         if (bp->hwrm_spec_code < 0x10708 || !BNXT_SINGLE_PF(bp))
13103                 return -EOPNOTSUPP;
13104
13105         br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
13106         if (!br_spec)
13107                 return -EINVAL;
13108
13109         nla_for_each_nested(attr, br_spec, rem) {
13110                 u16 mode;
13111
13112                 if (nla_type(attr) != IFLA_BRIDGE_MODE)
13113                         continue;
13114
13115                 mode = nla_get_u16(attr);
13116                 if (mode == bp->br_mode)
13117                         break;
13118
13119                 rc = bnxt_hwrm_set_br_mode(bp, mode);
13120                 if (!rc)
13121                         bp->br_mode = mode;
13122                 break;
13123         }
13124         return rc;
13125 }
13126
13127 int bnxt_get_port_parent_id(struct net_device *dev,
13128                             struct netdev_phys_item_id *ppid)
13129 {
13130         struct bnxt *bp = netdev_priv(dev);
13131
13132         if (bp->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV)
13133                 return -EOPNOTSUPP;
13134
13135         /* The PF and its VF-reps only support the switchdev framework */
13136         if (!BNXT_PF(bp) || !(bp->flags & BNXT_FLAG_DSN_VALID))
13137                 return -EOPNOTSUPP;
13138
13139         ppid->id_len = sizeof(bp->dsn);
13140         memcpy(ppid->id, bp->dsn, ppid->id_len);
13141
13142         return 0;
13143 }
13144
13145 static const struct net_device_ops bnxt_netdev_ops = {
13146         .ndo_open               = bnxt_open,
13147         .ndo_start_xmit         = bnxt_start_xmit,
13148         .ndo_stop               = bnxt_close,
13149         .ndo_get_stats64        = bnxt_get_stats64,
13150         .ndo_set_rx_mode        = bnxt_set_rx_mode,
13151         .ndo_eth_ioctl          = bnxt_ioctl,
13152         .ndo_validate_addr      = eth_validate_addr,
13153         .ndo_set_mac_address    = bnxt_change_mac_addr,
13154         .ndo_change_mtu         = bnxt_change_mtu,
13155         .ndo_fix_features       = bnxt_fix_features,
13156         .ndo_set_features       = bnxt_set_features,
13157         .ndo_features_check     = bnxt_features_check,
13158         .ndo_tx_timeout         = bnxt_tx_timeout,
13159 #ifdef CONFIG_BNXT_SRIOV
13160         .ndo_get_vf_config      = bnxt_get_vf_config,
13161         .ndo_set_vf_mac         = bnxt_set_vf_mac,
13162         .ndo_set_vf_vlan        = bnxt_set_vf_vlan,
13163         .ndo_set_vf_rate        = bnxt_set_vf_bw,
13164         .ndo_set_vf_link_state  = bnxt_set_vf_link_state,
13165         .ndo_set_vf_spoofchk    = bnxt_set_vf_spoofchk,
13166         .ndo_set_vf_trust       = bnxt_set_vf_trust,
13167 #endif
13168         .ndo_setup_tc           = bnxt_setup_tc,
13169 #ifdef CONFIG_RFS_ACCEL
13170         .ndo_rx_flow_steer      = bnxt_rx_flow_steer,
13171 #endif
13172         .ndo_bpf                = bnxt_xdp,
13173         .ndo_xdp_xmit           = bnxt_xdp_xmit,
13174         .ndo_bridge_getlink     = bnxt_bridge_getlink,
13175         .ndo_bridge_setlink     = bnxt_bridge_setlink,
13176 };
13177
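/* PCI remove callback: disable SR-IOV, detach the RDMA auxiliary
 * device, unregister the netdev and devlink, flush pending work, then
 * release interrupts, HWRM resources and all remaining driver memory.
 */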
13178 static void bnxt_remove_one(struct pci_dev *pdev)
13179 {
13180         struct net_device *dev = pci_get_drvdata(pdev);
13181         struct bnxt *bp = netdev_priv(dev);
13182
13183         if (BNXT_PF(bp))
13184                 bnxt_sriov_disable(bp);
13185
13186         bnxt_rdma_aux_device_uninit(bp);
13187
13188         bnxt_ptp_clear(bp);
13189         unregister_netdev(dev);
13190         clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
13191         /* Flush any pending tasks */
13192         cancel_work_sync(&bp->sp_task);
13193         cancel_delayed_work_sync(&bp->fw_reset_task);
13194         bp->sp_event = 0;
13195
13196         bnxt_dl_fw_reporters_destroy(bp);
13197         bnxt_dl_unregister(bp);
13198         bnxt_shutdown_tc(bp);
13199
13200         bnxt_clear_int_mode(bp);
13201         bnxt_hwrm_func_drv_unrgtr(bp);
13202         bnxt_free_hwrm_resources(bp);
13203         bnxt_ethtool_free(bp);
13204         bnxt_dcb_free(bp);
13205         kfree(bp->ptp_cfg);
13206         bp->ptp_cfg = NULL;
13207         kfree(bp->fw_health);
13208         bp->fw_health = NULL;
13209         bnxt_cleanup_pci(bp);
13210         bnxt_free_ctx_mem(bp);
13211         kfree(bp->ctx);
13212         bp->ctx = NULL;
13213         kfree(bp->rss_indir_tbl);
13214         bp->rss_indir_tbl = NULL;
13215         bnxt_free_port_stats(bp);
13216         free_netdev(dev);
13217 }
13218
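/* Query PHY capabilities from firmware.  When fw_dflt is set, also read
 * the current link state and initialize the ethtool link settings from
 * the firmware defaults.
 */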
13219 static int bnxt_probe_phy(struct bnxt *bp, bool fw_dflt)
13220 {
13221         int rc = 0;
13222         struct bnxt_link_info *link_info = &bp->link_info;
13223
13224         bp->phy_flags = 0;
13225         rc = bnxt_hwrm_phy_qcaps(bp);
13226         if (rc) {
13227                 netdev_err(bp->dev, "Probe phy can't get phy capabilities (rc: %x)\n",
13228                            rc);
13229                 return rc;
13230         }
13231         if (bp->phy_flags & BNXT_PHY_FL_NO_FCS)
13232                 bp->dev->priv_flags |= IFF_SUPP_NOFCS;
13233         else
13234                 bp->dev->priv_flags &= ~IFF_SUPP_NOFCS;
13235         if (!fw_dflt)
13236                 return 0;
13237
13238         mutex_lock(&bp->link_lock);
13239         rc = bnxt_update_link(bp, false);
13240         if (rc) {
13241                 mutex_unlock(&bp->link_lock);
13242                 netdev_err(bp->dev, "Probe phy can't update link (rc: %x)\n",
13243                            rc);
13244                 return rc;
13245         }
13246
13247         /* Older firmware does not have supported_auto_speeds, so assume
13248          * that all supported speeds can be autonegotiated.
13249          */
13250         if (link_info->auto_link_speeds && !link_info->support_auto_speeds)
13251                 link_info->support_auto_speeds = link_info->support_speeds;
13252
13253         bnxt_init_ethtool_link_settings(bp);
13254         mutex_unlock(&bp->link_lock);
13255         return 0;
13256 }
13257
13258 static int bnxt_get_max_irq(struct pci_dev *pdev)
13259 {
13260         u16 ctrl;
13261
13262         if (!pdev->msix_cap)
13263                 return 1;
13264
13265         pci_read_config_word(pdev, pdev->msix_cap + PCI_MSIX_FLAGS, &ctrl);
13266         return (ctrl & PCI_MSIX_FLAGS_QSIZE) + 1;
13267 }
13268
13269 static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx,
13270                                 int *max_cp)
13271 {
13272         struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
13273         int max_ring_grps = 0, max_irq;
13274
13275         *max_tx = hw_resc->max_tx_rings;
13276         *max_rx = hw_resc->max_rx_rings;
13277         *max_cp = bnxt_get_max_func_cp_rings_for_en(bp);
13278         max_irq = min_t(int, bnxt_get_max_func_irqs(bp) -
13279                         bnxt_get_ulp_msix_num(bp),
13280                         hw_resc->max_stat_ctxs - bnxt_get_ulp_stat_ctxs(bp));
13281         if (!(bp->flags & BNXT_FLAG_CHIP_P5))
13282                 *max_cp = min_t(int, *max_cp, max_irq);
13283         max_ring_grps = hw_resc->max_hw_ring_grps;
13284         if (BNXT_CHIP_TYPE_NITRO_A0(bp) && BNXT_PF(bp)) {
13285                 *max_cp -= 1;
13286                 *max_rx -= 2;
13287         }
13288         if (bp->flags & BNXT_FLAG_AGG_RINGS)
13289                 *max_rx >>= 1;
13290         if (bp->flags & BNXT_FLAG_CHIP_P5) {
13291                 bnxt_trim_rings(bp, max_rx, max_tx, *max_cp, false);
13292                 /* On P5 chips, the max_cp output parameter should be the number of available NQs */
13293                 *max_cp = max_irq;
13294         }
13295         *max_rx = min_t(int, *max_rx, max_ring_grps);
13296 }
13297
13298 int bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx, bool shared)
13299 {
13300         int rx, tx, cp;
13301
13302         _bnxt_get_max_rings(bp, &rx, &tx, &cp);
13303         *max_rx = rx;
13304         *max_tx = tx;
13305         if (!rx || !tx || !cp)
13306                 return -ENOMEM;
13307
13308         return bnxt_trim_rings(bp, max_rx, max_tx, cp, shared);
13309 }
13310
13311 static int bnxt_get_dflt_rings(struct bnxt *bp, int *max_rx, int *max_tx,
13312                                bool shared)
13313 {
13314         int rc;
13315
13316         rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared);
13317         if (rc && (bp->flags & BNXT_FLAG_AGG_RINGS)) {
13318                 /* Not enough rings, try disabling agg rings. */
13319                 bp->flags &= ~BNXT_FLAG_AGG_RINGS;
13320                 rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared);
13321                 if (rc) {
13322                         /* set BNXT_FLAG_AGG_RINGS back for consistency */
13323                         bp->flags |= BNXT_FLAG_AGG_RINGS;
13324                         return rc;
13325                 }
13326                 bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
13327                 bp->dev->hw_features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
13328                 bp->dev->features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
13329                 bnxt_set_ring_params(bp);
13330         }
13331
13332         if (bp->flags & BNXT_FLAG_ROCE_CAP) {
13333                 int max_cp, max_stat, max_irq;
13334
13335                 /* Reserve minimum resources for RoCE */
13336                 max_cp = bnxt_get_max_func_cp_rings(bp);
13337                 max_stat = bnxt_get_max_func_stat_ctxs(bp);
13338                 max_irq = bnxt_get_max_func_irqs(bp);
13339                 if (max_cp <= BNXT_MIN_ROCE_CP_RINGS ||
13340                     max_irq <= BNXT_MIN_ROCE_CP_RINGS ||
13341                     max_stat <= BNXT_MIN_ROCE_STAT_CTXS)
13342                         return 0;
13343
13344                 max_cp -= BNXT_MIN_ROCE_CP_RINGS;
13345                 max_irq -= BNXT_MIN_ROCE_CP_RINGS;
13346                 max_stat -= BNXT_MIN_ROCE_STAT_CTXS;
13347                 max_cp = min_t(int, max_cp, max_irq);
13348                 max_cp = min_t(int, max_cp, max_stat);
13349                 rc = bnxt_trim_rings(bp, max_rx, max_tx, max_cp, shared);
13350                 if (rc)
13351                         rc = 0;
13352         }
13353         return rc;
13354 }
13355
13356 /* In initial default shared ring setting, each shared ring must have a
13357  * RX/TX ring pair.
13358  */
13359 static void bnxt_trim_dflt_sh_rings(struct bnxt *bp)
13360 {
13361         bp->cp_nr_rings = min_t(int, bp->tx_nr_rings_per_tc, bp->rx_nr_rings);
13362         bp->rx_nr_rings = bp->cp_nr_rings;
13363         bp->tx_nr_rings_per_tc = bp->cp_nr_rings;
13364         bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
13365 }
13366
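/* Choose the default ring counts: start from the RSS default (one ring
 * in a kdump kernel), cap by available hardware resources and by the
 * CPU count on multi-port cards, then reserve the rings with firmware,
 * trimming again if the reservation came back smaller.
 */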
13367 static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh)
13368 {
13369         int dflt_rings, max_rx_rings, max_tx_rings, rc;
13370
13371         if (!bnxt_can_reserve_rings(bp))
13372                 return 0;
13373
13374         if (sh)
13375                 bp->flags |= BNXT_FLAG_SHARED_RINGS;
13376         dflt_rings = is_kdump_kernel() ? 1 : netif_get_num_default_rss_queues();
13377         /* Reduce default rings on multi-port cards so that total default
13378          * rings do not exceed CPU count.
13379          */
13380         if (bp->port_count > 1) {
13381                 int max_rings =
13382                         max_t(int, num_online_cpus() / bp->port_count, 1);
13383
13384                 dflt_rings = min_t(int, dflt_rings, max_rings);
13385         }
13386         rc = bnxt_get_dflt_rings(bp, &max_rx_rings, &max_tx_rings, sh);
13387         if (rc)
13388                 return rc;
13389         bp->rx_nr_rings = min_t(int, dflt_rings, max_rx_rings);
13390         bp->tx_nr_rings_per_tc = min_t(int, dflt_rings, max_tx_rings);
13391         if (sh)
13392                 bnxt_trim_dflt_sh_rings(bp);
13393         else
13394                 bp->cp_nr_rings = bp->tx_nr_rings_per_tc + bp->rx_nr_rings;
13395         bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
13396
13397         rc = __bnxt_reserve_rings(bp);
13398         if (rc && rc != -ENODEV)
13399                 netdev_warn(bp->dev, "Unable to reserve tx rings\n");
13400         bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
13401         if (sh)
13402                 bnxt_trim_dflt_sh_rings(bp);
13403
13404         /* Rings may have been trimmed, re-reserve the trimmed rings. */
13405         if (bnxt_need_reserve_rings(bp)) {
13406                 rc = __bnxt_reserve_rings(bp);
13407                 if (rc && rc != -ENODEV)
13408                         netdev_warn(bp->dev, "2nd rings reservation failed.\n");
13409                 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
13410         }
13411         if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
13412                 bp->rx_nr_rings++;
13413                 bp->cp_nr_rings++;
13414         }
13415         if (rc) {
13416                 bp->tx_nr_rings = 0;
13417                 bp->rx_nr_rings = 0;
13418         }
13419         return rc;
13420 }
13421
13422 static int bnxt_init_dflt_ring_mode(struct bnxt *bp)
13423 {
13424         int rc;
13425
13426         if (bp->tx_nr_rings)
13427                 return 0;
13428
13429         bnxt_ulp_irq_stop(bp);
13430         bnxt_clear_int_mode(bp);
13431         rc = bnxt_set_dflt_rings(bp, true);
13432         if (rc) {
13433                 if (BNXT_VF(bp) && rc == -ENODEV)
13434                         netdev_err(bp->dev, "Cannot configure VF rings while PF is unavailable.\n");
13435                 else
13436                         netdev_err(bp->dev, "Not enough rings available.\n");
13437                 goto init_dflt_ring_err;
13438         }
13439         rc = bnxt_init_int_mode(bp);
13440         if (rc)
13441                 goto init_dflt_ring_err;
13442
13443         bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
13444
13445         bnxt_set_dflt_rfs(bp);
13446
13447 init_dflt_ring_err:
13448         bnxt_ulp_irq_restart(bp, rc);
13449         return rc;
13450 }
13451
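/* Re-query function capabilities and re-initialize the interrupt mode,
 * closing and reopening the NIC if it was running.
 */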
13452 int bnxt_restore_pf_fw_resources(struct bnxt *bp)
13453 {
13454         int rc;
13455
13456         ASSERT_RTNL();
13457         bnxt_hwrm_func_qcaps(bp);
13458
13459         if (netif_running(bp->dev))
13460                 __bnxt_close_nic(bp, true, false);
13461
13462         bnxt_ulp_irq_stop(bp);
13463         bnxt_clear_int_mode(bp);
13464         rc = bnxt_init_int_mode(bp);
13465         bnxt_ulp_irq_restart(bp, rc);
13466
13467         if (netif_running(bp->dev)) {
13468                 if (rc)
13469                         dev_close(bp->dev);
13470                 else
13471                         rc = bnxt_open_nic(bp, true, false);
13472         }
13473
13474         return rc;
13475 }
13476
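/* Set the initial netdev MAC address.  A PF uses the firmware-assigned
 * address; a VF uses the admin-assigned address if one is valid
 * (otherwise a random one) and then asks the PF to approve it.
 */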
13477 static int bnxt_init_mac_addr(struct bnxt *bp)
13478 {
13479         int rc = 0;
13480
13481         if (BNXT_PF(bp)) {
13482                 eth_hw_addr_set(bp->dev, bp->pf.mac_addr);
13483         } else {
13484 #ifdef CONFIG_BNXT_SRIOV
13485                 struct bnxt_vf_info *vf = &bp->vf;
13486                 bool strict_approval = true;
13487
13488                 if (is_valid_ether_addr(vf->mac_addr)) {
13489                         /* overwrite netdev dev_addr with admin VF MAC */
13490                         eth_hw_addr_set(bp->dev, vf->mac_addr);
13491                         /* Older PF driver or firmware may not approve this
13492                          * correctly.
13493                          */
13494                         strict_approval = false;
13495                 } else {
13496                         eth_hw_addr_random(bp->dev);
13497                 }
13498                 rc = bnxt_approve_mac(bp, bp->dev->dev_addr, strict_approval);
13499 #endif
13500         }
13501         return rc;
13502 }
13503
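/* Read the board part number and serial number from the read-only
 * section of the PCI VPD, if present.
 */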
13504 static void bnxt_vpd_read_info(struct bnxt *bp)
13505 {
13506         struct pci_dev *pdev = bp->pdev;
13507         unsigned int vpd_size, kw_len;
13508         int pos, size;
13509         u8 *vpd_data;
13510
13511         vpd_data = pci_vpd_alloc(pdev, &vpd_size);
13512         if (IS_ERR(vpd_data)) {
13513                 pci_warn(pdev, "Unable to read VPD\n");
13514                 return;
13515         }
13516
13517         pos = pci_vpd_find_ro_info_keyword(vpd_data, vpd_size,
13518                                            PCI_VPD_RO_KEYWORD_PARTNO, &kw_len);
13519         if (pos < 0)
13520                 goto read_sn;
13521
13522         size = min_t(int, kw_len, BNXT_VPD_FLD_LEN - 1);
13523         memcpy(bp->board_partno, &vpd_data[pos], size);
13524
13525 read_sn:
13526         pos = pci_vpd_find_ro_info_keyword(vpd_data, vpd_size,
13527                                            PCI_VPD_RO_KEYWORD_SERIALNO,
13528                                            &kw_len);
13529         if (pos < 0)
13530                 goto exit;
13531
13532         size = min_t(int, kw_len, BNXT_VPD_FLD_LEN - 1);
13533         memcpy(bp->board_serialno, &vpd_data[pos], size);
13534 exit:
13535         kfree(vpd_data);
13536 }
13537
13538 static int bnxt_pcie_dsn_get(struct bnxt *bp, u8 dsn[])
13539 {
13540         struct pci_dev *pdev = bp->pdev;
13541         u64 qword;
13542
13543         qword = pci_get_dsn(pdev);
13544         if (!qword) {
13545                 netdev_info(bp->dev, "Unable to read adapter's DSN\n");
13546                 return -EOPNOTSUPP;
13547         }
13548
13549         put_unaligned_le64(qword, dsn);
13550
13551         bp->flags |= BNXT_FLAG_DSN_VALID;
13552         return 0;
13553 }
13554
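/* Map the doorbell BAR (bp->bar1) once bp->db_size has been determined
 * by bnxt_fw_init_one_p2().
 */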
13555 static int bnxt_map_db_bar(struct bnxt *bp)
13556 {
13557         if (!bp->db_size)
13558                 return -ENODEV;
13559         bp->bar1 = pci_iomap(bp->pdev, 2, bp->db_size);
13560         if (!bp->bar1)
13561                 return -ENOMEM;
13562         return 0;
13563 }
13564
13565 void bnxt_print_device_info(struct bnxt *bp)
13566 {
13567         netdev_info(bp->dev, "%s found at mem %lx, node addr %pM\n",
13568                     board_info[bp->board_idx].name,
13569                     (long)pci_resource_start(bp->pdev, 0), bp->dev->dev_addr);
13570
13571         pcie_print_link_status(bp->pdev);
13572 }
13573
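/* PCI probe entry point: allocate the netdev, initialize the board and
 * firmware interfaces, set up features, MAC address, default rings and
 * interrupts, then register with devlink and the networking core.
 */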
13574 static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
13575 {
13576         struct net_device *dev;
13577         struct bnxt *bp;
13578         int rc, max_irqs;
13579
13580         if (pci_is_bridge(pdev))
13581                 return -ENODEV;
13582
13583         /* Clear any pending DMA transactions from crash kernel
13584          * while loading driver in capture kernel.
13585          */
13586         if (is_kdump_kernel()) {
13587                 pci_clear_master(pdev);
13588                 pcie_flr(pdev);
13589         }
13590
13591         max_irqs = bnxt_get_max_irq(pdev);
13592         dev = alloc_etherdev_mq(sizeof(*bp), max_irqs);
13593         if (!dev)
13594                 return -ENOMEM;
13595
13596         bp = netdev_priv(dev);
13597         bp->board_idx = ent->driver_data;
13598         bp->msg_enable = BNXT_DEF_MSG_ENABLE;
13599         bnxt_set_max_func_irqs(bp, max_irqs);
13600
13601         if (bnxt_vf_pciid(bp->board_idx))
13602                 bp->flags |= BNXT_FLAG_VF;
13603
13604         /* No devlink port registration in case of a VF */
13605         if (BNXT_PF(bp))
13606                 SET_NETDEV_DEVLINK_PORT(dev, &bp->dl_port);
13607
13608         if (pdev->msix_cap)
13609                 bp->flags |= BNXT_FLAG_MSIX_CAP;
13610
13611         rc = bnxt_init_board(pdev, dev);
13612         if (rc < 0)
13613                 goto init_err_free;
13614
13615         dev->netdev_ops = &bnxt_netdev_ops;
13616         dev->watchdog_timeo = BNXT_TX_TIMEOUT;
13617         dev->ethtool_ops = &bnxt_ethtool_ops;
13618         pci_set_drvdata(pdev, dev);
13619
13620         rc = bnxt_alloc_hwrm_resources(bp);
13621         if (rc)
13622                 goto init_err_pci_clean;
13623
13624         mutex_init(&bp->hwrm_cmd_lock);
13625         mutex_init(&bp->link_lock);
13626
13627         rc = bnxt_fw_init_one_p1(bp);
13628         if (rc)
13629                 goto init_err_pci_clean;
13630
13631         if (BNXT_PF(bp))
13632                 bnxt_vpd_read_info(bp);
13633
13634         if (BNXT_CHIP_P5(bp)) {
13635                 bp->flags |= BNXT_FLAG_CHIP_P5;
13636                 if (BNXT_CHIP_SR2(bp))
13637                         bp->flags |= BNXT_FLAG_CHIP_SR2;
13638         }
13639
13640         rc = bnxt_alloc_rss_indir_tbl(bp);
13641         if (rc)
13642                 goto init_err_pci_clean;
13643
13644         rc = bnxt_fw_init_one_p2(bp);
13645         if (rc)
13646                 goto init_err_pci_clean;
13647
13648         rc = bnxt_map_db_bar(bp);
13649         if (rc) {
13650                 dev_err(&pdev->dev, "Cannot map doorbell BAR rc = %d, aborting\n",
13651                         rc);
13652                 goto init_err_pci_clean;
13653         }
13654
13655         dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
13656                            NETIF_F_TSO | NETIF_F_TSO6 |
13657                            NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
13658                            NETIF_F_GSO_IPXIP4 |
13659                            NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
13660                            NETIF_F_GSO_PARTIAL | NETIF_F_RXHASH |
13661                            NETIF_F_RXCSUM | NETIF_F_GRO;
13662
13663         if (BNXT_SUPPORTS_TPA(bp))
13664                 dev->hw_features |= NETIF_F_LRO;
13665
13666         dev->hw_enc_features =
13667                         NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
13668                         NETIF_F_TSO | NETIF_F_TSO6 |
13669                         NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
13670                         NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
13671                         NETIF_F_GSO_IPXIP4 | NETIF_F_GSO_PARTIAL;
13672         dev->udp_tunnel_nic_info = &bnxt_udp_tunnels;
13673
13674         dev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM |
13675                                     NETIF_F_GSO_GRE_CSUM;
13676         dev->vlan_features = dev->hw_features | NETIF_F_HIGHDMA;
13677         if (bp->fw_cap & BNXT_FW_CAP_VLAN_RX_STRIP)
13678                 dev->hw_features |= BNXT_HW_FEATURE_VLAN_ALL_RX;
13679         if (bp->fw_cap & BNXT_FW_CAP_VLAN_TX_INSERT)
13680                 dev->hw_features |= BNXT_HW_FEATURE_VLAN_ALL_TX;
13681         if (BNXT_SUPPORTS_TPA(bp))
13682                 dev->hw_features |= NETIF_F_GRO_HW;
13683         dev->features |= dev->hw_features | NETIF_F_HIGHDMA;
13684         if (dev->features & NETIF_F_GRO_HW)
13685                 dev->features &= ~NETIF_F_LRO;
13686         dev->priv_flags |= IFF_UNICAST_FLT;
13687
13688         netif_set_tso_max_size(dev, GSO_MAX_SIZE);
13689
13690         dev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
13691                             NETDEV_XDP_ACT_RX_SG;
13692
13693 #ifdef CONFIG_BNXT_SRIOV
13694         init_waitqueue_head(&bp->sriov_cfg_wait);
13695 #endif
13696         if (BNXT_SUPPORTS_TPA(bp)) {
13697                 bp->gro_func = bnxt_gro_func_5730x;
13698                 if (BNXT_CHIP_P4(bp))
13699                         bp->gro_func = bnxt_gro_func_5731x;
13700                 else if (BNXT_CHIP_P5(bp))
13701                         bp->gro_func = bnxt_gro_func_5750x;
13702         }
13703         if (!BNXT_CHIP_P4_PLUS(bp))
13704                 bp->flags |= BNXT_FLAG_DOUBLE_DB;
13705
13706         rc = bnxt_init_mac_addr(bp);
13707         if (rc) {
13708                 dev_err(&pdev->dev, "Unable to initialize mac address.\n");
13709                 rc = -EADDRNOTAVAIL;
13710                 goto init_err_pci_clean;
13711         }
13712
13713         if (BNXT_PF(bp)) {
13714                 /* Read the adapter's DSN to use as the eswitch switch_id */
13715                 rc = bnxt_pcie_dsn_get(bp, bp->dsn);
13716         }
13717
13718         /* MTU range: 60 - FW defined max */
13719         dev->min_mtu = ETH_ZLEN;
13720         dev->max_mtu = bp->max_mtu;
13721
13722         rc = bnxt_probe_phy(bp, true);
13723         if (rc)
13724                 goto init_err_pci_clean;
13725
13726         bnxt_set_rx_skb_mode(bp, false);
13727         bnxt_set_tpa_flags(bp);
13728         bnxt_set_ring_params(bp);
13729         rc = bnxt_set_dflt_rings(bp, true);
13730         if (rc) {
13731                 if (BNXT_VF(bp) && rc == -ENODEV) {
13732                         netdev_err(bp->dev, "Cannot configure VF rings while PF is unavailable.\n");
13733                 } else {
13734                         netdev_err(bp->dev, "Not enough rings available.\n");
13735                         rc = -ENOMEM;
13736                 }
13737                 goto init_err_pci_clean;
13738         }
13739
13740         bnxt_fw_init_one_p3(bp);
13741
13742         bnxt_init_dflt_coal(bp);
13743
13744         if (dev->hw_features & BNXT_HW_FEATURE_VLAN_ALL_RX)
13745                 bp->flags |= BNXT_FLAG_STRIP_VLAN;
13746
13747         rc = bnxt_init_int_mode(bp);
13748         if (rc)
13749                 goto init_err_pci_clean;
13750
13751         /* No TC has been set yet and rings may have been trimmed due to
13752          * limited MSIX, so we re-initialize the TX rings per TC.
13753          */
13754         bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
13755
13756         if (BNXT_PF(bp)) {
13757                 if (!bnxt_pf_wq) {
13758                         bnxt_pf_wq =
13759                                 create_singlethread_workqueue("bnxt_pf_wq");
13760                         if (!bnxt_pf_wq) {
13761                                 dev_err(&pdev->dev, "Unable to create workqueue.\n");
13762                                 rc = -ENOMEM;
13763                                 goto init_err_pci_clean;
13764                         }
13765                 }
13766                 rc = bnxt_init_tc(bp);
13767                 if (rc)
13768                         netdev_err(dev, "Failed to initialize TC flower offload, err = %d.\n",
13769                                    rc);
13770         }
13771
13772         bnxt_inv_fw_health_reg(bp);
13773         rc = bnxt_dl_register(bp);
13774         if (rc)
13775                 goto init_err_dl;
13776
13777         rc = register_netdev(dev);
13778         if (rc)
13779                 goto init_err_cleanup;
13780
13781         bnxt_dl_fw_reporters_create(bp);
13782
13783         bnxt_rdma_aux_device_init(bp);
13784
13785         bnxt_print_device_info(bp);
13786
13787         pci_save_state(pdev);
13788
13789         return 0;
13790 init_err_cleanup:
13791         bnxt_dl_unregister(bp);
13792 init_err_dl:
13793         bnxt_shutdown_tc(bp);
13794         bnxt_clear_int_mode(bp);
13795
13796 init_err_pci_clean:
13797         bnxt_hwrm_func_drv_unrgtr(bp);
13798         bnxt_free_hwrm_resources(bp);
13799         bnxt_ethtool_free(bp);
13800         bnxt_ptp_clear(bp);
13801         kfree(bp->ptp_cfg);
13802         bp->ptp_cfg = NULL;
13803         kfree(bp->fw_health);
13804         bp->fw_health = NULL;
13805         bnxt_cleanup_pci(bp);
13806         bnxt_free_ctx_mem(bp);
13807         kfree(bp->ctx);
13808         bp->ctx = NULL;
13809         kfree(bp->rss_indir_tbl);
13810         bp->rss_indir_tbl = NULL;
13811
13812 init_err_free:
13813         free_netdev(dev);
13814         return rc;
13815 }
13816
13817 static void bnxt_shutdown(struct pci_dev *pdev)
13818 {
13819         struct net_device *dev = pci_get_drvdata(pdev);
13820         struct bnxt *bp;
13821
13822         if (!dev)
13823                 return;
13824
13825         rtnl_lock();
13826         bp = netdev_priv(dev);
13827         if (!bp)
13828                 goto shutdown_exit;
13829
13830         if (netif_running(dev))
13831                 dev_close(dev);
13832
13833         bnxt_clear_int_mode(bp);
13834         pci_disable_device(pdev);
13835
13836         if (system_state == SYSTEM_POWER_OFF) {
13837                 pci_wake_from_d3(pdev, bp->wol);
13838                 pci_set_power_state(pdev, PCI_D3hot);
13839         }
13840
13841 shutdown_exit:
13842         rtnl_unlock();
13843 }
13844
13845 #ifdef CONFIG_PM_SLEEP
13846 static int bnxt_suspend(struct device *device)
13847 {
13848         struct net_device *dev = dev_get_drvdata(device);
13849         struct bnxt *bp = netdev_priv(dev);
13850         int rc = 0;
13851
13852         rtnl_lock();
13853         bnxt_ulp_stop(bp);
13854         if (netif_running(dev)) {
13855                 netif_device_detach(dev);
13856                 rc = bnxt_close(dev);
13857         }
13858         bnxt_hwrm_func_drv_unrgtr(bp);
13859         pci_disable_device(bp->pdev);
13860         bnxt_free_ctx_mem(bp);
13861         kfree(bp->ctx);
13862         bp->ctx = NULL;
13863         rtnl_unlock();
13864         return rc;
13865 }
13866
13867 static int bnxt_resume(struct device *device)
13868 {
13869         struct net_device *dev = dev_get_drvdata(device);
13870         struct bnxt *bp = netdev_priv(dev);
13871         int rc = 0;
13872
13873         rtnl_lock();
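        /* Reverse of bnxt_suspend(): re-enable the PCI device, re-establish
         * firmware communication (version query, function reset, capabilities,
         * driver registration), then reopen the netdev if it was running.
         */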
13874         rc = pci_enable_device(bp->pdev);
13875         if (rc) {
13876                 netdev_err(dev, "Cannot re-enable PCI device during resume, err = %d\n",
13877                            rc);
13878                 goto resume_exit;
13879         }
13880         pci_set_master(bp->pdev);
13881         if (bnxt_hwrm_ver_get(bp)) {
13882                 rc = -ENODEV;
13883                 goto resume_exit;
13884         }
13885         rc = bnxt_hwrm_func_reset(bp);
13886         if (rc) {
13887                 rc = -EBUSY;
13888                 goto resume_exit;
13889         }
13890
13891         rc = bnxt_hwrm_func_qcaps(bp);
13892         if (rc)
13893                 goto resume_exit;
13894
13895         if (bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, false)) {
13896                 rc = -ENODEV;
13897                 goto resume_exit;
13898         }
13899
13900         bnxt_get_wol_settings(bp);
13901         if (netif_running(dev)) {
13902                 rc = bnxt_open(dev);
13903                 if (!rc)
13904                         netif_device_attach(dev);
13905         }
13906
13907 resume_exit:
13908         bnxt_ulp_start(bp, rc);
13909         if (!rc)
13910                 bnxt_reenable_sriov(bp);
13911         rtnl_unlock();
13912         return rc;
13913 }
13914
13915 static SIMPLE_DEV_PM_OPS(bnxt_pm_ops, bnxt_suspend, bnxt_resume);
13916 #define BNXT_PM_OPS (&bnxt_pm_ops)
13917
13918 #else
13919
13920 #define BNXT_PM_OPS NULL
13921
13922 #endif /* CONFIG_PM_SLEEP */
13923
13924 /**
13925  * bnxt_io_error_detected - called when a PCI error is detected
13926  * @pdev: Pointer to PCI device
13927  * @state: The current PCI connection state
13928  *
13929  * This function is called after a PCI bus error affecting
13930  * this device has been detected.
13931  */
13932 static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev,
13933                                                pci_channel_state_t state)
13934 {
13935         struct net_device *netdev = pci_get_drvdata(pdev);
13936         struct bnxt *bp = netdev_priv(netdev);
13937
13938         netdev_info(netdev, "PCI I/O error detected\n");
13939
13940         rtnl_lock();
13941         netif_device_detach(netdev);
13942
13943         bnxt_ulp_stop(bp);
13944
13945         if (state == pci_channel_io_perm_failure) {
13946                 rtnl_unlock();
13947                 return PCI_ERS_RESULT_DISCONNECT;
13948         }
13949
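        /* Remember that the channel was frozen; bnxt_io_slot_reset() uses
         * this to force a rewrite of the BARs after the reset.
         */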
13950         if (state == pci_channel_io_frozen)
13951                 set_bit(BNXT_STATE_PCI_CHANNEL_IO_FROZEN, &bp->state);
13952
13953         if (netif_running(netdev))
13954                 bnxt_close(netdev);
13955
13956         if (pci_is_enabled(pdev))
13957                 pci_disable_device(pdev);
13958         bnxt_free_ctx_mem(bp);
13959         kfree(bp->ctx);
13960         bp->ctx = NULL;
13961         rtnl_unlock();
13962
13963         /* Request a slot reset. */
13964         return PCI_ERS_RESULT_NEED_RESET;
13965 }
13966
13967 /**
13968  * bnxt_io_slot_reset - called after the PCI bus has been reset.
13969  * @pdev: Pointer to PCI device
13970  *
13971  * Restart the card from scratch, as if from a cold boot.
13972  * At this point, the card has experienced a hard reset,
13973  * followed by fix-ups by the BIOS, and has its config space
13974  * set up identically to what it was at cold boot.
13975  */
13976 static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
13977 {
13978         pci_ers_result_t result = PCI_ERS_RESULT_DISCONNECT;
13979         struct net_device *netdev = pci_get_drvdata(pdev);
13980         struct bnxt *bp = netdev_priv(netdev);
13981         int retry = 0;
13982         int err = 0;
13983         int off;
13984
13985         netdev_info(bp->dev, "PCI Slot Reset\n");
13986
13987         rtnl_lock();
13988
13989         if (pci_enable_device(pdev)) {
13990                 dev_err(&pdev->dev,
13991                         "Cannot re-enable PCI device after reset.\n");
13992         } else {
13993                 pci_set_master(pdev);
13994                 /* Upon a fatal error, the device's internal logic that
13995                  * latches the BAR values is reset and is restored only
13996                  * when the BARs are rewritten.
13997                  *
13998                  * Since pci_restore_state() does not rewrite the BARs if
13999                  * their values match the previously saved values, write
14000                  * the BARs to 0 first to force a restore after a fatal error.
14001                  */
14002                 if (test_and_clear_bit(BNXT_STATE_PCI_CHANNEL_IO_FROZEN,
14003                                        &bp->state)) {
14004                         for (off = PCI_BASE_ADDRESS_0;
14005                              off <= PCI_BASE_ADDRESS_5; off += 4)
14006                                 pci_write_config_dword(bp->pdev, off, 0);
14007                 }
14008                 pci_restore_state(pdev);
14009                 pci_save_state(pdev);
14010
14011                 bnxt_inv_fw_health_reg(bp);
14012                 bnxt_try_map_fw_health_reg(bp);
14013
14014                 /* In some PCIe AER scenarios, firmware may take up to
14015                  * 10 seconds to become ready.
14016                  */
14017                 do {
14018                         err = bnxt_try_recover_fw(bp);
14019                         if (!err)
14020                                 break;
14021                         retry++;
14022                 } while (retry < BNXT_FW_SLOT_RESET_RETRY);
14023
14024                 if (err) {
14025                         dev_err(&pdev->dev, "Firmware not ready\n");
14026                         goto reset_exit;
14027                 }
14028
14029                 err = bnxt_hwrm_func_reset(bp);
14030                 if (!err)
14031                         result = PCI_ERS_RESULT_RECOVERED;
14032
14033                 bnxt_ulp_irq_stop(bp);
14034                 bnxt_clear_int_mode(bp);
14035                 err = bnxt_init_int_mode(bp);
14036                 bnxt_ulp_irq_restart(bp, err);
14037         }
14038
14039 reset_exit:
14040         bnxt_clear_reservations(bp, true);
14041         rtnl_unlock();
14042
14043         return result;
14044 }
14045
14046 /**
14047  * bnxt_io_resume - called when traffic can start flowing again.
14048  * @pdev: Pointer to PCI device
14049  *
14050  * This callback is called when the error recovery driver tells
14051  * us that it's OK to resume normal operation.
14052  */
14053 static void bnxt_io_resume(struct pci_dev *pdev)
14054 {
14055         struct net_device *netdev = pci_get_drvdata(pdev);
14056         struct bnxt *bp = netdev_priv(netdev);
14057         int err;
14058
14059         netdev_info(bp->dev, "PCI Slot Resume\n");
14060         rtnl_lock();
14061
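        /* Refresh function capabilities from the recovered firmware before
         * reopening the netdev and restarting the ULP (RDMA) driver.
         */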
14062         err = bnxt_hwrm_func_qcaps(bp);
14063         if (!err && netif_running(netdev))
14064                 err = bnxt_open(netdev);
14065
14066         bnxt_ulp_start(bp, err);
14067         if (!err) {
14068                 bnxt_reenable_sriov(bp);
14069                 netif_device_attach(netdev);
14070         }
14071
14072         rtnl_unlock();
14073 }
14074
14075 static const struct pci_error_handlers bnxt_err_handler = {
14076         .error_detected = bnxt_io_error_detected,
14077         .slot_reset     = bnxt_io_slot_reset,
14078         .resume         = bnxt_io_resume
14079 };
14080
14081 static struct pci_driver bnxt_pci_driver = {
14082         .name           = DRV_MODULE_NAME,
14083         .id_table       = bnxt_pci_tbl,
14084         .probe          = bnxt_init_one,
14085         .remove         = bnxt_remove_one,
14086         .shutdown       = bnxt_shutdown,
14087         .driver.pm      = BNXT_PM_OPS,
14088         .err_handler    = &bnxt_err_handler,
14089 #if defined(CONFIG_BNXT_SRIOV)
14090         .sriov_configure = bnxt_sriov_configure,
14091 #endif
14092 };
14093
14094 static int __init bnxt_init(void)
14095 {
14096         int err;
14097
14098         bnxt_debug_init();
14099         err = pci_register_driver(&bnxt_pci_driver);
14100         if (err) {
14101                 bnxt_debug_exit();
14102                 return err;
14103         }
14104
14105         return 0;
14106 }
14107
14108 static void __exit bnxt_exit(void)
14109 {
14110         pci_unregister_driver(&bnxt_pci_driver);
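        /* Destroy the PF workqueue created during the first PF probe, if any. */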
14111         if (bnxt_pf_wq)
14112                 destroy_workqueue(bnxt_pf_wq);
14113         bnxt_debug_exit();
14114 }
14115
14116 module_init(bnxt_init);
14117 module_exit(bnxt_exit);